repo_id
stringlengths 21
96
| file_path
stringlengths 31
155
| content
stringlengths 1
92.9M
| __index_level_0__
int64 0
0
|
---|---|---|---|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/unordered_set.h
|
#ifndef GALOIS_C__11_COMPAT_UNORDERED_SET_H
#define GALOIS_C__11_COMPAT_UNORDERED_SET_H
#include <boost/tr1/unordered_set.hpp>
namespace std { using namespace std::tr1; }
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/functional.h
|
#ifndef GALOIS_C__11_COMPAT_FUNCTIONAL_H
#define GALOIS_C__11_COMPAT_FUNCTIONAL_H
#include <functional>
#include <boost/tr1/functional.hpp>
namespace std { using namespace std::tr1; }
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/atomic_internal_xlc_ppc.h
|
#include <builtins.h>
#error "Broken"
/*
* From:
* http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
* and
* Batty et al. Clarifying and Compiling C/C++ Concurrency: from C++11 to POWER. POPL 2011.
* (http://www.cl.cam.ac.uk/~pes20/cppppc/)
*/
namespace detail {
inline bool atomic_compare_exchange_strong32(volatile int* __a, int* __e, int* __d, std::memory_order _succ, std::memory_order _fail) {
bool tmp;
int v = *__e;
switch (_succ) {
case std::memory_order_relaxed: return __compare_and_swap(__a, &v, *__d);
case std::memory_order_consume: abort();
case std::memory_order_acquire: tmp = __compare_and_swap(__a, &v, *__d); __isync(); return tmp;
case std::memory_order_release: __lwsync(); return __compare_and_swap(__a, &v, *__d);
case std::memory_order_acq_rel: __lwsync(); tmp = __compare_and_swap(__a, &v, *__d); __isync(); return tmp;
case std::memory_order_seq_cst: __sync(); tmp = __compare_and_swap(__a, &v, *__d); __isync(); return tmp;
default: abort();
}
// v contains old value in __a;
return tmp;
}
#ifdef __PPC64__
inline bool atomic_compare_exchange_strong64(volatile long* __a, long* __e, long* __d, std::memory_order _succ, std::memory_order _fail) {
bool tmp;
long v = *__e;
switch (_succ) {
case std::memory_order_relaxed: return __compare_and_swaplp(__a, &v, *__d);
case std::memory_order_consume: abort();
case std::memory_order_acquire: tmp = __compare_and_swaplp(__a, &v, *__d); __isync(); return tmp;
case std::memory_order_release: __lwsync(); return __compare_and_swaplp(__a, &v, *__d);
case std::memory_order_acq_rel: __lwsync(); tmp = __compare_and_swaplp(__a, &v, *__d); __isync(); return tmp;
case std::memory_order_seq_cst: __sync(); tmp = __compare_and_swaplp(__a, &v, *__d); __isync(); return tmp;
default: abort();
}
// v contains old value in __a;
return tmp;
}
#endif
template<class _Tp>
bool atomic_compare_exchange_strong(volatile _Tp* __a, _Tp* __e, _Tp* __d, std::memory_order _succ, std::memory_order _fail) {
// __sync_XXX gcc-type intrinsics issue a full barrier so implement using
// lower level intrinsics
#ifdef __PPC64__
static_assert(sizeof(_Tp) <= 8, "Operation undefined on larger types");
#else
static_assert(sizeof(_Tp) <= 4, "Operation undefined on larger types");
#endif
if (sizeof(_Tp) <= 4)
return detail::atomic_compare_exchange_strong32(reinterpret_cast<volatile int*>(__a), reinterpret_cast<int*>(__e), reinterpret_cast<int*>(__d), _succ, _fail);
#ifdef __PPC64__
else
return detail::atomic_compare_exchange_strong64(reinterpret_cast<volatile long*>(__a), reinterpret_cast<long*>(__e), reinterpret_cast<long*>(__d), _succ, _fail);
#endif
abort();
return false;
}
/*
* Weak fence (cmp; bc; isync) which depends on PowerPC guaranteeing that
* loads on which a branch condition (bc) instruction depends are completed
* before and any stores happening after.
*
* See:
* http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
*/
template<class _Tp>
void weak_fence(volatile _Tp* __a) {
// TODO: implement this in asm
while (*__a != *__a)
;
__lwsync();
}
} // end detail
template<class _Tp>
void __atomic_store(volatile _Tp* __a, _Tp* __i, std::memory_order _m) {
switch (_m) {
case std::memory_order_relaxed: *__a = *__i; break;
case std::memory_order_consume:
case std::memory_order_acquire: abort(); break;
case std::memory_order_release:
case std::memory_order_acq_rel: __lwsync(); *__a = *__i;
case std::memory_order_seq_cst: __sync(); *__a = *__i; break;
default: abort();
}
}
template<class _Tp>
void __atomic_load(volatile _Tp* __a, _Tp* __i, std::memory_order _m) {
switch (_m) {
case std::memory_order_relaxed: *__i = *__a; break;
case std::memory_order_consume:
case std::memory_order_acquire: *__i = *__a; detail::weak_fence(__i); break;
case std::memory_order_release: abort(); break;
case std::memory_order_acq_rel: *__i = *__a; detail::weak_fence(__i); break;
case std::memory_order_seq_cst: __sync(); *__i = *__a; detail::weak_fence(__i); break;
default: abort();
}
}
template<class _Tp>
void __atomic_load(volatile const _Tp* __a, _Tp* __i, std::memory_order _m) {
__atomic_load(const_cast<_Tp*>(__a), __i, _m);
}
template<class _Tp>
bool __atomic_compare_exchange(volatile _Tp* __a, _Tp* __e, _Tp* __d, bool _weak, std::memory_order _succ, std::memory_order _fail) {
return detail::atomic_compare_exchange_strong(__a, __e, __d, _succ, _fail);
}
template<class _Tp>
_Tp __atomic_fetch_xor(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
_Tp old;
_Tp newval;
do {
old = *__a;
newval = old ^ __i;
} while (!__atomic_compare_exchange(__a, &old, &newval, true, _m, _m));
return old;
}
template<class _Tp>
_Tp __atomic_fetch_add(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
_Tp old;
_Tp newval;
do {
old = *__a;
newval = old + __i;
} while (!__atomic_compare_exchange(__a, &old, &newval, true, _m, _m));
return old;
}
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/random.h
|
#ifndef GALOIS_C__11_COMPAT_RANDOM_H
#define GALOIS_C__11_COMPAT_RANDOM_H
#include <boost/tr1/random.hpp>
namespace std { using namespace std::tr1; }
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/deque.h
|
#ifndef GALOIS_C__11_COMPAT_DEQUE_H
#define GALOIS_C__11_COMPAT_DEQUE_H
#include <deque>
#define GALOIS_CXX11_DEQUE_HAS_NO_EMPLACE
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/algorithm.h
|
#ifndef GALOIS_C__11_COMPAT_ALGORITHM_H
#define GALOIS_C__11_COMPAT_ALGORITHM_H
#include <algorithm>
#include <boost/tr1/type_traits.hpp>
namespace std {
template<typename _Tp>
constexpr typename std::tr1::remove_reference<_Tp>::type&&
move(_Tp&& __t) {
return static_cast<typename std::tr1::remove_reference<_Tp>::type&&>(__t);
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/unordered_map.h
|
#ifndef GALOIS_C__11_COMPAT_UNORDERED_MAP_H
#define GALOIS_C__11_COMPAT_UNORDERED_MAP_H
#include <boost/tr1/unordered_map.hpp>
namespace std { using namespace std::tr1; }
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/utility.h
|
#ifndef GALOIS_C__11_COMPAT_UTILITY_H
#define GALOIS_C__11_COMPAT_UTILITY_H
#include <boost/tr1/type_traits.hpp>
namespace std {
template<typename _Tp>
constexpr _Tp&& forward(typename std::tr1::remove_reference<_Tp>::type& __t) {
return static_cast<_Tp&&>(__t);
}
template<typename _Tp>
constexpr _Tp&& forward(typename std::tr1::remove_reference<_Tp>::type&& __t) {
return static_cast<_Tp&&>(__t);
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/vector.h
|
#ifndef GALOIS_C__11_COMPAT_VECTOR_H
#define GALOIS_C__11_COMPAT_VECTOR_H
#include <vector>
#define GALOIS_CXX11_VECTOR_HAS_NO_EMPLACE
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/atomic.h
|
#ifndef GALOIS_C__11_COMPAT_ATOMIC_H
#define GALOIS_C__11_COMPAT_ATOMIC_H
#include "type_traits.h"
namespace std {
typedef enum memory_order
{
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
} memory_order;
}
#if __IBMCPP__ && __PPC__
//# include "atomic_internal_xlc_ppc.h"
# include "atomic_internal_gcc_generic.h"
#elif __GNUC__
# include "atomic_internal_gcc_generic.h"
#else
# error "Unknown machine architecture"
#endif
namespace std {
template<class _Tp>
class atomic {
_Tp _M_i;
atomic(const atomic&);
atomic& operator=(const atomic&);
atomic& operator=(const atomic&) volatile;
public:
atomic() { }
constexpr atomic(_Tp __i): _M_i(__i) { }
operator _Tp() const { return load(); }
operator _Tp() const volatile { return load(); }
_Tp operator=(_Tp __i) { store(__i); return __i; }
_Tp operator=(_Tp __i) volatile { store(__i); return __i; }
void store(_Tp __i, memory_order _m = memory_order_seq_cst) { __atomic_store(&_M_i, &__i, _m); }
void store(_Tp __i, memory_order _m = memory_order_seq_cst) volatile { __atomic_store(&_M_i, &__i, _m); }
_Tp load(memory_order _m = memory_order_seq_cst) const {
_Tp tmp;
__atomic_load(&_M_i, &tmp, _m);
return tmp;
}
_Tp load(memory_order _m = memory_order_seq_cst) const volatile {
_Tp tmp;
__atomic_load(&_M_i, &tmp, _m);
return tmp;
}
_Tp exchange(_Tp __i, memory_order _m = memory_order_seq_cst) {
return __atomic_exchange(&_M_i, __i, _m);
}
_Tp exchange(_Tp __i, memory_order _m = memory_order_seq_cst) volatile {
return __atomic_exchange(&_M_i, __i, _m);
}
bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) {
return __atomic_compare_exchange(&_M_i, &__e, &__i, true, _m1, _m2);
}
bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) volatile {
return __atomic_compare_exchange(&_M_i, &__e, &__i, true, _m1, _m2);
}
bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) {
return compare_exchange_weak(__e, __i, _m, _m);
}
bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) volatile {
return compare_exchange_weak(__e, __i, _m, _m);
}
bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) {
return __atomic_compare_exchange(&_M_i, &__e, &__i, false, _m1, _m2);
}
bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m1, memory_order _m2) volatile {
return __atomic_compare_exchange(&_M_i, &__e, &__i, false, _m1, _m2);
}
bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) {
return compare_exchange_strong(__e, __i, _m, _m);
}
bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order _m = memory_order_seq_cst) volatile {
return compare_exchange_strong(__e, __i, _m, _m);
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp fetch_xor(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) {
return __atomic_fetch_xor(&_M_i, __i, _m);
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp fetch_xor(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) volatile {
return __atomic_fetch_xor(&_M_i, __i, _m);
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp fetch_or(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) {
return __atomic_fetch_or(&_M_i, __i, _m);
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp fetch_or(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) volatile {
return __atomic_fetch_or(&_M_i, __i, _m);
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp fetch_add(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) {
return __atomic_fetch_add(&_M_i, __i, _m);
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp operator++() {
return fetch_add(1) + 1;
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp fetch_add(_Tp __i, memory_order _m = memory_order_seq_cst, typename std::enable_if<Enable>::type* = 0) volatile {
return __atomic_fetch_add(&_M_i, __i, _m);
}
template<bool Enable = std::is_integral<_Tp>::value>
_Tp operator++() volatile {
return fetch_add(1) + 1;
}
};
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/type_traits.h
|
#ifndef GALOIS_C__11_COMPAT_TYPE_TRAITS_H
#define GALOIS_C__11_COMPAT_TYPE_TRAITS_H
#include <boost/tr1/functional.hpp>
#include <boost/tr1/type_traits.hpp>
namespace std {
using namespace std::tr1;
template<bool, typename _Tp = void>
struct enable_if { };
template<typename _Tp>
struct enable_if<true, _Tp> { typedef _Tp type; };
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ParallelWork.h
|
/** Galois scheduler and runtime -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* Implementation of the Galois foreach iterator. Includes various
* specializations to operators to reduce runtime overhead.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_PARALLELWORK_H
#define GALOIS_RUNTIME_PARALLELWORK_H
#include "Galois/Mem.h"
#include "Galois/Statistic.h"
#include "Galois/Runtime/Barrier.h"
#include "Galois/Runtime/Context.h"
#include "Galois/Runtime/ForEachTraits.h"
#include "Galois/Runtime/Range.h"
#include "Galois/Runtime/Support.h"
#include "Galois/Runtime/Termination.h"
#include "Galois/Runtime/ThreadPool.h"
#include "Galois/Runtime/UserContextAccess.h"
#include "Galois/WorkList/GFifo.h"
#include <algorithm>
#include <functional>
#ifdef GALOIS_USE_HTM
#include <speculation.h>
#endif
namespace Galois {
//! Internal Galois functionality - Use at your own risk.
namespace Runtime {
namespace {
template<bool Enabled>
class LoopStatistics {
unsigned long conflicts;
unsigned long iterations;
const char* loopname;
#ifdef GALOIS_USE_HTM
TmReport_s start;
void init() {
if (LL::getTID()) return;
// Dummy transaction to ensure that tm_get_all_stats doesn't return
// garbage
#pragma tm_atomic
{
conflicts = 0;
}
tm_get_all_stats(&start);
}
void report() {
if (LL::getTID()) return;
TmReport_s stop;
tm_get_all_stats(&stop);
reportStat(loopname, "HTMTransactions",
stop.totalTransactions - start.totalTransactions);
reportStat(loopname, "HTMRollbacks",
stop.totalRollbacks - start.totalRollbacks);
reportStat(loopname, "HTMSerializedJMV",
stop.totalSerializedJMV - start.totalSerializedJMV);
reportStat(loopname, "HTMSerializedMAXRB",
stop.totalSerializedMAXRB - start.totalSerializedMAXRB);
reportStat(loopname, "HTMSerializedOTHER",
stop.totalSerializedOTHER - start.totalSerializedOTHER);
tm_print_stats();
}
#else
void init() { }
void report() { }
#endif
public:
explicit LoopStatistics(const char* ln) :conflicts(0), iterations(0), loopname(ln) { init(); }
~LoopStatistics() {
reportStat(loopname, "Conflicts", conflicts);
reportStat(loopname, "Iterations", iterations);
report();
}
inline void inc_iterations(int amount = 1) {
iterations += amount;
}
inline void inc_conflicts() {
++conflicts;
}
};
template <>
class LoopStatistics<false> {
public:
explicit LoopStatistics(const char* ln) {}
inline void inc_iterations(int amount = 1) const { }
inline void inc_conflicts() const { }
};
template<typename value_type>
class AbortHandler {
struct Item { value_type val; int retries; };
typedef WorkList::GFIFO<Item> AbortedList;
PerThreadStorage<AbortedList> queues;
bool useBasicPolicy;
/**
* Policy: serialize via tree over packages.
*/
void basicPolicy(const Item& item) {
unsigned tid = LL::getTID();
unsigned package = LL::getPackageForSelf(tid);
queues.getRemote(LL::getLeaderForPackage(package / 2))->push(item);
}
/**
* Policy: retry work 2X locally, then serialize via tree on package (trying
* twice at each level), then serialize via tree over packages.
*/
void doublePolicy(const Item& item) {
int retries = item.retries - 1;
if ((retries & 1) == 1) {
queues.getLocal()->push(item);
return;
}
unsigned tid = LL::getTID();
unsigned package = LL::getPackageForSelf(tid);
unsigned leader = LL::getLeaderForPackage(package);
if (tid != leader) {
unsigned next = leader + (tid - leader) / 2;
queues.getRemote(next)->push(item);
} else {
queues.getRemote(LL::getLeaderForPackage(package / 2))->push(item);
}
}
/**
* Policy: retry work 2X locally, then serialize via tree on package but
* try at most 3 levels, then serialize via tree over packages.
*/
void boundedPolicy(const Item& item) {
int retries = item.retries - 1;
if (retries < 2) {
queues.getLocal()->push(item);
return;
}
unsigned tid = LL::getTID();
unsigned package = LL::getPackageForSelf(tid);
unsigned leader = LL::getLeaderForPackage(package);
if (retries < 5 && tid != leader) {
unsigned next = leader + (tid - leader) / 2;
queues.getRemote(next)->push(item);
} else {
queues.getRemote(LL::getLeaderForPackage(package / 2))->push(item);
}
}
/**
* Retry locally only.
*/
void eagerPolicy(const Item& item) {
queues.getLocal()->push(item);
}
public:
AbortHandler() {
// XXX(ddn): Implement smarter adaptive policy
useBasicPolicy = LL::getMaxPackages() > 2;
}
value_type& value(Item& item) const { return item.val; }
value_type& value(value_type& val) const { return val; }
void push(const value_type& val) {
Item item = { val, 1 };
queues.getLocal()->push(item);
}
void push(const Item& item) {
Item newitem = { item.val, item.retries + 1 };
if (useBasicPolicy)
basicPolicy(newitem);
else
doublePolicy(newitem);
}
AbortedList* getQueue() { return queues.getLocal(); }
};
template<class WorkListTy, class T, class FunctionTy>
class ForEachWork {
protected:
typedef T value_type;
typedef typename WorkListTy::template retype<value_type>::type WLTy;
struct ThreadLocalData {
FunctionTy function;
UserContextAccess<value_type> facing;
SimpleRuntimeContext ctx;
LoopStatistics<ForEachTraits<FunctionTy>::NeedsStats> stat;
ThreadLocalData(const FunctionTy& fn, const char* ln): function(fn), stat(ln) {}
};
// NB: Place dynamically growing wl after fixed-size PerThreadStorage
// members to give higher likelihood of reclaiming PerThreadStorage
AbortHandler<value_type> aborted;
TerminationDetection& term;
WLTy wl;
FunctionTy& origFunction;
const char* loopname;
bool broke;
inline void commitIteration(ThreadLocalData& tld) {
if (ForEachTraits<FunctionTy>::NeedsPush) {
auto ii = tld.facing.getPushBuffer().begin();
auto ee = tld.facing.getPushBuffer().end();
if (ii != ee) {
wl.push(ii, ee);
tld.facing.resetPushBuffer();
}
}
if (ForEachTraits<FunctionTy>::NeedsPIA)
tld.facing.resetAlloc();
if (ForEachTraits<FunctionTy>::NeedsAborts)
tld.ctx.commitIteration();
}
template<typename Item>
GALOIS_ATTRIBUTE_NOINLINE
void abortIteration(const Item& item, ThreadLocalData& tld) {
assert(ForEachTraits<FunctionTy>::NeedsAborts);
tld.ctx.cancelIteration();
tld.stat.inc_conflicts(); //Class specialization handles opt
aborted.push(item);
//clear push buffer
if (ForEachTraits<FunctionTy>::NeedsPush)
tld.facing.resetPushBuffer();
//reset allocator
if (ForEachTraits<FunctionTy>::NeedsPIA)
tld.facing.resetAlloc();
}
#ifdef GALOIS_USE_HTM
# ifndef GALOIS_USE_LONGJMP
# error "HTM must be used with GALOIS_USE_LONGJMP"
# endif
#endif
inline void doProcess(value_type& val, ThreadLocalData& tld) {
tld.stat.inc_iterations();
if (ForEachTraits<FunctionTy>::NeedsAborts)
tld.ctx.startIteration();
#ifdef GALOIS_USE_HTM
# ifndef GALOIS_USE_LONGJMP
# error "HTM must be used with GALOIS_USE_LONGJMP"
# endif
#pragma tm_atomic
{
#endif
tld.function(val, tld.facing.data());
#ifdef GALOIS_USE_HTM
}
#endif
clearReleasable();
commitIteration(tld);
}
bool runQueueSimple(ThreadLocalData& tld) {
bool workHappened = false;
Galois::optional<value_type> p = wl.pop();
if (p)
workHappened = true;
while (p) {
doProcess(*p, tld);
p = wl.pop();
}
return workHappened;
}
template<int limit, typename WL>
bool runQueue(ThreadLocalData& tld, WL& lwl) {
bool workHappened = false;
Galois::optional<typename WL::value_type> p = lwl.pop();
unsigned num = 0;
int result = 0;
if (p)
workHappened = true;
#ifdef GALOIS_USE_LONGJMP
if ((result = setjmp(hackjmp)) == 0) {
#else
try {
#endif
while (p) {
doProcess(aborted.value(*p), tld);
if (limit) {
++num;
if (num == limit)
break;
}
p = lwl.pop();
}
#ifdef GALOIS_USE_LONGJMP
} else {
clearReleasable();
clearConflictLock();
}
#else
} catch (ConflictFlag const& flag) {
clearReleasable();
clearConflictLock();
result = flag;
}
#endif
switch (result) {
case 0:
break;
case CONFLICT:
abortIteration(*p, tld);
break;
default:
GALOIS_DIE("unknown conflict type");
}
return workHappened;
}
GALOIS_ATTRIBUTE_NOINLINE
bool handleAborts(ThreadLocalData& tld) {
return runQueue<0>(tld, *aborted.getQueue());
}
void fastPushBack(typename UserContextAccess<value_type>::PushBufferTy& x) {
wl.push(x.begin(), x.end());
x.clear();
}
template<bool couldAbort, bool isLeader>
void go() {
// Thread-local data goes on the local stack to be NUMA friendly
ThreadLocalData tld(origFunction, loopname);
tld.facing.setBreakFlag(&broke);
if (couldAbort)
setThreadContext(&tld.ctx);
if (ForEachTraits<FunctionTy>::NeedsPush && !couldAbort)
tld.facing.setFastPushBack(
std::bind(&ForEachWork::fastPushBack, std::ref(*this), std::placeholders::_1));
bool didWork;
do {
didWork = false;
// Run some iterations
if (couldAbort || ForEachTraits<FunctionTy>::NeedsBreak) {
if (isLeader)
didWork = runQueue<32>(tld, wl);
else
didWork = runQueue<ForEachTraits<FunctionTy>::NeedsBreak ? 32 : 0>(tld, wl);
// Check for abort
if (couldAbort)
didWork |= handleAborts(tld);
} else { // No try/catch
didWork = runQueueSimple(tld);
}
// Update node color and prop token
term.localTermination(didWork);
} while (!term.globalTermination() && (!ForEachTraits<FunctionTy>::NeedsBreak || !broke));
if (couldAbort)
setThreadContext(0);
}
public:
ForEachWork(FunctionTy& f, const char* l): term(getSystemTermination()), origFunction(f), loopname(l), broke(false) { }
template<typename W>
ForEachWork(W& w, FunctionTy& f, const char* l): term(getSystemTermination()), wl(w), origFunction(f), loopname(l), broke(false) { }
template<typename RangeTy>
void AddInitialWork(const RangeTy& range) {
wl.push_initial(range);
}
void initThread(void) {
term.initializeThread();
}
void operator()() {
bool isLeader = LL::isPackageLeaderForSelf(LL::getTID());
bool couldAbort = ForEachTraits<FunctionTy>::NeedsAborts && activeThreads > 1;
#ifdef GALOIS_USE_HTM
couldAbort = false;
#endif
if (couldAbort && isLeader)
go<true, true>();
else if (couldAbort && !isLeader)
go<true, false>();
else if (!couldAbort && isLeader)
go<false, true>();
else
go<false, false>();
}
};
template<typename WLTy, typename RangeTy, typename FunctionTy>
void for_each_impl(const RangeTy& range, FunctionTy f, const char* loopname) {
if (inGaloisForEach)
GALOIS_DIE("Nested for_each not supported");
StatTimer LoopTimer("LoopTime", loopname);
if (ForEachTraits<FunctionTy>::NeedsStats)
LoopTimer.start();
inGaloisForEach = true;
typedef typename RangeTy::value_type T;
typedef ForEachWork<WLTy, T, FunctionTy> WorkTy;
// NB: Initialize barrier before creating WorkTy to increase
// PerThreadStorage reclaimation likelihood
Barrier& barrier = getSystemBarrier();
WorkTy W(f, loopname);
RunCommand w[5] = {
std::bind(&WorkTy::initThread, std::ref(W)),
std::bind(&WorkTy::template AddInitialWork<RangeTy>, std::ref(W), range),
std::ref(barrier),
std::ref(W),
std::ref(barrier)
};
getSystemThreadPool().run(&w[0], &w[5], activeThreads);
if (ForEachTraits<FunctionTy>::NeedsStats)
LoopTimer.stop();
inGaloisForEach = false;
}
template<typename FunctionTy>
struct WOnEach {
FunctionTy& origFunction;
WOnEach(FunctionTy& f): origFunction(f) { }
void operator()(void) {
FunctionTy fn(origFunction);
fn(LL::getTID(), activeThreads);
}
};
template<typename FunctionTy>
void on_each_impl(FunctionTy fn, const char* loopname = 0) {
if (inGaloisForEach)
GALOIS_DIE("Nested for_each not supported");
inGaloisForEach = true;
RunCommand w[2] = {WOnEach<FunctionTy>(fn),
std::ref(getSystemBarrier())};
getSystemThreadPool().run(&w[0], &w[2], activeThreads);
inGaloisForEach = false;
}
//! on each executor with simple barrier.
template<typename FunctionTy>
void on_each_simple_impl(FunctionTy fn, const char* loopname = 0) {
if (inGaloisForEach)
GALOIS_DIE("Nested for_each not supported");
inGaloisForEach = true;
Barrier* b = createSimpleBarrier();
b->reinit(activeThreads);
RunCommand w[2] = {WOnEach<FunctionTy>(fn),
std::ref(*b)};
getSystemThreadPool().run(&w[0], &w[2], activeThreads);
delete b;
inGaloisForEach = false;
}
} // end namespace anonymous
void preAlloc_impl(int num);
} // end namespace Runtime
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/Context.h
|
/** simple galois context and contention manager -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_CONTEXT_H
#define GALOIS_RUNTIME_CONTEXT_H
#include "Galois/config.h"
#include "Galois/MethodFlags.h"
#include "Galois/Runtime/ll/PtrLock.h"
#include "Galois/Runtime/ll/gio.h"
#include <boost/utility.hpp>
#include <cassert>
#include <cstdlib>
#ifdef GALOIS_USE_LONGJMP
#include <setjmp.h>
#endif
namespace Galois {
namespace Runtime {
enum ConflictFlag {
CONFLICT = -1,
NO_CONFLICT = 0,
REACHED_FAILSAFE = 1,
BREAK = 2
};
enum PendingFlag {
NON_DET,
PENDING,
COMMITTING
};
//! Used by deterministic and ordered executor
void setPending(PendingFlag value);
PendingFlag getPending ();
//! used to release lock over exception path
static inline void clearConflictLock() { }
#ifdef GALOIS_USE_LONGJMP
extern __thread jmp_buf hackjmp;
/**
* Stack-allocated classes that require their deconstructors to be called,
* after an abort for instance, should inherit from this class.
*/
class Releasable {
Releasable* next;
public:
Releasable();
virtual ~Releasable() { }
virtual void release() = 0;
void releaseAll();
};
void clearReleasable();
#else
class Releasable {
public:
virtual ~Releasable() { }
virtual void release() = 0;
};
static inline void clearReleasable() { }
#endif
class LockManagerBase;
#if defined(GALOIS_USE_SEQ_ONLY)
class Lockable { };
class LockManagerBase: private boost::noncopyable {
protected:
enum AcquireStatus {
FAIL, NEW_OWNER, ALREADY_OWNER
};
AcquireStatus tryAcquire(Lockable* lockable) { return FAIL; }
bool stealByCAS(Lockable* lockable, LockManagerBase* other) { return false; }
void ownByForce(Lockable* lockable) { }
void release (Lockable* lockable) {}
static bool tryLock(Lockable* lockable) { return false; }
static LockManagerBase* getOwner(Lockable* lockable) { return 0; }
};
class SimpleRuntimeContext: public LockManagerBase {
protected:
void acquire(Lockable* lockable) { }
void release (Lockable* lockable) {}
virtual void subAcquire(Lockable* lockable);
void addToNhood(Lockable* lockable) { }
static SimpleRuntimeContext* getOwner(Lockable* lockable) { return 0; }
public:
SimpleRuntimeContext(bool child = false): LockManagerBase () { }
virtual ~SimpleRuntimeContext() { }
void startIteration() { }
unsigned cancelIteration() { return 0; }
unsigned commitIteration() { return 0; }
};
#else
/**
* All objects that may be locked (nodes primarily) must inherit from
* Lockable.
*/
class Lockable {
LL::PtrLock<LockManagerBase, true> owner;
//! Use an intrusive list to track neighborhood of a context without allocation overhead.
//! Works for cases where a Lockable needs to be only in one context's neighborhood list
Lockable* next;
friend class LockManagerBase;
friend class SimpleRuntimeContext;
public:
Lockable() :next(0) {}
};
class LockManagerBase: private boost::noncopyable {
protected:
enum AcquireStatus {
FAIL, NEW_OWNER, ALREADY_OWNER
};
AcquireStatus tryAcquire(Lockable* lockable);
inline bool stealByCAS(Lockable* lockable, LockManagerBase* other) {
assert(lockable != nullptr);
return lockable->owner.stealing_CAS(other, this);
}
inline void ownByForce(Lockable* lockable) {
assert(lockable != nullptr);
assert(!lockable->owner.getValue());
lockable->owner.setValue(this);
}
inline void release(Lockable* lockable) {
assert(lockable != nullptr);
assert(getOwner(lockable) == this);
lockable->owner.unlock_and_clear();
}
inline static bool tryLock(Lockable* lockable) {
assert(lockable != nullptr);
return lockable->owner.try_lock();
}
inline static LockManagerBase* getOwner(Lockable* lockable) {
assert(lockable != nullptr);
return lockable->owner.getValue();
}
};
class SimpleRuntimeContext: public LockManagerBase {
//! The locks we hold
Lockable* locks;
bool customAcquire;
protected:
friend void doAcquire(Lockable*);
static SimpleRuntimeContext* getOwner(Lockable* lockable) {
LockManagerBase* owner = LockManagerBase::getOwner (lockable);
return static_cast<SimpleRuntimeContext*>(owner);
}
virtual void subAcquire(Lockable* lockable);
void addToNhood(Lockable* lockable) {
assert(!lockable->next);
lockable->next = locks;
locks = lockable;
}
void acquire(Lockable* lockable);
void release(Lockable* lockable);
public:
SimpleRuntimeContext(bool child = false): locks(0), customAcquire(child) { }
virtual ~SimpleRuntimeContext() { }
void startIteration() {
assert(!locks);
}
unsigned cancelIteration();
unsigned commitIteration();
};
#endif
//! get the current conflict detection class, may be null if not in parallel region
SimpleRuntimeContext* getThreadContext();
//! used by the parallel code to set up conflict detection per thread
void setThreadContext(SimpleRuntimeContext* n);
//! Helper function to decide if the conflict detection lock should be taken
inline bool shouldLock(const Galois::MethodFlag g) {
#ifdef GALOIS_USE_SEQ_ONLY
return false;
#else
// Mask out additional "optional" flags
switch (g & ALL) {
case NONE:
case SAVE_UNDO:
return false;
case ALL:
case CHECK_CONFLICT:
return true;
default:
// XXX(ddn): Adding error checking code here either upsets the inlining
// heuristics or icache behavior. Avoid complex code if possible.
//GALOIS_DIE("shouldn't get here");
assert(false);
}
return false;
#endif
}
//! actual locking function. Will always lock.
inline void doAcquire(Lockable* lockable) {
SimpleRuntimeContext* ctx = getThreadContext();
if (ctx)
ctx->acquire(lockable);
}
//! Master function which handles conflict detection
//! used to acquire a lockable thing
inline void acquire(Lockable* lockable, Galois::MethodFlag m) {
if (shouldLock(m)) {
doAcquire(lockable);
}
}
struct AlwaysLockObj {
void operator()(Lockable* lockable) const {
doAcquire(lockable);
}
};
struct CheckedLockObj {
Galois::MethodFlag m;
CheckedLockObj(Galois::MethodFlag _m) :m(_m) {}
void operator()(Lockable* lockable) const {
acquire(lockable, m);
}
};
void signalConflict(Lockable*);
void forceAbort();
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/Range.h
|
/** Ranges -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_RUNTIME_RANGE_H
#define GALOIS_RUNTIME_RANGE_H
#include "Galois/gstl.h"
#include "Galois/Runtime/ActiveThreads.h"
#include "Galois/Runtime/ll/TID.h"
#include <iterator>
namespace Galois {
namespace Runtime {
// TODO(ddn): update to have better forward iterator behavor for blocked/local iteration
template<typename T>
class LocalRange {
T* container;
public:
typedef typename T::iterator iterator;
typedef typename T::local_iterator local_iterator;
typedef iterator block_iterator;
typedef typename std::iterator_traits<iterator>::value_type value_type;
LocalRange(T& c): container(&c) { }
iterator begin() const { return container->begin(); }
iterator end() const { return container->end(); }
std::pair<block_iterator, block_iterator> block_pair() const {
return Galois::block_range(begin(), end(), LL::getTID(), activeThreads);
}
std::pair<local_iterator, local_iterator> local_pair() const {
return std::make_pair(container->local_begin(), container->local_end());
}
local_iterator local_begin() const { return container->local_begin(); }
local_iterator local_end() const { return container->local_end(); }
block_iterator block_begin() const { return block_pair().first; }
block_iterator block_end() const { return block_pair().second; }
};
template<typename T>
inline LocalRange<T> makeLocalRange(T& obj) { return LocalRange<T>(obj); }
template<typename IterTy>
class StandardRange {
IterTy ii, ei;
public:
typedef IterTy iterator;
typedef iterator local_iterator;
typedef iterator block_iterator;
typedef typename std::iterator_traits<IterTy>::value_type value_type;
StandardRange(IterTy b, IterTy e): ii(b), ei(e) { }
iterator begin() const { return ii; }
iterator end() const { return ei; }
std::pair<block_iterator, block_iterator> block_pair() const {
return Galois::block_range(ii, ei, LL::getTID(), activeThreads);
}
std::pair<local_iterator, local_iterator> local_pair() const {
return block_pair();
}
local_iterator local_begin() const { return block_begin(); }
local_iterator local_end() const { return block_end(); }
block_iterator block_begin() const { return block_pair().first; }
block_iterator block_end() const { return block_pair().second; }
};
template<typename IterTy>
inline StandardRange<IterTy> makeStandardRange(IterTy begin, IterTy end) {
return StandardRange<IterTy>(begin, end);
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/UserContextAccess.h
|
/** Manipulate the user context -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_USERCONTEXTACCESS_H
#define GALOIS_RUNTIME_USERCONTEXTACCESS_H
#include "Galois/UserContext.h"
namespace Galois {
namespace Runtime {
//! Backdoor to allow runtime methods to access private data in UserContext
template<typename T>
class UserContextAccess : public Galois::UserContext<T> {
public:
typedef Galois::UserContext<T> SuperTy;
typedef typename SuperTy::PushBufferTy PushBufferTy;
typedef typename SuperTy::FastPushBack FastPushBack;
void resetAlloc() { SuperTy::__resetAlloc(); }
PushBufferTy& getPushBuffer() { return SuperTy::__getPushBuffer(); }
void resetPushBuffer() { SuperTy::__resetPushBuffer(); }
SuperTy& data() { return *static_cast<SuperTy*>(this); }
void setLocalState(void *p, bool used) { SuperTy::__setLocalState(p, used); }
void setFastPushBack(FastPushBack f) { SuperTy::__setFastPushBack(f); }
void setBreakFlag(bool *b) { SuperTy::didBreak = b; }
// TODO: move to a separate class dedicated for speculative executors
#ifdef GALOIS_USE_EXP
void rollback () { SuperTy::__rollback (); }
void commit () { SuperTy::__commit (); }
void reset () {
SuperTy::__resetPushBuffer ();
SuperTy::__resetUndoLog ();
SuperTy::__resetCommitLog ();
SuperTy::__resetAlloc ();
}
#endif
};
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/Termination.h
|
/** Dikstra style termination detection -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
* Copyright (C) 2011, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* Implementation of Termination Detection
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_TERMINATION_H
#define GALOIS_RUNTIME_TERMINATION_H
#include "Galois/Runtime/PerThreadStorage.h"
#include "Galois/Runtime/ll/CacheLineStorage.h"
namespace Galois {
namespace Runtime {
class TerminationDetection {
protected:
LL::CacheLineStorage<volatile bool> globalTerm;
public:
/**
* Initializes the per-thread state. All threads must call this
* before any call localTermination.
*/
virtual void initializeThread() = 0;
/**
* Process termination locally. May be called as often as needed. The
* argument workHappened signals that since last time it was called, some
* progress was made that should prevent termination. All threads must call
* initializeThread() before any thread calls this function. This function
* should not be on the fast path (this is why it takes a flag, to allow the
* caller to buffer up work status changes).
*/
virtual void localTermination(bool workHappened) = 0;
/**
* Returns whether global termination is detected.
*/
bool globalTermination() const {
return globalTerm.data;
}
};
//returns an object. The object will be reused.
TerminationDetection& getSystemTermination();
} // end namespace Runtime
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/DeterministicWork.h
|
/** Deterministic execution -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_RUNTIME_DETERMINISTICWORK_H
#define GALOIS_RUNTIME_DETERMINISTICWORK_H
#include "Galois/config.h"
#include "Galois/Threads.h"
#include "Galois/ParallelSTL/ParallelSTL.h"
#include "Galois/TwoLevelIterator.h"
#include "Galois/Runtime/ll/gio.h"
#include <boost/iterator/iterator_facade.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include GALOIS_CXX11_STD_HEADER(type_traits)
#include <deque>
#include <queue>
namespace Galois {
namespace Runtime {
//! Implementation of deterministic execution
namespace DeterministicImpl {
struct OrderedTag { };
struct UnorderedTag { };
template<typename T>
struct DItem {
T val;
unsigned long id;
void *localState;
DItem(const T& _val, unsigned long _id): val(_val), id(_id), localState(NULL) { }
};
template<typename T, typename CompareTy>
struct DeterministicContext: public SimpleRuntimeContext {
typedef SimpleRuntimeContext Base;
typedef DItem<T> Item;
Item item;
const CompareTy* comp;
bool not_ready;
DeterministicContext(const Item& _item, const CompareTy& _comp):
Base(true),
item(_item),
comp(&_comp),
not_ready(false)
{ }
bool notReady() const {
return not_ready;
}
virtual void subAcquire(Lockable* lockable) {
if (getPending() == COMMITTING)
return;
if (Base::tryLock (lockable)) {
this->addToNhood(lockable);
}
DeterministicContext* other;
do {
other = static_cast<DeterministicContext*>(Base::getOwner(lockable));
if (other == this)
return;
if (other) {
bool conflict = (*comp)(*other, *this); // *other < *this
if (conflict) {
// A lock that I want but can't get
not_ready = true;
return;
}
}
} while (!this->stealByCAS(lockable, other));
// Disable loser
if (other) {
other->not_ready = true; // Only need atomic write
}
return;
}
};
namespace {
template<typename T, typename CompTy>
struct OrderedContextComp {
typedef DeterministicContext<T, OrderedContextComp> DetContext;
const CompTy& comp;
explicit OrderedContextComp(const CompTy& c): comp(c) {}
inline bool operator()(const DetContext& left, const DetContext& right) const {
return comp(left.item.val, right.item.val);
}
};
template<typename T>
struct UnorderedContextComp {
typedef DeterministicContext<T, UnorderedContextComp> DetContext;
inline bool operator()(const DetContext& left, const DetContext& right) const {
return left.item.id < right.item.id;
}
};
template<typename Function1Ty,typename Function2Ty>
struct Options {
static const bool needsStats = ForEachTraits<Function1Ty>::NeedsStats || ForEachTraits<Function2Ty>::NeedsStats;
static const bool needsPush = ForEachTraits<Function1Ty>::NeedsPush || ForEachTraits<Function2Ty>::NeedsPush;
static const bool needsBreak = ForEachTraits<Function1Ty>::NeedsBreak || ForEachTraits<Function2Ty>::NeedsBreak;
static const bool hasFixedWindow = false;
};
template<typename _T,typename _Function1Ty,typename _Function2Ty,typename _CompareTy>
struct OrderedOptions: public Options<_Function1Ty,_Function2Ty> {
typedef _Function1Ty Function1Ty;
typedef _Function2Ty Function2Ty;
typedef _T T;
typedef _CompareTy CompareTy;
typedef OrderedContextComp<T, CompareTy> ContextComp;
typedef DeterministicContext<T, ContextComp> DetContext;
static const bool useOrdered = true;
typedef OrderedTag Tag;
Function1Ty fn1;
Function2Ty fn2;
CompareTy comp;
ContextComp contextComp;
OrderedOptions(const Function1Ty& fn1, const Function2Ty& fn2, const CompareTy& comp):
fn1(fn1), fn2(fn2), comp(comp), contextComp(comp) { }
template<typename WL>
DetContext* emplaceContext(WL& wl, const DItem<T>& item) const {
return wl.emplace(item, contextComp);
}
};
template<typename _T,typename _Function1Ty,typename _Function2Ty>
struct UnorderedOptions: public Options<_Function1Ty,_Function2Ty> {
typedef _Function1Ty Function1Ty;
typedef _Function2Ty Function2Ty;
typedef _T T;
typedef UnorderedContextComp<T> ContextComp;
typedef DeterministicContext<T, ContextComp> DetContext;
static const bool useOrdered = false;
typedef UnorderedTag Tag;
struct DummyCompareTy {
bool operator()(const T&, const T&) const {
return false;
}
};
typedef DummyCompareTy CompareTy;
Function1Ty fn1;
Function2Ty fn2;
CompareTy comp;
ContextComp contextComp;
UnorderedOptions(const Function1Ty& fn1, const Function2Ty& fn2): fn1(fn1), fn2(fn2) { }
template<typename WL>
DetContext* emplaceContext(WL& wl, const DItem<T>& item) const {
return wl.emplace(item, contextComp);
}
};
template<typename T,typename FunctionTy,typename Enable=void>
struct StateManager {
void alloc(UserContextAccess<T>&, FunctionTy& self) { }
void dealloc(UserContextAccess<T>&) { }
void save(UserContextAccess<T>&, void*&) { }
void restore(UserContextAccess<T>&, void*) { }
};
template<typename T,typename FunctionTy>
struct StateManager<T,FunctionTy,typename std::enable_if<has_deterministic_local_state<FunctionTy>::value>::type> {
typedef typename FunctionTy::GaloisDeterministicLocalState LocalState;
void alloc(UserContextAccess<T>& c,FunctionTy& self) {
void *p = c.data().getPerIterAlloc().allocate(sizeof(LocalState));
new (p) LocalState(self, c.data().getPerIterAlloc());
c.setLocalState(p, false);
}
void dealloc(UserContextAccess<T>& c) {
bool dummy;
LocalState *p = reinterpret_cast<LocalState*>(c.data().getLocalState(dummy));
p->~LocalState();
}
void save(UserContextAccess<T>& c, void*& localState) {
bool dummy;
localState = c.data().getLocalState(dummy);
}
void restore(UserContextAccess<T>& c, void* localState) {
c.setLocalState(localState, true);
}
};
template<typename FunctionTy,typename Enable=void>
struct BreakManager {
BreakManager() { }
bool checkBreak(FunctionTy&) { return false; }
};
template<typename FunctionTy>
class BreakManager<FunctionTy,typename std::enable_if<has_deterministic_parallel_break<FunctionTy>::value>::type> {
Barrier& barrier;
LL::CacheLineStorage<volatile long> done;
public:
BreakManager() : barrier(getSystemBarrier()) { }
bool checkBreak(FunctionTy& fn) {
if (LL::getTID() == 0)
done.data = fn.galoisDeterministicParallelBreak();
barrier.wait();
return done.data;
}
};
template<typename T>
struct DNewItem {
T val;
unsigned long parent;
unsigned count;
DNewItem(const T& _val, unsigned long _parent, unsigned _count): val(_val), parent(_parent), count(_count) { }
bool operator<(const DNewItem<T>& o) const {
if (parent < o.parent)
return true;
else if (parent == o.parent)
return count < o.count;
else
return false;
}
bool operator==(const DNewItem<T>& o) const {
return parent == o.parent && count == o.count;
}
bool operator!=(const DNewItem<T>& o) const {
return !(*this == o);
}
struct GetFirst: public std::unary_function<DNewItem<T>,const T&> {
const T& operator()(const DNewItem<T>& x) const {
return x.val;
}
};
};
template<typename InputIteratorTy>
void safe_advance(InputIteratorTy& it, size_t d, size_t& cur, size_t dist) {
if (d + cur >= dist) {
d = dist - cur;
}
std::advance(it, d);
cur += d;
}
//! Wrapper around WorkList::ChunkedFIFO to allow peek() and empty() and still have FIFO order
template<int chunksize,typename T>
struct FIFO {
WorkList::ChunkedFIFO<chunksize,T,false> m_data;
WorkList::ChunkedLIFO<16,T,false> m_buffer;
size_t m_size;
FIFO(): m_size(0) { }
~FIFO() {
Galois::optional<T> p;
while ((p = m_buffer.pop()))
;
while ((p = m_data.pop()))
;
}
Galois::optional<T> pop() {
Galois::optional<T> p;
if ((p = m_buffer.pop()) || (p = m_data.pop())) {
--m_size;
}
return p;
}
Galois::optional<T> peek() {
Galois::optional<T> p;
if ((p = m_buffer.pop())) {
m_buffer.push(*p);
} else if ((p = m_data.pop())) {
m_buffer.push(*p);
}
return p;
}
void push(const T& val) {
m_data.push(val);
++m_size;
}
size_t size() const {
return m_size;
}
bool empty() const {
return m_size == 0;
}
};
template<typename> class DMergeManagerBase;
template<typename,typename> class DMergeManager;
//! Thread-local data for merging and implementations specialized for
//! ordered and unordered implementations.
template<typename OptionsTy>
class DMergeLocal: private boost::noncopyable {
template<typename> friend class DMergeManagerBase;
template<typename,typename> friend class DMergeManager;
typedef typename OptionsTy::T T;
typedef typename OptionsTy::CompareTy CompareTy;
struct HeapCompare {
const CompareTy& comp;
explicit HeapCompare(const CompareTy& c): comp(c) { }
bool operator()(const T& a, const T& b) const {
// reverse sense to get least items out of std::priority_queue
return comp(b, a);
}
};
typedef DItem<T> Item;
typedef DNewItem<T> NewItem;
typedef std::vector<NewItem,typename PerIterAllocTy::rebind<NewItem>::other> NewItemsTy;
typedef FIFO<1024,Item> ReserveTy;
typedef std::vector<T,typename PerIterAllocTy::rebind<T>::other> PQ;
IterAllocBaseTy heap;
PerIterAllocTy alloc;
size_t window;
size_t delta;
size_t committed;
size_t iterations;
size_t aborted;
public:
NewItemsTy newItems;
private:
ReserveTy reserve;
// For ordered execution
PQ newReserve;
Galois::optional<T> mostElement;
Galois::optional<T> windowElement;
// For id based execution
size_t minId;
size_t maxId;
// For general execution
size_t size;
public:
DMergeLocal(): alloc(&heap), newItems(alloc), newReserve(alloc) {
resetStats();
}
private:
//! Update min and max from sorted iterator
template<typename BiIteratorTy>
void initialLimits(BiIteratorTy ii, BiIteratorTy ei) {
minId = std::numeric_limits<size_t>::max();
maxId = std::numeric_limits<size_t>::min();
mostElement = windowElement = Galois::optional<T>();
if (ii != ei) {
if (ii + 1 == ei) {
minId = maxId = ii->parent;
mostElement = Galois::optional<T>(ii->val);
} else {
minId = ii->parent;
maxId = (ei-1)->parent;
mostElement = Galois::optional<T>(ei[-1].val);
}
}
}
template<typename WL>
void nextWindowDispatch(WL* wl, const OptionsTy& options, UnorderedTag) {
window += delta;
Galois::optional<Item> p;
while ((p = reserve.peek())) {
if (p->id >= window)
break;
wl->push(*p);
reserve.pop();
}
}
template<typename WL>
void nextWindowDispatch(WL* wl, const OptionsTy& options, OrderedTag) {
orderedUpdateDispatch<false>(wl, options.comp, 0);
}
template<typename WL>
void updateWindowElement(WL* wl, const CompareTy& comp, size_t count) {
orderedUpdateDispatch<true>(wl, comp, count);
}
//! Common functionality for (1) getting the next N-1 elements and setting windowElement
//! to the nth element and (2) getting the next elements < windowElement.
template<bool updateWE,typename WL>
void orderedUpdateDispatch(WL* wl, const CompareTy& comp, size_t count) {
// count = 0 is a special signal to not do anything
if (updateWE && count == 0)
return;
if (updateWE) {
size_t available = reserve.size() + newReserve.size();
// No more reserve but what should we propose for windowElement? As with
// distributeNewWork, this is a little tricky. Proposing nothing does not
// work because our proposal must be at least as large as any element we
// add to wl, and for progress, the element must be larger than at least
// one element in the reserve. Here, we use the simplest solution which
// is mostElement.
// TODO improve this
if (available < count) {
windowElement = mostElement;
return;
}
count = std::min(count, available);
}
size_t c = 0;
while (true) {
Galois::optional<Item> p1 = reserve.peek();
Galois::optional<T> p2 = peekNewReserve();
bool fromReserve;
if (p1 && p2)
fromReserve = comp(p1->val, *p2);
else if (!p1 && !p2)
break;
else
fromReserve = p1;
T* val = (fromReserve) ? &p1->val : &*p2;
// When there is no mostElement or windowElement, the reserve should be
// empty as well.
assert(mostElement && windowElement);
if (!comp(*val, *mostElement))
break;
if (!updateWE && !comp(*val, *windowElement))
break;
if (updateWE && ++c >= count) {
windowElement = Galois::optional<T>(*val);
break;
}
wl->push(Item(*val, 0));
if (fromReserve)
reserve.pop();
else
popNewReserve(comp);
}
}
template<typename InputIteratorTy,typename WL,typename NewTy>
void copyInDispatch(InputIteratorTy ii, InputIteratorTy ei, size_t dist, WL* wl, NewTy&, unsigned numActive, const CompareTy& comp, UnorderedTag) {
unsigned int tid = LL::getTID();
size_t cur = 0;
size_t k = 0;
safe_advance(ii, tid, cur, dist);
while (ii != ei) {
unsigned long id = k * numActive + tid;
if (id < window)
wl->push(Item(*ii, id));
else
break;
++k;
safe_advance(ii, numActive, cur, dist);
}
while (ii != ei) {
unsigned long id = k * numActive + tid;
reserve.push(Item(*ii, id));
++k;
safe_advance(ii, numActive, cur, dist);
}
}
template<typename InputIteratorTy,typename WL,typename NewTy>
void copyInDispatch(InputIteratorTy ii, InputIteratorTy ei, size_t dist, WL* wl, NewTy& new_, unsigned numActive, const CompareTy& comp, OrderedTag) {
assert(emptyReserve());
unsigned int tid = LL::getTID();
size_t cur = 0;
safe_advance(ii, tid, cur, dist);
while (ii != ei) {
if (windowElement && !comp(*ii, *windowElement))
break;
wl->push(Item(*ii, 0));
safe_advance(ii, numActive, cur, dist);
}
while (ii != ei) {
if (mostElement && !comp(*ii, *mostElement))
break;
reserve.push(Item(*ii, 0));
safe_advance(ii, numActive, cur, dist);
}
while (ii != ei) {
new_.push(NewItem(*ii, 0, 1));
safe_advance(ii, numActive, cur, dist);
}
}
void initialWindow(size_t dist, size_t atleast, size_t base = 0) {
size_t w = std::max(dist / 100, atleast) + base;
// XXX(ddn): use !LL::EnvCheck("GALOIS_FIXED_DET_WINDOW_SIZE", defaultDelta)
if (OptionsTy::hasFixedWindow)
w = 100;
window = delta = w;
}
void receiveLimits(const DMergeLocal<OptionsTy>& other) {
minId = other.minId;
maxId = other.maxId;
mostElement = other.mostElement;
windowElement = other.windowElement;
size = other.size;
}
void reduceLimits(const DMergeLocal<OptionsTy>& other, const CompareTy& comp) {
minId = std::min(other.minId, minId);
maxId = std::max(other.maxId, maxId);
size += other.size;
if (!mostElement) {
mostElement = other.mostElement;
} else if (other.mostElement && comp(*mostElement, *other.mostElement)) {
mostElement = other.mostElement;
}
if (!windowElement) {
windowElement = other.windowElement;
} else if (other.windowElement && comp(*windowElement, *other.windowElement)) {
windowElement = other.windowElement;
}
}
void popNewReserve(const CompareTy& comp) {
std::pop_heap(newReserve.begin(), newReserve.end(), HeapCompare(comp));
newReserve.pop_back();
}
void pushNewReserve(const T& val, const CompareTy& comp) {
newReserve.push_back(val);
std::push_heap(newReserve.begin(), newReserve.end(), HeapCompare(comp));
}
Galois::optional<T> peekNewReserve() {
if (newReserve.empty())
return Galois::optional<T>();
else
return Galois::optional<T>(newReserve.front());
}
template<typename InputIteratorTy,typename WL,typename NewTy>
void copyIn(InputIteratorTy b, InputIteratorTy e, size_t dist, WL* wl, NewTy& new_, unsigned numActive, const CompareTy& comp) {
copyInDispatch(b, e, dist, wl, new_, numActive, comp, typename OptionsTy::Tag());
}
public:
void clear() { heap.clear(); }
void incrementIterations() { ++iterations; }
void incrementCommitted() { ++committed; }
void assertLimits(const T& val, const CompareTy& comp) {
assert(!windowElement || comp(val, *windowElement));
assert(!mostElement || comp(val, *mostElement));
}
template<typename WL>
void nextWindow(WL* wl, const OptionsTy& options) {
nextWindowDispatch(wl, options, typename OptionsTy::Tag());
}
void resetStats() { committed = iterations = aborted = 0; }
bool emptyReserve() { return reserve.empty() && newReserve.empty(); }
};
template<typename OptionsTy,typename Enable=void>
struct MergeTraits {
static const bool value = false;
static const int ChunkSize = 32;
static const int MinDelta = ChunkSize * 40;
};
template<typename OptionsTy>
struct MergeTraits<OptionsTy,typename std::enable_if<OptionsTy::useOrdered>::type> {
static const bool value = true;
static const int ChunkSize = 16;
static const int MinDelta = 4;
template<typename Arg>
static uintptr_t id(const typename OptionsTy::Function1Ty& fn, Arg arg) {
return 0;
}
};
template<typename OptionsTy>
struct MergeTraits<OptionsTy,typename std::enable_if<has_deterministic_id<typename OptionsTy::Function1Ty>::value && !OptionsTy::useOrdered>::type> {
static const bool value = true;
static const int ChunkSize = 32;
static const int MinDelta = ChunkSize * 40;
template<typename Arg>
static uintptr_t id(const typename OptionsTy::Function1Ty& fn, Arg arg) {
return fn.galoisDeterministicId(std::forward<Arg>(arg));
}
};
template<typename OptionsTy>
class DMergeManagerBase {
protected:
typedef typename OptionsTy::T T;
typedef typename OptionsTy::CompareTy CompareTy;
typedef DItem<T> Item;
typedef DNewItem<T> NewItem;
typedef WorkList::dChunkedFIFO<MergeTraits<OptionsTy>::ChunkSize,NewItem> NewWork;
typedef DMergeLocal<OptionsTy> MergeLocal;
typedef typename MergeLocal::NewItemsTy NewItemsTy;
typedef typename NewItemsTy::iterator NewItemsIterator;
IterAllocBaseTy heap;
PerIterAllocTy alloc;
PerThreadStorage<MergeLocal> data;
NewWork new_;
unsigned numActive;
void broadcastLimits(MergeLocal& mlocal, unsigned int tid) {
for (unsigned i = 0; i < this->numActive; ++i) {
if (i == tid) continue;
MergeLocal& mother = *this->data.getRemote(i);
mother.receiveLimits(mlocal);
}
}
void reduceLimits(MergeLocal& mlocal, unsigned int tid, const CompareTy& comp) {
for (unsigned i = 0; i < this->numActive; ++i) {
if (i == tid) continue;
MergeLocal& mother = *this->data.getRemote(i);
mlocal.reduceLimits(mother, comp);
}
}
public:
DMergeManagerBase(): alloc(&heap) {
numActive = getActiveThreads();
}
~DMergeManagerBase() {
Galois::optional<NewItem> p;
assert(!(p = new_.pop()));
}
MergeLocal& get() {
return *data.getLocal();
}
void calculateWindow(bool inner) {
MergeLocal& mlocal = *data.getLocal();
// Accumulate all threads' info
size_t allcommitted = 0;
size_t alliterations = 0;
for (unsigned i = 0; i < numActive; ++i) {
MergeLocal& mlocal = *data.getRemote(i);
allcommitted += mlocal.committed;
alliterations += mlocal.iterations;
}
float commitRatio = alliterations > 0 ? allcommitted / (float) alliterations : 0.0;
if (OptionsTy::hasFixedWindow) {
if (!inner || allcommitted == alliterations) {
mlocal.delta = MergeTraits<OptionsTy>::MinDelta;
} else {
mlocal.delta = 0;
}
} else {
const float target = 0.95;
if (commitRatio >= target)
mlocal.delta += mlocal.delta;
else if (allcommitted == 0) // special case when we don't execute anything
mlocal.delta += mlocal.delta;
else
mlocal.delta = commitRatio / target * mlocal.delta;
if (!inner) {
mlocal.delta = std::max(mlocal.delta, (size_t) MergeTraits<OptionsTy>::MinDelta);
} else if (mlocal.delta < (size_t) MergeTraits<OptionsTy>::MinDelta) {
// Try to get some new work instead of increasing window
mlocal.delta = 0;
}
}
// Useful debugging info
if (false) {
if (LL::getTID() == 0) {
char buf[1024];
snprintf(buf, 1024, "%d %.3f (%zu/%zu) window: %zu delta: %zu\n",
inner, commitRatio, allcommitted, alliterations, mlocal.window, mlocal.delta);
LL::gPrint(buf);
}
}
}
};
//! Default implementation for merging
template<typename OptionsTy,typename Enable=void>
class DMergeManager: public DMergeManagerBase<OptionsTy> {
typedef DMergeManagerBase<OptionsTy> Base;
typedef typename Base::T T;
typedef typename Base::Item Item;
typedef typename Base::NewItem NewItem;
typedef typename Base::MergeLocal MergeLocal;
typedef typename Base::NewItemsTy NewItemsTy;
typedef typename Base::NewItemsIterator NewItemsIterator;
typedef typename Base::CompareTy CompareTy;
struct GetNewItem: public std::unary_function<int,NewItemsTy&> {
PerThreadStorage<MergeLocal>* base;
GetNewItem() { }
GetNewItem(PerThreadStorage<MergeLocal>* b): base(b) { }
NewItemsTy& operator()(int i) const { return base->getRemote(i)->newItems; }
};
typedef boost::transform_iterator<GetNewItem, boost::counting_iterator<int> > MergeOuterIt;
typedef typename ChooseStlTwoLevelIterator<MergeOuterIt, typename NewItemsTy::iterator>::type MergeIt;
std::vector<NewItem,typename PerIterAllocTy::rebind<NewItem>::other> mergeBuf;
std::vector<T,typename PerIterAllocTy::rebind<T>::other> distributeBuf;
Barrier& barrier;
bool merge(int begin, int end) {
if (begin == end)
return false;
else if (begin + 1 == end)
return !this->data.getRemote(begin)->newItems.empty();
bool retval = false;
int mid = (end - begin) / 2 + begin;
retval |= merge(begin, mid);
retval |= merge(mid, end);
MergeOuterIt bbegin(boost::make_counting_iterator(begin), GetNewItem(&this->data));
MergeOuterIt mmid(boost::make_counting_iterator(mid), GetNewItem(&this->data));
MergeOuterIt eend(boost::make_counting_iterator(end), GetNewItem(&this->data));
// MergeIt aa(bbegin, mmid), ea(mmid, mmid);
// MergeIt bb(mmid, eend), eb(eend, eend);
// MergeIt cc(bbegin, eend), ec(eend, eend);
MergeIt aa = stl_two_level_begin(bbegin, mmid);
MergeIt ea = stl_two_level_end(bbegin, mmid);
MergeIt bb = stl_two_level_begin(mmid, eend);
MergeIt eb = stl_two_level_end(mmid, eend);
MergeIt cc = stl_two_level_begin(bbegin, eend);
MergeIt ec = stl_two_level_end(bbegin, eend);
while (aa != ea && bb != eb) {
if (*aa < *bb)
mergeBuf.push_back(*aa++);
else
mergeBuf.push_back(*bb++);
}
for (; aa != ea; ++aa)
mergeBuf.push_back(*aa);
for (; bb != eb; ++bb)
mergeBuf.push_back(*bb);
for (NewItemsIterator ii = mergeBuf.begin(), ei = mergeBuf.end(); ii != ei; ++ii)
*cc++ = *ii;
mergeBuf.clear();
assert(cc == ec);
return retval;
}
//! Slightly complicated reindexing to separate out continuous elements in InputIterator
template<typename InputIteratorTy>
void redistribute(InputIteratorTy ii, InputIteratorTy ei, size_t dist) {
unsigned int tid = LL::getTID();
//const size_t numBlocks = 1 << 7;
//const size_t mask = numBlocks - 1;
//size_t blockSize = dist / numBlocks; // round down
MergeLocal& mlocal = *this->data.getLocal();
//size_t blockSize = std::max((size_t) (0.9*minfo.delta), (size_t) 1);
size_t blockSize = mlocal.delta;
size_t numBlocks = dist / blockSize;
size_t cur = 0;
safe_advance(ii, tid, cur, dist);
while (ii != ei) {
unsigned long id;
if (cur < blockSize * numBlocks)
//id = (cur & mask) * blockSize + (cur / numBlocks);
id = (cur % numBlocks) * blockSize + (cur / numBlocks);
else
id = cur;
distributeBuf[id] = *ii;
safe_advance(ii, this->numActive, cur, dist);
}
}
template<typename InputIteratorTy,typename WL>
void distribute(InputIteratorTy ii, InputIteratorTy ei, size_t dist, WL* wl) {
unsigned int tid = LL::getTID();
MergeLocal& mlocal = *this->data.getLocal();
mlocal.initialWindow(dist, MergeTraits<OptionsTy>::MinDelta);
if (true) {
// Renumber to avoid pathological cases
if (tid == 0) {
distributeBuf.resize(dist);
}
barrier.wait();
redistribute(ii, ei, dist);
barrier.wait();
mlocal.copyIn(distributeBuf.begin(), distributeBuf.end(), dist, wl, this->new_, this->numActive, CompareTy());
} else {
mlocal.copyIn(ii, ei, dist, wl, this->new_, this->numActive, CompareTy());
}
}
template<typename WL>
void parallelSort(WL* wl) {
MergeLocal& mlocal = *this->data.getLocal();
mlocal.newItems.clear();
Galois::optional<NewItem> p;
while ((p = this->new_.pop())) {
mlocal.newItems.push_back(*p);
}
std::sort(mlocal.newItems.begin(), mlocal.newItems.end());
mlocal.size = mlocal.newItems.size();
barrier.wait();
unsigned tid = LL::getTID();
if (tid == 0) {
this->reduceLimits(mlocal, tid, CompareTy());
mergeBuf.reserve(mlocal.size);
this->broadcastLimits(mlocal, tid);
merge(0, this->numActive);
}
barrier.wait();
MergeOuterIt bbegin(boost::make_counting_iterator(0), GetNewItem(&this->data));
MergeOuterIt eend(boost::make_counting_iterator((int) this->numActive), GetNewItem(&this->data));
MergeIt ii = stl_two_level_begin(bbegin, eend);
MergeIt ei = stl_two_level_end(eend, eend);
distribute(boost::make_transform_iterator(ii, typename Base::NewItem::GetFirst()),
boost::make_transform_iterator(ei, typename Base::NewItem::GetFirst()),
mlocal.size, wl);
}
template<typename WL>
void serialSort(WL* wl) {
this->new_.flush();
barrier.wait();
if (LL::getTID() == 0) {
mergeBuf.clear();
Galois::optional<NewItem> p;
while ((p = this->new_.pop())) {
mergeBuf.push_back(*p);
}
std::sort(mergeBuf.begin(), mergeBuf.end());
printf("DEBUG R %zd\n", mergeBuf.size());
}
barrier.wait();
distribute(boost::make_transform_iterator(mergeBuf.begin(), typename NewItem::GetFirst()),
boost::make_transform_iterator(mergeBuf.end(), typename NewItem::GetFirst()),
mergeBuf.size(), wl);
}
public:
DMergeManager(const OptionsTy& o): mergeBuf(this->alloc), distributeBuf(this->alloc), barrier(getSystemBarrier())
{}
template<typename InputIteratorTy>
void presort(const typename OptionsTy::Function1Ty&, InputIteratorTy ii, InputIteratorTy ei) { }
template<typename InputIteratorTy, typename WL>
void addInitialWork(InputIteratorTy b, InputIteratorTy e, WL* wl) {
distribute(b, e, std::distance(b, e), wl);
}
template<typename WL>
void pushNew(const typename OptionsTy::Function1Ty& fn1, const T& val, unsigned long parent, unsigned count,
WL* wl, bool& hasNewWork, bool& nextCommit) {
this->new_.push(NewItem(val, parent, count));
hasNewWork = true;
}
template<typename WL>
bool distributeNewWork(const typename OptionsTy::Function1Ty&, WL* wl) {
if (true)
parallelSort(wl);
else
serialSort(wl);
return false;
}
template<typename WL>
void prepareNextWindow(WL* wl) { }
};
/**
* Implementation of merging specialized for unordered algorithms with an id
* function and ordered algorithms.
*/
// TODO: For consistency should also have thread-local copies of comp
template<typename OptionsTy>
class DMergeManager<OptionsTy,typename std::enable_if<MergeTraits<OptionsTy>::value>::type>: public DMergeManagerBase<OptionsTy> {
typedef DMergeManagerBase<OptionsTy> Base;
typedef typename Base::T T;
typedef typename Base::Item Item;
typedef typename Base::NewItem NewItem;
typedef typename Base::MergeLocal MergeLocal;
typedef typename Base::NewItemsTy NewItemsTy;
typedef typename Base::NewItemsIterator NewItemsIterator;
typedef typename Base::CompareTy CompareTy;
struct CompareNewItems: public std::binary_function<NewItem,NewItem,bool> {
const CompareTy& comp;
CompareNewItems(const CompareTy& c): comp(c) { }
bool operator()(const NewItem& a, const NewItem& b) const {
return comp(a.val, b.val);
}
};
std::vector<NewItem,typename PerIterAllocTy::rebind<NewItem>::other> mergeBuf;
CompareTy comp;
Barrier& barrier;
public:
DMergeManager(const OptionsTy& o): mergeBuf(this->alloc), comp(o.comp), barrier(getSystemBarrier()) { }
template<typename InputIteratorTy, typename WL>
void addInitialWork(InputIteratorTy ii, InputIteratorTy ei, WL* wl) {
MergeLocal& mlocal = *this->data.getLocal();
mlocal.copyIn(
boost::make_transform_iterator(mergeBuf.begin(), typename Base::NewItem::GetFirst()),
boost::make_transform_iterator(mergeBuf.end(), typename Base::NewItem::GetFirst()),
mergeBuf.size(), wl, this->new_, this->numActive, comp);
}
template<typename InputIteratorTy>
void presort(const typename OptionsTy::Function1Ty& fn1, InputIteratorTy ii, InputIteratorTy ei) {
unsigned int tid = LL::getTID();
MergeLocal& mlocal = *this->data.getLocal();
size_t dist = std::distance(ii, ei);
// Ordered algorithms generally have less available parallelism, so start
// window size out small
size_t window;
if (OptionsTy::useOrdered)
window = std::min((size_t) this->numActive, dist);
assert(mergeBuf.empty());
mergeBuf.reserve(dist);
for (; ii != ei; ++ii)
mergeBuf.push_back(NewItem(*ii, MergeTraits<OptionsTy>::id(fn1, *ii), 1));
if (OptionsTy::useOrdered)
ParallelSTL::sort(mergeBuf.begin(), mergeBuf.end(), CompareNewItems(comp));
else
ParallelSTL::sort(mergeBuf.begin(), mergeBuf.end());
mlocal.initialLimits(mergeBuf.begin(), mergeBuf.end());
if (OptionsTy::useOrdered && window) {
mlocal.windowElement = Galois::optional<T>(mergeBuf[window-1].val);
}
this->broadcastLimits(mlocal, tid);
if (!OptionsTy::useOrdered)
window = mlocal.maxId - mlocal.minId;
for (unsigned i = 0; i < this->numActive; ++i) {
MergeLocal& mother = *this->data.getRemote(i);
if (OptionsTy::useOrdered)
mother.initialWindow(window, window);
else
mother.initialWindow(window, MergeTraits<OptionsTy>::MinDelta, mlocal.minId);
}
}
template<typename WL>
void pushNew(const typename OptionsTy::Function1Ty& fn1, const T& val, unsigned long parent, unsigned count,
WL* wl, bool& hasNewWork, bool& nextCommit) {
if (!OptionsTy::useOrdered) {
this->new_.push(NewItem(val, MergeTraits<OptionsTy>::id(fn1, val), 1));
hasNewWork = true;
return;
}
MergeLocal& mlocal = *this->data.getLocal();
// NB: Tricky conditions. If we can not definitively place an item, it must
// go into the current wl.
if (mlocal.mostElement && !comp(val, *mlocal.mostElement)) {
this->new_.push(NewItem(val, MergeTraits<OptionsTy>::id(fn1, val), 1));
hasNewWork = true;
} else if (mlocal.mostElement && mlocal.windowElement && !comp(val, *mlocal.windowElement)) {
mlocal.pushNewReserve(val, comp);
} else {
// TODO: account for this work in calculateWindow
wl->push(Item(val, 0));
nextCommit = true;
}
}
template<typename WL>
bool distributeNewWork(const typename OptionsTy::Function1Ty& fn1, WL* wl) {
unsigned int tid = LL::getTID();
MergeLocal& mlocal = *this->data.getLocal();
assert(mlocal.emptyReserve());
mlocal.newItems.clear();
Galois::optional<NewItem> p;
while ((p = this->new_.pop()))
mlocal.newItems.push_back(*p);
if (OptionsTy::useOrdered)
std::sort(mlocal.newItems.begin(), mlocal.newItems.end(), CompareNewItems(comp));
else
std::sort(mlocal.newItems.begin(), mlocal.newItems.end());
NewItemsIterator ii = mlocal.newItems.begin(), ei = mlocal.newItems.end();
mlocal.initialLimits(ii, ei);
if (OptionsTy::useOrdered) {
// Smallest useful delta is 2 because windowElement is not included into
// current workset
size_t w = std::min(std::max(mlocal.delta / this->numActive, (size_t) 2), mlocal.newItems.size());
if (w)
mlocal.windowElement = Galois::optional<T>(mlocal.newItems[w-1].val);
}
barrier.wait();
if (tid == 0) {
this->reduceLimits(mlocal, tid, comp);
this->broadcastLimits(mlocal, tid);
}
barrier.wait();
bool retval = false;
if (OptionsTy::useOrdered) {
mlocal.initialWindow(this->numActive, this->numActive);
assert(ii == ei || (mlocal.windowElement && mlocal.mostElement));
assert((!mlocal.windowElement && !mlocal.mostElement) || !comp(*mlocal.mostElement, *mlocal.windowElement));
// No new items; we just have the most element X from the previous round.
// The most and window elements are exclusive of the range that they
// define; there is no most or window element that includes X. The
// easiest solution is to not use most or window elements for the next
// round, but the downside is that we will never return to windowed execution.
// TODO: improve this
if (mlocal.windowElement && mlocal.mostElement && !comp(*mlocal.windowElement, *mlocal.mostElement)) {
mlocal.windowElement = mlocal.mostElement = Galois::optional<T>();
for (; ii != ei; ++ii) {
wl->push(Item(ii->val, 0));
}
}
for (; ii != ei; ++ii) {
if (!comp(ii->val, *mlocal.windowElement))
break;
wl->push(Item(ii->val, 0));
}
for (; ii != ei; ++ii) {
if (!comp(ii->val, *mlocal.mostElement))
break;
mlocal.reserve.push(Item(ii->val, 0));
}
for (; ii != ei; ++ii) {
retval = true;
this->new_.push(NewItem(ii->val, MergeTraits<OptionsTy>::id(fn1, ii->val), 1));
}
} else {
mlocal.initialWindow(mlocal.maxId - mlocal.minId, MergeTraits<OptionsTy>::MinDelta, mlocal.minId);
for (; ii != ei; ++ii) {
unsigned long id = ii->parent;
if (id < mlocal.window)
wl->push(Item(ii->val, id));
else
break;
}
for (; ii != ei; ++ii) {
unsigned long id = ii->parent;
mlocal.reserve.push(Item(ii->val, id));
}
}
return retval;
}
template<typename WL>
void prepareNextWindow(WL* wl) {
if (!OptionsTy::useOrdered)
return;
unsigned int tid = LL::getTID();
MergeLocal& mlocal = *this->data.getLocal();
size_t w = 0;
// Convert non-zero deltas into per thread counts
if (mlocal.delta) {
if (mlocal.delta < this->numActive)
w = tid < mlocal.delta ? 1 : 0;
else
w = mlocal.delta / this->numActive;
w++; // exclusive end point
}
mlocal.updateWindowElement(wl, comp, w);
barrier.wait();
if (tid == 0) {
this->reduceLimits(mlocal, tid, comp);
this->broadcastLimits(mlocal, tid);
}
}
};
template<typename OptionsTy>
class Executor {
typedef typename OptionsTy::T value_type;
typedef DItem<value_type> Item;
typedef DNewItem<value_type> NewItem;
typedef DMergeManager<OptionsTy> MergeManager;
typedef DMergeLocal<OptionsTy> MergeLocal;
typedef typename OptionsTy::DetContext DetContext;
typedef WorkList::dChunkedFIFO<MergeTraits<OptionsTy>::ChunkSize,Item> WL;
typedef WorkList::dChunkedFIFO<MergeTraits<OptionsTy>::ChunkSize,DetContext> PendingWork;
typedef WorkList::ChunkedFIFO<MergeTraits<OptionsTy>::ChunkSize,DetContext,false> LocalPendingWork;
static const bool useLocalState = has_deterministic_local_state<typename OptionsTy::Function1Ty>::value;
// Truly thread-local
struct ThreadLocalData: private boost::noncopyable {
OptionsTy options;
LocalPendingWork localPending;
UserContextAccess<value_type> facing;
LoopStatistics<OptionsTy::needsStats> stat;
WL* wlcur;
WL* wlnext;
size_t rounds;
size_t outerRounds;
bool hasNewWork;
ThreadLocalData(const OptionsTy& o, const char* loopname): options(o), stat(loopname), rounds(0), outerRounds(0) { }
};
const OptionsTy& origOptions;
MergeManager mergeManager;
const char* loopname;
BreakManager<typename OptionsTy::Function1Ty> breakManager;
Barrier& barrier;
WL worklists[2];
StateManager<value_type,typename OptionsTy::Function1Ty> stateManager;
PendingWork pending;
LL::CacheLineStorage<volatile long> innerDone;
LL::CacheLineStorage<volatile long> outerDone;
LL::CacheLineStorage<volatile long> hasNewWork;
int numActive;
bool pendingLoop(ThreadLocalData& tld);
bool commitLoop(ThreadLocalData& tld);
void go();
public:
Executor(const OptionsTy& o, const char* ln):
origOptions(o), mergeManager(o), loopname(ln), barrier(getSystemBarrier())
{
static_assert(!OptionsTy::needsBreak
|| has_deterministic_parallel_break<typename OptionsTy::Function1Ty>::value,
"need to use break function to break loop");
}
template<typename RangeTy>
void AddInitialWork(RangeTy range) {
mergeManager.addInitialWork(range.begin(), range.end(), &worklists[1]);
}
template<typename IterTy>
void presort(IterTy ii, IterTy ei) {
ThreadLocalData tld(origOptions, loopname);
mergeManager.presort(tld.options.fn1, ii, ei);
}
void operator()() {
go();
}
};
template<typename OptionsTy>
void Executor<OptionsTy>::go() {
ThreadLocalData tld(origOptions, loopname);
MergeLocal& mlocal = mergeManager.get();
tld.wlcur = &worklists[0];
tld.wlnext = &worklists[1];
// copyIn for ordered algorithms adds at least one initial new item
tld.hasNewWork = OptionsTy::useOrdered ? true : false;
while (true) {
++tld.outerRounds;
while (true) {
++tld.rounds;
std::swap(tld.wlcur, tld.wlnext);
setPending(PENDING);
bool nextPending = pendingLoop(tld);
innerDone.data = true;
barrier.wait();
setPending(COMMITTING);
bool nextCommit = commitLoop(tld);
outerDone.data = true;
if (nextPending || nextCommit)
innerDone.data = false;
barrier.wait();
if (innerDone.data)
break;
mergeManager.calculateWindow(true);
mergeManager.prepareNextWindow(tld.wlnext);
barrier.wait();
mlocal.nextWindow(tld.wlnext, tld.options);
mlocal.resetStats();
}
if (!mlocal.emptyReserve())
outerDone.data = false;
if (tld.hasNewWork)
hasNewWork.data = true;
if (breakManager.checkBreak(tld.options.fn1))
break;
mergeManager.calculateWindow(false);
mergeManager.prepareNextWindow(tld.wlnext);
barrier.wait();
if (outerDone.data) {
if (!OptionsTy::needsPush)
break;
if (!hasNewWork.data) // (1)
break;
tld.hasNewWork = mergeManager.distributeNewWork(tld.options.fn1, tld.wlnext);
// NB: assumes that distributeNewWork has a barrier otherwise checking at (1) is erroneous
hasNewWork.data = false;
} else {
mlocal.nextWindow(tld.wlnext, tld.options);
}
mlocal.resetStats();
}
setPending(NON_DET);
mlocal.clear(); // parallelize clean up too
if (OptionsTy::needsStats) {
if (LL::getTID() == 0) {
reportStat(loopname, "RoundsExecuted", tld.rounds);
reportStat(loopname, "OuterRoundsExecuted", tld.outerRounds);
}
}
}
template<typename OptionsTy>
bool Executor<OptionsTy>::pendingLoop(ThreadLocalData& tld)
{
MergeLocal& mlocal = mergeManager.get();
bool retval = false;
Galois::optional<Item> p;
while ((p = tld.wlcur->pop())) {
// Use a new context for each item because there is a race when reusing
// between aborted iterations.
DetContext* ctx = NULL;
if (useLocalState) {
ctx = tld.options.emplaceContext(tld.localPending, *p);
} else {
ctx = tld.options.emplaceContext(pending, *p);
}
assert(ctx != NULL);
mlocal.incrementIterations();
bool commit = true;
if (OptionsTy::useOrdered) {
mlocal.assertLimits(ctx->item.val, tld.options.comp);
}
ctx->startIteration();
tld.stat.inc_iterations();
setThreadContext(ctx);
stateManager.alloc(tld.facing, tld.options.fn1);
int result = 0;
#ifdef GALOIS_USE_LONGJMP
if ((result = setjmp(hackjmp)) == 0) {
#else
try {
#endif
tld.options.fn1(ctx->item.val, tld.facing.data());
#ifdef GALOIS_USE_LONGJMP
} else { clearConflictLock(); }
#else
} catch (const ConflictFlag& flag) { clearConflictLock(); result = flag; }
#endif
clearReleasable();
switch (result) {
case 0:
case REACHED_FAILSAFE: break;
case CONFLICT: commit = false; break;
default: assert(0 && "Unknown conflict flag"); abort(); break;
}
if (ForEachTraits<typename OptionsTy::Function1Ty>::NeedsPIA && !useLocalState)
tld.facing.resetAlloc();
// XXX Update continuous
if (commit) {
stateManager.save(tld.facing, ctx->item.localState);
} else {
retval = true;
}
}
return retval;
}
template<typename OptionsTy>
bool Executor<OptionsTy>::commitLoop(ThreadLocalData& tld)
{
bool retval = false;
MergeLocal& mlocal = mergeManager.get();
size_t ncommits = 0;
size_t niter = 0;
DetContext* ctx;
while ((ctx = (useLocalState) ? tld.localPending.peek() : pending.peek())) {
++niter;
bool commit = true;
// Can skip this check in prefix by repeating computations but eagerly
// aborting seems more efficient
if (ctx->notReady())
commit = false;
// XXX check continuous
setThreadContext(ctx);
if (commit) {
stateManager.restore(tld.facing, ctx->item.localState);
int result = 0;
#ifdef GALOIS_USE_LONGJMP
if ((result = setjmp(hackjmp)) == 0) {
#else
try {
#endif
tld.options.fn2(ctx->item.val, tld.facing.data());
#ifdef GALOIS_USE_LONGJMP
} else { clearConflictLock(); }
#else
} catch (const ConflictFlag& flag) { clearConflictLock(); result = flag; }
#endif
clearReleasable();
switch (result) {
case 0: break;
case CONFLICT: commit = false; break;
default: assert(0 && "Unknown conflict flag"); abort(); break;
}
}
stateManager.dealloc(tld.facing);
if (commit) {
++ncommits;
mlocal.incrementCommitted();
if (ForEachTraits<typename OptionsTy::Function2Ty>::NeedsPush) {
unsigned long parent = ctx->item.id;
typedef typename UserContextAccess<value_type>::PushBufferTy::iterator iterator;
unsigned count = 0;
for (iterator ii = tld.facing.getPushBuffer().begin(),
ei = tld.facing.getPushBuffer().end(); ii != ei; ++ii) {
mergeManager.pushNew(tld.options.fn1, *ii, parent, ++count, tld.wlnext, tld.hasNewWork, retval);
if (count == 0) {
assert(0 && "Counter overflow");
abort();
}
}
}
assert(ForEachTraits<typename OptionsTy::Function2Ty>::NeedsPush
|| tld.facing.getPushBuffer().begin() == tld.facing.getPushBuffer().end());
} else {
if (useLocalState) { ctx->item.localState = NULL; }
tld.wlnext->push(ctx->item);
tld.stat.inc_conflicts();
retval = true;
}
if (commit) {
ctx->commitIteration();
} else {
ctx->cancelIteration();
}
if (ForEachTraits<typename OptionsTy::Function2Ty>::NeedsPIA && !useLocalState)
tld.facing.resetAlloc();
tld.facing.resetPushBuffer();
if (useLocalState) { tld.localPending.pop_peeked(); } else { pending.pop_peeked(); }
}
if (ForEachTraits<typename OptionsTy::Function2Ty>::NeedsPIA && useLocalState)
tld.facing.resetAlloc();
setThreadContext(0);
return retval;
}
} // end namespace anonymous
} // end namespace DeterministicImpl
template<typename RangeTy, typename WorkTy>
static inline void for_each_det_impl(const RangeTy& range, WorkTy& W) {
W.presort(range.begin(), range.end());
assert(!inGaloisForEach);
inGaloisForEach = true;
RunCommand init(std::bind(&WorkTy::template AddInitialWork<RangeTy>, std::ref(W), std::ref(range)));
RunCommand w[4] = {std::ref(init),
std::ref(getSystemBarrier()),
std::ref(W),
std::ref(getSystemBarrier())};
getSystemThreadPool().run(&w[0], &w[4], activeThreads);
inGaloisForEach = false;
}
#if 0
/**
* TODO(ddn): This executor only properly works for ordered algorithms that do
* not create new work; otherwise, the behavior is deterministic (and clients
* have some control over the order enforced via comp), but this executor does
* not guarantee that a newly added activity, A, will execute before a
* previously created activity, B, even if A < B.
*/
template<typename IterTy, typename ComparatorTy, typename NhFunc, typename OpFunc>
static inline void for_each_ordered_2p(IterTy b, IterTy e, ComparatorTy comp, NhFunc f1, OpFunc f2, const char* loopname) {
typedef Runtime::StandardRange<IterTy> Range;
typedef typename Range::value_type T;
typedef Runtime::DeterministicImpl::OrderedOptions<T,NhFunc,OpFunc,ComparatorTy> OptionsTy;
typedef Runtime::DeterministicImpl::Executor<OptionsTy> WorkTy;
OptionsTy options(f1, f2, comp);
WorkTy W(options, loopname);
for_each_det_impl(makeStandardRange(b,e), W);
}
#endif
} // end namespace Runtime
} // end namespace Galois
namespace Galois {
/**
* Deterministic execution with prefix operator.
* The prefix of the operator should be exactly the same as the operator
* but with execution returning at the failsafe point. The operator
* should conform to a standard Galois unordered set operator {@link for_each()}.
*
* @param b begining of range of initial items
* @param e end of range of initial items
* @param prefix prefix of operator
* @param fn operator
* @param loopname string to identify loop in statistics output
*/
template<typename IterTy, typename Function1Ty, typename Function2Ty>
static inline void for_each_det(IterTy b, IterTy e, Function1Ty prefix, Function2Ty fn, const char* loopname = 0) {
typedef Runtime::StandardRange<IterTy> Range;
typedef typename Range::value_type T;
typedef Runtime::DeterministicImpl::UnorderedOptions<T,Function1Ty,Function2Ty> OptionsTy;
typedef Runtime::DeterministicImpl::Executor<OptionsTy> WorkTy;
OptionsTy options(prefix, fn);
WorkTy W(options, loopname);
Runtime::for_each_det_impl(Runtime::makeStandardRange(b, e), W);
}
/**
* Deterministic execution with prefix operator.
* The prefix of the operator should be exactly the same as the operator
* but with execution returning at the failsafe point. The operator
* should conform to a standard Galois unordered set operator {@link for_each()}.
*
* @param i initial item
* @param prefix prefix of operator
* @param fn operator
* @param loopname string to identify loop in statistics output
*/
template<typename T, typename Function1Ty, typename Function2Ty>
static inline void for_each_det(T i, Function1Ty prefix, Function2Ty fn, const char* loopname = 0) {
T wl[1] = { i };
for_each_det(&wl[0], &wl[1], prefix, fn, loopname);
}
/**
* Deterministic execution with single operator.
* The operator fn is used both for the prefix computation and for the
* continuation of computation, c.f., the prefix operator version which
* uses two different functions. The operator can distinguish between
* the two uses by querying {@link UserContext.getLocalState()}.
*
* @param b begining of range of initial items
* @param e end of range of initial items
* @param fn operator
* @param loopname string to identify loop in statistics output
*/
template<typename IterTy, typename FunctionTy>
static inline void for_each_det(IterTy b, IterTy e, FunctionTy fn, const char* loopname = 0) {
for_each_det(b, e, fn, fn, loopname);
}
/**
* Deterministic execution with single operator.
* The operator fn is used both for the prefix computation and for the
* continuation of computation, c.f., the prefix operator version which
* uses two different functions. The operator can distinguish between
* the two uses by querying {@link UserContext.getLocalState()}.
*
* @param i initial item
* @param fn operator
* @param loopname string to identify loop in statistics output
*/
template<typename T, typename FunctionTy>
static inline void for_each_det(T i, FunctionTy fn, const char* loopname = 0) {
T wl[1] = { i };
for_each_det(&wl[0], &wl[1], fn, fn, loopname);
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ActiveThreads.h
|
/** Number of Active Threads -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_ACTIVETHREADS_H
#define GALOIS_RUNTIME_ACTIVETHREADS_H
namespace Galois {
namespace Runtime {
extern unsigned int activeThreads;
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ForEachTraits.h
|
/** Traits of the Foreach loop body functor -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* Traits of the for_each loop body functor.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_FOREACHTRAITS_H
#define GALOIS_RUNTIME_FOREACHTRAITS_H
#include "Galois/TypeTraits.h"
namespace Galois {
namespace Runtime {
namespace {
template<typename FunctionTy>
struct ForEachTraits {
enum {
NeedsStats = !Galois::does_not_need_stats<FunctionTy>::value,
NeedsBreak = Galois::needs_parallel_break<FunctionTy>::value,
NeedsPush = !Galois::does_not_need_push<FunctionTy>::value,
NeedsPIA = Galois::needs_per_iter_alloc<FunctionTy>::value,
NeedsAborts = !Galois::does_not_need_aborts<FunctionTy>::value
};
};
}
}
} // end namespace Galois
#endif // GALOIS_RUNTIME_FOREACHTRAITS_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/OrderedWork.h
|
/** Ordered execution -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_RUNTIME_ORDERED_WORK_H
#define GALOIS_RUNTIME_ORDERED_WORK_H
#include "Galois/Runtime/DeterministicWork.h"
//#include "Galois/Runtime/LCordered.h"
namespace Galois {
namespace Runtime {
template <typename NhFunc, typename OpFunc>
struct OrderedTraits {
static const bool NeedsPush = !Galois::does_not_need_push<OpFunc>::value;
static const bool HasFixedNeighborhood = Galois::has_fixed_neighborhood<NhFunc>::value;
};
template <typename Iter, typename Cmp, typename NhFunc, typename OpFunc>
void for_each_ordered_impl (Iter beg, Iter end, const Cmp& cmp, const NhFunc& nhFunc, const OpFunc& opFunc, const char* loopname) {
#if 0
if (!OrderedTraits<NhFunc, OpFunc>::NeedsPush && OrderedTraits<NhFunc, OpFunc>::HasFixedNeighborhood) {
// TODO: Remove-only/DAG executor
GALOIS_DIE("Remove-only executor not implemented yet");
} else if (OrderedTraits<NhFunc, OpFunc>::HasFixedNeighborhood) {
for_each_ordered_lc (beg, end, cmp, nhFunc, opFunc, loopname);
} else {
for_each_ordered_2p (beg, end, cmp, nhFunc, opFunc, loopname);
}
#else
GALOIS_DIE("not yet implemented");
#endif
}
template <typename Iter, typename Cmp, typename NhFunc, typename OpFunc, typename StableTest>
void for_each_ordered_impl (Iter beg, Iter end, const Cmp& cmp, const NhFunc& nhFunc, const OpFunc& opFunc, const StableTest& stabilityTest, const char* loopname) {
#if 0
if (!OrderedTraits<NhFunc, OpFunc>::NeedsPush && OrderedTraits<NhFunc, OpFunc>::HasFixedNeighborhood) {
GALOIS_DIE("no-adds + fixed-neighborhood == stable-source");
} else if (OrderedTraits<NhFunc, OpFunc>::HasFixedNeighborhood) {
for_each_ordered_lc (beg, end, cmp, nhFunc, opFunc, stabilityTest, loopname);
} else {
GALOIS_DIE("two-phase executor for unstable-source algorithms not implemented yet");
// TODO: implement following
// for_each_ordered_2p (beg, end, cmp, nhFunc, opFunc, stabilityTest, loopname);
}
#else
GALOIS_DIE("not yet implemented");
#endif
}
} // end namespace Runtime
} // end namespace Galois
#endif // GALOIS_RUNTIME_ORDERED_WORK_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/Support.h
|
/** Reporting and utility code -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_SUPPORT_H
#define GALOIS_RUNTIME_SUPPORT_H
#include <string>
namespace Galois {
class Statistic;
}
namespace Galois {
namespace Runtime {
extern bool inGaloisForEach;
//! Reports stats for a given thread
void reportStat(const char* loopname, const char* category, unsigned long value);
//! Reports stats for a given thread
void reportStat(const std::string& loopname, const std::string& category, unsigned long value);
//! Reports stats for all threads
void reportStat(Galois::Statistic* value);
//! Reports Galois system memory stats for all threads
void reportPageAlloc(const char* category);
//! Reports NUMA memory stats for all NUMA nodes
void reportNumaAlloc(const char* category);
//! Prints all stats
void printStats();
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/MethodFlags.h
|
/** Galois Conflict flags -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_RUNTIME_METHODFLAGS_H
#define GALOIS_RUNTIME_METHODFLAGS_H
#include "Galois/config.h"
#include "Galois/MethodFlags.h"
namespace Galois {
namespace Runtime {
void doCheckWrite();
inline bool isWriteMethod(Galois::MethodFlag m, bool write) {
return write || (m & MethodFlag::WRITE) != Galois::MethodFlag::NONE;
}
inline void checkWrite(Galois::MethodFlag m, bool write) {
#ifndef GALOIS_USE_HTM
if (isWriteMethod(m, write))
doCheckWrite();
#endif
}
}
} // end namespace Galois
#endif //GALOIS_RUNTIME_METHODFLAGS_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/Sampling.h
|
/** HW Runtime Sampling Control -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_SAMPLING_H
#define GALOIS_RUNTIME_SAMPLING_H
namespace Galois {
namespace Runtime {
void beginSampling();
void endSampling();
void beginThreadSampling();
void endThreadSampling();
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/DoAll.h
|
/** Galois Simple Parallel Loop -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* Implementation of the Galois foreach iterator. Includes various
* specializations to operators to reduce runtime overhead.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_DOALL_H
#define GALOIS_RUNTIME_DOALL_H
#include "Galois/gstl.h"
#include "Galois/Statistic.h"
#include "Galois/Runtime/Barrier.h"
#include "Galois/Runtime/Support.h"
#include "Galois/Runtime/Range.h"
#include "Galois/Runtime/ForEachTraits.h"
#include <algorithm>
namespace Galois {
namespace Runtime {
struct EmptyFn {
template<typename T>
void operator()(T a, T b) {}
};
// TODO(ddn): Tune stealing. DMR suffers when stealing is on
// TODO: add loopname + stats
template<class FunctionTy, class ReduceFunTy, class RangeTy>
class DoAllWork {
typedef typename RangeTy::local_iterator local_iterator;
LL::SimpleLock<true> reduceLock;
FunctionTy origF;
FunctionTy outputF;
ReduceFunTy RF;
RangeTy range;
Barrier& barrier;
bool needsReduce;
bool useStealing;
struct SharedState {
local_iterator stealBegin;
local_iterator stealEnd;
LL::SimpleLock<true> stealLock;
};
struct PrivateState {
local_iterator begin;
local_iterator end;
FunctionTy F;
PrivateState(FunctionTy& o) :F(o) {}
};
PerThreadStorage<SharedState> TLDS;
//! Master execution function for this loop type
void processRange(PrivateState& tld) {
for (; tld.begin != tld.end; ++tld.begin)
tld.F(*tld.begin);
}
bool doSteal(SharedState& source, PrivateState& dest) {
//This may not be safe for iterators with complex state
if (source.stealBegin != source.stealEnd) {
source.stealLock.lock();
if (source.stealBegin != source.stealEnd) {
dest.begin = source.stealBegin;
source.stealBegin = dest.end = Galois::split_range(source.stealBegin, source.stealEnd);
}
source.stealLock.unlock();
}
return dest.begin != dest.end;
}
void populateSteal(PrivateState& tld, SharedState& tsd) {
if (tld.begin != tld.end && std::distance(tld.begin, tld.end) > 1) {
tsd.stealLock.lock();
tsd.stealEnd = tld.end;
tsd.stealBegin = tld.end = Galois::split_range(tld.begin, tld.end);
tsd.stealLock.unlock();
}
}
GALOIS_ATTRIBUTE_NOINLINE
bool trySteal(PrivateState& mytld) {
//First try stealing from self
if (doSteal(*TLDS.getLocal(), mytld))
return true;
//Then try stealing from neighbors
unsigned myID = LL::getTID();
for (unsigned x = 1; x < activeThreads; x += x) {
SharedState& r = *TLDS.getRemote((myID + x) % activeThreads);
if (doSteal(r, mytld)) {
//populateSteal(mytld);
return true;
}
}
return false;
}
void doReduce(PrivateState& mytld) {
if (needsReduce) {
reduceLock.lock();
RF(outputF, mytld.F);
reduceLock.unlock();
}
}
public:
DoAllWork(const FunctionTy& F, const ReduceFunTy& R, bool needsReduce, RangeTy r, bool steal)
: origF(F), outputF(F), RF(R), range(r), barrier(getSystemBarrier()), needsReduce(needsReduce), useStealing(steal)
{ }
void operator()() {
//Assume the copy constructor on the functor is readonly
PrivateState thisTLD(origF);
thisTLD.begin = range.local_begin();
thisTLD.end = range.local_end();
if (useStealing) {
populateSteal(thisTLD, *TLDS.getLocal());
// threads could start stealing from other threads whose
// range has not been initialized yet
barrier.wait();
}
do {
processRange(thisTLD);
} while (useStealing && trySteal(thisTLD));
doReduce(thisTLD);
}
FunctionTy getFn() const { return outputF; }
};
template<typename RangeTy, typename FunctionTy, typename ReducerTy>
FunctionTy do_all_dispatch(RangeTy range, FunctionTy f, ReducerTy r, bool doReduce, const char* loopname, bool steal) {
if (Galois::Runtime::inGaloisForEach) {
return std::for_each(range.begin(), range.end(), f);
} else {
StatTimer LoopTimer("LoopTime", loopname);
if (ForEachTraits<FunctionTy>::NeedsStats)
LoopTimer.start();
inGaloisForEach = true;
DoAllWork<FunctionTy, ReducerTy, RangeTy> W(f, r, doReduce, range, steal);
RunCommand w[2] = {std::ref(W),
std::ref(getSystemBarrier())};
getSystemThreadPool().run(&w[0], &w[2],activeThreads);
if (ForEachTraits<FunctionTy>::NeedsStats)
LoopTimer.stop();
inGaloisForEach = false;
return W.getFn();
}
}
template<typename RangeTy, typename FunctionTy>
FunctionTy do_all_impl(RangeTy range, FunctionTy f, const char* loopname = 0, bool steal = false) {
return do_all_dispatch(range, f, EmptyFn(), false, loopname, steal);
}
template<typename RangeTy, typename FunctionTy, typename ReduceTy>
FunctionTy do_all_impl(RangeTy range, FunctionTy f, ReduceTy r, const char* loopname = 0, bool steal = false) {
return do_all_dispatch(range, f, r, true, loopname, steal);
}
} // end namespace Runtime
} // end namespace Galois
#endif // GALOIS_RUNTIME_DOALL_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ThreadPool.h
|
/** Simple thread related classes -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_THREADPOOL_H
#define GALOIS_RUNTIME_THREADPOOL_H
#include "Galois/config.h"
#include GALOIS_CXX11_STD_HEADER(functional)
namespace Galois {
namespace Runtime {
typedef std::function<void (void)> RunCommand;
class ThreadPool {
protected:
unsigned maxThreads;
ThreadPool(unsigned m) :maxThreads(m) {}
public:
virtual ~ThreadPool() { }
//!execute work on all threads
//!preWork and postWork are executed only on the master thread
virtual void run(RunCommand* begin, RunCommand* end, unsigned num) = 0;
//!return the number of threads supported by the thread pool on the current machine
unsigned getMaxThreads() const { return maxThreads; }
};
//!Returns or creates the appropriate thread pool for the system
ThreadPool& getSystemThreadPool();
} //Runtime
} //Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/Barrier.h
|
/** Barriers -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_RUNTIME_BARRIER_H
#define GALOIS_RUNTIME_BARRIER_H
namespace Galois {
namespace Runtime {
class Barrier {
public:
virtual ~Barrier();
//not safe if any thread is in wait
virtual void reinit(unsigned val) = 0;
//Wait at this barrier
virtual void wait() = 0;
//wait at this barrier
void operator()(void) { wait(); }
};
/**
* Have a pre-instantiated barrier available for use.
* This is initialized to the current activeThreads. This barrier
* is designed to be fast and should be used in the common
* case.
*
* However, there is a race if the number of active threads
* is modified after using this barrier: some threads may still
* be in the barrier while the main thread reinitializes this
* barrier to the new number of active threads. If that may
* happen, use {@link createSimpleBarrier()} instead.
*/
Barrier& getSystemBarrier();
/**
* Creates a new simple barrier. This barrier is not designed to be fast but
* does gaurantee that all threads have left the barrier before returning
* control. Useful when the number of active threads is modified to avoid a
* race in {@link getSystemBarrier()}. Client is reponsible for deallocating
* returned barrier.
*/
Barrier* createSimpleBarrier();
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/PerThreadWorkList.h
|
/** Per Thread workLists-*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* a thread local stl container for each thread
*
* @author <[email protected]>
*/
#ifndef GALOIS_RUNTIME_PERTHREADWORKLIST_H
#define GALOIS_RUNTIME_PERTHREADWORKLIST_H
#include <vector>
#include <deque>
#include <list>
#include <set>
#include <limits>
#include <iterator>
#include <cstdio>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include "Galois/Threads.h"
#include "Galois/PriorityQueue.h"
#include "Galois/TwoLevelIterator.h"
#include "Galois/Runtime/PerThreadStorage.h"
#include "Galois/Runtime/ThreadPool.h"
#include "Galois/Runtime/mm/Mem.h"
#include "Galois/Runtime/ll/gio.h"
namespace Galois {
namespace Runtime {
namespace {
enum GlobalPos {
GLOBAL_BEGIN, GLOBAL_END
};
// #define ADAPTOR_BASED_OUTER_ITER
// XXX: use a combination of boost::transform_iterator and
// boost::counting_iterator to implement the following OuterPerThreadWLIter
#ifdef ADAPTOR_BASED_OUTER_ITER
template<typename PerThrdWL>
struct WLindexer:
public std::unary_function<unsigned, typename PerThrdWL::Cont_ty&>
{
typedef typename PerThrdWL::Cont_ty Ret_ty;
PerThrdWL* wl;
WLindexer(): wl(NULL) {}
WLindexer(PerThrdWL& _wl): wl(&_wl) {}
Ret_ty& operator()(unsigned i) const {
assert(wl != NULL);
assert(i < wl->numRows());
return const_cast<Ret_ty&>(wl->get(i));
}
};
template<typename PerThrdWL>
struct TypeFactory {
typedef typename boost::transform_iterator<WLindexer<PerThrdWL>, boost::counting_iterator<unsigned> > OuterIter;
typedef typename std::reverse_iterator<OuterIter> RvrsOuterIter;
};
template<typename PerThrdWL>
typename TypeFactory<PerThrdWL>::OuterIter make_outer_begin(PerThrdWL& wl) {
return boost::make_transform_iterator(
boost::counting_iterator<unsigned>(0), WLindexer<PerThrdWL>(wl));
}
template<typename PerThrdWL>
typename TypeFactory<PerThrdWL>::OuterIter make_outer_end(PerThrdWL& wl) {
return boost::make_transform_iterator(
boost::counting_iterator<unsigned>(wl.numRows()), WLindexer<PerThrdWL>(wl));
}
template<typename PerThrdWL>
typename TypeFactory<PerThrdWL>::RvrsOuterIter make_outer_rbegin(PerThrdWL& wl) {
return typename TypeFactory<PerThrdWL>::RvrsOuterIter(make_outer_end(wl));
}
template<typename PerThrdWL>
typename TypeFactory<PerThrdWL>::RvrsOuterIter make_outer_rend(PerThrdWL& wl) {
return typename TypeFactory<PerThrdWL>::RvrsOuterIter(make_outer_begin(wl));
}
#else
template<typename PerThrdWL>
class OuterPerThreadWLIter: public std::iterator<std::random_access_iterator_tag, typename PerThrdWL::Cont_ty> {
typedef typename PerThrdWL::Cont_ty Cont_ty;
typedef std::iterator<std::random_access_iterator_tag, Cont_ty> Super_ty;
typedef typename Super_ty::difference_type Diff_ty;
PerThrdWL* workList;
// using Diff_ty due to reverse iterator, whose
// end is -1, and, begin is numRows - 1
Diff_ty row;
void assertInRange() const {
assert((row >= 0) && (row < workList->numRows()));
}
Cont_ty& getWL() {
assertInRange();
return (*workList)[row];
}
const Cont_ty& getWL() const {
assertInRange();
return (*workList)[row];
}
public:
OuterPerThreadWLIter(): Super_ty(), workList(NULL), row(0) {}
OuterPerThreadWLIter(PerThrdWL& wl, const GlobalPos& pos)
: Super_ty(), workList(&wl), row(0) {
switch (pos) {
case GLOBAL_BEGIN:
row = 0;
break;
case GLOBAL_END:
row = wl.numRows();
break;
default:
std::abort();
}
}
typename Super_ty::reference operator*() { return getWL(); }
typename Super_ty::reference operator*() const { return getWL(); }
typename Super_ty::pointer operator->() { return &(getWL()); }
typename Super_ty::value_type* operator->() const { return &(getWL()); }
OuterPerThreadWLIter& operator++() {
++row;
return *this;
}
OuterPerThreadWLIter operator++(int) {
OuterPerThreadWLIter tmp(*this);
operator++();
return tmp;
}
OuterPerThreadWLIter& operator--() {
--row;
return *this;
}
OuterPerThreadWLIter operator--(int) {
OuterPerThreadWLIter tmp(*this);
operator--();
return tmp;
}
OuterPerThreadWLIter& operator+=(Diff_ty d) {
row = unsigned(Diff_ty(row) + d);
return *this;
}
OuterPerThreadWLIter& operator-=(Diff_ty d) {
row = unsigned (Diff_ty(row) - d);
return *this;
}
friend OuterPerThreadWLIter operator+(const OuterPerThreadWLIter& it, Diff_ty d) {
OuterPerThreadWLIter tmp(it);
tmp += d;
return tmp;
}
friend OuterPerThreadWLIter operator+(Diff_ty d, const OuterPerThreadWLIter& it) {
return it + d;
}
friend OuterPerThreadWLIter operator-(const OuterPerThreadWLIter& it, Diff_ty d) {
OuterPerThreadWLIter tmp(it);
tmp -= d;
return tmp;
}
friend Diff_ty operator-(const OuterPerThreadWLIter& left, const OuterPerThreadWLIter& right) {
return Diff_ty(left.row) - Diff_ty(right.row);
}
typename Super_ty::reference operator[](Diff_ty d) {
return *((*this) + d);
}
friend bool operator==(const OuterPerThreadWLIter& left, const OuterPerThreadWLIter& right) {
assert(left.workList == right.workList);
return(left.row == right.row);
}
friend bool operator!=(const OuterPerThreadWLIter& left, const OuterPerThreadWLIter& right) {
return !(left == right);
}
friend bool operator<(const OuterPerThreadWLIter& left, const OuterPerThreadWLIter& right) {
assert(left.workList == right.workList);
return (left.row < right.row);
}
friend bool operator<=(const OuterPerThreadWLIter& left, const OuterPerThreadWLIter& right) {
return (left == right) || (left < right);
}
friend bool operator>(const OuterPerThreadWLIter& left, const OuterPerThreadWLIter& right) {
return !(left <= right);
}
friend bool operator>=(const OuterPerThreadWLIter& left, const OuterPerThreadWLIter& right) {
return !(left < right);
}
};
template<typename PerThrdWL>
OuterPerThreadWLIter<PerThrdWL> make_outer_begin(PerThrdWL& wl) {
return OuterPerThreadWLIter<PerThrdWL>(wl, GLOBAL_BEGIN);
}
template<typename PerThrdWL>
OuterPerThreadWLIter<PerThrdWL> make_outer_end(PerThrdWL& wl) {
return OuterPerThreadWLIter<PerThrdWL>(wl, GLOBAL_END);
}
template<typename PerThrdWL>
std::reverse_iterator<OuterPerThreadWLIter<PerThrdWL> >
make_outer_rbegin(PerThrdWL& wl) {
typedef typename std::reverse_iterator<OuterPerThreadWLIter<PerThrdWL> > Ret_ty;
return Ret_ty(make_outer_end(wl));
}
template<typename PerThrdWL>
std::reverse_iterator<OuterPerThreadWLIter<PerThrdWL> >
make_outer_rend(PerThrdWL& wl) {
typedef typename std::reverse_iterator<OuterPerThreadWLIter<PerThrdWL> > Ret_ty;
return Ret_ty(make_outer_begin(wl));
}
#endif
} // end namespace
template<typename Cont_tp>
class PerThreadWorkList {
public:
typedef Cont_tp Cont_ty;
typedef typename Cont_ty::value_type value_type;
typedef typename Cont_ty::reference reference;
typedef typename Cont_ty::pointer pointer;
typedef typename Cont_ty::size_type size_type;
typedef typename Cont_ty::iterator local_iterator;
typedef typename Cont_ty::const_iterator local_const_iterator;
typedef typename Cont_ty::reverse_iterator local_reverse_iterator;
typedef typename Cont_ty::const_reverse_iterator local_const_reverse_iterator;
typedef PerThreadWorkList This_ty;
#ifdef ADAPTOR_BASED_OUTER_ITER
typedef typename TypeFactory<This_ty>::OuterIter OuterIter;
typedef typename TypeFactory<This_ty>::RvrsOuterIter RvrsOuterIter;
#else
typedef OuterPerThreadWLIter<This_ty> OuterIter;
typedef typename std::reverse_iterator<OuterIter> RvrsOuterIter;
#endif
typedef typename Galois::ChooseStlTwoLevelIterator<OuterIter, typename Cont_ty::iterator>::type global_iterator;
typedef typename Galois::ChooseStlTwoLevelIterator<OuterIter, typename Cont_ty::const_iterator>::type global_const_iterator;
typedef typename Galois::ChooseStlTwoLevelIterator<RvrsOuterIter, typename Cont_ty::reverse_iterator>::type global_reverse_iterator;
typedef typename Galois::ChooseStlTwoLevelIterator<RvrsOuterIter, typename Cont_ty::const_reverse_iterator>::type global_const_reverse_iterator;
private:
// XXX: for testing only
#if 0
struct FakePTS {
std::vector<Cont_ty*> v;
FakePTS () {
v.resize (size ());
}
Cont_ty** getLocal () const {
return getRemote (Galois::Runtime::LL::getTID ());
}
Cont_ty** getRemote (size_t i) const {
assert (i < v.size ());
return const_cast<Cont_ty**> (&v[i]);
}
size_t size () const { return Galois::Runtime::LL::getMaxThreads(); }
};
#endif
// typedef FakePTS PerThrdCont_ty;
typedef Galois::Runtime::PerThreadStorage<Cont_ty*> PerThrdCont_ty;
PerThrdCont_ty perThrdCont;
void destroy() {
for (unsigned i = 0; i < perThrdCont.size(); ++i) {
delete *perThrdCont.getRemote(i);
*perThrdCont.getRemote(i) = NULL;
}
}
protected:
PerThreadWorkList(): perThrdCont() {
for (unsigned i = 0; i < perThrdCont.size(); ++i) {
*perThrdCont.getRemote(i) = NULL;
}
}
void init(const Cont_ty& cont) {
for (unsigned i = 0; i < perThrdCont.size(); ++i) {
*perThrdCont.getRemote(i) = new Cont_ty(cont);
}
}
~PerThreadWorkList() {
destroy();
}
public:
unsigned numRows() const { return perThrdCont.size(); }
Cont_ty& get() { return **(perThrdCont.getLocal()); }
const Cont_ty& get() const { return **(perThrdCont.getLocal()); }
Cont_ty& get(unsigned i) { return **(perThrdCont.getRemote(i)); }
const Cont_ty& get(unsigned i) const { return **(perThrdCont.getRemote(i)); }
Cont_ty& operator [](unsigned i) { return get(i); }
const Cont_ty& operator [](unsigned i) const { return get(i); }
global_iterator begin_all() {
return Galois::stl_two_level_begin(
make_outer_begin(*this), make_outer_end(*this));
}
global_iterator end_all() {
return Galois::stl_two_level_end(
make_outer_begin(*this), make_outer_end(*this));
}
global_const_iterator begin_all() const {
return Galois::stl_two_level_cbegin(
make_outer_begin(*this), make_outer_end(*this));
}
global_const_iterator end_all() const {
return Galois::stl_two_level_cend(
make_outer_begin(*this), make_outer_end(*this));
}
global_const_iterator cbegin_all() const {
return Galois::stl_two_level_cbegin(
make_outer_begin(*this), make_outer_end(*this));
}
global_const_iterator cend_all() const {
return Galois::stl_two_level_cend(
make_outer_begin(*this), make_outer_end(*this));
}
global_reverse_iterator rbegin_all() {
return Galois::stl_two_level_rbegin(
make_outer_rbegin(*this), make_outer_rend(*this));
}
global_reverse_iterator rend_all() {
return Galois::stl_two_level_rend(
make_outer_rbegin(*this), make_outer_rend(*this));
}
global_const_reverse_iterator rbegin_all() const {
return Galois::stl_two_level_crbegin(
make_outer_rbegin(*this), make_outer_rend(*this));
}
global_const_reverse_iterator rend_all() const {
return Galois::stl_two_level_crend(
make_outer_rbegin(*this), make_outer_rend(*this));
}
global_const_reverse_iterator crbegin_all() const {
return Galois::stl_two_level_crbegin(
make_outer_rbegin(*this), make_outer_rend(*this));
}
global_const_reverse_iterator crend_all() const {
return Galois::stl_two_level_crend(
make_outer_rbegin(*this), make_outer_rend(*this));
}
size_type size_all() const {
size_type sz = 0;
for (unsigned i = 0; i < perThrdCont.size(); ++i) {
sz += get(i).size();
}
return sz;
}
void clear_all() {
for (unsigned i = 0; i < perThrdCont.size(); ++i) {
get(i).clear();
}
}
bool empty_all() const {
bool res = true;
for (unsigned i = 0; i < perThrdCont.size(); ++i) {
res = res && get(i).empty();
}
return res;
}
// TODO: fill parallel
template<typename Iter, typename R>
void fill_serial(Iter begin, Iter end,
R(Cont_ty::*pushFn)(const value_type&) = &Cont_ty::push_back) {
const unsigned P = Galois::getActiveThreads();
typedef typename std::iterator_traits<Iter>::difference_type Diff_ty;
// integer division, where we want to round up. So adding P-1
Diff_ty block_size = (std::distance(begin, end) + (P-1) ) / P;
assert(block_size >= 1);
Iter block_begin = begin;
for (unsigned i = 0; i < P; ++i) {
Iter block_end = block_begin;
if (std::distance(block_end, end) < block_size) {
block_end = end;
} else {
std::advance(block_end, block_size);
}
for (; block_begin != block_end; ++block_begin) {
// workList[i].push_back(Marked<Value_ty>(*block_begin));
((*this)[i].*pushFn)(value_type(*block_begin));
}
if (block_end == end) {
break;
}
}
}
};
namespace PerThreadFactory {
typedef MM::SimpleBumpPtrWithMallocFallback<MM::FreeListHeap<MM::SystemBaseAlloc> > BasicHeap;
typedef MM::ThreadAwarePrivateHeap<BasicHeap> Heap;
template<typename T>
struct Alloc { typedef typename MM::ExternRefGaloisAllocator<T, Heap> type; };
template<typename T>
struct FSBAlloc { typedef typename MM::FSBGaloisAllocator<T> type; };
template<typename T>
struct Vector { typedef typename std::vector<T, typename Alloc<T>::type > type; };
template<typename T>
struct Deque { typedef typename std::deque<T, typename Alloc<T>::type > type; };
template<typename T>
struct List { typedef typename std::list<T, typename FSBAlloc<T>::type > type; };
template<typename T, typename C>
struct Set { typedef typename std::set<T, C, typename FSBAlloc<T>::type > type; };
template<typename T, typename C>
struct PQ { typedef MinHeap<T, C, typename Vector<T>::type > type; };
};
template<typename T>
class PerThreadVector: public PerThreadWorkList<typename PerThreadFactory::template Vector<T>::type> {
public:
typedef typename PerThreadFactory::Heap Heap_ty;
typedef typename PerThreadFactory::template Alloc<T>::type Alloc_ty;
typedef typename PerThreadFactory::template Vector<T>::type Cont_ty;
protected:
typedef PerThreadWorkList<Cont_ty> Super_ty;
Heap_ty heap;
Alloc_ty alloc;
public:
PerThreadVector(): Super_ty(), heap(), alloc(&heap) {
Super_ty::init(Cont_ty(alloc));
}
void reserve_all(size_t sz) {
size_t numT = Galois::getActiveThreads();
size_t perT = (sz + numT - 1) / numT; // round up
for (unsigned i = 0; i < numT; ++i) {
Super_ty::get(i).reserve(perT);
}
}
};
template<typename T>
class PerThreadDeque:
public PerThreadWorkList<typename PerThreadFactory::template Deque<T>::type> {
public:
typedef typename PerThreadFactory::Heap Heap_ty;
typedef typename PerThreadFactory::template Alloc<T>::type Alloc_ty;
protected:
typedef typename PerThreadFactory::template Deque<T>::type Cont_ty;
typedef PerThreadWorkList<Cont_ty> Super_ty;
Heap_ty heap;
Alloc_ty alloc;
public:
PerThreadDeque(): Super_ty(), heap(), alloc(&heap) {
Super_ty::init(Cont_ty(alloc));
}
};
template<typename T>
class PerThreadList:
public PerThreadWorkList<typename PerThreadFactory::template List<T>::type> {
public:
typedef typename PerThreadFactory::Heap Heap_ty;
typedef typename PerThreadFactory::template Alloc<T>::type Alloc_ty;
protected:
typedef typename PerThreadFactory::template List<T>::type Cont_ty;
typedef PerThreadWorkList<Cont_ty> Super_ty;
Heap_ty heap;
Alloc_ty alloc;
public:
PerThreadList(): Super_ty(), heap(), alloc(&heap) {
Super_ty::init(Cont_ty(alloc));
}
};
template<typename T, typename C=std::less<T> >
class PerThreadSet:
public PerThreadWorkList<typename PerThreadFactory::template Set<T, C>::type> {
public:
typedef typename PerThreadFactory::template FSBAlloc<T>::type Alloc_ty;
protected:
typedef typename PerThreadFactory::template Set<T, C>::type Cont_ty;
typedef PerThreadWorkList<Cont_ty> Super_ty;
Alloc_ty alloc;
public:
explicit PerThreadSet(const C& cmp = C()): Super_ty(), alloc() {
Super_ty::init(Cont_ty(cmp, alloc));
}
typedef typename Super_ty::global_const_iterator global_const_iterator;
typedef typename Super_ty::global_const_reverse_iterator global_const_reverse_iterator;
// hiding non-const (and const) versions in Super_ty
global_const_iterator begin_all() const { return Super_ty::cbegin_all(); }
global_const_iterator end_all() const { return Super_ty::cend_all(); }
// hiding non-const (and const) versions in Super_ty
global_const_reverse_iterator rbegin_all() const { return Super_ty::crbegin_all(); }
global_const_reverse_iterator rend_all() const { return Super_ty::crend_all(); }
};
template<typename T, typename C=std::less<T> >
class PerThreadMinHeap:
public PerThreadWorkList<typename PerThreadFactory::template PQ<T, C>::type> {
public:
typedef typename PerThreadFactory::Heap Heap_ty;
typedef typename PerThreadFactory::template Alloc<T>::type Alloc_ty;
protected:
typedef typename PerThreadFactory::template Vector<T>::type Vec_ty;
typedef typename PerThreadFactory::template PQ<T, C>::type Cont_ty;
typedef PerThreadWorkList<Cont_ty> Super_ty;
Heap_ty heap;
Alloc_ty alloc;
public:
explicit PerThreadMinHeap(const C& cmp = C()): Super_ty(), heap(), alloc(&heap) {
Super_ty::init(Cont_ty(cmp, Vec_ty(alloc)));
}
typedef typename Super_ty::global_const_iterator global_const_iterator;
typedef typename Super_ty::global_const_reverse_iterator global_const_reverse_iterator;
// hiding non-const (and const) versions in Super_ty
global_const_iterator begin_all() const { return Super_ty::cbegin_all(); }
global_const_iterator end_all() const { return Super_ty::cend_all(); }
// hiding non-const (and const) versions in Super_ty
global_const_reverse_iterator rbegin_all() const { return Super_ty::crbegin_all(); }
global_const_reverse_iterator rend_all() const { return Super_ty::crend_all(); }
};
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/PerThreadStorage.h
|
/** Per Thread Storage -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_PERTHREADSTORAGE_H
#define GALOIS_RUNTIME_PERTHREADSTORAGE_H
#include "Galois/config.h"
#include "Galois/Runtime/ll/TID.h"
#include "Galois/Runtime/ll/HWTopo.h"
#include "Galois/Runtime/ThreadPool.h"
#include "Galois/Runtime/ActiveThreads.h"
#include <boost/utility.hpp>
#include <cassert>
#include <vector>
#include GALOIS_CXX11_STD_HEADER(utility)
namespace Galois {
namespace Runtime {
class PerBackend {
static const unsigned MAX_SIZE = 30;
static const unsigned MIN_SIZE = 3; // 8 bytes
unsigned int nextLoc;
std::vector<char*> heads;
std::vector<std::vector<unsigned> > freeOffsets;
void initCommon();
static unsigned nextLog2(unsigned size);
public:
PerBackend(): nextLoc(0) {
freeOffsets.resize(MAX_SIZE);
}
char* initPerThread();
char* initPerPackage();
#ifdef GALOIS_USE_EXP
char* initPerThread_cilk ();
char* initPerPackage_cilk ();
#endif // GALOIS_USE_EXP
unsigned allocOffset(const unsigned size);
void deallocOffset(const unsigned offset, const unsigned size);
void* getRemote(unsigned thread, unsigned offset);
void* getLocal(unsigned offset, char* base) {
return &base[offset];
}
// faster when (1) you already know the id and (2) shared access to heads is
// not to expensive; otherwise use getLocal(unsigned,char*)
void* getLocal(unsigned offset, unsigned id) {
return &heads[id][offset];
}
};
extern __thread char* ptsBase;
PerBackend& getPTSBackend();
extern __thread char* ppsBase;
PerBackend& getPPSBackend();
void initPTS();
#ifdef GALOIS_USE_EXP
void initPTS_cilk ();
#endif // GALOIS_USE_EXP
template<typename T>
class PerThreadStorage: private boost::noncopyable {
protected:
unsigned offset;
PerBackend& b;
public:
#if defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1310
// ICC 13.1 doesn't detect the other constructor as the default constructor
PerThreadStorage(): b(getPTSBackend()) {
//in case we make one of these before initializing the thread pool
//This will call initPTS for each thread if it hasn't already
Galois::Runtime::getSystemThreadPool();
offset = b.allocOffset(sizeof(T));
for (unsigned n = 0; n < LL::getMaxThreads(); ++n)
new (b.getRemote(n, offset)) T();
}
#endif
template<typename... Args>
PerThreadStorage(Args&&... args) :b(getPTSBackend()) {
//in case we make one of these before initializing the thread pool
//This will call initPTS for each thread if it hasn't already
Galois::Runtime::getSystemThreadPool();
offset = b.allocOffset(sizeof(T));
for (unsigned n = 0; n < LL::getMaxThreads(); ++n)
new (b.getRemote(n, offset)) T(std::forward<Args>(args)...);
}
~PerThreadStorage() {
for (unsigned n = 0; n < LL::getMaxThreads(); ++n)
reinterpret_cast<T*>(b.getRemote(n, offset))->~T();
b.deallocOffset(offset, sizeof(T));
}
T* getLocal() const {
void* ditem = b.getLocal(offset, ptsBase);
return reinterpret_cast<T*>(ditem);
}
//! Like getLocal() but optimized for when you already know the thread id
T* getLocal(unsigned int thread) const {
void* ditem = b.getLocal(offset, thread);
return reinterpret_cast<T*>(ditem);
}
T* getRemote(unsigned int thread) const {
void* ditem = b.getRemote(thread, offset);
return reinterpret_cast<T*>(ditem);
}
unsigned size() const {
return LL::getMaxThreads();
}
};
template<typename T>
class PerPackageStorage: private boost::noncopyable {
protected:
unsigned offset;
PerBackend& b;
public:
#if defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1310
// ICC 13.1 doesn't detect the other constructor as the default constructor
PerPackageStorage(): b(getPPSBackend()) {
//in case we make one of these before initializing the thread pool
//This will call initPTS for each thread if it hasn't already
Galois::Runtime::getSystemThreadPool();
offset = b.allocOffset(sizeof(T));
for (unsigned n = 0; n < LL::getMaxPackages(); ++n)
new (b.getRemote(LL::getLeaderForPackage(n), offset)) T();
}
#endif
template<typename... Args>
PerPackageStorage(Args&&... args) :b(getPPSBackend()) {
//in case we make one of these before initializing the thread pool
//This will call initPTS for each thread if it hasn't already
Galois::Runtime::getSystemThreadPool();
offset = b.allocOffset(sizeof(T));
for (unsigned n = 0; n < LL::getMaxPackages(); ++n)
new (b.getRemote(LL::getLeaderForPackage(n), offset)) T(std::forward<Args>(args)...);
}
~PerPackageStorage() {
for (unsigned n = 0; n < LL::getMaxPackages(); ++n)
reinterpret_cast<T*>(b.getRemote(LL::getLeaderForPackage(n), offset))->~T();
b.deallocOffset(offset, sizeof(T));
}
T* getLocal() const {
void* ditem = b.getLocal(offset, ppsBase);
return reinterpret_cast<T*>(ditem);
}
//! Like getLocal() but optimized for when you already know the thread id
T* getLocal(unsigned int thread) const {
void* ditem = b.getLocal(offset, thread);
return reinterpret_cast<T*>(ditem);
}
T* getRemote(unsigned int thread) const {
void* ditem = b.getRemote(thread, offset);
return reinterpret_cast<T*>(ditem);
}
T* getRemoteByPkg(unsigned int pkg) const {
void* ditem = b.getRemote(LL::getLeaderForPackage(pkg), offset);
return reinterpret_cast<T*>(ditem);
}
unsigned size() const {
return LL::getMaxThreads();
}
};
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/HWTopo.h
|
/** Hardware topology and thread binding -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* Report HW topology and allow thread binding.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_HWTOPO_H
#define GALOIS_RUNTIME_LL_HWTOPO_H
namespace Galois {
namespace Runtime {
namespace LL {
//! Bind thread specified by id to the correct OS thread
bool bindThreadToProcessor(int galois_thread_id);
//! Get physical processor id from virtual Galois thread id
unsigned getProcessorForThread(int galois_thread_id);
//! Get number of threads supported
unsigned getMaxThreads();
//! Get number of cores supported
unsigned getMaxCores();
//! Get number of packages supported
unsigned getMaxPackages();
//! Map thread to package
unsigned getPackageForThread(int galois_thread_id);
//! Find the maximum package number for all threads up to and including id
unsigned getMaxPackageForThread(int galois_thread_id);
//! is this the first thread in a package
bool isPackageLeader(int galois_thread_id);
unsigned getLeaderForThread(int galois_thread_id);
unsigned getLeaderForPackage(int galois_pkg_id);
extern __thread unsigned PACKAGE_ID;
static inline unsigned fillPackageID(int galois_thread_id) {
unsigned x = getPackageForThread(galois_thread_id);
bool y = isPackageLeader(galois_thread_id);
x = (x << 2) | ((y ? 1 : 0) << 1) | 1;
PACKAGE_ID = x;
return x;
}
//! Optimized when galois_thread_id corresponds to the executing thread
static inline unsigned getPackageForSelf(int galois_thread_id) {
unsigned x = PACKAGE_ID;
if (x & 1)
return x >> 2;
x = fillPackageID(galois_thread_id);
return x >> 2;
}
//! Optimized when galois_thread_id corresponds to the executing thread
static inline bool isPackageLeaderForSelf(int galois_thread_id) {
unsigned x = PACKAGE_ID;
if (x & 1)
return (x >> 1) & 1;
x = fillPackageID(galois_thread_id);
return (x >> 1) & 1;
}
}
}
} // end namespace Galois
#endif //_HWTOPO_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/TID.h
|
/** Thread ID -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* Manage Thread ID. ID's are sequential and dense from zero.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_TID_H
#define GALOIS_RUNTIME_LL_TID_H
namespace Galois {
namespace Runtime {
//! Low-level, Galois-agnostic functionality.
namespace LL {
extern __thread unsigned TID;
//Get this thread's id.
static inline unsigned getTID() {
return TID;
}
//uninitialized TIDs are 0, and only thread 0 accesses TID before
//initializing it
void initTID();
#ifdef GALOIS_USE_EXP
void initTID_cilk();
#endif // GALOIS_USE_EXP
}
}
}
#endif //_TID_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/CompilerSpecific.h
|
/** Galois configuration -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* Factor out compiler-specific lowlevel stuff
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_COMPILERSPECIFIC_H
#define GALOIS_RUNTIME_LL_COMPILERSPECIFIC_H
namespace Galois {
namespace Runtime {
namespace LL {
inline static void asmPause() {
#if defined(__i386__) || defined(__amd64__)
// __builtin_ia32_pause();
asm volatile ("pause");
#endif
}
inline static void compilerBarrier() {
asm volatile ("":::"memory");
}
inline static void flushInstructionPipeline() {
#if defined(__i386__) || defined(__amd64__)
asm volatile (
"xor %%eax, %%eax;"
"cpuid;"
:::"%eax", "%ebx", "%ecx", "%edx");
#endif
}
//xeons have 64 byte cache lines, but will prefetch 2 at a time
#define GALOIS_CACHE_LINE_SIZE 128
#if defined(__INTEL_COMPILER)
#define GALOIS_ATTRIBUTE_NOINLINE __attribute__ ((noinline))
#define GALOIS_ATTRIBUTE_DEPRECATED __attribute__ ((deprecated))
#define GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE __attribute__((aligned(GALOIS_CACHE_LINE_SIZE)))
#elif defined( __GNUC__)
#define GALOIS_ATTRIBUTE_NOINLINE __attribute__ ((noinline))
#define GALOIS_ATTRIBUTE_DEPRECATED __attribute__ ((deprecated))
#define GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE __attribute__((aligned(GALOIS_CACHE_LINE_SIZE)))
#elif defined( _MSC_VER)
#define GALOIS_ATTRIBUTE_NOINLINE __declspec(noinline)
#define GALOIS_ATTRIBUTE_DEPRECATED __declspec ((deprecated))
#define GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE __declspec(align(GALOIS_CACHE_LINE_SIZE))
#else
#define GALOIS_ATTRIBUTE_NOINLINE
#define GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE
#define GALOIS_ATTRIBUTE_DEPRECATED __attribute__ ((deprecated))
#endif
// used to disable inlining of functions so that they
// show up in stack samples when profiling
#ifdef GALOIS_USE_PROF
#define GALOIS_ATTRIBUTE_PROF_NOINLINE GALOIS_ATTRIBUTE_NOINLINE
#else
#define GALOIS_ATTRIBUTE_PROF_NOINLINE inline
#endif
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/StaticInstance.h
|
/** Simple Safe Static Global Instance -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* This contains a wrapper to declare non-pod globals in a safe way.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_STATICINSTANCE_H
#define GALOIS_RUNTIME_LL_STATICINSTANCE_H
#include "CompilerSpecific.h"
namespace Galois {
namespace Runtime {
namespace LL {
//This should be much simpler in c++03 mode, but be general for now
//This exists because ptrlock is not a pod, but this is.
template<typename T>
struct StaticInstance {
volatile T* V;
volatile int _lock;
inline void lock() {
int oldval;
do {
while (_lock != 0) {
asmPause();
}
oldval = __sync_fetch_and_or(&_lock, 1);
} while (oldval & 1);
}
inline void unlock() {
compilerBarrier();
_lock = 0;
}
T* get() {
volatile T* val = V;
if (val)
return (T*)val;
lock();
val = V;
if (!val)
V = val = new T();
unlock();
return (T*)val;
}
};
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/ThreadRWlock.h
|
/** TODO -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* TODO
*
* @author <[email protected]>
*/
#ifndef GALOIS_THREAD_RW_LOCK_H
#define GALOIS_THREAD_RW_LOCK_H
#include "Galois/Runtime/ll/PaddedLock.h"
#include "Galois/Runtime/PerThreadStorage.h"
namespace Galois {
namespace Runtime {
namespace LL {
//FIXME: nothing in LL should depend on Runtime
class ThreadRWlock {
typedef PaddedLock<true> Lock_ty;
// typedef Galois::Runtime::LL::SimpleLock<true> Lock_ty;
typedef PerThreadStorage<Lock_ty> PerThreadLock;
PerThreadLock locks;
public:
void readLock () {
locks.getLocal ()->lock ();
}
void readUnlock () {
locks.getLocal ()->unlock ();
}
void writeLock () {
for (unsigned i = 0; i < locks.size (); ++i) {
locks.getRemote (i)->lock ();
}
}
void writeUnlock () {
for (unsigned i = 0; i < locks.size (); ++i) {
locks.getRemote (i)->unlock ();
}
}
};
} // end namespace LL
} // end namespace Runtime
} // end namespace Galois
#endif // GALOIS_THREAD_RW_LOCK_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/SimpleLock.h
|
/** Simple Spin Lock -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* This contains the basic spinlock used in Galois. We use a
* test-and-test-and-set approach, with pause instructions on x86 and
* compiler barriers on unlock.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_SIMPLE_LOCK_H
#define GALOIS_RUNTIME_LL_SIMPLE_LOCK_H
#include "Galois/config.h"
#include "Galois/Runtime/ll/CompilerSpecific.h"
#include <cassert>
#include GALOIS_CXX11_STD_HEADER(atomic)
namespace Galois {
namespace Runtime {
namespace LL {
/// SimpleLock is a spinlock. If the template parameter is
/// false, the lock is a noop.
/// Copying a lock is unsynchronized (relaxed ordering)
template<bool isALock>
class SimpleLock;
template<>
class SimpleLock<true> {
mutable std::atomic<int> _lock;
GALOIS_ATTRIBUTE_NOINLINE
void slow_lock() const {
int oldval = 0;
do {
while (_lock.load(std::memory_order_acquire) != 0) {
asmPause();
}
oldval = 0;
} while (!_lock.compare_exchange_weak(oldval, 1, std::memory_order_acq_rel, std::memory_order_relaxed));
assert(is_locked());
}
public:
SimpleLock() : _lock(0) { }
//relaxed order for copy
SimpleLock(const SimpleLock& p) :_lock(p._lock.load(std::memory_order_relaxed)) {}
SimpleLock& operator= (const SimpleLock& p) {
if (&p == this) return *this;
//relaxed order for initialization
_lock.store(p._lock.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}
inline void lock() const {
int oldval = 0;
if (_lock.load(std::memory_order_relaxed))
goto slow_path;
if (!_lock.compare_exchange_weak(oldval, 1, std::memory_order_acq_rel, std::memory_order_relaxed))
goto slow_path;
assert(is_locked());
return;
slow_path:
slow_lock();
}
inline void unlock() const {
assert(is_locked());
//HMMMM
_lock.store(0, std::memory_order_release);
//_lock = 0;
}
inline bool try_lock() const {
int oldval = 0;
if (_lock.load(std::memory_order_acquire))
return false;
if (!_lock.compare_exchange_weak(oldval, 1, std::memory_order_acq_rel))
return false;
assert(is_locked());
return true;
}
inline bool is_locked() const {
return _lock.load(std::memory_order_acquire) & 1;
}
};
template<>
class SimpleLock<false> {
public:
inline void lock() const {}
inline void unlock() const {}
inline bool try_lock() const { return true; }
inline bool is_locked() const { return false; }
};
void LockPairOrdered(SimpleLock<true>& L1, SimpleLock<true>& L2);
bool TryLockPairOrdered(SimpleLock<true>& L1, SimpleLock<true>& L2);
void UnLockPairOrdered(SimpleLock<true>& L1, SimpleLock<true>& L2);
void LockPairOrdered(SimpleLock<false>& L1, SimpleLock<false>& L2);
bool TryLockPairOrdered(SimpleLock<false>& L1, SimpleLock<false>& L2);
void UnLockPairOrdered(SimpleLock<false>& L1, SimpleLock<false>& L2);
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/gio.h
|
/** Galois IO routines -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* IO support for galois. We use this to handle output redirection,
* and common formating issues.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_GIO_H
#define GALOIS_RUNTIME_LL_GIO_H
#include <sstream>
#include <cerrno>
//FIXME: move to Runtime
namespace Galois {
namespace Runtime {
namespace LL {
//! Prints a string
void gPrintStr(const std::string&);
//! Prints an info string (for easy parsing)
void gInfoStr(const std::string&);
//! Prints a warning string (for easy parsing)
void gWarnStr(const std::string&);
//! Prints a debug string (for easy parsing)
void gDebugStr(const std::string&);
//! Prints an error string (for easy parsing)
void gErrorStr(const std::string&);
//! Converts a sequence of things to a string
template<typename T>
bool toString(std::ostringstream& os, const T& val) { os << val; return true; }
//! Prints a sequence of things
template<typename... Args>
void gPrint(Args... args) {
std::ostringstream os;
__attribute__((unused)) bool tmp[] = {toString(os, args)...};
gPrintStr(os.str());
}
//! Prints an info string from a sequence of things
template<typename... Args>
void gInfo(Args... args) {
std::ostringstream os;
__attribute__((unused)) bool tmp[] = {toString(os, args)...};
gInfoStr(os.str());
}
//! Prints a warning string from a sequence of things
template<typename... Args>
void gWarn(Args... args) {
std::ostringstream os;
__attribute__((unused)) bool tmp[] = {toString(os, args)...};
gWarnStr(os.str());
}
//! Prints a debug string from a sequence of things; prints nothing if NDEBUG
//! is defined.
template<typename... Args>
void gDebug(Args... args) {
#ifndef NDEBUG
std::ostringstream os;
__attribute__((unused)) bool tmp[] = {toString(os, args)...};
gDebugStr(os.str());
#endif
}
//! Prints error message
template<typename... Args>
void gError(Args... args) {
std::ostringstream os;
__attribute__((unused)) bool tmp[] = {toString(os, args)...};
gErrorStr(os.str());
}
void gFlush();
#define GALOIS_SYS_ERROR(...) do { Galois::Runtime::LL::gError(__FILE__, ":", __LINE__, ": ", strerror(errno), ": ", ##__VA_ARGS__); } while (0)
#define GALOIS_ERROR(...) do { Galois::Runtime::LL::gError(__FILE__, ":", __LINE__, ": ", ##__VA_ARGS__); } while (0)
#define GALOIS_SYS_DIE(...) do { Galois::Runtime::LL::gError(__FILE__, ":", __LINE__, ": ", strerror(errno), ": ", ##__VA_ARGS__); abort(); } while (0)
#define GALOIS_DIE(...) do { Galois::Runtime::LL::gError(__FILE__, ":", __LINE__, ": ", ##__VA_ARGS__); abort(); } while (0)
}
}
} // end namespace Galois
#endif //_GIO_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/CacheLineStorage.h
|
/** One element per cache line -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* This wrapper ensures the contents occupy its own cache line(s).
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_CACHELINESTORAGE_H
#define GALOIS_RUNTIME_CACHELINESTORAGE_H
#include "Galois/config.h"
#include "Galois/Runtime/ll/CompilerSpecific.h"
#include GALOIS_CXX11_STD_HEADER(utility)
namespace Galois {
namespace Runtime {
namespace LL {
template<typename T, int REM>
struct CacheLineImpl {
GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE T data;
char pad[REM];
CacheLineImpl() :data() {}
CacheLineImpl(const T& v) :data(v) {}
template<typename A>
explicit CacheLineImpl(A&& v) :data(std::forward<A>(v)) {}
explicit operator T() { return data; }
};
template<typename T>
struct CacheLineImpl<T, 0> {
GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE T data;
CacheLineImpl() :data() {}
CacheLineImpl(const T& v) :data(v) {}
template<typename A>
explicit CacheLineImpl(A&& v) :data(std::forward<A>(v)) {}
explicit operator T() { return data; }
};
// Store an item with padding
template<typename T>
struct CacheLineStorage : public CacheLineImpl<T, GALOIS_CACHE_LINE_SIZE % sizeof(T)> {
typedef CacheLineImpl<T, GALOIS_CACHE_LINE_SIZE % sizeof(T)> PTy;
CacheLineStorage() :PTy() {}
CacheLineStorage(const T& v) :PTy(v) {}
// XXX(ddn): Forwarding is still wonky in XLC
#if !defined(__IBMCPP__) || __IBMCPP__ > 1210
template<typename A>
explicit CacheLineStorage(A&& v) :PTy(std::forward<A>(v)) {}
#endif
explicit operator T() { return this->data; }
CacheLineStorage& operator=(const T& v) { this->data = v; return *this; }
};
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/PtrLock.h
|
/** Pointer Spin Lock -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* This contains the pointer-based spinlock used in Galois. We use a
* test-and-test-and-set approach, with pause instructions on x86 and
* compiler barriers on unlock. A pointer-lock uses the low-order bit
* in a pointer to store the lock, thus assumes a non-one-byte
* alignment.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_PTRLOCK_H
#define GALOIS_RUNTIME_LL_PTRLOCK_H
#include "Galois/config.h"
#include "Galois/Runtime/ll/CompilerSpecific.h"
#include <stdint.h>
#include <cassert>
#include GALOIS_CXX11_STD_HEADER(atomic)
namespace Galois {
namespace Runtime {
namespace LL {
/// PtrLock is a spinlock and a pointer. If the second template
/// parameter is false, the lock is a noop. This wraps a pointer and
/// uses the low order bit for the lock flag
/// Copying a lock is unsynchronized (relaxed ordering)
template<typename T, bool isALock>
class PtrLock;
template<typename T>
class PtrLock<T, true> {
std::atomic<uintptr_t> _lock;
GALOIS_ATTRIBUTE_NOINLINE
void slow_lock() {
uintptr_t oldval;
do {
while ((_lock.load(std::memory_order_acquire) & 1) != 0) {
asmPause();
}
oldval = _lock.fetch_or(1, std::memory_order_acq_rel);
} while (oldval & 1);
assert(_lock);
}
public:
PtrLock() : _lock(0) {}
//relaxed order for copy
PtrLock(const PtrLock& p) : _lock(p._lock.load(std::memory_order_relaxed)) {}
PtrLock& operator=(const PtrLock& p) {
if (&p == this) return *this;
//relaxed order for initialization
_lock.store(p._lock.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}
inline void lock() {
uintptr_t oldval = _lock.load(std::memory_order_relaxed);
if (oldval & 1)
goto slow_path;
if (!_lock.compare_exchange_weak(oldval, oldval | 1, std::memory_order_acq_rel, std::memory_order_relaxed))
goto slow_path;
assert(is_locked());
return;
slow_path:
slow_lock();
}
inline void unlock() {
assert(is_locked());
_lock.store(_lock.load(std::memory_order_relaxed) & ~(uintptr_t)1, std::memory_order_release);
}
inline void unlock_and_clear() {
assert(is_locked());
_lock.store(0, std::memory_order_release);
}
inline void unlock_and_set(T* val) {
assert(is_locked());
assert(!((uintptr_t)val & 1));
_lock.store((uintptr_t) val, std::memory_order_release);
}
inline T* getValue() const {
return (T*)(_lock.load(std::memory_order_relaxed) & ~(uintptr_t)1);
}
inline void setValue(T* val) {
uintptr_t nval = (uintptr_t)val;
nval |= (_lock & 1);
//relaxed OK since this doesn't clear lock
_lock.store(nval, std::memory_order_relaxed);
}
inline bool try_lock() {
uintptr_t oldval = _lock.load(std::memory_order_relaxed);
if ((oldval & 1) != 0)
return false;
oldval = _lock.fetch_or(1, std::memory_order_acq_rel);
return !(oldval & 1);
}
inline bool is_locked() const {
return _lock.load(std::memory_order_acquire) & 1;
}
//! CAS only works on unlocked values
//! the lock bit will prevent a successful cas
inline bool CAS(T* oldval, T* newval) {
assert(!((uintptr_t)oldval & 1) && !((uintptr_t)newval & 1));
uintptr_t old = (uintptr_t)oldval;
return _lock.compare_exchange_strong(old, (uintptr_t)newval);
}
//! CAS that works on locked values; this can be very dangerous
//! when used incorrectly
inline bool stealing_CAS(T* oldval, T* newval) {
uintptr_t old = 1 | (uintptr_t)oldval;
return _lock.compare_exchange_strong(old, 1 | (uintptr_t)newval);
}
};
template<typename T>
class PtrLock<T, false> {
T* _lock;
public:
PtrLock() : _lock() {}
inline void lock() {}
inline void unlock() {}
inline void unlock_and_clear() { _lock = 0; }
inline void unlock_and_set(T* val) { _lock = val; }
inline T* getValue() const { return _lock; }
inline void setValue(T* val) { _lock = val; }
inline bool try_lock() const { return true; }
inline bool is_locked() const { return false; }
inline bool CAS(T* oldval, T* newval) {
if (_lock == oldval) {
_lock = newval;
return true;
}
return false;
}
inline bool stealing_CAS(T* oldval, T* newval) {
return CAS(oldval, newval);
}
};
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/PaddedLock.h
|
/** Cache-line padded Simple Spin Lock -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in
* irregular programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights
* reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES
* CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,
* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY
* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF
* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO
* THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect,
* direct or consequential damages or loss of profits, interruption of
* business, or related expenses which may arise from use of Software
* or Documentation, including but not limited to those resulting from
* defects in Software and/or Documentation, or loss or inaccuracy of
* data of any kind.
*
* @section Description
*
* This contains the basic spinlock padded and aligned to use a cache
* line.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_PADDED_LOCK_H
#define GALOIS_RUNTIME_LL_PADDED_LOCK_H
#include "SimpleLock.h"
#include "CacheLineStorage.h"
namespace Galois {
namespace Runtime {
namespace LL {
/// PaddedLock is a spinlock. If the second template parameter is
/// false, the lock is a noop.
template<bool concurrent>
class PaddedLock;
void LockPairOrdered(PaddedLock<true>& L1, PaddedLock<true>& L2);
bool TryLockPairOrdered(PaddedLock<true>& L1, PaddedLock<true>& L2);
void UnLockPairOrdered(PaddedLock<true>& L1, PaddedLock<true>& L2);
void LockPairOrdered(PaddedLock<false>& L1, PaddedLock<false>& L2);
bool TryLockPairOrdered(PaddedLock<false>& L1, PaddedLock<false>& L2);
void UnLockPairOrdered(PaddedLock<false>& L1, PaddedLock<false>& L2);
template<>
class PaddedLock<true> {
mutable CacheLineStorage<SimpleLock<true> > Lock;
public:
void lock() const { Lock.data.lock(); }
bool try_lock() const { return Lock.data.try_lock(); }
void unlock() const { Lock.data.unlock(); }
friend void LockPairOrdered(PaddedLock<true>& L1, PaddedLock<true>& L2);
friend bool TryLockPairOrdered(PaddedLock<true>& L1, PaddedLock<true>& L2);
friend void UnLockPairOrdered(PaddedLock<true>& L1, PaddedLock<true>& L2);
};
template<>
class PaddedLock<false> {
public:
void lock() const {}
bool try_lock() const { return true; }
void unlock() const {}
};
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/ll/EnvCheck.h
|
/** Enviroment Checking Code -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_LL_ENVCHECK_H
#define GALOIS_RUNTIME_LL_ENVCHECK_H
namespace Galois {
namespace Runtime {
namespace LL {
//PLEASE document all enviroment variables here;
//ThreadPool_pthread.cpp: "GALOIS_DO_NOT_BIND_MAIN_THREAD"
//ThreadPool_pthread.cpp: "GALOIS_DO_NOT_BIND_THREADS"
//HWTopoLinux.cpp: "GALOIS_DEBUG_TOPO"
//Sampling.cpp: "GALOIS_EXIT_BEFORE_SAMPLING"
//Sampling.cpp: "GALOIS_EXIT_AFTER_SAMPLING"
//gIO.cpp: "GALOIS_DEBUG_TO_FILE"
//gIO.cpp: "GALOIS_DEBUG_SKIP"
//DeterministicWork.h: "GALOIS_FIXED_DET_WINDOW_SIZE"
//! Return true if the Enviroment variable is set
bool EnvCheck(const char* parm);
bool EnvCheck(const char* parm, int& val);
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Runtime/mm/Mem.h
|
/** heap building blocks -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* Strongly inspired by heap layers:
* http://www.heaplayers.org/
* FSB is modified from:
* http://warp.povusers.org/FSBAllocator/
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_MEM_H
#define GALOIS_RUNTIME_MEM_H
#include "Galois/Runtime/PerThreadStorage.h"
#include "Galois/Runtime/ll/SimpleLock.h"
#include "Galois/Runtime/ll/PtrLock.h"
#include "Galois/Runtime/ll/CacheLineStorage.h"
//#include "Galois/Runtime/ll/ThreadRWlock.h"
#include <boost/utility.hpp>
#include <cstdlib>
#include <cstring>
#include <map>
#include <cstddef>
#include <memory.h>
namespace Galois {
namespace Runtime {
//! Memory management functionality.
namespace MM {
const size_t smallPageSize = 4*1024;
const size_t pageSize = 2*1024*1024;
void* pageAlloc();
void pageFree(void*);
//! Preallocate numpages large pages for each thread
void pagePreAlloc(int numpages);
//! Forces the given block to be paged into physical memory
void pageIn(void *buf, size_t len);
//! Returns total large pages allocated by Galois memory management subsystem
int numPageAllocTotal();
//! Returns total large pages allocated for thread by Galois memory management subsystem
int numPageAllocForThread(unsigned tid);
//! Returns total small pages allocated by OS on a NUMA node
int numNumaAllocForNode(unsigned nodeid);
//! Returns number of NUMA nodes on machine
int numNumaNodes();
/**
* Allocates memory interleaved across NUMA nodes.
*
* If full, allocate across all NUMA nodes; otherwise,
* allocate across NUMA nodes corresponding to active
* threads.
*/
void* largeInterleavedAlloc(size_t bytes, bool full = true);
//! Frees memory allocated by {@link largeInterleavedAlloc()}
void largeInterleavedFree(void* mem, size_t bytes);
//! Allocates a large block of memory
void* largeAlloc(size_t bytes, bool preFault = true);
//! Frees memory allocated by {@link largeAlloc()}
void largeFree(void* mem, size_t bytes);
//! Print lines from /proc/pid/numa_maps that contain at least n small pages
void printInterleavedStats(int minPages = 16*1024);
//! Per-thread heaps using Galois thread aware construct
template<class LocalHeap>
class ThreadAwarePrivateHeap {
PerThreadStorage<LocalHeap> heaps;
public:
enum { AllocSize = LocalHeap::AllocSize };
ThreadAwarePrivateHeap() {}
~ThreadAwarePrivateHeap() {
clear();
}
inline void* allocate(size_t size) {
return heaps.getLocal()->allocate(size);
}
inline void deallocate(void* ptr) {
heaps.getLocal()->deallocate(ptr);
}
void clear() {
for (unsigned int i = 0; i < heaps.size(); i++)
heaps.getRemote(i)->clear();
}
};
//! Apply a lock to a heap
template<class RealHeap>
class LockedHeap : public RealHeap {
LL::SimpleLock<true> lock;
public :
enum { AllocSize = RealHeap::AllocSize };
inline void* allocate(size_t size) {
lock.lock();
void* retval = RealHeap::allocate(size);
lock.unlock();
return retval;
}
inline void deallocate(void* ptr) {
lock.lock();
RealHeap::deallocate(ptr);
lock.unlock();
}
};
template<typename SourceHeap>
class ZeroOut : public SourceHeap {
public:
enum { AllocSize = SourceHeap::AllocSize } ;
inline void* allocate(size_t size) {
void* retval = SourceHeap::allocate(size);
memset(retval, 0, size);
return retval;
}
inline void deallocate(void* ptr) {
SourceHeap::deallocate(ptr);
}
};
//! Add a header to objects
template<typename Header, typename SourceHeap>
class AddHeader : public SourceHeap {
enum { offset = (sizeof(Header) + (sizeof(double) - 1)) & ~(sizeof(double) - 1) };
public:
inline void* allocate(size_t size) {
//First increase the size of the header to be aligned to a double
void* ptr = SourceHeap::allocate(size + offset);
//Now return the offseted pointer
return (char*)ptr + offset;
}
inline void deallocate(void* ptr) {
SourceHeap::deallocate(getHeader(ptr));
}
inline static Header* getHeader(void* ptr) {
return (Header*)((char*)ptr - offset);
}
};
//! Allow looking up parent heap pointers
template<class SourceHeap>
class OwnerTaggedHeap : public AddHeader<void*, SourceHeap> {
typedef AddHeader<OwnerTaggedHeap*, SourceHeap> Src;
public:
inline void* allocate(size_t size) {
void* retval = Src::allocate(size);
*(Src::getHeader(retval)) = this;
return retval;
}
inline void deallocate(void* ptr) {
assert(*(Src::getHeader(ptr)) == this);
Src::deallocate(ptr);
}
inline static OwnerTaggedHeap* owner(void* ptr) {
return *(OwnerTaggedHeap**)Src::getHeader(ptr);
}
};
//! Maintain a freelist
template<class SourceHeap>
class FreeListHeap : public SourceHeap {
struct FreeNode {
FreeNode* next;
};
FreeNode* head;
public:
enum { AllocSize = SourceHeap::AllocSize };
void clear() {
while (head) {
FreeNode* N = head;
head = N->next;
SourceHeap::deallocate(N);
}
}
FreeListHeap() : head(0) {}
~FreeListHeap() {
clear();
}
inline void* allocate(size_t size) {
if (head) {
void* ptr = head;
head = head->next;
return ptr;
}
return SourceHeap::allocate(size);
}
inline void deallocate(void* ptr) {
if (!ptr) return;
assert((uintptr_t)ptr > 0x100);
FreeNode* NH = (FreeNode*)ptr;
NH->next = head;
head = NH;
}
};
//! Maintain a freelist using a lock which doesn't cover SourceHeap
template<class SourceHeap>
class SelfLockFreeListHeap : public SourceHeap {
struct FreeNode {
FreeNode* next;
};
FreeNode* head;
public:
enum { AllocSize = SourceHeap::AllocSize };
void clear() {
FreeNode* h = 0;
do {
h = head;
} while (!__sync_bool_compare_and_swap(&head, h, 0));
while (h) {
FreeNode* N = h;
h = N->next;
SourceHeap::deallocate(N);
}
}
SelfLockFreeListHeap() : head(0) {}
~SelfLockFreeListHeap() {
clear();
}
inline void* allocate(size_t size) {
static LL::SimpleLock<true> lock;
lock.lock();
FreeNode* OH = 0;
FreeNode* NH = 0;
do {
OH = head;
if (!OH) {
lock.unlock();
return SourceHeap::allocate(size);
}
NH = OH->next; //The lock protects this line
} while (!__sync_bool_compare_and_swap(&head, OH, NH));
lock.unlock();
assert(OH);
return (void*)OH;
}
inline void deallocate(void* ptr) {
if (!ptr) return;
FreeNode* OH;
FreeNode* NH;
do {
OH = head;
NH = (FreeNode*)ptr;
NH->next = OH;
} while (!__sync_bool_compare_and_swap(&head, OH, NH));
}
};
template<unsigned ElemSize, typename SourceHeap>
class BlockAlloc : public SourceHeap {
struct TyEq {
double data[((ElemSize + sizeof(double) - 1) & ~(sizeof(double) - 1))/sizeof(double)];
};
struct Block_basic {
union {
Block_basic* next;
double dummy;
};
TyEq data[1];
};
enum {BytesLeft = (SourceHeap::AllocSize - sizeof(Block_basic)),
BytesLeftR = BytesLeft & ~(sizeof(double) - 1),
FitLeft = BytesLeftR / sizeof(TyEq[1]),
TotalFit = FitLeft + 1
};
struct Block {
union {
Block* next;
double dummy;
};
TyEq data[TotalFit];
};
Block* head;
int headIndex;
void refill() {
void* P = SourceHeap::allocate(SourceHeap::AllocSize);
Block* BP = (Block*)P;
BP->next = head;
head = BP;
headIndex = 0;
}
public:
enum { AllocSize = ElemSize };
void clear() {
while(head) {
Block* B = head;
head = B->next;
SourceHeap::deallocate(B);
}
}
BlockAlloc() :SourceHeap(), head(0), headIndex(0) {
// std::cerr << "BA " << TotalFit << " " << ElemSize << " " << sizeof(TyEq) << " " << sizeof(Block) << " " << SourceHeap::AllocSize << "\n";
assert(sizeof(Block) <= SourceHeap::AllocSize);
}
~BlockAlloc() {
clear();
}
inline void* allocate(size_t size) {
assert(size == ElemSize);
if (!head || headIndex == TotalFit)
refill();
return &head->data[headIndex++];
}
inline void deallocate(void* ptr) {}
};
//! This implements a bump pointer though chunks of memory
template<typename SourceHeap>
class SimpleBumpPtr : public SourceHeap {
struct Block {
union {
Block* next;
double dummy; // for alignment
};
};
Block* head;
int offset;
void refill() {
void* P = SourceHeap::allocate(SourceHeap::AllocSize);
Block* BP = (Block*)P;
BP->next = head;
head = BP;
offset = sizeof(Block);
}
public:
enum { AllocSize = 0 };
SimpleBumpPtr(): SourceHeap(), head(0), offset(0) {}
~SimpleBumpPtr() {
clear();
}
void clear() {
while (head) {
Block* B = head;
head = B->next;
SourceHeap::deallocate(B);
}
}
inline void* allocate(size_t size) {
//increase to alignment
size = (size + sizeof(double) - 1) & ~(sizeof(double) - 1);
//Check current block
if (!head || offset + size > SourceHeap::AllocSize)
refill();
//Make sure this will fit
if (offset + size > SourceHeap::AllocSize) {
assert(0 && "Too large");
return 0;
}
char* retval = (char*)head;
retval += offset;
offset += size;
return retval;
}
inline void deallocate(void* ptr) {}
};
/**
* This implements a bump pointer though chunks of memory that falls back
* to malloc if the source heap cannot accommodate an allocation.
*/
template<typename SourceHeap>
class SimpleBumpPtrWithMallocFallback : public SourceHeap {
struct Block {
union {
Block* next;
double dummy; // for alignment
};
};
Block* head;
Block* fallbackHead;
int offset;
//! Given block of memory P, update head pointer and offset metadata
void refill(void* P, Block*& h, int* o) {
Block* BP = (Block*)P;
BP->next = h;
h = BP;
if (o)
*o = sizeof(Block);
}
public:
enum { AllocSize = 0 };
SimpleBumpPtrWithMallocFallback(): SourceHeap(), head(0), fallbackHead(0), offset(0) { }
~SimpleBumpPtrWithMallocFallback() {
clear();
}
void clear() {
while (head) {
Block* B = head;
head = B->next;
SourceHeap::deallocate(B);
}
while (fallbackHead) {
Block* B = fallbackHead;
fallbackHead = B->next;
free(B);
}
}
inline void* allocate(size_t size) {
//increase to alignment
size = (size + sizeof(double) - 1) & ~(sizeof(double) - 1);
//Check current block
if (!head || offset + size > SourceHeap::AllocSize)
refill(SourceHeap::allocate(SourceHeap::AllocSize), head, &offset);
//Make sure this will fit
if (offset + size > SourceHeap::AllocSize) {
void* p = malloc(size + sizeof(Block));
refill(p, fallbackHead, NULL);
return (char*)p + sizeof(Block);
}
char* retval = (char*)head;
retval += offset;
offset += size;
return retval;
}
inline void deallocate(void* ptr) {}
};
//! This is the base source of memory for all allocators.
//! It maintains a freelist of hunks acquired from the system
class SystemBaseAlloc {
public:
enum { AllocSize = pageSize };
SystemBaseAlloc();
~SystemBaseAlloc();
inline void* allocate(size_t size) {
return pageAlloc();
}
inline void deallocate(void* ptr) {
pageFree(ptr);
}
};
class SizedAllocatorFactory: private boost::noncopyable {
public:
typedef ThreadAwarePrivateHeap<
FreeListHeap<SimpleBumpPtr<SystemBaseAlloc> > > SizedAlloc;
static SizedAlloc* getAllocatorForSize(const size_t);
private:
static SizedAllocatorFactory* getInstance();
static LL::PtrLock<SizedAllocatorFactory, true> instance;
typedef std::map<size_t, SizedAlloc*> AllocatorsMap;
AllocatorsMap allocators;
LL::SimpleLock<true> lock;
SizedAlloc* getAllocForSize(const size_t);
static __thread AllocatorsMap* localAllocators;
SizedAllocatorFactory();
~SizedAllocatorFactory();
};
class FixedSizeAllocator {
SizedAllocatorFactory::SizedAlloc* alloc;
public:
FixedSizeAllocator(size_t sz) {
alloc = SizedAllocatorFactory::getAllocatorForSize(sz);
}
inline void* allocate(size_t sz) {
return alloc->allocate(sz);
}
inline void deallocate(void* ptr) {
alloc->deallocate(ptr);
}
inline bool operator!=(const FixedSizeAllocator& rhs) const {
return alloc != rhs.alloc;
}
};
////////////////////////////////////////////////////////////////////////////////
// Now adapt to standard std allocators
////////////////////////////////////////////////////////////////////////////////
//!A fixed size block allocator
template<typename Ty>
class FSBGaloisAllocator;
template<>
class FSBGaloisAllocator<void> {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
template<typename Other>
struct rebind { typedef FSBGaloisAllocator<Other> other; };
};
template<typename Ty>
class FSBGaloisAllocator {
inline void destruct(char*) const { }
inline void destruct(wchar_t*) const { }
template<typename T> inline void destruct(T* t) const { t->~T(); }
FixedSizeAllocator Alloc;
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef Ty *pointer;
typedef const Ty *const_pointer;
typedef Ty& reference;
typedef const Ty& const_reference;
typedef Ty value_type;
template<class Other>
struct rebind { typedef FSBGaloisAllocator<Other> other; };
FSBGaloisAllocator() throw(): Alloc(sizeof(Ty)) {}
template <class U> FSBGaloisAllocator (const FSBGaloisAllocator<U>&) throw(): Alloc(sizeof(Ty)) {}
inline pointer address(reference val) const { return &val; }
inline const_pointer address(const_reference val) const { return &val; }
pointer allocate(size_type size) {
if (size > max_size())
throw std::bad_alloc();
return static_cast<pointer>(Alloc.allocate(sizeof(Ty)));
}
void deallocate(pointer ptr, size_type) {
Alloc.deallocate(ptr);
}
template<class U, class... Args>
inline void construct(U* p, Args&&... args ) const {
::new((void*)p) U(std::forward<Args>(args)...);
}
inline void destroy(pointer ptr) const {
destruct(ptr);
}
size_type max_size() const throw() { return 1; }
template<typename T1>
inline bool operator!=(const FSBGaloisAllocator<T1>& rhs) const {
return Alloc != rhs.Alloc;
}
};
//template<typename T1,typename T2>
//bool operator!=(const FSBGaloisAllocator<T1>& lhs, const FSBGaloisAllocator<T2>& rhs) {
// return lhs.Alloc != rhs.Alloc;
//}
//!Keep a reference to an external allocator
template<typename Ty, typename AllocTy>
class ExternRefGaloisAllocator;
template<typename AllocTy>
class ExternRefGaloisAllocator<void,AllocTy> {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
template<typename Other>
struct rebind { typedef ExternRefGaloisAllocator<Other,AllocTy> other; };
};
template<typename Ty, typename AllocTy>
class ExternRefGaloisAllocator {
inline void destruct(char*) const {}
inline void destruct(wchar_t*) const { }
template<typename T> inline void destruct(T* t) const { t->~T(); }
public:
AllocTy* Alloc; // Should be private except that makes copy hard
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef Ty *pointer;
typedef const Ty *const_pointer;
typedef Ty& reference;
typedef const Ty& const_reference;
typedef Ty value_type;
template<class Other>
struct rebind {
typedef ExternRefGaloisAllocator<Other, AllocTy> other;
};
explicit ExternRefGaloisAllocator(AllocTy* a) throw(): Alloc(a) {}
template<class T1>
ExternRefGaloisAllocator(const ExternRefGaloisAllocator<T1,AllocTy>& rhs) throw() {
Alloc = rhs.Alloc;
}
inline pointer address(reference val) const { return &val; }
inline const_pointer address(const_reference val) const { return &val; }
pointer allocate(size_type size) {
if (size > max_size())
throw std::bad_alloc();
return static_cast<pointer>(Alloc->allocate(size*sizeof(Ty)));
}
void deallocate(pointer ptr, size_type x) {
Alloc->deallocate(ptr);
}
inline void construct(pointer ptr, const_reference val) const {
new (ptr) Ty(val);
}
template<class U, class... Args >
inline void construct(U* p, Args&&... args ) const {
::new((void*)p) U(std::forward<Args>(args)...);
}
void destroy(pointer ptr) const {
destruct(ptr);
}
size_type max_size() const throw() { return size_t(-1)/sizeof(Ty); }
template<typename T1,typename A1>
bool operator!=(const ExternRefGaloisAllocator<T1,A1>& rhs) const {
return Alloc != rhs.Alloc;
}
};
}
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/WorkListHelpers.h
|
/** Worklist building blocks -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_WORKLISTHELPERS_H
#define GALOIS_RUNTIME_WORKLISTHELPERS_H
#include "WLCompileCheck.h"
#include "Galois/Runtime/ll/PtrLock.h"
#include <boost/iterator/iterator_facade.hpp>
namespace Galois {
namespace WorkList {
template<typename T>
class ConExtListNode {
T* next;
public:
ConExtListNode() :next(0) {}
T*& getNext() { return next; }
T*const& getNext() const { return next; }
};
template<typename T>
class ConExtIterator: public boost::iterator_facade<
ConExtIterator<T>, T, boost::forward_traversal_tag> {
friend class boost::iterator_core_access;
T* at;
template<typename OtherTy>
bool equal(const ConExtIterator<OtherTy>& o) const { return at == o.at; }
T& dereference() const { return *at; }
void increment() { at = at->getNext(); }
public:
ConExtIterator(): at(0) { }
template<typename OtherTy>
ConExtIterator(const ConExtIterator<OtherTy>& o): at(o.at) { }
explicit ConExtIterator(T* x): at(x) { }
};
template<typename T, bool concurrent>
class ConExtLinkedStack {
Runtime::LL::PtrLock<T, concurrent> head;
public:
typedef ConExtListNode<T> ListNode;
bool empty() const {
return !head.getValue();
}
void push(T* C) {
T* oldhead(0);
do {
oldhead = head.getValue();
C->getNext() = oldhead;
} while (!head.CAS(oldhead, C));
}
T* pop() {
//lock free Fast path (empty)
if (empty()) return 0;
//Disable CAS
head.lock();
T* C = head.getValue();
if (!C) {
head.unlock();
return 0;
}
head.unlock_and_set(C->getNext());
C->getNext() = 0;
return C;
}
//! iterators not safe with concurrent modifications
typedef T value_type;
typedef T& reference;
typedef ConExtIterator<T> iterator;
typedef ConExtIterator<const T> const_iterator;
iterator begin() { return iterator(head.getValue()); }
iterator end() { return iterator(); }
const_iterator begin() const { return const_iterator(head.getValue()); }
const_iterator end() const { return const_iterator(); }
};
template<typename T, bool concurrent>
class ConExtLinkedQueue {
Runtime::LL::PtrLock<T,concurrent> head;
T* tail;
public:
typedef ConExtListNode<T> ListNode;
ConExtLinkedQueue() :tail(0) { }
bool empty() const {
return !tail;
}
void push(T* C) {
head.lock();
//std::cerr << "in(" << C << ") ";
C->getNext() = 0;
if (tail) {
tail->getNext() = C;
tail = C;
head.unlock();
} else {
assert(!head.getValue());
tail = C;
head.unlock_and_set(C);
}
}
T* pop() {
//lock free Fast path empty case
if (empty()) return 0;
head.lock();
T* C = head.getValue();
if (!C) {
head.unlock();
return 0;
}
if (tail == C) {
tail = 0;
assert(!C->getNext());
head.unlock_and_clear();
} else {
head.unlock_and_set(C->getNext());
C->getNext() = 0;
}
return C;
}
//! iterators not safe with concurrent modifications
typedef T value_type;
typedef T& reference;
typedef ConExtIterator<T> iterator;
typedef ConExtIterator<const T> const_iterator;
iterator begin() { return iterator(head.getValue()); }
iterator end() { return iterator(); }
const_iterator begin() const { return const_iterator(head.getValue()); }
const_iterator end() const { return const_iterator(); }
};
template<typename T>
struct DummyIndexer: public std::unary_function<const T&,unsigned> {
unsigned operator()(const T& x) { return 0; }
};
}
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/Lifo.h
|
/** LIFO worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_LIFO_H
#define GALOIS_WORKLIST_LIFO_H
#include "Galois/Runtime/ll/PaddedLock.h"
#include "WLCompileCheck.h"
#include <deque>
namespace Galois {
namespace WorkList {
//! Simple LIFO worklist (not scalable).
template<typename T = int, bool Concurrent = true>
struct LIFO : private boost::noncopyable, private Runtime::LL::PaddedLock<Concurrent> {
template<bool _concurrent>
struct rethread { typedef LIFO<T, _concurrent> type; };
template<typename _T>
struct retype { typedef LIFO<_T, Concurrent> type; };
private:
std::deque<T> wl;
using Runtime::LL::PaddedLock<Concurrent>::lock;
using Runtime::LL::PaddedLock<Concurrent>::try_lock;
using Runtime::LL::PaddedLock<Concurrent>::unlock;
public:
typedef T value_type;
void push(const value_type& val) {
lock();
wl.push_back(val);
unlock();
}
template<typename Iter>
void push(Iter b, Iter e) {
lock();
wl.insert(wl.end(),b,e);
unlock();
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
if (Runtime::LL::getTID() == 0)
push(range.begin(), range.end());
}
Galois::optional<value_type> steal(LIFO& victim, bool half, bool pop) {
Galois::optional<value_type> retval;
//guard against self stealing
if (&victim == this) return retval;
//Ordered lock to preent deadlock
if (!Runtime::LL::TryLockPairOrdered(*this, victim)) return retval;
if (half) {
typename std::deque<T>::iterator split = split_range(victim.wl.begin(), victim.wl.end());
wl.insert(wl.end(), victim.wl.begin(), split);
victim.wl.erase(victim.wl.begin(), split);
} else {
if (wl.empty()) {
wl.swap(victim.wl);
} else {
wl.insert(wl.end(), victim.wl.begin(), victim.wl.end());
victim.wl.clear();
}
}
if (pop && !wl.empty()) {
retval = wl.back();
wl.pop_back();
}
UnLockPairOrdered(*this, victim);
return retval;
}
Galois::optional<value_type> pop() {
Galois::optional<value_type> retval;
lock();
if (!wl.empty()) {
retval = wl.back();
wl.pop_back();
}
unlock();
return retval;
}
};
GALOIS_WLCOMPILECHECK(LIFO)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/GFifo.h
|
/** GFIFO worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_GFIFO_H
#define GALOIS_WORKLIST_GFIFO_H
#include "Galois/Runtime/ll/PaddedLock.h"
#include "Galois/gdeque.h"
#include "WLCompileCheck.h"
namespace Galois {
namespace WorkList {
template<typename T = int, bool Concurrent = true>
struct GFIFO : private boost::noncopyable, private Runtime::LL::PaddedLock<Concurrent> {
template<bool _concurrent>
struct rethread { typedef GFIFO<T, _concurrent> type; };
template<typename _T>
struct retype { typedef GFIFO<_T, Concurrent> type; };
private:
gdeque<T> wl;
using Runtime::LL::PaddedLock<Concurrent>::lock;
using Runtime::LL::PaddedLock<Concurrent>::try_lock;
using Runtime::LL::PaddedLock<Concurrent>::unlock;
public:
GFIFO() {} //required for apparent bug in clang
typedef T value_type;
void push(const value_type& val) {
lock();
wl.push_back(val);
unlock();
}
template<typename Iter>
void push(Iter b, Iter e) {
lock();
while (b != e)
wl.push_back(*b++);
unlock();
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
if (Runtime::LL::getTID() == 0)
push(range.begin(), range.end());
}
Galois::optional<value_type> pop() {
Galois::optional<value_type> retval;
lock();
if (!wl.empty()) {
retval = wl.front();
wl.pop_front();
}
unlock();
return retval;
}
};
GALOIS_WLCOMPILECHECK(GFIFO)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/Obim.h
|
/** Scalable priority worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_OBIM_H
#define GALOIS_WORKLIST_OBIM_H
#include "Galois/config.h"
#include "Galois/FlatMap.h"
#include "Galois/Runtime/PerThreadStorage.h"
#include "Galois/WorkList/Fifo.h"
#include "Galois/WorkList/WorkListHelpers.h"
#include GALOIS_CXX11_STD_HEADER(type_traits)
#include <limits>
namespace Galois {
namespace WorkList {
/**
* Approximate priority scheduling. Indexer is a default-constructable class
* whose instances conform to <code>R r = indexer(item)</code> where R is
* some type with a total order defined by <code>operator<</code> and <code>operator==</code>
* and item is an element from the Galois set iterator.
*
* An example:
* \code
* struct Item { int index; };
*
* struct Indexer {
* int operator()(Item i) const { return i.index; }
* };
*
* typedef Galois::WorkList::OrderedByIntegerMetric<Indexer> WL;
* Galois::for_each<WL>(items.begin(), items.end(), Fn);
* \endcode
*
* @tparam Indexer Indexer class
* @tparam Container Scheduler for each bucket
* @tparam BlockPeriod Check for higher priority work every 2^BlockPeriod
* iterations
* @tparam BSP Use back-scan prevention
*/
template<class Indexer = DummyIndexer<int>, typename Container = FIFO<>,
unsigned BlockPeriod=0,
bool BSP=true,
typename T=int,
typename Index=int,
bool Concurrent=true>
struct OrderedByIntegerMetric : private boost::noncopyable {
template<bool _concurrent>
struct rethread { typedef OrderedByIntegerMetric<Indexer, typename Container::template rethread<_concurrent>::type, BlockPeriod, BSP, T, Index, _concurrent> type; };
template<typename _T>
struct retype { typedef OrderedByIntegerMetric<Indexer, typename Container::template retype<_T>::type, BlockPeriod, BSP, _T, typename std::result_of<Indexer(_T)>::type, Concurrent> type; };
template<unsigned _period>
struct with_block_period { typedef OrderedByIntegerMetric<Indexer, Container, _period, BSP, T, Index, Concurrent> type; };
template<typename _container>
struct with_container { typedef OrderedByIntegerMetric<Indexer, _container, BlockPeriod, BSP, T, Index, Concurrent> type; };
template<typename _indexer>
struct with_indexer { typedef OrderedByIntegerMetric<_indexer, Container, BlockPeriod, BSP, T, Index, Concurrent> type; };
template<bool _bsp>
struct with_back_scan_prevention { typedef OrderedByIntegerMetric<Indexer, Container, BlockPeriod, _bsp, T, Index, Concurrent> type; };
typedef T value_type;
private:
typedef typename Container::template rethread<Concurrent>::type CTy;
typedef Galois::flat_map<Index, CTy*> LMapTy;
//typedef std::map<Index, CTy*> LMapTy;
struct perItem {
LMapTy local;
Index curIndex;
Index scanStart;
CTy* current;
unsigned int lastMasterVersion;
unsigned int numPops;
perItem() :
curIndex(std::numeric_limits<Index>::min()),
scanStart(std::numeric_limits<Index>::min()),
current(0), lastMasterVersion(0), numPops(0) { }
};
typedef std::deque<std::pair<Index, CTy*> > MasterLog;
// NB: Place dynamically growing masterLog after fixed-size PerThreadStorage
// members to give higher likelihood of reclaiming PerThreadStorage
Runtime::PerThreadStorage<perItem> current;
Runtime::LL::PaddedLock<Concurrent> masterLock;
MasterLog masterLog;
std::atomic<unsigned int> masterVersion;
Indexer indexer;
bool updateLocal(perItem& p) {
if (p.lastMasterVersion != masterVersion.load(std::memory_order_relaxed)) {
//masterLock.lock();
for (; p.lastMasterVersion < masterVersion.load(std::memory_order_relaxed); ++p.lastMasterVersion) {
// XXX(ddn): Somehow the second block is better than
// the first for bipartite matching (GCC 4.7.2)
#if 0
p.local.insert(masterLog[p.lastMasterVersion]);
#else
std::pair<Index, CTy*> logEntry = masterLog[p.lastMasterVersion];
p.local[logEntry.first] = logEntry.second;
assert(logEntry.second);
#endif
}
//masterLock.unlock();
return true;
}
return false;
}
GALOIS_ATTRIBUTE_NOINLINE
Galois::optional<T> slowPop(perItem& p) {
//Failed, find minimum bin
updateLocal(p);
unsigned myID = Runtime::LL::getTID();
bool localLeader = Runtime::LL::isPackageLeaderForSelf(myID);
Index msS = std::numeric_limits<Index>::min();
if (BSP) {
msS = p.scanStart;
if (localLeader) {
for (unsigned i = 0; i < Runtime::activeThreads; ++i)
msS = std::min(msS, current.getRemote(i)->scanStart);
} else {
msS = std::min(msS, current.getRemote(Runtime::LL::getLeaderForThread(myID))->scanStart);
}
}
for (auto ii = p.local.lower_bound(msS), ee = p.local.end(); ii != ee; ++ii) {
Galois::optional<T> retval;
if ((retval = ii->second->pop())) {
p.current = ii->second;
p.curIndex = ii->first;
p.scanStart = ii->first;
return retval;
}
}
return Galois::optional<value_type>();
}
GALOIS_ATTRIBUTE_NOINLINE
CTy* slowUpdateLocalOrCreate(perItem& p, Index i) {
//update local until we find it or we get the write lock
do {
CTy* lC;
updateLocal(p);
if ((lC = p.local[i]))
return lC;
} while (!masterLock.try_lock());
//we have the write lock, update again then create
updateLocal(p);
CTy*& lC2 = p.local[i];
if (!lC2) {
lC2 = new CTy();
p.lastMasterVersion = masterVersion.load(std::memory_order_relaxed) + 1;
masterLog.push_back(std::make_pair(i, lC2));
masterVersion.fetch_add(1);
}
masterLock.unlock();
return lC2;
}
inline CTy* updateLocalOrCreate(perItem& p, Index i) {
//Try local then try update then find again or else create and update the master log
CTy* lC;
if ((lC = p.local[i]))
return lC;
//slowpath
return slowUpdateLocalOrCreate(p, i);
}
public:
OrderedByIntegerMetric(const Indexer& x = Indexer()): masterVersion(0), indexer(x) { }
~OrderedByIntegerMetric() {
// Deallocate in LIFO order to give opportunity for simple garbage
// collection
for (auto ii = masterLog.rbegin(), ei = masterLog.rend(); ii != ei; ++ii) {
delete ii->second;
}
}
void push(const value_type& val) {
Index index = indexer(val);
perItem& p = *current.getLocal();
// Fast path
if (index == p.curIndex && p.current) {
p.current->push(val);
return;
}
// Slow path
CTy* lC = updateLocalOrCreate(p, index);
if (BSP && index < p.scanStart)
p.scanStart = index;
// Opportunistically move to higher priority work
if (index < p.curIndex) {
p.curIndex = index;
p.current = lC;
}
lC->push(val);
}
template<typename Iter>
void push(Iter b, Iter e) {
while (b != e)
push(*b++);
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
auto rp = range.local_pair();
push(rp.first, rp.second);
}
Galois::optional<value_type> pop() {
// Find a successful pop
perItem& p = *current.getLocal();
CTy* C = p.current;
if (BlockPeriod && (p.numPops++ & ((1<<BlockPeriod)-1)) == 0)
return slowPop(p);
Galois::optional<value_type> retval;
if (C && (retval = C->pop()))
return retval;
// Slow path
return slowPop(p);
}
};
GALOIS_WLCOMPILECHECK(OrderedByIntegerMetric)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/StableIterator.h
|
/** Stable Iterator worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section description
* This dereferences iterators lazily. This is only safe if they are not
* invalidated by the operator
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_STABLEITERATOR_H
#define GALOIS_WORKLIST_STABLEITERATOR_H
#include "Galois/gstl.h"
namespace Galois {
namespace WorkList {
template<typename Iterator=int*, bool Steal = false>
struct StableIterator {
typedef typename std::iterator_traits<Iterator>::value_type value_type;
//! change the concurrency flag
template<bool _concurrent>
struct rethread { typedef StableIterator<Iterator, Steal> type; };
//! change the type the worklist holds
template<typename _T>
struct retype { typedef StableIterator<Iterator, Steal> type; };
template<typename _iterator>
struct with_iterator { typedef StableIterator<_iterator, Steal> type; };
template<bool _steal>
struct with_steal { typedef StableIterator<Iterator, _steal> type; };
private:
struct shared_state {
Iterator stealBegin;
Iterator stealEnd;
Runtime::LL::SimpleLock<true> stealLock;
bool stealAvail;
void resetAvail() {
if (stealBegin != stealEnd)
stealAvail = true;
}
};
struct state {
Runtime::LL::CacheLineStorage<shared_state> stealState;
Iterator localBegin;
Iterator localEnd;
unsigned int nextVictim;
void populateSteal() {
if (Steal && localBegin != localEnd) {
shared_state& s = stealState.data;
s.stealLock.lock();
s.stealEnd = localEnd;
s.stealBegin = localEnd = Galois::split_range(localBegin, localEnd);
s.resetAvail();
s.stealLock.unlock();
}
}
};
Runtime::PerThreadStorage<state> TLDS;
bool doSteal(state& dst, state& src, bool wait) {
shared_state& s = src.stealState.data;
if (s.stealAvail) {
if (wait) {
s.stealLock.lock();
} else if (!s.stealLock.try_lock()) {
return false;
}
if (s.stealBegin != s.stealEnd) {
dst.localBegin = s.stealBegin;
s.stealBegin = dst.localEnd = Galois::split_range(s.stealBegin, s.stealEnd);
s.resetAvail();
}
s.stealLock.unlock();
}
return dst.localBegin != dst.localEnd;
}
//pop already failed, try again with stealing
Galois::optional<value_type> pop_steal(state& data) {
//always try stealing self
if (doSteal(data, data, true))
return *data.localBegin++;
//only try stealing one other
if (doSteal(data, *TLDS.getRemote(data.nextVictim), false)) {
//share the wealth
if (data.nextVictim != Runtime::LL::getTID())
data.populateSteal();
return *data.localBegin++;
}
++data.nextVictim;
data.nextVictim %= Runtime::activeThreads;
return Galois::optional<value_type>();
}
public:
//! push initial range onto the queue
//! called with the same b and e on each thread
template<typename RangeTy>
void push_initial(const RangeTy& r) {
state& data = *TLDS.getLocal();
data.localBegin = r.local_begin();
data.localEnd = r.local_end();
data.nextVictim = Runtime::LL::getTID();
data.populateSteal();
}
//! pop a value from the queue.
Galois::optional<value_type> pop() {
state& data = *TLDS.getLocal();
if (data.localBegin != data.localEnd)
return *data.localBegin++;
if (Steal)
return pop_steal(data);
return Galois::optional<value_type>();
}
void push(const value_type& val) {
abort();
}
template<typename Iter>
void push(Iter b, Iter e) {
abort();
}
};
GALOIS_WLCOMPILECHECK(StableIterator)
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/WorkList.h
|
/** Worklists -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_WORKLIST_H
#define GALOIS_RUNTIME_WORKLIST_H
#include "Galois/optional.h"
#include "AltChunked.h"
#include "BulkSynchronous.h"
#include "Chunked.h"
#include "Fifo.h"
#include "GFifo.h"
#include "Lifo.h"
#include "LocalQueue.h"
#include "Obim.h"
#include "OrderedList.h"
#include "OwnerComputes.h"
#include "StableIterator.h"
namespace Galois {
/**
* Scheduling policies for Galois iterators. Unless you have very specific
* scheduling requirement, {@link dChunkedLIFO} or {@link dChunkedFIFO} is a
* reasonable scheduling policy. If you need approximate priority scheduling,
* use {@link OrderedByIntegerMetric}. For debugging, you may be interested
* in {@link FIFO} or {@link LIFO}, which try to follow serial order exactly.
*
* The way to use a worklist is to pass it as a template parameter to
* {@link for_each()}. For example,
*
* \code
* Galois::for_each<Galois::WorkList::dChunkedFIFO<32> >(begin, end, fn);
* \endcode
*/
namespace WorkList {
namespace { // don't pollute the symbol table with the example
// Worklists may not be copied.
// Worklists should be default instantiatable
// All classes (should) conform to:
template<typename T, bool Concurrent>
class AbstractWorkList {
AbstractWorkList(const AbstractWorkList&);
const AbstractWorkList& operator=(const AbstractWorkList&);
public:
AbstractWorkList() { }
//! T is the value type of the WL
typedef T value_type;
//! change the concurrency flag
template<bool _concurrent>
struct rethread { typedef AbstractWorkList<T, _concurrent> type; };
//! change the type the worklist holds
template<typename _T>
struct retype { typedef AbstractWorkList<_T, Concurrent> type; };
//! push a value onto the queue
void push(const value_type& val);
//! push a range onto the queue
template<typename Iter>
void push(Iter b, Iter e);
//! push initial range onto the queue
//! called with the same b and e on each thread
template<typename RangeTy>
void push_initial(const RangeTy&);
//! pop a value from the queue.
Galois::optional<value_type> pop();
};
} // end namespace anonymous
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/Fifo.h
|
/** FIFO worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_FIFO_H
#define GALOIS_WORKLIST_FIFO_H
#include "Galois/Runtime/ll/PaddedLock.h"
#include "WLCompileCheck.h"
#include <deque>
namespace Galois {
namespace WorkList {
//! Simple FIFO worklist (not scalable).
template<typename T = int, bool Concurrent = true>
struct FIFO : private boost::noncopyable, private Runtime::LL::PaddedLock<Concurrent> {
template<bool _concurrent>
struct rethread { typedef FIFO<T, _concurrent> type; };
template<typename _T>
struct retype { typedef FIFO<_T, Concurrent> type; };
private:
std::deque<T> wl;
using Runtime::LL::PaddedLock<Concurrent>::lock;
using Runtime::LL::PaddedLock<Concurrent>::try_lock;
using Runtime::LL::PaddedLock<Concurrent>::unlock;
public:
typedef T value_type;
void push(const value_type& val) {
lock();
wl.push_back(val);
unlock();
}
template<typename Iter>
void push(Iter b, Iter e) {
lock();
wl.insert(wl.end(),b,e);
unlock();
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
if (Runtime::LL::getTID() == 0)
push(range.begin(), range.end());
}
void steal(FIFO& victim) {
if (!Runtime::LL::TryLockPairOrdered(*this, victim))
return;
typename std::deque<T>::iterator split = split_range(victim.wl.begin(), wl.victim.end());
wl.insert(wl.end(), victim.wl.begin(), split);
victim.wl.erase(victim.wl.begin(), split);
UnLockPairOrdered(*this, victim);
}
Galois::optional<value_type> pop() {
Galois::optional<value_type> retval;
lock();
if (!wl.empty()) {
retval = wl.front();
wl.pop_front();
}
unlock();
return retval;
}
};
GALOIS_WLCOMPILECHECK(FIFO)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/AltChunked.h
|
/** Alternative chunked interface -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_RUNTIME_ALTCHUNKED_H
#define GALOIS_RUNTIME_ALTCHUNKED_H
#include "Galois/FixedSizeRing.h"
#include "Galois/Threads.h"
#include "Galois/Runtime/PerThreadStorage.h"
#include "Galois/Runtime/ll/CompilerSpecific.h"
#include "Galois/Runtime/ll/PtrLock.h"
#include "Galois/Runtime/mm/Mem.h"
#include "WLCompileCheck.h"
namespace Galois {
namespace WorkList {
struct ChunkHeader {
ChunkHeader* next;
ChunkHeader* prev;
};
#if 0
class AtomicChunkedDeque {
Runtime::LL::PtrLock<ChunkHeader, true> head;
ChunkHeader* volatile tail;
public:
void push_front(ChunkHeader* obj) {
head.lock();
obj->prev = 0;
obj->next = head.getValue();
if (obj->next)
obj->next->prev = obj;
if (!tail)
tail = obj;
head.unlock_and_set(obj);
}
void push_back(ChunkHeader* obj) {
head.lock();
obj->next = 0;
obj->prev = tail;
if (obj->prev)
obj->prev->next = obj;
tail = obj;
if (head.getValue())
head.unlock();
else
head.unlock_and_set(obj);
}
ChunkHeader* pop_back() {
//lockfree empty fast path
if (!tail) return 0;
head.lock();
ChunkHeader* retval = tail;
if (retval) {
if (retval->prev)
retval->prev->next = 0;
tail = retval->prev;
if (head.getValue() == retval)
head.unlock_and_clear();
else
head.unlock();
//clean up obj
retval->prev = retval->next = 0;
return retval;
} else {
head.unlock();
return 0;
}
}
ChunkHeader* pop_front() {
//lockfree empty fast path
if (!tail) return 0; //tail is just as useful as head
head.lock();
ChunkHeader* retval = head.getValue();
if (retval) {
if (retval->next)
retval->next->prev = 0;
if (tail == retval)
tail = 0;
head.unlock_and_set(retval->next);
//clean up obj
retval->prev = retval->next = 0;
return retval;
} else {
head.unlock();
return 0;
}
}
};
#endif
class AltChunkedQueue {
Runtime::LL::PtrLock<ChunkHeader, true> head;
ChunkHeader* tail;
void prepend(ChunkHeader* C) {
//Find tail of stolen stuff
ChunkHeader* t = C;
while (t->next) { t = t->next; }
head.lock();
t->next = head.getValue();
if (!t->next)
tail = t;
head.unlock_and_set(C);
}
public:
AltChunkedQueue(): tail(0) { }
bool empty() const {
return !tail;
}
void push(ChunkHeader* obj) {
head.lock();
obj->next = 0;
if (tail) {
tail->next = obj;
tail = obj;
head.unlock();
} else {
assert(!head.getValue());
tail = obj;
head.unlock_and_set(obj);
}
}
ChunkHeader* pop() {
//lock free Fast path empty case
if (empty()) return 0;
head.lock();
ChunkHeader* h = head.getValue();
if (!h) {
head.unlock();
return 0;
}
if (tail == h) {
tail = 0;
assert(!h->next);
head.unlock_and_clear();
} else {
head.unlock_and_set(h->next);
h->next = 0;
}
return h;
}
ChunkHeader* stealAllAndPop(AltChunkedQueue& victim) {
//Don't do work on empty victims (lockfree check)
if (victim.empty()) return 0;
//Steal everything
victim.head.lock();
ChunkHeader* C = victim.head.getValue();
if (C)
victim.tail = 0;
victim.head.unlock_and_clear();
if (!C) return 0; //Didn't get anything
ChunkHeader* retval = C;
C = C->next;
retval->next = 0;
if (!C) return retval; //Only got one thing
prepend(C);
return retval;
}
ChunkHeader* stealHalfAndPop(AltChunkedQueue& victim) {
//Don't do work on empty victims (lockfree check)
if (victim.empty()) return 0;
//Steal half
victim.head.lock();
ChunkHeader* C = victim.head.getValue();
ChunkHeader* ntail = C;
bool count = false;
while (C) {
C = C->next;
if (count)
ntail = ntail->next;
count = !count;
}
if (ntail) {
C = ntail->next;
ntail->next = 0;
victim.tail = ntail;
}
victim.head.unlock();
if (!C) return 0; //Didn't get anything
ChunkHeader* retval = C;
C = C->next;
retval->next = 0;
if (!C) return retval; //Only got one thing
prepend(C);
return retval;
}
};
class AltChunkedStack {
Runtime::LL::PtrLock<ChunkHeader, true> head;
void prepend(ChunkHeader* C) {
//Find tail of stolen stuff
ChunkHeader* tail = C;
while (tail->next) { tail = tail->next; }
head.lock();
tail->next = head.getValue();
head.unlock_and_set(C);
}
public:
bool empty() const {
return !head.getValue();
}
void push(ChunkHeader* obj) {
ChunkHeader* oldhead = 0;
do {
oldhead = head.getValue();
obj->next = oldhead;
} while (!head.CAS(oldhead, obj));
}
ChunkHeader* pop() {
//lock free Fast empty path
if (empty()) return 0;
//Disable CAS
head.lock();
ChunkHeader* retval = head.getValue();
ChunkHeader* setval = 0;
if (retval) {
setval = retval->next;
retval->next = 0;
}
head.unlock_and_set(setval);
return retval;
}
ChunkHeader* stealAllAndPop(AltChunkedStack& victim) {
//Don't do work on empty victims (lockfree check)
if (victim.empty()) return 0;
//Steal everything
victim.head.lock();
ChunkHeader* C = victim.head.getValue();
victim.head.unlock_and_clear();
if (!C) return 0; //Didn't get anything
ChunkHeader* retval = C;
C = C->next;
retval->next = 0;
if (!C) return retval; //Only got one thing
prepend(C);
return retval;
}
ChunkHeader* stealHalfAndPop(AltChunkedStack& victim) {
//Don't do work on empty victims (lockfree check)
if (victim.empty()) return 0;
//Steal half
victim.head.lock();
ChunkHeader* C = victim.head.getValue();
ChunkHeader* ntail = C;
bool count = false;
while (C) {
C = C->next;
if (count)
ntail = ntail->next;
count = !count;
}
if (ntail) {
C = ntail->next;
ntail->next = 0;
}
victim.head.unlock();
if (!C) return 0; //Didn't get anything
ChunkHeader* retval = C;
C = C->next;
retval->next = 0;
if (!C) return retval; //Only got one thing
prepend(C);
return retval;
}
};
template<typename InnerWL>
class StealingQueue : private boost::noncopyable {
Runtime::PerThreadStorage<std::pair<InnerWL, unsigned> > local;
GALOIS_ATTRIBUTE_NOINLINE
ChunkHeader* doSteal() {
std::pair<InnerWL, unsigned>& me = *local.getLocal();
unsigned id = Runtime::LL::getTID();
unsigned pkg = Runtime::LL::getPackageForSelf(id);
unsigned num = Galois::getActiveThreads();
//First steal from this package
for (unsigned eid = id + 1; eid < num; ++eid) {
if (Runtime::LL::getPackageForThread(eid) == pkg) {
ChunkHeader* c = me.first.stealHalfAndPop(local.getRemote(eid)->first);
if (c)
return c;
}
}
for (unsigned eid = 0; eid < id; ++eid) {
if (Runtime::LL::getPackageForThread(eid) == pkg) {
ChunkHeader* c = me.first.stealHalfAndPop(local.getRemote(eid)->first);
if (c)
return c;
}
}
//Leaders can cross package
if (Runtime::LL::isPackageLeaderForSelf(id)) {
unsigned eid = (id + me.second) % num;
++me.second;
if (id != eid && Runtime::LL::isPackageLeader(eid)) {
ChunkHeader* c = me.first.stealAllAndPop(local.getRemote(eid)->first);
if (c)
return c;
}
}
return 0;
}
public:
void push(ChunkHeader* c) {
local.getLocal()->first.push(c);
}
ChunkHeader* pop() {
if (ChunkHeader* c = local.getLocal()->first.pop())
return c;
return doSteal();
}
};
template<bool IsLocallyLIFO, int ChunkSize, typename Container, typename T>
struct AltChunkedMaster : private boost::noncopyable {
template<typename _T>
struct retype { typedef AltChunkedMaster<IsLocallyLIFO, ChunkSize, Container, _T> type; };
template<bool _concurrent>
struct rethread { typedef AltChunkedMaster<IsLocallyLIFO, ChunkSize, Container, T> type; };
template<int _chunk_size>
struct with_chunk_size { typedef AltChunkedMaster<IsLocallyLIFO, _chunk_size, Container, T> type; };
private:
class Chunk : public ChunkHeader, public Galois::FixedSizeRing<T, ChunkSize> {};
Runtime::MM::FixedSizeAllocator heap;
Runtime::PerThreadStorage<std::pair<Chunk*, Chunk*> > data;
Container worklist;
Chunk* mkChunk() {
return new (heap.allocate(sizeof(Chunk))) Chunk();
}
void delChunk(Chunk* C) {
C->~Chunk();
heap.deallocate(C);
}
void swapInPush(std::pair<Chunk*, Chunk*>& d) {
if (!IsLocallyLIFO)
std::swap(d.first, d.second);
}
Chunk*& getPushChunk(std::pair<Chunk*, Chunk*>& d) {
if (!IsLocallyLIFO)
return d.second;
else
return d.first;
}
Chunk*& getPopChunk(std::pair<Chunk*, Chunk*>& d) {
return d.first;
}
bool doPush(Chunk* c, const T& val) {
return c->push_back(val);
}
Galois::optional<T> doPop(Chunk* c) {
if (!IsLocallyLIFO)
return c->extract_front();
else
return c->extract_back();
}
void push_internal(std::pair<Chunk*, Chunk*>& tld, Chunk*& n, const T& val) {
//Simple case, space in current chunk
if (n && doPush(n, val))
return;
//full chunk, push
if (n)
worklist.push(static_cast<ChunkHeader*>(n));
//get empty chunk;
n = mkChunk();
//There better be some room in the new chunk
doPush(n, val);
}
public:
typedef T value_type;
AltChunkedMaster() : heap(sizeof(Chunk)) {}
void push(value_type val) {
std::pair<Chunk*, Chunk*>& tld = *data.getLocal();
Chunk*& n = getPushChunk(tld);
push_internal(tld, n, val);
}
template<typename Iter>
void push(Iter b, Iter e) {
std::pair<Chunk*, Chunk*>& tld = *data.getLocal();
Chunk*& n = getPushChunk(tld);
while (b != e)
push_internal(tld, n, *b++);
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
auto rp = range.local_pair();
push(rp.first, rp.second);
}
Galois::optional<value_type> pop() {
std::pair<Chunk*, Chunk*>& tld = *data.getLocal();
Chunk*& n = getPopChunk(tld);
Galois::optional<value_type> retval;
//simple case, things in current chunk
if (n && (retval = doPop(n)))
return retval;
//empty chunk, trash it
if (n)
delChunk(n);
//get a new chunk
n = static_cast<Chunk*>(worklist.pop());
if (n && (retval = doPop(n)))
return retval;
//try stealing the push buffer if we can
swapInPush(tld);
if (n)
retval = doPop(n);
return retval;
}
};
template<int ChunkSize=64, typename T = int>
class AltChunkedLIFO : public AltChunkedMaster<true, ChunkSize, StealingQueue<AltChunkedStack>, T> {};
GALOIS_WLCOMPILECHECK(AltChunkedLIFO)
template<int ChunkSize=64, typename T = int>
class AltChunkedFIFO : public AltChunkedMaster<false, ChunkSize, StealingQueue<AltChunkedQueue>, T> {};
GALOIS_WLCOMPILECHECK(AltChunkedFIFO)
} // end namespace
} // end namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/LocalQueue.h
|
/** LocalQueues worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_LOCALQUEUE_H
#define GALOIS_WORKLIST_LOCALQUEUE_H
#include "Galois/config.h"
#include <boost/mpl/if.hpp>
#include GALOIS_CXX11_STD_HEADER(type_traits)
namespace Galois {
namespace WorkList {
template<typename T = int>
struct NoGlobalQueue {
template<bool _concurrent>
struct rethread { typedef NoGlobalQueue<T> type; };
template<typename _T>
struct retype { typedef NoGlobalQueue<_T> type; };
};
template<typename Global = NoGlobalQueue<>, typename Local = FIFO<>, typename T = int>
struct LocalQueue : private boost::noncopyable {
template<bool _concurrent>
struct rethread { typedef LocalQueue<Global, Local, T> type; };
template<typename _T>
struct retype { typedef LocalQueue<typename Global::template retype<_T>::type, typename Local::template retype<_T>::type, _T> type; };
template<typename _global>
struct with_global { typedef LocalQueue<_global, Local, T> type; };
template<typename _local>
struct with_local { typedef LocalQueue<Global, _local, T> type; };
private:
typedef typename Local::template rethread<false>::type lWLTy;
Runtime::PerThreadStorage<lWLTy> local;
Global global;
template<typename RangeTy, bool Enable = std::is_same<Global,NoGlobalQueue<T> >::value>
void pushGlobal(const RangeTy& range, typename std::enable_if<Enable>::type* = 0) {
auto rp = range.local_pair();
local.getLocal()->push(rp.first, rp.second);
}
template<typename RangeTy, bool Enable = std::is_same<Global,NoGlobalQueue<T> >::value>
void pushGlobal(const RangeTy& range, typename std::enable_if<!Enable>::type* = 0) {
global.push_initial(range);
}
template<bool Enable = std::is_same<Global,NoGlobalQueue<T> >::value>
Galois::optional<T> popGlobal(typename std::enable_if<Enable>::type* = 0) {
return Galois::optional<value_type>();
}
template<bool Enable = std::is_same<Global,NoGlobalQueue<T> >::value>
Galois::optional<T> popGlobal(typename std::enable_if<!Enable>::type* = 0) {
return global.pop();
}
public:
typedef T value_type;
void push(const value_type& val) {
local.getLocal()->push(val);
}
template<typename Iter>
void push(Iter b, Iter e) {
local.getLocal()->push(b,e);
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
pushGlobal(range);
}
Galois::optional<value_type> pop() {
Galois::optional<value_type> ret = local.getLocal()->pop();
if (ret)
return ret;
return popGlobal();
}
};
GALOIS_WLCOMPILECHECK(LocalQueue)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/BulkSynchronous.h
|
/** BulkSynchronous worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_WORKLIST_BULKSYNCHRONOUS_H
#define GALOIS_WORKLIST_BULKSYNCHRONOUS_H
#include "Galois/Runtime/Barrier.h"
#include "Chunked.h"
#include "WLCompileCheck.h"
namespace Galois {
namespace WorkList {
/**
* Bulk-synchronous scheduling. Work is processed in rounds, and all newly
* created work is processed after all the current work in a round is
* completed.
*/
template<class Container=dChunkedFIFO<>, class T=int, bool Concurrent = true>
class BulkSynchronous : private boost::noncopyable {
public:
template<bool _concurrent>
struct rethread { typedef BulkSynchronous<Container, T, _concurrent> type; };
template<typename _T>
struct retype { typedef BulkSynchronous<typename Container::template retype<_T>::type, _T, Concurrent> type; };
template<typename _container>
struct with_container { typedef BulkSynchronous<_container, T, Concurrent> type; };
private:
typedef typename Container::template rethread<Concurrent>::type CTy;
struct TLD {
unsigned round;
TLD(): round(0) { }
};
CTy wls[2];
Runtime::PerThreadStorage<TLD> tlds;
Runtime::Barrier& barrier;
Runtime::LL::CacheLineStorage<volatile long> some;
volatile bool empty;
public:
typedef T value_type;
BulkSynchronous(): barrier(Runtime::getSystemBarrier()), empty(false) { }
void push(const value_type& val) {
wls[(tlds.getLocal()->round + 1) & 1].push(val);
}
template<typename ItTy>
void push(ItTy b, ItTy e) {
while (b != e)
push(*b++);
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
auto rp = range.local_pair();
push(rp.first, rp.second);
tlds.getLocal()->round = 1;
some.data = true;
}
Galois::optional<value_type> pop() {
TLD& tld = *tlds.getLocal();
Galois::optional<value_type> r;
while (true) {
if (empty)
return r; // empty
r = wls[tld.round].pop();
if (r)
return r;
barrier.wait();
if (Runtime::LL::getTID() == 0) {
if (!some.data)
empty = true;
some.data = false;
}
tld.round = (tld.round + 1) & 1;
barrier.wait();
r = wls[tld.round].pop();
if (r) {
some.data = true;
return r;
}
}
}
};
GALOIS_WLCOMPILECHECK(BulkSynchronous)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/OwnerComputes.h
|
/** Owner Computes worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_OWNERCOMPUTES_H
#define GALOIS_WORKLIST_OWNERCOMPUTES_H
#include "WLCompileCheck.h"
namespace Galois {
namespace WorkList {
template<typename OwnerFn=DummyIndexer<int>, typename Container=ChunkedLIFO<>, typename T = int>
struct OwnerComputes : private boost::noncopyable {
template<bool _concurrent>
struct rethread { typedef OwnerComputes<OwnerFn, typename Container::template rethread<_concurrent>::type, T> type; };
template<typename _T>
struct retype { typedef OwnerComputes<OwnerFn, typename Container::template retype<_T>::type, _T> type; };
template<typename _container>
struct with_container { typedef OwnerComputes<OwnerFn, _container, T> type; };
template<typename _indexer>
struct with_indexer { typedef OwnerComputes<_indexer, Container, T> type; };
private:
typedef typename Container::template retype<T>::type lWLTy;
typedef lWLTy cWL;
typedef lWLTy pWL;
OwnerFn Fn;
Runtime::PerPackageStorage<cWL> items;
Runtime::PerPackageStorage<pWL> pushBuffer;
public:
typedef T value_type;
void push(const value_type& val) {
unsigned int index = Fn(val);
unsigned int tid = Runtime::LL::getTID();
unsigned int mindex = Runtime::LL::getPackageForThread(index);
//std::cerr << "[" << index << "," << index % active << "]\n";
if (mindex == Runtime::LL::getPackageForSelf(tid))
items.getLocal()->push(val);
else
pushBuffer.getRemote(mindex)->push(val);
}
template<typename ItTy>
void push(ItTy b, ItTy e) {
while (b != e)
push(*b++);
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
auto rp = range.local_pair();
push(rp.first, rp.second);
for (unsigned int x = 0; x < pushBuffer.size(); ++x)
pushBuffer.getRemote(x)->flush();
}
Galois::optional<value_type> pop() {
cWL& wl = *items.getLocal();
Galois::optional<value_type> retval = wl.pop();
if (retval)
return retval;
pWL& p = *pushBuffer.getLocal();
while ((retval = p.pop()))
wl.push(*retval);
return wl.pop();
}
};
GALOIS_WLCOMPILECHECK(OwnerComputes)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/WLCompileCheck.h
|
/** worklists common test macro -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_WLCOMPILECHECK_H
#define GALOIS_WORKLIST_WLCOMPILECHECK_H
#ifndef GALOIS_WLCOMPILECHECK
#define GALOIS_WLCOMPILECHECK(name) //
#endif
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/ExternRef.h
|
/** External worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
* This let's you use an external worklist by reference
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_EXTERNREF_H
#define GALOIS_WORKLIST_EXTERNREF_H
namespace Galois {
namespace WorkList {
template<typename Container, bool pushinit = true>
struct ExternRef {
//! change the concurrency flag
template<bool _concurrent>
struct rethread { typedef ExternRef<typename Container::template rethread<_concurrent>::type> type; };
//! change the type the worklist holds
template<typename _T>
struct retype { typedef ExternRef<typename Container::template retype<_T>::type> type; };
private:
Container& wl;
public:
ExternRef(Container& _wl) :wl(_wl) {}
//! T is the value type of the WL
typedef typename Container::value_type value_type;
//! push a value onto the queue
void push(const value_type& val) { wl.push(val); }
//! push a range onto the queue
template<typename Iter>
void push(Iter b, Iter e) { wl.push(b,e); }
//! push initial range onto the queue
//! called with the same b and e on each thread
template<typename RangeTy>
void push_initial(const RangeTy& r) { if (pushinit) wl.push_initial(r); }
//! pop a value from the queue.
Galois::optional<value_type> pop() { return wl.pop(); }
};
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/Chunked.h
|
/** (d)Chunked(F|L)ifo worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_CHUNKED_H
#define GALOIS_WORKLIST_CHUNKED_H
#include "Galois/FixedSizeRing.h"
#include "Galois/Runtime/ll/PaddedLock.h"
#include "Galois/WorkList/WorkListHelpers.h"
#include "WLCompileCheck.h"
namespace Galois {
namespace WorkList {
//This overly complex specialization avoids a pointer indirection for non-distributed WL when accessing PerLevel
template<bool, template<typename> class PS, typename TQ>
struct squeue {
PS<TQ> queues;
TQ& get(int i) { return *queues.getRemote(i); }
TQ& get() { return *queues.getLocal(); }
int myEffectiveID() { return Runtime::LL::getTID(); }
int size() { return Runtime::activeThreads; }
};
template<template<typename> class PS, typename TQ>
struct squeue<false, PS, TQ> {
TQ queue;
TQ& get(int i) { return queue; }
TQ& get() { return queue; }
int myEffectiveID() { return 0; }
int size() { return 0; }
};
//! Common functionality to all chunked worklists
template<typename T, template<typename, bool> class QT, bool Distributed, bool IsStack, int ChunkSize, bool Concurrent>
struct ChunkedMaster : private boost::noncopyable {
template<bool _concurrent>
struct rethread { typedef ChunkedMaster<T, QT, Distributed, IsStack, ChunkSize, _concurrent> type; };
template<typename _T>
struct retype { typedef ChunkedMaster<_T, QT, Distributed, IsStack, ChunkSize, Concurrent> type; };
template<int _chunk_size>
struct with_chunk_size { typedef ChunkedMaster<T, QT, Distributed, IsStack, _chunk_size, Concurrent> type; };
private:
class Chunk : public FixedSizeRing<T, ChunkSize>, public QT<Chunk, Concurrent>::ListNode {};
Runtime::MM::FixedSizeAllocator heap;
struct p {
Chunk* cur;
Chunk* next;
p(): cur(0), next(0) { }
};
typedef QT<Chunk, Concurrent> LevelItem;
squeue<Concurrent, Runtime::PerThreadStorage, p> data;
squeue<Distributed, Runtime::PerPackageStorage, LevelItem> Q;
Chunk* mkChunk() {
return new (heap.allocate(sizeof(Chunk))) Chunk();
}
void delChunk(Chunk* C) {
C->~Chunk();
heap.deallocate(C);
}
void pushChunk(Chunk* C) {
LevelItem& I = Q.get();
I.push(C);
}
Chunk* popChunkByID(unsigned int i) {
LevelItem& I = Q.get(i);
return I.pop();
}
Chunk* popChunk() {
int id = Q.myEffectiveID();
Chunk* r = popChunkByID(id);
if (r)
return r;
for (int i = id + 1; i < (int) Q.size(); ++i) {
r = popChunkByID(i);
if (r)
return r;
}
for (int i = 0; i < id; ++i) {
r = popChunkByID(i);
if (r)
return r;
}
return 0;
}
template<typename... Args>
T* emplacei(p& n, Args&&... args) {
T* retval = 0;
if (n.next && (retval = n.next->emplace_back(std::forward<Args>(args)...)))
return retval;
if (n.next)
pushChunk(n.next);
n.next = mkChunk();
retval = n.next->emplace_back(std::forward<Args>(args)...);
assert(retval);
return retval;
}
public:
typedef T value_type;
ChunkedMaster() : heap(sizeof(Chunk)) { }
void flush() {
p& n = data.get();
if (n.next)
pushChunk(n.next);
n.next = 0;
}
/**
* Construct an item on the worklist and return a pointer to its value.
*
* This pointer facilitates some internal runtime uses and is not designed
* to be used by general clients. The address is generally not safe to use
* in the presence of concurrent pops.
*/
template<typename... Args>
value_type* emplace(Args&&... args) {
p& n = data.get();
return emplacei(n, std::forward<Args>(args)...);
}
/**
* Return pointer to next value to be returned by pop.
*
* For internal runtime use.
*/
value_type* peek() {
p& n = data.get();
if (IsStack) {
if (n.next && !n.next->empty())
return &n.next->back();
if (n.next)
delChunk(n.next);
n.next = popChunk();
if (n.next && !n.next->empty())
return &n.next->back();
return NULL;
} else {
if (n.cur && !n.cur->empty())
return &n.cur->front();
if (n.cur)
delChunk(n.cur);
n.cur = popChunk();
if (!n.cur) {
n.cur = n.next;
n.next = 0;
}
if (n.cur && !n.cur->empty())
return &n.cur->front();
return NULL;
}
}
/**
* Remove the value returned from peek() from the worklist.
*
* For internal runtime use.
*/
void pop_peeked() {
p& n = data.get();
if (IsStack) {
n.next->pop_back();
return;
} else {
n.cur->pop_front();
return;
}
}
void push(const value_type& val) {
p& n = data.get();
emplacei(n, val);
}
template<typename Iter>
void push(Iter b, Iter e) {
p& n = data.get();
while (b != e)
emplacei(n, *b++);
}
template<typename RangeTy>
void push_initial(const RangeTy& range) {
auto rp = range.local_pair();
push(rp.first, rp.second);
}
Galois::optional<value_type> pop() {
p& n = data.get();
Galois::optional<value_type> retval;
if (IsStack) {
if (n.next && (retval = n.next->extract_back()))
return retval;
if (n.next)
delChunk(n.next);
n.next = popChunk();
if (n.next)
return n.next->extract_back();
return Galois::optional<value_type>();
} else {
if (n.cur && (retval = n.cur->extract_front()))
return retval;
if (n.cur)
delChunk(n.cur);
n.cur = popChunk();
if (!n.cur) {
n.cur = n.next;
n.next = 0;
}
if (n.cur)
return n.cur->extract_front();
return Galois::optional<value_type>();
}
}
};
/**
* Chunked FIFO. A global FIFO of chunks of some fixed size.
*
* @tparam ChunkSize chunk size
*/
template<int ChunkSize=64, typename T = int, bool Concurrent=true>
class ChunkedFIFO : public ChunkedMaster<T, ConExtLinkedQueue, false, false, ChunkSize, Concurrent> {};
GALOIS_WLCOMPILECHECK(ChunkedFIFO)
/**
* Chunked LIFO. A global LIFO of chunks of some fixed size.
*
* @tparam ChunkSize chunk size
*/
template<int ChunkSize=64, typename T = int, bool Concurrent=true>
class ChunkedLIFO : public ChunkedMaster<T, ConExtLinkedStack, false, true, ChunkSize, Concurrent> {};
GALOIS_WLCOMPILECHECK(ChunkedLIFO)
/**
* Distributed chunked FIFO. A more scalable version of {@link ChunkedFIFO}.
*
* @tparam ChunkSize chunk size
*/
template<int ChunkSize=64, typename T = int, bool Concurrent=true>
class dChunkedFIFO : public ChunkedMaster<T, ConExtLinkedQueue, true, false, ChunkSize, Concurrent> {};
GALOIS_WLCOMPILECHECK(dChunkedFIFO)
/**
* Distributed chunked LIFO. A more scalable version of {@link ChunkedLIFO}.
*
* @tparam chunksize chunk size
*/
template<int ChunkSize=64, typename T = int, bool Concurrent=true>
class dChunkedLIFO : public ChunkedMaster<T, ConExtLinkedStack, true, true, ChunkSize, Concurrent> {};
GALOIS_WLCOMPILECHECK(dChunkedLIFO)
/**
* Distributed chunked bag. A scalable and resource-efficient policy when you
* are agnostic to the particular scheduling order.
*
* @tparam chunksize chunk size
*/
template<int ChunkSize=64, typename T = int, bool Concurrent=true>
class dChunkedBag : public ChunkedMaster<T, ConExtLinkedQueue, true, true, ChunkSize, Concurrent> {};
GALOIS_WLCOMPILECHECK(dChunkedBag)
} // end namespace WorkList
} // end namespace Galois
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/WorkList/OrderedList.h
|
/** Scalable priority worklist -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_WORKLIST_ORDEREDLIST_H
#define GALOIS_WORKLIST_ORDEREDLIST_H
#include "Galois/FlatMap.h"
namespace Galois {
namespace WorkList {
template<class Compare = std::less<int>, typename T = int, bool concurrent = true>
class OrderedList : private boost::noncopyable, private Runtime::LL::PaddedLock<concurrent> {
typedef Galois::flat_map<T, std::deque<T>, Compare> Map;
Map map;
using Runtime::LL::PaddedLock<concurrent>::lock;
using Runtime::LL::PaddedLock<concurrent>::try_lock;
using Runtime::LL::PaddedLock<concurrent>::unlock;
public:
template<bool newconcurrent>
struct rethread { typedef OrderedList<Compare, T, newconcurrent> type; };
template<typename Tnew>
struct retype { typedef OrderedList<Compare, Tnew, concurrent> type; };
typedef T value_type;
void push(value_type val) {
lock();
std::deque<T>& list = map[val];
list.push_back(val);
unlock();
}
template<typename Iter>
void push(Iter b, Iter e) {
lock();
while (b != e) {
std::deque<T>& list = map[*b];
list.push_back(*b);
++b;
}
unlock();
}
template<typename RangeTy>
void push_initial(RangeTy range) {
if (Runtime::LL::getTID() == 0)
push(range.begin(), range.end());
}
Galois::optional<value_type> pop() {
lock();
if (map.empty()) {
unlock();
return Galois::optional<value_type>();
}
auto ii = map.begin();
std::deque<T>& list = ii->second;
Galois::optional<value_type> v(list.front());
list.pop_front();
if (list.empty())
map.erase(ii);
unlock();
return v;
}
};
GALOIS_WLCOMPILECHECK(OrderedList)
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/memScalGraph.h
|
/*
* memScalGraph.h
*
* Created on: Mar 4, 2013
* Author: nyadav
*/
#ifndef GALOIS_GRAPH_MEMSCALGRAPH_H
#define GALOIS_GRAPH_MEMSCALGRAPH_H
#include "Galois/Bag.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/Context.h"
#include "Galois/Runtime/MethodFlags.h"
#include "Galois/gdeque.h"
#include <boost/functional.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/iterator/filter_iterator.hpp>
#include <algorithm>
#include <map>
#include <set>
#include <vector>
namespace Galois {
namespace Graph {
template<typename NodeTy, typename EdgeTy, bool Directional>
class MemScalGraph : private boost::noncopyable {
template<typename T>
struct first_eq_and_valid {
T N2;
first_eq_and_valid(T& n) :N2(n) {}
template <typename T2>
bool operator()(const T2& ii) const {
return ii.first() == N2 && ii.first();
}
};
struct first_not_valid {
template <typename T2>
bool operator()(const T2& ii) const { return !ii.first();}
};
struct gNode: public Galois::Runtime::Lockable {
//! The storage type for an edge
typedef GraphImpl::EdgeItem<gNode, EdgeTy, Directional> EITy;
//! The storage type for edges
typedef Galois::gdeque<EITy,32> EdgesTy;
typedef typename EdgesTy::iterator iterator;
EdgesTy edges;
NodeTy data;
template<typename... Args>
gNode(Args&&... args): data(std::forward<Args>(args)...) { }
iterator begin() { return edges.begin(); }
iterator end() { return edges.end(); }
void erase(iterator ii) {
*ii = edges.back();
edges.pop_back();
}
void erase(gNode* N) {
iterator ii = find(N);
if (ii != end())
edges.erase(ii);
}
iterator find(gNode* N) {
return std::find_if(begin(), end(), first_eq_and_valid<gNode*>(N));
}
template<typename... Args>
iterator createEdge(gNode* N, EdgeTy* v, Args&&... args) {
edges.push_front(EITy(N, v, std::forward<Args>(args)...));
return edges.begin();
}
template<typename... Args>
iterator createEdgeWithReuse(gNode* N, EdgeTy* v, Args&&... args) {
//First check for holes
iterator ii = std::find_if(begin(), end(), first_not_valid());
if (ii != end()) {
*ii = EITy(N, v, std::forward<Args>(args)...);
return ii;
}
edges.push_front(EITy(N, v, std::forward<Args>(args)...));
return edges.begin();
}
};
//The graph manages the lifetimes of the data in the nodes and edges
typedef Galois::InsertBag<gNode> NodeListTy;
NodeListTy nodes;
GraphImpl::EdgeFactory<EdgeTy> edges;
//Helpers for iterator classes
struct is_node : public std::unary_function<gNode&, bool>{
bool operator() (const gNode& g) const { return true; }
};
struct is_edge : public std::unary_function<typename gNode::EITy&, bool> {
bool operator()(typename gNode::EITy& e) const { return true; }
};
struct makeGraphNode: public std::unary_function<gNode&, gNode*> {
gNode* operator()(gNode& data) const { return &data; }
};
public:
//! Graph node handle
typedef gNode* GraphNode;
//! Edge data type
typedef EdgeTy edge_type;
//! Node data type
typedef NodeTy node_type;
//! Edge iterator
typedef typename gNode::iterator edge_iterator;
//! Reference to edge data
typedef typename gNode::EITy::reference edge_data_reference;
//! Node iterator
typedef boost::transform_iterator<makeGraphNode,
boost::filter_iterator<is_node,
typename NodeListTy::iterator> > iterator;
private:
template<typename... Args>
edge_iterator createEdgeWithReuse(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
assert(src);
assert(dst);
Galois::Runtime::checkWrite(mflag, true);
Galois::Runtime::acquire(src, mflag);
typename gNode::iterator ii = src->find(dst);
if (ii == src->end()) {
if (Directional) {
ii = src->createEdgeWithReuse(dst, 0, std::forward<Args>(args)...);
} else {
Galois::Runtime::acquire(dst, mflag);
EdgeTy* e = edges.mkEdge(std::forward<Args>(args)...);
ii = dst->createEdgeWithReuse(src, e, std::forward<Args>(args)...);
ii = src->createEdgeWithReuse(dst, e, std::forward<Args>(args)...);
}
}
//return boost::make_filter_iterator(is_edge(), ii, src->end());
return ii;
}
template<typename... Args>
edge_iterator createEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
assert(src);
assert(dst);
Galois::Runtime::checkWrite(mflag, true);
Galois::Runtime::acquire(src, mflag);
typename gNode::iterator ii = src->end();
if (ii == src->end()) {
if (Directional) {
ii = src->createEdge(dst, 0, std::forward<Args>(args)...);
} else {
Galois::Runtime::acquire(dst, mflag);
EdgeTy* e = edges.mkEdge(std::forward<Args>(args)...);
ii = dst->createEdge(src, e, std::forward<Args>(args)...);
ii = src->createEdge(dst, e, std::forward<Args>(args)...);
}
}
return ii;
}
public:
/**
* Creates a new node holding the indicated data. Usually you should call
* {@link addNode()} afterwards.
*/
template<typename... Args>
GraphNode createNode(Args&&... args) {
gNode* N = &(nodes.emplace(std::forward<Args>(args)...));
return GraphNode(N);
}
/**
* Adds a node to the graph.
*/
void addNode(const GraphNode& n, Galois::MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, true);
Galois::Runtime::acquire(n, mflag);
}
//! Gets the node data for a node.
NodeTy& getData(const GraphNode& n, Galois::MethodFlag mflag = MethodFlag::ALL) const {
assert(n);
Galois::Runtime::checkWrite(mflag, false);
Galois::Runtime::acquire(n, mflag);
return n->data;
}
//! Checks if a node is in the graph
bool containsNode(const GraphNode& n, Galois::MethodFlag mflag = MethodFlag::ALL) const {
assert(n);
Galois::Runtime::acquire(n, mflag);
}
/**
* Adds an edge to graph, replacing existing value if edge already exists.
*
* Ignore the edge data, let the caller use the returned iterator to set the
* value if desired. This frees us from dealing with the void edge data
* problem in this API
*/
edge_iterator addEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag = MethodFlag::ALL) {
return createEdgeWithReuse(src, dst, mflag);
}
//! Adds and initializes an edge to graph but does not check for duplicate edges
template<typename... Args>
edge_iterator addMultiEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
return createEdge(src, dst, mflag, std::forward<Args>(args)...);
}
//! Finds if an edge between src and dst exists
edge_iterator findEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(src);
assert(dst);
Galois::Runtime::acquire(src, mflag);
/*return boost::make_filter_iterator(is_edge(), src->find(dst), src->end());*/
return src->find(dst);
}
/**
* Returns the edge data associated with the edge. It is an error to
* get the edge data for a non-existent edge. It is an error to get
* edge data for inactive edges. By default, the mflag is Galois::NONE
* because edge_begin() dominates this call and should perform the
* appropriate locking.
*/
edge_data_reference getEdgeData(edge_iterator ii, Galois::MethodFlag mflag = MethodFlag::NONE) const {
Galois::Runtime::checkWrite(mflag, false);
Galois::Runtime::acquire(ii->first(), mflag);
return *ii->second();
}
//! Returns the destination of an edge
GraphNode getEdgeDst(edge_iterator ii) {
return GraphNode(ii->first());
}
//// General Things ////
//! Returns an iterator to the neighbors of a node
edge_iterator edge_begin(GraphNode N, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(N);
Galois::Runtime::acquire(N, mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (typename gNode::iterator ii = N->begin(), ee = N->end(); ii != ee; ++ii) {
Galois::Runtime::acquire(ii->first(), mflag);
}
}
return N->begin();
}
//! Returns the end of the neighbor iterator
edge_iterator edge_end(GraphNode N, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(N);
// Not necessary; no valid use for an end pointer should ever require it
//if (shouldLock(mflag))
// acquire(N);
return N->end();
}
/**
* An object with begin() and end() methods to iterate over the outgoing
* edges of N.
*/
detail::EdgesIterator<MemScalGraph> out_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::EdgesIterator<MemScalGraph>(*this, N, mflag);
}
/**
* Returns an iterator to all the nodes in the graph. Not thread-safe.
*/
iterator begin() {
return boost::make_transform_iterator(nodes.begin(),makeGraphNode());
}
//! Returns the end of the node iterator. Not thread-safe.
iterator end() {
return boost::make_transform_iterator(nodes.end(),makeGraphNode());
}
typedef iterator local_iterator;
local_iterator local_begin() {
return boost::make_transform_iterator(nodes.local_begin(),makeGraphNode());
}
local_iterator local_end() {
return boost::make_transform_iterator(nodes.local_end(),makeGraphNode());
}
/**
* Returns the number of nodes in the graph. Not thread-safe.
*/
unsigned int size() {
return std::distance(begin(), end());
}
//! Returns the size of edge data.
size_t sizeOfEdgeData() const {
return gNode::EITy::sizeOfSecond();
}
MemScalGraph() { }
};
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/Util.h
|
/** Useful classes and methods for graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_UTIL_H
#define GALOIS_GRAPH_UTIL_H
#include "Galois/Galois.h"
#include "Galois/Graph/Details.h"
namespace Galois {
namespace Graph {
/**
* Allocates and constructs a graph from a file. Tries to balance
* memory evenly across system. Cannot be called during parallel
* execution.
*/
template<typename GraphTy, typename... Args>
void readGraph(GraphTy& graph, Args&&... args) {
typename GraphTy::read_tag tag;
readGraphDispatch(graph, tag, std::forward<Args>(args)...);
}
template<typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_default_graph_tag tag, const std::string& filename) {
FileGraph f;
f.structureFromFileInterleaved<typename GraphTy::edge_data_type>(filename);
readGraphDispatch(graph, tag, f);
}
template<typename GraphTy>
struct ReadGraphConstructFrom {
GraphTy& graph;
FileGraph& f;
ReadGraphConstructFrom(GraphTy& g, FileGraph& _f): graph(g), f(_f) { }
void operator()(unsigned tid, unsigned total) {
graph.constructFrom(f, tid, total);
}
};
template<typename GraphTy, typename Aux>
struct ReadGraphConstructNodesFrom {
GraphTy& graph;
FileGraph& f;
Aux& aux;
ReadGraphConstructNodesFrom(GraphTy& g, FileGraph& _f, Aux& a): graph(g), f(_f), aux(a) { }
void operator()(unsigned tid, unsigned total) {
graph.constructNodesFrom(f, tid, total, aux);
}
};
template<typename GraphTy, typename Aux>
struct ReadGraphConstructEdgesFrom {
GraphTy& graph;
FileGraph& f;
Aux& aux;
ReadGraphConstructEdgesFrom(GraphTy& g, FileGraph& _f, Aux& a): graph(g), f(_f), aux(a) { }
void operator()(unsigned tid, unsigned total) {
graph.constructEdgesFrom(f, tid, total, aux);
}
};
template<typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_default_graph_tag, FileGraph& f) {
graph.allocateFrom(f);
Galois::on_each(ReadGraphConstructFrom<GraphTy>(graph, f));
}
template<typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_with_aux_graph_tag tag, const std::string& filename) {
FileGraph f;
f.structureFromFileInterleaved<typename GraphTy::edge_data_type>(filename);
readGraphDispatch(graph, tag, f);
}
template<typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_with_aux_graph_tag, FileGraph& f) {
typedef typename GraphTy::ReadGraphAuxData Aux;
Aux aux;
graph.allocateFrom(f, aux);
Galois::on_each(ReadGraphConstructNodesFrom<GraphTy, Aux>(graph, f, aux));
Galois::on_each(ReadGraphConstructEdgesFrom<GraphTy, Aux>(graph, f, aux));
}
template<typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_lc_inout_graph_tag, const std::string& f1, const std::string& f2) {
graph.createAsymmetric();
typename GraphTy::out_graph_type::read_tag tag1;
readGraphDispatch(graph, tag1, f1);
typename GraphTy::in_graph_type::read_tag tag2;
readGraphDispatch(graph.inGraph, tag2, f2);
}
template<typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_lc_inout_graph_tag, const std::string& f1) {
typename GraphTy::out_graph_type::read_tag tag1;
readGraphDispatch(graph, tag1, f1);
}
} // end namespace
} // end namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/LC_CSR_Graph.h
|
/** Local Computation graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Andrew Lenharth <[email protected]>
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_LC_CSR_GRAPH_H
#define GALOIS_GRAPH_LC_CSR_GRAPH_H
#include "Galois/config.h"
#include "Galois/LargeArray.h"
#include "Galois/Graph/FileGraph.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/MethodFlags.h"
#include GALOIS_CXX11_STD_HEADER(type_traits)
namespace Galois {
namespace Graph {
/**
* Local computation graph (i.e., graph structure does not change). The data representation
* is the traditional compressed-sparse-row (CSR) format.
*
* The position of template parameters may change between Galois releases; the
* most robust way to specify them is through the with_XXX nested templates.
*
* An example of use:
*
* \code
* typedef Galois::Graph::LC_CSR_Graph<int,int> Graph;
*
* // Create graph
* Graph g;
* g.structureFromFile(inputfile);
*
* // Traverse graph
* for (Graph::iterator ii = g.begin(), ei = g.end(); ii != ei; ++ii) {
* Graph::GraphNode src = *ii;
* for (Graph::edge_iterator jj = g.edge_begin(src), ej = g.edge_end(src); jj != ej; ++jj) {
* Graph::GraphNode dst = g.getEdgeDst(jj);
* int edgeData = g.getEdgeData(jj);
* int nodeData = g.getData(dst);
* }
* }
* \endcode
*
* And in C++11:
*
* \code
* typedef Galois::Graph::LC_CSR_Graph<int,int> Graph;
* // or typedef Galois::Graph::LC_CSR_Graph<int,int>::with_no_lockable<true>::with_numa_alloc<true>
*
* // Create graph
* Graph g;
* g.structureFromFile(inputfile);
*
* // Traverse graph
* for (Graph::GraphNode src : g) {
* for (Graph::edge_iterator edge : g.out_edges(src)) {
* Graph::GraphNode dst = g.getEdgeDst(edge);
* int edgeData = g.getEdgeData(edge);
* int nodeData = g.getData(dst);
* }
* }
* \endcode
*
* @tparam NodeTy data on nodes
* @tparam EdgeTy data on out edges
*/
template<typename NodeTy, typename EdgeTy,
bool HasNoLockable=false,
bool UseNumaAlloc=false,
bool HasOutOfLineLockable=false>
class LC_CSR_Graph:
private boost::noncopyable,
private detail::LocalIteratorFeature<UseNumaAlloc>,
private detail::OutOfLineLockableFeature<HasOutOfLineLockable && !HasNoLockable> {
template<typename Graph> friend class LC_InOut_Graph;
public:
template<bool _has_id>
struct with_id { typedef LC_CSR_Graph type; };
template<typename _node_data>
struct with_node_data { typedef LC_CSR_Graph<_node_data,EdgeTy,HasNoLockable,UseNumaAlloc,HasOutOfLineLockable> type; };
//! If true, do not use abstract locks in graph
template<bool _has_no_lockable>
struct with_no_lockable { typedef LC_CSR_Graph<NodeTy,EdgeTy,_has_no_lockable,UseNumaAlloc,HasOutOfLineLockable> type; };
//! If true, use NUMA-aware graph allocation
template<bool _use_numa_alloc>
struct with_numa_alloc { typedef LC_CSR_Graph<NodeTy,EdgeTy,HasNoLockable,_use_numa_alloc,HasOutOfLineLockable> type; };
//! If true, store abstract locks separate from nodes
template<bool _has_out_of_line_lockable>
struct with_out_of_line_lockable { typedef LC_CSR_Graph<NodeTy,EdgeTy,HasNoLockable,UseNumaAlloc,_has_out_of_line_lockable> type; };
typedef read_default_graph_tag read_tag;
protected:
typedef LargeArray<EdgeTy> EdgeData;
typedef LargeArray<uint32_t> EdgeDst;
typedef detail::NodeInfoBaseTypes<NodeTy,!HasNoLockable && !HasOutOfLineLockable> NodeInfoTypes;
typedef detail::NodeInfoBase<NodeTy,!HasNoLockable && !HasOutOfLineLockable> NodeInfo;
typedef LargeArray<uint64_t> EdgeIndData;
typedef LargeArray<NodeInfo> NodeData;
public:
typedef uint32_t GraphNode;
typedef EdgeTy edge_data_type;
typedef NodeTy node_data_type;
typedef typename EdgeData::reference edge_data_reference;
typedef typename NodeInfoTypes::reference node_data_reference;
typedef boost::counting_iterator<typename EdgeIndData::value_type> edge_iterator;
typedef boost::counting_iterator<typename EdgeDst::value_type> iterator;
typedef iterator const_iterator;
typedef iterator local_iterator;
typedef iterator const_local_iterator;
protected:
NodeData nodeData;
EdgeIndData edgeIndData;
EdgeDst edgeDst;
EdgeData edgeData;
uint64_t numNodes;
uint64_t numEdges;
typedef detail::EdgeSortIterator<GraphNode,typename EdgeIndData::value_type,EdgeDst,EdgeData> edge_sort_iterator;
edge_iterator raw_begin(GraphNode N) const {
return edge_iterator((N == 0) ? 0 : edgeIndData[N-1]);
}
edge_iterator raw_end(GraphNode N) const {
return edge_iterator(edgeIndData[N]);
}
edge_sort_iterator edge_sort_begin(GraphNode N) {
return edge_sort_iterator(*raw_begin(N), &edgeDst, &edgeData);
}
edge_sort_iterator edge_sort_end(GraphNode N) {
return edge_sort_iterator(*raw_end(N), &edgeDst, &edgeData);
}
template<bool _A1 = HasNoLockable, bool _A2 = HasOutOfLineLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<!_A1 && !_A2>::type* = 0) {
Galois::Runtime::acquire(&nodeData[N], mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A1 && !_A2>::type* = 0) {
this->outOfLineAcquire(getId(N), mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A2>::type* = 0) { }
size_t getId(GraphNode N) {
return N;
}
GraphNode getNode(size_t n) {
return n;
}
public:
node_data_reference getData(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, false);
NodeInfo& NI = nodeData[N];
acquireNode(N, mflag);
return NI.getData();
}
edge_data_reference getEdgeData(edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) {
Galois::Runtime::checkWrite(mflag, false);
return edgeData[*ni];
}
GraphNode getEdgeDst(edge_iterator ni) {
return edgeDst[*ni];
}
uint64_t size() const { return numNodes; }
uint64_t sizeEdges() const { return numEdges; }
iterator begin() const { return iterator(0); }
iterator end() const { return iterator(numNodes); }
const_local_iterator local_begin() const { return const_local_iterator(this->localBegin(numNodes)); }
const_local_iterator local_end() const { return const_local_iterator(this->localEnd(numNodes)); }
local_iterator local_begin() { return local_iterator(this->localBegin(numNodes)); }
local_iterator local_end() { return local_iterator(this->localEnd(numNodes)); }
edge_iterator edge_begin(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (edge_iterator ii = raw_begin(N), ee = raw_end(N); ii != ee; ++ii) {
acquireNode(edgeDst[*ii], mflag);
}
}
return raw_begin(N);
}
edge_iterator edge_end(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
return raw_end(N);
}
detail::EdgesIterator<LC_CSR_Graph> out_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::EdgesIterator<LC_CSR_Graph>(*this, N, mflag);
}
/**
* Sorts outgoing edges of a node. Comparison function is over EdgeTy.
*/
template<typename CompTy>
void sortEdgesByEdgeData(GraphNode N, const CompTy& comp = std::less<EdgeTy>(), MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
std::sort(edge_sort_begin(N), edge_sort_end(N), detail::EdgeSortCompWrapper<EdgeSortValue<GraphNode,EdgeTy>,CompTy>(comp));
}
/**
* Sorts outgoing edges of a node. Comparison function is over <code>EdgeSortValue<EdgeTy></code>.
*/
template<typename CompTy>
void sortEdges(GraphNode N, const CompTy& comp, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
std::sort(edge_sort_begin(N), edge_sort_end(N), comp);
}
void allocateFrom(FileGraph& graph) {
numNodes = graph.size();
numEdges = graph.sizeEdges();
if (UseNumaAlloc) {
nodeData.allocateLocal(numNodes, false);
edgeIndData.allocateLocal(numNodes, false);
edgeDst.allocateLocal(numEdges, false);
edgeData.allocateLocal(numEdges, false);
this->outOfLineAllocateLocal(numNodes, false);
} else {
nodeData.allocateInterleaved(numNodes);
edgeIndData.allocateInterleaved(numNodes);
edgeDst.allocateInterleaved(numEdges);
edgeData.allocateInterleaved(numEdges);
this->outOfLineAllocateInterleaved(numNodes);
}
}
void constructFrom(FileGraph& graph, unsigned tid, unsigned total) {
auto r = graph.divideBy(
NodeData::size_of::value + EdgeIndData::size_of::value + LC_CSR_Graph::size_of_out_of_line::value,
EdgeDst::size_of::value + EdgeData::size_of::value,
tid, total);
this->setLocalRange(*r.first, *r.second);
for (FileGraph::iterator ii = r.first, ei = r.second; ii != ei; ++ii) {
nodeData.constructAt(*ii);
edgeIndData[*ii] = *graph.edge_end(*ii);
this->outOfLineConstructAt(*ii);
for (FileGraph::edge_iterator nn = graph.edge_begin(*ii), en = graph.edge_end(*ii); nn != en; ++nn) {
if (EdgeData::has_value)
edgeData.set(*nn, graph.getEdgeData<typename EdgeData::value_type>(nn));
edgeDst[*nn] = graph.getEdgeDst(nn);
}
}
}
};
} // end namespace
} // end namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/LC_InlineEdge_Graph.h
|
/** Local Computation graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Andrew Lenharth <[email protected]>
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_LC_INLINEEDGE_GRAPH_H
#define GALOIS_GRAPH_LC_INLINEEDGE_GRAPH_H
#include "Galois/config.h"
#include "Galois/LargeArray.h"
#include "Galois/Graph/FileGraph.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/MethodFlags.h"
#include <boost/mpl/if.hpp>
#include GALOIS_CXX11_STD_HEADER(type_traits)
namespace Galois {
namespace Graph {
/**
* Local computation graph (i.e., graph structure does not change). The data representation
* is a modification of {@link LC_CSR_Graph} where the edge data is stored inline with the
* adjacency information.
*
* The position of template parameters may change between Galois releases; the
* most robust way to specify them is through the with_XXX nested templates.
*/
template<typename NodeTy, typename EdgeTy,
bool HasNoLockable=false,
bool UseNumaAlloc=false,
bool HasOutOfLineLockable=false,
bool HasCompressedNodePtr=false>
class LC_InlineEdge_Graph:
private boost::noncopyable,
private detail::LocalIteratorFeature<UseNumaAlloc>,
private detail::OutOfLineLockableFeature<HasOutOfLineLockable && !HasNoLockable> {
template<typename Graph> friend class LC_InOut_Graph;
public:
template<bool _has_id>
struct with_id { typedef LC_InlineEdge_Graph type; };
template<typename _node_data>
struct with_node_data { typedef LC_InlineEdge_Graph<_node_data,EdgeTy,HasNoLockable,UseNumaAlloc,HasOutOfLineLockable,HasCompressedNodePtr> type; };
template<bool _has_no_lockable>
struct with_no_lockable { typedef LC_InlineEdge_Graph<NodeTy,EdgeTy,_has_no_lockable,UseNumaAlloc,HasOutOfLineLockable,HasCompressedNodePtr> type; };
template<bool _use_numa_alloc>
struct with_numa_alloc { typedef LC_InlineEdge_Graph<NodeTy,EdgeTy,HasNoLockable,_use_numa_alloc,HasOutOfLineLockable,HasCompressedNodePtr> type; };
template<bool _has_out_of_line_lockable>
struct with_out_of_line_lockable { typedef LC_InlineEdge_Graph<NodeTy,EdgeTy,HasNoLockable,UseNumaAlloc,_has_out_of_line_lockable,HasCompressedNodePtr> type; };
/**
* Compress representation of graph at the expense of one level of indirection on accessing
* neighbors of a node
*/
template<bool _has_compressed_node_ptr>
struct with_compressed_node_ptr { typedef LC_InlineEdge_Graph<NodeTy,EdgeTy,HasNoLockable,UseNumaAlloc,HasOutOfLineLockable,_has_compressed_node_ptr> type; };
typedef read_default_graph_tag read_tag;
protected:
class NodeInfo;
typedef detail::EdgeInfoBase<typename boost::mpl::if_c<HasCompressedNodePtr,uint32_t,NodeInfo*>::type,EdgeTy> EdgeInfo;
typedef LargeArray<EdgeInfo> EdgeData;
typedef LargeArray<NodeInfo> NodeData;
typedef detail::NodeInfoBaseTypes<NodeTy,!HasNoLockable && !HasOutOfLineLockable> NodeInfoTypes;
class NodeInfo: public detail::NodeInfoBase<NodeTy,!HasNoLockable && !HasOutOfLineLockable> {
EdgeInfo* m_edgeBegin;
EdgeInfo* m_edgeEnd;
public:
EdgeInfo*& edgeBegin() { return m_edgeBegin; }
EdgeInfo*& edgeEnd() { return m_edgeEnd; }
};
public:
typedef NodeInfo* GraphNode;
typedef EdgeTy edge_data_type;
typedef NodeTy node_data_type;
typedef typename EdgeInfo::reference edge_data_reference;
typedef typename NodeInfoTypes::reference node_data_reference;
typedef EdgeInfo* edge_iterator;
typedef Galois::NoDerefIterator<NodeInfo*> iterator;
typedef Galois::NoDerefIterator<const NodeInfo*> const_iterator;
typedef iterator local_iterator;
typedef const_iterator const_local_iterator;
protected:
NodeData nodeData;
EdgeData edgeData;
uint64_t numNodes;
uint64_t numEdges;
template<bool _C = HasCompressedNodePtr>
NodeInfo* getDst(edge_iterator ii, typename std::enable_if<_C>::type* x = 0) const {
return const_cast<NodeInfo*>(&nodeData[ii->dst]);
}
template<bool _C = HasCompressedNodePtr>
NodeInfo* getDst(edge_iterator ii, typename std::enable_if<!_C>::type* x = 0) const {
return ii->dst;
}
template<typename Container,typename Index, bool _C = HasCompressedNodePtr>
void setEdgeDst(Container& c, edge_iterator edge, Index idx, typename std::enable_if<_C>::type* = 0) {
edge->dst = idx;
}
template<typename Container,typename Index, bool _C = HasCompressedNodePtr>
void setEdgeDst(Container& c, edge_iterator edge, Index idx, typename std::enable_if<!_C>::type* = 0) {
edge->dst = &c[idx];
}
template<bool _A1 = HasNoLockable, bool _A2 = HasOutOfLineLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<!_A1 && !_A2>::type* = 0) {
Galois::Runtime::acquire(N, mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A1 && !_A2>::type* = 0) {
this->outOfLineAcquire(getId(N), mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A2>::type* = 0) { }
edge_iterator raw_begin(GraphNode N) {
return nodeData[getId(N)].edgeBegin();
}
edge_iterator raw_end(GraphNode N) {
return nodeData[getId(N)].edgeEnd();
}
size_t getId(GraphNode N) {
return std::distance(this->nodeData.data(), N);
}
GraphNode getNode(size_t n) {
return &nodeData[n];
}
public:
~LC_InlineEdge_Graph() {
if (!EdgeInfo::has_value) return;
if (numNodes == 0) return;
for (edge_iterator ii = nodeData[0].edgeBegin(), ei = nodeData[numNodes-1].edgeEnd(); ii != ei; ++ii) {
ii->destroy();
}
}
node_data_reference getData(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, false);
acquireNode(N, mflag);
return N->getData();
}
edge_data_reference getEdgeData(edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) const {
Galois::Runtime::checkWrite(mflag, false);
return ni->get();
}
GraphNode getEdgeDst(edge_iterator ni) const {
return getDst(ni);
}
uint64_t size() const { return numNodes; }
uint64_t sizeEdges() const { return numEdges; }
const_iterator begin() const { return const_iterator(nodeData.begin()); }
const_iterator end() const { return const_iterator(nodeData.end()); }
iterator begin() { return iterator(nodeData.data()); }
iterator end() { return iterator(nodeData.end()); }
local_iterator local_begin() { return local_iterator(&nodeData[this->localBegin(numNodes)]); }
local_iterator local_end() { return local_iterator(&nodeData[this->localEnd(numNodes)]); }
const_local_iterator local_begin() const { return const_local_iterator(&nodeData[this->localBegin(numNodes)]); }
const_local_iterator local_end() const { return const_local_iterator(&nodeData[this->localEnd(numNodes)]); }
edge_iterator edge_begin(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (edge_iterator ii = N->edgeBegin(), ee = N->edgeEnd(); ii != ee; ++ii) {
acquireNode(getDst(ii), mflag);
}
}
return N->edgeBegin();
}
edge_iterator edge_end(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
return N->edgeEnd();
}
detail::EdgesIterator<LC_InlineEdge_Graph> out_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::EdgesIterator<LC_InlineEdge_Graph>(*this, N, mflag);
}
#if 0
/**
* Sorts outgoing edges of a node. Comparison function is over EdgeTy.
*/
template<typename CompTy>
void sortEdgesByEdgeData(GraphNode N, const CompTy& comp = std::less<EdgeTy>(), MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::acquire(N, mflag);
std::sort(edge_sort_begin(N), edge_sort_end(N), EdgeSortCompWrapper<EdgeSortValue<GraphNode,EdgeTy>,CompTy>(comp));
}
/**
* Sorts outgoing edges of a node. Comparison function is over <code>EdgeSortValue<EdgeTy></code>.
*/
template<typename CompTy>
void sortEdges(GraphNode N, const CompTy& comp, MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::acquire(N, mflag);
std::sort(edge_sort_begin(N), edge_sort_end(N), comp);
}
#endif
void allocateFrom(FileGraph& graph) {
numNodes = graph.size();
numEdges = graph.sizeEdges();
if (UseNumaAlloc) {
nodeData.allocateLocal(numNodes, false);
edgeData.allocateLocal(numEdges, false);
this->outOfLineAllocateLocal(numNodes, false);
} else {
nodeData.allocateInterleaved(numNodes);
edgeData.allocateInterleaved(numEdges);
this->outOfLineAllocateInterleaved(numNodes);
}
}
void constructFrom(FileGraph& graph, unsigned tid, unsigned total) {
typedef typename EdgeInfo::value_type EDV;
auto r = graph.divideBy(
NodeData::size_of::value + LC_InlineEdge_Graph::size_of_out_of_line::value,
EdgeData::size_of::value,
tid, total);
EdgeInfo* curEdge = edgeData.data() + *graph.edge_begin(*r.first);
this->setLocalRange(*r.first, *r.second);
for (FileGraph::iterator ii = r.first, ei = r.second; ii != ei; ++ii) {
nodeData.constructAt(*ii);
this->outOfLineConstructAt(*ii);
nodeData[*ii].edgeBegin() = curEdge;
for (FileGraph::edge_iterator nn = graph.edge_begin(*ii), en = graph.edge_end(*ii); nn != en; ++nn) {
if (EdgeInfo::has_value)
curEdge->construct(graph.getEdgeData<EDV>(nn));
setEdgeDst(nodeData, curEdge, graph.getEdgeDst(nn));
++curEdge;
}
nodeData[*ii].edgeEnd() = curEdge;
}
}
};
} // end namespace
} // end namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/GraphNodeBag.h
|
#ifndef GALOIS_GRAPHNODEBAG_H
#define GALOIS_GRAPHNODEBAG_H
#include "Galois/Accumulator.h"
#include "Galois/LargeArray.h"
#include "Galois/Bag.h"
namespace Galois {
/**
* Stores graph nodes to execute for {@link Ligra} executor.
*/
template<unsigned int BlockSize = 0>
class GraphNodeBag {
typedef Galois::InsertBag<size_t, BlockSize> Bag;
typedef Galois::LargeArray<bool> Bitmask;
Bag bag;
Galois::GAccumulator<size_t> counts;
Galois::GAccumulator<size_t> numNodes;
Bitmask bitmask;
size_t size;
bool isDense;
struct InitializeSmall {
GraphNodeBag* self;
void operator()(size_t n) {
self->bitmask[n] = 0;
}
};
struct InitializeBig {
GraphNodeBag* self;
void operator()(unsigned id, unsigned total) {
typedef typename Bitmask::iterator It;
std::pair<It,It> p = Galois::block_range(self->bitmask.begin(), self->bitmask.end(), id, total);
std::fill(p.first, p.second, 0);
}
};
struct Densify {
GraphNodeBag* self;
void operator()(size_t n) {
self->bitmask[n] = true;
}
};
public:
GraphNodeBag(size_t n): size(n), isDense(false) { }
typedef typename Bag::iterator iterator;
typedef typename Bag::local_iterator local_iterator;
iterator begin() { return bag.begin(); }
iterator end() { return bag.end(); }
local_iterator local_begin() { return bag.local_begin(); }
local_iterator local_end() { return bag.local_end(); }
void pushDense(size_t n, size_t numEdges) {
assert(isDense);
if (!bitmask[n]) {
bitmask[n] = true;
push(n, numEdges);
}
}
void push(size_t n, size_t numEdges) {
bag.push(n);
numNodes += 1;
counts += 1 + numEdges;
}
size_t getCount() { return counts.reduce(); }
size_t getSize() { return numNodes.reduce(); }
void clear() {
if (isDense) {
if (numNodes.reduce() < bitmask.size() / 4) {
InitializeSmall fn = { this };
Galois::do_all_local(bag, fn);
} else {
InitializeBig fn = { this };
Galois::on_each(fn);
}
}
bag.clear();
counts.reset();
numNodes.reset();
isDense = false;
}
bool contains(size_t n) {
assert(isDense);
return bitmask[n];
}
bool empty() const { return bag.empty(); }
void densify() {
isDense = true;
if (bitmask.size() == 0) {
bitmask.create(size);
}
Densify fn = { this };
Galois::do_all_local(bag, fn);
}
};
/**
* Stores graph nodes to execute for {@link Ligra} executor. Unlike {@link
* GraphNodeBag}, this class stores two bags to facilitate bulk-synchronous
* processing.
*/
template<unsigned int BlockSize = 0>
class GraphNodeBagPair {
GraphNodeBag<BlockSize> bag1;
GraphNodeBag<BlockSize> bag2;
int curp;
public:
typedef GraphNodeBag<BlockSize> bag_type;
GraphNodeBagPair(size_t n): bag1(n), bag2(n), curp(0) { }
GraphNodeBag<BlockSize>& cur() { return (*this)[curp]; }
GraphNodeBag<BlockSize>& next() { return (*this)[(curp+1) & 1]; }
void swap() {
curp = (curp + 1) & 1;
next().clear();
}
GraphNodeBag<BlockSize>& operator[](int i) {
if (i == 0)
return bag1;
else
return bag2;
}
};
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/Details.h
|
/** Implementation details for implementing graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* Implementation details for various graphs.
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_DETAILS_H
#define GALOIS_GRAPH_DETAILS_H
#include "Galois/LargeArray.h"
#include "Galois/LazyObject.h"
#include "Galois/NoDerefIterator.h"
#include "Galois/Threads.h"
#include "Galois/Runtime/Context.h"
#include "Galois/Runtime/MethodFlags.h"
#include "Galois/Runtime/PerThreadStorage.h"
#include <boost/mpl/if.hpp>
#include <algorithm>
namespace Galois {
namespace Graph {
struct read_default_graph_tag { };
struct read_with_aux_graph_tag { };
struct read_lc_inout_graph_tag { };
//! Proxy object for {@link detail::EdgeSortIterator}
template<typename GraphNode, typename EdgeTy>
struct EdgeSortValue: public StrictObject<EdgeTy> {
typedef StrictObject<EdgeTy> Super;
typedef typename Super::value_type value_type;
GraphNode dst;
EdgeSortValue(GraphNode d, const value_type& v): Super(v), dst(d) { }
template<typename ER>
EdgeSortValue(const ER& ref) {
ref.initialize(*this);
}
};
//! Implementation details for graphs
namespace detail {
template<bool Enable>
class LocalIteratorFeature {
typedef std::pair<uint64_t,uint64_t> Range;
Runtime::PerThreadStorage<Range> localIterators;
public:
uint64_t localBegin(uint64_t numNodes) const {
return localIterators.getLocal()->first;
}
uint64_t localEnd(uint64_t numNodes) const {
return localIterators.getLocal()->second;
}
void setLocalRange(uint64_t begin, uint64_t end) {
Range& r = *localIterators.getLocal();
r.first = begin;
r.second = end;
}
};
template<>
struct LocalIteratorFeature<false> {
uint64_t localBegin(uint64_t numNodes) const {
unsigned int id = Galois::Runtime::LL::getTID();
unsigned int num = Galois::getActiveThreads();
return (numNodes + num - 1) / num * id;
}
uint64_t localEnd(uint64_t numNodes) const {
unsigned int id = Galois::Runtime::LL::getTID();
unsigned int num = Galois::getActiveThreads();
uint64_t end = (numNodes + num - 1) / num * (id + 1);
return std::min(end, numNodes);
}
void setLocalRange(uint64_t begin, uint64_t end) { }
};
//! Proxy object for {@link EdgeSortIterator}
template<typename GraphNode, typename EdgeIndex, typename EdgeDst, typename EdgeData>
struct EdgeSortReference {
typedef typename EdgeData::raw_value_type EdgeTy;
EdgeIndex at;
EdgeDst* edgeDst;
EdgeData* edgeData;
EdgeSortReference(EdgeIndex x, EdgeDst* dsts, EdgeData* data): at(x), edgeDst(dsts), edgeData(data) { }
EdgeSortReference operator=(const EdgeSortValue<GraphNode, EdgeTy>& x) {
edgeDst->set(at, x.dst);
edgeData->set(at, x.get());
return *this;
}
EdgeSortReference operator=(const EdgeSortReference<GraphNode,EdgeIndex,EdgeDst,EdgeData>& x) {
edgeDst->set(at, edgeDst->at(x.at));
edgeData->set(at, edgeData->at(x.at));
return *this;
}
EdgeSortValue<GraphNode, EdgeTy> operator*() const {
return EdgeSortValue<GraphNode, EdgeTy>(edgeDst->at(at), edgeData->at(at));
}
void initialize(EdgeSortValue<GraphNode, EdgeTy>& value) const {
value = *(*this);
}
};
/**
* Converts comparison functions over EdgeTy to be over {@link EdgeSortValue}.
*/
template<typename EdgeSortValueTy,typename CompTy>
struct EdgeSortCompWrapper {
const CompTy& comp;
EdgeSortCompWrapper(const CompTy& c): comp(c) { }
bool operator()(const EdgeSortValueTy& a, const EdgeSortValueTy& b) const {
return comp(a.get(), b.get());
}
};
/**
* Iterator to facilitate sorting of CSR-like graphs. Converts random access operations
* on iterator to appropriate computations on edge destinations and edge data.
*
* @tparam GraphNode Graph node pointer
* @tparam EdgeIndex Integer-like value that is passed to EdgeDst and EdgeData
* @tparam EdgeDst {@link LargeArray}-like container of edge destinations
* @tparam EdgeData {@link LargeArray}-like container of edge data
*/
template<typename GraphNode, typename EdgeIndex, typename EdgeDst, typename EdgeData>
class EdgeSortIterator: public boost::iterator_facade<
EdgeSortIterator<GraphNode, EdgeIndex, EdgeDst, EdgeData>,
EdgeSortValue<GraphNode, typename EdgeData::raw_value_type>,
boost::random_access_traversal_tag,
EdgeSortReference<GraphNode, EdgeIndex, EdgeDst, EdgeData>
> {
typedef EdgeSortIterator<GraphNode,EdgeIndex,EdgeDst,EdgeData> Self;
typedef EdgeSortReference<GraphNode,EdgeIndex,EdgeDst,EdgeData> Reference;
EdgeIndex at;
EdgeDst* edgeDst;
EdgeData* edgeData;
public:
EdgeSortIterator(): at(0) { }
EdgeSortIterator(EdgeIndex x, EdgeDst* dsts, EdgeData* data):
at(x), edgeDst(dsts), edgeData(data) { }
private:
friend class boost::iterator_core_access;
bool equal(const Self& other) const { return at == other.at; }
Reference dereference() const { return Reference(at, edgeDst, edgeData); }
ptrdiff_t distance_to(const Self& other) const { return other.at - (ptrdiff_t) at; }
void increment() { ++at; }
void decrement() { --at; }
void advance(ptrdiff_t n) { at += n; }
};
template<typename IdTy>
class IntrusiveId {
IdTy id;
public:
IdTy& getId() { return id; }
void setId(size_t n) { id = n; }
};
template<>
class IntrusiveId<void> {
public:
char getId() { return 0; }
void setId(size_t n) { }
};
//! Empty class for HasLockable optimization
class NoLockable { };
//! Separate types from definitions to allow incomplete types as NodeTy
template<typename NodeTy, bool HasLockable>
struct NodeInfoBaseTypes {
typedef NodeTy& reference;
};
template<bool HasLockable>
struct NodeInfoBaseTypes<void, HasLockable> {
typedef void* reference;
};
//! Specializations for void node data
template<typename NodeTy, bool HasLockable>
class NodeInfoBase:
public boost::mpl::if_c<HasLockable,Galois::Runtime::Lockable,NoLockable>::type,
public NodeInfoBaseTypes<NodeTy, HasLockable>
{
NodeTy data;
public:
template<typename... Args>
NodeInfoBase(Args&&... args): data(std::forward<Args>(args)...) { }
typename NodeInfoBase::reference getData() { return data; }
};
template<bool HasLockable>
struct NodeInfoBase<void, HasLockable>:
public boost::mpl::if_c<HasLockable,Galois::Runtime::Lockable,NoLockable>::type,
public NodeInfoBaseTypes<void, HasLockable>
{
typename NodeInfoBase::reference getData() { return 0; }
};
template<bool Enable>
class OutOfLineLockableFeature {
typedef NodeInfoBase<void,true> OutOfLineLock;
LargeArray<OutOfLineLock> outOfLineLocks;
public:
struct size_of_out_of_line {
static const size_t value = sizeof(OutOfLineLock);
};
void outOfLineAcquire(size_t n, MethodFlag mflag) {
Galois::Runtime::acquire(&outOfLineLocks[n], mflag);
}
void outOfLineAllocateLocal(size_t numNodes, bool preFault) {
outOfLineLocks.allocateLocal(numNodes, preFault);
}
void outOfLineAllocateInterleaved(size_t numNodes) {
outOfLineLocks.allocateInterleaved(numNodes);
}
void outOfLineConstructAt(size_t n) {
outOfLineLocks.constructAt(n);
}
};
template<>
class OutOfLineLockableFeature<false> {
public:
struct size_of_out_of_line {
static const size_t value = 0;
};
void outOfLineAcquire(size_t n, MethodFlag mflag) { }
void outOfLineAllocateLocal(size_t numNodes, bool preFault) { }
void outOfLineAllocateInterleaved(size_t numNodes) { }
void outOfLineConstructAt(size_t n) { }
};
//! Edge specialization for void edge data
template<typename NodeInfoPtrTy,typename EdgeTy>
struct EdgeInfoBase: public LazyObject<EdgeTy>
{
NodeInfoPtrTy dst;
};
/**
* Convenience wrapper around Graph.edge_begin and Graph.edge_end to allow
* C++11 foreach iteration of edges.
*/
template<typename GraphTy>
class EdgesIterator {
GraphTy& g;
typename GraphTy::GraphNode n;
MethodFlag flag;
public:
typedef NoDerefIterator<typename GraphTy::edge_iterator> iterator;
EdgesIterator(GraphTy& g, typename GraphTy::GraphNode n, MethodFlag f): g(g), n(n), flag(f) { }
iterator begin() { return make_no_deref_iterator(g.edge_begin(n, flag)); }
iterator end() { return make_no_deref_iterator(g.edge_end(n, flag)); }
};
/**
* Convenience wrapper around Graph.in_edge_begin and Graph.in_edge_end to allow
* C++11 foreach iteration of in edges.
*/
template<typename GraphTy>
class InEdgesIterator {
GraphTy& g;
typename GraphTy::GraphNode n;
MethodFlag flag;
public:
typedef NoDerefIterator<typename GraphTy::in_edge_iterator> iterator;
InEdgesIterator(GraphTy& g, typename GraphTy::GraphNode n, MethodFlag f): g(g), n(n), flag(f) { }
iterator begin() { return make_no_deref_iterator(g.in_edge_begin(n, flag)); }
iterator end() { return make_no_deref_iterator(g.in_edge_end(n, flag)); }
};
template<typename GraphTy>
class EdgesWithNoFlagIterator {
GraphTy& g;
typename GraphTy::GraphNode n;
public:
typedef NoDerefIterator<typename GraphTy::edge_iterator> iterator;
EdgesWithNoFlagIterator(GraphTy& g, typename GraphTy::GraphNode n): g(g), n(n) { }
iterator begin() { return make_no_deref_iterator(g.edge_begin(n)); }
iterator end() { return make_no_deref_iterator(g.edge_end(n)); }
};
template<typename GN, typename EI, typename EdgeData,typename Data>
void swap(EdgeSortReference<GN,EI,EdgeData,Data> a, EdgeSortReference<GN,EI,EdgeData,Data> b) {
auto aa = *a;
auto bb = *b;
a = bb;
b = aa;
}
} // end namespace
} // end namespace
} // end namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/Graph.h
|
/** Basic morph graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_GRAPH_GRAPH_H
#define GALOIS_GRAPH_GRAPH_H
#include "FirstGraph.h"
#include "LCGraph.h"
#include "Util.h"
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/LCGraph.h
|
/** Local Computation graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* There are two main classes, ::FileGraph and ::LC_XXX_Graph. The former
* represents the pure structure of a graph (i.e., whether an edge exists between
* two nodes) and cannot be modified. The latter allows values to be stored on
* nodes and edges, but the structure of the graph cannot be modified.
*
* @author Andrew Lenharth <[email protected]>
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_LCGRAPH_H
#define GALOIS_GRAPH_LCGRAPH_H
#include "LC_CSR_Graph.h"
#include "LC_InlineEdge_Graph.h"
#include "LC_Linear_Graph.h"
#include "LC_Morph_Graph.h"
#include "LC_InOut_Graph.h"
#include "Util.h"
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/LC_InOut_Graph.h
|
/** Local computation graphs with in and out edges -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_LC_INOUT_GRAPH_H
#define GALOIS_GRAPH_LC_INOUT_GRAPH_H
#include "Galois/Graph/Details.h"
#include <boost/iterator/iterator_facade.hpp>
#include <boost/fusion/include/vector.hpp>
#include <boost/fusion/include/at_c.hpp>
namespace Galois {
namespace Graph {
/**
* Modify a LC_Graph to have in and out edges. In edges are stored by value, so
* modifying them does not modify the corresponding out edge.
*/
template<typename GraphTy>
class LC_InOut_Graph: public GraphTy::template with_id<true>::type {
template<typename G>
friend void readGraphDispatch(G&, read_lc_inout_graph_tag, const std::string&, const std::string&);
typedef typename GraphTy
::template with_id<true>::type Super;
typedef typename GraphTy
::template with_id<true>::type
::template with_node_data<void>::type
::template with_no_lockable<true>::type InGraph;
InGraph inGraph;
bool asymmetric;
typename InGraph::GraphNode inGraphNode(typename Super::GraphNode n) {
return inGraph.getNode(idFromNode(n));
}
void createAsymmetric() { asymmetric = true; }
public:
typedef Super out_graph_type;
typedef InGraph in_graph_type;
typedef typename Super::GraphNode GraphNode;
typedef typename Super::edge_data_type edge_data_type;
typedef typename Super::node_data_type node_data_type;
typedef typename Super::edge_data_reference edge_data_reference;
typedef typename Super::node_data_reference node_data_reference;
typedef typename Super::edge_iterator edge_iterator;
typedef typename Super::iterator iterator;
typedef typename Super::const_iterator const_iterator;
typedef typename Super::local_iterator local_iterator;
typedef typename Super::const_local_iterator const_local_iterator;
typedef read_lc_inout_graph_tag read_tag;
// Union of edge_iterator and InGraph::edge_iterator
class in_edge_iterator: public boost::iterator_facade<in_edge_iterator, void*, boost::forward_traversal_tag, void*> {
friend class boost::iterator_core_access;
friend class LC_InOut_Graph;
typedef edge_iterator Iterator0;
typedef typename InGraph::edge_iterator Iterator1;
typedef boost::fusion::vector<Iterator0, Iterator1> Iterators;
Iterators its;
LC_InOut_Graph* self;
int type;
void increment() {
if (type == 0)
++boost::fusion::at_c<0>(its);
else
++boost::fusion::at_c<1>(its);
}
bool equal(const in_edge_iterator& o) const {
if (type != o.type)
return false;
if (type == 0) {
return boost::fusion::at_c<0>(its) == boost::fusion::at_c<0>(o.its);
} else {
return boost::fusion::at_c<1>(its) == boost::fusion::at_c<1>(o.its);
}
}
void* dereference() const { return 0; }
public:
in_edge_iterator(): type(0) { }
in_edge_iterator(Iterator0 it): type(0) { boost::fusion::at_c<0>(its) = it; }
in_edge_iterator(Iterator1 it, int): type(1) { boost::fusion::at_c<1>(its) = it; }
};
LC_InOut_Graph(): asymmetric(false) { }
edge_data_reference getInEdgeData(in_edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) {
Galois::Runtime::checkWrite(mflag, false);
if (ni.type == 0) {
return this->getEdgeData(boost::fusion::at_c<0>(ni.its));
} else {
return inGraph.getEdgeData(boost::fusion::at_c<1>(ni.its));
}
}
GraphNode getInEdgeDst(in_edge_iterator ni) {
if (ni.type == 0) {
return this->getEdgeDst(boost::fusion::at_c<0>(ni.its));
} else {
return nodeFromId(inGraph.getId(inGraph.getEdgeDst(boost::fusion::at_c<1>(ni.its))));
}
}
in_edge_iterator in_edge_begin(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
this->acquireNode(N, mflag);
if (!asymmetric) {
if (Galois::Runtime::shouldLock(mflag)) {
for (edge_iterator ii = this->raw_begin(N), ei = this->raw_end(N); ii != ei; ++ii) {
this->acquireNode(this->getEdgeDst(ii), mflag);
}
}
return in_edge_iterator(this->raw_begin(N));
} else {
if (Galois::Runtime::shouldLock(mflag)) {
for (typename InGraph::edge_iterator ii = inGraph.raw_begin(inGraphNode(N)),
ei = inGraph.raw_end(inGraphNode(N)); ii != ei; ++ii) {
this->acquireNode(nodeFromId(inGraph.getId(inGraph.getEdgeDst(ii))), mflag);
}
}
return in_edge_iterator(inGraph.raw_begin(inGraphNode(N)), 0);
}
}
in_edge_iterator in_edge_end(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
this->acquireNode(N, mflag);
if (!asymmetric) {
return in_edge_iterator(this->raw_end(N));
} else {
return in_edge_iterator(inGraph.raw_end(inGraphNode(N)), 0);
}
}
detail::InEdgesIterator<LC_InOut_Graph> in_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::InEdgesIterator<LC_InOut_Graph>(*this, N, mflag);
}
/**
* Sorts incoming edges of a node. Comparison function is over Graph::edge_data_type.
*/
template<typename CompTy>
void sortInEdgesByEdgeData(GraphNode N,
const CompTy& comp = std::less<typename GraphTy::edge_data_type>(),
MethodFlag mflag = MethodFlag::ALL) {
this->acquireNode(N, mflag);
if (!asymmetric) {
std::sort(this->edge_sort_begin(N), this->edge_sort_end(N),
detail::EdgeSortCompWrapper<EdgeSortValue<GraphNode,typename GraphTy::edge_data_type>,CompTy>(comp));
} else {
std::sort(inGraph.edge_sort_begin(inGraphNode(N)),
inGraph.edge_sort_end(inGraphNode(N)),
detail::EdgeSortCompWrapper<EdgeSortValue<GraphNode,typename GraphTy::edge_data_type>,CompTy>(comp));
}
}
/**
* Sorts incoming edges of a node. Comparison function is over <code>EdgeSortValue<GraphTy::edge_data_type></code>.
*/
template<typename CompTy>
void sortInEdges(GraphNode N, const CompTy& comp, MethodFlag mflag = MethodFlag::ALL) {
this->acquireNode(N, mflag);
if (!asymmetric) {
std::sort(this->edge_sort_begin(N), this->edge_sort_end(N), comp);
} else {
std::sort(inGraph.edge_sort_begin(inGraphNode(N)), inGraph.edge_sort_end(inGraphNode(N)), comp);
}
}
size_t idFromNode(GraphNode N) {
return this->getId(N);
}
GraphNode nodeFromId(size_t N) {
return this->getNode(N);
}
};
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/SpatialTree.h
|
/** A quad-tree -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_GRAPH_SPATIALTREE_H
#define GALOIS_GRAPH_SPATIALTREE_H
namespace Galois {
namespace Graph {
//! Stores sets of objects at specific spatial coordinates in a quad tree.
//! Lookup returns an approximation of the closest item
template<typename T>
class SpatialTree2d {
struct Box2d {
double xmin;
double ymin;
double xmax;
double ymax;
double xmid() const { return (xmin + xmax) / 2.0; }
double ymid() const { return (ymin + ymax) / 2.0; }
void decimate(int quad, double midx, double midy) {
if (quad & 1)
xmin = midx;
else
xmax = midx;
if (quad & 2)
ymin = midy;
else
ymax = midy;
}
};
struct Node {
//existing item
T val;
double x, y;
//center
double midx, midy;
Node* children[4];
//needs c++11: Node(const T& v) :val(v), children({0,0,0,0}) {}
Node(const T& v, double _x, double _y) :val(v), x(_x), y(_y) {
children[0] = children[1] = children[2] = children[3] = 0;
}
void setCenter(double cx, double cy) {
midx = cx;
midy = cy;
}
int getQuad(double _x, double _y) {
int retval = 0;
if (_x > midx) retval += 1;
if (_y > midy) retval += 2;
return retval;
}
};
Galois::Runtime::MM::FSBGaloisAllocator<Node> nodeAlloc;
Node* root;
Box2d bounds;
//true if x,y is closer to testx, testy than oldx, oldy
bool closer(double x, double y, double testx, double testy, double oldx, double oldy) const {
double doldx = x - oldx;
double doldy = y - oldy;
double dtestx = x - testx;
double dtesty = y - testy;
doldx *= doldx;
doldy *= doldy;
dtestx *= dtestx;
dtesty *= dtesty;
return (dtestx + dtesty) < (doldx + doldy);
}
/*
T* recfind(Node* n, T* best, double bestx, double besty, double x, double y, Box2d b) {
if (!n)
return best;
if (!best) { // || closer(x, y, n->x, n->y, bestx, besty)) {
best = &n->val;
bestx = n->x;
besty = n->y;
}
int quad = b.getQuad(x,y);
b.decimate(quad);
return recfind(n->children[quad], best, bestx, besty, x, y, b);
}
*/
T* recfind(Node* n, double x, double y) {
Node* best = 0;
while (n) {
if (!best || closer(x, y, n->x, n->y, best->x, best->y))
best = n;
// best = &n->val;
int quad = n->getQuad(x,y);
n = n->children[quad];
}
return &best->val;
}
void recinsert(Node** pos, Box2d b, Node* node) {
if (!*pos) {
//only do an atomic if it looks empty
node->setCenter(b.xmid(), b.ymid());
if (__sync_bool_compare_and_swap(pos, 0, node))
return; //worked!
}
//We should recurse
int quad = (*pos)->getQuad(node->x, node->y);
b.decimate(quad, (*pos)->midx, (*pos)->midy);
recinsert(&(*pos)->children[quad], b, node);
}
Node* mkNode(const T& v, double x, double y) {
Node* n = nodeAlloc.allocate(1);
nodeAlloc.construct(n, Node(v,x,y));
return n;
//return new Node(v,x,y);
}
void delNode(Node* n) {
nodeAlloc.destroy(n);
nodeAlloc.deallocate(n, 1);
//delete n;
}
void freeTree(Node* n) {
if (!n) return;
for (int x = 0; x < 4; ++x)
freeTree(n->children[x]);
delNode(n);
}
public:
SpatialTree2d(double xmin = 0.0, double ymin = 0.0, double xmax = 0.0, double ymax = 0.0)
:root(0) {
init(xmin, ymin, xmax, ymax);
}
~SpatialTree2d() {
freeTree(root);
root = 0;
}
void init(double xmin, double ymin, double xmax, double ymax) {
bounds.xmin = xmin;
bounds.ymin = ymin;
bounds.xmax = xmax;
bounds.ymax = ymax;
}
//! Returns null if tree is empty
T* find(double x, double y) {
assert(root);
return recfind(root, x, y);
}
//! Insert an element. Will always insert and never roll back and thus must
//! be used after failsafe point.
void insert(double x, double y, const T& v) {
recinsert(&root, bounds, mkNode(v,x,y));
}
};
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/FirstGraph.h
|
/** Basic morph graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_GRAPH_FIRSTGRAPH_H
#define GALOIS_GRAPH_FIRSTGRAPH_H
#include "Galois/Bag.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/MethodFlags.h"
#include "llvm/ADT/SmallVector.h"
#include <boost/functional.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/iterator/filter_iterator.hpp>
#include <algorithm>
#include <map>
#include <set>
#include <vector>
namespace Galois {
//! Parallel graph data structures.
namespace Graph {
namespace FirstGraphImpl {
/**
* Wrapper class to have a valid type on void edges
*/
template<typename NTy, typename ETy, bool Directed>
struct UEdgeInfoBase;
template<typename NTy, typename ETy>
struct UEdgeInfoBase<NTy, ETy, true> {
typedef ETy& reference;
NTy* N;
ETy Ea;
inline NTy*& first() { assert(N); return N; }
inline NTy* const& first() const { assert(N); return N; }
inline ETy* second() { return &Ea; }
inline const ETy* second() const { return &Ea; }
template<typename... Args>
UEdgeInfoBase(NTy* n, ETy* v, Args&&... args) : N(n), Ea(std::forward<Args>(args)...) {}
template<typename... Args>
UEdgeInfoBase(ETy* v, Args&&... args) :Ea(std::forward<Args>(args)...) {}
template<typename... Args>
UEdgeInfoBase(NTy* n, ETy &v, Args&&... args): N(n) { Ea = v; }
static size_t sizeOfSecond() { return sizeof(ETy); }
};
template<typename NTy, typename ETy>
struct UEdgeInfoBase<NTy, ETy, false> {
typedef ETy& reference;
NTy* N;
ETy* Ea;
inline NTy*& first() { assert(N); return N; }
inline NTy* const& first() const { assert(N); return N; }
inline ETy* second() { return Ea; }
inline const ETy* second() const { return Ea; }
template<typename... Args>
UEdgeInfoBase(NTy* n, ETy* v, Args&&... args) : N(n), Ea(v) {}
static size_t sizeOfSecond() { return sizeof(ETy); }
};
template<typename NTy>
struct UEdgeInfoBase<NTy, void, true> {
typedef char& reference;
NTy* N;
inline NTy*& first() { return N; }
inline NTy* const& first() const { return N; }
inline char* second() const { return static_cast<char*>(NULL); }
inline char* addr() const { return second(); }
template<typename... Args>
UEdgeInfoBase(NTy* n, void* v, Args&&... args) : N(n) {}
static size_t sizeOfSecond() { return 0; }
};
template<typename NTy>
struct UEdgeInfoBase<NTy, void, false> {
typedef char& reference;
NTy* N;
inline NTy*& first() { return N; }
inline NTy* const& first() const { return N; }
inline char* second() const { return static_cast<char*>(NULL); }
inline char* addr() const { return second(); }
template<typename... Args>
UEdgeInfoBase(NTy* n, void* v, Args&&... args) : N(n) {}
static size_t sizeOfSecond() { return 0; }
};
template<typename ETy>
struct EdgeFactory {
Galois::Runtime::MM::FSBGaloisAllocator<ETy> mem;
template<typename... Args>
ETy* mkEdge(Args&&... args) {
ETy* e = mem.allocate(1);
mem.construct(e, std::forward<Args>(args)...);
return e;
}
void delEdge(ETy* e) {
mem.destroy(e);
mem.deallocate(e, 1);
}
bool mustDel() const { return true; }
};
template<>
struct EdgeFactory<void> {
void* mkEdge() { return static_cast<void*>(NULL); }
void delEdge(void*) {}
bool mustDel() const { return false; }
};
} // end namespace impl
/**
* A Graph.
*
* An example of use:
*
* \code
* struct Node {
* ... // Definition of node data
* };
*
* typedef Galois::Graph::FastGraph<Node,int,true> Graph;
*
* // Create graph
* Graph g;
* Node n1, n2;
* Graph::GraphNode a, b;
* a = g.createNode(n1);
* g.addNode(a);
* b = g.createNode(n2);
* g.addNode(b);
* g.getEdgeData(g.addEdge(a, b)) = 5;
*
* // Traverse graph
* for (Graph::iterator ii = g.begin(), ei = g.end(); ii != ei; ++ii) {
* Graph::GraphNode src = *ii;
* for (Graph::edge_iterator jj = g.edge_begin(src), ej = g.edge_end(src); ++jj) {
* Graph::GraphNode dst = graph.getEdgeDst(jj);
* int edgeData = g.getEdgeData(jj);
* assert(edgeData == 5);
* }
* }
* \endcode
*
* And in C++11:
*
* \code
* // Traverse graph
* for (Graph::GraphNode src : g) {
* for (Graph::edge_iterator edge : g.out_edges(src)) {
* Graph::GraphNode dst = g.getEdgeDst(edge);
* int edgeData = g.getEdgeData(edge);
* assert(edgeData == 5);
* }
* }
* \endcode
*
* @tparam NodeTy Type of node data
* @tparam EdgeTy Type of edge data
* @tparam Directional true if graph is directed
*/
template<typename NodeTy, typename EdgeTy, bool Directional,
bool HasNoLockable=false
>
class FirstGraph : private boost::noncopyable {
public:
//! If true, do not use abstract locks in graph
template<bool _has_no_lockable>
struct with_no_lockable { typedef FirstGraph<NodeTy,EdgeTy,Directional,_has_no_lockable> type; };
private:
template<typename T>
struct first_eq_and_valid {
T N2;
first_eq_and_valid(T& n) :N2(n) {}
template <typename T2>
bool operator()(const T2& ii) const {
return ii.first() == N2 && ii.first() && ii.first()->active;
}
};
struct first_not_valid {
template <typename T2>
bool operator()(const T2& ii) const { return !ii.first() || !ii.first()->active; }
};
class gNode;
struct gNodeTypes: public detail::NodeInfoBaseTypes<NodeTy, !HasNoLockable> {
//! The storage type for an edge
typedef FirstGraphImpl::UEdgeInfoBase<gNode, EdgeTy, Directional> EdgeInfo;
//! The storage type for edges
typedef llvm::SmallVector<EdgeInfo, 3> EdgesTy;
typedef typename EdgesTy::iterator iterator;
};
class gNode:
public detail::NodeInfoBase<NodeTy, !HasNoLockable>,
public gNodeTypes
{
friend class FirstGraph;
typedef detail::NodeInfoBase<NodeTy, !HasNoLockable> NodeInfo;
typename gNode::EdgesTy edges;
typedef typename gNode::iterator iterator;
typedef typename gNode::EdgeInfo EdgeInfo;
bool active;
iterator begin() { return edges.begin(); }
iterator end() { return edges.end(); }
void erase(iterator ii) {
*ii = edges.back();
edges.pop_back();
}
void erase(gNode* N) {
iterator ii = find(N);
if (ii != end())
edges.erase(ii);
}
iterator find(gNode* N) {
return std::find_if(begin(), end(), first_eq_and_valid<gNode*>(N));
}
void resizeEdges(size_t size) {
edges.resize(size, EdgeInfo(new gNode(), 0));
}
template<typename... Args>
iterator createEdge(gNode* N, EdgeTy* v, Args&&... args) {
return edges.insert(edges.end(), EdgeInfo(N, v, std::forward<Args>(args)...));
}
template<typename... Args>
iterator createEdgeWithReuse(gNode* N, EdgeTy* v, Args&&... args) {
//First check for holes
iterator ii = std::find_if(begin(), end(), first_not_valid());
if (ii != end()) {
*ii = EdgeInfo(N, v, std::forward<Args>(args)...);
return ii;
}
return edges.insert(edges.end(), EdgeInfo(N, v, std::forward<Args>(args)...));
}
template<bool _A1 = HasNoLockable>
void acquire(MethodFlag mflag, typename std::enable_if<!_A1>::type* = 0) {
Galois::Runtime::acquire(this, mflag);
}
template<bool _A1 = HasNoLockable>
void acquire(MethodFlag mflag, typename std::enable_if<_A1>::type* = 0) { }
public:
template<typename... Args>
gNode(Args&&... args): NodeInfo(std::forward<Args>(args)...), active(false) { }
};
//The graph manages the lifetimes of the data in the nodes and edges
typedef Galois::InsertBag<gNode> NodeListTy;
NodeListTy nodes;
FirstGraphImpl::EdgeFactory<EdgeTy> edges;
//Helpers for iterator classes
struct is_node : public std::unary_function<gNode&, bool>{
bool operator() (const gNode& g) const { return g.active; }
};
struct is_edge : public std::unary_function<typename gNodeTypes::EdgeInfo&, bool> {
bool operator()(typename gNodeTypes::EdgeInfo& e) const { return e.first()->active; }
};
struct makeGraphNode: public std::unary_function<gNode&, gNode*> {
gNode* operator()(gNode& data) const { return &data; }
};
public:
//! Graph node handle
typedef gNode* GraphNode;
//! Edge data type
typedef EdgeTy edge_type;
//! Node data type
typedef NodeTy node_type;
//! Edge iterator
typedef typename boost::filter_iterator<is_edge, typename gNodeTypes::iterator> edge_iterator;
//! Reference to edge data
typedef typename gNodeTypes::EdgeInfo::reference edge_data_reference;
//! Reference to node data
typedef typename gNodeTypes::reference node_data_reference;
//! Node iterator
typedef boost::transform_iterator<makeGraphNode,
boost::filter_iterator<is_node,
typename NodeListTy::iterator> > iterator;
private:
template<typename... Args>
edge_iterator createEdgeWithReuse(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
assert(src);
assert(dst);
Galois::Runtime::checkWrite(mflag, true);
src->acquire(mflag);
typename gNode::iterator ii = src->find(dst);
if (ii == src->end()) {
if (Directional) {
ii = src->createEdgeWithReuse(dst, 0, std::forward<Args>(args)...);
} else {
dst->acquire(mflag);
EdgeTy* e = edges.mkEdge(std::forward<Args>(args)...);
ii = dst->createEdgeWithReuse(src, e, std::forward<Args>(args)...);
ii = src->createEdgeWithReuse(dst, e, std::forward<Args>(args)...);
}
}
return boost::make_filter_iterator(is_edge(), ii, src->end());
}
template<typename... Args>
edge_iterator createEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
assert(src);
assert(dst);
Galois::Runtime::checkWrite(mflag, true);
src->acquire(mflag);
typename gNode::iterator ii = src->end();
if (ii == src->end()) {
if (Directional) {
ii = src->createEdge(dst, 0, std::forward<Args>(args)...);
} else {
dst->acquire(mflag);
EdgeTy* e = edges.mkEdge(std::forward<Args>(args)...);
ii = dst->createEdge(src, e, std::forward<Args>(args)...);
ii = src->createEdge(dst, e, std::forward<Args>(args)...);
}
}
return boost::make_filter_iterator(is_edge(), ii, src->end());
}
public:
/**
* Creates a new node holding the indicated data. Usually you should call
* {@link addNode()} afterwards.
*/
template<typename... Args>
GraphNode createNode(Args&&... args) {
gNode* N = &(nodes.emplace(std::forward<Args>(args)...));
N->active = false;
return GraphNode(N);
}
/**
* Adds a node to the graph.
*/
void addNode(const GraphNode& n, Galois::MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, true);
n->acquire(mflag);
n->active = true;
}
//! Gets the node data for a node.
node_data_reference getData(const GraphNode& n, Galois::MethodFlag mflag = MethodFlag::ALL) const {
assert(n);
Galois::Runtime::checkWrite(mflag, false);
n->acquire(mflag);
return n->getData();
}
//! Checks if a node is in the graph
bool containsNode(const GraphNode& n, Galois::MethodFlag mflag = MethodFlag::ALL) const {
assert(n);
n->acquire(mflag);
return n->active;
}
/**
* Removes a node from the graph along with all its outgoing/incoming edges
* for undirected graphs or outgoing edges for directed graphs.
*/
//FIXME: handle edge memory
void removeNode(GraphNode n, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(n);
Galois::Runtime::checkWrite(mflag, true);
n->acquire(mflag);
gNode* N = n;
if (N->active) {
N->active = false;
if (!Directional && edges.mustDel())
for (edge_iterator ii = edge_begin(n, MethodFlag::NONE), ee = edge_end(n, MethodFlag::NONE); ii != ee; ++ii)
edges.delEdge(ii->second());
N->edges.clear();
}
}
/**
* Resize the edges of the node. For best performance, should be done serially.
*/
void resizeEdges(GraphNode src, size_t size, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(src);
Galois::Runtime::checkWrite(mflag, false);
src->acquire(mflag);
src->resizeEdges(size);
}
/**
* Adds an edge to graph, replacing existing value if edge already exists.
*
* Ignore the edge data, let the caller use the returned iterator to set the
* value if desired. This frees us from dealing with the void edge data
* problem in this API
*/
edge_iterator addEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag = MethodFlag::ALL) {
return createEdgeWithReuse(src, dst, mflag);
}
//! Adds and initializes an edge to graph but does not check for duplicate edges
template<typename... Args>
edge_iterator addMultiEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
return createEdge(src, dst, mflag, std::forward<Args>(args)...);
}
//! Removes an edge from the graph
void removeEdge(GraphNode src, edge_iterator dst, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(src);
Galois::Runtime::checkWrite(mflag, true);
src->acquire(mflag);
if (Directional) {
src->erase(dst.base());
} else {
dst->first()->acquire(mflag);
EdgeTy* e = dst->second();
edges.delEdge(e);
src->erase(dst.base());
dst->first()->erase(src);
}
}
//! Finds if an edge between src and dst exists
edge_iterator findEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(src);
assert(dst);
src->acquire(mflag);
return boost::make_filter_iterator(is_edge(), src->find(dst), src->end());
}
/**
* Returns the edge data associated with the edge. It is an error to
* get the edge data for a non-existent edge. It is an error to get
* edge data for inactive edges. By default, the mflag is Galois::NONE
* because edge_begin() dominates this call and should perform the
* appropriate locking.
*/
edge_data_reference getEdgeData(edge_iterator ii, Galois::MethodFlag mflag = MethodFlag::NONE) const {
assert(ii->first()->active);
Galois::Runtime::checkWrite(mflag, false);
ii->first()->acquire(mflag);
return *ii->second();
}
//! Returns the destination of an edge
GraphNode getEdgeDst(edge_iterator ii) {
assert(ii->first()->active);
return GraphNode(ii->first());
}
//// General Things ////
//! Returns an iterator to the neighbors of a node
edge_iterator edge_begin(GraphNode N, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(N);
N->acquire(mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (typename gNode::iterator ii = N->begin(), ee = N->end(); ii != ee; ++ii) {
if (ii->first()->active)
ii->first()->acquire(mflag);
}
}
return boost::make_filter_iterator(is_edge(), N->begin(), N->end());
}
//! Returns the end of the neighbor iterator
edge_iterator edge_end(GraphNode N, Galois::MethodFlag mflag = MethodFlag::ALL) {
assert(N);
// Acquiring lock is not necessary: no valid use for an end pointer should
// ever require it
// N->acquire(mflag);
return boost::make_filter_iterator(is_edge(), N->end(), N->end());
}
/**
* An object with begin() and end() methods to iterate over the outgoing
* edges of N.
*/
detail::EdgesIterator<FirstGraph> out_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::EdgesIterator<FirstGraph>(*this, N, mflag);
}
/**
* Returns an iterator to all the nodes in the graph. Not thread-safe.
*/
iterator begin() {
return boost::make_transform_iterator(
boost::make_filter_iterator(is_node(),
nodes.begin(), nodes.end()),
makeGraphNode());
}
//! Returns the end of the node iterator. Not thread-safe.
iterator end() {
return boost::make_transform_iterator(
boost::make_filter_iterator(is_node(),
nodes.end(), nodes.end()),
makeGraphNode());
}
typedef iterator local_iterator;
local_iterator local_begin() {
return boost::make_transform_iterator(
boost::make_filter_iterator(is_node(),
nodes.local_begin(), nodes.local_end()),
makeGraphNode());
}
local_iterator local_end() {
return boost::make_transform_iterator(
boost::make_filter_iterator(is_node(),
nodes.local_end(), nodes.local_end()),
makeGraphNode());
}
/**
* Returns the number of nodes in the graph. Not thread-safe.
*/
unsigned int size() {
return std::distance(begin(), end());
}
//! Returns the size of edge data.
size_t sizeOfEdgeData() const {
return gNode::EdgeInfo::sizeOfSecond();
}
};
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/OCGraph.h
|
/** Out of core graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_OCGRAPH_H
#define GALOIS_GRAPH_OCGRAPH_H
#include "Galois/config.h"
#include "Galois/optional.h"
#include "Galois/LazyObject.h"
#include "Galois/LargeArray.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/MethodFlags.h"
#include <boost/iterator/counting_iterator.hpp>
#include <boost/utility.hpp>
#include GALOIS_CXX11_STD_HEADER(type_traits)
#include <string>
namespace Galois {
namespace Graph {
/**
* Binds the segment parameter of an out-of-core graph so that it can be used in
* place of a non out-of-core graph.
*/
template<typename Graph>
class BindSegmentGraph: private boost::noncopyable {
typedef typename Graph::segment_type segment_type;
Graph& graph;
segment_type segment;
public:
explicit BindSegmentGraph(Graph& g): graph(g) { }
BindSegmentGraph(Graph& g, segment_type s): graph(g), segment(s) { }
void setSegment(const segment_type& s) {
segment = s;
}
typedef typename Graph::GraphNode GraphNode;
typedef typename Graph::edge_data_type edge_data_type;
typedef typename Graph::node_data_type node_data_type;
typedef typename Graph::edge_data_reference edge_data_reference;
typedef typename Graph::node_data_reference node_data_reference;
typedef typename Graph::edge_iterator edge_iterator;
typedef typename Graph::in_edge_iterator in_edge_iterator;
typedef typename Graph::iterator iterator;
typedef typename Graph::const_iterator const_iterator;
typedef typename Graph::local_iterator local_iterator;
typedef typename Graph::const_local_iterator const_local_iterator;
node_data_reference getData(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return graph.getData(N, mflag);
}
edge_data_reference getEdgeData(edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) {
return graph.getEdgeData(segment, ni, mflag);
}
GraphNode getEdgeDst(edge_iterator ni) {
return graph.getEdgeDst(segment, ni);
}
uint64_t size() const { return graph.size(); }
uint64_t sizeEdges() const { return graph.sizeEdges(); }
iterator begin() const { return graph.begin(); }
iterator end() const { return graph.end(); }
local_iterator local_begin() const { return graph.local_begin(); }
local_iterator local_end() const { return graph.local_end(); }
edge_iterator edge_begin(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return graph.edge_begin(segment, N, mflag);
}
edge_iterator edge_end(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return graph.edge_end(segment, N, mflag);
}
detail::EdgesIterator<BindSegmentGraph> out_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::EdgesIterator<BindSegmentGraph>(*this, N, mflag);
}
edge_data_reference getInEdgeData(edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) {
return graph.getInEdgeData(segment, ni, mflag);
}
GraphNode getInEdgeDst(in_edge_iterator ni) {
return graph.getInEdgeDst(segment, ni);
}
in_edge_iterator in_edge_begin(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return graph.in_edge_begin(segment, N, mflag);
}
in_edge_iterator in_edge_end(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return graph.in_edge_end(segment, N, mflag);
}
detail::InEdgesIterator<BindSegmentGraph> in_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::InEdgesIterator<BindSegmentGraph>(*this, N, mflag);
}
size_t idFromNode(GraphNode N) {
return graph.idFromNode(N);
}
GraphNode nodeFromId(size_t N) {
return graph.nodeFromId(N);
}
};
//! Like {@link FileGraph} but allows partial loading of the graph.
class OCFileGraph: private boost::noncopyable {
public:
typedef uint32_t GraphNode;
typedef boost::counting_iterator<uint32_t> iterator;
typedef boost::counting_iterator<uint64_t> edge_iterator;
typedef uint64_t* edge_offset_iterator;
template<typename EdgeTy>
struct EdgeReference {
typedef typename LazyObject<EdgeTy>::reference type;
};
private:
class Block {
friend class OCFileGraph;
void* m_mapping;
size_t m_length;
char* m_data;
size_t m_begin;
size_t m_sizeof_data;
void unload();
void load(int fd, off64_t offset, size_t begin, size_t len, size_t sizeof_data);
public:
Block(): m_mapping(0) { }
char* get(size_t index) const {
char* p = m_data + (m_sizeof_data * (index - m_begin));
assert(p < reinterpret_cast<char*>(m_mapping) + m_length);
assert(m_mapping <= p);
return p;
}
};
struct Segment {
Block outs;
Block edgeData;
bool loaded;
Segment(): loaded(false) { }
void unload() {
outs.unload();
edgeData.unload();
loaded = false;
}
};
void* masterMapping;
int masterFD;
size_t masterLength;
uint64_t numEdges;
uint64_t numNodes;
uint64_t* outIdx;
public:
typedef Segment segment_type;
OCFileGraph(): masterMapping(0), masterFD(-1), numEdges(0), numNodes(0), outIdx(0) { }
~OCFileGraph();
iterator begin() const { return iterator(0); }
iterator end() const { return iterator(numNodes); }
uint64_t size() const { return numNodes; }
uint64_t sizeEdges() const { return numEdges; }
edge_iterator edge_begin(GraphNode n) const { return edge_iterator(n == 0 ? 0 : outIdx[n-1]); }
edge_iterator edge_end(GraphNode n) const { return edge_iterator(outIdx[n]); }
edge_offset_iterator edge_offset_begin() const { return outIdx; }
edge_offset_iterator edge_offset_end() const { return outIdx + numNodes; }
template<typename EdgeTy>
typename EdgeReference<EdgeTy>::type getEdgeData(const segment_type& s, edge_iterator it, typename std::enable_if<!std::is_same<void,EdgeTy>::value>::type* = 0) {
EdgeTy* p = reinterpret_cast<EdgeTy*>(s.edgeData.get(*it));
return *p;
}
template<typename EdgeTy>
typename EdgeReference<EdgeTy>::type getEdgeData(const segment_type& s, edge_iterator it, typename std::enable_if<std::is_same<void,EdgeTy>::value>::type* = 0) {
return 0;
}
GraphNode getEdgeDst(const segment_type& s, edge_iterator it) {
uint32_t* p = reinterpret_cast<uint32_t*>(s.outs.get(*it));
return *p;
}
void unload(segment_type& s) {
if (!s.loaded)
return;
s.outs.unload();
s.edgeData.unload();
s.loaded = false;
}
void load(segment_type& s, edge_iterator begin, edge_iterator end, size_t sizeof_data);
void structureFromFile(const std::string& fname);
};
struct read_oc_immutable_edge_graph_tag { };
template<typename NodeTy, typename EdgeTy,
bool HasNoLockable=false,
//bool UseNumaAlloc=false, // XXX: implement this
bool HasOutOfLineLockable=false>
class OCImmutableEdgeGraph:
private detail::LocalIteratorFeature<false>,
private detail::OutOfLineLockableFeature<HasOutOfLineLockable && !HasNoLockable> {
public:
template<bool _has_id>
struct with_id {
typedef OCImmutableEdgeGraph type;
};
template<typename _node_data>
struct with_node_data {
typedef OCImmutableEdgeGraph<_node_data,EdgeTy,HasNoLockable,HasOutOfLineLockable> type;
};
template<bool _has_no_lockable>
struct with_no_lockable {
typedef OCImmutableEdgeGraph<NodeTy,EdgeTy,_has_no_lockable,HasOutOfLineLockable> type;
};
template<bool _use_numa_alloc>
struct with_numa_alloc {
typedef OCImmutableEdgeGraph type;
};
template<bool _has_out_of_line_lockable>
struct with_out_of_line_lockable {
typedef OCImmutableEdgeGraph<NodeTy,EdgeTy,HasNoLockable,_has_out_of_line_lockable> type;
};
typedef read_oc_immutable_edge_graph_tag read_tag;
private:
typedef detail::NodeInfoBase<NodeTy,!HasNoLockable && !HasOutOfLineLockable> NodeInfo;
typedef LargeArray<NodeInfo> NodeData;
NodeData nodeData;
OCFileGraph outGraph;
OCFileGraph inGraphStorage;
OCFileGraph* inGraph;
uint64_t numNodes;
uint64_t numEdges;
public:
typedef int tt_is_segmented;
typedef typename OCFileGraph::GraphNode GraphNode;
typedef EdgeTy edge_data_type;
typedef NodeTy node_data_type;
typedef typename OCFileGraph::template EdgeReference<EdgeTy>::type edge_data_reference;
typedef typename NodeInfo::reference node_data_reference;
typedef typename OCFileGraph::edge_iterator edge_iterator;
typedef edge_iterator in_edge_iterator;
typedef typename OCFileGraph::iterator iterator;
typedef iterator const_iterator;
typedef boost::counting_iterator<GraphNode> local_iterator;
typedef local_iterator const_local_iterator;
class segment_type {
template<typename,typename,bool,bool> friend class OCImmutableEdgeGraph;
OCFileGraph::segment_type out;
OCFileGraph::segment_type in;
iterator nodeBegin;
iterator nodeEnd;
public:
//! Returns true if segment has been loaded into memory
bool loaded() const { return out.loaded; }
//! Returns true if segment represents a non-empty range
explicit operator bool() { return nodeBegin != nodeEnd; }
size_t size() const { return std::distance(nodeBegin, nodeEnd); }
bool containsNode(size_t n) const { // XXX: hack
return *nodeBegin <= n && n < *nodeEnd;
}
};
private:
Galois::optional<segment_type> memorySegment;
segment_type computeSegment(size_t startNode, size_t numEdges) {
typedef typename OCFileGraph::edge_offset_iterator edge_offset_iterator;
segment_type ret;
edge_offset_iterator outStart = outGraph.edge_offset_begin();
edge_offset_iterator outEnd = outGraph.edge_offset_end();
std::advance(outStart, startNode);
if (outStart == outEnd) {
ret.nodeBegin = ret.nodeEnd = iterator(0);
return ret;
}
edge_offset_iterator outNext = std::lower_bound(outStart + 1, outEnd, *outStart + numEdges);
ptrdiff_t outNodes = std::distance(outStart, outNext);
edge_offset_iterator inStart = inGraph->edge_offset_begin();
edge_offset_iterator inEnd = inGraph->edge_offset_end();
std::advance(inStart, startNode);
edge_offset_iterator inNext = std::lower_bound(inStart + 1, inEnd, *inStart + numEdges);
ptrdiff_t inNodes = std::distance(inStart, inNext);
ptrdiff_t nodes = std::min(outNodes, inNodes);
ret.nodeBegin = iterator(startNode);
ret.nodeEnd = iterator(startNode + nodes);
return ret;
}
void load(segment_type& seg, size_t sizeof_data) {
outGraph.load(seg.out, outGraph.edge_begin(*seg.nodeBegin), outGraph.edge_end(seg.nodeEnd[-1]), sizeof_data);
if (inGraph != &outGraph)
inGraph->load(seg.in, inGraph->edge_begin(*seg.nodeBegin), inGraph->edge_end(seg.nodeEnd[-1]), sizeof_data);
else
seg.in = seg.out;
}
template<bool _A1 = HasNoLockable, bool _A2 = HasOutOfLineLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<!_A1 && !_A2>::type* = 0) {
Galois::Runtime::acquire(&nodeData[N], mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A1 && !_A2>::type* = 0) {
this->outOfLineAcquire(idFromNode(N), mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A2>::type* = 0) { }
public:
~OCImmutableEdgeGraph() {
if (memorySegment) {
outGraph.unload(memorySegment->out);
if (inGraph != &outGraph)
inGraph->unload(memorySegment->in);
}
}
void keepInMemory() {
memorySegment = Galois::optional<segment_type>(computeSegment(0, numEdges));
load(*memorySegment, LazyObject<EdgeTy>::size_of::value);
}
/**
* Returns a segment starting from the beginning of the graph with either
* (1) some number of nodes with all their edges but no more than numEdges
* else (2) one node and all its edges.
*/
segment_type nextSegment(size_t edges) {
if (memorySegment)
return *memorySegment;
else
return computeSegment(0, edges);
}
/**
* Returns the next segment after cur.
*/
segment_type nextSegment(const segment_type& cur, size_t edges) {
return computeSegment(*cur.nodeEnd, edges);
}
void load(segment_type& seg) {
if (memorySegment)
return;
load(seg, LazyObject<EdgeTy>::size_of::value);
}
void unload(segment_type& seg) {
if (memorySegment)
return;
outGraph.unload(seg.out);
if (inGraph != &outGraph)
inGraph->unload(seg.in);
}
iterator begin(const segment_type& cur) { return cur.nodeBegin; }
iterator end(const segment_type& cur) { return cur.nodeEnd; }
node_data_reference getData(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, false);
NodeInfo& NI = nodeData[N];
acquireNode(N, mflag);
return NI.getData();
}
edge_data_reference getEdgeData(const segment_type& segment, edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) {
Galois::Runtime::checkWrite(mflag, false);
return outGraph.getEdgeData<EdgeTy>(segment.out, ni);
}
GraphNode getEdgeDst(const segment_type& segment, edge_iterator ni) {
return outGraph.getEdgeDst(segment.out, ni);
}
uint64_t size() const { return numNodes; }
uint64_t sizeEdges() const { return numEdges; }
iterator begin() const { return outGraph.begin(); }
iterator end() const { return outGraph.end(); }
const_local_iterator local_begin() const { return const_local_iterator(this->localBegin(numNodes)); }
const_local_iterator local_end() const { return const_local_iterator(this->localEnd(numNodes)); }
local_iterator local_begin() { return local_iterator(this->localBegin(numNodes)); }
local_iterator local_end() { return local_iterator(this->localEnd(numNodes)); }
edge_iterator edge_begin(const segment_type& segment, GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (edge_iterator ii = outGraph.edge_begin(N), ee = outGraph.edge_end(N); ii != ee; ++ii) {
acquireNode(outGraph.getEdgeDst(segment.out, *ii), mflag);
}
}
return outGraph.edge_begin(N);
}
edge_iterator edge_end(const segment_type& segment, GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
return outGraph.edge_end(N);
}
edge_data_reference getInEdgeData(const segment_type& segment, edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) {
Galois::Runtime::checkWrite(mflag, false);
return inGraph->getEdgeData<EdgeTy>(segment.in, ni);
}
GraphNode getInEdgeDst(const segment_type& segment, in_edge_iterator ni) {
return inGraph->getEdgeDst(segment.in, ni);
}
in_edge_iterator in_edge_begin(const segment_type& segment, GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (in_edge_iterator ii = inGraph->edge_begin(N), ee = inGraph->edge_end(N); ii != ee; ++ii) {
acquireNode(inGraph->getEdgeDst(segment.in, ii), mflag);
}
}
return inGraph->edge_begin(N);
}
in_edge_iterator in_edge_end(const segment_type& segment, GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
return inGraph->edge_end(N);
}
size_t idFromNode(GraphNode N) {
return N;
}
GraphNode nodeFromId(size_t N) {
return N;
}
//! Assumes that the graph is symmetric
void createFrom(const std::string& fname) {
outGraph.structureFromFile(fname);
numNodes = outGraph.size();
numEdges = outGraph.sizeEdges();
nodeData.create(numNodes);
inGraph = &outGraph;
this->outOfLineAllocateInterleaved(numNodes);
for (size_t i = 0; i < numNodes; ++i)
this->outOfLineConstructAt(i);
}
void createFrom(const std::string& fname, const std::string& transpose) {
outGraph.structureFromFile(fname);
inGraphStorage.structureFromFile(transpose);
numNodes = outGraph.size();
if (numNodes != inGraphStorage.size())
GALOIS_DIE("graph does not have the same number of nodes as its transpose");
numEdges = outGraph.sizeEdges();
nodeData.create(numNodes);
inGraph = &inGraphStorage;
this->outOfLineAllocateInterleaved(numNodes);
for (size_t i = 0; i < numNodes; ++i)
this->outOfLineConstructAt(i);
}
};
template<typename GraphTy,typename... Args>
void readGraphDispatch(GraphTy& graph, read_oc_immutable_edge_graph_tag, Args&&... args) {
graph.createFrom(std::forward<Args>(args)...);
}
} // end namespace
} // end namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/TypeTraits.h
|
/** Graph type traits -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_TYPETRAITS_H
#define GALOIS_GRAPH_TYPETRAITS_H
#include <boost/mpl/has_xxx.hpp>
namespace Galois {
namespace Graph {
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_is_segmented)
template<typename T>
struct is_segmented: public has_tt_is_segmented<T> {};
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/LC_Morph_Graph.h
|
/** Appendable semi-LC graphs -*- C++ -*-
* @file
* @section License
*
* Graph which is like other LC graphs but allows adding edges.
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Nikunj Yadav [email protected]
*/
#ifndef GALOIS_GRAPH_LC_MORPH_GRAPH_H
#define GALOIS_GRAPH_LC_MORPH_GRAPH_H
#include "Galois/config.h"
#include "Galois/Bag.h"
#include "Galois/LargeArray.h"
#include "Galois/Graph/FileGraph.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/MethodFlags.h"
#include <boost/mpl/if.hpp>
#include GALOIS_CXX11_STD_HEADER(type_traits)
namespace Galois {
namespace Graph {
//! Local computation graph (i.e., graph structure does not change)
template<typename NodeTy, typename EdgeTy,
bool HasNoLockable=false,
bool UseNumaAlloc=false,
bool HasOutOfLineLockable=false,
bool HasId=false>
class LC_Morph_Graph:
private boost::noncopyable,
private detail::OutOfLineLockableFeature<HasOutOfLineLockable && !HasNoLockable> {
template<typename Graph> friend class LC_InOut_Graph;
public:
template<bool _has_id>
struct with_id { typedef LC_Morph_Graph<NodeTy,EdgeTy,HasNoLockable,UseNumaAlloc,HasOutOfLineLockable,_has_id> type; };
template<typename _node_data>
struct with_node_data { typedef LC_Morph_Graph<_node_data,EdgeTy,HasNoLockable,UseNumaAlloc,HasOutOfLineLockable,HasId> type; };
template<bool _has_no_lockable>
struct with_no_lockable { typedef LC_Morph_Graph<NodeTy,EdgeTy,_has_no_lockable,UseNumaAlloc,HasOutOfLineLockable,HasId> type; };
template<bool _use_numa_alloc>
struct with_numa_alloc { typedef LC_Morph_Graph<NodeTy,EdgeTy,HasNoLockable,_use_numa_alloc,HasOutOfLineLockable,HasId> type; };
template<bool _has_out_of_line_lockable>
struct with_out_of_line_lockable { typedef LC_Morph_Graph<NodeTy,EdgeTy,HasNoLockable,UseNumaAlloc,_has_out_of_line_lockable,_has_out_of_line_lockable||HasId> type; };
typedef read_with_aux_graph_tag read_tag;
protected:
class NodeInfo;
typedef detail::EdgeInfoBase<NodeInfo*, EdgeTy> EdgeInfo;
typedef Galois::InsertBag<NodeInfo> Nodes;
typedef detail::NodeInfoBaseTypes<NodeTy,!HasNoLockable && !HasOutOfLineLockable> NodeInfoTypes;
struct EdgeHolder {
EdgeInfo* begin;
EdgeInfo* end;
EdgeHolder* next;
};
class NodeInfo: public detail::NodeInfoBase<NodeTy,!HasNoLockable && !HasOutOfLineLockable> {
typedef detail::NodeInfoBase<NodeTy,!HasNoLockable && !HasOutOfLineLockable> Super;
friend class LC_Morph_Graph;
EdgeInfo* edgeBegin;
EdgeInfo* edgeEnd;
public:
template<typename... Args>
NodeInfo(Args&&... args): Super(std::forward<Args>(args)...) { }
};
struct makeGraphNode: public std::unary_function<NodeInfo&, NodeInfo*> {
NodeInfo* operator()(NodeInfo& data) const { return &data; }
};
struct dst_equals {
NodeInfo* dst;
dst_equals(NodeInfo* d): dst(d) { }
bool operator()(const EdgeInfo& edge) { return edge.dst == dst; }
};
public:
typedef NodeInfo* GraphNode;
typedef EdgeTy edge_data_type;
typedef NodeTy node_data_type;
typedef typename NodeInfoTypes::reference node_data_reference;
typedef typename EdgeInfo::reference edge_data_reference;
typedef EdgeInfo* edge_iterator;
typedef boost::transform_iterator<makeGraphNode,typename Nodes::iterator> iterator;
typedef boost::transform_iterator<makeGraphNode,typename Nodes::const_iterator> const_iterator;
typedef iterator local_iterator;
typedef const_iterator const_local_iterator;
typedef LargeArray<GraphNode> ReadGraphAuxData;
protected:
Nodes nodes;
Galois::Runtime::PerThreadStorage<EdgeHolder*> edges;
template<bool _A1 = HasNoLockable, bool _A2 = HasOutOfLineLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<!_A1 && !_A2>::type* = 0) {
Galois::Runtime::acquire(N, mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A1 && !_A2>::type* = 0) {
this->outOfLineAcquire(getId(N), mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A2>::type* = 0) { }
template<bool _Enable = HasId>
size_t getId(GraphNode N, typename std::enable_if<_Enable>::type* = 0) {
return N->getId();
}
public:
~LC_Morph_Graph() {
for (typename Nodes::iterator ii = nodes.begin(), ei = nodes.end(); ii != ei; ++ii) {
NodeInfo& n = *ii;
EdgeInfo* edgeBegin = n.edgeBegin;
EdgeInfo* edgeEnd = n.edgeEnd;
if (EdgeInfo::has_value) {
while (edgeBegin != edgeEnd) {
edgeBegin->destroy();
++edgeBegin;
}
}
}
}
node_data_reference getData(const GraphNode& N, MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, false);
acquireNode(N, mflag);
return N->getData();
}
edge_data_reference getEdgeData(edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) {
Galois::Runtime::checkWrite(mflag, false);
acquireNode(ni->dst, mflag);
return ni->get();
}
GraphNode getEdgeDst(edge_iterator ni) {
//Galois::Runtime::checkWrite(mflag, false);
//acquireNode(ni->dst, mflag);
return GraphNode(ni->dst);
}
/**
* Returns an iterator to all the nodes in the graph. Not thread-safe.
*/
iterator begin() {
return boost::make_transform_iterator(nodes.begin(), makeGraphNode());
}
//! Returns the end of the node iterator. Not thread-safe.
iterator end() {
return boost::make_transform_iterator(nodes.end(), makeGraphNode());
}
local_iterator local_begin() {
return boost::make_transform_iterator(nodes.local_begin(), makeGraphNode());
}
local_iterator local_end() {
return boost::make_transform_iterator(nodes.local_end(), makeGraphNode());
}
edge_iterator edge_begin(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (edge_iterator ii = N->edgeBegin, ee = N->edgeEnd; ii != ee; ++ii) {
acquireNode(ii->dst, mflag);
}
}
return N->edgeBegin;
}
edge_iterator edge_end(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return N->edgeEnd;
}
template<typename... Args>
GraphNode createNode(int nedges, Args&&... args) {
Galois::Runtime::checkWrite(MethodFlag::ALL, true);
NodeInfo* N = &nodes.emplace(std::forward<Args>(args)...);
acquireNode(N, MethodFlag::ALL);
EdgeHolder*& local_edges = *edges.getLocal();
if (!local_edges || std::distance(local_edges->begin, local_edges->end) < nedges) {
EdgeHolder* old = local_edges;
char* newblock = (char*)Runtime::MM::pageAlloc();
local_edges = (EdgeHolder*)newblock;
local_edges->next = old;
char* estart = newblock + sizeof(EdgeHolder);
if ((uintptr_t)estart % sizeof(EdgeInfo)) // Not aligned
#ifdef HAVE_CXX11_ALIGNOF
estart += sizeof(EdgeInfo) - ((uintptr_t)estart % alignof(EdgeInfo));
#else
estart += sizeof(EdgeInfo) - ((uintptr_t)estart % 8);
#endif
local_edges->begin = (EdgeInfo*)estart;
char* eend = newblock + Runtime::MM::pageSize;
eend -= (uintptr_t)eend % sizeof(EdgeInfo);
local_edges->end = (EdgeInfo*)eend;
}
N->edgeBegin = N->edgeEnd = local_edges->begin;
local_edges->begin += nedges;
return GraphNode(N);
}
template<typename... Args>
edge_iterator addEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
Galois::Runtime::checkWrite(mflag, true);
acquireNode(src, mflag);
auto it = std::find_if(src->edgeBegin, src->edgeEnd, dst_equals(dst));
if (it == src->edgeEnd) {
it->dst = dst;
it->construct(std::forward<Args>(args)...);
src->edgeEnd++;
}
return it;
}
template<typename... Args>
edge_iterator addEdgeWithoutCheck(GraphNode src, GraphNode dst, Galois::MethodFlag mflag, Args&&... args) {
Galois::Runtime::checkWrite(mflag, true);
acquireNode(src, mflag);
auto it = src->edgeEnd;
it->dst = dst;
it->construct(std::forward<Args>(args)...);
src->edgeEnd++;
return it;
}
edge_iterator findEdge(GraphNode src, GraphNode dst, Galois::MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, true);
acquireNode(src, mflag);
return std::find_if(src->edgeBegin, src->edgeEnd, dst_equals(dst));
}
void allocateFrom(FileGraph& graph, ReadGraphAuxData& aux) {
size_t numNodes = graph.size();
if (UseNumaAlloc) {
aux.allocateLocal(numNodes, false);
this->outOfLineAllocateLocal(numNodes, false);
} else {
aux.allocateInterleaved(numNodes);
this->outOfLineAllocateInterleaved(numNodes);
}
}
void constructNodesFrom(FileGraph& graph, unsigned tid, unsigned total, ReadGraphAuxData& aux) {
auto r = graph.divideBy(
sizeof(NodeInfo) + LC_Morph_Graph::size_of_out_of_line::value,
sizeof(EdgeInfo),
tid, total);
size_t id = *r.first;
for (FileGraph::iterator ii = r.first, ei = r.second; ii != ei; ++ii, ++id) {
aux[id] = createNode(std::distance(graph.edge_begin(*ii), graph.edge_end(*ii)));
}
}
void constructEdgesFrom(FileGraph& graph, unsigned tid, unsigned total, const ReadGraphAuxData& aux) {
auto r = graph.divideBy(
sizeof(NodeInfo) + LC_Morph_Graph::size_of_out_of_line::value,
sizeof(EdgeInfo),
tid, total);
for (FileGraph::iterator ii = r.first, ei = r.second; ii != ei; ++ii) {
for (FileGraph::edge_iterator nn = graph.edge_begin(*ii), en = graph.edge_end(*ii); nn != en; ++nn) {
if (EdgeInfo::has_value) {
addEdgeWithoutCheck(aux[*ii], aux[graph.getEdgeDst(nn)], Galois::MethodFlag::NONE, graph.getEdgeData<uint32_t>(nn));
} else {
addEdgeWithoutCheck(aux[*ii], aux[graph.getEdgeDst(nn)], Galois::MethodFlag::NONE);
}
}
}
}
};
} // end namespace
} // end namespace
#endif /* LC_MORPH_GRAPH_H_ */
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/LC_Linear_Graph.h
|
/** Local Computation graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* @author Andrew Lenharth <[email protected]>
* @author Donald Nguyen <[email protected]>
*/
#ifndef GALOIS_GRAPH_LC_LINEAR_GRAPH_H
#define GALOIS_GRAPH_LC_LINEAR_GRAPH_H
#include "Galois/config.h"
#include "Galois/LargeArray.h"
#include "Galois/Graph/FileGraph.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/MethodFlags.h"
#include <boost/mpl/if.hpp>
#include GALOIS_CXX11_STD_HEADER(type_traits)
namespace Galois {
namespace Graph {
/**
* Local computation graph (i.e., graph structure does not change). The data
* representation is a modification of {@link LC_CSR_Graph} where the edge data
* and node data is stored inline with the adjacency information.
*
* The position of template parameters may change between Galois releases; the
* most robust way to specify them is through the with_XXX nested templates.
*/
template<typename NodeTy, typename EdgeTy,
bool HasNoLockable=false,
bool UseNumaAlloc=false,
bool HasOutOfLineLockable=false,
bool HasId=false>
class LC_Linear_Graph:
private boost::noncopyable,
private detail::LocalIteratorFeature<UseNumaAlloc>,
private detail::OutOfLineLockableFeature<HasOutOfLineLockable && !HasNoLockable> {
template<typename Graph> friend class LC_InOut_Graph;
public:
template<bool _has_id>
struct with_id { typedef LC_Linear_Graph<NodeTy,EdgeTy,HasNoLockable,UseNumaAlloc,HasOutOfLineLockable,_has_id> type; };
template<typename _node_data>
struct with_node_data { typedef LC_Linear_Graph<_node_data,EdgeTy,HasNoLockable,UseNumaAlloc,HasOutOfLineLockable,HasId> type; };
template<bool _has_no_lockable>
struct with_no_lockable { typedef LC_Linear_Graph<NodeTy,EdgeTy,_has_no_lockable,UseNumaAlloc,HasOutOfLineLockable,HasId> type; };
template<bool _use_numa_alloc>
struct with_numa_alloc { typedef LC_Linear_Graph<NodeTy,EdgeTy,HasNoLockable,_use_numa_alloc,HasOutOfLineLockable,HasId> type; };
template<bool _has_out_of_line_lockable>
struct with_out_of_line_lockable { typedef LC_Linear_Graph<NodeTy,EdgeTy,HasNoLockable,UseNumaAlloc,_has_out_of_line_lockable,_has_out_of_line_lockable||HasId> type; };
typedef read_with_aux_graph_tag read_tag;
protected:
class NodeInfo;
typedef detail::EdgeInfoBase<NodeInfo*,EdgeTy> EdgeInfo;
typedef LargeArray<NodeInfo*> Nodes;
typedef detail::NodeInfoBaseTypes<NodeTy,!HasNoLockable && !HasOutOfLineLockable> NodeInfoTypes;
class NodeInfo:
public detail::NodeInfoBase<NodeTy,!HasNoLockable && !HasOutOfLineLockable>,
public detail::IntrusiveId<typename boost::mpl::if_c<HasId,uint32_t,void>::type> {
friend class LC_Linear_Graph;
int numEdges;
EdgeInfo* edgeBegin() {
NodeInfo* n = this;
++n; //start of edges
return reinterpret_cast<EdgeInfo*>(n);
}
EdgeInfo* edgeEnd() {
EdgeInfo* ei = edgeBegin();
ei += numEdges;
return ei;
}
NodeInfo* next() {
NodeInfo* ni = this;
EdgeInfo* ei = edgeEnd();
while (reinterpret_cast<char*>(ni) < reinterpret_cast<char*>(ei))
++ni;
return ni;
}
};
public:
typedef NodeInfo* GraphNode;
typedef EdgeTy edge_data_type;
typedef NodeTy node_data_type;
typedef typename NodeInfoTypes::reference node_data_reference;
typedef typename EdgeInfo::reference edge_data_reference;
typedef EdgeInfo* edge_iterator;
typedef NodeInfo** iterator;
typedef NodeInfo*const * const_iterator;
typedef iterator local_iterator;
typedef const_iterator const_local_iterator;
typedef int ReadGraphAuxData;
protected:
LargeArray<char> data;
uint64_t numNodes;
uint64_t numEdges;
Nodes nodes;
template<bool _A1 = HasNoLockable, bool _A2 = HasOutOfLineLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<!_A1 && !_A2>::type* = 0) {
Galois::Runtime::acquire(N, mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A1 && !_A2>::type* = 0) {
this->outOfLineAcquire(getId(N), mflag);
}
template<bool _A1 = HasOutOfLineLockable, bool _A2 = HasNoLockable>
void acquireNode(GraphNode N, MethodFlag mflag, typename std::enable_if<_A2>::type* = 0) { }
edge_iterator raw_begin(GraphNode N) {
return N->edgeBegin();
}
edge_iterator raw_end(GraphNode N) {
return N->edgeEnd();
}
template<bool _Enable = HasId>
size_t getId(GraphNode N, typename std::enable_if<_Enable>::type* = 0) {
return N->getId();
}
template<bool _Enable = HasId>
GraphNode getNode(size_t n, typename std::enable_if<_Enable>::type* = 0) {
return nodes[n];
}
public:
~LC_Linear_Graph() {
for (typename Nodes::iterator ii = nodes.begin(), ei = nodes.end(); ii != ei; ++ii) {
NodeInfo* n = *ii;
EdgeInfo* edgeBegin = n->edgeBegin();
EdgeInfo* edgeEnd = n->edgeEnd();
if (EdgeInfo::has_value) {
while (edgeBegin != edgeEnd) {
edgeBegin->destroy();
++edgeBegin;
}
}
n->~NodeInfo();
}
}
node_data_reference getData(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
Galois::Runtime::checkWrite(mflag, false);
acquireNode(N, mflag);
return N->getData();
}
edge_data_reference getEdgeData(edge_iterator ni, MethodFlag mflag = MethodFlag::NONE) const {
Galois::Runtime::checkWrite(mflag, false);
return ni->get();
}
GraphNode getEdgeDst(edge_iterator ni) const {
return ni->dst;
}
uint64_t size() const { return numNodes; }
uint64_t sizeEdges() const { return numEdges; }
iterator begin() { return &nodes[0]; }
iterator end() { return &nodes[numNodes]; }
const_iterator begin() const { return &nodes[0]; }
const_iterator end() const { return &nodes[numNodes]; }
local_iterator local_begin() { return &nodes[this->localBegin(numNodes)]; }
local_iterator local_end() { return &nodes[this->localEnd(numNodes)]; }
const_local_iterator local_begin() const { return &nodes[this->localBegin(numNodes)]; }
const_local_iterator local_end() const { return &nodes[this->localEnd(numNodes)]; }
edge_iterator edge_begin(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
if (Galois::Runtime::shouldLock(mflag)) {
for (edge_iterator ii = N->edgeBegin(), ee = N->edgeEnd(); ii != ee; ++ii) {
acquireNode(ii->dst, mflag);
}
}
return N->edgeBegin();
}
edge_iterator edge_end(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
return N->edgeEnd();
}
detail::EdgesIterator<LC_Linear_Graph> out_edges(GraphNode N, MethodFlag mflag = MethodFlag::ALL) {
return detail::EdgesIterator<LC_Linear_Graph>(*this, N, mflag);
}
/**
* Sorts outgoing edges of a node. Comparison function is over EdgeTy.
*/
template<typename CompTy>
void sortEdgesByEdgeData(GraphNode N, const CompTy& comp = std::less<EdgeTy>(), MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
std::sort(N->edgeBegin(), N->edgeEnd(), detail::EdgeSortCompWrapper<EdgeInfo,CompTy>(comp));
}
/**
* Sorts outgoing edges of a node. Comparison function is over <code>EdgeSortValue<EdgeTy></code>.
*/
template<typename CompTy>
void sortEdges(GraphNode N, const CompTy& comp, MethodFlag mflag = MethodFlag::ALL) {
acquireNode(N, mflag);
std::sort(N->edgeBegin(), N->edgeEnd(), comp);
}
void allocateFrom(FileGraph& graph, const ReadGraphAuxData&) {
numNodes = graph.size();
numEdges = graph.sizeEdges();
if (UseNumaAlloc) {
data.allocateLocal(sizeof(NodeInfo) * numNodes * 2 + sizeof(EdgeInfo) * numEdges, false);
nodes.allocateLocal(numNodes, false);
this->outOfLineAllocateLocal(numNodes, false);
} else {
data.allocateInterleaved(sizeof(NodeInfo) * numNodes * 2 + sizeof(EdgeInfo) * numEdges);
nodes.allocateInterleaved(numNodes);
this->outOfLineAllocateInterleaved(numNodes);
}
}
void constructNodesFrom(FileGraph& graph, unsigned tid, unsigned total, const ReadGraphAuxData&) {
auto r = graph.divideBy(
Nodes::size_of::value + 2 * sizeof(NodeInfo) + LC_Linear_Graph::size_of_out_of_line::value,
sizeof(EdgeInfo),
tid, total);
this->setLocalRange(*r.first, *r.second);
NodeInfo* curNode = reinterpret_cast<NodeInfo*>(data.data());
size_t id = *r.first;
size_t edges = *graph.edge_begin(*r.first);
size_t bytes = edges * sizeof(EdgeInfo) + 2 * (id + 1) * sizeof(NodeInfo);
curNode += bytes / sizeof(NodeInfo);
for (FileGraph::iterator ii = r.first, ei = r.second; ii != ei; ++ii, ++id) {
nodes.constructAt(*ii);
new (curNode) NodeInfo();
//curNode->construct();
curNode->setId(id);
curNode->numEdges = std::distance(graph.edge_begin(*ii), graph.edge_end(*ii));
nodes[*ii] = curNode;
curNode = curNode->next();
}
}
void constructEdgesFrom(FileGraph& graph, unsigned tid, unsigned total, const ReadGraphAuxData&) {
typedef typename EdgeInfo::value_type EDV;
auto r = graph.divideBy(
Nodes::size_of::value + 2 * sizeof(NodeInfo) + LC_Linear_Graph::size_of_out_of_line::value,
sizeof(EdgeInfo),
tid, total);
for (FileGraph::iterator ii = r.first, ei = r.second; ii != ei; ++ii) {
EdgeInfo* edge = nodes[*ii]->edgeBegin();
for (FileGraph::edge_iterator nn = graph.edge_begin(*ii), en = graph.edge_end(*ii); nn != en; ++nn) {
if (EdgeInfo::has_value)
edge->construct(graph.getEdgeData<EDV>(nn));
edge->dst = nodes[graph.getEdgeDst(nn)];
++edge;
}
}
}
};
} // end namespace
} // end namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
|
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Graph/FileGraph.h
|
/** Basic serialized graphs -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @section Description
*
* This file contains low-level representations of graphs, closely tied with
* their serialized form in the Galois system. These graphs are very basic
* (e.g., they don't support concurrency) and are intended to be converted
* to/from more specialized graph data structures. More full featured graphs
* are available in LCGraph.h.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef GALOIS_GRAPH_FILEGRAPH_H
#define GALOIS_GRAPH_FILEGRAPH_H
#include "Galois/Endian.h"
#include "Galois/MethodFlags.h"
#include "Galois/LargeArray.h"
#include "Galois/Graph/Details.h"
#include "Galois/Runtime/Context.h"
#include "Galois/Runtime/ll/CacheLineStorage.h"
#include "Galois/Runtime/ll/CompilerSpecific.h"
#include <boost/iterator/counting_iterator.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/utility.hpp>
#include GALOIS_CXX11_STD_HEADER(type_traits)
//#include <fstream>
#include <string.h>
namespace Galois {
namespace Graph {
//! Graph serialized to a file
class FileGraph: private boost::noncopyable {
friend class FileGraphAllocator;
public:
typedef uint32_t GraphNode;
protected:
void* volatile masterMapping;
size_t masterLength;
uint64_t sizeofEdge;
int masterFD;
uint64_t* outIdx;
uint32_t* outs;
char* edgeData;
uint64_t numEdges;
uint64_t numNodes;
uint64_t getEdgeIdx(GraphNode src, GraphNode dst) const;
uint32_t* raw_neighbor_begin(GraphNode N) const;
uint32_t* raw_neighbor_end(GraphNode N) const;
struct Convert32: public std::unary_function<uint32_t, uint32_t> {
uint32_t operator()(uint32_t x) const {
return convert_le32(x);
}
};
struct Convert64: public std::unary_function<uint64_t,uint64_t> {
uint64_t operator()(uint64_t x) const {
return convert_le64(x);
}
};
//! Initializes a graph from block of memory
void parse(void* m);
//! Reads graph connectivity information from memory
void structureFromMem(void* mem, size_t len, bool clone);
void* structureFromArrays(uint64_t* outIdxs, uint64_t numNodes,
uint32_t* outs, uint64_t numEdges, size_t sizeofEdgeData);
void* structureFromGraph(FileGraph& g, size_t sizeofEdgeData);
/**
* Finds the first node N such that
*
* N * nodeSize +
* (sum_{i=0}^{N-1} E[i]) * edgeSize
* >=
* targetSize
*
* in range [lb, ub). Returns ub if unsuccessful.
*/
size_t findIndex(size_t nodeSize, size_t edgeSize, size_t targetSize, size_t lb, size_t ub);
public:
// Node Handling
//! Checks if a node is in the graph (already added)
bool containsNode(const GraphNode n) const {
return n < numNodes;
}
// Edge Handling
template<typename EdgeTy>
EdgeTy& getEdgeData(GraphNode src, GraphNode dst) {
assert(sizeofEdge == sizeof(EdgeTy));
return reinterpret_cast<EdgeTy*>(edgeData)[getEdgeIdx(src, dst)];
}
// Iterators
typedef boost::counting_iterator<uint64_t> edge_iterator;
edge_iterator edge_begin(GraphNode N) const;
edge_iterator edge_end(GraphNode N) const;
detail::EdgesWithNoFlagIterator<FileGraph> out_edges(GraphNode N) {
return detail::EdgesWithNoFlagIterator<FileGraph>(*this, N);
}
/**
* Sorts outgoing edges of a node. Comparison function is over EdgeTy.
*/
template<typename EdgeTy, typename CompTy>
void sortEdgesByEdgeData(GraphNode N, const CompTy& comp = std::less<EdgeTy>()) {
typedef LargeArray<GraphNode> EdgeDst;
typedef LargeArray<EdgeTy> EdgeData;
typedef detail::EdgeSortIterator<GraphNode,uint64_t,EdgeDst,EdgeData> edge_sort_iterator;
EdgeDst edgeDst(outs, numEdges);
EdgeData ed(edgeData, numEdges);
edge_sort_iterator begin(std::distance(outs, raw_neighbor_begin(N)), &edgeDst, &ed);
edge_sort_iterator end(std::distance(outs, raw_neighbor_end(N)), &edgeDst, &ed);
std::sort(begin, end, detail::EdgeSortCompWrapper<EdgeSortValue<GraphNode,EdgeTy>,CompTy>(comp));
}
/**
* Sorts outgoing edges of a node. Comparison function is over <code>EdgeSortValue<EdgeTy></code>.
*/
template<typename EdgeTy, typename CompTy>
void sortEdges(GraphNode N, const CompTy& comp) {
typedef LargeArray<GraphNode> EdgeDst;
typedef LargeArray<EdgeTy> EdgeData;
typedef detail::EdgeSortIterator<GraphNode,uint64_t,EdgeDst,EdgeData> edge_sort_iterator;
EdgeDst edgeDst(outs, numEdges);
EdgeData ed(edgeData, numEdges);
edge_sort_iterator begin(std::distance(outs, raw_neighbor_begin(N)), &edgeDst, &ed);
edge_sort_iterator end(std::distance(outs, raw_neighbor_end(N)), &edgeDst, &ed);
std::sort(begin, end, comp);
}
template<typename EdgeTy>
EdgeTy& getEdgeData(edge_iterator it) const {
return reinterpret_cast<EdgeTy*>(edgeData)[*it];
}
GraphNode getEdgeDst(edge_iterator it) const;
typedef boost::transform_iterator<Convert32, uint32_t*> neighbor_iterator;
typedef boost::transform_iterator<Convert32, uint32_t*> node_id_iterator;
typedef boost::transform_iterator<Convert64, uint64_t*> edge_id_iterator;
typedef boost::counting_iterator<uint64_t> iterator;
neighbor_iterator neighbor_begin(GraphNode N) const {
return boost::make_transform_iterator(raw_neighbor_begin(N), Convert32());
}
neighbor_iterator neighbor_end(GraphNode N) const {
return boost::make_transform_iterator(raw_neighbor_end(N), Convert32());
}
template<typename EdgeTy>
EdgeTy* edge_data_begin() const {
return reinterpret_cast<EdgeTy*>(edgeData);
}
template<typename EdgeTy>
EdgeTy* edge_data_end() const {
assert(sizeof(EdgeTy) == sizeofEdge);
EdgeTy* r = reinterpret_cast<EdgeTy*>(edgeData);
return &r[numEdges];
}
iterator begin() const;
iterator end() const;
/**
* Divides nodes into balanced ranges.
*/
std::pair<iterator,iterator> divideBy(size_t nodeSize, size_t edgeSize, unsigned id, unsigned total);
node_id_iterator node_id_begin() const;
node_id_iterator node_id_end() const;
edge_id_iterator edge_id_begin() const;
edge_id_iterator edge_id_end() const;
template<typename EdgeTy>
EdgeTy& getEdgeData(neighbor_iterator it) {
return reinterpret_cast<EdgeTy*>(edgeData)[std::distance(outs, it.base())];
}
bool hasNeighbor(GraphNode N1, GraphNode N2) const;
//! Returns the number of nodes in the graph
unsigned int size() const { return numNodes; }
//! Returns the number of edges in the graph
unsigned int sizeEdges() const { return numEdges; }
//! Returns the size of an edge
size_t edgeSize() const { return sizeofEdge; }
FileGraph();
~FileGraph();
//! Reads graph connectivity information from file
void structureFromFile(const std::string& filename, bool preFault = true);
/**
* Reads graph connectivity information from file. Tries to balance memory
* evenly across system. Cannot be called during parallel execution.
*/
void structureFromFileInterleaved(const std::string& filename, size_t sizeofEdgeData);
template<typename EdgeTy>
void structureFromFileInterleaved(const std::string& filename,
typename std::enable_if<!std::is_void<EdgeTy>::value>::type* = 0) {
structureFromFileInterleaved(filename, sizeof(EdgeTy));
}
template<typename EdgeTy>
void structureFromFileInterleaved(const std::string& filename,
typename std::enable_if<std::is_void<EdgeTy>::value>::type* = 0) {
structureFromFileInterleaved(filename, 0);
}
/**
* Reads graph connectivity information from arrays. Returns a pointer to
* array to populate with edge data.
*/
template<typename T>
T* structureFromArrays(uint64_t* outIdxs, uint64_t numNodes,
uint32_t* outs, uint64_t numEdges) {
return reinterpret_cast<T*>(structureFromArrays(outIdx, numNodes, outs, numEdges, sizeof(T)));
}
/**
* Reads graph connectivity information from arrays. Returns a pointer to
* array to populate with edge data.
*/
template<typename T>
T* structureFromGraph(FileGraph& g) {
return reinterpret_cast<T*>(structureFromGraph(g, sizeof(T)));
}
//! Writes graph connectivity information to file
void structureToFile(const std::string& file);
void swap(FileGraph& other);
void cloneFrom(FileGraph& other);
};
/**
* Simplifies writing graphs.
*
* Writer your file in rounds:
* <ol>
* <li>setNumNodes(), setNumEdges(), setSizeofEdgeData()</li>
* <li>phase1(), for each node, incrementDegree(Node x)</li>
* <li>phase2(), add neighbors for each node, addNeighbor(Node src, Node
* dst)</li>
* <li>finish(), use as FileGraph</li>
* </ol>
*/
class FileGraphWriter: public FileGraph {
uint64_t *outIdx; // outIdxs
uint32_t *starts;
uint32_t *outs; // outs
size_t sizeofEdgeData;
public:
FileGraphWriter(): outIdx(0), starts(0), outs(0), sizeofEdgeData(0) { }
~FileGraphWriter() {
if (outIdx)
delete [] outIdx;
if (starts)
delete [] starts;
if (outs)
delete [] outs;
}
void setNumNodes(uint64_t n) { this->numNodes = n; }
void setNumEdges(uint64_t n) { this->numEdges = n; }
void setSizeofEdgeData(size_t n) { sizeofEdgeData = n; }
//! Marks the transition to next phase of parsing, counting the degree of
//! nodes
void phase1() {
assert(!outIdx);
outIdx = new uint64_t[this->numNodes];
memset(outIdx, 0, sizeof(*outIdx) * this->numNodes);
}
//! Increments degree of id by delta
void incrementDegree(size_t id, int delta = 1) {
assert(id < this->numNodes);
outIdx[id] += delta;
}
//! Marks the transition to next phase of parsing, adding edges
void phase2() {
if (this->numNodes == 0)
return;
// Turn counts into partial sums
uint64_t* prev = outIdx;
for (uint64_t *ii = outIdx + 1, *ei = outIdx + this->numNodes; ii != ei; ++ii, ++prev) {
*ii += *prev;
}
assert(outIdx[this->numNodes-1] == this->numEdges);
starts = new uint32_t[this->numNodes];
memset(starts, 0, sizeof(*starts) * this->numNodes);
outs = new uint32_t[this->numEdges];
}
//! Adds a neighbor between src and dst
size_t addNeighbor(size_t src, size_t dst) {
size_t base = src ? outIdx[src-1] : 0;
size_t idx = base + starts[src]++;
assert(idx < outIdx[src]);
outs[idx] = dst;
return idx;
}
/**
* Finish making graph. Returns pointer to block of memory that should be
* used to store edge data.
*/
template<typename T>
T* finish() {
void* ret = structureFromArrays(outIdx, this->numNodes, outs, this->numEdges, sizeofEdgeData);
delete [] outIdx;
outIdx = 0;
delete [] starts;
starts = 0;
delete [] outs;
outs = 0;
return reinterpret_cast<T*>(ret);
}
};
/**
* Adds reverse edges to a graph. Reverse edges have edge data copied from the
* original edge. New graph is placed in out parameter. The previous graph in
* out is destroyed.
*/
template<typename EdgeTy>
void makeSymmetric(FileGraph& in, FileGraph& out) {
typedef FileGraph::GraphNode GNode;
typedef LargeArray<EdgeTy> EdgeData;
typedef typename EdgeData::value_type edge_value_type;
FileGraphWriter g;
EdgeData edgeData;
size_t numEdges = in.sizeEdges() * 2;
g.setNumNodes(in.size());
g.setNumEdges(numEdges);
g.setSizeofEdgeData(EdgeData::has_value ? sizeof(edge_value_type) : 0);
g.phase1();
for (FileGraph::iterator ii = in.begin(), ei = in.end(); ii != ei; ++ii) {
GNode src = *ii;
for (FileGraph::edge_iterator jj = in.edge_begin(src), ej = in.edge_end(src); jj != ej; ++jj) {
GNode dst = in.getEdgeDst(jj);
g.incrementDegree(src);
g.incrementDegree(dst);
}
}
g.phase2();
edgeData.create(numEdges);
for (FileGraph::iterator ii = in.begin(), ei = in.end(); ii != ei; ++ii) {
GNode src = *ii;
for (FileGraph::edge_iterator jj = in.edge_begin(src), ej = in.edge_end(src); jj != ej; ++jj) {
GNode dst = in.getEdgeDst(jj);
if (EdgeData::has_value) {
edge_value_type& data = in.getEdgeData<edge_value_type>(jj);
edgeData.set(g.addNeighbor(src, dst), data);
edgeData.set(g.addNeighbor(dst, src), data);
} else {
g.addNeighbor(src, dst);
g.addNeighbor(dst, src);
}
}
}
edge_value_type* rawEdgeData = g.finish<edge_value_type>();
if (EdgeData::has_value)
std::copy(edgeData.begin(), edgeData.end(), rawEdgeData);
out.swap(g);
}
/**
* Permutes a graph.
*
* Permutation array, P, conforms to: P[i] = j where i is a node index from the
* original graph and j is a node index in the permuted graph. New, permuted
* graph is placed in the out parameter. The previous graph in out is destroyed.
*
* @param in original graph
* @param p permutation array
* @param out permuted graph
*/
template<typename EdgeTy,typename PTy>
void permute(FileGraph& in, const PTy& p, FileGraph& out) {
typedef FileGraph::GraphNode GNode;
typedef LargeArray<EdgeTy> EdgeData;
typedef typename EdgeData::value_type edge_value_type;
FileGraphWriter g;
EdgeData edgeData;
size_t numEdges = in.sizeEdges();
g.setNumNodes(in.size());
g.setNumEdges(numEdges);
g.setSizeofEdgeData(EdgeData::has_value ? sizeof(edge_value_type) : 0);
g.phase1();
for (FileGraph::iterator ii = in.begin(), ei = in.end(); ii != ei; ++ii) {
GNode src = *ii;
for (FileGraph::edge_iterator jj = in.edge_begin(src), ej = in.edge_end(src); jj != ej; ++jj) {
g.incrementDegree(p[src]);
}
}
g.phase2();
edgeData.create(numEdges);
for (FileGraph::iterator ii = in.begin(), ei = in.end(); ii != ei; ++ii) {
GNode src = *ii;
for (FileGraph::edge_iterator jj = in.edge_begin(src), ej = in.edge_end(src); jj != ej; ++jj) {
GNode dst = in.getEdgeDst(jj);
if (EdgeData::has_value) {
edge_value_type& data = in.getEdgeData<edge_value_type>(jj);
edgeData.set(g.addNeighbor(p[src], p[dst]), data);
} else {
g.addNeighbor(p[src], p[dst]);
}
}
}
edge_value_type* rawEdgeData = g.finish<edge_value_type>();
if (EdgeData::has_value)
std::copy(edgeData.begin(), edgeData.end(), rawEdgeData);
out.swap(g);
}
template<typename GraphTy,typename... Args>
GALOIS_ATTRIBUTE_DEPRECATED
void structureFromFile(GraphTy& g, const std::string& fname, Args&&... args) {
FileGraph graph;
graph.structureFromFile(fname);
g.structureFromGraph(graph, std::forward<Args>(args)...);
}
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/SwapByteOrder.h
|
//===- SwapByteOrder.h - Generic and optimized byte swaps -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares generic and optimized functions to swap the byte order of
// an integral type.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SYSTEM_SWAP_BYTE_ORDER_H
#define LLVM_SYSTEM_SWAP_BYTE_ORDER_H
#include "llvm/Support/DataTypes.h"
#include <cstddef>
#include <limits>
namespace llvm {
namespace sys {
/// SwapByteOrder_16 - This function returns a byte-swapped representation of
/// the 16-bit argument.
inline uint16_t SwapByteOrder_16(uint16_t value) {
#if defined(_MSC_VER) && !defined(_DEBUG)
// The DLL version of the runtime lacks these functions (bug!?), but in a
// release build they're replaced with BSWAP instructions anyway.
return _byteswap_ushort(value);
#else
uint16_t Hi = value << 8;
uint16_t Lo = value >> 8;
return Hi | Lo;
#endif
}
/// SwapByteOrder_32 - This function returns a byte-swapped representation of
/// the 32-bit argument.
inline uint32_t SwapByteOrder_32(uint32_t value) {
#if defined(__llvm__) || \
(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
return __builtin_bswap32(value);
#elif defined(_MSC_VER) && !defined(_DEBUG)
return _byteswap_ulong(value);
#else
uint32_t Byte0 = value & 0x000000FF;
uint32_t Byte1 = value & 0x0000FF00;
uint32_t Byte2 = value & 0x00FF0000;
uint32_t Byte3 = value & 0xFF000000;
return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
#endif
}
/// SwapByteOrder_64 - This function returns a byte-swapped representation of
/// the 64-bit argument.
inline uint64_t SwapByteOrder_64(uint64_t value) {
#if defined(__llvm__) || \
(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
return __builtin_bswap64(value);
#elif defined(_MSC_VER) && !defined(_DEBUG)
return _byteswap_uint64(value);
#else
uint64_t Hi = SwapByteOrder_32(uint32_t(value));
uint32_t Lo = SwapByteOrder_32(uint32_t(value >> 32));
return (Hi << 32) | Lo;
#endif
}
inline unsigned char SwapByteOrder(unsigned char C) { return C; }
inline signed char SwapByteOrder(signed char C) { return C; }
inline char SwapByteOrder(char C) { return C; }
inline unsigned short SwapByteOrder(unsigned short C) { return SwapByteOrder_16(C); }
inline signed short SwapByteOrder( signed short C) { return SwapByteOrder_16(C); }
inline unsigned int SwapByteOrder(unsigned int C) { return SwapByteOrder_32(C); }
inline signed int SwapByteOrder( signed int C) { return SwapByteOrder_32(C); }
#if __LONG_MAX__ == __INT_MAX__
inline unsigned long SwapByteOrder(unsigned long C) { return SwapByteOrder_32(C); }
inline signed long SwapByteOrder( signed long C) { return SwapByteOrder_32(C); }
#elif __LONG_MAX__ == __LONG_LONG_MAX__
inline unsigned long SwapByteOrder(unsigned long C) { return SwapByteOrder_64(C); }
inline signed long SwapByteOrder( signed long C) { return SwapByteOrder_64(C); }
#else
#error "Unknown long size!"
#endif
inline unsigned long long SwapByteOrder(unsigned long long C) {
return SwapByteOrder_64(C);
}
inline signed long long SwapByteOrder(signed long long C) {
return SwapByteOrder_64(C);
}
} // end namespace sys
} // end namespace llvm
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/MathExtras.h
|
//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful for math stuff.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H
#include "llvm/Support/SwapByteOrder.h"
namespace llvm {
// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.
/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
inline uint32_t Hi_32(uint64_t Value) {
return static_cast<uint32_t>(Value >> 32);
}
/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
inline uint32_t Lo_32(uint64_t Value) {
return static_cast<uint32_t>(Value);
}
/// isInt - Checks if an integer fits into the given bit width.
template<unsigned N>
inline bool isInt(int64_t x) {
return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
// Template specializations to get better code for common cases.
template<>
inline bool isInt<8>(int64_t x) {
return static_cast<int8_t>(x) == x;
}
template<>
inline bool isInt<16>(int64_t x) {
return static_cast<int16_t>(x) == x;
}
template<>
inline bool isInt<32>(int64_t x) {
return static_cast<int32_t>(x) == x;
}
/// isUInt - Checks if an unsigned integer fits into the given bit width.
template<unsigned N>
inline bool isUInt(uint64_t x) {
return N >= 64 || x < (UINT64_C(1)<<N);
}
// Template specializations to get better code for common cases.
template<>
inline bool isUInt<8>(uint64_t x) {
return static_cast<uint8_t>(x) == x;
}
template<>
inline bool isUInt<16>(uint64_t x) {
return static_cast<uint16_t>(x) == x;
}
template<>
inline bool isUInt<32>(uint64_t x) {
return static_cast<uint32_t>(x) == x;
}
/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
/// bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
return x == (x & (~0ULL >> (64 - N)));
}
/// isIntN - Checks if an signed integer fits into the given (dynamic)
/// bit width.
inline bool isIntN(unsigned N, int64_t x) {
return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
/// isMask_32 - This function returns true if the argument is a sequence of ones
/// starting at the least significant bit with the remainder zero (32 bit
/// version). Ex. isMask_32(0x0000FFFFU) == true.
inline bool isMask_32(uint32_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
/// isMask_64 - This function returns true if the argument is a sequence of ones
/// starting at the least significant bit with the remainder zero (64 bit
/// version).
inline bool isMask_64(uint64_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
/// isShiftedMask_32 - This function returns true if the argument contains a
/// sequence of ones with the remainder zero (32 bit version.)
/// Ex. isShiftedMask_32(0x0000FF00U) == true.
inline bool isShiftedMask_32(uint32_t Value) {
return isMask_32((Value - 1) | Value);
}
/// isShiftedMask_64 - This function returns true if the argument contains a
/// sequence of ones with the remainder zero (64 bit version.)
inline bool isShiftedMask_64(uint64_t Value) {
return isMask_64((Value - 1) | Value);
}
/// isPowerOf2_32 - This function returns true if the argument is a power of
/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
inline bool isPowerOf2_32(uint32_t Value) {
return Value && !(Value & (Value - 1));
}
/// isPowerOf2_64 - This function returns true if the argument is a power of two
/// > 0 (64 bit edition.)
inline bool isPowerOf2_64(uint64_t Value) {
return Value && !(Value & (Value - int64_t(1L)));
}
/// ByteSwap_16 - This function returns a byte-swapped representation of the
/// 16-bit argument, Value.
inline uint16_t ByteSwap_16(uint16_t Value) {
return sys::SwapByteOrder_16(Value);
}
/// ByteSwap_32 - This function returns a byte-swapped representation of the
/// 32-bit argument, Value.
inline uint32_t ByteSwap_32(uint32_t Value) {
return sys::SwapByteOrder_32(Value);
}
/// ByteSwap_64 - This function returns a byte-swapped representation of the
/// 64-bit argument, Value.
inline uint64_t ByteSwap_64(uint64_t Value) {
return sys::SwapByteOrder_64(Value);
}
/// CountLeadingZeros_32 - this function performs the platform optimal form of
/// counting the number of zeros from the most significant bit to the first one
/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8.
/// Returns 32 if the word is zero.
inline unsigned CountLeadingZeros_32(uint32_t Value) {
unsigned Count; // result
#if __GNUC__ >= 4
// PowerPC is defined for __builtin_clz(0)
#if !defined(__ppc__) && !defined(__ppc64__)
if (!Value) return 32;
#endif
Count = __builtin_clz(Value);
#else
if (!Value) return 32;
Count = 0;
// bisection method for count leading zeros
for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) {
uint32_t Tmp = Value >> Shift;
if (Tmp) {
Value = Tmp;
} else {
Count |= Shift;
}
}
#endif
return Count;
}
/// CountLeadingOnes_32 - this function performs the operation of
/// counting the number of ones from the most significant bit to the first zero
/// bit. Ex. CountLeadingOnes_32(0xFF0FFF00) == 8.
/// Returns 32 if the word is all ones.
inline unsigned CountLeadingOnes_32(uint32_t Value) {
return CountLeadingZeros_32(~Value);
}
/// CountLeadingZeros_64 - This function performs the platform optimal form
/// of counting the number of zeros from the most significant bit to the first
/// one bit (64 bit edition.)
/// Returns 64 if the word is zero.
inline unsigned CountLeadingZeros_64(uint64_t Value) {
unsigned Count; // result
#if __GNUC__ >= 4
// PowerPC is defined for __builtin_clzll(0)
#if !defined(__ppc__) && !defined(__ppc64__)
if (!Value) return 64;
#endif
Count = __builtin_clzll(Value);
#else
if (sizeof(long) == sizeof(int64_t)) {
if (!Value) return 64;
Count = 0;
// bisection method for count leading zeros
for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) {
uint64_t Tmp = Value >> Shift;
if (Tmp) {
Value = Tmp;
} else {
Count |= Shift;
}
}
} else {
// get hi portion
uint32_t Hi = Hi_32(Value);
// if some bits in hi portion
if (Hi) {
// leading zeros in hi portion plus all bits in lo portion
Count = CountLeadingZeros_32(Hi);
} else {
// get lo portion
uint32_t Lo = Lo_32(Value);
// same as 32 bit value
Count = CountLeadingZeros_32(Lo)+32;
}
}
#endif
return Count;
}
/// CountLeadingOnes_64 - This function performs the operation
/// of counting the number of ones from the most significant bit to the first
/// zero bit (64 bit edition.)
/// Returns 64 if the word is all ones.
inline unsigned CountLeadingOnes_64(uint64_t Value) {
return CountLeadingZeros_64(~Value);
}
/// CountTrailingZeros_32 - this function performs the platform optimal form of
/// counting the number of zeros from the least significant bit to the first one
/// bit. Ex. CountTrailingZeros_32(0xFF00FF00) == 8.
/// Returns 32 if the word is zero.
inline unsigned CountTrailingZeros_32(uint32_t Value) {
#if __GNUC__ >= 4
return Value ? __builtin_ctz(Value) : 32;
#else
static const unsigned Mod37BitPosition[] = {
32, 0, 1, 26, 2, 23, 27, 0, 3, 16, 24, 30, 28, 11, 0, 13,
4, 7, 17, 0, 25, 22, 31, 15, 29, 10, 12, 6, 0, 21, 14, 9,
5, 20, 8, 19, 18
};
return Mod37BitPosition[(-Value & Value) % 37];
#endif
}
/// CountTrailingOnes_32 - this function performs the operation of
/// counting the number of ones from the least significant bit to the first zero
/// bit. Ex. CountTrailingOnes_32(0x00FF00FF) == 8.
/// Returns 32 if the word is all ones.
inline unsigned CountTrailingOnes_32(uint32_t Value) {
return CountTrailingZeros_32(~Value);
}
/// CountTrailingZeros_64 - This function performs the platform optimal form
/// of counting the number of zeros from the least significant bit to the first
/// one bit (64 bit edition.)
/// Returns 64 if the word is zero.
inline unsigned CountTrailingZeros_64(uint64_t Value) {
#if __GNUC__ >= 4
return Value ? __builtin_ctzll(Value) : 64;
#else
static const unsigned Mod67Position[] = {
64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54,
4, 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55,
47, 5, 32, 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27,
29, 50, 43, 46, 31, 37, 21, 57, 52, 8, 26, 49, 45, 36, 56,
7, 48, 35, 6, 34, 33, 0
};
return Mod67Position[(-Value & Value) % 67];
#endif
}
/// CountTrailingOnes_64 - This function performs the operation
/// of counting the number of ones from the least significant bit to the first
/// zero bit (64 bit edition.)
/// Returns 64 if the word is all ones.
inline unsigned CountTrailingOnes_64(uint64_t Value) {
return CountTrailingZeros_64(~Value);
}
/// CountPopulation_32 - this function counts the number of set bits in a value.
/// Ex. CountPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
inline unsigned CountPopulation_32(uint32_t Value) {
#if __GNUC__ >= 4
return __builtin_popcount(Value);
#else
uint32_t v = Value - ((Value >> 1) & 0x55555555);
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
}
/// CountPopulation_64 - this function counts the number of set bits in a value,
/// (64 bit edition.)
inline unsigned CountPopulation_64(uint64_t Value) {
#if __GNUC__ >= 4
return __builtin_popcountll(Value);
#else
uint64_t v = Value - ((Value >> 1) & 0x5555555555555555ULL);
v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
}
/// Log2_32 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
return 31 - CountLeadingZeros_32(Value);
}
/// Log2_64 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
return 63 - CountLeadingZeros_64(Value);
}
/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
/// value, 32 if the value is zero. (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
return 32-CountLeadingZeros_32(Value-1);
}
/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
/// value, 64 if the value is zero. (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
return 64-CountLeadingZeros_64(Value-1);
}
/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
/// values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
while (B) {
uint64_t T = B;
B = A % B;
A = T;
}
return A;
}
/// BitsToDouble - This function takes a 64-bit integer and returns the bit
/// equivalent double.
inline double BitsToDouble(uint64_t Bits) {
union {
uint64_t L;
double D;
} T;
T.L = Bits;
return T.D;
}
/// BitsToFloat - This function takes a 32-bit integer and returns the bit
/// equivalent float.
inline float BitsToFloat(uint32_t Bits) {
union {
uint32_t I;
float F;
} T;
T.I = Bits;
return T.F;
}
/// DoubleToBits - This function takes a double and returns the bit
/// equivalent 64-bit integer. Note that copying doubles around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
union {
uint64_t L;
double D;
} T;
T.D = Double;
return T.L;
}
/// FloatToBits - This function takes a float and returns the bit
/// equivalent 32-bit integer. Note that copying floats around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
union {
uint32_t I;
float F;
} T;
T.F = Float;
return T.I;
}
/// Platform-independent wrappers for the C99 isnan() function.
int IsNAN(float f);
int IsNAN(double d);
/// Platform-independent wrappers for the C99 isinf() function.
int IsInf(float f);
int IsInf(double d);
/// MinAlign - A and B are either alignments or offsets. Return the minimum
/// alignment that may be assumed after adding the two together.
static inline uint64_t MinAlign(uint64_t A, uint64_t B) {
// The largest power of 2 that divides both A and B.
return (A | B) & -(A | B);
}
/// NextPowerOf2 - Returns the next power of two (in 64-bits)
/// that is strictly greater than A. Returns zero on overflow.
static inline uint64_t NextPowerOf2(uint64_t A) {
A |= (A >> 1);
A |= (A >> 2);
A |= (A >> 4);
A |= (A >> 8);
A |= (A >> 16);
A |= (A >> 32);
return A + 1;
}
/// RoundUpToAlignment - Returns the next integer (mod 2**64) that is
/// greater than or equal to \arg Value and is a multiple of \arg
/// Align. Align must be non-zero.
///
/// Examples:
/// RoundUpToAlignment(5, 8) = 8
/// RoundUpToAlignment(17, 8) = 24
/// RoundUpToAlignment(~0LL, 8) = 0
inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
return ((Value + Align - 1) / Align) * Align;
}
/// OffsetToAlignment - Return the offset to the next integer (mod 2**64) that
/// is greater than or equal to \arg Value and is a multiple of \arg
/// Align. Align must be non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
return RoundUpToAlignment(Value, Align) - Value;
}
/// abs64 - absolute value of a 64-bit int. Not all environments support
/// "abs" on whatever their name for the 64-bit int type is. The absolute
/// value of the largest negative number is undefined, as with "abs".
inline int64_t abs64(int64_t x) {
return (x < 0) ? -x : x;
}
/// SignExtend32 - Sign extend B-bit number x to 32-bit int.
/// Usage int32_t r = SignExtend32<5>(x);
template <unsigned B> inline int32_t SignExtend32(uint32_t x) {
return int32_t(x << (32 - B)) >> (32 - B);
}
/// SignExtend64 - Sign extend B-bit number x to 64-bit int.
/// Usage int64_t r = SignExtend64<5>(x);
template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
return int64_t(x << (64 - B)) >> (64 - B);
}
} // End llvm namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/CommandLine.h
|
//===- llvm/Support/CommandLine.h - Command line handler --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class implements a command line argument processor that is useful when
// creating a tool. It provides a simple, minimalistic interface that is easily
// extensible and supports nonlocal (library) command line options.
//
// Note that rather than trying to figure out what this code does, you should
// read the library documentation located in docs/CommandLine.html or looks at
// the many example usages in tools/*/*.cpp
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_COMMANDLINE_H
#define LLVM_SUPPORT_COMMANDLINE_H
#include "llvm/Support/type_traits.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include <cassert>
#include <climits>
#include <cstdarg>
#include <utility>
#include <vector>
namespace llvm {
/// cl Namespace - This namespace contains all of the command line option
/// processing machinery. It is intentionally a short name to make qualified
/// usage concise.
namespace cl {
//===----------------------------------------------------------------------===//
// ParseCommandLineOptions - Command line option processing entry point.
//
void ParseCommandLineOptions(int argc, char **argv,
const char *Overview = 0);
//===----------------------------------------------------------------------===//
// ParseEnvironmentOptions - Environment variable option processing alternate
// entry point.
//
void ParseEnvironmentOptions(const char *progName, const char *envvar,
const char *Overview = 0);
///===---------------------------------------------------------------------===//
/// SetVersionPrinter - Override the default (LLVM specific) version printer
/// used to print out the version when --version is given
/// on the command line. This allows other systems using the
/// CommandLine utilities to print their own version string.
void SetVersionPrinter(void (*func)());
///===---------------------------------------------------------------------===//
/// AddExtraVersionPrinter - Add an extra printer to use in addition to the
/// default one. This can be called multiple times,
/// and each time it adds a new function to the list
/// which will be called after the basic LLVM version
/// printing is complete. Each can then add additional
/// information specific to the tool.
void AddExtraVersionPrinter(void (*func)());
// PrintOptionValues - Print option values.
// With -print-options print the difference between option values and defaults.
// With -print-all-options print all option values.
// (Currently not perfect, but best-effort.)
void PrintOptionValues();
// MarkOptionsChanged - Internal helper function.
void MarkOptionsChanged();
//===----------------------------------------------------------------------===//
// Flags permitted to be passed to command line arguments
//
enum NumOccurrencesFlag { // Flags for the number of occurrences allowed
Optional = 0x01, // Zero or One occurrence
ZeroOrMore = 0x02, // Zero or more occurrences allowed
Required = 0x03, // One occurrence required
OneOrMore = 0x04, // One or more occurrences required
// ConsumeAfter - Indicates that this option is fed anything that follows the
// last positional argument required by the application (it is an error if
// there are zero positional arguments, and a ConsumeAfter option is used).
// Thus, for example, all arguments to LLI are processed until a filename is
// found. Once a filename is found, all of the succeeding arguments are
// passed, unprocessed, to the ConsumeAfter option.
//
ConsumeAfter = 0x05,
OccurrencesMask = 0x07
};
enum ValueExpected { // Is a value required for the option?
ValueOptional = 0x08, // The value can appear... or not
ValueRequired = 0x10, // The value is required to appear!
ValueDisallowed = 0x18, // A value may not be specified (for flags)
ValueMask = 0x18
};
enum OptionHidden { // Control whether -help shows this option
NotHidden = 0x20, // Option included in -help & -help-hidden
Hidden = 0x40, // -help doesn't, but -help-hidden does
ReallyHidden = 0x60, // Neither -help nor -help-hidden show this arg
HiddenMask = 0x60
};
// Formatting flags - This controls special features that the option might have
// that cause it to be parsed differently...
//
// Prefix - This option allows arguments that are otherwise unrecognized to be
// matched by options that are a prefix of the actual value. This is useful for
// cases like a linker, where options are typically of the form '-lfoo' or
// '-L../../include' where -l or -L are the actual flags. When prefix is
// enabled, and used, the value for the flag comes from the suffix of the
// argument.
//
// Grouping - With this option enabled, multiple letter options are allowed to
// bunch together with only a single hyphen for the whole group. This allows
// emulation of the behavior that ls uses for example: ls -la === ls -l -a
//
enum FormattingFlags {
NormalFormatting = 0x000, // Nothing special
Positional = 0x080, // Is a positional argument, no '-' required
Prefix = 0x100, // Can this option directly prefix its value?
Grouping = 0x180, // Can this option group with other options?
FormattingMask = 0x180 // Union of the above flags.
};
enum MiscFlags { // Miscellaneous flags to adjust argument
CommaSeparated = 0x200, // Should this cl::list split between commas?
PositionalEatsArgs = 0x400, // Should this positional cl::list eat -args?
Sink = 0x800, // Should this cl::list eat all unknown options?
MiscMask = 0xE00 // Union of the above flags.
};
//===----------------------------------------------------------------------===//
// Option Base class
//
class alias;
class Option {
friend class alias;
// handleOccurrences - Overriden by subclasses to handle the value passed into
// an argument. Should return true if there was an error processing the
// argument and the program should exit.
//
virtual bool handleOccurrence(unsigned pos, StringRef ArgName,
StringRef Arg) = 0;
virtual enum ValueExpected getValueExpectedFlagDefault() const {
return ValueOptional;
}
// Out of line virtual function to provide home for the class.
virtual void anchor();
int NumOccurrences; // The number of times specified
int Flags; // Flags for the argument
unsigned Position; // Position of last occurrence of the option
unsigned AdditionalVals;// Greater than 0 for multi-valued option.
Option *NextRegistered; // Singly linked list of registered options.
public:
const char *ArgStr; // The argument string itself (ex: "help", "o")
const char *HelpStr; // The descriptive text message for -help
const char *ValueStr; // String describing what the value of this option is
inline enum NumOccurrencesFlag getNumOccurrencesFlag() const {
return static_cast<enum NumOccurrencesFlag>(Flags & OccurrencesMask);
}
inline enum ValueExpected getValueExpectedFlag() const {
int VE = Flags & ValueMask;
return VE ? static_cast<enum ValueExpected>(VE)
: getValueExpectedFlagDefault();
}
inline enum OptionHidden getOptionHiddenFlag() const {
return static_cast<enum OptionHidden>(Flags & HiddenMask);
}
inline enum FormattingFlags getFormattingFlag() const {
return static_cast<enum FormattingFlags>(Flags & FormattingMask);
}
inline unsigned getMiscFlags() const {
return Flags & MiscMask;
}
inline unsigned getPosition() const { return Position; }
inline unsigned getNumAdditionalVals() const { return AdditionalVals; }
// hasArgStr - Return true if the argstr != ""
bool hasArgStr() const { return ArgStr[0] != 0; }
//-------------------------------------------------------------------------===
// Accessor functions set by OptionModifiers
//
void setArgStr(const char *S) { ArgStr = S; }
void setDescription(const char *S) { HelpStr = S; }
void setValueStr(const char *S) { ValueStr = S; }
void setFlag(unsigned Flag, unsigned FlagMask) {
Flags &= ~FlagMask;
Flags |= Flag;
}
void setNumOccurrencesFlag(enum NumOccurrencesFlag Val) {
setFlag(Val, OccurrencesMask);
}
void setValueExpectedFlag(enum ValueExpected Val) { setFlag(Val, ValueMask); }
void setHiddenFlag(enum OptionHidden Val) { setFlag(Val, HiddenMask); }
void setFormattingFlag(enum FormattingFlags V) { setFlag(V, FormattingMask); }
void setMiscFlag(enum MiscFlags M) { setFlag(M, M); }
void setPosition(unsigned pos) { Position = pos; }
protected:
explicit Option(unsigned DefaultFlags)
: NumOccurrences(0), Flags(DefaultFlags | NormalFormatting), Position(0),
AdditionalVals(0), NextRegistered(0),
ArgStr(""), HelpStr(""), ValueStr("") {
assert(getNumOccurrencesFlag() != 0 &&
getOptionHiddenFlag() != 0 && "Not all default flags specified!");
}
inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; }
public:
// addArgument - Register this argument with the commandline system.
//
void addArgument();
Option *getNextRegisteredOption() const { return NextRegistered; }
// Return the width of the option tag for printing...
virtual size_t getOptionWidth() const = 0;
// printOptionInfo - Print out information about this option. The
// to-be-maintained width is specified.
//
virtual void printOptionInfo(size_t GlobalWidth) const = 0;
virtual void printOptionValue(size_t GlobalWidth, bool Force) const = 0;
virtual void getExtraOptionNames(SmallVectorImpl<const char*> &) {}
// addOccurrence - Wrapper around handleOccurrence that enforces Flags.
//
bool addOccurrence(unsigned pos, StringRef ArgName,
StringRef Value, bool MultiArg = false);
// Prints option name followed by message. Always returns true.
bool error(const Twine &Message, StringRef ArgName = StringRef());
public:
inline int getNumOccurrences() const { return NumOccurrences; }
virtual ~Option() {}
};
//===----------------------------------------------------------------------===//
// Command line option modifiers that can be used to modify the behavior of
// command line option parsers...
//
// desc - Modifier to set the description shown in the -help output...
struct desc {
const char *Desc;
desc(const char *Str) : Desc(Str) {}
void apply(Option &O) const { O.setDescription(Desc); }
};
// value_desc - Modifier to set the value description shown in the -help
// output...
struct value_desc {
const char *Desc;
value_desc(const char *Str) : Desc(Str) {}
void apply(Option &O) const { O.setValueStr(Desc); }
};
// init - Specify a default (initial) value for the command line argument, if
// the default constructor for the argument type does not give you what you
// want. This is only valid on "opt" arguments, not on "list" arguments.
//
template<class Ty>
struct initializer {
const Ty &Init;
initializer(const Ty &Val) : Init(Val) {}
template<class Opt>
void apply(Opt &O) const { O.setInitialValue(Init); }
};
template<class Ty>
initializer<Ty> init(const Ty &Val) {
return initializer<Ty>(Val);
}
// location - Allow the user to specify which external variable they want to
// store the results of the command line argument processing into, if they don't
// want to store it in the option itself.
//
template<class Ty>
struct LocationClass {
Ty &Loc;
LocationClass(Ty &L) : Loc(L) {}
template<class Opt>
void apply(Opt &O) const { O.setLocation(O, Loc); }
};
template<class Ty>
LocationClass<Ty> location(Ty &L) { return LocationClass<Ty>(L); }
//===----------------------------------------------------------------------===//
// OptionValue class
// Support value comparison outside the template.
struct GenericOptionValue {
virtual ~GenericOptionValue() {}
virtual bool compare(const GenericOptionValue &V) const = 0;
};
template<class DataType> struct OptionValue;
// The default value safely does nothing. Option value printing is only
// best-effort.
template<class DataType, bool isClass>
struct OptionValueBase : public GenericOptionValue {
// Temporary storage for argument passing.
typedef OptionValue<DataType> WrapperType;
bool hasValue() const { return false; }
const DataType &getValue() const { assert(false && "no default value"); }
// Some options may take their value from a different data type.
template<class DT>
void setValue(const DT& /*V*/) {}
bool compare(const DataType &/*V*/) const { return false; }
virtual bool compare(const GenericOptionValue& /*V*/) const { return false; }
};
// Simple copy of the option value.
template<class DataType>
class OptionValueCopy : public GenericOptionValue {
DataType Value;
bool Valid;
public:
OptionValueCopy() : Valid(false) {}
bool hasValue() const { return Valid; }
const DataType &getValue() const {
assert(Valid && "invalid option value");
return Value;
}
void setValue(const DataType &V) { Valid = true; Value = V; }
bool compare(const DataType &V) const {
return Valid && (Value != V);
}
virtual bool compare(const GenericOptionValue &V) const {
const OptionValueCopy<DataType> &VC =
static_cast< const OptionValueCopy<DataType>& >(V);
if (!VC.hasValue()) return false;
return compare(VC.getValue());
}
};
// Non-class option values.
template<class DataType>
struct OptionValueBase<DataType, false> : OptionValueCopy<DataType> {
typedef DataType WrapperType;
};
// Top-level option class.
template<class DataType>
struct OptionValue : OptionValueBase<DataType, is_class<DataType>::value> {
OptionValue() {}
OptionValue(const DataType& V) {
this->setValue(V);
}
// Some options may take their value from a different data type.
template<class DT>
OptionValue<DataType> &operator=(const DT& V) {
this->setValue(V);
return *this;
}
};
// Other safe-to-copy-by-value common option types.
enum boolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE };
template<>
struct OptionValue<cl::boolOrDefault> : OptionValueCopy<cl::boolOrDefault> {
typedef cl::boolOrDefault WrapperType;
OptionValue() {}
OptionValue(const cl::boolOrDefault& V) {
this->setValue(V);
}
OptionValue<cl::boolOrDefault> &operator=(const cl::boolOrDefault& V) {
setValue(V);
return *this;
}
};
template<>
struct OptionValue<std::string> : OptionValueCopy<std::string> {
typedef StringRef WrapperType;
OptionValue() {}
OptionValue(const std::string& V) {
this->setValue(V);
}
OptionValue<std::string> &operator=(const std::string& V) {
setValue(V);
return *this;
}
};
//===----------------------------------------------------------------------===//
// Enum valued command line option
//
#define clEnumVal(ENUMVAL, DESC) #ENUMVAL, int(ENUMVAL), DESC
#define clEnumValN(ENUMVAL, FLAGNAME, DESC) FLAGNAME, int(ENUMVAL), DESC
#define clEnumValEnd (reinterpret_cast<void*>(0))
// values - For custom data types, allow specifying a group of values together
// as the values that go into the mapping that the option handler uses. Note
// that the values list must always have a 0 at the end of the list to indicate
// that the list has ended.
//
template<class DataType>
class ValuesClass {
// Use a vector instead of a map, because the lists should be short,
// the overhead is less, and most importantly, it keeps them in the order
// inserted so we can print our option out nicely.
SmallVector<std::pair<const char *, std::pair<int, const char *> >,4> Values;
void processValues(va_list Vals);
public:
ValuesClass(const char *EnumName, DataType Val, const char *Desc,
va_list ValueArgs) {
// Insert the first value, which is required.
Values.push_back(std::make_pair(EnumName, std::make_pair(Val, Desc)));
// Process the varargs portion of the values...
while (const char *enumName = va_arg(ValueArgs, const char *)) {
DataType EnumVal = static_cast<DataType>(va_arg(ValueArgs, int));
const char *EnumDesc = va_arg(ValueArgs, const char *);
Values.push_back(std::make_pair(enumName, // Add value to value map
std::make_pair(EnumVal, EnumDesc)));
}
}
template<class Opt>
void apply(Opt &O) const {
for (unsigned i = 0, e = static_cast<unsigned>(Values.size());
i != e; ++i)
O.getParser().addLiteralOption(Values[i].first, Values[i].second.first,
Values[i].second.second);
}
};
template<class DataType>
ValuesClass<DataType> END_WITH_NULL values(const char *Arg, DataType Val,
const char *Desc, ...) {
va_list ValueArgs;
va_start(ValueArgs, Desc);
ValuesClass<DataType> Vals(Arg, Val, Desc, ValueArgs);
va_end(ValueArgs);
return Vals;
}
//===----------------------------------------------------------------------===//
// parser class - Parameterizable parser for different data types. By default,
// known data types (string, int, bool) have specialized parsers, that do what
// you would expect. The default parser, used for data types that are not
// built-in, uses a mapping table to map specific options to values, which is
// used, among other things, to handle enum types.
//--------------------------------------------------
// generic_parser_base - This class holds all the non-generic code that we do
// not need replicated for every instance of the generic parser. This also
// allows us to put stuff into CommandLine.cpp
//
class generic_parser_base {
protected:
class GenericOptionInfo {
public:
GenericOptionInfo(const char *name, const char *helpStr) :
Name(name), HelpStr(helpStr) {}
const char *Name;
const char *HelpStr;
};
public:
virtual ~generic_parser_base() {} // Base class should have virtual-dtor
// getNumOptions - Virtual function implemented by generic subclass to
// indicate how many entries are in Values.
//
virtual unsigned getNumOptions() const = 0;
// getOption - Return option name N.
virtual const char *getOption(unsigned N) const = 0;
// getDescription - Return description N
virtual const char *getDescription(unsigned N) const = 0;
// Return the width of the option tag for printing...
virtual size_t getOptionWidth(const Option &O) const;
virtual const GenericOptionValue &getOptionValue(unsigned N) const = 0;
// printOptionInfo - Print out information about this option. The
// to-be-maintained width is specified.
//
virtual void printOptionInfo(const Option &O, size_t GlobalWidth) const;
void printGenericOptionDiff(const Option &O, const GenericOptionValue &V,
const GenericOptionValue &Default,
size_t GlobalWidth) const;
// printOptionDiff - print the value of an option and it's default.
//
// Template definition ensures that the option and default have the same
// DataType (via the same AnyOptionValue).
template<class AnyOptionValue>
void printOptionDiff(const Option &O, const AnyOptionValue &V,
const AnyOptionValue &Default,
size_t GlobalWidth) const {
printGenericOptionDiff(O, V, Default, GlobalWidth);
}
void initialize(Option &O) {
// All of the modifiers for the option have been processed by now, so the
// argstr field should be stable, copy it down now.
//
hasArgStr = O.hasArgStr();
}
void getExtraOptionNames(SmallVectorImpl<const char*> &OptionNames) {
// If there has been no argstr specified, that means that we need to add an
// argument for every possible option. This ensures that our options are
// vectored to us.
if (!hasArgStr)
for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
OptionNames.push_back(getOption(i));
}
enum ValueExpected getValueExpectedFlagDefault() const {
// If there is an ArgStr specified, then we are of the form:
//
// -opt=O2 or -opt O2 or -optO2
//
// In which case, the value is required. Otherwise if an arg str has not
// been specified, we are of the form:
//
// -O2 or O2 or -la (where -l and -a are separate options)
//
// If this is the case, we cannot allow a value.
//
if (hasArgStr)
return ValueRequired;
else
return ValueDisallowed;
}
// findOption - Return the option number corresponding to the specified
// argument string. If the option is not found, getNumOptions() is returned.
//
unsigned findOption(const char *Name);
protected:
bool hasArgStr;
};
// Default parser implementation - This implementation depends on having a
// mapping of recognized options to values of some sort. In addition to this,
// each entry in the mapping also tracks a help message that is printed with the
// command line option for -help. Because this is a simple mapping parser, the
// data type can be any unsupported type.
//
template <class DataType>
class parser : public generic_parser_base {
protected:
class OptionInfo : public GenericOptionInfo {
public:
OptionInfo(const char *name, DataType v, const char *helpStr) :
GenericOptionInfo(name, helpStr), V(v) {}
OptionValue<DataType> V;
};
SmallVector<OptionInfo, 8> Values;
public:
typedef DataType parser_data_type;
// Implement virtual functions needed by generic_parser_base
unsigned getNumOptions() const { return unsigned(Values.size()); }
const char *getOption(unsigned N) const { return Values[N].Name; }
const char *getDescription(unsigned N) const {
return Values[N].HelpStr;
}
// getOptionValue - Return the value of option name N.
virtual const GenericOptionValue &getOptionValue(unsigned N) const {
return Values[N].V;
}
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg, DataType &V) {
StringRef ArgVal;
if (hasArgStr)
ArgVal = Arg;
else
ArgVal = ArgName;
for (unsigned i = 0, e = static_cast<unsigned>(Values.size());
i != e; ++i)
if (Values[i].Name == ArgVal) {
V = Values[i].V.getValue();
return false;
}
return O.error("Cannot find option named '" + ArgVal + "'!");
}
/// addLiteralOption - Add an entry to the mapping table.
///
template <class DT>
void addLiteralOption(const char *Name, const DT &V, const char *HelpStr) {
assert(findOption(Name) == Values.size() && "Option already exists!");
OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
Values.push_back(X);
MarkOptionsChanged();
}
/// removeLiteralOption - Remove the specified option.
///
void removeLiteralOption(const char *Name) {
unsigned N = findOption(Name);
assert(N != Values.size() && "Option not found!");
Values.erase(Values.begin()+N);
}
};
//--------------------------------------------------
// basic_parser - Super class of parsers to provide boilerplate code
//
class basic_parser_impl { // non-template implementation of basic_parser<t>
public:
virtual ~basic_parser_impl() {}
enum ValueExpected getValueExpectedFlagDefault() const {
return ValueRequired;
}
void getExtraOptionNames(SmallVectorImpl<const char*> &) {}
void initialize(Option &) {}
// Return the width of the option tag for printing...
size_t getOptionWidth(const Option &O) const;
// printOptionInfo - Print out information about this option. The
// to-be-maintained width is specified.
//
void printOptionInfo(const Option &O, size_t GlobalWidth) const;
// printOptionNoValue - Print a placeholder for options that don't yet support
// printOptionDiff().
void printOptionNoValue(const Option &O, size_t GlobalWidth) const;
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "value"; }
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
protected:
// A helper for basic_parser::printOptionDiff.
void printOptionName(const Option &O, size_t GlobalWidth) const;
};
// basic_parser - The real basic parser is just a template wrapper that provides
// a typedef for the provided data type.
//
template<class DataType>
class basic_parser : public basic_parser_impl {
public:
typedef DataType parser_data_type;
typedef OptionValue<DataType> OptVal;
};
//--------------------------------------------------
// parser<bool>
//
template<>
class parser<bool> : public basic_parser<bool> {
const char *ArgStr;
public:
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg, bool &Val);
template <class Opt>
void initialize(Opt &O) {
ArgStr = O.ArgStr;
}
enum ValueExpected getValueExpectedFlagDefault() const {
return ValueOptional;
}
// getValueName - Do not print =<value> at all.
virtual const char *getValueName() const { return 0; }
void printOptionDiff(const Option &O, bool V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<bool>);
//--------------------------------------------------
// parser<boolOrDefault>
template<>
class parser<boolOrDefault> : public basic_parser<boolOrDefault> {
public:
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg, boolOrDefault &Val);
enum ValueExpected getValueExpectedFlagDefault() const {
return ValueOptional;
}
// getValueName - Do not print =<value> at all.
virtual const char *getValueName() const { return 0; }
void printOptionDiff(const Option &O, boolOrDefault V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<boolOrDefault>);
//--------------------------------------------------
// parser<int>
//
template<>
class parser<int> : public basic_parser<int> {
public:
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg, int &Val);
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "int"; }
void printOptionDiff(const Option &O, int V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<int>);
//--------------------------------------------------
// parser<unsigned>
//
template<>
class parser<unsigned> : public basic_parser<unsigned> {
public:
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg, unsigned &Val);
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "uint"; }
void printOptionDiff(const Option &O, unsigned V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<unsigned>);
//--------------------------------------------------
// parser<unsigned long long>
//
template<>
class parser<unsigned long long> : public basic_parser<unsigned long long> {
public:
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg,
unsigned long long &Val);
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "uint"; }
void printOptionDiff(const Option &O, unsigned long long V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<unsigned long long>);
//--------------------------------------------------
// parser<double>
//
template<>
class parser<double> : public basic_parser<double> {
public:
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg, double &Val);
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "number"; }
void printOptionDiff(const Option &O, double V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<double>);
//--------------------------------------------------
// parser<float>
//
template<>
class parser<float> : public basic_parser<float> {
public:
// parse - Return true on error.
bool parse(Option &O, StringRef ArgName, StringRef Arg, float &Val);
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "number"; }
void printOptionDiff(const Option &O, float V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<float>);
//--------------------------------------------------
// parser<std::string>
//
template<>
class parser<std::string> : public basic_parser<std::string> {
public:
// parse - Return true on error.
bool parse(Option &, StringRef, StringRef Arg, std::string &Value) {
Value = Arg.str();
return false;
}
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "string"; }
void printOptionDiff(const Option &O, StringRef V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<std::string>);
//--------------------------------------------------
// parser<char>
//
template<>
class parser<char> : public basic_parser<char> {
public:
// parse - Return true on error.
bool parse(Option &, StringRef, StringRef Arg, char &Value) {
Value = Arg[0];
return false;
}
// getValueName - Overload in subclass to provide a better default value.
virtual const char *getValueName() const { return "char"; }
void printOptionDiff(const Option &O, char V, OptVal Default,
size_t GlobalWidth) const;
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
};
EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<char>);
//--------------------------------------------------
// PrintOptionDiff
//
// This collection of wrappers is the intermediary between class opt and class
// parser to handle all the template nastiness.
// This overloaded function is selected by the generic parser.
template<class ParserClass, class DT>
void printOptionDiff(const Option &O, const generic_parser_base &P, const DT &V,
const OptionValue<DT> &Default, size_t GlobalWidth) {
OptionValue<DT> OV = V;
P.printOptionDiff(O, OV, Default, GlobalWidth);
}
// This is instantiated for basic parsers when the parsed value has a different
// type than the option value. e.g. HelpPrinter.
template<class ParserDT, class ValDT>
struct OptionDiffPrinter {
void print(const Option &O, const parser<ParserDT> P, const ValDT &/*V*/,
const OptionValue<ValDT> &/*Default*/, size_t GlobalWidth) {
P.printOptionNoValue(O, GlobalWidth);
}
};
// This is instantiated for basic parsers when the parsed value has the same
// type as the option value.
template<class DT>
struct OptionDiffPrinter<DT, DT> {
void print(const Option &O, const parser<DT> P, const DT &V,
const OptionValue<DT> &Default, size_t GlobalWidth) {
P.printOptionDiff(O, V, Default, GlobalWidth);
}
};
// This overloaded function is selected by the basic parser, which may parse a
// different type than the option type.
template<class ParserClass, class ValDT>
void printOptionDiff(
const Option &O,
const basic_parser<typename ParserClass::parser_data_type> &P,
const ValDT &V, const OptionValue<ValDT> &Default,
size_t GlobalWidth) {
OptionDiffPrinter<typename ParserClass::parser_data_type, ValDT> printer;
printer.print(O, static_cast<const ParserClass&>(P), V, Default,
GlobalWidth);
}
//===----------------------------------------------------------------------===//
// applicator class - This class is used because we must use partial
// specialization to handle literal string arguments specially (const char* does
// not correctly respond to the apply method). Because the syntax to use this
// is a pain, we have the 'apply' method below to handle the nastiness...
//
template<class Mod> struct applicator {
template<class Opt>
static void opt(const Mod &M, Opt &O) { M.apply(O); }
};
// Handle const char* as a special case...
template<unsigned n> struct applicator<char[n]> {
template<class Opt>
static void opt(const char *Str, Opt &O) { O.setArgStr(Str); }
};
template<unsigned n> struct applicator<const char[n]> {
template<class Opt>
static void opt(const char *Str, Opt &O) { O.setArgStr(Str); }
};
template<> struct applicator<const char*> {
template<class Opt>
static void opt(const char *Str, Opt &O) { O.setArgStr(Str); }
};
template<> struct applicator<NumOccurrencesFlag> {
static void opt(NumOccurrencesFlag NO, Option &O) {
O.setNumOccurrencesFlag(NO);
}
};
template<> struct applicator<ValueExpected> {
static void opt(ValueExpected VE, Option &O) { O.setValueExpectedFlag(VE); }
};
template<> struct applicator<OptionHidden> {
static void opt(OptionHidden OH, Option &O) { O.setHiddenFlag(OH); }
};
template<> struct applicator<FormattingFlags> {
static void opt(FormattingFlags FF, Option &O) { O.setFormattingFlag(FF); }
};
template<> struct applicator<MiscFlags> {
static void opt(MiscFlags MF, Option &O) { O.setMiscFlag(MF); }
};
// apply method - Apply a modifier to an option in a type safe way.
template<class Mod, class Opt>
void apply(const Mod &M, Opt *O) {
applicator<Mod>::opt(M, *O);
}
//===----------------------------------------------------------------------===//
// opt_storage class
// Default storage class definition: external storage. This implementation
// assumes the user will specify a variable to store the data into with the
// cl::location(x) modifier.
//
template<class DataType, bool ExternalStorage, bool isClass>
class opt_storage {
DataType *Location; // Where to store the object...
OptionValue<DataType> Default;
void check() const {
assert(Location != 0 && "cl::location(...) not specified for a command "
"line option with external storage, "
"or cl::init specified before cl::location()!!");
}
public:
opt_storage() : Location(0) {}
bool setLocation(Option &O, DataType &L) {
if (Location)
return O.error("cl::location(x) specified more than once!");
Location = &L;
Default = L;
return false;
}
template<class T>
void setValue(const T &V, bool initial = false) {
check();
*Location = V;
if (initial)
Default = V;
}
DataType &getValue() { check(); return *Location; }
const DataType &getValue() const { check(); return *Location; }
operator DataType() const { return this->getValue(); }
const OptionValue<DataType> &getDefault() const { return Default; }
};
// Define how to hold a class type object, such as a string. Since we can
// inherit from a class, we do so. This makes us exactly compatible with the
// object in all cases that it is used.
//
template<class DataType>
class opt_storage<DataType,false,true> : public DataType {
public:
OptionValue<DataType> Default;
template<class T>
void setValue(const T &V, bool initial = false) {
DataType::operator=(V);
if (initial)
Default = V;
}
DataType &getValue() { return *this; }
const DataType &getValue() const { return *this; }
const OptionValue<DataType> &getDefault() const { return Default; }
};
// Define a partial specialization to handle things we cannot inherit from. In
// this case, we store an instance through containment, and overload operators
// to get at the value.
//
template<class DataType>
class opt_storage<DataType, false, false> {
public:
DataType Value;
OptionValue<DataType> Default;
// Make sure we initialize the value with the default constructor for the
// type.
opt_storage() : Value(DataType()) {}
template<class T>
void setValue(const T &V, bool initial = false) {
Value = V;
if (initial)
Default = V;
}
DataType &getValue() { return Value; }
DataType getValue() const { return Value; }
const OptionValue<DataType> &getDefault() const { return Default; }
operator DataType() const { return getValue(); }
// If the datatype is a pointer, support -> on it.
DataType operator->() const { return Value; }
};
//===----------------------------------------------------------------------===//
// opt - A scalar command line option.
//
template <class DataType, bool ExternalStorage = false,
class ParserClass = parser<DataType> >
class opt : public Option,
public opt_storage<DataType, ExternalStorage,
is_class<DataType>::value> {
ParserClass Parser;
virtual bool handleOccurrence(unsigned pos, StringRef ArgName,
StringRef Arg) {
typename ParserClass::parser_data_type Val =
typename ParserClass::parser_data_type();
if (Parser.parse(*this, ArgName, Arg, Val))
return true; // Parse error!
this->setValue(Val);
this->setPosition(pos);
return false;
}
virtual enum ValueExpected getValueExpectedFlagDefault() const {
return Parser.getValueExpectedFlagDefault();
}
virtual void getExtraOptionNames(SmallVectorImpl<const char*> &OptionNames) {
return Parser.getExtraOptionNames(OptionNames);
}
// Forward printing stuff to the parser...
virtual size_t getOptionWidth() const {return Parser.getOptionWidth(*this);}
virtual void printOptionInfo(size_t GlobalWidth) const {
Parser.printOptionInfo(*this, GlobalWidth);
}
virtual void printOptionValue(size_t GlobalWidth, bool Force) const {
if (Force || this->getDefault().compare(this->getValue())) {
cl::printOptionDiff<ParserClass>(
*this, Parser, this->getValue(), this->getDefault(), GlobalWidth);
}
}
void done() {
addArgument();
Parser.initialize(*this);
}
public:
// setInitialValue - Used by the cl::init modifier...
void setInitialValue(const DataType &V) { this->setValue(V, true); }
ParserClass &getParser() { return Parser; }
template<class T>
DataType &operator=(const T &Val) {
this->setValue(Val);
return this->getValue();
}
// One option...
template<class M0t>
explicit opt(const M0t &M0) : Option(Optional | NotHidden) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
opt(const M0t &M0, const M1t &M1) : Option(Optional | NotHidden) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
opt(const M0t &M0, const M1t &M1,
const M2t &M2) : Option(Optional | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
opt(const M0t &M0, const M1t &M1, const M2t &M2,
const M3t &M3) : Option(Optional | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
// Five options...
template<class M0t, class M1t, class M2t, class M3t, class M4t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4) : Option(Optional | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this);
done();
}
// Six options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5) : Option(Optional | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this);
done();
}
// Seven options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t, class M6t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5,
const M6t &M6) : Option(Optional | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this);
done();
}
// Eight options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t, class M6t, class M7t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6,
const M7t &M7) : Option(Optional | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this);
done();
}
};
EXTERN_TEMPLATE_INSTANTIATION(class opt<unsigned>);
EXTERN_TEMPLATE_INSTANTIATION(class opt<int>);
EXTERN_TEMPLATE_INSTANTIATION(class opt<std::string>);
EXTERN_TEMPLATE_INSTANTIATION(class opt<char>);
EXTERN_TEMPLATE_INSTANTIATION(class opt<bool>);
//===----------------------------------------------------------------------===//
// list_storage class
// Default storage class definition: external storage. This implementation
// assumes the user will specify a variable to store the data into with the
// cl::location(x) modifier.
//
template<class DataType, class StorageClass>
class list_storage {
StorageClass *Location; // Where to store the object...
public:
list_storage() : Location(0) {}
bool setLocation(Option &O, StorageClass &L) {
if (Location)
return O.error("cl::location(x) specified more than once!");
Location = &L;
return false;
}
template<class T>
void addValue(const T &V) {
assert(Location != 0 && "cl::location(...) not specified for a command "
"line option with external storage!");
Location->push_back(V);
}
};
// Define how to hold a class type object, such as a string. Since we can
// inherit from a class, we do so. This makes us exactly compatible with the
// object in all cases that it is used.
//
template<class DataType>
class list_storage<DataType, bool> : public std::vector<DataType> {
public:
template<class T>
void addValue(const T &V) { std::vector<DataType>::push_back(V); }
};
//===----------------------------------------------------------------------===//
// list - A list of command line options.
//
template <class DataType, class Storage = bool,
class ParserClass = parser<DataType> >
class list : public Option, public list_storage<DataType, Storage> {
std::vector<unsigned> Positions;
ParserClass Parser;
virtual enum ValueExpected getValueExpectedFlagDefault() const {
return Parser.getValueExpectedFlagDefault();
}
virtual void getExtraOptionNames(SmallVectorImpl<const char*> &OptionNames) {
return Parser.getExtraOptionNames(OptionNames);
}
virtual bool handleOccurrence(unsigned pos, StringRef ArgName, StringRef Arg){
typename ParserClass::parser_data_type Val =
typename ParserClass::parser_data_type();
if (Parser.parse(*this, ArgName, Arg, Val))
return true; // Parse Error!
list_storage<DataType, Storage>::addValue(Val);
setPosition(pos);
Positions.push_back(pos);
return false;
}
// Forward printing stuff to the parser...
virtual size_t getOptionWidth() const {return Parser.getOptionWidth(*this);}
virtual void printOptionInfo(size_t GlobalWidth) const {
Parser.printOptionInfo(*this, GlobalWidth);
}
// Unimplemented: list options don't currently store their default value.
virtual void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const {}
void done() {
addArgument();
Parser.initialize(*this);
}
public:
ParserClass &getParser() { return Parser; }
unsigned getPosition(unsigned optnum) const {
assert(optnum < this->size() && "Invalid option index");
return Positions[optnum];
}
void setNumAdditionalVals(unsigned n) {
Option::setNumAdditionalVals(n);
}
// One option...
template<class M0t>
explicit list(const M0t &M0) : Option(ZeroOrMore | NotHidden) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
list(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
list(const M0t &M0, const M1t &M1, const M2t &M2)
: Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3)
: Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
// Five options...
template<class M0t, class M1t, class M2t, class M3t, class M4t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this);
done();
}
// Six options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this);
done();
}
// Seven options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t, class M6t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6)
: Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this);
done();
}
// Eight options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t, class M6t, class M7t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6,
const M7t &M7) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this);
done();
}
};
// multi_val - Modifier to set the number of additional values.
struct multi_val {
unsigned AdditionalVals;
explicit multi_val(unsigned N) : AdditionalVals(N) {}
template <typename D, typename S, typename P>
void apply(list<D, S, P> &L) const { L.setNumAdditionalVals(AdditionalVals); }
};
//===----------------------------------------------------------------------===//
// bits_storage class
// Default storage class definition: external storage. This implementation
// assumes the user will specify a variable to store the data into with the
// cl::location(x) modifier.
//
template<class DataType, class StorageClass>
class bits_storage {
unsigned *Location; // Where to store the bits...
template<class T>
static unsigned Bit(const T &V) {
unsigned BitPos = reinterpret_cast<unsigned>(V);
assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
"enum exceeds width of bit vector!");
return 1 << BitPos;
}
public:
bits_storage() : Location(0) {}
bool setLocation(Option &O, unsigned &L) {
if (Location)
return O.error("cl::location(x) specified more than once!");
Location = &L;
return false;
}
template<class T>
void addValue(const T &V) {
assert(Location != 0 && "cl::location(...) not specified for a command "
"line option with external storage!");
*Location |= Bit(V);
}
unsigned getBits() { return *Location; }
template<class T>
bool isSet(const T &V) {
return (*Location & Bit(V)) != 0;
}
};
// Define how to hold bits. Since we can inherit from a class, we do so.
// This makes us exactly compatible with the bits in all cases that it is used.
//
template<class DataType>
class bits_storage<DataType, bool> {
unsigned Bits; // Where to store the bits...
template<class T>
static unsigned Bit(const T &V) {
unsigned BitPos = (unsigned)V;
assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
"enum exceeds width of bit vector!");
return 1 << BitPos;
}
public:
template<class T>
void addValue(const T &V) {
Bits |= Bit(V);
}
unsigned getBits() { return Bits; }
template<class T>
bool isSet(const T &V) {
return (Bits & Bit(V)) != 0;
}
};
//===----------------------------------------------------------------------===//
// bits - A bit vector of command options.
//
template <class DataType, class Storage = bool,
class ParserClass = parser<DataType> >
class bits : public Option, public bits_storage<DataType, Storage> {
std::vector<unsigned> Positions;
ParserClass Parser;
virtual enum ValueExpected getValueExpectedFlagDefault() const {
return Parser.getValueExpectedFlagDefault();
}
virtual void getExtraOptionNames(SmallVectorImpl<const char*> &OptionNames) {
return Parser.getExtraOptionNames(OptionNames);
}
virtual bool handleOccurrence(unsigned pos, StringRef ArgName, StringRef Arg){
typename ParserClass::parser_data_type Val =
typename ParserClass::parser_data_type();
if (Parser.parse(*this, ArgName, Arg, Val))
return true; // Parse Error!
addValue(Val);
setPosition(pos);
Positions.push_back(pos);
return false;
}
// Forward printing stuff to the parser...
virtual size_t getOptionWidth() const {return Parser.getOptionWidth(*this);}
virtual void printOptionInfo(size_t GlobalWidth) const {
Parser.printOptionInfo(*this, GlobalWidth);
}
// Unimplemented: bits options don't currently store their default values.
virtual void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const {}
void done() {
addArgument();
Parser.initialize(*this);
}
public:
ParserClass &getParser() { return Parser; }
unsigned getPosition(unsigned optnum) const {
assert(optnum < this->size() && "Invalid option index");
return Positions[optnum];
}
// One option...
template<class M0t>
explicit bits(const M0t &M0) : Option(ZeroOrMore | NotHidden) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
bits(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
bits(const M0t &M0, const M1t &M1, const M2t &M2)
: Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3)
: Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
// Five options...
template<class M0t, class M1t, class M2t, class M3t, class M4t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this);
done();
}
// Six options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this);
done();
}
// Seven options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t, class M6t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6)
: Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this);
done();
}
// Eight options...
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t, class M6t, class M7t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6,
const M7t &M7) : Option(ZeroOrMore | NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this);
done();
}
};
//===----------------------------------------------------------------------===//
// Aliased command line option (alias this name to a preexisting name)
//
class alias : public Option {
Option *AliasFor;
virtual bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
StringRef Arg) {
return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg);
}
// Handle printing stuff...
virtual size_t getOptionWidth() const;
virtual void printOptionInfo(size_t GlobalWidth) const;
// Aliases do not need to print their values.
virtual void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const {}
void done() {
if (!hasArgStr())
error("cl::alias must have argument name specified!");
if (AliasFor == 0)
error("cl::alias must have an cl::aliasopt(option) specified!");
addArgument();
}
public:
void setAliasFor(Option &O) {
if (AliasFor)
error("cl::alias must only have one cl::aliasopt(...) specified!");
AliasFor = &O;
}
// One option...
template<class M0t>
explicit alias(const M0t &M0) : Option(Optional | Hidden), AliasFor(0) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
alias(const M0t &M0, const M1t &M1) : Option(Optional | Hidden), AliasFor(0) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
alias(const M0t &M0, const M1t &M1, const M2t &M2)
: Option(Optional | Hidden), AliasFor(0) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
alias(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3)
: Option(Optional | Hidden), AliasFor(0) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
};
// aliasfor - Modifier to set the option an alias aliases.
struct aliasopt {
Option &Opt;
explicit aliasopt(Option &O) : Opt(O) {}
void apply(alias &A) const { A.setAliasFor(Opt); }
};
// extrahelp - provide additional help at the end of the normal help
// output. All occurrences of cl::extrahelp will be accumulated and
// printed to stderr at the end of the regular help, just before
// exit is called.
struct extrahelp {
const char * morehelp;
explicit extrahelp(const char* help);
};
void PrintVersionMessage();
// This function just prints the help message, exactly the same way as if the
// -help option had been given on the command line.
// NOTE: THIS FUNCTION TERMINATES THE PROGRAM!
void PrintHelpMessage();
} // End namespace cl
} // End namespace llvm
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/PointerLikeTypeTraits.h
|
//===- llvm/Support/PointerLikeTypeTraits.h - Pointer Traits ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PointerLikeTypeTraits class. This allows data
// structures to reason about pointers and other things that are pointer sized.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
/// PointerLikeTypeTraits - This is a traits object that is used to handle
/// pointer types and things that are just wrappers for pointers as a uniform
/// entity.
template <typename T>
class PointerLikeTypeTraits {
// getAsVoidPointer
// getFromVoidPointer
// getNumLowBitsAvailable
};
// Provide PointerLikeTypeTraits for non-cvr pointers.
template<typename T>
class PointerLikeTypeTraits<T*> {
public:
static inline void *getAsVoidPointer(T* P) { return P; }
static inline T *getFromVoidPointer(void *P) {
return static_cast<T*>(P);
}
/// Note, we assume here that malloc returns objects at least 4-byte aligned.
/// However, this may be wrong, or pointers may be from something other than
/// malloc. In this case, you should specialize this template to reduce this.
///
/// All clients should use assertions to do a run-time check to ensure that
/// this is actually true.
enum { NumLowBitsAvailable = 2 };
};
// Provide PointerLikeTypeTraits for const pointers.
template<typename T>
class PointerLikeTypeTraits<const T*> {
typedef PointerLikeTypeTraits<T*> NonConst;
public:
static inline const void *getAsVoidPointer(const T* P) {
return NonConst::getAsVoidPointer(const_cast<T*>(P));
}
static inline const T *getFromVoidPointer(const void *P) {
return NonConst::getFromVoidPointer(const_cast<void*>(P));
}
enum { NumLowBitsAvailable = NonConst::NumLowBitsAvailable };
};
// Provide PointerLikeTypeTraits for uintptr_t.
template<>
class PointerLikeTypeTraits<uintptr_t> {
public:
static inline void *getAsVoidPointer(uintptr_t P) {
return reinterpret_cast<void*>(P);
}
static inline uintptr_t getFromVoidPointer(void *P) {
return reinterpret_cast<uintptr_t>(P);
}
// No bits are available!
enum { NumLowBitsAvailable = 0 };
};
} // end namespace llvm
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/Compiler.h
|
//===-- llvm/Support/Compiler.h - Compiler abstraction support --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several macros, based on the current compiler. This allows
// use of compiler-specific features in a way that remains portable.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_COMPILER_H
#define LLVM_SUPPORT_COMPILER_H
#ifndef __has_feature
# define __has_feature(x) 0
#endif
/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it. Can also be used to mark variables and
/// functions, making them private to any shared library they are linked into.
#if (__GNUC__ >= 4) && !defined(__MINGW32__) && !defined(__CYGWIN__)
#define LLVM_LIBRARY_VISIBILITY __attribute__ ((visibility("hidden")))
#else
#define LLVM_LIBRARY_VISIBILITY
#endif
#if (__GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
#define LLVM_ATTRIBUTE_USED __attribute__((__used__))
#else
#define LLVM_ATTRIBUTE_USED
#endif
// Some compilers warn about unused functions. When a function is sometimes
// used or not depending on build settings (e.g. a function only called from
// within "assert"), this attribute can be used to suppress such warnings.
//
// However, it shouldn't be used for unused *variables*, as those have a much
// more portable solution:
// (void)unused_var_name;
// Prefer cast-to-void wherever it is sufficient.
#if (__GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
#define LLVM_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define LLVM_ATTRIBUTE_UNUSED
#endif
#ifdef __GNUC__ // aka 'ATTRIBUTE_CONST' but following LLVM Conventions.
#define LLVM_ATTRIBUTE_READNONE __attribute__((__const__))
#else
#define LLVM_ATTRIBUTE_READNONE
#endif
#ifdef __GNUC__ // aka 'ATTRIBUTE_PURE' but following LLVM Conventions.
#define LLVM_ATTRIBUTE_READONLY __attribute__((__pure__))
#else
#define LLVM_ATTRIBUTE_READONLY
#endif
#if (__GNUC__ >= 4)
#define BUILTIN_EXPECT(EXPR, VALUE) __builtin_expect((EXPR), (VALUE))
#else
#define BUILTIN_EXPECT(EXPR, VALUE) (EXPR)
#endif
// C++ doesn't support 'extern template' of template specializations. GCC does,
// but requires __extension__ before it. In the header, use this:
// EXTERN_TEMPLATE_INSTANTIATION(class foo<bar>);
// in the .cpp file, use this:
// TEMPLATE_INSTANTIATION(class foo<bar>);
#ifdef __GNUC__
#define EXTERN_TEMPLATE_INSTANTIATION(X) __extension__ extern template X
#define TEMPLATE_INSTANTIATION(X) template X
#else
#define EXTERN_TEMPLATE_INSTANTIATION(X)
#define TEMPLATE_INSTANTIATION(X)
#endif
// LLVM_ATTRIBUTE_NOINLINE - On compilers where we have a directive to do so,
// mark a method "not for inlining".
#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
#define LLVM_ATTRIBUTE_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_NOINLINE __declspec(noinline)
#else
#define LLVM_ATTRIBUTE_NOINLINE
#endif
// LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do
// so, mark a method "always inline" because it is performance sensitive. GCC
// 3.4 supported this but is buggy in various cases and produces unimplemented
// errors, just use it in GCC 4.0 and later.
#if __GNUC__ > 3
#define LLVM_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define LLVM_ATTRIBUTE_ALWAYS_INLINE
#endif
#ifdef __GNUC__
#define LLVM_ATTRIBUTE_NORETURN __attribute__((noreturn))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_NORETURN __declspec(noreturn)
#else
#define LLVM_ATTRIBUTE_NORETURN
#endif
// LLVM_ATTRIBUTE_DEPRECATED(decl, "message")
#if __has_feature(attribute_deprecated_with_message)
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
decl __attribute__((deprecated(message)))
#elif defined(__GNUC__)
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
decl __attribute__((deprecated))
#elif defined(_MSC_VER)
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
__declspec(deprecated(message)) decl
#else
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
decl
#endif
// LLVM_BUILTIN_UNREACHABLE - On compilers which support it, expands
// to an expression which states that it is undefined behavior for the
// compiler to reach this point. Otherwise is not defined.
#if defined(__clang__) || (__GNUC__ > 4) \
|| (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
# define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
#endif
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/Recycler.h
|
//==- llvm/Support/Recycler.h - Recycling Allocator --------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Recycler class template. See the doxygen comment for
// Recycler for more details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_RECYCLER_H
#define LLVM_SUPPORT_RECYCLER_H
#include "llvm/ADT/ilist.h"
#include "llvm/Support/AlignOf.h"
#include <cassert>
namespace llvm {
/// PrintRecyclingAllocatorStats - Helper for RecyclingAllocator for
/// printing statistics.
///
void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize);
/// RecyclerStruct - Implementation detail for Recycler. This is a
/// class that the recycler imposes on free'd memory to carve out
/// next/prev pointers.
struct RecyclerStruct {
RecyclerStruct *Prev, *Next;
};
template<>
struct ilist_traits<RecyclerStruct> :
public ilist_default_traits<RecyclerStruct> {
static RecyclerStruct *getPrev(const RecyclerStruct *t) { return t->Prev; }
static RecyclerStruct *getNext(const RecyclerStruct *t) { return t->Next; }
static void setPrev(RecyclerStruct *t, RecyclerStruct *p) { t->Prev = p; }
static void setNext(RecyclerStruct *t, RecyclerStruct *n) { t->Next = n; }
mutable RecyclerStruct Sentinel;
RecyclerStruct *createSentinel() const {
return &Sentinel;
}
static void destroySentinel(RecyclerStruct *) {}
RecyclerStruct *provideInitialHead() const { return createSentinel(); }
RecyclerStruct *ensureHead(RecyclerStruct*) const { return createSentinel(); }
static void noteHead(RecyclerStruct*, RecyclerStruct*) {}
static void deleteNode(RecyclerStruct *) {
assert(0 && "Recycler's ilist_traits shouldn't see a deleteNode call!");
}
};
/// Recycler - This class manages a linked-list of deallocated nodes
/// and facilitates reusing deallocated memory in place of allocating
/// new memory.
///
template<class T, size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment>
class Recycler {
/// FreeList - Doubly-linked list of nodes that have deleted contents and
/// are not in active use.
///
iplist<RecyclerStruct> FreeList;
public:
~Recycler() {
// If this fails, either the callee has lost track of some allocation,
// or the callee isn't tracking allocations and should just call
// clear() before deleting the Recycler.
assert(FreeList.empty() && "Non-empty recycler deleted!");
}
/// clear - Release all the tracked allocations to the allocator. The
/// recycler must be free of any tracked allocations before being
/// deleted; calling clear is one way to ensure this.
template<class AllocatorType>
void clear(AllocatorType &Allocator) {
while (!FreeList.empty()) {
T *t = reinterpret_cast<T *>(FreeList.remove(FreeList.begin()));
Allocator.Deallocate(t);
}
}
template<class SubClass, class AllocatorType>
SubClass *Allocate(AllocatorType &Allocator) {
assert(sizeof(SubClass) <= Size &&
"Recycler allocation size is less than object size!");
assert(AlignOf<SubClass>::Alignment <= Align &&
"Recycler allocation alignment is less than object alignment!");
return !FreeList.empty() ?
reinterpret_cast<SubClass *>(FreeList.remove(FreeList.begin())) :
static_cast<SubClass *>(Allocator.Allocate(Size, Align));
}
template<class AllocatorType>
T *Allocate(AllocatorType &Allocator) {
return Allocate<T>(Allocator);
}
template<class SubClass, class AllocatorType>
void Deallocate(AllocatorType & /*Allocator*/, SubClass* Element) {
FreeList.push_front(reinterpret_cast<RecyclerStruct *>(Element));
}
void PrintStats() {
PrintRecyclerStats(Size, Align, FreeList.size());
}
};
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/Allocator.h
|
//===--- Allocator.h - Simple memory allocation abstraction -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MallocAllocator and BumpPtrAllocator interfaces.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/DataTypes.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstddef>
namespace llvm {
template <typename T> struct ReferenceAdder { typedef T& result; };
template <typename T> struct ReferenceAdder<T&> { typedef T result; };
class MallocAllocator {
public:
MallocAllocator() {}
~MallocAllocator() {}
void Reset() {}
void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
template <typename T>
T *Allocate() { return static_cast<T*>(malloc(sizeof(T))); }
template <typename T>
T *Allocate(size_t Num) {
return static_cast<T*>(malloc(sizeof(T)*Num));
}
void Deallocate(const void *Ptr) { free(const_cast<void*>(Ptr)); }
void PrintStats() const {}
};
/// MemSlab - This structure lives at the beginning of every slab allocated by
/// the bump allocator.
class MemSlab {
public:
size_t Size;
MemSlab *NextPtr;
};
/// SlabAllocator - This class can be used to parameterize the underlying
/// allocation strategy for the bump allocator. In particular, this is used
/// by the JIT to allocate contiguous swathes of executable memory. The
/// interface uses MemSlab's instead of void *'s so that the allocator
/// doesn't have to remember the size of the pointer it allocated.
class SlabAllocator {
public:
virtual ~SlabAllocator();
virtual MemSlab *Allocate(size_t Size) = 0;
virtual void Deallocate(MemSlab *Slab) = 0;
};
/// MallocSlabAllocator - The default slab allocator for the bump allocator
/// is an adapter class for MallocAllocator that just forwards the method
/// calls and translates the arguments.
class MallocSlabAllocator : public SlabAllocator {
/// Allocator - The underlying allocator that we forward to.
///
MallocAllocator Allocator;
public:
MallocSlabAllocator() : Allocator() { }
virtual ~MallocSlabAllocator();
virtual MemSlab *Allocate(size_t Size);
virtual void Deallocate(MemSlab *Slab);
};
/// BumpPtrAllocator - This allocator is useful for containers that need
/// very simple memory allocation strategies. In particular, this just keeps
/// allocating memory, and never deletes it until the entire block is dead. This
/// makes allocation speedy, but must only be used when the trade-off is ok.
class BumpPtrAllocator {
BumpPtrAllocator(const BumpPtrAllocator &); // do not implement
void operator=(const BumpPtrAllocator &); // do not implement
/// SlabSize - Allocate data into slabs of this size unless we get an
/// allocation above SizeThreshold.
size_t SlabSize;
/// SizeThreshold - For any allocation larger than this threshold, we should
/// allocate a separate slab.
size_t SizeThreshold;
/// Allocator - The underlying allocator we use to get slabs of memory. This
/// defaults to MallocSlabAllocator, which wraps malloc, but it could be
/// changed to use a custom allocator.
SlabAllocator &Allocator;
/// CurSlab - The slab that we are currently allocating into.
///
MemSlab *CurSlab;
/// CurPtr - The current pointer into the current slab. This points to the
/// next free byte in the slab.
char *CurPtr;
/// End - The end of the current slab.
///
char *End;
/// BytesAllocated - This field tracks how many bytes we've allocated, so
/// that we can compute how much space was wasted.
size_t BytesAllocated;
/// AlignPtr - Align Ptr to Alignment bytes, rounding up. Alignment should
/// be a power of two. This method rounds up, so AlignPtr(7, 4) == 8 and
/// AlignPtr(8, 4) == 8.
static char *AlignPtr(char *Ptr, size_t Alignment);
/// StartNewSlab - Allocate a new slab and move the bump pointers over into
/// the new slab. Modifies CurPtr and End.
void StartNewSlab();
/// DeallocateSlabs - Deallocate all memory slabs after and including this
/// one.
void DeallocateSlabs(MemSlab *Slab);
static MallocSlabAllocator DefaultSlabAllocator;
template<typename T> friend class SpecificBumpPtrAllocator;
public:
BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096,
SlabAllocator &allocator = DefaultSlabAllocator);
~BumpPtrAllocator();
/// Reset - Deallocate all but the current slab and reset the current pointer
/// to the beginning of it, freeing all memory allocated so far.
void Reset();
/// Allocate - Allocate space at the specified alignment.
///
void *Allocate(size_t Size, size_t Alignment);
/// Allocate space, but do not construct, one object.
///
template <typename T>
T *Allocate() {
return static_cast<T*>(Allocate(sizeof(T),AlignOf<T>::Alignment));
}
/// Allocate space for an array of objects. This does not construct the
/// objects though.
template <typename T>
T *Allocate(size_t Num) {
return static_cast<T*>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
}
/// Allocate space for a specific count of elements and with a specified
/// alignment.
template <typename T>
T *Allocate(size_t Num, size_t Alignment) {
// Round EltSize up to the specified alignment.
size_t EltSize = (sizeof(T)+Alignment-1)&(-Alignment);
return static_cast<T*>(Allocate(Num * EltSize, Alignment));
}
void Deallocate(const void * /*Ptr*/) {}
unsigned GetNumSlabs() const;
void PrintStats() const;
/// Compute the total physical memory allocated by this allocator.
size_t getTotalMemory() const;
};
/// SpecificBumpPtrAllocator - Same as BumpPtrAllocator but allows only
/// elements of one type to be allocated. This allows calling the destructor
/// in DestroyAll() and when the allocator is destroyed.
template <typename T>
class SpecificBumpPtrAllocator {
BumpPtrAllocator Allocator;
public:
SpecificBumpPtrAllocator(size_t size = 4096, size_t threshold = 4096,
SlabAllocator &allocator = BumpPtrAllocator::DefaultSlabAllocator)
: Allocator(size, threshold, allocator) {}
~SpecificBumpPtrAllocator() {
DestroyAll();
}
/// Call the destructor of each allocated object and deallocate all but the
/// current slab and reset the current pointer to the beginning of it, freeing
/// all memory allocated so far.
void DestroyAll() {
MemSlab *Slab = Allocator.CurSlab;
while (Slab) {
char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr :
(char *)Slab + Slab->Size;
for (char *Ptr = (char*)(Slab+1); Ptr < End; Ptr += sizeof(T)) {
Ptr = Allocator.AlignPtr(Ptr, alignOf<T>());
if (Ptr + sizeof(T) <= End)
reinterpret_cast<T*>(Ptr)->~T();
}
Slab = Slab->NextPtr;
}
Allocator.Reset();
}
/// Allocate space for a specific count of elements.
T *Allocate(size_t num = 1) {
return Allocator.Allocate<T>(num);
}
};
} // end namespace llvm
inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
struct S {
char c;
union {
double D;
long double LD;
long long L;
void *P;
} x;
};
return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
offsetof(S, x)));
}
inline void operator delete(void *, llvm::BumpPtrAllocator &) {}
#endif // LLVM_SUPPORT_ALLOCATOR_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/Memory.h
|
//===- llvm/Support/Memory.h - Memory Support --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::Memory class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SYSTEM_MEMORY_H
#define LLVM_SYSTEM_MEMORY_H
#include "llvm/Support/DataTypes.h"
#include <string>
namespace llvm {
namespace sys {
/// This class encapsulates the notion of a memory block which has an address
/// and a size. It is used by the Memory class (a friend) as the result of
/// various memory allocation operations.
/// @see Memory
/// @brief Memory block abstraction.
class MemoryBlock {
public:
MemoryBlock() : Address(0), Size(0) { }
MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
void *base() const { return Address; }
size_t size() const { return Size; }
private:
void *Address; ///< Address of first byte of memory area
size_t Size; ///< Size, in bytes of the memory area
friend class Memory;
};
/// This class provides various memory handling functions that manipulate
/// MemoryBlock instances.
/// @since 1.4
/// @brief An abstraction for memory operations.
class Memory {
public:
/// This method allocates a block of Read/Write/Execute memory that is
/// suitable for executing dynamically generated code (e.g. JIT). An
/// attempt to allocate \p NumBytes bytes of virtual memory is made.
/// \p NearBlock may point to an existing allocation in which case
/// an attempt is made to allocate more memory near the existing block.
///
/// On success, this returns a non-null memory block, otherwise it returns
/// a null memory block and fills in *ErrMsg.
///
/// @brief Allocate Read/Write/Execute memory.
static MemoryBlock AllocateRWX(size_t NumBytes,
const MemoryBlock *NearBlock,
std::string *ErrMsg = 0);
/// This method releases a block of Read/Write/Execute memory that was
/// allocated with the AllocateRWX method. It should not be used to
/// release any memory block allocated any other way.
///
/// On success, this returns false, otherwise it returns true and fills
/// in *ErrMsg.
/// @brief Release Read/Write/Execute memory.
static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = 0);
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
static void InvalidateInstructionCache(const void *Addr, size_t Len);
/// setExecutable - Before the JIT can run a block of code, it has to be
/// given read and executable privilege. Return true if it is already r-x
/// or the system is able to change its previlege.
static bool setExecutable(MemoryBlock &M, std::string *ErrMsg = 0);
/// setWritable - When adding to a block of code, the JIT may need
/// to mark a block of code as RW since the protections are on page
/// boundaries, and the JIT internal allocations are not page aligned.
static bool setWritable(MemoryBlock &M, std::string *ErrMsg = 0);
/// setRangeExecutable - Mark the page containing a range of addresses
/// as executable.
static bool setRangeExecutable(const void *Addr, size_t Size);
/// setRangeWritable - Mark the page containing a range of addresses
/// as writable.
static bool setRangeWritable(const void *Addr, size_t Size);
};
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/type_traits.h
|
//===- llvm/Support/type_traits.h - Simplfied type traits -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides a template class that determines if a type is a class or
// not. The basic mechanism, based on using the pointer to member function of
// a zero argument to a function was "boosted" from the boost type_traits
// library. See http://www.boost.org/ for all the gory details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
#define LLVM_SUPPORT_TYPE_TRAITS_H
#include <utility>
// This is actually the conforming implementation which works with abstract
// classes. However, enough compilers have trouble with it that most will use
// the one in boost/type_traits/object_traits.hpp. This implementation actually
// works with VC7.0, but other interactions seem to fail when we use it.
namespace llvm {
namespace dont_use
{
// These two functions should never be used. They are helpers to
// the is_class template below. They cannot be located inside
// is_class because doing so causes at least GCC to think that
// the value of the "value" enumerator is not constant. Placing
// them out here (for some strange reason) allows the sizeof
// operator against them to magically be constant. This is
// important to make the is_class<T>::value idiom zero cost. it
// evaluates to a constant 1 or 0 depending on whether the
// parameter T is a class or not (respectively).
template<typename T> char is_class_helper(void(T::*)());
template<typename T> double is_class_helper(...);
}
template <typename T>
struct is_class
{
// is_class<> metafunction due to Paul Mensonides ([email protected]). For
// more details:
// http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1
public:
enum { value = sizeof(char) == sizeof(dont_use::is_class_helper<T>(0)) };
};
/// isPodLike - This is a type trait that is used to determine whether a given
/// type can be copied around with memcpy instead of running ctors etc.
template <typename T>
struct isPodLike {
// If we don't know anything else, we can (at least) assume that all non-class
// types are PODs.
static const bool value = !is_class<T>::value;
};
// std::pair's are pod-like if their elements are.
template<typename T, typename U>
struct isPodLike<std::pair<T, U> > {
static const bool value = isPodLike<T>::value & isPodLike<U>::value;
};
/// \brief Metafunction that determines whether the two given types are
/// equivalent.
template<typename T, typename U>
struct is_same {
static const bool value = false;
};
template<typename T>
struct is_same<T, T> {
static const bool value = true;
};
// enable_if_c - Enable/disable a template based on a metafunction
template<bool Cond, typename T = void>
struct enable_if_c {
typedef T type;
};
template<typename T> struct enable_if_c<false, T> { };
// enable_if - Enable/disable a template based on a metafunction
template<typename Cond, typename T = void>
struct enable_if : public enable_if_c<Cond::value, T> { };
namespace dont_use {
template<typename Base> char base_of_helper(const volatile Base*);
template<typename Base> double base_of_helper(...);
}
/// is_base_of - Metafunction to determine whether one type is a base class of
/// (or identical to) another type.
template<typename Base, typename Derived>
struct is_base_of {
static const bool value
= is_class<Base>::value && is_class<Derived>::value &&
sizeof(char) == sizeof(dont_use::base_of_helper<Base>((Derived*)0));
};
// remove_pointer - Metafunction to turn Foo* into Foo. Defined in
// C++0x [meta.trans.ptr].
template <typename T> struct remove_pointer { typedef T type; };
template <typename T> struct remove_pointer<T*> { typedef T type; };
template <typename T> struct remove_pointer<T*const> { typedef T type; };
template <typename T> struct remove_pointer<T*volatile> { typedef T type; };
template <typename T> struct remove_pointer<T*const volatile> {
typedef T type; };
template <bool, typename T, typename F>
struct conditional { typedef T type; };
template <typename T, typename F>
struct conditional<false, T, F> { typedef F type; };
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/DataTypes.h.cmake
|
/*===-- include/Support/DataTypes.h - Define fixed size types -----*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file contains definitions to figure out the size of _HOST_ data types.*|
|* This file is important because different host OS's define different macros,*|
|* which makes portability tough. This file exports the following *|
|* definitions: *|
|* *|
|* [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types*|
|* [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. *|
|* *|
|* No library is required when using these functinons. *|
|* *|
|*===----------------------------------------------------------------------===*/
/* Please leave this file C-compatible. */
#ifndef SUPPORT_DATATYPES_H
#define SUPPORT_DATATYPES_H
#cmakedefine HAVE_SYS_TYPES_H ${HAVE_SYS_TYPES_H}
#cmakedefine HAVE_INTTYPES_H ${HAVE_INTTYPES_H}
#cmakedefine HAVE_STDINT_H ${HAVE_STDINT_H}
#cmakedefine HAVE_UINT64_T ${HAVE_UINT64_T}
#cmakedefine HAVE_U_INT64_T ${HAVE_U_INT64_T}
#ifdef __cplusplus
#include <cmath>
#else
#include <math.h>
#endif
#ifndef _MSC_VER
/* Note that this header's correct operation depends on __STDC_LIMIT_MACROS
being defined. We would define it here, but in order to prevent Bad Things
happening when system headers or C++ STL headers include stdint.h before we
define it here, we define it on the g++ command line (in Makefile.rules). */
#if !defined(__STDC_LIMIT_MACROS)
# error "Must #define __STDC_LIMIT_MACROS before #including Support/DataTypes.h"
#endif
#if !defined(__STDC_CONSTANT_MACROS)
# error "Must #define __STDC_CONSTANT_MACROS before " \
"#including Support/DataTypes.h"
#endif
/* Note that <inttypes.h> includes <stdint.h>, if this is a C99 system. */
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#ifdef _AIX
#include "llvm/Support/AIXDataTypesFix.h"
#endif
/* Handle incorrect definition of uint64_t as u_int64_t */
#ifndef HAVE_UINT64_T
#ifdef HAVE_U_INT64_T
typedef u_int64_t uint64_t;
#else
# error "Don't have a definition for uint64_t on this platform"
#endif
#endif
#ifdef _OpenBSD_
#define INT8_MAX 127
#define INT8_MIN -128
#define UINT8_MAX 255
#define INT16_MAX 32767
#define INT16_MIN -32768
#define UINT16_MAX 65535
#define INT32_MAX 2147483647
#define INT32_MIN -2147483648
#define UINT32_MAX 4294967295U
#endif
#else /* _MSC_VER */
/* Visual C++ doesn't provide standard integer headers, but it does provide
built-in data types. */
#include <stdlib.h>
#include <stddef.h>
#include <sys/types.h>
#ifdef __cplusplus
#include <cmath>
#else
#include <math.h>
#endif
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef short int16_t;
typedef unsigned short uint16_t;
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed int ssize_t;
#ifndef INT8_MAX
# define INT8_MAX 127
#endif
#ifndef INT8_MIN
# define INT8_MIN -128
#endif
#ifndef UINT8_MAX
# define UINT8_MAX 255
#endif
#ifndef INT16_MAX
# define INT16_MAX 32767
#endif
#ifndef INT16_MIN
# define INT16_MIN -32768
#endif
#ifndef UINT16_MAX
# define UINT16_MAX 65535
#endif
#ifndef INT32_MAX
# define INT32_MAX 2147483647
#endif
#ifndef INT32_MIN
/* MSC treats -2147483648 as -(2147483648U). */
# define INT32_MIN (-INT32_MAX - 1)
#endif
#ifndef UINT32_MAX
# define UINT32_MAX 4294967295U
#endif
/* Certain compatibility updates to VC++ introduce the `cstdint'
* header, which defines the INT*_C macros. On default installs they
* are absent. */
#ifndef INT8_C
# define INT8_C(C) C##i8
#endif
#ifndef UINT8_C
# define UINT8_C(C) C##ui8
#endif
#ifndef INT16_C
# define INT16_C(C) C##i16
#endif
#ifndef UINT16_C
# define UINT16_C(C) C##ui16
#endif
#ifndef INT32_C
# define INT32_C(C) C##i32
#endif
#ifndef UINT32_C
# define UINT32_C(C) C##ui32
#endif
#ifndef INT64_C
# define INT64_C(C) C##i64
#endif
#ifndef UINT64_C
# define UINT64_C(C) C##ui64
#endif
#endif /* _MSC_VER */
/* Set defaults for constants which we cannot find. */
#if !defined(INT64_MAX)
# define INT64_MAX 9223372036854775807LL
#endif
#if !defined(INT64_MIN)
# define INT64_MIN ((-INT64_MAX)-1)
#endif
#if !defined(UINT64_MAX)
# define UINT64_MAX 0xffffffffffffffffULL
#endif
#if __GNUC__ > 3
#define END_WITH_NULL __attribute__((sentinel))
#else
#define END_WITH_NULL
#endif
#ifndef HUGE_VALF
#define HUGE_VALF (float)HUGE_VAL
#endif
#endif /* SUPPORT_DATATYPES_H */
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/Support/AlignOf.h
|
//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the AlignOf function that computes alignments for
// arbitrary types.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_ALIGNOF_H
#define LLVM_SUPPORT_ALIGNOF_H
namespace llvm {
template <typename T>
struct AlignmentCalcImpl {
char x;
T t;
private:
AlignmentCalcImpl() {} // Never instantiate.
};
/// AlignOf - A templated class that contains an enum value representing
/// the alignment of the template argument. For example,
/// AlignOf<int>::Alignment represents the alignment of type "int". The
/// alignment calculated is the minimum alignment, and not necessarily
/// the "desired" alignment returned by GCC's __alignof__ (for example). Note
/// that because the alignment is an enum value, it can be used as a
/// compile-time constant (e.g., for template instantiation).
template <typename T>
struct AlignOf {
enum { Alignment =
static_cast<unsigned int>(sizeof(AlignmentCalcImpl<T>) - sizeof(T)) };
enum { Alignment_GreaterEqual_2Bytes = Alignment >= 2 ? 1 : 0 };
enum { Alignment_GreaterEqual_4Bytes = Alignment >= 4 ? 1 : 0 };
enum { Alignment_GreaterEqual_8Bytes = Alignment >= 8 ? 1 : 0 };
enum { Alignment_GreaterEqual_16Bytes = Alignment >= 16 ? 1 : 0 };
enum { Alignment_LessEqual_2Bytes = Alignment <= 2 ? 1 : 0 };
enum { Alignment_LessEqual_4Bytes = Alignment <= 4 ? 1 : 0 };
enum { Alignment_LessEqual_8Bytes = Alignment <= 8 ? 1 : 0 };
enum { Alignment_LessEqual_16Bytes = Alignment <= 16 ? 1 : 0 };
};
/// alignOf - A templated function that returns the minimum alignment of
/// of a type. This provides no extra functionality beyond the AlignOf
/// class besides some cosmetic cleanliness. Example usage:
/// alignOf<int>() returns the alignment of an int.
template <typename T>
static inline unsigned alignOf() { return AlignOf<T>::Alignment; }
} // end namespace llvm
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/ilist_node.h
|
//==-- llvm/ADT/ilist_node.h - Intrusive Linked List Helper ------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ilist_node class template, which is a convenient
// base class for creating classes that can be used with ilists.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ILIST_NODE_H
#define LLVM_ADT_ILIST_NODE_H
namespace llvm {
template<typename NodeTy>
struct ilist_traits;
/// ilist_half_node - Base class that provides prev services for sentinels.
///
template<typename NodeTy>
class ilist_half_node {
friend struct ilist_traits<NodeTy>;
NodeTy *Prev;
protected:
NodeTy *getPrev() { return Prev; }
const NodeTy *getPrev() const { return Prev; }
void setPrev(NodeTy *P) { Prev = P; }
ilist_half_node() : Prev(0) {}
};
template<typename NodeTy>
struct ilist_nextprev_traits;
/// ilist_node - Base class that provides next/prev services for nodes
/// that use ilist_nextprev_traits or ilist_default_traits.
///
template<typename NodeTy>
class ilist_node : private ilist_half_node<NodeTy> {
friend struct ilist_nextprev_traits<NodeTy>;
friend struct ilist_traits<NodeTy>;
NodeTy *Next;
NodeTy *getNext() { return Next; }
const NodeTy *getNext() const { return Next; }
void setNext(NodeTy *N) { Next = N; }
protected:
ilist_node() : Next(0) {}
public:
/// @name Adjacent Node Accessors
/// @{
/// \brief Get the previous node, or 0 for the list head.
NodeTy *getPrevNode() {
NodeTy *Prev = this->getPrev();
// Check for sentinel.
if (!Prev->getNext())
return 0;
return Prev;
}
/// \brief Get the previous node, or 0 for the list head.
const NodeTy *getPrevNode() const {
const NodeTy *Prev = this->getPrev();
// Check for sentinel.
if (!Prev->getNext())
return 0;
return Prev;
}
/// \brief Get the next node, or 0 for the list tail.
NodeTy *getNextNode() {
NodeTy *Next = getNext();
// Check for sentinel.
if (!Next->getNext())
return 0;
return Next;
}
/// \brief Get the next node, or 0 for the list tail.
const NodeTy *getNextNode() const {
const NodeTy *Next = getNext();
// Check for sentinel.
if (!Next->getNext())
return 0;
return Next;
}
/// @}
};
} // End llvm namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/SmallString.h
|
//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SmallString class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLSTRING_H
#define LLVM_ADT_SMALLSTRING_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
namespace llvm {
/// SmallString - A SmallString is just a SmallVector with methods and accessors
/// that make it work better as a string (e.g. operator+ etc).
template<unsigned InternalLen>
class SmallString : public SmallVector<char, InternalLen> {
public:
// Default ctor - Initialize to empty.
SmallString() {}
// Initialize from a StringRef.
SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
// Initialize with a range.
template<typename ItTy>
SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
// Copy ctor.
SmallString(const SmallString &RHS) : SmallVector<char, InternalLen>(RHS) {}
// Extra methods.
StringRef str() const { return StringRef(this->begin(), this->size()); }
// TODO: Make this const, if it's safe...
const char* c_str() {
this->push_back(0);
this->pop_back();
return this->data();
}
// Implicit conversion to StringRef.
operator StringRef() const { return str(); }
// Extra operators.
const SmallString &operator=(StringRef RHS) {
this->clear();
return *this += RHS;
}
SmallString &operator+=(StringRef RHS) {
this->append(RHS.begin(), RHS.end());
return *this;
}
SmallString &operator+=(char C) {
this->push_back(C);
return *this;
}
};
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/APSInt.h
|
//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the APSInt class, which is a simple class that
// represents an arbitrary sized integer that knows its signedness.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_APSINT_H
#define LLVM_APSINT_H
#include "llvm/ADT/APInt.h"
namespace llvm {
class APSInt : public APInt {
bool IsUnsigned;
public:
/// Default constructor that creates an uninitialized APInt.
explicit APSInt() {}
/// APSInt ctor - Create an APSInt with the specified width, default to
/// unsigned.
explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
: APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}
explicit APSInt(const APInt &I, bool isUnsigned = true)
: APInt(I), IsUnsigned(isUnsigned) {}
APSInt &operator=(const APSInt &RHS) {
APInt::operator=(RHS);
IsUnsigned = RHS.IsUnsigned;
return *this;
}
APSInt &operator=(const APInt &RHS) {
// Retain our current sign.
APInt::operator=(RHS);
return *this;
}
APSInt &operator=(uint64_t RHS) {
// Retain our current sign.
APInt::operator=(RHS);
return *this;
}
// Query sign information.
bool isSigned() const { return !IsUnsigned; }
bool isUnsigned() const { return IsUnsigned; }
void setIsUnsigned(bool Val) { IsUnsigned = Val; }
void setIsSigned(bool Val) { IsUnsigned = !Val; }
/// toString - Append this APSInt to the specified SmallString.
void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
APInt::toString(Str, Radix, isSigned());
}
/// toString - Converts an APInt to a std::string. This is an inefficient
/// method, your should prefer passing in a SmallString instead.
std::string toString(unsigned Radix) const {
return APInt::toString(Radix, isSigned());
}
using APInt::toString;
APSInt trunc(uint32_t width) const {
return APSInt(APInt::trunc(width), IsUnsigned);
}
APSInt extend(uint32_t width) const {
if (IsUnsigned)
return APSInt(zext(width), IsUnsigned);
else
return APSInt(sext(width), IsUnsigned);
}
APSInt extOrTrunc(uint32_t width) const {
if (IsUnsigned)
return APSInt(zextOrTrunc(width), IsUnsigned);
else
return APSInt(sextOrTrunc(width), IsUnsigned);
}
const APSInt &operator%=(const APSInt &RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
if (IsUnsigned)
*this = urem(RHS);
else
*this = srem(RHS);
return *this;
}
const APSInt &operator/=(const APSInt &RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
if (IsUnsigned)
*this = udiv(RHS);
else
*this = sdiv(RHS);
return *this;
}
APSInt operator%(const APSInt &RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
}
APSInt operator/(const APSInt &RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
}
APSInt operator>>(unsigned Amt) const {
return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
}
APSInt& operator>>=(unsigned Amt) {
*this = *this >> Amt;
return *this;
}
inline bool operator<(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? ult(RHS) : slt(RHS);
}
inline bool operator>(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? ugt(RHS) : sgt(RHS);
}
inline bool operator<=(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? ule(RHS) : sle(RHS);
}
inline bool operator>=(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? uge(RHS) : sge(RHS);
}
// The remaining operators just wrap the logic of APInt, but retain the
// signedness information.
APSInt operator<<(unsigned Bits) const {
return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
}
APSInt& operator<<=(unsigned Amt) {
*this = *this << Amt;
return *this;
}
APSInt& operator++() {
static_cast<APInt&>(*this)++;
return *this;
}
APSInt& operator--() {
static_cast<APInt&>(*this)--;
return *this;
}
APSInt operator++(int) {
return APSInt(++static_cast<APInt&>(*this), IsUnsigned);
}
APSInt operator--(int) {
return APSInt(--static_cast<APInt&>(*this), IsUnsigned);
}
APSInt operator-() const {
return APSInt(-static_cast<const APInt&>(*this), IsUnsigned);
}
APSInt& operator+=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) += RHS;
return *this;
}
APSInt& operator-=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) -= RHS;
return *this;
}
APSInt& operator*=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) *= RHS;
return *this;
}
APSInt& operator&=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) &= RHS;
return *this;
}
APSInt& operator|=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) |= RHS;
return *this;
}
APSInt& operator^=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) ^= RHS;
return *this;
}
APSInt operator&(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
}
APSInt And(const APSInt& RHS) const {
return this->operator&(RHS);
}
APSInt operator|(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
}
APSInt Or(const APSInt& RHS) const {
return this->operator|(RHS);
}
APSInt operator^(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
}
APSInt Xor(const APSInt& RHS) const {
return this->operator^(RHS);
}
APSInt operator*(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned);
}
APSInt operator+(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned);
}
APSInt operator-(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned);
}
APSInt operator~() const {
return APSInt(~static_cast<const APInt&>(*this), IsUnsigned);
}
/// getMaxValue - Return the APSInt representing the maximum integer value
/// with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
return APSInt(Unsigned ? APInt::getMaxValue(numBits)
: APInt::getSignedMaxValue(numBits), Unsigned);
}
/// getMinValue - Return the APSInt representing the minimum integer value
/// with the given bit width and signedness.
static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
return APSInt(Unsigned ? APInt::getMinValue(numBits)
: APInt::getSignedMinValue(numBits), Unsigned);
}
/// Profile - Used to insert APSInt objects, or objects that contain APSInt
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& ID) const;
};
inline std::ostream &operator<<(std::ostream &OS, const APSInt &I) {
I.print(OS, I.isSigned());
return OS;
}
} // end namespace llvm
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/APFloat.h
|
//== llvm/Support/APFloat.h - Arbitrary Precision Floating Point -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares a class to represent arbitrary precision floating
// point values and provide a variety of arithmetic operations on them.
//
//===----------------------------------------------------------------------===//
/* A self-contained host- and target-independent arbitrary-precision
floating-point software implementation. It uses bignum integer
arithmetic as provided by static functions in the APInt class.
The library will work with bignum integers whose parts are any
unsigned type at least 16 bits wide, but 64 bits is recommended.
Written for clarity rather than speed, in particular with a view
to use in the front-end of a cross compiler so that target
arithmetic can be correctly performed on the host. Performance
should nonetheless be reasonable, particularly for its intended
use. It may be useful as a base implementation for a run-time
library during development of a faster target-specific one.
All 5 rounding modes in the IEEE-754R draft are handled correctly
for all implemented operations. Currently implemented operations
are add, subtract, multiply, divide, fused-multiply-add,
conversion-to-float, conversion-to-integer and
conversion-from-integer. New rounding modes (e.g. away from zero)
can be added with three or four lines of code.
Four formats are built-in: IEEE single precision, double
precision, quadruple precision, and x87 80-bit extended double
(when operating with full extended precision). Adding a new
format that obeys IEEE semantics only requires adding two lines of
code: a declaration and definition of the format.
All operations return the status of that operation as an exception
bit-mask, so multiple operations can be done consecutively with
their results or-ed together. The returned status can be useful
for compiler diagnostics; e.g., inexact, underflow and overflow
can be easily diagnosed on constant folding, and compiler
optimizers can determine what exceptions would be raised by
folding operations and optimize, or perhaps not optimize,
accordingly.
At present, underflow tininess is detected after rounding; it
should be straight forward to add support for the before-rounding
case too.
The library reads hexadecimal floating point numbers as per C99,
and correctly rounds if necessary according to the specified
rounding mode. Syntax is required to have been validated by the
caller. It also converts floating point numbers to hexadecimal
text as per the C99 %a and %A conversions. The output precision
(or alternatively the natural minimal precision) can be specified;
if the requested precision is less than the natural precision the
output is correctly rounded for the specified rounding mode.
It also reads decimal floating point numbers and correctly rounds
according to the specified rounding mode.
Conversion to decimal text is not currently implemented.
Non-zero finite numbers are represented internally as a sign bit,
a 16-bit signed exponent, and the significand as an array of
integer parts. After normalization of a number of precision P the
exponent is within the range of the format, and if the number is
not denormal the P-th bit of the significand is set as an explicit
integer bit. For denormals the most significant bit is shifted
right so that the exponent is maintained at the format's minimum,
so that the smallest denormal has just the least significant bit
of the significand set. The sign of zeroes and infinities is
significant; the exponent and significand of such numbers is not
stored, but has a known implicit (deterministic) value: 0 for the
significands, 0 for zero exponent, all 1 bits for infinity
exponent. For NaNs the sign and significand are deterministic,
although not really meaningful, and preserved in non-conversion
operations. The exponent is implicitly all 1 bits.
TODO
====
Some features that may or may not be worth adding:
Binary to decimal conversion (hard).
Optional ability to detect underflow tininess before rounding.
New formats: x87 in single and double precision mode (IEEE apart
from extended exponent range) (hard).
New operations: sqrt, IEEE remainder, C90 fmod, nextafter,
nexttoward.
*/
#ifndef LLVM_FLOAT_H
#define LLVM_FLOAT_H
// APInt contains static functions implementing bignum arithmetic.
#include "llvm/ADT/APInt.h"
namespace llvm {
/* Exponents are stored as signed numbers. */
typedef signed short exponent_t;
struct fltSemantics;
class APSInt;
class StringRef;
/* When bits of a floating point number are truncated, this enum is
used to indicate what fraction of the LSB those bits represented.
It essentially combines the roles of guard and sticky bits. */
enum lostFraction { // Example of truncated bits:
lfExactlyZero, // 000000
lfLessThanHalf, // 0xxxxx x's not all zero
lfExactlyHalf, // 100000
lfMoreThanHalf // 1xxxxx x's not all zero
};
class APFloat {
public:
/* We support the following floating point semantics. */
static const fltSemantics IEEEhalf;
static const fltSemantics IEEEsingle;
static const fltSemantics IEEEdouble;
static const fltSemantics IEEEquad;
static const fltSemantics PPCDoubleDouble;
static const fltSemantics x87DoubleExtended;
/* And this pseudo, used to construct APFloats that cannot
conflict with anything real. */
static const fltSemantics Bogus;
static unsigned int semanticsPrecision(const fltSemantics &);
/* Floating point numbers have a four-state comparison relation. */
enum cmpResult {
cmpLessThan,
cmpEqual,
cmpGreaterThan,
cmpUnordered
};
/* IEEE-754R gives five rounding modes. */
enum roundingMode {
rmNearestTiesToEven,
rmTowardPositive,
rmTowardNegative,
rmTowardZero,
rmNearestTiesToAway
};
// Operation status. opUnderflow or opOverflow are always returned
// or-ed with opInexact.
enum opStatus {
opOK = 0x00,
opInvalidOp = 0x01,
opDivByZero = 0x02,
opOverflow = 0x04,
opUnderflow = 0x08,
opInexact = 0x10
};
// Category of internally-represented number.
enum fltCategory {
fcInfinity,
fcNaN,
fcNormal,
fcZero
};
enum uninitializedTag {
uninitialized
};
// Constructors.
APFloat(const fltSemantics &); // Default construct to 0.0
APFloat(const fltSemantics &, StringRef);
APFloat(const fltSemantics &, integerPart);
APFloat(const fltSemantics &, fltCategory, bool negative);
APFloat(const fltSemantics &, uninitializedTag);
explicit APFloat(double d);
explicit APFloat(float f);
explicit APFloat(const APInt &, bool isIEEE = false);
APFloat(const APFloat &);
~APFloat();
// Convenience "constructors"
static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
return APFloat(Sem, fcZero, Negative);
}
static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
return APFloat(Sem, fcInfinity, Negative);
}
/// getNaN - Factory for QNaN values.
///
/// \param Negative - True iff the NaN generated should be negative.
/// \param type - The unspecified fill bits for creating the NaN, 0 by
/// default. The value is truncated as necessary.
static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
unsigned type = 0) {
if (type) {
APInt fill(64, type);
return getQNaN(Sem, Negative, &fill);
} else {
return getQNaN(Sem, Negative, 0);
}
}
/// getQNan - Factory for QNaN values.
static APFloat getQNaN(const fltSemantics &Sem,
bool Negative = false,
const APInt *payload = 0) {
return makeNaN(Sem, false, Negative, payload);
}
/// getSNan - Factory for SNaN values.
static APFloat getSNaN(const fltSemantics &Sem,
bool Negative = false,
const APInt *payload = 0) {
return makeNaN(Sem, true, Negative, payload);
}
/// getLargest - Returns the largest finite number in the given
/// semantics.
///
/// \param Negative - True iff the number should be negative
static APFloat getLargest(const fltSemantics &Sem, bool Negative = false);
/// getSmallest - Returns the smallest (by magnitude) finite number
/// in the given semantics. Might be denormalized, which implies a
/// relative loss of precision.
///
/// \param Negative - True iff the number should be negative
static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false);
/// getSmallestNormalized - Returns the smallest (by magnitude)
/// normalized finite number in the given semantics.
///
/// \param Negative - True iff the number should be negative
static APFloat getSmallestNormalized(const fltSemantics &Sem,
bool Negative = false);
/// getAllOnesValue - Returns a float which is bitcasted from
/// an all one value int.
///
/// \param BitWidth - Select float type
/// \param isIEEE - If 128 bit number, select between PPC and IEEE
static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
/// Profile - Used to insert APFloat objects, or objects that contain
/// APFloat objects, into FoldingSets.
void Profile(FoldingSetNodeID& NID) const;
/// @brief Used by the Bitcode serializer to emit APInts to Bitcode.
void Emit(Serializer& S) const;
/// @brief Used by the Bitcode deserializer to deserialize APInts.
static APFloat ReadVal(Deserializer& D);
/* Arithmetic. */
opStatus add(const APFloat &, roundingMode);
opStatus subtract(const APFloat &, roundingMode);
opStatus multiply(const APFloat &, roundingMode);
opStatus divide(const APFloat &, roundingMode);
/* IEEE remainder. */
opStatus remainder(const APFloat &);
/* C fmod, or llvm frem. */
opStatus mod(const APFloat &, roundingMode);
opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode);
/* Sign operations. */
void changeSign();
void clearSign();
void copySign(const APFloat &);
/* Conversions. */
opStatus convert(const fltSemantics &, roundingMode, bool *);
opStatus convertToInteger(integerPart *, unsigned int, bool,
roundingMode, bool *) const;
opStatus convertToInteger(APSInt&, roundingMode, bool *) const;
opStatus convertFromAPInt(const APInt &,
bool, roundingMode);
opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int,
bool, roundingMode);
opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
bool, roundingMode);
opStatus convertFromString(StringRef, roundingMode);
APInt bitcastToAPInt() const;
double convertToDouble() const;
float convertToFloat() const;
/* The definition of equality is not straightforward for floating point,
so we won't use operator==. Use one of the following, or write
whatever it is you really mean. */
// bool operator==(const APFloat &) const; // DO NOT IMPLEMENT
/* IEEE comparison with another floating point number (NaNs
compare unordered, 0==-0). */
cmpResult compare(const APFloat &) const;
/* Bitwise comparison for equality (QNaNs compare equal, 0!=-0). */
bool bitwiseIsEqual(const APFloat &) const;
/* Write out a hexadecimal representation of the floating point
value to DST, which must be of sufficient size, in the C99 form
[-]0xh.hhhhp[+-]d. Return the number of characters written,
excluding the terminating NUL. */
unsigned int convertToHexString(char *dst, unsigned int hexDigits,
bool upperCase, roundingMode) const;
/* Simple queries. */
fltCategory getCategory() const { return category; }
const fltSemantics &getSemantics() const { return *semantics; }
bool isZero() const { return category == fcZero; }
bool isNonZero() const { return category != fcZero; }
bool isNaN() const { return category == fcNaN; }
bool isInfinity() const { return category == fcInfinity; }
bool isNegative() const { return sign; }
bool isPosZero() const { return isZero() && !isNegative(); }
bool isNegZero() const { return isZero() && isNegative(); }
APFloat& operator=(const APFloat &);
/* Return an arbitrary integer value usable for hashing. */
uint32_t getHashValue() const;
/// Converts this value into a decimal string.
///
/// \param FormatPrecision The maximum number of digits of
/// precision to output. If there are fewer digits available,
/// zero padding will not be used unless the value is
/// integral and small enough to be expressed in
/// FormatPrecision digits. 0 means to use the natural
/// precision of the number.
/// \param FormatMaxPadding The maximum number of zeros to
/// consider inserting before falling back to scientific
/// notation. 0 means to always use scientific notation.
///
/// Number Precision MaxPadding Result
/// ------ --------- ---------- ------
/// 1.01E+4 5 2 10100
/// 1.01E+4 4 2 1.01E+4
/// 1.01E+4 5 1 1.01E+4
/// 1.01E-2 5 2 0.0101
/// 1.01E-2 4 2 0.0101
/// 1.01E-2 4 1 1.01E-2
void toString(SmallVectorImpl<char> &Str,
unsigned FormatPrecision = 0,
unsigned FormatMaxPadding = 3) const;
/// getExactInverse - If this value has an exact multiplicative inverse,
/// store it in inv and return true.
bool getExactInverse(APFloat *inv) const;
private:
/* Trivial queries. */
integerPart *significandParts();
const integerPart *significandParts() const;
unsigned int partCount() const;
/* Significand operations. */
integerPart addSignificand(const APFloat &);
integerPart subtractSignificand(const APFloat &, integerPart);
lostFraction addOrSubtractSignificand(const APFloat &, bool subtract);
lostFraction multiplySignificand(const APFloat &, const APFloat *);
lostFraction divideSignificand(const APFloat &);
void incrementSignificand();
void initialize(const fltSemantics *);
void shiftSignificandLeft(unsigned int);
lostFraction shiftSignificandRight(unsigned int);
unsigned int significandLSB() const;
unsigned int significandMSB() const;
void zeroSignificand();
/* Arithmetic on special values. */
opStatus addOrSubtractSpecials(const APFloat &, bool subtract);
opStatus divideSpecials(const APFloat &);
opStatus multiplySpecials(const APFloat &);
opStatus modSpecials(const APFloat &);
/* Miscellany. */
static APFloat makeNaN(const fltSemantics &Sem, bool SNaN, bool Negative,
const APInt *fill);
void makeNaN(bool SNaN = false, bool Neg = false, const APInt *fill = 0);
opStatus normalize(roundingMode, lostFraction);
opStatus addOrSubtract(const APFloat &, roundingMode, bool subtract);
cmpResult compareAbsoluteValue(const APFloat &) const;
opStatus handleOverflow(roundingMode);
bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
opStatus convertToSignExtendedInteger(integerPart *, unsigned int, bool,
roundingMode, bool *) const;
opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
roundingMode);
opStatus convertFromHexadecimalString(StringRef, roundingMode);
opStatus convertFromDecimalString(StringRef, roundingMode);
char *convertNormalToHexString(char *, unsigned int, bool,
roundingMode) const;
opStatus roundSignificandWithExponent(const integerPart *, unsigned int,
int, roundingMode);
APInt convertHalfAPFloatToAPInt() const;
APInt convertFloatAPFloatToAPInt() const;
APInt convertDoubleAPFloatToAPInt() const;
APInt convertQuadrupleAPFloatToAPInt() const;
APInt convertF80LongDoubleAPFloatToAPInt() const;
APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
void initFromAPInt(const APInt& api, bool isIEEE = false);
void initFromHalfAPInt(const APInt& api);
void initFromFloatAPInt(const APInt& api);
void initFromDoubleAPInt(const APInt& api);
void initFromQuadrupleAPInt(const APInt &api);
void initFromF80LongDoubleAPInt(const APInt& api);
void initFromPPCDoubleDoubleAPInt(const APInt& api);
void assign(const APFloat &);
void copySignificand(const APFloat &);
void freeSignificand();
/* What kind of semantics does this value obey? */
const fltSemantics *semantics;
/* Significand - the fraction with an explicit integer bit. Must be
at least one bit wider than the target precision. */
union Significand
{
integerPart part;
integerPart *parts;
} significand;
/* The exponent - a signed number. */
exponent_t exponent;
/* What kind of floating point number this is. */
/* Only 2 bits are required, but VisualStudio incorrectly sign extends
it. Using the extra bit keeps it from failing under VisualStudio */
fltCategory category: 3;
/* The sign bit of this number. */
unsigned int sign: 1;
/* For PPCDoubleDouble, we have a second exponent and sign (the second
significand is appended to the first one, although it would be wrong to
regard these as a single number for arithmetic purposes). These fields
are not meaningful for any other type. */
exponent_t exponent2 : 11;
unsigned int sign2: 1;
};
} /* namespace llvm */
#endif /* LLVM_FLOAT_H */
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/OwningPtr.h
|
//===- llvm/ADT/OwningPtr.h - Smart ptr that owns the pointee ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines and implements the OwningPtr class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_OWNING_PTR_H
#define LLVM_ADT_OWNING_PTR_H
#include <cassert>
#include <cstddef>
namespace llvm {
/// OwningPtr smart pointer - OwningPtr mimics a built-in pointer except that it
/// guarantees deletion of the object pointed to, either on destruction of the
/// OwningPtr or via an explicit reset(). Once created, ownership of the
/// pointee object can be taken away from OwningPtr by using the take method.
template<class T>
class OwningPtr {
OwningPtr(OwningPtr const &); // DO NOT IMPLEMENT
OwningPtr &operator=(OwningPtr const &); // DO NOT IMPLEMENT
T *Ptr;
public:
explicit OwningPtr(T *P = 0) : Ptr(P) {}
~OwningPtr() {
delete Ptr;
}
/// reset - Change the current pointee to the specified pointer. Note that
/// calling this with any pointer (including a null pointer) deletes the
/// current pointer.
void reset(T *P = 0) {
if (P == Ptr) return;
T *Tmp = Ptr;
Ptr = P;
delete Tmp;
}
/// take - Reset the owning pointer to null and return its pointer. This does
/// not delete the pointer before returning it.
T *take() {
T *Tmp = Ptr;
Ptr = 0;
return Tmp;
}
T &operator*() const {
assert(Ptr && "Cannot dereference null pointer");
return *Ptr;
}
T *operator->() const { return Ptr; }
T *get() const { return Ptr; }
operator bool() const { return Ptr != 0; }
bool operator!() const { return Ptr == 0; }
void swap(OwningPtr &RHS) {
T *Tmp = RHS.Ptr;
RHS.Ptr = Ptr;
Ptr = Tmp;
}
};
template<class T>
inline void swap(OwningPtr<T> &a, OwningPtr<T> &b) {
a.swap(b);
}
/// OwningArrayPtr smart pointer - OwningArrayPtr provides the same
/// functionality as OwningPtr, except that it works for array types.
template<class T>
class OwningArrayPtr {
OwningArrayPtr(OwningArrayPtr const &); // DO NOT IMPLEMENT
OwningArrayPtr &operator=(OwningArrayPtr const &); // DO NOT IMPLEMENT
T *Ptr;
public:
explicit OwningArrayPtr(T *P = 0) : Ptr(P) {}
~OwningArrayPtr() {
delete [] Ptr;
}
/// reset - Change the current pointee to the specified pointer. Note that
/// calling this with any pointer (including a null pointer) deletes the
/// current pointer.
void reset(T *P = 0) {
if (P == Ptr) return;
T *Tmp = Ptr;
Ptr = P;
delete [] Tmp;
}
/// take - Reset the owning pointer to null and return its pointer. This does
/// not delete the pointer before returning it.
T *take() {
T *Tmp = Ptr;
Ptr = 0;
return Tmp;
}
T &operator[](std::ptrdiff_t i) const {
assert(Ptr && "Cannot dereference null pointer");
return Ptr[i];
}
T *get() const { return Ptr; }
operator bool() const { return Ptr != 0; }
bool operator!() const { return Ptr == 0; }
void swap(OwningArrayPtr &RHS) {
T *Tmp = RHS.Ptr;
RHS.Ptr = Ptr;
Ptr = Tmp;
}
};
template<class T>
inline void swap(OwningArrayPtr<T> &a, OwningArrayPtr<T> &b) {
a.swap(b);
}
} // end namespace llvm
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/StringMap.h
|
//===--- StringMap.h - String Hash table map interface ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the StringMap class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGMAP_H
#define LLVM_ADT_STRINGMAP_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include <cstring>
namespace llvm {
template<typename ValueT>
class StringMapConstIterator;
template<typename ValueT>
class StringMapIterator;
template<typename ValueTy>
class StringMapEntry;
/// StringMapEntryInitializer - This datatype can be partially specialized for
/// various datatypes in a stringmap to allow them to be initialized when an
/// entry is default constructed for the map.
template<typename ValueTy>
class StringMapEntryInitializer {
public:
template <typename InitTy>
static void Initialize(StringMapEntry<ValueTy> &T, InitTy InitVal) {
T.second = InitVal;
}
};
/// StringMapEntryBase - Shared base class of StringMapEntry instances.
class StringMapEntryBase {
unsigned StrLen;
public:
explicit StringMapEntryBase(unsigned Len) : StrLen(Len) {}
unsigned getKeyLength() const { return StrLen; }
};
/// StringMapImpl - This is the base class of StringMap that is shared among
/// all of its instantiations.
class StringMapImpl {
public:
/// ItemBucket - The hash table consists of an array of these. If Item is
/// non-null, this is an extant entry, otherwise, it is a hole.
struct ItemBucket {
/// FullHashValue - This remembers the full hash value of the key for
/// easy scanning.
unsigned FullHashValue;
/// Item - This is a pointer to the actual item object.
StringMapEntryBase *Item;
};
protected:
ItemBucket *TheTable;
unsigned NumBuckets;
unsigned NumItems;
unsigned NumTombstones;
unsigned ItemSize;
protected:
explicit StringMapImpl(unsigned itemSize) : ItemSize(itemSize) {
// Initialize the map with zero buckets to allocation.
TheTable = 0;
NumBuckets = 0;
NumItems = 0;
NumTombstones = 0;
}
StringMapImpl(unsigned InitSize, unsigned ItemSize);
void RehashTable();
/// LookupBucketFor - Look up the bucket that the specified string should end
/// up in. If it already exists as a key in the map, the Item pointer for the
/// specified bucket will be non-null. Otherwise, it will be null. In either
/// case, the FullHashValue field of the bucket will be set to the hash value
/// of the string.
unsigned LookupBucketFor(StringRef Key);
/// FindKey - Look up the bucket that contains the specified key. If it exists
/// in the map, return the bucket number of the key. Otherwise return -1.
/// This does not modify the map.
int FindKey(StringRef Key) const;
/// RemoveKey - Remove the specified StringMapEntry from the table, but do not
/// delete it. This aborts if the value isn't in the table.
void RemoveKey(StringMapEntryBase *V);
/// RemoveKey - Remove the StringMapEntry for the specified key from the
/// table, returning it. If the key is not in the table, this returns null.
StringMapEntryBase *RemoveKey(StringRef Key);
private:
void init(unsigned Size);
public:
static StringMapEntryBase *getTombstoneVal() {
return (StringMapEntryBase*)-1;
}
unsigned getNumBuckets() const { return NumBuckets; }
unsigned getNumItems() const { return NumItems; }
bool empty() const { return NumItems == 0; }
unsigned size() const { return NumItems; }
};
/// StringMapEntry - This is used to represent one value that is inserted into
/// a StringMap. It contains the Value itself and the key: the string length
/// and data.
template<typename ValueTy>
class StringMapEntry : public StringMapEntryBase {
public:
ValueTy second;
explicit StringMapEntry(unsigned strLen)
: StringMapEntryBase(strLen), second() {}
StringMapEntry(unsigned strLen, const ValueTy &V)
: StringMapEntryBase(strLen), second(V) {}
StringRef getKey() const {
return StringRef(getKeyData(), getKeyLength());
}
const ValueTy &getValue() const { return second; }
ValueTy &getValue() { return second; }
void setValue(const ValueTy &V) { second = V; }
/// getKeyData - Return the start of the string data that is the key for this
/// value. The string data is always stored immediately after the
/// StringMapEntry object.
const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);}
StringRef first() const { return StringRef(getKeyData(), getKeyLength()); }
/// Create - Create a StringMapEntry for the specified key and default
/// construct the value.
template<typename AllocatorTy, typename InitType>
static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd,
AllocatorTy &Allocator,
InitType InitVal) {
unsigned KeyLength = static_cast<unsigned>(KeyEnd-KeyStart);
// Okay, the item doesn't already exist, and 'Bucket' is the bucket to fill
// in. Allocate a new item with space for the string at the end and a null
// terminator.
unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
KeyLength+1;
unsigned Alignment = alignOf<StringMapEntry>();
StringMapEntry *NewItem =
static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
// Default construct the value.
new (NewItem) StringMapEntry(KeyLength);
// Copy the string information.
char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
memcpy(StrBuffer, KeyStart, KeyLength);
StrBuffer[KeyLength] = 0; // Null terminate for convenience of clients.
// Initialize the value if the client wants to.
StringMapEntryInitializer<ValueTy>::Initialize(*NewItem, InitVal);
return NewItem;
}
template<typename AllocatorTy>
static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd,
AllocatorTy &Allocator) {
return Create(KeyStart, KeyEnd, Allocator, 0);
}
/// Create - Create a StringMapEntry with normal malloc/free.
template<typename InitType>
static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd,
InitType InitVal) {
MallocAllocator A;
return Create(KeyStart, KeyEnd, A, InitVal);
}
static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd) {
return Create(KeyStart, KeyEnd, ValueTy());
}
/// GetStringMapEntryFromValue - Given a value that is known to be embedded
/// into a StringMapEntry, return the StringMapEntry itself.
static StringMapEntry &GetStringMapEntryFromValue(ValueTy &V) {
StringMapEntry *EPtr = 0;
char *Ptr = reinterpret_cast<char*>(&V) -
(reinterpret_cast<char*>(&EPtr->second) -
reinterpret_cast<char*>(EPtr));
return *reinterpret_cast<StringMapEntry*>(Ptr);
}
static const StringMapEntry &GetStringMapEntryFromValue(const ValueTy &V) {
return GetStringMapEntryFromValue(const_cast<ValueTy&>(V));
}
/// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
/// into a StringMapEntry, return the StringMapEntry itself.
static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
return *reinterpret_cast<StringMapEntry*>(Ptr);
}
/// Destroy - Destroy this StringMapEntry, releasing memory back to the
/// specified allocator.
template<typename AllocatorTy>
void Destroy(AllocatorTy &Allocator) {
// Free memory referenced by the item.
this->~StringMapEntry();
Allocator.Deallocate(this);
}
/// Destroy this object, releasing memory back to the malloc allocator.
void Destroy() {
MallocAllocator A;
Destroy(A);
}
};
/// StringMap - This is an unconventional map that is specialized for handling
/// keys that are "strings", which are basically ranges of bytes. This does some
/// funky memory allocation and hashing things to make it extremely efficient,
/// storing the string data *after* the value in the map.
template<typename ValueTy, typename AllocatorTy = MallocAllocator>
class StringMap : public StringMapImpl {
AllocatorTy Allocator;
typedef StringMapEntry<ValueTy> MapEntryTy;
public:
StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(unsigned InitialSize)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(AllocatorTy A)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
explicit StringMap(const StringMap &RHS)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {
assert(RHS.empty() &&
"Copy ctor from non-empty stringmap not implemented yet!");
(void)RHS;
}
void operator=(const StringMap &RHS) {
assert(RHS.empty() &&
"assignment from non-empty stringmap not implemented yet!");
(void)RHS;
clear();
}
typedef typename ReferenceAdder<AllocatorTy>::result AllocatorRefTy;
typedef typename ReferenceAdder<const AllocatorTy>::result AllocatorCRefTy;
AllocatorRefTy getAllocator() { return Allocator; }
AllocatorCRefTy getAllocator() const { return Allocator; }
typedef const char* key_type;
typedef ValueTy mapped_type;
typedef StringMapEntry<ValueTy> value_type;
typedef size_t size_type;
typedef StringMapConstIterator<ValueTy> const_iterator;
typedef StringMapIterator<ValueTy> iterator;
iterator begin() {
return iterator(TheTable, NumBuckets == 0);
}
iterator end() {
return iterator(TheTable+NumBuckets, true);
}
const_iterator begin() const {
return const_iterator(TheTable, NumBuckets == 0);
}
const_iterator end() const {
return const_iterator(TheTable+NumBuckets, true);
}
iterator find(StringRef Key) {
int Bucket = FindKey(Key);
if (Bucket == -1) return end();
return iterator(TheTable+Bucket);
}
const_iterator find(StringRef Key) const {
int Bucket = FindKey(Key);
if (Bucket == -1) return end();
return const_iterator(TheTable+Bucket);
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueTy lookup(StringRef Key) const {
const_iterator it = find(Key);
if (it != end())
return it->second;
return ValueTy();
}
ValueTy &operator[](StringRef Key) {
return GetOrCreateValue(Key).getValue();
}
size_type count(StringRef Key) const {
return find(Key) == end() ? 0 : 1;
}
/// insert - Insert the specified key/value pair into the map. If the key
/// already exists in the map, return false and ignore the request, otherwise
/// insert it and return true.
bool insert(MapEntryTy *KeyValue) {
unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
ItemBucket &Bucket = TheTable[BucketNo];
if (Bucket.Item && Bucket.Item != getTombstoneVal())
return false; // Already exists in map.
if (Bucket.Item == getTombstoneVal())
--NumTombstones;
Bucket.Item = KeyValue;
++NumItems;
assert(NumItems + NumTombstones <= NumBuckets);
RehashTable();
return true;
}
// clear - Empties out the StringMap
void clear() {
if (empty()) return;
// Zap all values, resetting the keys back to non-present (not tombstone),
// which is safe because we're removing all elements.
for (ItemBucket *I = TheTable, *E = TheTable+NumBuckets; I != E; ++I) {
if (I->Item && I->Item != getTombstoneVal()) {
static_cast<MapEntryTy*>(I->Item)->Destroy(Allocator);
I->Item = 0;
}
}
NumItems = 0;
NumTombstones = 0;
}
/// GetOrCreateValue - Look up the specified key in the table. If a value
/// exists, return it. Otherwise, default construct a value, insert it, and
/// return.
template <typename InitTy>
MapEntryTy &GetOrCreateValue(StringRef Key, InitTy Val) {
unsigned BucketNo = LookupBucketFor(Key);
ItemBucket &Bucket = TheTable[BucketNo];
if (Bucket.Item && Bucket.Item != getTombstoneVal())
return *static_cast<MapEntryTy*>(Bucket.Item);
MapEntryTy *NewItem =
MapEntryTy::Create(Key.begin(), Key.end(), Allocator, Val);
if (Bucket.Item == getTombstoneVal())
--NumTombstones;
++NumItems;
assert(NumItems + NumTombstones <= NumBuckets);
// Fill in the bucket for the hash table. The FullHashValue was already
// filled in by LookupBucketFor.
Bucket.Item = NewItem;
RehashTable();
return *NewItem;
}
MapEntryTy &GetOrCreateValue(StringRef Key) {
return GetOrCreateValue(Key, ValueTy());
}
/// remove - Remove the specified key/value pair from the map, but do not
/// erase it. This aborts if the key is not in the map.
void remove(MapEntryTy *KeyValue) {
RemoveKey(KeyValue);
}
void erase(iterator I) {
MapEntryTy &V = *I;
remove(&V);
V.Destroy(Allocator);
}
bool erase(StringRef Key) {
iterator I = find(Key);
if (I == end()) return false;
erase(I);
return true;
}
~StringMap() {
clear();
free(TheTable);
}
};
template<typename ValueTy>
class StringMapConstIterator {
protected:
StringMapImpl::ItemBucket *Ptr;
public:
typedef StringMapEntry<ValueTy> value_type;
explicit StringMapConstIterator(StringMapImpl::ItemBucket *Bucket,
bool NoAdvance = false)
: Ptr(Bucket) {
if (!NoAdvance) AdvancePastEmptyBuckets();
}
const value_type &operator*() const {
return *static_cast<StringMapEntry<ValueTy>*>(Ptr->Item);
}
const value_type *operator->() const {
return static_cast<StringMapEntry<ValueTy>*>(Ptr->Item);
}
bool operator==(const StringMapConstIterator &RHS) const {
return Ptr == RHS.Ptr;
}
bool operator!=(const StringMapConstIterator &RHS) const {
return Ptr != RHS.Ptr;
}
inline StringMapConstIterator& operator++() { // Preincrement
++Ptr;
AdvancePastEmptyBuckets();
return *this;
}
StringMapConstIterator operator++(int) { // Postincrement
StringMapConstIterator tmp = *this; ++*this; return tmp;
}
private:
void AdvancePastEmptyBuckets() {
while (Ptr->Item == 0 || Ptr->Item == StringMapImpl::getTombstoneVal())
++Ptr;
}
};
template<typename ValueTy>
class StringMapIterator : public StringMapConstIterator<ValueTy> {
public:
explicit StringMapIterator(StringMapImpl::ItemBucket *Bucket,
bool NoAdvance = false)
: StringMapConstIterator<ValueTy>(Bucket, NoAdvance) {
}
StringMapEntry<ValueTy> &operator*() const {
return *static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item);
}
StringMapEntry<ValueTy> *operator->() const {
return static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item);
}
};
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/ilist.h
|
//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines classes to implement an intrusive doubly linked list class
// (i.e. each node of the list must contain a next and previous field for the
// list.
//
// The ilist_traits trait class is used to gain access to the next and previous
// fields of the node type that the list is instantiated with. If it is not
// specialized, the list defaults to using the getPrev(), getNext() method calls
// to get the next and previous pointers.
//
// The ilist class itself, should be a plug in replacement for list, assuming
// that the nodes contain next/prev pointers. This list replacement does not
// provide a constant time size() method, so be careful to use empty() when you
// really want to know if it's empty.
//
// The ilist class is implemented by allocating a 'tail' node when the list is
// created (using ilist_traits<>::createSentinel()). This tail node is
// absolutely required because the user must be able to compute end()-1. Because
// of this, users of the direct next/prev links will see an extra link on the
// end of the list, which should be ignored.
//
// Requirements for a user of this list:
//
// 1. The user must provide {g|s}et{Next|Prev} methods, or specialize
// ilist_traits to provide an alternate way of getting and setting next and
// prev links.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ILIST_H
#define LLVM_ADT_ILIST_H
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
namespace llvm {
template<typename NodeTy, typename Traits> class iplist;
template<typename NodeTy> class ilist_iterator;
/// ilist_nextprev_traits - A fragment for template traits for intrusive list
/// that provides default next/prev implementations for common operations.
///
template<typename NodeTy>
struct ilist_nextprev_traits {
static NodeTy *getPrev(NodeTy *N) { return N->getPrev(); }
static NodeTy *getNext(NodeTy *N) { return N->getNext(); }
static const NodeTy *getPrev(const NodeTy *N) { return N->getPrev(); }
static const NodeTy *getNext(const NodeTy *N) { return N->getNext(); }
static void setPrev(NodeTy *N, NodeTy *Prev) { N->setPrev(Prev); }
static void setNext(NodeTy *N, NodeTy *Next) { N->setNext(Next); }
};
template<typename NodeTy>
struct ilist_traits;
/// ilist_sentinel_traits - A fragment for template traits for intrusive list
/// that provides default sentinel implementations for common operations.
///
/// ilist_sentinel_traits implements a lazy dynamic sentinel allocation
/// strategy. The sentinel is stored in the prev field of ilist's Head.
///
template<typename NodeTy>
struct ilist_sentinel_traits {
/// createSentinel - create the dynamic sentinel
static NodeTy *createSentinel() { return new NodeTy(); }
/// destroySentinel - deallocate the dynamic sentinel
static void destroySentinel(NodeTy *N) { delete N; }
/// provideInitialHead - when constructing an ilist, provide a starting
/// value for its Head
/// @return null node to indicate that it needs to be allocated later
static NodeTy *provideInitialHead() { return 0; }
/// ensureHead - make sure that Head is either already
/// initialized or assigned a fresh sentinel
/// @return the sentinel
static NodeTy *ensureHead(NodeTy *&Head) {
if (!Head) {
Head = ilist_traits<NodeTy>::createSentinel();
ilist_traits<NodeTy>::noteHead(Head, Head);
ilist_traits<NodeTy>::setNext(Head, 0);
return Head;
}
return ilist_traits<NodeTy>::getPrev(Head);
}
/// noteHead - stash the sentinel into its default location
static void noteHead(NodeTy *NewHead, NodeTy *Sentinel) {
ilist_traits<NodeTy>::setPrev(NewHead, Sentinel);
}
};
/// ilist_node_traits - A fragment for template traits for intrusive list
/// that provides default node related operations.
///
template<typename NodeTy>
struct ilist_node_traits {
static NodeTy *createNode(const NodeTy &V) { return new NodeTy(V); }
static void deleteNode(NodeTy *V) { delete V; }
void addNodeToList(NodeTy *) {}
void removeNodeFromList(NodeTy *) {}
void transferNodesFromList(ilist_node_traits & /*SrcTraits*/,
ilist_iterator<NodeTy> /*first*/,
ilist_iterator<NodeTy> /*last*/) {}
};
/// ilist_default_traits - Default template traits for intrusive list.
/// By inheriting from this, you can easily use default implementations
/// for all common operations.
///
template<typename NodeTy>
struct ilist_default_traits : public ilist_nextprev_traits<NodeTy>,
public ilist_sentinel_traits<NodeTy>,
public ilist_node_traits<NodeTy> {
};
// Template traits for intrusive list. By specializing this template class, you
// can change what next/prev fields are used to store the links...
template<typename NodeTy>
struct ilist_traits : public ilist_default_traits<NodeTy> {};
// Const traits are the same as nonconst traits...
template<typename Ty>
struct ilist_traits<const Ty> : public ilist_traits<Ty> {};
//===----------------------------------------------------------------------===//
// ilist_iterator<Node> - Iterator for intrusive list.
//
template<typename NodeTy>
class ilist_iterator
: public std::iterator<std::bidirectional_iterator_tag, NodeTy, ptrdiff_t> {
public:
typedef ilist_traits<NodeTy> Traits;
typedef std::iterator<std::bidirectional_iterator_tag,
NodeTy, ptrdiff_t> super;
typedef typename super::value_type value_type;
typedef typename super::difference_type difference_type;
typedef typename super::pointer pointer;
typedef typename super::reference reference;
private:
pointer NodePtr;
// ilist_iterator is not a random-access iterator, but it has an
// implicit conversion to pointer-type, which is. Declare (but
// don't define) these functions as private to help catch
// accidental misuse.
void operator[](difference_type) const;
void operator+(difference_type) const;
void operator-(difference_type) const;
void operator+=(difference_type) const;
void operator-=(difference_type) const;
template<class T> void operator<(T) const;
template<class T> void operator<=(T) const;
template<class T> void operator>(T) const;
template<class T> void operator>=(T) const;
template<class T> void operator-(T) const;
public:
ilist_iterator(pointer NP) : NodePtr(NP) {}
ilist_iterator(reference NR) : NodePtr(&NR) {}
ilist_iterator() : NodePtr(0) {}
// This is templated so that we can allow constructing a const iterator from
// a nonconst iterator...
template<class node_ty>
ilist_iterator(const ilist_iterator<node_ty> &RHS)
: NodePtr(RHS.getNodePtrUnchecked()) {}
// This is templated so that we can allow assigning to a const iterator from
// a nonconst iterator...
template<class node_ty>
const ilist_iterator &operator=(const ilist_iterator<node_ty> &RHS) {
NodePtr = RHS.getNodePtrUnchecked();
return *this;
}
// Accessors...
operator pointer() const {
return NodePtr;
}
reference operator*() const {
return *NodePtr;
}
pointer operator->() const { return &operator*(); }
// Comparison operators
bool operator==(const ilist_iterator &RHS) const {
return NodePtr == RHS.NodePtr;
}
bool operator!=(const ilist_iterator &RHS) const {
return NodePtr != RHS.NodePtr;
}
// Increment and decrement operators...
ilist_iterator &operator--() { // predecrement - Back up
NodePtr = Traits::getPrev(NodePtr);
assert(NodePtr && "--'d off the beginning of an ilist!");
return *this;
}
ilist_iterator &operator++() { // preincrement - Advance
NodePtr = Traits::getNext(NodePtr);
return *this;
}
ilist_iterator operator--(int) { // postdecrement operators...
ilist_iterator tmp = *this;
--*this;
return tmp;
}
ilist_iterator operator++(int) { // postincrement operators...
ilist_iterator tmp = *this;
++*this;
return tmp;
}
// Internal interface, do not use...
pointer getNodePtrUnchecked() const { return NodePtr; }
};
// do not implement. this is to catch errors when people try to use
// them as random access iterators
template<typename T>
void operator-(int, ilist_iterator<T>);
template<typename T>
void operator-(ilist_iterator<T>,int);
template<typename T>
void operator+(int, ilist_iterator<T>);
template<typename T>
void operator+(ilist_iterator<T>,int);
// operator!=/operator== - Allow mixed comparisons without dereferencing
// the iterator, which could very likely be pointing to end().
template<typename T>
bool operator!=(const T* LHS, const ilist_iterator<const T> &RHS) {
return LHS != RHS.getNodePtrUnchecked();
}
template<typename T>
bool operator==(const T* LHS, const ilist_iterator<const T> &RHS) {
return LHS == RHS.getNodePtrUnchecked();
}
template<typename T>
bool operator!=(T* LHS, const ilist_iterator<T> &RHS) {
return LHS != RHS.getNodePtrUnchecked();
}
template<typename T>
bool operator==(T* LHS, const ilist_iterator<T> &RHS) {
return LHS == RHS.getNodePtrUnchecked();
}
// Allow ilist_iterators to convert into pointers to a node automatically when
// used by the dyn_cast, cast, isa mechanisms...
template<typename From> struct simplify_type;
template<typename NodeTy> struct simplify_type<ilist_iterator<NodeTy> > {
typedef NodeTy* SimpleType;
static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) {
return &*Node;
}
};
template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > {
typedef NodeTy* SimpleType;
static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) {
return &*Node;
}
};
//===----------------------------------------------------------------------===//
//
/// iplist - The subset of list functionality that can safely be used on nodes
/// of polymorphic types, i.e. a heterogeneous list with a common base class that
/// holds the next/prev pointers. The only state of the list itself is a single
/// pointer to the head of the list.
///
/// This list can be in one of three interesting states:
/// 1. The list may be completely unconstructed. In this case, the head
/// pointer is null. When in this form, any query for an iterator (e.g.
/// begin() or end()) causes the list to transparently change to state #2.
/// 2. The list may be empty, but contain a sentinel for the end iterator. This
/// sentinel is created by the Traits::createSentinel method and is a link
/// in the list. When the list is empty, the pointer in the iplist points
/// to the sentinel. Once the sentinel is constructed, it
/// is not destroyed until the list is.
/// 3. The list may contain actual objects in it, which are stored as a doubly
/// linked list of nodes. One invariant of the list is that the predecessor
/// of the first node in the list always points to the last node in the list,
/// and the successor pointer for the sentinel (which always stays at the
/// end of the list) is always null.
///
template<typename NodeTy, typename Traits=ilist_traits<NodeTy> >
class iplist : public Traits {
mutable NodeTy *Head;
// Use the prev node pointer of 'head' as the tail pointer. This is really a
// circularly linked list where we snip the 'next' link from the sentinel node
// back to the first node in the list (to preserve assertions about going off
// the end of the list).
NodeTy *getTail() { return this->ensureHead(Head); }
const NodeTy *getTail() const { return this->ensureHead(Head); }
void setTail(NodeTy *N) const { this->noteHead(Head, N); }
/// CreateLazySentinel - This method verifies whether the sentinel for the
/// list has been created and lazily makes it if not.
void CreateLazySentinel() const {
this->ensureHead(Head);
}
static bool op_less(NodeTy &L, NodeTy &R) { return L < R; }
static bool op_equal(NodeTy &L, NodeTy &R) { return L == R; }
// No fundamental reason why iplist can't be copyable, but the default
// copy/copy-assign won't do.
iplist(const iplist &); // do not implement
void operator=(const iplist &); // do not implement
public:
typedef NodeTy *pointer;
typedef const NodeTy *const_pointer;
typedef NodeTy &reference;
typedef const NodeTy &const_reference;
typedef NodeTy value_type;
typedef ilist_iterator<NodeTy> iterator;
typedef ilist_iterator<const NodeTy> const_iterator;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
iplist() : Head(this->provideInitialHead()) {}
~iplist() {
if (!Head) return;
clear();
Traits::destroySentinel(getTail());
}
// Iterator creation methods.
iterator begin() {
CreateLazySentinel();
return iterator(Head);
}
const_iterator begin() const {
CreateLazySentinel();
return const_iterator(Head);
}
iterator end() {
CreateLazySentinel();
return iterator(getTail());
}
const_iterator end() const {
CreateLazySentinel();
return const_iterator(getTail());
}
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
// Miscellaneous inspection routines.
size_type max_size() const { return size_type(-1); }
bool empty() const { return Head == 0 || Head == getTail(); }
// Front and back accessor functions...
reference front() {
assert(!empty() && "Called front() on empty list!");
return *Head;
}
const_reference front() const {
assert(!empty() && "Called front() on empty list!");
return *Head;
}
reference back() {
assert(!empty() && "Called back() on empty list!");
return *this->getPrev(getTail());
}
const_reference back() const {
assert(!empty() && "Called back() on empty list!");
return *this->getPrev(getTail());
}
void swap(iplist &RHS) {
assert(0 && "Swap does not use list traits callback correctly yet!");
std::swap(Head, RHS.Head);
}
iterator insert(iterator where, NodeTy *New) {
NodeTy *CurNode = where.getNodePtrUnchecked();
NodeTy *PrevNode = this->getPrev(CurNode);
this->setNext(New, CurNode);
this->setPrev(New, PrevNode);
if (CurNode != Head) // Is PrevNode off the beginning of the list?
this->setNext(PrevNode, New);
else
Head = New;
this->setPrev(CurNode, New);
this->addNodeToList(New); // Notify traits that we added a node...
return New;
}
iterator insertAfter(iterator where, NodeTy *New) {
if (empty())
return insert(begin(), New);
else
return insert(++where, New);
}
NodeTy *remove(iterator &IT) {
assert(IT != end() && "Cannot remove end of list!");
NodeTy *Node = &*IT;
NodeTy *NextNode = this->getNext(Node);
NodeTy *PrevNode = this->getPrev(Node);
if (Node != Head) // Is PrevNode off the beginning of the list?
this->setNext(PrevNode, NextNode);
else
Head = NextNode;
this->setPrev(NextNode, PrevNode);
IT = NextNode;
this->removeNodeFromList(Node); // Notify traits that we removed a node...
// Set the next/prev pointers of the current node to null. This isn't
// strictly required, but this catches errors where a node is removed from
// an ilist (and potentially deleted) with iterators still pointing at it.
// When those iterators are incremented or decremented, they will assert on
// the null next/prev pointer instead of "usually working".
this->setNext(Node, 0);
this->setPrev(Node, 0);
return Node;
}
NodeTy *remove(const iterator &IT) {
iterator MutIt = IT;
return remove(MutIt);
}
// erase - remove a node from the controlled sequence... and delete it.
iterator erase(iterator where) {
this->deleteNode(remove(where));
return where;
}
private:
// transfer - The heart of the splice function. Move linked list nodes from
// [first, last) into position.
//
void transfer(iterator position, iplist &L2, iterator first, iterator last) {
assert(first != last && "Should be checked by callers");
if (position != last) {
// Note: we have to be careful about the case when we move the first node
// in the list. This node is the list sentinel node and we can't move it.
NodeTy *ThisSentinel = getTail();
setTail(0);
NodeTy *L2Sentinel = L2.getTail();
L2.setTail(0);
// Remove [first, last) from its old position.
NodeTy *First = &*first, *Prev = this->getPrev(First);
NodeTy *Next = last.getNodePtrUnchecked(), *Last = this->getPrev(Next);
if (Prev)
this->setNext(Prev, Next);
else
L2.Head = Next;
this->setPrev(Next, Prev);
// Splice [first, last) into its new position.
NodeTy *PosNext = position.getNodePtrUnchecked();
NodeTy *PosPrev = this->getPrev(PosNext);
// Fix head of list...
if (PosPrev)
this->setNext(PosPrev, First);
else
Head = First;
this->setPrev(First, PosPrev);
// Fix end of list...
this->setNext(Last, PosNext);
this->setPrev(PosNext, Last);
this->transferNodesFromList(L2, First, PosNext);
// Now that everything is set, restore the pointers to the list sentinels.
L2.setTail(L2Sentinel);
setTail(ThisSentinel);
}
}
public:
//===----------------------------------------------------------------------===
// Functionality derived from other functions defined above...
//
size_type size() const {
if (Head == 0) return 0; // Don't require construction of sentinel if empty.
return std::distance(begin(), end());
}
iterator erase(iterator first, iterator last) {
while (first != last)
first = erase(first);
return last;
}
void clear() { if (Head) erase(begin(), end()); }
// Front and back inserters...
void push_front(NodeTy *val) { insert(begin(), val); }
void push_back(NodeTy *val) { insert(end(), val); }
void pop_front() {
assert(!empty() && "pop_front() on empty list!");
erase(begin());
}
void pop_back() {
assert(!empty() && "pop_back() on empty list!");
iterator t = end(); erase(--t);
}
// Special forms of insert...
template<class InIt> void insert(iterator where, InIt first, InIt last) {
for (; first != last; ++first) insert(where, *first);
}
// Splice members - defined in terms of transfer...
void splice(iterator where, iplist &L2) {
if (!L2.empty())
transfer(where, L2, L2.begin(), L2.end());
}
void splice(iterator where, iplist &L2, iterator first) {
iterator last = first; ++last;
if (where == first || where == last) return; // No change
transfer(where, L2, first, last);
}
void splice(iterator where, iplist &L2, iterator first, iterator last) {
if (first != last) transfer(where, L2, first, last);
}
//===----------------------------------------------------------------------===
// High-Level Functionality that shouldn't really be here, but is part of list
//
// These two functions are actually called remove/remove_if in list<>, but
// they actually do the job of erase, rename them accordingly.
//
void erase(const NodeTy &val) {
for (iterator I = begin(), E = end(); I != E; ) {
iterator next = I; ++next;
if (*I == val) erase(I);
I = next;
}
}
template<class Pr1> void erase_if(Pr1 pred) {
for (iterator I = begin(), E = end(); I != E; ) {
iterator next = I; ++next;
if (pred(*I)) erase(I);
I = next;
}
}
template<class Pr2> void unique(Pr2 pred) {
if (empty()) return;
for (iterator I = begin(), E = end(), Next = begin(); ++Next != E;) {
if (pred(*I))
erase(Next);
else
I = Next;
Next = I;
}
}
void unique() { unique(op_equal); }
template<class Pr3> void merge(iplist &right, Pr3 pred) {
iterator first1 = begin(), last1 = end();
iterator first2 = right.begin(), last2 = right.end();
while (first1 != last1 && first2 != last2)
if (pred(*first2, *first1)) {
iterator next = first2;
transfer(first1, right, first2, ++next);
first2 = next;
} else {
++first1;
}
if (first2 != last2) transfer(last1, right, first2, last2);
}
void merge(iplist &right) { return merge(right, op_less); }
template<class Pr3> void sort(Pr3 pred);
void sort() { sort(op_less); }
};
template<typename NodeTy>
struct ilist : public iplist<NodeTy> {
typedef typename iplist<NodeTy>::size_type size_type;
typedef typename iplist<NodeTy>::iterator iterator;
ilist() {}
ilist(const ilist &right) {
insert(this->begin(), right.begin(), right.end());
}
explicit ilist(size_type count) {
insert(this->begin(), count, NodeTy());
}
ilist(size_type count, const NodeTy &val) {
insert(this->begin(), count, val);
}
template<class InIt> ilist(InIt first, InIt last) {
insert(this->begin(), first, last);
}
// bring hidden functions into scope
using iplist<NodeTy>::insert;
using iplist<NodeTy>::push_front;
using iplist<NodeTy>::push_back;
// Main implementation here - Insert for a node passed by value...
iterator insert(iterator where, const NodeTy &val) {
return insert(where, this->createNode(val));
}
// Front and back inserters...
void push_front(const NodeTy &val) { insert(this->begin(), val); }
void push_back(const NodeTy &val) { insert(this->end(), val); }
// Special forms of insert...
template<class InIt> void insert(iterator where, InIt first, InIt last) {
for (; first != last; ++first) insert(where, *first);
}
void insert(iterator where, size_type count, const NodeTy &val) {
for (; count != 0; --count) insert(where, val);
}
// Assign special forms...
void assign(size_type count, const NodeTy &val) {
iterator I = this->begin();
for (; I != this->end() && count != 0; ++I, --count)
*I = val;
if (count != 0)
insert(this->end(), val, val);
else
erase(I, this->end());
}
template<class InIt> void assign(InIt first1, InIt last1) {
iterator first2 = this->begin(), last2 = this->end();
for ( ; first1 != last1 && first2 != last2; ++first1, ++first2)
*first1 = *first2;
if (first2 == last2)
erase(first1, last1);
else
insert(last1, first2, last2);
}
// Resize members...
void resize(size_type newsize, NodeTy val) {
iterator i = this->begin();
size_type len = 0;
for ( ; i != this->end() && len < newsize; ++i, ++len) /* empty*/ ;
if (len == newsize)
erase(i, this->end());
else // i == end()
insert(this->end(), newsize - len, val);
}
void resize(size_type newsize) { resize(newsize, NodeTy()); }
};
} // End llvm namespace
namespace std {
// Ensure that swap uses the fast list swap...
template<class Ty>
void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
Left.swap(Right);
}
} // End 'std' extensions...
#endif // LLVM_ADT_ILIST_H
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/Twine.h
|
//===-- Twine.h - Fast Temporary String Concatenation -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_TWINE_H
#define LLVM_ADT_TWINE_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <string>
#include <iostream>
namespace llvm {
template <typename T>
class SmallVectorImpl;
class StringRef;
/// Twine - A lightweight data structure for efficiently representing the
/// concatenation of temporary values as strings.
///
/// A Twine is a kind of rope, it represents a concatenated string using a
/// binary-tree, where the string is the preorder of the nodes. Since the
/// Twine can be efficiently rendered into a buffer when its result is used,
/// it avoids the cost of generating temporary values for intermediate string
/// results -- particularly in cases when the Twine result is never
/// required. By explicitly tracking the type of leaf nodes, we can also avoid
/// the creation of temporary strings for conversions operations (such as
/// appending an integer to a string).
///
/// A Twine is not intended for use directly and should not be stored, its
/// implementation relies on the ability to store pointers to temporary stack
/// objects which may be deallocated at the end of a statement. Twines should
/// only be used accepted as const references in arguments, when an API wishes
/// to accept possibly-concatenated strings.
///
/// Twines support a special 'null' value, which always concatenates to form
/// itself, and renders as an empty string. This can be returned from APIs to
/// effectively nullify any concatenations performed on the result.
///
/// \b Implementation \n
///
/// Given the nature of a Twine, it is not possible for the Twine's
/// concatenation method to construct interior nodes; the result must be
/// represented inside the returned value. For this reason a Twine object
/// actually holds two values, the left- and right-hand sides of a
/// concatenation. We also have nullary Twine objects, which are effectively
/// sentinel values that represent empty strings.
///
/// Thus, a Twine can effectively have zero, one, or two children. The \see
/// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
/// testing the number of children.
///
/// We maintain a number of invariants on Twine objects (FIXME: Why):
/// - Nullary twines are always represented with their Kind on the left-hand
/// side, and the Empty kind on the right-hand side.
/// - Unary twines are always represented with the value on the left-hand
/// side, and the Empty kind on the right-hand side.
/// - If a Twine has another Twine as a child, that child should always be
/// binary (otherwise it could have been folded into the parent).
///
/// These invariants are check by \see isValid().
///
/// \b Efficiency Considerations \n
///
/// The Twine is designed to yield efficient and small code for common
/// situations. For this reason, the concat() method is inlined so that
/// concatenations of leaf nodes can be optimized into stores directly into a
/// single stack allocated object.
///
/// In practice, not all compilers can be trusted to optimize concat() fully,
/// so we provide two additional methods (and accompanying operator+
/// overloads) to guarantee that particularly important cases (cstring plus
/// StringRef) codegen as desired.
class Twine {
/// NodeKind - Represent the type of an argument.
enum NodeKind {
/// An empty string; the result of concatenating anything with it is also
/// empty.
NullKind,
/// The empty string.
EmptyKind,
/// A pointer to a Twine instance.
TwineKind,
/// A pointer to a C string instance.
CStringKind,
/// A pointer to an std::string instance.
StdStringKind,
/// A pointer to a StringRef instance.
StringRefKind,
/// A char value reinterpreted as a pointer, to render as a character.
CharKind,
/// An unsigned int value reinterpreted as a pointer, to render as an
/// unsigned decimal integer.
DecUIKind,
/// An int value reinterpreted as a pointer, to render as a signed
/// decimal integer.
DecIKind,
/// A pointer to an unsigned long value, to render as an unsigned decimal
/// integer.
DecULKind,
/// A pointer to a long value, to render as a signed decimal integer.
DecLKind,
/// A pointer to an unsigned long long value, to render as an unsigned
/// decimal integer.
DecULLKind,
/// A pointer to a long long value, to render as a signed decimal integer.
DecLLKind,
/// A pointer to a uint64_t value, to render as an unsigned hexadecimal
/// integer.
UHexKind
};
union Child
{
const Twine *twine;
const char *cString;
const std::string *stdString;
const StringRef *stringRef;
char character;
unsigned int decUI;
int decI;
const unsigned long *decUL;
const long *decL;
const unsigned long long *decULL;
const long long *decLL;
const uint64_t *uHex;
};
private:
/// LHS - The prefix in the concatenation, which may be uninitialized for
/// Null or Empty kinds.
Child LHS;
/// RHS - The suffix in the concatenation, which may be uninitialized for
/// Null or Empty kinds.
Child RHS;
// enums stored as unsigned chars to save on space while some compilers
// don't support specifying the backing type for an enum
/// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
unsigned char LHSKind;
/// RHSKind - The NodeKind of the left hand side, \see getLHSKind().
unsigned char RHSKind;
private:
/// Construct a nullary twine; the kind must be NullKind or EmptyKind.
explicit Twine(NodeKind Kind)
: LHSKind(Kind), RHSKind(EmptyKind) {
assert(isNullary() && "Invalid kind!");
}
/// Construct a binary twine.
explicit Twine(const Twine &_LHS, const Twine &_RHS)
: LHSKind(TwineKind), RHSKind(TwineKind) {
LHS.twine = &_LHS;
RHS.twine = &_RHS;
assert(isValid() && "Invalid twine!");
}
/// Construct a twine from explicit values.
explicit Twine(Child _LHS, NodeKind _LHSKind,
Child _RHS, NodeKind _RHSKind)
: LHS(_LHS), RHS(_RHS), LHSKind(_LHSKind), RHSKind(_RHSKind) {
assert(isValid() && "Invalid twine!");
}
/// isNull - Check for the null twine.
bool isNull() const {
return getLHSKind() == NullKind;
}
/// isEmpty - Check for the empty twine.
bool isEmpty() const {
return getLHSKind() == EmptyKind;
}
/// isNullary - Check if this is a nullary twine (null or empty).
bool isNullary() const {
return isNull() || isEmpty();
}
/// isUnary - Check if this is a unary twine.
bool isUnary() const {
return getRHSKind() == EmptyKind && !isNullary();
}
/// isBinary - Check if this is a binary twine.
bool isBinary() const {
return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
}
/// isValid - Check if this is a valid twine (satisfying the invariants on
/// order and number of arguments).
bool isValid() const {
// Nullary twines always have Empty on the RHS.
if (isNullary() && getRHSKind() != EmptyKind)
return false;
// Null should never appear on the RHS.
if (getRHSKind() == NullKind)
return false;
// The RHS cannot be non-empty if the LHS is empty.
if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
return false;
// A twine child should always be binary.
if (getLHSKind() == TwineKind &&
!LHS.twine->isBinary())
return false;
if (getRHSKind() == TwineKind &&
!RHS.twine->isBinary())
return false;
return true;
}
/// getLHSKind - Get the NodeKind of the left-hand side.
NodeKind getLHSKind() const { return (NodeKind) LHSKind; }
/// getRHSKind - Get the NodeKind of the left-hand side.
NodeKind getRHSKind() const { return (NodeKind) RHSKind; }
/// printOneChild - Print one child from a twine.
void printOneChild(std::ostream &OS, Child Ptr, NodeKind Kind) const;
/// printOneChildRepr - Print the representation of one child from a twine.
void printOneChildRepr(std::ostream &OS, Child Ptr,
NodeKind Kind) const;
public:
/// @name Constructors
/// @{
/// Construct from an empty string.
/*implicit*/ Twine() : LHSKind(EmptyKind), RHSKind(EmptyKind) {
assert(isValid() && "Invalid twine!");
}
/// Construct from a C string.
///
/// We take care here to optimize "" into the empty twine -- this will be
/// optimized out for string constants. This allows Twine arguments have
/// default "" values, without introducing unnecessary string constants.
/*implicit*/ Twine(const char *Str)
: RHSKind(EmptyKind) {
if (Str[0] != '\0') {
LHS.cString = Str;
LHSKind = CStringKind;
} else
LHSKind = EmptyKind;
assert(isValid() && "Invalid twine!");
}
/// Construct from an std::string.
/*implicit*/ Twine(const std::string &Str)
: LHSKind(StdStringKind), RHSKind(EmptyKind) {
LHS.stdString = &Str;
assert(isValid() && "Invalid twine!");
}
/// Construct from a StringRef.
/*implicit*/ Twine(const StringRef &Str)
: LHSKind(StringRefKind), RHSKind(EmptyKind) {
LHS.stringRef = &Str;
assert(isValid() && "Invalid twine!");
}
/// Construct from a char.
explicit Twine(char Val)
: LHSKind(CharKind), RHSKind(EmptyKind) {
LHS.character = Val;
}
/// Construct from a signed char.
explicit Twine(signed char Val)
: LHSKind(CharKind), RHSKind(EmptyKind) {
LHS.character = static_cast<char>(Val);
}
/// Construct from an unsigned char.
explicit Twine(unsigned char Val)
: LHSKind(CharKind), RHSKind(EmptyKind) {
LHS.character = static_cast<char>(Val);
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
explicit Twine(unsigned Val)
: LHSKind(DecUIKind), RHSKind(EmptyKind) {
LHS.decUI = Val;
}
/// Construct a twine to print \arg Val as a signed decimal integer.
explicit Twine(int Val)
: LHSKind(DecIKind), RHSKind(EmptyKind) {
LHS.decI = Val;
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
explicit Twine(const unsigned long &Val)
: LHSKind(DecULKind), RHSKind(EmptyKind) {
LHS.decUL = &Val;
}
/// Construct a twine to print \arg Val as a signed decimal integer.
explicit Twine(const long &Val)
: LHSKind(DecLKind), RHSKind(EmptyKind) {
LHS.decL = &Val;
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
explicit Twine(const unsigned long long &Val)
: LHSKind(DecULLKind), RHSKind(EmptyKind) {
LHS.decULL = &Val;
}
/// Construct a twine to print \arg Val as a signed decimal integer.
explicit Twine(const long long &Val)
: LHSKind(DecLLKind), RHSKind(EmptyKind) {
LHS.decLL = &Val;
}
// FIXME: Unfortunately, to make sure this is as efficient as possible we
// need extra binary constructors from particular types. We can't rely on
// the compiler to be smart enough to fold operator+()/concat() down to the
// right thing. Yet.
/// Construct as the concatenation of a C string and a StringRef.
/*implicit*/ Twine(const char *_LHS, const StringRef &_RHS)
: LHSKind(CStringKind), RHSKind(StringRefKind) {
LHS.cString = _LHS;
RHS.stringRef = &_RHS;
assert(isValid() && "Invalid twine!");
}
/// Construct as the concatenation of a StringRef and a C string.
/*implicit*/ Twine(const StringRef &_LHS, const char *_RHS)
: LHSKind(StringRefKind), RHSKind(CStringKind) {
LHS.stringRef = &_LHS;
RHS.cString = _RHS;
assert(isValid() && "Invalid twine!");
}
/// Create a 'null' string, which is an empty string that always
/// concatenates to form another empty string.
static Twine createNull() {
return Twine(NullKind);
}
/// @}
/// @name Numeric Conversions
/// @{
// Construct a twine to print \arg Val as an unsigned hexadecimal integer.
static Twine utohexstr(const uint64_t &Val) {
Child LHS, RHS;
LHS.uHex = &Val;
RHS.twine = 0;
return Twine(LHS, UHexKind, RHS, EmptyKind);
}
/// @}
/// @name Predicate Operations
/// @{
/// isTriviallyEmpty - Check if this twine is trivially empty; a false
/// return value does not necessarily mean the twine is empty.
bool isTriviallyEmpty() const {
return isNullary();
}
/// isSingleStringRef - Return true if this twine can be dynamically
/// accessed as a single StringRef value with getSingleStringRef().
bool isSingleStringRef() const {
if (getRHSKind() != EmptyKind) return false;
switch (getLHSKind()) {
case EmptyKind:
case CStringKind:
case StdStringKind:
case StringRefKind:
return true;
default:
return false;
}
}
/// @}
/// @name String Operations
/// @{
Twine concat(const Twine &Suffix) const;
/// @}
/// @name Output & Conversion.
/// @{
/// str - Return the twine contents as a std::string.
std::string str() const;
/// toVector - Write the concatenated string into the given SmallString or
/// SmallVector.
void toVector(SmallVectorImpl<char> &Out) const;
/// getSingleStringRef - This returns the twine as a single StringRef. This
/// method is only valid if isSingleStringRef() is true.
StringRef getSingleStringRef() const {
assert(isSingleStringRef() &&"This cannot be had as a single stringref!");
switch (getLHSKind()) {
default: assert(0 && "Out of sync with isSingleStringRef");
case EmptyKind: return StringRef();
case CStringKind: return StringRef(LHS.cString);
case StdStringKind: return StringRef(*LHS.stdString);
case StringRefKind: return *LHS.stringRef;
}
}
/// toStringRef - This returns the twine as a single StringRef if it can be
/// represented as such. Otherwise the twine is written into the given
/// SmallVector and a StringRef to the SmallVector's data is returned.
StringRef toStringRef(SmallVectorImpl<char> &Out) const;
/// toNullTerminatedStringRef - This returns the twine as a single null
/// terminated StringRef if it can be represented as such. Otherwise the
/// twine is written into the given SmallVector and a StringRef to the
/// SmallVector's data is returned.
///
/// The returned StringRef's size does not include the null terminator.
StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
/// print - Write the concatenated string represented by this twine to the
/// stream \arg OS.
void print(std::ostream &OS) const;
/// dump - Dump the concatenated string represented by this twine to stderr.
void dump() const;
/// print - Write the representation of this twine to the stream \arg OS.
void printRepr(std::ostream &OS) const;
/// dumpRepr - Dump the representation of this twine to stderr.
void dumpRepr() const;
/// @}
};
/// @name Twine Inline Implementations
/// @{
inline Twine Twine::concat(const Twine &Suffix) const {
// Concatenation with null is null.
if (isNull() || Suffix.isNull())
return Twine(NullKind);
// Concatenation with empty yields the other side.
if (isEmpty())
return Suffix;
if (Suffix.isEmpty())
return *this;
// Otherwise we need to create a new node, taking care to fold in unary
// twines.
Child NewLHS, NewRHS;
NewLHS.twine = this;
NewRHS.twine = &Suffix;
NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
if (isUnary()) {
NewLHS = LHS;
NewLHSKind = getLHSKind();
}
if (Suffix.isUnary()) {
NewRHS = Suffix.LHS;
NewRHSKind = Suffix.getLHSKind();
}
return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
}
inline Twine operator+(const Twine &LHS, const Twine &RHS) {
return LHS.concat(RHS);
}
/// Additional overload to guarantee simplified codegen; this is equivalent to
/// concat().
inline Twine operator+(const char *LHS, const StringRef &RHS) {
return Twine(LHS, RHS);
}
/// Additional overload to guarantee simplified codegen; this is equivalent to
/// concat().
inline Twine operator+(const StringRef &LHS, const char *RHS) {
return Twine(LHS, RHS);
}
inline std::ostream &operator<<(std::ostream &OS, const Twine &RHS) {
RHS.print(OS);
return OS;
}
/// @}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/SmallPtrSet.h
|
//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SmallPtrSet class. See the doxygen comment for
// SmallPtrSetImpl for more details on the algorithm used.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iterator>
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
namespace llvm {
class SmallPtrSetIteratorImpl;
/// SmallPtrSetImpl - This is the common code shared among all the
/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
/// for small and one for large sets.
///
/// Small sets use an array of pointers allocated in the SmallPtrSet object,
/// which is treated as a simple array of pointers. When a pointer is added to
/// the set, the array is scanned to see if the element already exists, if not
/// the element is 'pushed back' onto the array. If we run out of space in the
/// array, we grow into the 'large set' case. SmallSet should be used when the
/// sets are often small. In this case, no memory allocation is used, and only
/// light-weight and cache-efficient scanning is used.
///
/// Large sets use a classic exponentially-probed hash table. Empty buckets are
/// represented with an illegal pointer value (-1) to allow null pointers to be
/// inserted. Tombstones are represented with another illegal pointer value
/// (-2), to allow deletion. The hash table is resized when the table is 3/4 or
/// more. When this happens, the table is doubled in size.
///
class SmallPtrSetImpl {
friend class SmallPtrSetIteratorImpl;
protected:
/// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
const void **SmallArray;
/// CurArray - This is the current set of buckets. If equal to SmallArray,
/// then the set is in 'small mode'.
const void **CurArray;
/// CurArraySize - The allocated size of CurArray, always a power of two.
/// Note that CurArray points to an array that has CurArraySize+1 elements in
/// it, so that the end iterator actually points to valid memory.
unsigned CurArraySize;
// If small, this is # elts allocated consecutively
unsigned NumElements;
unsigned NumTombstones;
// Helper to copy construct a SmallPtrSet.
SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl& that);
explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize) :
SmallArray(SmallStorage), CurArray(SmallStorage), CurArraySize(SmallSize) {
assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
"Initial size must be a power of two!");
// The end pointer, always valid, is set to a valid element to help the
// iterator.
CurArray[SmallSize] = 0;
clear();
}
~SmallPtrSetImpl();
public:
bool empty() const { return size() == 0; }
unsigned size() const { return NumElements; }
void clear() {
// If the capacity of the array is huge, and the # elements used is small,
// shrink the array.
if (!isSmall() && NumElements*4 < CurArraySize && CurArraySize > 32)
return shrink_and_clear();
// Fill the array with empty markers.
memset(CurArray, -1, CurArraySize*sizeof(void*));
NumElements = 0;
NumTombstones = 0;
}
protected:
static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
static void *getEmptyMarker() {
// Note that -1 is chosen to make clear() efficiently implementable with
// memset and because it's not a valid pointer value.
return reinterpret_cast<void*>(-1);
}
/// insert_imp - This returns true if the pointer was new to the set, false if
/// it was already in the set. This is hidden from the client so that the
/// derived class can check that the right type of pointer is passed in.
bool insert_imp(const void * Ptr);
/// erase_imp - If the set contains the specified pointer, remove it and
/// return true, otherwise return false. This is hidden from the client so
/// that the derived class can check that the right type of pointer is passed
/// in.
bool erase_imp(const void * Ptr);
bool count_imp(const void * Ptr) const {
if (isSmall()) {
// Linear search for the item.
for (const void *const *APtr = SmallArray,
*const *E = SmallArray+NumElements; APtr != E; ++APtr)
if (*APtr == Ptr)
return true;
return false;
}
// Big set case.
return *FindBucketFor(Ptr) == Ptr;
}
private:
bool isSmall() const { return CurArray == SmallArray; }
unsigned Hash(const void *Ptr) const {
return static_cast<unsigned>(((uintptr_t)Ptr >> 4) & (CurArraySize-1));
}
const void * const *FindBucketFor(const void *Ptr) const;
void shrink_and_clear();
/// Grow - Allocate a larger backing store for the buckets and move it over.
void Grow(unsigned NewSize);
void operator=(const SmallPtrSetImpl &RHS); // DO NOT IMPLEMENT.
protected:
void CopyFrom(const SmallPtrSetImpl &RHS);
};
/// SmallPtrSetIteratorImpl - This is the common base class shared between all
/// instances of SmallPtrSetIterator.
class SmallPtrSetIteratorImpl {
protected:
const void *const *Bucket;
public:
explicit SmallPtrSetIteratorImpl(const void *const *BP) : Bucket(BP) {
AdvanceIfNotValid();
}
bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
return Bucket == RHS.Bucket;
}
bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
return Bucket != RHS.Bucket;
}
protected:
/// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
/// that is. This is guaranteed to stop because the end() bucket is marked
/// valid.
void AdvanceIfNotValid() {
while (*Bucket == SmallPtrSetImpl::getEmptyMarker() ||
*Bucket == SmallPtrSetImpl::getTombstoneMarker())
++Bucket;
}
};
/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
template<typename PtrTy>
class SmallPtrSetIterator : public SmallPtrSetIteratorImpl {
typedef PointerLikeTypeTraits<PtrTy> PtrTraits;
public:
typedef PtrTy value_type;
typedef PtrTy reference;
typedef PtrTy pointer;
typedef std::ptrdiff_t difference_type;
typedef std::forward_iterator_tag iterator_category;
explicit SmallPtrSetIterator(const void *const *BP)
: SmallPtrSetIteratorImpl(BP) {}
// Most methods provided by baseclass.
const PtrTy operator*() const {
return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
}
inline SmallPtrSetIterator& operator++() { // Preincrement
++Bucket;
AdvanceIfNotValid();
return *this;
}
SmallPtrSetIterator operator++(int) { // Postincrement
SmallPtrSetIterator tmp = *this; ++*this; return tmp;
}
};
/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
/// power of two (which means N itself if N is already a power of two).
template<unsigned N>
struct RoundUpToPowerOfTwo;
/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
/// helper template used to implement RoundUpToPowerOfTwo.
template<unsigned N, bool isPowerTwo>
struct RoundUpToPowerOfTwoH {
enum { Val = N };
};
template<unsigned N>
struct RoundUpToPowerOfTwoH<N, false> {
enum {
// We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
// the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
};
};
template<unsigned N>
struct RoundUpToPowerOfTwo {
enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};
/// SmallPtrSet - This class implements a set which is optimized for holding
/// SmallSize or less elements. This internally rounds up SmallSize to the next
/// power of two if it is not already a power of two. See the comments above
/// SmallPtrSetImpl for details of the algorithm.
template<class PtrType, unsigned SmallSize>
class SmallPtrSet : public SmallPtrSetImpl {
// Make sure that SmallSize is a power of two, round up if not.
enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
/// SmallStorage - Fixed size storage used in 'small mode'. The extra element
/// ensures that the end iterator actually points to valid memory.
const void *SmallStorage[SmallSizePowTwo+1];
typedef PointerLikeTypeTraits<PtrType> PtrTraits;
public:
SmallPtrSet() : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {}
SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(SmallStorage, that) {}
template<typename It>
SmallPtrSet(It I, It E) : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {
insert(I, E);
}
/// insert - This returns true if the pointer was new to the set, false if it
/// was already in the set.
bool insert(PtrType Ptr) {
return insert_imp(PtrTraits::getAsVoidPointer(Ptr));
}
/// erase - If the set contains the specified pointer, remove it and return
/// true, otherwise return false.
bool erase(PtrType Ptr) {
return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
}
/// count - Return true if the specified pointer is in the set.
bool count(PtrType Ptr) const {
return count_imp(PtrTraits::getAsVoidPointer(Ptr));
}
template <typename IterT>
void insert(IterT I, IterT E) {
for (; I != E; ++I)
insert(*I);
}
typedef SmallPtrSetIterator<PtrType> iterator;
typedef SmallPtrSetIterator<PtrType> const_iterator;
inline iterator begin() const {
return iterator(CurArray);
}
inline iterator end() const {
return iterator(CurArray+CurArraySize);
}
// Allow assignment from any smallptrset with the same element type even if it
// doesn't have the same smallsize.
const SmallPtrSet<PtrType, SmallSize>&
operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
CopyFrom(RHS);
return *this;
}
};
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/SmallVector.h
|
//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SmallVector class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <memory>
#ifdef _MSC_VER
namespace std {
#if _MSC_VER <= 1310
// Work around flawed VC++ implementation of std::uninitialized_copy. Define
// additional overloads so that elements with pointer types are recognized as
// scalars and not objects, causing bizarre type conversion errors.
template<class T1, class T2>
inline _Scalar_ptr_iterator_tag _Ptr_cat(T1 **, T2 **) {
_Scalar_ptr_iterator_tag _Cat;
return _Cat;
}
template<class T1, class T2>
inline _Scalar_ptr_iterator_tag _Ptr_cat(T1* const *, T2 **) {
_Scalar_ptr_iterator_tag _Cat;
return _Cat;
}
#else
// FIXME: It is not clear if the problem is fixed in VS 2005. What is clear
// is that the above hack won't work if it wasn't fixed.
#endif
}
#endif
namespace llvm {
/// SmallVectorBase - This is all the non-templated stuff common to all
/// SmallVectors.
class SmallVectorBase {
protected:
void *BeginX, *EndX, *CapacityX;
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. An array of char would work great, but might not be
// aligned sufficiently. Instead we use some number of union instances for
// the space, which guarantee maximal alignment.
union U {
double D;
long double LD;
long long L;
void *P;
} FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
protected:
SmallVectorBase(size_t Size)
: BeginX(&FirstEl), EndX(&FirstEl), CapacityX((char*)&FirstEl+Size) {}
/// isSmall - Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
public:
/// size_in_bytes - This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
bool empty() const { return BeginX == EndX; }
};
template <typename T>
class SmallVectorTemplateCommon : public SmallVectorBase {
protected:
void setEnd(T *P) { this->EndX = P; }
public:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {}
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
typedef T *iterator;
typedef const T *const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef T &reference;
typedef const T &const_reference;
typedef T *pointer;
typedef const T *const_pointer;
// forward iterator creation methods.
iterator begin() { return (iterator)this->BeginX; }
const_iterator begin() const { return (const_iterator)this->BeginX; }
iterator end() { return (iterator)this->EndX; }
const_iterator end() const { return (const_iterator)this->EndX; }
protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
size_type size() const { return end()-begin(); }
size_type max_size() const { return size_type(-1) / sizeof(T); }
/// capacity - Return the total number of elements in the currently allocated
/// buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
/// data - Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
/// data - Return a pointer to the vector's buffer, even if empty().
const_pointer data() const { return const_pointer(begin()); }
reference operator[](unsigned idx) {
assert(begin() + idx < end());
return begin()[idx];
}
const_reference operator[](unsigned idx) const {
assert(begin() + idx < end());
return begin()[idx];
}
reference front() {
return begin()[0];
}
const_reference front() const {
return begin()[0];
}
reference back() {
return end()[-1];
}
const_reference back() const {
return end()[-1];
}
};
/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
/// implementations that are designed to work with non-POD-like T's.
template <typename T, bool isPodLike>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
public:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
static void destroy_range(T *S, T *E) {
while (S != E) {
--E;
E->~T();
}
}
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0);
};
// Define this out-of-line to dissuade the C++ compiler from inlining it.
template <typename T, bool isPodLike>
void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t CurCapacity = this->capacity();
size_t CurSize = this->size();
size_t NewCapacity = 2*CurCapacity + 1; // Always grow, even from zero.
if (NewCapacity < MinSize)
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
// Copy the elements over.
this->uninitialized_copy(this->begin(), this->end(), NewElts);
// Destroy the original elements.
destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
free(this->begin());
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
}
/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
/// implementations that are designed to work with POD-like T's.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
public:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
// Arbitrary iterator types; just use the basic implementation.
std::uninitialized_copy(I, E, Dest);
}
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename T1, typename T2>
static void uninitialized_copy(T1 *I, T1 *E, T2 *Dest) {
// Use memcpy for PODs iterated by pointers (which includes SmallVector
// iterators): std::uninitialized_copy optimizes to memmove, but we can
// use memcpy here.
memcpy(Dest, I, (E-I)*sizeof(T));
}
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
};
/// SmallVectorImpl - This class consists of common code factored out of the
/// SmallVector class to reduce code duplication based on the SmallVector 'N'
/// template parameter.
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
SmallVectorImpl(const SmallVectorImpl&); // DISABLED.
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::size_type size_type;
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
}
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
free(this->begin());
}
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
}
void resize(unsigned N) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
this->construct_range(this->end(), this->begin()+N, T());
this->setEnd(this->begin()+N);
}
}
void resize(unsigned N, const T &NV) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
construct_range(this->end(), this->begin()+N, NV);
this->setEnd(this->begin()+N);
}
}
void reserve(unsigned N) {
if (this->capacity() < N)
this->grow(N);
}
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
new (this->end()) T(Elt);
this->setEnd(this->end()+1);
return;
}
this->grow();
goto Retry;
}
void pop_back() {
this->setEnd(this->end()-1);
this->end()->~T();
}
T pop_back_val() {
T Result = this->back();
pop_back();
return Result;
}
void swap(SmallVectorImpl &RHS);
/// append - Add the specified range to the end of the SmallVector.
///
template<typename in_iter>
void append(in_iter in_start, in_iter in_end) {
size_type NumInputs = std::distance(in_start, in_end);
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
// TODO: NEED To compile time dispatch on whether in_iter is a random access
// iterator to use the fast uninitialized_copy.
std::uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
}
/// append - Add the specified range to the end of the SmallVector.
///
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
}
void assign(unsigned NumElts, const T &Elt) {
clear();
if (this->capacity() < NumElts)
this->grow(NumElts);
this->setEnd(this->begin()+NumElts);
construct_range(this->begin(), this->end(), Elt);
}
iterator erase(iterator I) {
iterator N = I;
// Shift all elts down one.
std::copy(I+1, this->end(), I);
// Drop the last elt.
pop_back();
return(N);
}
iterator erase(iterator S, iterator E) {
iterator N = S;
// Shift all elts down.
iterator I = std::copy(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
return(N);
}
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
push_back(Elt);
return this->end()-1;
}
if (this->EndX < this->CapacityX) {
Retry:
new (this->end()) T(this->back());
this->setEnd(this->end()+1);
// Push everything else over.
std::copy_backward(I, this->end()-1, this->end());
// If we just moved the element we're inserting, be sure to update
// the reference.
const T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = *EltPtr;
return I;
}
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
goto Retry;
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->end()-1;
}
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
std::fill_n(I, NumToInsert, Elt);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
// Insert the non-overwritten middle part.
std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
return I;
}
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->end()-1;
}
size_t NumToInsert = std::distance(From, To);
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
for (; NumOverwritten > 0; --NumOverwritten) {
*I = *From;
++I; ++From;
}
// Insert the non-overwritten middle part.
this->uninitialized_copy(From, To, OldEnd);
return I;
}
const SmallVectorImpl
&operator=(const SmallVectorImpl &RHS);
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
return std::equal(this->begin(), this->end(), RHS.begin());
}
bool operator!=(const SmallVectorImpl &RHS) const {
return !(*this == RHS);
}
bool operator<(const SmallVectorImpl &RHS) const {
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
/// set_size - Set the array size to \arg N, which the current array must have
/// enough capacity for.
///
/// This does not construct or destroy any elements in the vector.
///
/// Clients can use this in conjunction with capacity() to write past the end
/// of the buffer when they know that more elements are available, and only
/// update the size later. This avoids the cost of value initializing elements
/// which will only be overwritten.
void set_size(unsigned N) {
assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
private:
static void construct_range(T *S, T *E, const T &Elt) {
for (; S != E; ++S)
new (S) T(Elt);
}
};
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
if (this == &RHS) return;
// We can only avoid copying elements if neither vector is small.
if (!this->isSmall() && !RHS.isSmall()) {
std::swap(this->BeginX, RHS.BeginX);
std::swap(this->EndX, RHS.EndX);
std::swap(this->CapacityX, RHS.CapacityX);
return;
}
if (RHS.size() > this->capacity())
this->grow(RHS.size());
if (this->size() > RHS.capacity())
RHS.grow(this->size());
// Swap the shared elements.
size_t NumShared = this->size();
if (NumShared > RHS.size()) NumShared = RHS.size();
for (unsigned i = 0; i != static_cast<unsigned>(NumShared); ++i)
std::swap((*this)[i], RHS[i]);
// Copy over the extra elts.
if (this->size() > RHS.size()) {
size_t EltDiff = this->size() - RHS.size();
this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
RHS.setEnd(RHS.end()+EltDiff);
this->destroy_range(this->begin()+NumShared, this->end());
this->setEnd(this->begin()+NumShared);
} else if (RHS.size() > this->size()) {
size_t EltDiff = RHS.size() - this->size();
this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
this->setEnd(this->end() + EltDiff);
this->destroy_range(RHS.begin()+NumShared, RHS.end());
RHS.setEnd(RHS.begin()+NumShared);
}
}
template <typename T>
const SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd;
if (RHSSize)
NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
else
NewEnd = this->begin();
// Destroy excess elements.
this->destroy_range(NewEnd, this->end());
// Trim.
this->setEnd(NewEnd);
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Copy construct the new elements in place.
this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
return *this;
}
/// SmallVector - This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
/// in-place, which allows it to avoid heap allocation when the actual number of
/// elements is below that threshold. This allows normal "small" cases to be
/// fast without losing generality for large inputs.
///
/// Note that this does not attempt to be exception safe.
///
template <typename T, unsigned N>
class SmallVector : public SmallVectorImpl<T> {
/// InlineElts - These are 'N-1' elements that are stored inline in the body
/// of the vector. The extra '1' element is stored in SmallVectorImpl.
typedef typename SmallVectorImpl<T>::U U;
enum {
// MinUs - The number of U's require to cover N T's.
MinUs = (static_cast<unsigned int>(sizeof(T))*N +
static_cast<unsigned int>(sizeof(U)) - 1) /
static_cast<unsigned int>(sizeof(U)),
// NumInlineEltsElts - The number of elements actually in this array. There
// is already one in the parent class, and we have to round up to avoid
// having a zero-element array.
NumInlineEltsElts = MinUs > 1 ? (MinUs - 1) : 1,
// NumTsAvailable - The number of T's we actually have space for, which may
// be more than N due to rounding.
NumTsAvailable = (NumInlineEltsElts+1)*static_cast<unsigned int>(sizeof(U))/
static_cast<unsigned int>(sizeof(T))
};
U InlineElts[NumInlineEltsElts];
public:
SmallVector() : SmallVectorImpl<T>(NumTsAvailable) {
}
explicit SmallVector(unsigned Size, const T &Value = T())
: SmallVectorImpl<T>(NumTsAvailable) {
this->reserve(Size);
while (Size--)
this->push_back(Value);
}
template<typename ItTy>
SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(NumTsAvailable) {
this->append(S, E);
}
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(NumTsAvailable) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(RHS);
}
const SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
return *this;
}
};
/// Specialize SmallVector at N=0. This specialization guarantees
/// that it can be instantiated at an incomplete T if none of its
/// members are required.
template <typename T>
class SmallVector<T,0> : public SmallVectorImpl<T> {
public:
SmallVector() : SmallVectorImpl<T>(0) {}
explicit SmallVector(unsigned Size, const T &Value = T())
: SmallVectorImpl<T>(0) {
this->reserve(Size);
while (Size--)
this->push_back(Value);
}
template<typename ItTy>
SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(0) {
this->append(S, E);
}
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(0) {
SmallVectorImpl<T>::operator=(RHS);
}
SmallVector &operator=(const SmallVectorImpl<T> &RHS) {
return SmallVectorImpl<T>::operator=(RHS);
}
};
template<typename T, unsigned N>
static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
return X.capacity_in_bytes();
}
} // End llvm namespace
namespace std {
/// Implement std::swap in terms of SmallVector swap.
template<typename T>
inline void
swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
LHS.swap(RHS);
}
/// Implement std::swap in terms of SmallVector swap.
template<typename T, unsigned N>
inline void
swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
LHS.swap(RHS);
}
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/ArrayRef.h
|
//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ARRAYREF_H
#define LLVM_ADT_ARRAYREF_H
#include "llvm/ADT/SmallVector.h"
#include <vector>
namespace llvm {
class APInt;
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
///
/// This class does not own the underlying data, it is expected to be used in
/// situations where the data resides in some other buffer, whose lifetime
/// extends past that of the ArrayRef. For this reason, it is not in general
/// safe to store an ArrayRef.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template<typename T>
class ArrayRef {
public:
typedef const T *iterator;
typedef const T *const_iterator;
typedef size_t size_type;
private:
/// The start of the array, in an external buffer.
const T *Data;
/// The number of elements.
size_type Length;
public:
/// @name Constructors
/// @{
/// Construct an empty ArrayRef.
/*implicit*/ ArrayRef() : Data(0), Length(0) {}
/// Construct an ArrayRef from a single element.
/*implicit*/ ArrayRef(const T &OneElt)
: Data(&OneElt), Length(1) {}
/// Construct an ArrayRef from a pointer and length.
/*implicit*/ ArrayRef(const T *data, size_t length)
: Data(data), Length(length) {}
/// Construct an ArrayRef from a range.
ArrayRef(const T *begin, const T *end)
: Data(begin), Length(end - begin) {}
/// Construct an ArrayRef from a SmallVector.
/*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a std::vector.
/*implicit*/ ArrayRef(const std::vector<T> &Vec)
: Data(Vec.empty() ? (T*)0 : &Vec[0]), Length(Vec.size()) {}
/// Construct an ArrayRef from a C array.
template <size_t N>
/*implicit*/ ArrayRef(const T (&Arr)[N])
: Data(Arr), Length(N) {}
/// @}
/// @name Simple Operations
/// @{
iterator begin() const { return Data; }
iterator end() const { return Data + Length; }
/// empty - Check if the array is empty.
bool empty() const { return Length == 0; }
const T *data() const { return Data; }
/// size - Get the array size.
size_t size() const { return Length; }
/// front - Get the first element.
const T &front() const {
assert(!empty());
return Data[0];
}
/// back - Get the last element.
const T &back() const {
assert(!empty());
return Data[Length-1];
}
/// equals - Check for element-wise equality.
bool equals(ArrayRef RHS) const {
if (Length != RHS.Length)
return false;
for (size_type i = 0; i != Length; i++)
if (Data[i] != RHS.Data[i])
return false;
return true;
}
/// slice(n) - Chop off the first N elements of the array.
ArrayRef<T> slice(unsigned N) {
assert(N <= size() && "Invalid specifier");
return ArrayRef<T>(data()+N, size()-N);
}
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
ArrayRef<T> slice(unsigned N, unsigned M) {
assert(N+M <= size() && "Invalid specifier");
return ArrayRef<T>(data()+N, M);
}
/// @}
/// @name Operator Overloads
/// @{
const T &operator[](size_t Index) const {
assert(Index < Length && "Invalid index!");
return Data[Index];
}
/// @}
/// @name Expensive Operations
/// @{
std::vector<T> vec() const {
return std::vector<T>(Data, Data+Length);
}
/// @}
/// @name Conversion operators
/// @{
operator std::vector<T>() const {
return std::vector<T>(Data, Data+Length);
}
/// @}
};
/// @name ArrayRef Convenience constructors
/// @{
/// Construct an ArrayRef from a single element.
template<typename T>
ArrayRef<T> makeArrayRef(const T &OneElt) {
return OneElt;
}
/// Construct an ArrayRef from a pointer and length.
template<typename T>
ArrayRef<T> makeArrayRef(const T *data, size_t length) {
return ArrayRef<T>(data, length);
}
/// Construct an ArrayRef from a range.
template<typename T>
ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
return ArrayRef<T>(begin, end);
}
/// Construct an ArrayRef from a SmallVector.
template <typename T>
ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a SmallVector.
template <typename T, unsigned N>
ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a std::vector.
template<typename T>
ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a C array.
template<typename T, size_t N>
ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
return ArrayRef<T>(Arr);
}
/// @}
/// @name ArrayRef Comparison Operators
/// @{
template<typename T>
inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
return LHS.equals(RHS);
}
template<typename T>
inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
return !(LHS == RHS);
}
/// @}
// ArrayRefs can be treated like a POD type.
template <typename T> struct isPodLike;
template <typename T> struct isPodLike<ArrayRef<T> > {
static const bool value = true;
};
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/StringRef.h
|
//===--- StringRef.h - Constant String Reference Wrapper --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGREF_H
#define LLVM_ADT_STRINGREF_H
#include <cassert>
#include <cstring>
#include <utility>
#include <string>
namespace llvm {
template<typename T>
class SmallVectorImpl;
class APInt;
/// StringRef - Represent a constant reference to a string, i.e. a character
/// array and a length, which need not be null terminated.
///
/// This class does not own the string data, it is expected to be used in
/// situations where the character data resides in some other buffer, whose
/// lifetime extends past that of the StringRef. For this reason, it is not in
/// general safe to store a StringRef.
class StringRef {
public:
typedef const char *iterator;
typedef const char *const_iterator;
static const size_t npos = ~size_t(0);
typedef size_t size_type;
private:
/// The start of the string, in an external buffer.
const char *Data;
/// The length of the string.
size_t Length;
// Workaround PR5482: nearly all gcc 4.x miscompile StringRef and std::min()
// Changing the arg of min to be an integer, instead of a reference to an
// integer works around this bug.
static size_t min(size_t a, size_t b) { return a < b ? a : b; }
static size_t max(size_t a, size_t b) { return a > b ? a : b; }
// Workaround memcmp issue with null pointers (undefined behavior)
// by providing a specialized version
static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
if (Length == 0) { return 0; }
return ::memcmp(Lhs,Rhs,Length);
}
public:
/// @name Constructors
/// @{
/// Construct an empty string ref.
/*implicit*/ StringRef() : Data(0), Length(0) {}
/// Construct a string ref from a cstring.
/*implicit*/ StringRef(const char *Str)
: Data(Str) {
assert(Str && "StringRef cannot be built from a NULL argument");
Length = ::strlen(Str); // invoking strlen(NULL) is undefined behavior
}
/// Construct a string ref from a pointer and length.
/*implicit*/ StringRef(const char *data, size_t length)
: Data(data), Length(length) {
assert((data || length == 0) &&
"StringRef cannot be built from a NULL argument with non-null length");
}
/// Construct a string ref from an std::string.
/*implicit*/ StringRef(const std::string &Str)
: Data(Str.data()), Length(Str.length()) {}
/// @}
/// @name Iterators
/// @{
iterator begin() const { return Data; }
iterator end() const { return Data + Length; }
/// @}
/// @name String Operations
/// @{
/// data - Get a pointer to the start of the string (which may not be null
/// terminated).
const char *data() const { return Data; }
/// empty - Check if the string is empty.
bool empty() const { return Length == 0; }
/// size - Get the string size.
size_t size() const { return Length; }
/// front - Get the first character in the string.
char front() const {
assert(!empty());
return Data[0];
}
/// back - Get the last character in the string.
char back() const {
assert(!empty());
return Data[Length-1];
}
/// equals - Check for string equality, this is more efficient than
/// compare() when the relative ordering of inequal strings isn't needed.
bool equals(StringRef RHS) const {
return (Length == RHS.Length &&
compareMemory(Data, RHS.Data, RHS.Length) == 0);
}
/// equals_lower - Check for string equality, ignoring case.
bool equals_lower(StringRef RHS) const {
return Length == RHS.Length && compare_lower(RHS) == 0;
}
/// compare - Compare two strings; the result is -1, 0, or 1 if this string
/// is lexicographically less than, equal to, or greater than the \arg RHS.
int compare(StringRef RHS) const {
// Check the prefix for a mismatch.
if (int Res = compareMemory(Data, RHS.Data, min(Length, RHS.Length)))
return Res < 0 ? -1 : 1;
// Otherwise the prefixes match, so we only need to check the lengths.
if (Length == RHS.Length)
return 0;
return Length < RHS.Length ? -1 : 1;
}
/// compare_lower - Compare two strings, ignoring case.
int compare_lower(StringRef RHS) const;
/// compare_numeric - Compare two strings, treating sequences of digits as
/// numbers.
int compare_numeric(StringRef RHS) const;
/// \brief Determine the edit distance between this string and another
/// string.
///
/// \param Other the string to compare this string against.
///
/// \param AllowReplacements whether to allow character
/// replacements (change one character into another) as a single
/// operation, rather than as two operations (an insertion and a
/// removal).
///
/// \param MaxEditDistance If non-zero, the maximum edit distance that
/// this routine is allowed to compute. If the edit distance will exceed
/// that maximum, returns \c MaxEditDistance+1.
///
/// \returns the minimum number of character insertions, removals,
/// or (if \p AllowReplacements is \c true) replacements needed to
/// transform one of the given strings into the other. If zero,
/// the strings are identical.
unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
unsigned MaxEditDistance = 0);
/// str - Get the contents as an std::string.
std::string str() const {
if (Data == 0) return std::string();
return std::string(Data, Length);
}
/// @}
/// @name Operator Overloads
/// @{
char operator[](size_t Index) const {
assert(Index < Length && "Invalid index!");
return Data[Index];
}
/// @}
/// @name Type Conversions
/// @{
operator std::string() const {
return str();
}
/// @}
/// @name String Predicates
/// @{
/// startswith - Check if this string starts with the given \arg Prefix.
bool startswith(StringRef Prefix) const {
return Length >= Prefix.Length &&
compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
}
/// endswith - Check if this string ends with the given \arg Suffix.
bool endswith(StringRef Suffix) const {
return Length >= Suffix.Length &&
compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
}
/// @}
/// @name String Searching
/// @{
/// find - Search for the first character \arg C in the string.
///
/// \return - The index of the first occurrence of \arg C, or npos if not
/// found.
size_t find(char C, size_t From = 0) const {
for (size_t i = min(From, Length), e = Length; i != e; ++i)
if (Data[i] == C)
return i;
return npos;
}
/// find - Search for the first string \arg Str in the string.
///
/// \return - The index of the first occurrence of \arg Str, or npos if not
/// found.
size_t find(StringRef Str, size_t From = 0) const;
/// rfind - Search for the last character \arg C in the string.
///
/// \return - The index of the last occurrence of \arg C, or npos if not
/// found.
size_t rfind(char C, size_t From = npos) const {
From = min(From, Length);
size_t i = From;
while (i != 0) {
--i;
if (Data[i] == C)
return i;
}
return npos;
}
/// rfind - Search for the last string \arg Str in the string.
///
/// \return - The index of the last occurrence of \arg Str, or npos if not
/// found.
size_t rfind(StringRef Str) const;
/// find_first_of - Find the first character in the string that is \arg C,
/// or npos if not found. Same as find.
size_type find_first_of(char C, size_t From = 0) const {
return find(C, From);
}
/// find_first_of - Find the first character in the string that is in \arg
/// Chars, or npos if not found.
///
/// Note: O(size() + Chars.size())
size_type find_first_of(StringRef Chars, size_t From = 0) const;
/// find_first_not_of - Find the first character in the string that is not
/// \arg C or npos if not found.
size_type find_first_not_of(char C, size_t From = 0) const;
/// find_first_not_of - Find the first character in the string that is not
/// in the string \arg Chars, or npos if not found.
///
/// Note: O(size() + Chars.size())
size_type find_first_not_of(StringRef Chars, size_t From = 0) const;
/// find_last_of - Find the last character in the string that is \arg C, or
/// npos if not found.
size_type find_last_of(char C, size_t From = npos) const {
return rfind(C, From);
}
/// find_last_of - Find the last character in the string that is in \arg C,
/// or npos if not found.
///
/// Note: O(size() + Chars.size())
size_type find_last_of(StringRef Chars, size_t From = npos) const;
/// @}
/// @name Helpful Algorithms
/// @{
/// count - Return the number of occurrences of \arg C in the string.
size_t count(char C) const {
size_t Count = 0;
for (size_t i = 0, e = Length; i != e; ++i)
if (Data[i] == C)
++Count;
return Count;
}
/// count - Return the number of non-overlapped occurrences of \arg Str in
/// the string.
size_t count(StringRef Str) const;
/// getAsInteger - Parse the current string as an integer of the specified
/// radix. If Radix is specified as zero, this does radix autosensing using
/// extended C rules: 0 is octal, 0x is hex, 0b is binary.
///
/// If the string is invalid or if only a subset of the string is valid,
/// this returns true to signify the error. The string is considered
/// erroneous if empty.
///
bool getAsInteger(unsigned Radix, long long &Result) const;
bool getAsInteger(unsigned Radix, unsigned long long &Result) const;
bool getAsInteger(unsigned Radix, int &Result) const;
bool getAsInteger(unsigned Radix, unsigned &Result) const;
// TODO: Provide overloads for int/unsigned that check for overflow.
/// getAsInteger - Parse the current string as an integer of the
/// specified radix, or of an autosensed radix if the radix given
/// is 0. The current value in Result is discarded, and the
/// storage is changed to be wide enough to store the parsed
/// integer.
///
/// Returns true if the string does not solely consist of a valid
/// non-empty number in the appropriate base.
///
/// APInt::fromString is superficially similar but assumes the
/// string is well-formed in the given radix.
bool getAsInteger(unsigned Radix, APInt &Result) const;
/// @}
/// @name Substring Operations
/// @{
/// substr - Return a reference to the substring from [Start, Start + N).
///
/// \param Start - The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
/// \param N - The number of characters to included in the substring. If N
/// exceeds the number of characters remaining in the string, the string
/// suffix (starting with \arg Start) will be returned.
StringRef substr(size_t Start, size_t N = npos) const {
Start = min(Start, Length);
return StringRef(Data + Start, min(N, Length - Start));
}
/// slice - Return a reference to the substring from [Start, End).
///
/// \param Start - The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
/// \param End - The index following the last character to include in the
/// substring. If this is npos, or less than \arg Start, or exceeds the
/// number of characters remaining in the string, the string suffix
/// (starting with \arg Start) will be returned.
StringRef slice(size_t Start, size_t End) const {
Start = min(Start, Length);
End = min(max(Start, End), Length);
return StringRef(Data + Start, End - Start);
}
/// split - Split into two substrings around the first occurrence of a
/// separator character.
///
/// If \arg Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
/// maximal. If \arg Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The character to split on.
/// \return - The split substrings.
std::pair<StringRef, StringRef> split(char Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
/// split - Split into two substrings around the first occurrence of a
/// separator string.
///
/// If \arg Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
/// maximal. If \arg Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The string to split on.
/// \return - The split substrings.
std::pair<StringRef, StringRef> split(StringRef Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
}
/// split - Split into substrings around the occurrences of a separator
/// string.
///
/// Each substring is stored in \arg A. If \arg MaxSplit is >= 0, at most
/// \arg MaxSplit splits are done and consequently <= \arg MaxSplit
/// elements are added to A.
/// If \arg KeepEmpty is false, empty strings are not added to \arg A. They
/// still count when considering \arg MaxSplit
/// An useful invariant is that
/// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true
///
/// \param A - Where to put the substrings.
/// \param Separator - The string to split on.
/// \param MaxSplit - The maximum number of times the string is split.
/// \param KeepEmpty - True if empty substring should be added.
void split(SmallVectorImpl<StringRef> &A,
StringRef Separator, int MaxSplit = -1,
bool KeepEmpty = true) const;
/// rsplit - Split into two substrings around the last occurrence of a
/// separator character.
///
/// If \arg Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
/// minimal. If \arg Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The character to split on.
/// \return - The split substrings.
std::pair<StringRef, StringRef> rsplit(char Separator) const {
size_t Idx = rfind(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
/// @}
};
/// @name StringRef Comparison Operators
/// @{
inline bool operator==(StringRef LHS, StringRef RHS) {
return LHS.equals(RHS);
}
inline bool operator!=(StringRef LHS, StringRef RHS) {
return !(LHS == RHS);
}
inline bool operator<(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) == -1;
}
inline bool operator<=(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) != 1;
}
inline bool operator>(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) == 1;
}
inline bool operator>=(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) != -1;
}
inline std::string &operator+=(std::string &buffer, llvm::StringRef string) {
return buffer.append(string.data(), string.size());
}
/// @}
// StringRefs can be treated like a POD type.
template <typename T> struct isPodLike;
template <> struct isPodLike<StringRef> { static const bool value = true; };
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/DenseMapInfo.h
|
//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines DenseMapInfo traits for DenseMap.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_DENSEMAPINFO_H
#define LLVM_ADT_DENSEMAPINFO_H
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
namespace llvm {
template<typename T>
struct DenseMapInfo {
//static inline T getEmptyKey();
//static inline T getTombstoneKey();
//static unsigned getHashValue(const T &Val);
//static bool isEqual(const T &LHS, const T &RHS);
};
// Provide DenseMapInfo for all pointers.
template<typename T>
struct DenseMapInfo<T*> {
static inline T* getEmptyKey() {
intptr_t Val = -1;
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
static inline T* getTombstoneKey() {
intptr_t Val = -2;
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
static unsigned getHashValue(const T *PtrVal) {
return (unsigned((uintptr_t)PtrVal) >> 4) ^
(unsigned((uintptr_t)PtrVal) >> 9);
}
static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
};
// Provide DenseMapInfo for chars.
template<> struct DenseMapInfo<char> {
static inline char getEmptyKey() { return ~0; }
static inline char getTombstoneKey() { return ~0 - 1; }
static unsigned getHashValue(const char& Val) { return Val * 37; }
static bool isEqual(const char &LHS, const char &RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for unsigned ints.
template<> struct DenseMapInfo<unsigned> {
static inline unsigned getEmptyKey() { return ~0; }
static inline unsigned getTombstoneKey() { return ~0U - 1; }
static unsigned getHashValue(const unsigned& Val) { return Val * 37; }
static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for unsigned longs.
template<> struct DenseMapInfo<unsigned long> {
static inline unsigned long getEmptyKey() { return ~0UL; }
static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
static unsigned getHashValue(const unsigned long& Val) {
return (unsigned)(Val * 37UL);
}
static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for unsigned long longs.
template<> struct DenseMapInfo<unsigned long long> {
static inline unsigned long long getEmptyKey() { return ~0ULL; }
static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
static unsigned getHashValue(const unsigned long long& Val) {
return (unsigned)(Val * 37ULL);
}
static bool isEqual(const unsigned long long& LHS,
const unsigned long long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for ints.
template<> struct DenseMapInfo<int> {
static inline int getEmptyKey() { return 0x7fffffff; }
static inline int getTombstoneKey() { return -0x7fffffff - 1; }
static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37); }
static bool isEqual(const int& LHS, const int& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for longs.
template<> struct DenseMapInfo<long> {
static inline long getEmptyKey() {
return (1UL << (sizeof(long) * 8 - 1)) - 1L;
}
static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
static unsigned getHashValue(const long& Val) {
return (unsigned)(Val * 37L);
}
static bool isEqual(const long& LHS, const long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for long longs.
template<> struct DenseMapInfo<long long> {
static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
static unsigned getHashValue(const long long& Val) {
return (unsigned)(Val * 37LL);
}
static bool isEqual(const long long& LHS,
const long long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for all pairs whose members have info.
template<typename T, typename U>
struct DenseMapInfo<std::pair<T, U> > {
typedef std::pair<T, U> Pair;
typedef DenseMapInfo<T> FirstInfo;
typedef DenseMapInfo<U> SecondInfo;
static inline Pair getEmptyKey() {
return std::make_pair(FirstInfo::getEmptyKey(),
SecondInfo::getEmptyKey());
}
static inline Pair getTombstoneKey() {
return std::make_pair(FirstInfo::getTombstoneKey(),
SecondInfo::getTombstoneKey());
}
static unsigned getHashValue(const Pair& PairVal) {
uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
| (uint64_t)SecondInfo::getHashValue(PairVal.second);
key += ~(key << 32);
key ^= (key >> 22);
key += ~(key << 13);
key ^= (key >> 8);
key += (key << 3);
key ^= (key >> 15);
key += ~(key << 27);
key ^= (key >> 31);
return (unsigned)key;
}
static bool isEqual(const Pair &LHS, const Pair &RHS) {
return FirstInfo::isEqual(LHS.first, RHS.first) &&
SecondInfo::isEqual(LHS.second, RHS.second);
}
};
} // end namespace llvm
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/StringExtras.h
|
//===-- llvm/ADT/StringExtras.h - Useful string functions -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful when dealing with strings.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGEXTRAS_H
#define LLVM_ADT_STRINGEXTRAS_H
#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/StringRef.h"
#include <cctype>
#include <cstdio>
#include <string>
namespace llvm {
template<typename T> class SmallVectorImpl;
/// hexdigit - Return the hexadecimal character for the
/// given number \arg X (which should be less than 16).
static inline char hexdigit(unsigned X, bool LowerCase = false) {
const char HexChar = LowerCase ? 'a' : 'A';
return X < 10 ? '0' + X : HexChar + X - 10;
}
/// utohex_buffer - Emit the specified number into the buffer specified by
/// BufferEnd, returning a pointer to the start of the string. This can be used
/// like this: (note that the buffer must be large enough to handle any number):
/// char Buffer[40];
/// printf("0x%s", utohex_buffer(X, Buffer+40));
///
/// This should only be used with unsigned types.
///
template<typename IntTy>
static inline char *utohex_buffer(IntTy X, char *BufferEnd) {
char *BufPtr = BufferEnd;
*--BufPtr = 0; // Null terminate buffer.
if (X == 0) {
*--BufPtr = '0'; // Handle special case.
return BufPtr;
}
while (X) {
unsigned char Mod = static_cast<unsigned char>(X) & 15;
*--BufPtr = hexdigit(Mod);
X >>= 4;
}
return BufPtr;
}
static inline std::string utohexstr(uint64_t X) {
char Buffer[17];
return utohex_buffer(X, Buffer+17);
}
static inline std::string utostr_32(uint32_t X, bool isNeg = false) {
char Buffer[11];
char *BufPtr = Buffer+11;
if (X == 0) *--BufPtr = '0'; // Handle special case...
while (X) {
*--BufPtr = '0' + char(X % 10);
X /= 10;
}
if (isNeg) *--BufPtr = '-'; // Add negative sign...
return std::string(BufPtr, Buffer+11);
}
static inline std::string utostr(uint64_t X, bool isNeg = false) {
char Buffer[21];
char *BufPtr = Buffer+21;
if (X == 0) *--BufPtr = '0'; // Handle special case...
while (X) {
*--BufPtr = '0' + char(X % 10);
X /= 10;
}
if (isNeg) *--BufPtr = '-'; // Add negative sign...
return std::string(BufPtr, Buffer+21);
}
static inline std::string itostr(int64_t X) {
if (X < 0)
return utostr(static_cast<uint64_t>(-X), true);
else
return utostr(static_cast<uint64_t>(X));
}
static inline std::string ftostr(double V) {
char Buffer[200];
sprintf(Buffer, "%20.6e", V);
char *B = Buffer;
while (*B == ' ') ++B;
return B;
}
static inline std::string ftostr(const APFloat& V) {
if (&V.getSemantics() == &APFloat::IEEEdouble)
return ftostr(V.convertToDouble());
else if (&V.getSemantics() == &APFloat::IEEEsingle)
return ftostr((double)V.convertToFloat());
return "<unknown format in ftostr>"; // error
}
static inline std::string LowercaseString(const std::string &S) {
std::string result(S);
for (unsigned i = 0; i < S.length(); ++i)
if (isupper(result[i]))
result[i] = char(tolower(result[i]));
return result;
}
static inline std::string UppercaseString(const std::string &S) {
std::string result(S);
for (unsigned i = 0; i < S.length(); ++i)
if (islower(result[i]))
result[i] = char(toupper(result[i]));
return result;
}
/// StrInStrNoCase - Portable version of strcasestr. Locates the first
/// occurrence of string 's1' in string 's2', ignoring case. Returns
/// the offset of s2 in s1 or npos if s2 cannot be found.
StringRef::size_type StrInStrNoCase(StringRef s1, StringRef s2);
/// getToken - This function extracts one token from source, ignoring any
/// leading characters that appear in the Delimiters string, and ending the
/// token at any of the characters that appear in the Delimiters string. If
/// there are no tokens in the source string, an empty string is returned.
/// The function returns a pair containing the extracted token and the
/// remaining tail string.
std::pair<StringRef, StringRef> getToken(StringRef Source,
StringRef Delimiters = " \t\n\v\f\r");
/// SplitString - Split up the specified string according to the specified
/// delimiters, appending the result fragments to the output list.
void SplitString(StringRef Source,
SmallVectorImpl<StringRef> &OutFragments,
StringRef Delimiters = " \t\n\v\f\r");
/// HashString - Hash function for strings.
///
/// This is the Bernstein hash function.
//
// FIXME: Investigate whether a modified bernstein hash function performs
// better: http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
// X*33+c -> X*33^c
static inline unsigned HashString(StringRef Str, unsigned Result = 0) {
for (unsigned i = 0, e = Str.size(); i != e; ++i)
Result = Result * 33 + Str[i];
return Result;
}
} // End llvm namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include/llvm
|
rapidsai_public_repos/code-share/maxflow/galois/include/llvm/ADT/APInt.h
|
//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a class to represent arbitrary precision integral
// constant values and operations on them.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_APINT_H
#define LLVM_APINT_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <climits>
#include <cstring>
#include <string>
namespace llvm {
class Serializer;
class Deserializer;
class FoldingSetNodeID;
class StringRef;
template<typename T>
class SmallVectorImpl;
// An unsigned host type used as a single part of a multi-part
// bignum.
typedef uint64_t integerPart;
const unsigned int host_char_bit = 8;
const unsigned int integerPartWidth = host_char_bit *
static_cast<unsigned int>(sizeof(integerPart));
//===----------------------------------------------------------------------===//
// APInt Class
//===----------------------------------------------------------------------===//
/// APInt - This class represents arbitrary precision constant integral values.
/// It is a functional replacement for common case unsigned integer type like
/// "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width
/// integer sizes and large integer value types such as 3-bits, 15-bits, or more
/// than 64-bits of precision. APInt provides a variety of arithmetic operators
/// and methods to manipulate integer values of any bit-width. It supports both
/// the typical integer arithmetic and comparison operations as well as bitwise
/// manipulation.
///
/// The class has several invariants worth noting:
/// * All bit, byte, and word positions are zero-based.
/// * Once the bit width is set, it doesn't change except by the Truncate,
/// SignExtend, or ZeroExtend operations.
/// * All binary operators must be on APInt instances of the same bit width.
/// Attempting to use these operators on instances with different bit
/// widths will yield an assertion.
/// * The value is stored canonically as an unsigned value. For operations
/// where it makes a difference, there are both signed and unsigned variants
/// of the operation. For example, sdiv and udiv. However, because the bit
/// widths must be the same, operations such as Mul and Add produce the same
/// results regardless of whether the values are interpreted as signed or
/// not.
/// * In general, the class tries to follow the style of computation that LLVM
/// uses in its IR. This simplifies its use for LLVM.
///
/// @brief Class for arbitrary precision integers.
class APInt {
unsigned BitWidth; ///< The number of bits in this APInt.
/// This union is used to store the integer value. When the
/// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
union {
uint64_t VAL; ///< Used to store the <= 64 bits integer value.
uint64_t *pVal; ///< Used to store the >64 bits integer value.
};
/// This enum is used to hold the constants we needed for APInt.
enum {
/// Bits in a word
APINT_BITS_PER_WORD = static_cast<unsigned int>(sizeof(uint64_t)) *
CHAR_BIT,
/// Byte size of a word
APINT_WORD_SIZE = static_cast<unsigned int>(sizeof(uint64_t))
};
/// This constructor is used only internally for speed of construction of
/// temporaries. It is unsafe for general use so it is not public.
/// @brief Fast internal constructor
APInt(uint64_t* val, unsigned bits) : BitWidth(bits), pVal(val) { }
/// @returns true if the number of bits <= 64, false otherwise.
/// @brief Determine if this APInt just has one word to store value.
bool isSingleWord() const {
return BitWidth <= APINT_BITS_PER_WORD;
}
/// @returns the word position for the specified bit position.
/// @brief Determine which word a bit is in.
static unsigned whichWord(unsigned bitPosition) {
return bitPosition / APINT_BITS_PER_WORD;
}
/// @returns the bit position in a word for the specified bit position
/// in the APInt.
/// @brief Determine which bit in a word a bit is in.
static unsigned whichBit(unsigned bitPosition) {
return bitPosition % APINT_BITS_PER_WORD;
}
/// This method generates and returns a uint64_t (word) mask for a single
/// bit at a specific bit position. This is used to mask the bit in the
/// corresponding word.
/// @returns a uint64_t with only bit at "whichBit(bitPosition)" set
/// @brief Get a single bit mask.
static uint64_t maskBit(unsigned bitPosition) {
return 1ULL << whichBit(bitPosition);
}
/// This method is used internally to clear the to "N" bits in the high order
/// word that are not used by the APInt. This is needed after the most
/// significant word is assigned a value to ensure that those bits are
/// zero'd out.
/// @brief Clear unused high order bits
APInt& clearUnusedBits() {
// Compute how many bits are used in the final word
unsigned wordBits = BitWidth % APINT_BITS_PER_WORD;
if (wordBits == 0)
// If all bits are used, we want to leave the value alone. This also
// avoids the undefined behavior of >> when the shift is the same size as
// the word size (64).
return *this;
// Mask out the high bits.
uint64_t mask = ~uint64_t(0ULL) >> (APINT_BITS_PER_WORD - wordBits);
if (isSingleWord())
VAL &= mask;
else
pVal[getNumWords() - 1] &= mask;
return *this;
}
/// @returns the corresponding word for the specified bit position.
/// @brief Get the word corresponding to a bit position
uint64_t getWord(unsigned bitPosition) const {
return isSingleWord() ? VAL : pVal[whichWord(bitPosition)];
}
/// Converts a string into a number. The string must be non-empty
/// and well-formed as a number of the given base. The bit-width
/// must be sufficient to hold the result.
///
/// This is used by the constructors that take string arguments.
///
/// StringRef::getAsInteger is superficially similar but (1) does
/// not assume that the string is well-formed and (2) grows the
/// result to hold the input.
///
/// @param radix 2, 8, 10, 16, or 36
/// @brief Convert a char array into an APInt
void fromString(unsigned numBits, StringRef str, uint8_t radix);
/// This is used by the toString method to divide by the radix. It simply
/// provides a more convenient form of divide for internal use since KnuthDiv
/// has specific constraints on its inputs. If those constraints are not met
/// then it provides a simpler form of divide.
/// @brief An internal division function for dividing APInts.
static void divide(const APInt LHS, unsigned lhsWords,
const APInt &RHS, unsigned rhsWords,
APInt *Quotient, APInt *Remainder);
/// out-of-line slow case for inline constructor
void initSlowCase(unsigned numBits, uint64_t val, bool isSigned);
/// shared code between two array constructors
void initFromArray(ArrayRef<uint64_t> array);
/// out-of-line slow case for inline copy constructor
void initSlowCase(const APInt& that);
/// out-of-line slow case for shl
APInt shlSlowCase(unsigned shiftAmt) const;
/// out-of-line slow case for operator&
APInt AndSlowCase(const APInt& RHS) const;
/// out-of-line slow case for operator|
APInt OrSlowCase(const APInt& RHS) const;
/// out-of-line slow case for operator^
APInt XorSlowCase(const APInt& RHS) const;
/// out-of-line slow case for operator=
APInt& AssignSlowCase(const APInt& RHS);
/// out-of-line slow case for operator==
bool EqualSlowCase(const APInt& RHS) const;
/// out-of-line slow case for operator==
bool EqualSlowCase(uint64_t Val) const;
/// out-of-line slow case for countLeadingZeros
unsigned countLeadingZerosSlowCase() const;
/// out-of-line slow case for countTrailingOnes
unsigned countTrailingOnesSlowCase() const;
/// out-of-line slow case for countPopulation
unsigned countPopulationSlowCase() const;
public:
/// @name Constructors
/// @{
/// If isSigned is true then val is treated as if it were a signed value
/// (i.e. as an int64_t) and the appropriate sign extension to the bit width
/// will be done. Otherwise, no sign extension occurs (high order bits beyond
/// the range of val are zero filled).
/// @param numBits the bit width of the constructed APInt
/// @param val the initial value of the APInt
/// @param isSigned how to treat signedness of val
/// @brief Create a new APInt of numBits width, initialized as val.
APInt(unsigned numBits, uint64_t val, bool isSigned = false)
: BitWidth(numBits), VAL(0) {
assert(BitWidth && "bitwidth too small");
if (isSingleWord())
VAL = val;
else
initSlowCase(numBits, val, isSigned);
clearUnusedBits();
}
/// Note that bigVal.size() can be smaller or larger than the corresponding
/// bit width but any extraneous bits will be dropped.
/// @param numBits the bit width of the constructed APInt
/// @param bigVal a sequence of words to form the initial value of the APInt
/// @brief Construct an APInt of numBits width, initialized as bigVal[].
APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);
/// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
/// deprecated because this constructor is prone to ambiguity with the
/// APInt(unsigned, uint64_t, bool) constructor.
///
/// If this overload is ever deleted, care should be taken to prevent calls
/// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
/// constructor.
APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
/// This constructor interprets the string \arg str in the given radix. The
/// interpretation stops when the first character that is not suitable for the
/// radix is encountered, or the end of the string. Acceptable radix values
/// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
/// string to require more bits than numBits.
///
/// @param numBits the bit width of the constructed APInt
/// @param str the string to be interpreted
/// @param radix the radix to use for the conversion
/// @brief Construct an APInt from a string representation.
APInt(unsigned numBits, StringRef str, uint8_t radix);
/// Simply makes *this a copy of that.
/// @brief Copy Constructor.
APInt(const APInt& that)
: BitWidth(that.BitWidth), VAL(0) {
assert(BitWidth && "bitwidth too small");
if (isSingleWord())
VAL = that.VAL;
else
initSlowCase(that);
}
/// @brief Destructor.
~APInt() {
if (!isSingleWord())
delete [] pVal;
}
/// Default constructor that creates an uninitialized APInt. This is useful
/// for object deserialization (pair this with the static method Read).
explicit APInt() : BitWidth(1) {}
/// Profile - Used to insert APInt objects, or objects that contain APInt
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& id) const;
/// @}
/// @name Value Tests
/// @{
/// This tests the high bit of this APInt to determine if it is set.
/// @returns true if this APInt is negative, false otherwise
/// @brief Determine sign of this APInt.
bool isNegative() const {
return (*this)[BitWidth - 1];
}
/// This tests the high bit of the APInt to determine if it is unset.
/// @brief Determine if this APInt Value is non-negative (>= 0)
bool isNonNegative() const {
return !isNegative();
}
/// This tests if the value of this APInt is positive (> 0). Note
/// that 0 is not a positive value.
/// @returns true if this APInt is positive.
/// @brief Determine if this APInt Value is positive.
bool isStrictlyPositive() const {
return isNonNegative() && !!*this;
}
/// This checks to see if the value has all bits of the APInt are set or not.
/// @brief Determine if all bits are set
bool isAllOnesValue() const {
return countPopulation() == BitWidth;
}
/// This checks to see if the value of this APInt is the maximum unsigned
/// value for the APInt's bit width.
/// @brief Determine if this is the largest unsigned value.
bool isMaxValue() const {
return countPopulation() == BitWidth;
}
/// This checks to see if the value of this APInt is the maximum signed
/// value for the APInt's bit width.
/// @brief Determine if this is the largest signed value.
bool isMaxSignedValue() const {
return BitWidth == 1 ? VAL == 0 :
!isNegative() && countPopulation() == BitWidth - 1;
}
/// This checks to see if the value of this APInt is the minimum unsigned
/// value for the APInt's bit width.
/// @brief Determine if this is the smallest unsigned value.
bool isMinValue() const {
return !*this;
}
/// This checks to see if the value of this APInt is the minimum signed
/// value for the APInt's bit width.
/// @brief Determine if this is the smallest signed value.
bool isMinSignedValue() const {
return BitWidth == 1 ? VAL == 1 : isNegative() && isPowerOf2();
}
/// @brief Check if this APInt has an N-bits unsigned integer value.
bool isIntN(unsigned N) const {
assert(N && "N == 0 ???");
if (N >= getBitWidth())
return true;
if (isSingleWord())
return isUIntN(N, VAL);
return APInt(N, makeArrayRef(pVal, getNumWords())).zext(getBitWidth())
== (*this);
}
/// @brief Check if this APInt has an N-bits signed integer value.
bool isSignedIntN(unsigned N) const {
assert(N && "N == 0 ???");
return getMinSignedBits() <= N;
}
/// @returns true if the argument APInt value is a power of two > 0.
bool isPowerOf2() const {
if (isSingleWord())
return isPowerOf2_64(VAL);
return countPopulationSlowCase() == 1;
}
/// isSignBit - Return true if this is the value returned by getSignBit.
bool isSignBit() const { return isMinSignedValue(); }
/// This converts the APInt to a boolean value as a test against zero.
/// @brief Boolean conversion function.
bool getBoolValue() const {
return !!*this;
}
/// getLimitedValue - If this value is smaller than the specified limit,
/// return it, otherwise return the limit value. This causes the value
/// to saturate to the limit.
uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const {
return (getActiveBits() > 64 || getZExtValue() > Limit) ?
Limit : getZExtValue();
}
/// @}
/// @name Value Generators
/// @{
/// @brief Gets maximum unsigned value of APInt for specific bit width.
static APInt getMaxValue(unsigned numBits) {
return getAllOnesValue(numBits);
}
/// @brief Gets maximum signed value of APInt for a specific bit width.
static APInt getSignedMaxValue(unsigned numBits) {
APInt API = getAllOnesValue(numBits);
API.clearBit(numBits - 1);
return API;
}
/// @brief Gets minimum unsigned value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits) {
return APInt(numBits, 0);
}
/// @brief Gets minimum signed value of APInt for a specific bit width.
static APInt getSignedMinValue(unsigned numBits) {
APInt API(numBits, 0);
API.setBit(numBits - 1);
return API;
}
/// getSignBit - This is just a wrapper function of getSignedMinValue(), and
/// it helps code readability when we want to get a SignBit.
/// @brief Get the SignBit for a specific bit width.
static APInt getSignBit(unsigned BitWidth) {
return getSignedMinValue(BitWidth);
}
/// @returns the all-ones value for an APInt of the specified bit-width.
/// @brief Get the all-ones value.
static APInt getAllOnesValue(unsigned numBits) {
return APInt(numBits, -1ULL, true);
}
/// @returns the '0' value for an APInt of the specified bit-width.
/// @brief Get the '0' value.
static APInt getNullValue(unsigned numBits) {
return APInt(numBits, 0);
}
/// Get an APInt with the same BitWidth as this APInt, just zero mask
/// the low bits and right shift to the least significant bit.
/// @returns the high "numBits" bits of this APInt.
APInt getHiBits(unsigned numBits) const;
/// Get an APInt with the same BitWidth as this APInt, just zero mask
/// the high bits.
/// @returns the low "numBits" bits of this APInt.
APInt getLoBits(unsigned numBits) const;
/// getOneBitSet - Return an APInt with exactly one bit set in the result.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
APInt Res(numBits, 0);
Res.setBit(BitNo);
return Res;
}
/// Constructs an APInt value that has a contiguous range of bits set. The
/// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
/// bits will be zero. For example, with parameters(32, 0, 16) you would get
/// 0x0000FFFF. If hiBit is less than loBit then the set bits "wrap". For
/// example, with parameters (32, 28, 4), you would get 0xF000000F.
/// @param numBits the intended bit width of the result
/// @param loBit the index of the lowest bit set.
/// @param hiBit the index of the highest bit set.
/// @returns An APInt value with the requested bits set.
/// @brief Get a value with a block of bits set.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
assert(hiBit <= numBits && "hiBit out of range");
assert(loBit < numBits && "loBit out of range");
if (hiBit < loBit)
return getLowBitsSet(numBits, hiBit) |
getHighBitsSet(numBits, numBits-loBit);
return getLowBitsSet(numBits, hiBit-loBit).shl(loBit);
}
/// Constructs an APInt value that has the top hiBitsSet bits set.
/// @param numBits the bitwidth of the result
/// @param hiBitsSet the number of high-order bits set in the result.
/// @brief Get a value with high bits set
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
assert(hiBitsSet <= numBits && "Too many bits to set!");
// Handle a degenerate case, to avoid shifting by word size
if (hiBitsSet == 0)
return APInt(numBits, 0);
unsigned shiftAmt = numBits - hiBitsSet;
// For small values, return quickly
if (numBits <= APINT_BITS_PER_WORD)
return APInt(numBits, ~0ULL << shiftAmt);
return getAllOnesValue(numBits).shl(shiftAmt);
}
/// Constructs an APInt value that has the bottom loBitsSet bits set.
/// @param numBits the bitwidth of the result
/// @param loBitsSet the number of low-order bits set in the result.
/// @brief Get a value with low bits set
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
assert(loBitsSet <= numBits && "Too many bits to set!");
// Handle a degenerate case, to avoid shifting by word size
if (loBitsSet == 0)
return APInt(numBits, 0);
if (loBitsSet == APINT_BITS_PER_WORD)
return APInt(numBits, -1ULL);
// For small values, return quickly.
if (numBits < APINT_BITS_PER_WORD)
return APInt(numBits, (1ULL << loBitsSet) - 1);
return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
}
/// The hash value is computed as the sum of the words and the bit width.
/// @returns A hash value computed from the sum of the APInt words.
/// @brief Get a hash value based on this APInt
uint64_t getHashValue() const;
/// This function returns a pointer to the internal storage of the APInt.
/// This is useful for writing out the APInt in binary form without any
/// conversions.
const uint64_t* getRawData() const {
if (isSingleWord())
return &VAL;
return &pVal[0];
}
/// @}
/// @name Unary Operators
/// @{
/// @returns a new APInt value representing *this incremented by one
/// @brief Postfix increment operator.
const APInt operator++(int) {
APInt API(*this);
++(*this);
return API;
}
/// @returns *this incremented by one
/// @brief Prefix increment operator.
APInt& operator++();
/// @returns a new APInt representing *this decremented by one.
/// @brief Postfix decrement operator.
const APInt operator--(int) {
APInt API(*this);
--(*this);
return API;
}
/// @returns *this decremented by one.
/// @brief Prefix decrement operator.
APInt& operator--();
/// Performs a bitwise complement operation on this APInt.
/// @returns an APInt that is the bitwise complement of *this
/// @brief Unary bitwise complement operator.
APInt operator~() const {
APInt Result(*this);
Result.flipAllBits();
return Result;
}
/// Negates *this using two's complement logic.
/// @returns An APInt value representing the negation of *this.
/// @brief Unary negation operator
APInt operator-() const {
return APInt(BitWidth, 0) - (*this);
}
/// Performs logical negation operation on this APInt.
/// @returns true if *this is zero, false otherwise.
/// @brief Logical negation operator.
bool operator!() const;
/// @}
/// @name Assignment Operators
/// @{
/// @returns *this after assignment of RHS.
/// @brief Copy assignment operator.
APInt& operator=(const APInt& RHS) {
// If the bitwidths are the same, we can avoid mucking with memory
if (isSingleWord() && RHS.isSingleWord()) {
VAL = RHS.VAL;
BitWidth = RHS.BitWidth;
return clearUnusedBits();
}
return AssignSlowCase(RHS);
}
/// The RHS value is assigned to *this. If the significant bits in RHS exceed
/// the bit width, the excess bits are truncated. If the bit width is larger
/// than 64, the value is zero filled in the unspecified high order bits.
/// @returns *this after assignment of RHS value.
/// @brief Assignment operator.
APInt& operator=(uint64_t RHS);
/// Performs a bitwise AND operation on this APInt and RHS. The result is
/// assigned to *this.
/// @returns *this after ANDing with RHS.
/// @brief Bitwise AND assignment operator.
APInt& operator&=(const APInt& RHS);
/// Performs a bitwise OR operation on this APInt and RHS. The result is
/// assigned *this;
/// @returns *this after ORing with RHS.
/// @brief Bitwise OR assignment operator.
APInt& operator|=(const APInt& RHS);
/// Performs a bitwise OR operation on this APInt and RHS. RHS is
/// logically zero-extended or truncated to match the bit-width of
/// the LHS.
///
/// @brief Bitwise OR assignment operator.
APInt& operator|=(uint64_t RHS) {
if (isSingleWord()) {
VAL |= RHS;
clearUnusedBits();
} else {
pVal[0] |= RHS;
}
return *this;
}
/// Performs a bitwise XOR operation on this APInt and RHS. The result is
/// assigned to *this.
/// @returns *this after XORing with RHS.
/// @brief Bitwise XOR assignment operator.
APInt& operator^=(const APInt& RHS);
/// Multiplies this APInt by RHS and assigns the result to *this.
/// @returns *this
/// @brief Multiplication assignment operator.
APInt& operator*=(const APInt& RHS);
/// Adds RHS to *this and assigns the result to *this.
/// @returns *this
/// @brief Addition assignment operator.
APInt& operator+=(const APInt& RHS);
/// Subtracts RHS from *this and assigns the result to *this.
/// @returns *this
/// @brief Subtraction assignment operator.
APInt& operator-=(const APInt& RHS);
/// Shifts *this left by shiftAmt and assigns the result to *this.
/// @returns *this after shifting left by shiftAmt
/// @brief Left-shift assignment function.
APInt& operator<<=(unsigned shiftAmt) {
*this = shl(shiftAmt);
return *this;
}
/// @}
/// @name Binary Operators
/// @{
/// Performs a bitwise AND operation on *this and RHS.
/// @returns An APInt value representing the bitwise AND of *this and RHS.
/// @brief Bitwise AND operator.
APInt operator&(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
if (isSingleWord())
return APInt(getBitWidth(), VAL & RHS.VAL);
return AndSlowCase(RHS);
}
APInt And(const APInt& RHS) const {
return this->operator&(RHS);
}
/// Performs a bitwise OR operation on *this and RHS.
/// @returns An APInt value representing the bitwise OR of *this and RHS.
/// @brief Bitwise OR operator.
APInt operator|(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
if (isSingleWord())
return APInt(getBitWidth(), VAL | RHS.VAL);
return OrSlowCase(RHS);
}
APInt Or(const APInt& RHS) const {
return this->operator|(RHS);
}
/// Performs a bitwise XOR operation on *this and RHS.
/// @returns An APInt value representing the bitwise XOR of *this and RHS.
/// @brief Bitwise XOR operator.
APInt operator^(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
if (isSingleWord())
return APInt(BitWidth, VAL ^ RHS.VAL);
return XorSlowCase(RHS);
}
APInt Xor(const APInt& RHS) const {
return this->operator^(RHS);
}
/// Multiplies this APInt by RHS and returns the result.
/// @brief Multiplication operator.
APInt operator*(const APInt& RHS) const;
/// Adds RHS to this APInt and returns the result.
/// @brief Addition operator.
APInt operator+(const APInt& RHS) const;
APInt operator+(uint64_t RHS) const {
return (*this) + APInt(BitWidth, RHS);
}
/// Subtracts RHS from this APInt and returns the result.
/// @brief Subtraction operator.
APInt operator-(const APInt& RHS) const;
APInt operator-(uint64_t RHS) const {
return (*this) - APInt(BitWidth, RHS);
}
APInt operator<<(unsigned Bits) const {
return shl(Bits);
}
APInt operator<<(const APInt &Bits) const {
return shl(Bits);
}
/// Arithmetic right-shift this APInt by shiftAmt.
/// @brief Arithmetic right-shift function.
APInt ashr(unsigned shiftAmt) const;
/// Logical right-shift this APInt by shiftAmt.
/// @brief Logical right-shift function.
APInt lshr(unsigned shiftAmt) const;
/// Left-shift this APInt by shiftAmt.
/// @brief Left-shift function.
APInt shl(unsigned shiftAmt) const {
assert(shiftAmt <= BitWidth && "Invalid shift amount");
if (isSingleWord()) {
if (shiftAmt == BitWidth)
return APInt(BitWidth, 0); // avoid undefined shift results
return APInt(BitWidth, VAL << shiftAmt);
}
return shlSlowCase(shiftAmt);
}
/// @brief Rotate left by rotateAmt.
APInt rotl(unsigned rotateAmt) const;
/// @brief Rotate right by rotateAmt.
APInt rotr(unsigned rotateAmt) const;
/// Arithmetic right-shift this APInt by shiftAmt.
/// @brief Arithmetic right-shift function.
APInt ashr(const APInt &shiftAmt) const;
/// Logical right-shift this APInt by shiftAmt.
/// @brief Logical right-shift function.
APInt lshr(const APInt &shiftAmt) const;
/// Left-shift this APInt by shiftAmt.
/// @brief Left-shift function.
APInt shl(const APInt &shiftAmt) const;
/// @brief Rotate left by rotateAmt.
APInt rotl(const APInt &rotateAmt) const;
/// @brief Rotate right by rotateAmt.
APInt rotr(const APInt &rotateAmt) const;
/// Perform an unsigned divide operation on this APInt by RHS. Both this and
/// RHS are treated as unsigned quantities for purposes of this division.
/// @returns a new APInt value containing the division result
/// @brief Unsigned division operation.
APInt udiv(const APInt &RHS) const;
/// Signed divide this APInt by APInt RHS.
/// @brief Signed division function for APInt.
APInt sdiv(const APInt &RHS) const {
if (isNegative())
if (RHS.isNegative())
return (-(*this)).udiv(-RHS);
else
return -((-(*this)).udiv(RHS));
else if (RHS.isNegative())
return -(this->udiv(-RHS));
return this->udiv(RHS);
}
/// Perform an unsigned remainder operation on this APInt with RHS being the
/// divisor. Both this and RHS are treated as unsigned quantities for purposes
/// of this operation. Note that this is a true remainder operation and not
/// a modulo operation because the sign follows the sign of the dividend
/// which is *this.
/// @returns a new APInt value containing the remainder result
/// @brief Unsigned remainder operation.
APInt urem(const APInt &RHS) const;
/// Signed remainder operation on APInt.
/// @brief Function for signed remainder operation.
APInt srem(const APInt &RHS) const {
if (isNegative())
if (RHS.isNegative())
return -((-(*this)).urem(-RHS));
else
return -((-(*this)).urem(RHS));
else if (RHS.isNegative())
return this->urem(-RHS);
return this->urem(RHS);
}
/// Sometimes it is convenient to divide two APInt values and obtain both the
/// quotient and remainder. This function does both operations in the same
/// computation making it a little more efficient. The pair of input arguments
/// may overlap with the pair of output arguments. It is safe to call
/// udivrem(X, Y, X, Y), for example.
/// @brief Dual division/remainder interface.
static void udivrem(const APInt &LHS, const APInt &RHS,
APInt &Quotient, APInt &Remainder);
static void sdivrem(const APInt &LHS, const APInt &RHS,
APInt &Quotient, APInt &Remainder) {
if (LHS.isNegative()) {
if (RHS.isNegative())
APInt::udivrem(-LHS, -RHS, Quotient, Remainder);
else
APInt::udivrem(-LHS, RHS, Quotient, Remainder);
Quotient = -Quotient;
Remainder = -Remainder;
} else if (RHS.isNegative()) {
APInt::udivrem(LHS, -RHS, Quotient, Remainder);
Quotient = -Quotient;
} else {
APInt::udivrem(LHS, RHS, Quotient, Remainder);
}
}
// Operations that return overflow indicators.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
APInt usub_ov(const APInt &RHS, bool &Overflow) const;
APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
APInt smul_ov(const APInt &RHS, bool &Overflow) const;
APInt umul_ov(const APInt &RHS, bool &Overflow) const;
APInt sshl_ov(unsigned Amt, bool &Overflow) const;
/// @returns the bit value at bitPosition
/// @brief Array-indexing support.
bool operator[](unsigned bitPosition) const;
/// @}
/// @name Comparison Operators
/// @{
/// Compares this APInt with RHS for the validity of the equality
/// relationship.
/// @brief Equality operator.
bool operator==(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
if (isSingleWord())
return VAL == RHS.VAL;
return EqualSlowCase(RHS);
}
/// Compares this APInt with a uint64_t for the validity of the equality
/// relationship.
/// @returns true if *this == Val
/// @brief Equality operator.
bool operator==(uint64_t Val) const {
if (isSingleWord())
return VAL == Val;
return EqualSlowCase(Val);
}
/// Compares this APInt with RHS for the validity of the equality
/// relationship.
/// @returns true if *this == Val
/// @brief Equality comparison.
bool eq(const APInt &RHS) const {
return (*this) == RHS;
}
/// Compares this APInt with RHS for the validity of the inequality
/// relationship.
/// @returns true if *this != Val
/// @brief Inequality operator.
bool operator!=(const APInt& RHS) const {
return !((*this) == RHS);
}
/// Compares this APInt with a uint64_t for the validity of the inequality
/// relationship.
/// @returns true if *this != Val
/// @brief Inequality operator.
bool operator!=(uint64_t Val) const {
return !((*this) == Val);
}
/// Compares this APInt with RHS for the validity of the inequality
/// relationship.
/// @returns true if *this != Val
/// @brief Inequality comparison
bool ne(const APInt &RHS) const {
return !((*this) == RHS);
}
/// Regards both *this and RHS as unsigned quantities and compares them for
/// the validity of the less-than relationship.
/// @returns true if *this < RHS when both are considered unsigned.
/// @brief Unsigned less than comparison
bool ult(const APInt &RHS) const;
/// Regards both *this as an unsigned quantity and compares it with RHS for
/// the validity of the less-than relationship.
/// @returns true if *this < RHS when considered unsigned.
/// @brief Unsigned less than comparison
bool ult(uint64_t RHS) const {
return ult(APInt(getBitWidth(), RHS));
}
/// Regards both *this and RHS as signed quantities and compares them for
/// validity of the less-than relationship.
/// @returns true if *this < RHS when both are considered signed.
/// @brief Signed less than comparison
bool slt(const APInt& RHS) const;
/// Regards both *this as a signed quantity and compares it with RHS for
/// the validity of the less-than relationship.
/// @returns true if *this < RHS when considered signed.
/// @brief Signed less than comparison
bool slt(uint64_t RHS) const {
return slt(APInt(getBitWidth(), RHS));
}
/// Regards both *this and RHS as unsigned quantities and compares them for
/// validity of the less-or-equal relationship.
/// @returns true if *this <= RHS when both are considered unsigned.
/// @brief Unsigned less or equal comparison
bool ule(const APInt& RHS) const {
return ult(RHS) || eq(RHS);
}
/// Regards both *this as an unsigned quantity and compares it with RHS for
/// the validity of the less-or-equal relationship.
/// @returns true if *this <= RHS when considered unsigned.
/// @brief Unsigned less or equal comparison
bool ule(uint64_t RHS) const {
return ule(APInt(getBitWidth(), RHS));
}
/// Regards both *this and RHS as signed quantities and compares them for
/// validity of the less-or-equal relationship.
/// @returns true if *this <= RHS when both are considered signed.
/// @brief Signed less or equal comparison
bool sle(const APInt& RHS) const {
return slt(RHS) || eq(RHS);
}
/// Regards both *this as a signed quantity and compares it with RHS for
/// the validity of the less-or-equal relationship.
/// @returns true if *this <= RHS when considered signed.
/// @brief Signed less or equal comparison
bool sle(uint64_t RHS) const {
return sle(APInt(getBitWidth(), RHS));
}
/// Regards both *this and RHS as unsigned quantities and compares them for
/// the validity of the greater-than relationship.
/// @returns true if *this > RHS when both are considered unsigned.
/// @brief Unsigned greather than comparison
bool ugt(const APInt& RHS) const {
return !ult(RHS) && !eq(RHS);
}
/// Regards both *this as an unsigned quantity and compares it with RHS for
/// the validity of the greater-than relationship.
/// @returns true if *this > RHS when considered unsigned.
/// @brief Unsigned greater than comparison
bool ugt(uint64_t RHS) const {
return ugt(APInt(getBitWidth(), RHS));
}
/// Regards both *this and RHS as signed quantities and compares them for
/// the validity of the greater-than relationship.
/// @returns true if *this > RHS when both are considered signed.
/// @brief Signed greather than comparison
bool sgt(const APInt& RHS) const {
return !slt(RHS) && !eq(RHS);
}
/// Regards both *this as a signed quantity and compares it with RHS for
/// the validity of the greater-than relationship.
/// @returns true if *this > RHS when considered signed.
/// @brief Signed greater than comparison
bool sgt(uint64_t RHS) const {
return sgt(APInt(getBitWidth(), RHS));
}
/// Regards both *this and RHS as unsigned quantities and compares them for
/// validity of the greater-or-equal relationship.
/// @returns true if *this >= RHS when both are considered unsigned.
/// @brief Unsigned greater or equal comparison
bool uge(const APInt& RHS) const {
return !ult(RHS);
}
/// Regards both *this as an unsigned quantity and compares it with RHS for
/// the validity of the greater-or-equal relationship.
/// @returns true if *this >= RHS when considered unsigned.
/// @brief Unsigned greater or equal comparison
bool uge(uint64_t RHS) const {
return uge(APInt(getBitWidth(), RHS));
}
/// Regards both *this and RHS as signed quantities and compares them for
/// validity of the greater-or-equal relationship.
/// @returns true if *this >= RHS when both are considered signed.
/// @brief Signed greather or equal comparison
bool sge(const APInt& RHS) const {
return !slt(RHS);
}
/// Regards both *this as a signed quantity and compares it with RHS for
/// the validity of the greater-or-equal relationship.
/// @returns true if *this >= RHS when considered signed.
/// @brief Signed greater or equal comparison
bool sge(uint64_t RHS) const {
return sge(APInt(getBitWidth(), RHS));
}
/// This operation tests if there are any pairs of corresponding bits
/// between this APInt and RHS that are both set.
bool intersects(const APInt &RHS) const {
return (*this & RHS) != 0;
}
/// @}
/// @name Resizing Operators
/// @{
/// Truncate the APInt to a specified width. It is an error to specify a width
/// that is greater than or equal to the current width.
/// @brief Truncate to new width.
APInt trunc(unsigned width) const;
/// This operation sign extends the APInt to a new width. If the high order
/// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
/// It is an error to specify a width that is less than or equal to the
/// current width.
/// @brief Sign extend to a new width.
APInt sext(unsigned width) const;
/// This operation zero extends the APInt to a new width. The high order bits
/// are filled with 0 bits. It is an error to specify a width that is less
/// than or equal to the current width.
/// @brief Zero extend to a new width.
APInt zext(unsigned width) const;
/// Make this APInt have the bit width given by \p width. The value is sign
/// extended, truncated, or left alone to make it that width.
/// @brief Sign extend or truncate to width
APInt sextOrTrunc(unsigned width) const;
/// Make this APInt have the bit width given by \p width. The value is zero
/// extended, truncated, or left alone to make it that width.
/// @brief Zero extend or truncate to width
APInt zextOrTrunc(unsigned width) const;
/// @}
/// @name Bit Manipulation Operators
/// @{
/// @brief Set every bit to 1.
void setAllBits() {
if (isSingleWord())
VAL = -1ULL;
else {
// Set all the bits in all the words.
for (unsigned i = 0; i < getNumWords(); ++i)
pVal[i] = -1ULL;
}
// Clear the unused ones
clearUnusedBits();
}
/// Set the given bit to 1 whose position is given as "bitPosition".
/// @brief Set a given bit to 1.
void setBit(unsigned bitPosition);
/// @brief Set every bit to 0.
void clearAllBits() {
if (isSingleWord())
VAL = 0;
else
memset(pVal, 0, getNumWords() * APINT_WORD_SIZE);
}
/// Set the given bit to 0 whose position is given as "bitPosition".
/// @brief Set a given bit to 0.
void clearBit(unsigned bitPosition);
/// @brief Toggle every bit to its opposite value.
void flipAllBits() {
if (isSingleWord())
VAL ^= -1ULL;
else {
for (unsigned i = 0; i < getNumWords(); ++i)
pVal[i] ^= -1ULL;
}
clearUnusedBits();
}
/// Toggle a given bit to its opposite value whose position is given
/// as "bitPosition".
/// @brief Toggles a given bit to its opposite value.
void flipBit(unsigned bitPosition);
/// @}
/// @name Value Characterization Functions
/// @{
/// @returns the total number of bits.
unsigned getBitWidth() const {
return BitWidth;
}
/// Here one word's bitwidth equals to that of uint64_t.
/// @returns the number of words to hold the integer value of this APInt.
/// @brief Get the number of words.
unsigned getNumWords() const {
return getNumWords(BitWidth);
}
/// Here one word's bitwidth equals to that of uint64_t.
/// @returns the number of words to hold the integer value with a
/// given bit width.
/// @brief Get the number of words.
static unsigned getNumWords(unsigned BitWidth) {
return (BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
}
/// This function returns the number of active bits which is defined as the
/// bit width minus the number of leading zeros. This is used in several
/// computations to see how "wide" the value is.
/// @brief Compute the number of active bits in the value
unsigned getActiveBits() const {
return BitWidth - countLeadingZeros();
}
/// This function returns the number of active words in the value of this
/// APInt. This is used in conjunction with getActiveData to extract the raw
/// value of the APInt.
unsigned getActiveWords() const {
return whichWord(getActiveBits()-1) + 1;
}
/// Computes the minimum bit width for this APInt while considering it to be
/// a signed (and probably negative) value. If the value is not negative,
/// this function returns the same value as getActiveBits()+1. Otherwise, it
/// returns the smallest bit width that will retain the negative value. For
/// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
/// for -1, this function will always return 1.
/// @brief Get the minimum bit size for this signed APInt
unsigned getMinSignedBits() const {
if (isNegative())
return BitWidth - countLeadingOnes() + 1;
return getActiveBits()+1;
}
/// This method attempts to return the value of this APInt as a zero extended
/// uint64_t. The bitwidth must be <= 64 or the value must fit within a
/// uint64_t. Otherwise an assertion will result.
/// @brief Get zero extended value
uint64_t getZExtValue() const {
if (isSingleWord())
return VAL;
assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
return pVal[0];
}
/// This method attempts to return the value of this APInt as a sign extended
/// int64_t. The bit width must be <= 64 or the value must fit within an
/// int64_t. Otherwise an assertion will result.
/// @brief Get sign extended value
int64_t getSExtValue() const {
if (isSingleWord())
return int64_t(VAL << (APINT_BITS_PER_WORD - BitWidth)) >>
(APINT_BITS_PER_WORD - BitWidth);
assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
return int64_t(pVal[0]);
}
/// This method determines how many bits are required to hold the APInt
/// equivalent of the string given by \arg str.
/// @brief Get bits required for string value.
static unsigned getBitsNeeded(StringRef str, uint8_t radix);
/// countLeadingZeros - This function is an APInt version of the
/// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number
/// of zeros from the most significant bit to the first one bit.
/// @returns BitWidth if the value is zero.
/// @returns the number of zeros from the most significant bit to the first
/// one bits.
unsigned countLeadingZeros() const {
if (isSingleWord()) {
unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
return CountLeadingZeros_64(VAL) - unusedBits;
}
return countLeadingZerosSlowCase();
}
/// countLeadingOnes - This function is an APInt version of the
/// countLeadingOnes_{32,64} functions in MathExtras.h. It counts the number
/// of ones from the most significant bit to the first zero bit.
/// @returns 0 if the high order bit is not set
/// @returns the number of 1 bits from the most significant to the least
/// @brief Count the number of leading one bits.
unsigned countLeadingOnes() const;
/// Computes the number of leading bits of this APInt that are equal to its
/// sign bit.
unsigned getNumSignBits() const {
return isNegative() ? countLeadingOnes() : countLeadingZeros();
}
/// countTrailingZeros - This function is an APInt version of the
/// countTrailingZeros_{32,64} functions in MathExtras.h. It counts
/// the number of zeros from the least significant bit to the first set bit.
/// @returns BitWidth if the value is zero.
/// @returns the number of zeros from the least significant bit to the first
/// one bit.
/// @brief Count the number of trailing zero bits.
unsigned countTrailingZeros() const;
/// countTrailingOnes - This function is an APInt version of the
/// countTrailingOnes_{32,64} functions in MathExtras.h. It counts
/// the number of ones from the least significant bit to the first zero bit.
/// @returns BitWidth if the value is all ones.
/// @returns the number of ones from the least significant bit to the first
/// zero bit.
/// @brief Count the number of trailing one bits.
unsigned countTrailingOnes() const {
if (isSingleWord())
return CountTrailingOnes_64(VAL);
return countTrailingOnesSlowCase();
}
/// countPopulation - This function is an APInt version of the
/// countPopulation_{32,64} functions in MathExtras.h. It counts the number
/// of 1 bits in the APInt value.
/// @returns 0 if the value is zero.
/// @returns the number of set bits.
/// @brief Count the number of bits set.
unsigned countPopulation() const {
if (isSingleWord())
return CountPopulation_64(VAL);
return countPopulationSlowCase();
}
/// @}
/// @name Conversion Functions
/// @{
void print(std::ostream &OS, bool isSigned) const;
/// toString - Converts an APInt to a string and append it to Str. Str is
/// commonly a SmallString.
void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
bool formatAsCLiteral = false) const;
/// Considers the APInt to be unsigned and converts it into a string in the
/// radix given. The radix can be 2, 8, 10 16, or 36.
void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
toString(Str, Radix, false, false);
}
/// Considers the APInt to be signed and converts it into a string in the
/// radix given. The radix can be 2, 8, 10, 16, or 36.
void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
toString(Str, Radix, true, false);
}
/// toString - This returns the APInt as a std::string. Note that this is an
/// inefficient method. It is better to pass in a SmallVector/SmallString
/// to the methods above to avoid thrashing the heap for the string.
std::string toString(unsigned Radix, bool Signed) const;
/// @returns a byte-swapped representation of this APInt Value.
APInt byteSwap() const;
/// @brief Converts this APInt to a double value.
double roundToDouble(bool isSigned) const;
/// @brief Converts this unsigned APInt to a double value.
double roundToDouble() const {
return roundToDouble(false);
}
/// @brief Converts this signed APInt to a double value.
double signedRoundToDouble() const {
return roundToDouble(true);
}
/// The conversion does not do a translation from integer to double, it just
/// re-interprets the bits as a double. Note that it is valid to do this on
/// any bit width. Exactly 64 bits will be translated.
/// @brief Converts APInt bits to a double
double bitsToDouble() const {
union {
uint64_t I;
double D;
} T;
T.I = (isSingleWord() ? VAL : pVal[0]);
return T.D;
}
/// The conversion does not do a translation from integer to float, it just
/// re-interprets the bits as a float. Note that it is valid to do this on
/// any bit width. Exactly 32 bits will be translated.
/// @brief Converts APInt bits to a double
float bitsToFloat() const {
union {
unsigned I;
float F;
} T;
T.I = unsigned((isSingleWord() ? VAL : pVal[0]));
return T.F;
}
/// The conversion does not do a translation from double to integer, it just
/// re-interprets the bits of the double.
/// @brief Converts a double to APInt bits.
static APInt doubleToBits(double V) {
union {
uint64_t I;
double D;
} T;
T.D = V;
return APInt(sizeof T * CHAR_BIT, T.I);
}
/// The conversion does not do a translation from float to integer, it just
/// re-interprets the bits of the float.
/// @brief Converts a float to APInt bits.
static APInt floatToBits(float V) {
union {
unsigned I;
float F;
} T;
T.F = V;
return APInt(sizeof T * CHAR_BIT, T.I);
}
/// @}
/// @name Mathematics Operations
/// @{
/// @returns the floor log base 2 of this APInt.
unsigned logBase2() const {
return BitWidth - 1 - countLeadingZeros();
}
/// @returns the ceil log base 2 of this APInt.
unsigned ceilLogBase2() const {
return BitWidth - (*this - 1).countLeadingZeros();
}
/// @returns the log base 2 of this APInt if its an exact power of two, -1
/// otherwise
int32_t exactLogBase2() const {
if (!isPowerOf2())
return -1;
return logBase2();
}
/// @brief Compute the square root
APInt sqrt() const;
/// If *this is < 0 then return -(*this), otherwise *this;
/// @brief Get the absolute value;
APInt abs() const {
if (isNegative())
return -(*this);
return *this;
}
/// @returns the multiplicative inverse for a given modulo.
APInt multiplicativeInverse(const APInt& modulo) const;
/// @}
/// @name Support for division by constant
/// @{
/// Calculate the magic number for signed division by a constant.
struct ms;
ms magic() const;
/// Calculate the magic number for unsigned division by a constant.
struct mu;
mu magicu(unsigned LeadingZeros = 0) const;
/// @}
/// @name Building-block Operations for APInt and APFloat
/// @{
// These building block operations operate on a representation of
// arbitrary precision, two's-complement, bignum integer values.
// They should be sufficient to implement APInt and APFloat bignum
// requirements. Inputs are generally a pointer to the base of an
// array of integer parts, representing an unsigned bignum, and a
// count of how many parts there are.
/// Sets the least significant part of a bignum to the input value,
/// and zeroes out higher parts. */
static void tcSet(integerPart *, integerPart, unsigned int);
/// Assign one bignum to another.
static void tcAssign(integerPart *, const integerPart *, unsigned int);
/// Returns true if a bignum is zero, false otherwise.
static bool tcIsZero(const integerPart *, unsigned int);
/// Extract the given bit of a bignum; returns 0 or 1. Zero-based.
static int tcExtractBit(const integerPart *, unsigned int bit);
/// Copy the bit vector of width srcBITS from SRC, starting at bit
/// srcLSB, to DST, of dstCOUNT parts, such that the bit srcLSB
/// becomes the least significant bit of DST. All high bits above
/// srcBITS in DST are zero-filled.
static void tcExtract(integerPart *, unsigned int dstCount,
const integerPart *,
unsigned int srcBits, unsigned int srcLSB);
/// Set the given bit of a bignum. Zero-based.
static void tcSetBit(integerPart *, unsigned int bit);
/// Clear the given bit of a bignum. Zero-based.
static void tcClearBit(integerPart *, unsigned int bit);
/// Returns the bit number of the least or most significant set bit
/// of a number. If the input number has no bits set -1U is
/// returned.
static unsigned int tcLSB(const integerPart *, unsigned int);
static unsigned int tcMSB(const integerPart *parts, unsigned int n);
/// Negate a bignum in-place.
static void tcNegate(integerPart *, unsigned int);
/// DST += RHS + CARRY where CARRY is zero or one. Returns the
/// carry flag.
static integerPart tcAdd(integerPart *, const integerPart *,
integerPart carry, unsigned);
/// DST -= RHS + CARRY where CARRY is zero or one. Returns the
/// carry flag.
static integerPart tcSubtract(integerPart *, const integerPart *,
integerPart carry, unsigned);
/// DST += SRC * MULTIPLIER + PART if add is true
/// DST = SRC * MULTIPLIER + PART if add is false
///
/// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC
/// they must start at the same point, i.e. DST == SRC.
///
/// If DSTPARTS == SRC_PARTS + 1 no overflow occurs and zero is
/// returned. Otherwise DST is filled with the least significant
/// DSTPARTS parts of the result, and if all of the omitted higher
/// parts were zero return zero, otherwise overflow occurred and
/// return one.
static int tcMultiplyPart(integerPart *dst, const integerPart *src,
integerPart multiplier, integerPart carry,
unsigned int srcParts, unsigned int dstParts,
bool add);
/// DST = LHS * RHS, where DST has the same width as the operands
/// and is filled with the least significant parts of the result.
/// Returns one if overflow occurred, otherwise zero. DST must be
/// disjoint from both operands.
static int tcMultiply(integerPart *, const integerPart *,
const integerPart *, unsigned);
/// DST = LHS * RHS, where DST has width the sum of the widths of
/// the operands. No overflow occurs. DST must be disjoint from
/// both operands. Returns the number of parts required to hold the
/// result.
static unsigned int tcFullMultiply(integerPart *, const integerPart *,
const integerPart *, unsigned, unsigned);
/// If RHS is zero LHS and REMAINDER are left unchanged, return one.
/// Otherwise set LHS to LHS / RHS with the fractional part
/// discarded, set REMAINDER to the remainder, return zero. i.e.
///
/// OLD_LHS = RHS * LHS + REMAINDER
///
/// SCRATCH is a bignum of the same size as the operands and result
/// for use by the routine; its contents need not be initialized
/// and are destroyed. LHS, REMAINDER and SCRATCH must be
/// distinct.
static int tcDivide(integerPart *lhs, const integerPart *rhs,
integerPart *remainder, integerPart *scratch,
unsigned int parts);
/// Shift a bignum left COUNT bits. Shifted in bits are zero.
/// There are no restrictions on COUNT.
static void tcShiftLeft(integerPart *, unsigned int parts,
unsigned int count);
/// Shift a bignum right COUNT bits. Shifted in bits are zero.
/// There are no restrictions on COUNT.
static void tcShiftRight(integerPart *, unsigned int parts,
unsigned int count);
/// The obvious AND, OR and XOR and complement operations.
static void tcAnd(integerPart *, const integerPart *, unsigned int);
static void tcOr(integerPart *, const integerPart *, unsigned int);
static void tcXor(integerPart *, const integerPart *, unsigned int);
static void tcComplement(integerPart *, unsigned int);
/// Comparison (unsigned) of two bignums.
static int tcCompare(const integerPart *, const integerPart *,
unsigned int);
/// Increment a bignum in-place. Return the carry flag.
static integerPart tcIncrement(integerPart *, unsigned int);
/// Set the least significant BITS and clear the rest.
static void tcSetLeastSignificantBits(integerPart *, unsigned int,
unsigned int bits);
/// @brief debug method
void dump() const;
/// @}
};
/// Magic data for optimising signed division by a constant.
struct APInt::ms {
APInt m; ///< magic number
unsigned s; ///< shift amount
};
/// Magic data for optimising unsigned division by a constant.
struct APInt::mu {
APInt m; ///< magic number
bool a; ///< add indicator
unsigned s; ///< shift amount
};
inline bool operator==(uint64_t V1, const APInt& V2) {
return V2 == V1;
}
inline bool operator!=(uint64_t V1, const APInt& V2) {
return V2 != V1;
}
inline std::ostream &operator<<(std::ostream &OS, const APInt &I) {
I.print(OS, true);
return OS;
}
namespace APIntOps {
/// @brief Determine the smaller of two APInts considered to be signed.
inline APInt smin(const APInt &A, const APInt &B) {
return A.slt(B) ? A : B;
}
/// @brief Determine the larger of two APInts considered to be signed.
inline APInt smax(const APInt &A, const APInt &B) {
return A.sgt(B) ? A : B;
}
/// @brief Determine the smaller of two APInts considered to be signed.
inline APInt umin(const APInt &A, const APInt &B) {
return A.ult(B) ? A : B;
}
/// @brief Determine the larger of two APInts considered to be unsigned.
inline APInt umax(const APInt &A, const APInt &B) {
return A.ugt(B) ? A : B;
}
/// @brief Check if the specified APInt has a N-bits unsigned integer value.
inline bool isIntN(unsigned N, const APInt& APIVal) {
return APIVal.isIntN(N);
}
/// @brief Check if the specified APInt has a N-bits signed integer value.
inline bool isSignedIntN(unsigned N, const APInt& APIVal) {
return APIVal.isSignedIntN(N);
}
/// @returns true if the argument APInt value is a sequence of ones
/// starting at the least significant bit with the remainder zero.
inline bool isMask(unsigned numBits, const APInt& APIVal) {
return numBits <= APIVal.getBitWidth() &&
APIVal == APInt::getLowBitsSet(APIVal.getBitWidth(), numBits);
}
/// @returns true if the argument APInt value contains a sequence of ones
/// with the remainder zero.
inline bool isShiftedMask(unsigned numBits, const APInt& APIVal) {
return isMask(numBits, (APIVal - APInt(numBits,1)) | APIVal);
}
/// @returns a byte-swapped representation of the specified APInt Value.
inline APInt byteSwap(const APInt& APIVal) {
return APIVal.byteSwap();
}
/// @returns the floor log base 2 of the specified APInt value.
inline unsigned logBase2(const APInt& APIVal) {
return APIVal.logBase2();
}
/// GreatestCommonDivisor - This function returns the greatest common
/// divisor of the two APInt values using Euclid's algorithm.
/// @returns the greatest common divisor of Val1 and Val2
/// @brief Compute GCD of two APInt values.
APInt GreatestCommonDivisor(const APInt& Val1, const APInt& Val2);
/// Treats the APInt as an unsigned value for conversion purposes.
/// @brief Converts the given APInt to a double value.
inline double RoundAPIntToDouble(const APInt& APIVal) {
return APIVal.roundToDouble();
}
/// Treats the APInt as a signed value for conversion purposes.
/// @brief Converts the given APInt to a double value.
inline double RoundSignedAPIntToDouble(const APInt& APIVal) {
return APIVal.signedRoundToDouble();
}
/// @brief Converts the given APInt to a float vlalue.
inline float RoundAPIntToFloat(const APInt& APIVal) {
return float(RoundAPIntToDouble(APIVal));
}
/// Treast the APInt as a signed value for conversion purposes.
/// @brief Converts the given APInt to a float value.
inline float RoundSignedAPIntToFloat(const APInt& APIVal) {
return float(APIVal.signedRoundToDouble());
}
/// RoundDoubleToAPInt - This function convert a double value to an APInt value.
/// @brief Converts the given double value into a APInt.
APInt RoundDoubleToAPInt(double Double, unsigned width);
/// RoundFloatToAPInt - Converts a float value into an APInt value.
/// @brief Converts a float value into a APInt.
inline APInt RoundFloatToAPInt(float Float, unsigned width) {
return RoundDoubleToAPInt(double(Float), width);
}
/// Arithmetic right-shift the APInt by shiftAmt.
/// @brief Arithmetic right-shift function.
inline APInt ashr(const APInt& LHS, unsigned shiftAmt) {
return LHS.ashr(shiftAmt);
}
/// Logical right-shift the APInt by shiftAmt.
/// @brief Logical right-shift function.
inline APInt lshr(const APInt& LHS, unsigned shiftAmt) {
return LHS.lshr(shiftAmt);
}
/// Left-shift the APInt by shiftAmt.
/// @brief Left-shift function.
inline APInt shl(const APInt& LHS, unsigned shiftAmt) {
return LHS.shl(shiftAmt);
}
/// Signed divide APInt LHS by APInt RHS.
/// @brief Signed division function for APInt.
inline APInt sdiv(const APInt& LHS, const APInt& RHS) {
return LHS.sdiv(RHS);
}
/// Unsigned divide APInt LHS by APInt RHS.
/// @brief Unsigned division function for APInt.
inline APInt udiv(const APInt& LHS, const APInt& RHS) {
return LHS.udiv(RHS);
}
/// Signed remainder operation on APInt.
/// @brief Function for signed remainder operation.
inline APInt srem(const APInt& LHS, const APInt& RHS) {
return LHS.srem(RHS);
}
/// Unsigned remainder operation on APInt.
/// @brief Function for unsigned remainder operation.
inline APInt urem(const APInt& LHS, const APInt& RHS) {
return LHS.urem(RHS);
}
/// Performs multiplication on APInt values.
/// @brief Function for multiplication operation.
inline APInt mul(const APInt& LHS, const APInt& RHS) {
return LHS * RHS;
}
/// Performs addition on APInt values.
/// @brief Function for addition operation.
inline APInt add(const APInt& LHS, const APInt& RHS) {
return LHS + RHS;
}
/// Performs subtraction on APInt values.
/// @brief Function for subtraction operation.
inline APInt sub(const APInt& LHS, const APInt& RHS) {
return LHS - RHS;
}
/// Performs bitwise AND operation on APInt LHS and
/// APInt RHS.
/// @brief Bitwise AND function for APInt.
inline APInt And(const APInt& LHS, const APInt& RHS) {
return LHS & RHS;
}
/// Performs bitwise OR operation on APInt LHS and APInt RHS.
/// @brief Bitwise OR function for APInt.
inline APInt Or(const APInt& LHS, const APInt& RHS) {
return LHS | RHS;
}
/// Performs bitwise XOR operation on APInt.
/// @brief Bitwise XOR function for APInt.
inline APInt Xor(const APInt& LHS, const APInt& RHS) {
return LHS ^ RHS;
}
/// Performs a bitwise complement operation on APInt.
/// @brief Bitwise complement function.
inline APInt Not(const APInt& APIVal) {
return ~APIVal;
}
} // End of APIntOps namespace
} // End of llvm namespace
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois/include
|
rapidsai_public_repos/code-share/maxflow/galois/include/Lonestar/BoilerPlate.h
|
/** Common command line processing for benchmarks -*- C++ -*-
* @file
* @section License
*
* Galois, a framework to exploit amorphous data-parallelism in irregular
* programs.
*
* Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*
* @author Andrew Lenharth <[email protected]>
*/
#ifndef LONESTAR_BOILERPLATE_H
#define LONESTAR_BOILERPLATE_H
#include "Galois/Galois.h"
#include "Galois/Version.h"
#include "Galois/Runtime/ll/gio.h"
#include "llvm/Support/CommandLine.h"
#include <sstream>
//! standard global options to the benchmarks
static llvm::cl::opt<bool> skipVerify("noverify", llvm::cl::desc("Skip verification step"), llvm::cl::init(false));
static llvm::cl::opt<int> numThreads("t", llvm::cl::desc("Number of threads"), llvm::cl::init(1));
//! initialize lonestar benchmark
static void LonestarStart(int argc, char** argv, const char* app, const char* desc = 0, const char* url = 0) {
using namespace Galois::Runtime::LL;
// display the name only if mater host
gPrint("Galois Benchmark Suite v", GALOIS_VERSION_STRING, " (r", GALOIS_SVNVERSION, ")\n");
gPrint("Copyright (C) ", GALOIS_COPYRIGHT_YEAR_STRING, " The University of Texas at Austin\n");
gPrint("http://iss.ices.utexas.edu/galois/\n\n");
gPrint("application: ", app ? app : "unspecified", "\n");
if (desc)
gPrint(desc, "\n");
if (url)
gPrint("http://iss.ices.utexas.edu/?p=projects/galois/benchmarks/", url, "\n");
gPrint("\n");
std::ostringstream cmdout;
for (int i = 0; i < argc; ++i) {
cmdout << argv[i];
if (i != argc - 1)
cmdout << " ";
}
gInfo("CommandLine ", cmdout.str().c_str());
char name[256];
gethostname(name, 256);
gInfo("Hostname ", name);
gFlush();
llvm::cl::ParseCommandLineOptions(argc, argv);
numThreads = Galois::setActiveThreads(numThreads);
// gInfo ("Using %d threads\n", numThreads.getValue());
Galois::Runtime::reportStat(0, "Threads", numThreads);
}
#endif
| 0 |
rapidsai_public_repos/code-share/maxflow/galois
|
rapidsai_public_repos/code-share/maxflow/galois/scripts/CMakeLists.txt
|
configure_file("make_dist.sh.in" "make_dist.sh")
file(COPY . DESTINATION ${CMAKE_CURRENT_BINARY_DIR} PATTERN .svn EXCLUDE)
| 0 |
rapidsai_public_repos/code-share/maxflow/galois
|
rapidsai_public_repos/code-share/maxflow/galois/scripts/quick_plot.pl
|
while (<STDIN>) {
if (/STAT.*/) {
my @values = split ',';
if ($values[2] eq $ARGV[0]) {
$v{$values[3]} += $values[4];
$n{$values[3]} += 1;
}
}
}
#foreach $key (sort {$a <=> $b} keys %v) {
# print "$key $v{$key} $n{$key}\n";
#}
open GP, "|gnuplot -persist" or die "Can't execute gnuplot";
if (exists $n{1}) {
$doscale = 1;
} else {
$doscale = 0;
}
if (scalar @ARGV > 1) {
print "outputfile (eps) is $ARGV[1]\n";
open GP, "|gnuplot" or die "Can't execute gnuplot";
print GP "set terminal postscript enhanced color\n";
print GP "set output '| ps2pdf - $ARGV[1]'\n";
} else {
open GP, "|gnuplot -persist" or die "Can't execute gnuplot";
}
print GP "set xlabel \"threads\"\n";
print GP "set ylabel \"$ARGV[0]\"\n";
print GP "set y2label \"Scaling\"\n" if $doscale;
print GP "set y2tics nomirror\n" if $doscale;
print GP "set ytics nomirror\n";
print GP "plot '-' title \"$ARGV[0]\" with lines axis x1y1";
print GP ", '-' title \"scaling\" with lines axis x1y2" if $doscale;
print GP "\n";
foreach $key (sort {$a <=> $b} keys %v) {
print GP $key . " " . ($v{$key} / $n{$key}) . "\n";
}
print GP "e\n";
if ($doscale) {
foreach $key (sort {$a <=> $b} keys %v) {
print GP $key . " " . ($v{1} / $n{1}) / ($v{$key} / $n{$key}) . "\n";
}
print GP "e\n";
}
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.