text
stringlengths 2
104M
| meta
dict |
---|---|
#pragma once
#include <bits/stdc++.h>
#include "HeavyLightDecomposition.h"
using namespace std;
// Supports path (and subtree) updates and queries on a forest
// Vertices and indices are 0-indexed
// Template Arguments:
// R: struct supporting range updates and queries on indices
// Required Fields:
// Data: the data type
// Lazy: the lazy type
// Required Functions:
// static qdef(): returns the query default value of type Data
// static merge(l, r): merges the datas l and r, must be associative
// constructor(A): takes a vector A of type Data with the initial
// value of each index
// update(l, r, val, rev): updates the range [l, r] with the value
// val, with rev indicating if the update range should be updated
// right to left
// query(l, r, rev): queries the range [l, r] with rev indicating
// if the result needs to be reversed
// Sample Struct: supporting range assignment and maximum subarray sum
// struct R {
// using Data = MaxSubarraySumCombine<int>::Data;
// using Lazy = MaxSubarraySumCombine<int>::Lazy;
// static Data qdef() { return MaxSubarraySumCombine<int>::qdef(); }
// static Data merge(const Data &l, const Data &r) {
// return MaxSubarraySumCombine<int>::merge(l, r);
// }
// SegmentTreeLazyBottomUp<MaxSubarraySumCombine<int>> ST;
// R(vector<Data> A) : ST(move(A)) {}
// void update(int l, int r, const Lazy &val, bool) {
// ST.update(l, r, val);
// }
// Data query(int l, int r, bool rev) {
// Data ret = ST.query(l, r);
// if (rev) swap(ret.pre, ret.suf);
// return ret;
// }
// };
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type R::Data with the initial value of each vertex
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Functions:
// updatePath(v, w, val): updates the path from v to w with the value val
// updateVertex(v, val): updates the vertex v with the value val
// updateSubtree(v, val): updates the subtree of vertex v with the value val
// queryPath(v, w): queries the path from v to w
// queryVertex(v): queries the vertex v
// querySubtree(v): queries the subtree of vertex v
// In practice, constructor has a moderate constant,
// update and query functions have a small constant
// Time Complexity:
// constructor: O(V) + time complexity of R's constructor
// updatePath, queryPath: O(log V) * time complexity of R's update/query
// updateVertex, updateSubtree, queryVertex, querySubtree:
// time complexity of R's update/query
// Memory Complexity: O(V) + memory complexity of R
// Tested:
// https://www.spoj.com/problems/GSS7/
// https://judge.yosupo.jp/problem/vertex_set_path_composite
template <class R, const bool VALUES_ON_EDGES>
struct PathQueries : public HLD {
  using Data = typename R::Data; using Lazy = typename R::Lazy; R ops;
  // Updates the path from v to w with val by decomposing it into O(log V)
  // preorder-contiguous chain segments; segments on v's side run towards
  // the root, so they are passed to ops.update with rev == true
  void updatePath(int v, int w, const Lazy &val) {
    while (head[v] != head[w]) {
      // advance the endpoint whose chain head is deeper until both
      // endpoints lie on the same chain
      if (dep[head[v]] < dep[head[w]]) {
        ops.update(pre[head[w]], pre[w], val, false); w = par[head[w]];
      } else { ops.update(pre[head[v]], pre[v], val, true); v = par[head[v]]; }
    }
    if (v != w) {
      // same chain now; the shallower endpoint is the lca, whose index is
      // skipped in edge mode (it stores the edge above the lca, which is
      // not on the path)
      if (dep[v] < dep[w])
        ops.update(pre[v] + VALUES_ON_EDGES, pre[w], val, false);
      else ops.update(pre[w] + VALUES_ON_EDGES, pre[v], val, true);
    } else if (!VALUES_ON_EDGES) {
      // v == w: a single vertex remains; only updated in vertex mode
      int i = pre[dep[v] < dep[w] ? v : w]; ops.update(i, i, val, false);
    }
  }
  // Updates the single index of vertex v (in edge mode this is the edge
  // between v and its parent)
  void updateVertex(int v, const Lazy &val) {
    ops.update(pre[v], pre[v], val, false);
  }
  // Updates the subtree of v; in edge mode v's own index is excluded since
  // it stores the edge above v (range may be empty for a leaf)
  void updateSubtree(int v, const Lazy &val) {
    int l = pre[v] + VALUES_ON_EDGES, r = post[v];
    if (l <= r) ops.update(l, r, val, false);
  }
  // Queries the path from v to w; qu accumulates the reversed segments on
  // v's side and qd the forward segments on w's side, so the final
  // merge(qu, qd) is ordered from v to w — this ordering is what allows
  // non-commutative merges (e.g. function composition)
  Data queryPath(int v, int w) {
    Data qu = R::qdef(), qd = R::qdef(); while (head[v] != head[w]) {
      if (dep[head[v]] < dep[head[w]]) {
        // w's segment is closer to the lca than previous ones: prepend it
        qd = R::merge(ops.query(pre[head[w]], pre[w], false), qd);
        w = par[head[w]];
      } else {
        // v's segment runs towards the root: append it reversed
        qu = R::merge(qu, ops.query(pre[head[v]], pre[v], true));
        v = par[head[v]];
      }
    }
    if (v != w) {
      if (dep[v] < dep[w])
        qu = R::merge(qu, ops.query(pre[v] + VALUES_ON_EDGES, pre[w], false));
      else
        qd = R::merge(ops.query(pre[w] + VALUES_ON_EDGES, pre[v], true), qd);
    } else if (!VALUES_ON_EDGES) {
      int i = pre[dep[v] < dep[w] ? v : w];
      qu = R::merge(qu, ops.query(i, i, false));
    }
    return R::merge(qu, qd);
  }
  Data queryVertex(int v) { return ops.query(pre[v], pre[v], false); }
  // Queries the subtree of v; returns qdef for an empty range
  // (a leaf in edge mode)
  Data querySubtree(int v) {
    int l = pre[v] + VALUES_ON_EDGES, r = post[v];
    return l <= r ? ops.query(l, r, false) : R::qdef();
  }
  // Permutes A from vertex order into preorder-index order for R's
  // constructor (vert/pre come from the HLD base constructed first)
  vector<Data> reorder(const vector<Data> &A) {
    vector<Data> ret; ret.reserve(A.capacity());
    for (int i = 0; i < V; i++) ret.push_back(A[vert[i]]);
    return ret;
  }
  template <class Forest>
  PathQueries(const Forest &G, const vector<Data> &A,
              const vector<int> &roots = vector<int>())
      : HLD(G, roots), ops(reorder(A)) {}
  template <class Forest>
  PathQueries(const Forest &G, const vector<Data> &A, int rt)
      : PathQueries(G, A, vector<int>{rt}) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "LowestCommonAncestor.h"
using namespace std;
// Supports vertex updates and path queries for invertible operations on
// a forest
// Vertices and indices are 0-indexed
// Template Arguments:
// R: struct supporting point updates and range queries on indices
// Required Fields:
// Data: the data type
// Lazy: the lazy type
// Required Functions:
// static qdef(): returns the query default value of type Data
// static merge(l, r): returns the values l of type Data merged with
// r of type Data, must be associative and commutative
// static invData(v): returns the inverse of v of type Data
// static invLazy(v): returns the inverse of v of type Lazy
// constructor(A): takes a vector A of type Data with the initial
// value of each index
// update(i, val): updates the index i with the value val
// query(l, r): queries the range [l, r]
// Sample Struct: supporting point increments and range sum queries
// struct R {
// using Data = int;
// using Lazy = int;
// static Data qdef() { return 0; }
// static Data merge(const Data &l, const Data &r) { return l + r; }
// static Data invData(const Data &v) { return -v; }
// static Lazy invLazy(const Lazy &v) { return -v; }
// FenwickTree1D<Data> FT;
// R(vector<Data> A) : FT(move(A)) {}
// void update(int i, const Lazy &val) { FT.update(i, val); }
// Data query(int l, int r) { return FT.query(l, r); }
// };
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type R::Data with the initial value of each vertex
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Functions:
// connected(v, w): returns true if and only if v and w are connected
// updateVertex(v, val): updates the vertex v with the value val
// queryPathFromRoot(v): queries the path betwen vertex v and the root of
// its connected component
// queryPath(v, w): queries the path between vertices v and w
// In practice, constructor has a moderate constant,
// update and query functions have a very small constant plus
// the constants of R's update and query functions
// Time Complexity:
// constructor: O(V) + time complexity of R's constructor
// connected: O(1)
// updateVertex, queryPathFromRoot, queryPath:
// time complexity of update/query
// Memory Complexity: O(V) + memory complexity of R
// Tested:
// https://judge.yosupo.jp/problem/vertex_add_path_sum
// http://www.usaco.org/index.php?page=viewproblem2&cpid=921
template <class R, const bool VALUES_ON_EDGES>
struct InvertibleVertexUpdatesPathQueries {
  using Data = typename R::Data; using Lazy = typename R::Lazy;
  LCA<> lca; int V, ind; vector<int> par, pre, post, vert; R ops;
  bool connected(int v, int w) { return lca.connected(v, w); }
  // Updates vertex v with val: the value is added at pre[v] and cancelled
  // just past v's subtree at post[v] + 1, so that range sums over
  // [pre[root], pre[v]] telescope to root-to-v path sums; the cancellation
  // is skipped when post[v] + 1 falls outside v's component, where it
  // would corrupt the next component's queries
  void updateVertex(int v, const Lazy &val) {
    ops.update(pre[v], val); if (post[v] + 1 <= post[lca.root[v]])
      ops.update(post[v] + 1, R::invLazy(val));
  }
  // Queries the path from the root of v's component to v; in edge mode the
  // query starts one past the root's index (the root has no edge above it)
  // Fixed: previously the call ignored the computed left bound l and always
  // queried from pre[root], incorrectly including the root's slot in
  // edge mode
  Data queryPathFromRoot(int v) {
    int l = pre[lca.root[v]] + int(VALUES_ON_EDGES), r = pre[v];
    return l <= r ? ops.query(l, r) : R::qdef();
  }
  // Queries the path between v and w (assumed connected) by
  // inclusion-exclusion on root paths: with u = lca(v, w), the shared
  // root-to-u prefix is removed once for u and once for par[u] in vertex
  // mode (u's value belongs on the path exactly once), or twice for u in
  // edge mode (no edge of the u prefix is on the path)
  Data queryPath(int v, int w) {
    Data ret = R::merge(queryPathFromRoot(v), queryPathFromRoot(w));
    int u = lca.lca(v, w); Data inv = queryPathFromRoot(u);
    if (VALUES_ON_EDGES) inv = R::merge(inv, inv);
    else if (par[u] != -1) inv = R::merge(inv, queryPathFromRoot(par[u]));
    return R::merge(ret, R::invData(inv));
  }
  // Preorder dfs recording parents, preorder index pre, the last preorder
  // index in each subtree post, and the vertex at each preorder index
  template <class Forest> void dfs(const Forest &G, int v, int prev) {
    par[v] = prev; vert[pre[v] = ++ind] = v;
    for (int w : G[v]) if (w != prev) dfs(G, w, v);
    post[v] = ind;
  }
  // Builds the initial index array: A permuted into preorder, with each
  // vertex's value cancelled just past its subtree (mirroring updateVertex)
  // so that root-anchored range sums equal path sums; cancellations that
  // would cross into another component are dropped
  template <class Forest>
  vector<Data> reorder(const Forest &G, const vector<Data> &A,
                       const vector<int> &roots) {
    if (roots.empty()) {
      for (int v = 0; v < V; v++) if (par[v] == -1) dfs(G, v, -1);
    } else for (int v : roots) dfs(G, v, -1);
    vector<Data> ret; ret.reserve(A.capacity());
    for (int i = 0; i < V; i++) ret.push_back(A[vert[i]]);
    for (int v = 0; v < V; v++) if (post[v] + 1 <= post[lca.root[v]])
      ret[post[v] + 1] = R::merge(ret[post[v] + 1], R::invData(A[v]));
    return ret;
  }
  template <class Forest> InvertibleVertexUpdatesPathQueries(
      const Forest &G, const vector<Data> &A,
      const vector<int> &roots = vector<int>())
      : lca(G, roots), V(G.size()), ind(-1), par(V, -1),
        pre(V), post(V), vert(V), ops(reorder(G, A, roots)) {}
  template <class Forest> InvertibleVertexUpdatesPathQueries(
      const Forest &G, const vector<Data> &A, int rt)
      : InvertibleVertexUpdatesPathQueries(G, A, vector<int>{rt}) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/FischerHeunStructure.h"
using namespace std;
// Supports queries for the lowest common ancestor of 2 vertices in a forest
// and the distance between 2 vertices by reduing the problem to a
// range minimum query using the Fischer Heun Structure
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the forest
// Constructor Arguments:
// G: a generic forest data structure (weighted or unweighted)
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints for an unweighted forest, or a list of
// pair<int, T> for a weighted forest with weights of type T)
// size() const: returns the number of vertices in the forest
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Fields:
// root: vector of roots for the forest each vertex is in
// pre: vector of the pre order traversal indices for each vertex
// dep: vector of depths to each vertex from the root of
// its connected component
// Functions:
// lca(v, w): returns the lowest common ancestor of vertices v and w assuming
// v and w are connected
// lca(r, v, w): returns the lowest common ancestor of vertices v and w if
// their component is rooted a r, assuming r, v, and w are connected
// getDirectChild(anc, des): returns the direct child of anc that is on the
// path from anc to des, where anc is an ancestor of des
// connected(v, w): returns true if and only if v and w are connected
// dist(v, w): returns the distance between vertices v and w assuming
// v and w are connected
// In practice, lca and dist have a moderate constant, constructor is
// dependent on the forest data structure
// Time Complexity:
// constructor: O(V)
// lca, getDirectChild, connected, dist: O(1)
// Memory Complexity: O(V)
// Tested:
// https://judge.yosupo.jp/problem/lca
// https://www.spoj.com/problems/LCASQ
// https://dmoj.ca/problem/rte16s3
// https://dmoj.ca/problem/wac1p6
// https://www.acmicpc.net/problem/15480
template <class T = int> struct LCA {
  int V, ind; vector<int> root, pre, top, bot; vector<T> dep;
  // RMQ over pre[top[i]] for the discovered-edge sequence; with
  // greater_equal as the comparator, queryInd selects the position of a
  // smallest pre[top] value, i.e. the shallowest upper endpoint
  // (NOTE(review): depends on FischerHeunStructure's comparator
  // convention — confirm against that header)
  FischerHeunStructure<int, greater_equal<int>> FHS;
  // Overloads letting dfs treat unweighted adjacency entries (plain ints,
  // implicit weight 1) and weighted ones (pair<int, T>) uniformly
  int getTo(int e) { return e; }
  T getWeight(int) { return 1; }
  int getTo(const pair<int, T> &e) { return e.first; }
  T getWeight(const pair<int, T> &e) { return e.second; }
  // pre[v] is the number of tree edges discovered before v is entered
  // (also serves as the visited marker: -1 means unvisited); the ind-th
  // discovered edge has shallower endpoint top[ind] and deeper endpoint
  // bot[ind]; dep and root are propagated downwards
  template <class Forest> void dfs(const Forest &G, int v) {
    pre[v] = ind; for (auto &&e : G[v]) {
      int w = getTo(e); if (pre[w] == -1) {
        dep[bot[ind] = w] = dep[top[ind] = v] + getWeight(e); ind++;
        root[w] = root[v]; dfs(G, w);
      }
    }
  }
  // Runs dfs from the given roots (or from every unvisited vertex when no
  // roots are given) and returns the pre[top[i]] array the RMQ is built on
  template <class Forest>
  vector<int> init(const Forest &G, const vector<int> &roots) {
    if (roots.empty()) {
      for (int v = 0; v < V; v++) if (pre[v] == -1) dfs(G, root[v] = v);
    } else for (int v : roots) dfs(G, root[v] = v);
    vector<int> tmp(V); for (int i = 0; i < V; i++) tmp[i] = pre[top[i]];
    return tmp;
  }
  template <class Forest>
  LCA(const Forest &G, const vector<int> &roots = vector<int>())
      : V(G.size()), ind(0), root(V, -1), pre(V, -1), top(V), bot(V),
        dep(V, T()), FHS(init(G, roots)) {}
  template <class Forest> LCA(const Forest &G, int rt)
      : LCA(G, vector<int>{rt}) {}
  // Lowest common ancestor of v and w (assumed connected): among the edges
  // discovered between entering the preorder-earlier vertex and entering
  // the later one, the edge with the shallowest upper endpoint hangs off
  // the lca
  int lca(int v, int w) {
    if (v == w) return v;
    if (pre[v] > pre[w]) swap(v, w);
    return top[FHS.queryInd(pre[v], pre[w] - 1)];
  }
  // lca of v and w if the component were rerooted at r (all assumed
  // connected): the deepest of lca(r,v), lca(r,w), lca(v,w)
  int lca(int r, int v, int w) {
    int a = lca(r, v), b = lca(r, w), c = lca(v, w);
    int d = dep[a] > dep[b] ? a : b; return dep[c] > dep[d] ? c : d;
  }
  // Direct child of anc on the path towards des; requires anc to be a
  // proper ancestor of des (same RMQ as lca, but takes the deep endpoint)
  int getDirectChild(int anc, int des) {
    return bot[FHS.queryInd(pre[anc], pre[des] - 1)];
  }
  bool connected(int v, int w) { return root[v] == root[w]; }
  // Weighted distance assuming v and w are connected
  T dist(int v, int w) { return dep[v] + dep[w] - T(2) * dep[lca(v, w)]; }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Supports subtree updates and queries on a forest
// Vertices and indices are 0-indexed
// Template Arguments:
// R: struct supporting range updates and queries on indices
// Required Fields:
// Data: the data type
// Lazy: the lazy type
// Required Functions:
// static qdef(): returns the query default value of type Data
// constructor(A): takes a vector A of type Data with the initial
// value of each index
// update(l, r, val): updates the range [l, r] with the value val
// query(l, r): queries the range [l, r]
// Sample Struct: supporting range sum updates and queries
// struct R {
// using Data = int;
// using Lazy = int;
// static Data qdef() { return 0; }
// FenwickTreeRange1D<Data> FT;
// R(vector<Data> A) : FT(move(A)) {}
// void update(int l, int r, const Lazy &val) { FT.update(l, r, val); }
// Data query(int l, int r) { return FT.query(l, r); }
// };
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type R::Data with the initial value of each vertex
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Fields:
// pre: vector of the pre order traversal indices for each vertex
// post: vector of the post order traversal indices (the last pre order index
// in its subtree) for each vertex
// vert: vector of vertex for each pre order index
// Functions:
// updateVertex(v, val): updates the vertex v with the value val
// updateSubtree(v, val): updates the subtree of vertex v with the value val
// queryVertex(v): queries the vertex v
// querySubtree(v): queries the subtree of vertex v
// In practice, constructor has a moderate constant,
// update and query functions have a very small constant plus
// the constants of R's update and query functions
// Time Complexity:
// constructor: O(V) + time complexity of R's constructor
// updateVertex, updateSubtree, queryVertex, querySubtree:
// time complexity of update/query
// Memory Complexity: O(V) + memory complexity of R
// Tested:
// https://judge.yosupo.jp/problem/vertex_add_subtree_sum
// https://codeforces.com/contest/620/problem/E
// Maps each subtree onto the contiguous preorder index range
// [pre[v] + VALUES_ON_EDGES, post[v]] so that R can answer subtree
// updates and queries as range operations
template <class R, const bool VALUES_ON_EDGES> struct SubtreeQueries {
  using Data = typename R::Data; using Lazy = typename R::Lazy;
  int V, ind; vector<int> pre, post, vert; R ops;
  // Applies val to the single index of vertex v
  void updateVertex(int v, const Lazy &val) {
    ops.update(pre[v], pre[v], val);
  }
  // Applies val to every index in v's subtree; in edge mode v's own index
  // is excluded (it holds the edge above v), so the range may be empty
  void updateSubtree(int v, const Lazy &val) {
    int lo = pre[v] + VALUES_ON_EDGES, hi = post[v];
    if (lo <= hi) ops.update(lo, hi, val);
  }
  // Queries the single index of vertex v
  Data queryVertex(int v) { return ops.query(pre[v], pre[v]); }
  // Queries v's subtree; an empty range (leaf in edge mode) yields qdef
  Data querySubtree(int v) {
    int lo = pre[v] + VALUES_ON_EDGES, hi = post[v];
    return lo > hi ? R::qdef() : ops.query(lo, hi);
  }
  // Assigns preorder indices; post[v] is the last preorder index inside
  // v's subtree, and vert inverts the pre mapping
  template <class Forest> void dfs(const Forest &G, int v, int prev) {
    pre[v] = ++ind; vert[ind] = v;
    for (int w : G[v]) if (w != prev) dfs(G, w, v);
    post[v] = ind;
  }
  // Runs the dfs from the given roots (or every unvisited vertex when no
  // roots are supplied) and permutes A into preorder-index order for R
  template <class Forest>
  vector<Data> reorder(const Forest &G, const vector<Data> &A,
                       const vector<int> &roots) {
    if (!roots.empty()) { for (int v : roots) dfs(G, v, -1); }
    else for (int v = 0; v < V; v++) if (pre[v] == -1) dfs(G, v, -1);
    vector<Data> ret; ret.reserve(A.capacity());
    for (int i = 0; i < V; i++) ret.push_back(A[vert[i]]);
    return ret;
  }
  template <class Forest>
  SubtreeQueries(const Forest &G, const vector<Data> &A,
                 const vector<int> &roots = vector<int>())
      : V(G.size()), ind(-1), pre(V, -1), post(V), vert(V),
        ops(reorder(G, A, roots)) {}
  template <class Forest>
  SubtreeQueries(const Forest &G, const vector<Data> &A, int rt)
      : SubtreeQueries(G, A, vector<int>{rt}) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "LowestCommonAncestor.h"
using namespace std;
// Supports path updates and vertex queries for invertible operations on
// a forest
// Vertices and indices are 0-indexed
// Template Arguments:
// R: struct supporting range updates and point queries on indices
// Required Fields:
// Data: the data type
// Lazy: the lazy type
// Required Functions:
// static merge(l, r): returns the values l of type Data merged with
// r of type Data, must be associative and commutative
// static mergeLazy(l, r): merges the lazy values l and r
// static invData(v): returns the inverse of v of type Data
// static invLazy(v): returns the inverse of v of type Lazy
// constructor(A): takes a vector A of type Data with the initial
// value of each index
// update(l, r, val): updates the range [l, r] with the value val
// query(i): queries the index i
// Sample Struct: supporting range increments and point queries
// struct R {
// using Data = int;
// using Lazy = int;
// static Data merge(const Data &l, const Data &r) { return l + r; }
// static Lazy mergeLazy(const Lazy &l, const Lazy &r) {
// return l + r;
// }
// static Data invData(const Data &v) { return -v; }
// static Lazy invLazy(const Lazy &v) { return -v; }
// FenwickTreeRangePoint1D<Data> FT;
// R(vector<Data> A) : FT(move(A)) {}
// void update(int l, int r, const Lazy &val) { FT.update(l, r, val); }
// Data query(int i) { return FT.get(i); }
// };
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type R::Data with the initial value of each vertex
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Functions:
// connected(v, w): returns true if and only if v and w are connected
// updatePathFromRoot(v, val): updates the path betwen vertex v and
// the root of its connected component with the lazy value val
// updatePath(v, w, val): updates the path between vertices v and w with
// the lazy value val
// queryVertex(v): queries the vertex v
// In practice, constructor has a moderate constant,
// update and query functions have a very small constant plus
// the constants of R's update and query functions
// Time Complexity:
// constructor: O(V) + time complexity of R's constructor
// connected: O(1)
// updatePathFromRoot, updatePath, queryVertex:
// time complexity of update/query
// Memory Complexity: O(V) + memory complexity of R
// Tested:
// http://www.usaco.org/index.php?page=viewproblem2&cpid=102
template <class R, const bool VALUES_ON_EDGES>
struct InvertiblePathUpdatesVertexQueries {
  using Data = typename R::Data; using Lazy = typename R::Lazy;
  LCA<> lca; int V, ind; vector<int> par, pre, post, vert; R ops;
  bool connected(int v, int w) { return lca.connected(v, w); }
  // Updates the root-to-v path: a range update over [pre[root], pre[v]]
  // (skipping the root's index in edge mode) adds val at position pre[x]
  // for every x whose subtree contains v — combined with queryVertex's
  // subtraction at post[x] + 1, exactly the vertices on the path are hit
  void updatePathFromRoot(int v, const Lazy &val) {
    int l = pre[lca.root[v]] + int(VALUES_ON_EDGES), r = pre[v];
    if (l <= r) ops.update(l, r, val);
  }
  // Updates the path between v and w (assumed connected) by
  // inclusion-exclusion on root paths: with u = lca(v, w), the shared
  // root-to-u prefix is removed once for u and once for par[u] in vertex
  // mode (so u keeps the value exactly once), or twice for u in edge mode
  void updatePath(int v, int w, const Lazy &val) {
    updatePathFromRoot(v, val); updatePathFromRoot(w, val);
    int u = lca.lca(v, w); Lazy inv = R::invLazy(val);
    if (VALUES_ON_EDGES) inv = R::mergeLazy(inv, inv);
    updatePathFromRoot(u, inv);
    if (!VALUES_ON_EDGES && par[u] != -1) updatePathFromRoot(par[u], inv);
  }
  // Point value of v: the difference of the point queries at pre[v] and at
  // post[v] + 1 isolates the updates whose path passes through v (those
  // anchored in v's subtree); the subtraction is skipped when post[v] + 1
  // falls outside v's component
  Data queryVertex(int v) {
    Data ret = ops.query(pre[v]); if (post[v] + 1 <= post[lca.root[v]])
      ret = R::merge(ret, R::invData(ops.query(post[v] + 1)));
    return ret;
  }
  // Preorder dfs recording parents, preorder index pre, the last preorder
  // index in each subtree post, and the vertex at each preorder index
  template <class Forest> void dfs(const Forest &G, int v, int prev) {
    par[v] = prev; vert[pre[v] = ++ind] = v;
    for (int w : G[v]) if (w != prev) dfs(G, w, v);
    post[v] = ind;
  }
  // Builds the initial index array so that queryVertex(v) returns A[v]
  // before any updates; queryVertex computes ret[pre[v]] minus
  // ret[post[v] + 1] (when in-component), so ret must satisfy
  // ret[pre[v]] = A[v] merged with ret[post[v] + 1], which is filled in
  // right to left since post[v] + 1 > pre[v] always holds
  // Fixed: the previous version instead merged invData(A[v]) into
  // ret[post[v] + 1] with an unguarded post[v] + 1 < V bound, which
  // produced wrong initial values whenever a subtree boundary was not also
  // a component boundary (e.g. queryVertex(2) returned A[2] - A[1] for a
  // star 0-{1,2}) and leaked across components in forests
  template <class Forest>
  vector<Data> reorder(const Forest &G, const vector<Data> &A,
                       const vector<int> &roots) {
    if (roots.empty()) {
      for (int v = 0; v < V; v++) if (par[v] == -1) dfs(G, v, -1);
    } else for (int v : roots) dfs(G, v, -1);
    vector<Data> ret = A;
    for (int i = V - 1; i >= 0; i--) {
      int v = vert[i]; ret[i] = A[v];
      if (post[v] + 1 <= post[lca.root[v]])
        ret[i] = R::merge(ret[i], ret[post[v] + 1]);
    }
    return ret;
  }
  template <class Forest> InvertiblePathUpdatesVertexQueries(
      const Forest &G, const vector<Data> &A,
      const vector<int> &roots = vector<int>())
      : lca(G, roots), V(G.size()), ind(-1), par(V, -1),
        pre(V), post(V), vert(V), ops(reorder(G, A, roots)) {}
  template <class Forest> InvertiblePathUpdatesVertexQueries(
      const Forest &G, const vector<Data> &A, int rt)
      : InvertiblePathUpdatesVertexQueries(G, A, vector<int>{rt}) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/BitPrefixSumArray.h"
#include "LowestCommonAncestor.h"
using namespace std;
// Wavelet Matrix supporting 2D aggregation path queries for invertible
// operations on a forest where the data can change, but not the keys
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the element of the array
// R: struct supporting point updates and prefix queries on indices
// Required Fields:
// Data: the data type
// Lazy: the lazy type
// Required Functions:
// static qdef(): returns the query default value
// static merge(l, r): returns the values l of type Data merged with
// r of type Data, must be associative and commutative
// static invData(v): returns the inverse of v of type Data
// constructor(A): takes a vector A of type Data with the initial
// value of each index
// update(i, val): updates the index i with the value val
// query(r): queries the prefix ending at index r
// Sample Struct: supporting point increments updates and
// prefix sum queries
// struct R {
// using Data = int;
// using Lazy = int;
// static Data qdef() { return 0; }
// static Data merge(const Data &l, const Data &r) { return l + r; }
// static Data invData(const Data &v) { return -v; }
// FenwickTree1D<Data> FT;
// R(const vector<Data> &A) : FT(A) {}
// void update(int i, const Lazy &val) { FT.update(i, val); }
// Data query(int r) { return FT.query(r); }
// };
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Cmp: the comparator to compare two elements
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type T of the values in the array
// X: a vector of type R::Data containing the initial data of the array
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// cmp: an instance of the Cmp struct
// Functions:
// update(v, val): updates the vertex v with the lazy value v
// query(v, w, hi): returns the aggregate value of the data associated with
// all elements with a key less than or equal to hi (using the comparator)
// on the path from v to w
// query(v, w, lo, hi): returns the aggregate value of the data associated
// with all elements with a key of not less than lo and not greater than hi
// (using the comparator) on the path from v to w
// bsearch(v, w, f): over all keys in the array, finds the first key k such
// that query(v, w, k) returns true
// In practice, has a moderate constant
// Time Complexity:
// constructor: O((V + C) log V) where C is the time complexity of
// R's constructor
// update: O(U log V) where U is the time complexity of R's update function
// query, bsearch: O(Q log V) where Q is the time complexity of
// R's query function
// Memory Complexity: O(V + (V log V) / 64 + M log V) where M is the memory
// complexity of R
// Tested:
// Fuzz and Stress Tested
template <class T, class R, const bool VALUES_ON_EDGES, class Cmp = less<T>>
struct WaveletMatrixTreeAggregation {
  // clt: strict "less than" under cmp; cle: "less than or equal to"
  #define clt [&] (const T &a, const T &b) { return cmp(a, b); }
  #define cle [&] (const T &a, const T &b) { return !cmp(b, a); }
  using Data = typename R::Data; using Lazy = typename R::Lazy;
  // Two wavelet matrices are maintained, one over preorder indices
  // (B1, D1, mid1) and one over postorder indices (B2, D2, mid2); root
  // path aggregates are formed as differences of prefix aggregates in the
  // two orders, and v-to-w paths by inclusion-exclusion via the lca
  int V, H, preInd, postInd; vector<int> par, pre, post, last, mid1, mid2;
  vector<T> S; LCA<> lca; vector<BitPrefixSumArray> B1, B2; vector<R> D1, D2;
  Cmp cmp;
  // Records parents and separate pre/post order counters; last[pre[v]] is
  // the largest postorder index assigned before v is entered
  template <class Forest> void dfs(const Forest &G, int v, int prev) {
    par[v] = prev; last[pre[v] = preInd++] = postInd - 1;
    for (int w : G[v]) if (w != prev) dfs(G, w, v);
    post[v] = postInd++;
  }
  // Builds one wavelet matrix over the index order given by ind (pre or
  // post): A holds key ranks, X the data; level h stably partitions
  // positions by whether the remaining rank is below 2^h, B[h] stores the
  // partition bits, mid[h] the split point, and D[h] an R structure over
  // the data permuted into that level's order
  void build(const vector<int> &A, const vector<Data> &X,
             const vector<int> &ind, vector<int> &mid,
             vector<BitPrefixSumArray> &B, vector<R> &D) {
    vector<int> C(V), P(V); vector<Data> Y = X, Z = X; D.reserve(H);
    for (int v = 0; v < V; v++) { C[ind[v]] = A[v]; Y[ind[v]] = X[v]; }
    iota(P.begin(), P.end(), 0); for (int h = H - 1; h >= 0; h--) {
      int ph = 1 << h;
      for (int i = 0; i < V; i++) B[h].set(i, C[P[i]] <= ph - 1);
      mid[h] = stable_partition(P.begin(), P.end(), [&] (int i) {
        return C[i] <= ph - 1;
      }) - P.begin();
      B[h].build(); for (int i = 0; i < V; i++) Z[i] = Y[P[i]];
      D.emplace_back(Z); for (int i = mid[h]; i < V; i++) C[P[i]] -= ph;
    }
    reverse(D.begin(), D.end());
  }
  template <class Forest>
  WaveletMatrixTreeAggregation(
      const Forest &G, const vector<T> &A, const vector<Data> &X,
      const vector<int> &roots = vector<int>(), Cmp cmp = Cmp())
      : V(A.size()), H(V == 0 ? 0 : __lg(V) + 1), preInd(0), postInd(0),
        par(V, -1), pre(V), post(V), last(V), mid1(H), mid2(H),
        S(A), lca(G, roots), B1(H, BitPrefixSumArray(V)), B2(B1), cmp(cmp) {
    // S is the sorted key multiset; C maps each vertex's key to its rank
    sort(S.begin(), S.end(), cmp); if (roots.empty()) {
      for (int v = 0; v < V; v++) if (par[v] == -1) dfs(G, v, -1);
    } else for (int v : roots) dfs(G, v, -1);
    vector<int> C(V); for (int v = 0; v < V; v++)
      C[v] = lower_bound(S.begin(), S.end(), A[v], cmp) - S.begin();
    build(C, X, pre, mid1, B1, D1); build(C, X, post, mid2, B2, D2);
  }
  template <class Forest>
  WaveletMatrixTreeAggregation(const Forest &G, const vector<T> &A,
                               const vector<Data> &X, int rt, Cmp cmp = Cmp())
      : WaveletMatrixTreeAggregation(G, A, X, vector<int>{rt}, cmp) {}
  bool connected(int v, int w) { return lca.connected(v, w); }
  // A range is (preorder prefix end, postorder prefix end, coefficient,
  // scratch, scratch); the prefix-count difference for a pair
  // (pre[x], last[pre[x]]) isolates the root-to-x path, and the
  // coefficients implement inclusion-exclusion over v, w, lca, and par[lca]
  using Ranges = vector<tuple<int, int, int, int, int>>;
  Ranges getRanges(int v, int w) {
    int u = lca.lca(v, w), rt = lca.root[v]; Ranges ranges; ranges.reserve(5);
    // t == 1: edge mode (lca removed twice); t == 2: vertex mode with a
    // parent above the lca; t == 0: vertex mode with the lca being a root
    int t = VALUES_ON_EDGES ? 1 : (par[u] != -1 ? 2 : 0);
    // NOTE(review): correction range anchored just before the component
    // root; its exact role (and the strict > 0 guard excluding index 0)
    // is not obvious from this file alone — verify against the fuzz tests
    if (t && pre[rt] - 1 > 0)
      ranges.emplace_back(pre[rt] - 1, last[pre[rt] - 1], -1, 0, 0);
    ranges.emplace_back(pre[v], last[pre[v]], 1, 0, 0);
    ranges.emplace_back(pre[w], last[pre[w]], 1, 0, 0);
    ranges.emplace_back(pre[u], last[pre[u]], t == 1 ? -2 : -1, 0, 0);
    if (t == 2) ranges.emplace_back(pre[par[u]], last[pre[par[u]]], -1, 0, 0);
    return ranges;
  }
  // Updates vertex v's data: traces v's leaf position down both wavelet
  // matrices (left child if the stored bit is set, right otherwise) and
  // updates the level-local R structures at every level
  void update(int v, const Lazy &val) {
    for (int a = pre[v], b = post[v], h = H - 1; h >= 0; h--) {
      if (B1[h].get(a)) a = B1[h].query(a - 1);
      else a += mid1[h] - B1[h].query(a - 1);
      if (B2[h].get(b)) b = B2[h].query(b - 1);
      else b += mid2[h] - B2[h].query(b - 1);
      D1[h].update(a, val); D2[h].update(b, val);
    }
  }
  // Caches, per range, the number of set bits up to each prefix end at
  // level h (needed both for descending and for aggregating)
  void qryRanges(int h, Ranges &ranges) {
    for (auto &&r : ranges) {
      get<3>(r) = B1[h].query(get<0>(r)); get<4>(r) = B2[h].query(get<1>(r));
    }
  }
  // Aggregates the left-half (bit set) portion of every range at level h,
  // applying each range's coefficient: -1 inverts, -2 inverts and doubles
  Data qryAgg(int h, const Ranges &ranges) {
    Data ret = R::qdef(); for (auto &&r : ranges) {
      int a = get<3>(r) - 1, b = get<4>(r) - 1, t = get<2>(r);
      Data q = a >= 0 ? D1[h].query(a) : R::qdef();
      if (b >= 0) q = R::merge(q, R::invData(D2[h].query(b)));
      if (t <= -1) q = R::invData(q);
      if (t == -2) q = R::merge(q, q);
      ret = R::merge(ret, q);
    }
    return ret;
  }
  // Descends every range into the left (bit set) half of the next level
  void left(Ranges &ranges) {
    for (auto &&r : ranges) {
      get<0>(r) = get<3>(r) - 1; get<1>(r) = get<4>(r) - 1;
    }
  }
  // Descends every range into the right (bit clear) half of the next level
  void right(int h, Ranges &ranges) {
    for (auto &&r : ranges) {
      get<0>(r) += mid1[h] - get<3>(r); get<1>(r) += mid2[h] - get<4>(r);
    }
  }
  // Aggregates data for keys "below" x per f: at each level, if the
  // largest key of the left half does not satisfy f against x, the whole
  // left half qualifies — take its aggregate and descend right; otherwise
  // descend left
  template <class F> Data qry(int v, int w, const T &x, F f) {
    Ranges ranges = getRanges(v, w); Data ret = R::qdef();
    for (int cur = 0, h = H - 1; h >= 0; h--) {
      int ph = 1 << h; qryRanges(h, ranges);
      if (cur + ph - 1 >= V || f(x, S[cur + ph - 1])) left(ranges);
      else {
        ret = R::merge(ret, qryAgg(h, ranges)); cur += ph; right(h, ranges);
      }
    }
    return ret;
  }
  // Aggregate over keys <= hi on the path from v to w
  Data query(int v, int w, const T &hi) { return qry(v, w, hi, clt); }
  // Aggregate over keys in [lo, hi] as (keys <= hi) minus (keys < lo)
  Data query(int v, int w, const T &lo, const T &hi) {
    return R::merge(qry(v, w, hi, clt), R::invData(qry(v, w, lo, cle)));
  }
  // Finds the first key k (in sorted order) with f(query(v, w, k)) true;
  // returns whether such a key exists and a pointer into S (or nullptr)
  template <class F> pair<bool, T *> bsearch(int v, int w, F f) {
    Ranges ranges = getRanges(v, w);
    int cur = 0; Data agg = R::qdef(); for (int h = H - 1; h >= 0; h--) {
      qryRanges(h, ranges); Data val = qryAgg(h, ranges);
      if (f(R::merge(agg, val))) left(ranges);
      else { cur += 1 << h; agg = R::merge(agg, val); right(h, ranges); }
    }
    return make_pair(cur < V, cur < V ? &S[cur] : nullptr);
  }
  #undef clt
  #undef cle
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Decomposes a tree into chains, such that a path from any vertex to the root
// will cover at most O(log V) chains
// Can be used with PathQueries for path queries
// Vertices and indices are 0-indexed
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Fields:
// root: vector of roots for the forest each vertex is in
// dep: vector of depths to each vertex from the root of
// its connected component
// par: vector of parent vertices for each vertex (or -1 if its a root)
// size: vector of sizes of the subtree for each vertex
// head: vector of the head of the chain of each vertex
// pre: vector of the pre order traversal indices for each vertex
// post: vector of the post order traversal indices (the last pre order index
// in its subtree) for each vertex
// vert: vector of vertex for each pre order index
// Functions:
// lca(v, w): returns the lowest common ancestor of vertices v and w assuming
// v and w are connected
// connected(v, w): returns true if and only if v and w are connected
// dist(v, w): returns the distance between vertices v and w assuming
// v and w are connected
// kthParent(v, k): returns the kth parent of the vertex v (0th parent is v,
// 1st parent is the parent of v)
// kthPath(v, w, k): returns the kth vertex (0-indexed) on the path from
// v to w
// getChains(v, w, edgeChains): returns a tuple of left index, right index,
// and a boolean indicating the ranges of the chains on the path from v to
// w and whether the range is reversed, with the chains being provided in
// order from v to w, with the lca of v and w being included if edgeChains
// is false, and excluded if it is true
// In practice, constructor has a moderate constant,
// lca, dist, kthParent, kthPath, have a small constant
// Time Complexity:
// constructor: O(V)
// lca, dist, kthParent, kthPath: O(log V)
// connected: O(1)
// Memory Complexity: O(V)
// Tested:
// https://www.spoj.com/problems/QTREE2/
// https://www.spoj.com/problems/GSS7/
// https://judge.yosupo.jp/problem/vertex_set_path_composite
// Heavy-light decomposition of a forest: any vertex-to-root path crosses
// O(log V) chains. Arrays are indexed by vertex, except vert which maps a
// pre-order index back to its vertex.
struct HLD {
  int V, ind; vector<int> root, dep, par, size, head, pre, post, vert;
  // First pass: root, depth, parent, and subtree size of every vertex in
  // the component rooted at r.
  template <class Forest>
  void dfs(const Forest &G, int v, int prev, int r, int d) {
    root[v] = r;
    dep[v] = d;
    par[v] = prev;
    size[v] = 1;
    for (int to : G[v]) {
      if (to == prev) continue;
      dfs(G, to, v, r, d + 1);
      size[v] += size[to];
    }
  }
  // Second pass: the child with the largest subtree extends the current
  // chain; every other child starts a fresh chain. Assigns pre/post order.
  template <class Forest> void hld(const Forest &G, int v, int prev) {
    if (head[v] == -1) head[v] = v;
    pre[v] = ++ind;
    vert[pre[v]] = v;
    int big = -1;
    for (int to : G[v])
      if (to != prev && (big == -1 || size[big] < size[to])) big = to;
    if (big != -1) {
      head[big] = head[v];
      hld(G, big, v);
    }
    for (int to : G[v])
      if (to != prev && to != big) hld(G, to, v);
    post[v] = ind;
  }
  // Lowest common ancestor of v and w (they must be connected): repeatedly
  // lift whichever endpoint lies on the deeper chain head.
  int lca(int v, int w) {
    while (head[v] != head[w]) {
      if (dep[head[v]] < dep[head[w]]) w = par[head[w]];
      else v = par[head[v]];
    }
    return dep[v] < dep[w] ? v : w;
  }
  // Number of edges on the path between v and w (must be connected).
  int dist(int v, int w) {
    int m = lca(v, w);
    return dep[v] + dep[w] - 2 * dep[m];
  }
  // kth ancestor of v (k = 0 is v itself); walks whole chains at a time.
  int kthParent(int v, int k) {
    while (par[head[v]] != -1) {
      int offset = pre[v] - pre[head[v]];
      if (offset >= k) return vert[pre[v] - k];
      k -= offset + 1;
      v = par[head[v]];
    }
    return pre[v] < k ? -1 : vert[pre[v] - k];
  }
  // kth vertex (0-indexed) on the path from v to w.
  int kthPath(int v, int w, int k) {
    int a = lca(v, w);
    int up = dep[v] - dep[a];
    if (up >= k) return kthParent(v, k);
    return kthParent(w, dep[v] + dep[w] - 2 * dep[a] - k);
  }
  // True iff v and w lie in the same tree of the forest.
  bool connected(int v, int w) { return root[v] == root[w]; }
  // Chains covering the v-w path in order from v to w, as tuples of
  // (left pre index, right pre index, reversed); the lca is excluded
  // exactly when edgeChains is true.
  vector<tuple<int, int, bool>> getChains(int v, int w, bool edgeChains) {
    vector<tuple<int, int, bool>> fromV, fromW;
    while (head[v] != head[w]) {
      if (dep[head[v]] < dep[head[w]]) {
        fromW.emplace_back(pre[head[w]], pre[w], false);
        w = par[head[w]];
      } else {
        fromV.emplace_back(pre[head[v]], pre[v], true);
        v = par[head[v]];
      }
    }
    if (v != w) {
      // v and w now share a chain; one contiguous segment remains
      if (dep[v] < dep[w])
        fromW.emplace_back(pre[v] + edgeChains, pre[w], false);
      else fromV.emplace_back(pre[w] + edgeChains, pre[v], true);
    } else if (!edgeChains) {
      int i = pre[dep[v] < dep[w] ? v : w];
      fromW.emplace_back(i, i, false);
    }
    reverse(fromW.begin(), fromW.end());
    fromV.insert(fromV.end(), fromW.begin(), fromW.end());
    return fromV;
  }
  // Decomposes each component; with no explicit roots, every yet-unvisited
  // vertex in index order becomes a root.
  template <class Forest>
  HLD(const Forest &G, const vector<int> &roots = vector<int>())
      : V(G.size()), ind(-1), root(V, -1), dep(V), par(V), size(V),
        head(V, -1), pre(V), post(V), vert(V) {
    if (roots.empty()) {
      for (int v = 0; v < V; v++) if (root[v] == -1) {
        dfs(G, v, -1, v, 0);
        hld(G, v, -1);
      }
    } else {
      for (int r : roots) {
        dfs(G, r, -1, r, 0);
        hld(G, r, -1);
      }
    }
  }
  template <class Forest> HLD(const Forest &G, int rt)
      : HLD(G, vector<int>{rt}) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "LowestCommonAncestor.h"
using namespace std;
// Mo's algorithm on a tree to answer offline queries over paths on a forest
// where each vertex has a value provided by an array A
// Template Arguments:
// S: struct to maintain a multiset of the elements in a set
// Required Fields:
// T: the type of each element
// R: the type of the return value for each query
// Q: the query object that contains information for each query
// Required Fields:
// v: one vertex of the query path
// w: the other vertex of the query path
// Required Functions:
// constructor(...args): takes any number of arguments (arguments are
// passed from constructor of MoTree)
// add(v): adds the value v to the multiset
// remove(v): removes the value v from the multiset
// Sample Struct: supporting queries for whether a value c exists on
// a path between vertices v and w
// struct S {
// using T = int; using R = bool;
// struct Q { int v, w, c; };
// vector<T> cnt;
// S(const vector<T> &A)
// : cnt(*max_element(A.begin(), A.end()) + 1, 0) {}
// void add(const T &v) { cnt[v]++; }
// void remove(const T &v) { --cnt[v]; }
// R query(const Q &q) const {
// return 0 <= q.c && q.c < int(cnt.size()) && cnt[q.c] > 0;
// }
// };
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type S::T of the values in the array
// queries: a vector of type S::Q representing the queries
// SCALE: the value to scale sqrt by
// ...args: arguments to pass to the constructor of S
// Fields:
// ans: a vector of integers with the answer for each query
// In practice, has a very small constant
// Time Complexity:
// constructor: O(C + K ((log K + U sqrt V) + T))
// for K queries where C is the time complexity of S's constructor,
// U is the time complexity of S.add and S.remove,
// and T is the time complexity of S.query
// Memory Complexity: O(V + K) for K queries
// Tested:
// https://www.spoj.com/problems/COT2/
// https://www.spoj.com/problems/GOT/
// https://dmoj.ca/problem/year2018p6
// Answers the queries offline with Mo's algorithm over the Euler tour of
// the forest: each vertex appears at both its pre and post index, so a
// vertex present an even number of times inside the current window cancels
// itself out (tracked by the vis flags below)
template <class S> struct MoTree {
  using T = typename S::T; using R = typename S::R; using Q = typename S::Q;
  struct Query {
    // q: original query, [l, r]: Euler tour window, lca: lca of q.v and
    // q.w, i: original query index, b: Mo block containing l
    Q q; int l, r, lca, i, b;
    Query(const Q &q, int l, int r, int lca, int i, int b)
        : q(q), l(l), r(r), lca(lca), i(i), b(b) {}
    // sort by block of l, then by r, alternating r's direction on odd
    // blocks to reduce total pointer movement
    pair<int, int> getPair() const { return make_pair(b, b % 2 ? -r : r); }
    bool operator < (const Query &o) const { return getPair() < o.getPair(); }
  };
  int V, ind; vector<int> pre, post, vert; LCA<> lca; vector<R> ans;
  // Euler tour: records each vertex at its pre and post index in vert
  template <class Forest> void dfs(const Forest &G, int v, int prev) {
    vert[pre[v] = ind++] = v; for (int w : G[v]) if (w != prev) dfs(G, w, v);
    vert[post[v] = ind++] = v;
  }
  // Tours every component and builds the LCA structure over all roots
  template <class Forest> LCA<> init(const Forest &G) {
    vector<int> roots; for (int v = 0; v < V; v++)
      if (pre[v] == -1) { roots.push_back(v); dfs(G, v, -1); }
    return LCA<>(G, roots);
  }
  template <class Forest, class ...Args> MoTree(
      const Forest &G, const vector<T> &A, const vector<Q> &queries,
      double SCALE = 2, Args &&...args)
      : V(G.size()), ind(0), pre(V, -1), post(V), vert(V * 2), lca(init(G)) {
    int K = queries.size(), bsz = max(1.0, sqrt(A.size()) * SCALE);
    vector<Query> q; q.reserve(K); vector<bool> vis(V, false);
    S s(forward<Args>(args)...); for (int i = 0; i < K; i++) {
      int v = queries[i].v, w = queries[i].w, u = lca.lca(v, w);
      if (pre[v] > pre[w]) swap(v, w);
      // if u == v the path is the tour segment [pre[v], pre[w]]; otherwise
      // start after v's subtree and handle the lca separately per query
      int l = u == v ? pre[v] : post[v], r = pre[w];
      q.emplace_back(queries[i], l, r, u, i, l / bsz);
    }
    // toggles membership of v in the multiset (second visit removes it)
    auto update = [&] (int v) {
      if (vis[v]) s.remove(A[v]);
      else s.add(A[v]);
      vis[v] = !vis[v];
    };
    sort(q.begin(), q.end()); int l = 0, r = l - 1; for (auto &&qi : q) {
      while (l > qi.l) update(vert[--l]);
      while (r < qi.r) update(vert[++r]);
      while (l < qi.l) update(vert[l++]);
      while (r > qi.r) update(vert[r--]);
      // temporarily add the lca when it is not already inside the window
      if (qi.lca != vert[l] && qi.lca != vert[r]) update(qi.lca);
      R res = s.query(qi.q); if (ans.empty()) ans.resize(K, res);
      ans[qi.i] = res;
      if (qi.lca != vert[l] && qi.lca != vert[r]) update(qi.lca);
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "HeavyLightDecomposition.h"
using namespace std;
// Supports path (and subtree) queries on a forest
// Vertices and indices are 0-indexed
// Template Arguments:
// R: struct supporting range queries on indices
// Required Fields:
// Data: the data type
// Required Functions:
// static qdef(): returns the query default value of type Data
// static merge(l, r): merges the datas l and r, must be associative
// constructor(A): takes a vector A of type Data with the initial
// value of each index
// query(l, r, rev): queries the range [l, r] with rev indicating
// if the result needs to be reversed
//   Sample Struct: supporting maximum subarray sum queries
// struct R {
// using Data = MaxSubarraySumCombine<int>::Data;
// static Data qdef() { return MaxSubarraySumCombine<int>::qdef(); }
// static Data merge(const Data &l, const Data &r) {
// return MaxSubarraySumCombine<int>::merge(l, r);
// }
// SegmentTreeLazyBottomUp<MaxSubarraySumCombine<int>> ST;
// R(const vector<Data> &A) : ST(A) {}
// Data query(int l, int r, bool rev) {
// Data ret = ST.query(l, r);
// if (rev) swap(ret.pre, ret.suf);
// return ret;
// }
// };
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type R::Data with the initial value of each vertex
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Functions:
// queryPath(v, w): queries the path from v to w
// querySubtree(v): queries the subtree of vertex v
// In practice, constructor has a moderate constant,
// query functions have a small constant
// Time Complexity:
// constructor: O(V) + time complexity of R's constructor
// queryPath: O(log V) + time complexity of R's query
// querySubtree: time complexity of R's query
// Memory Complexity: O(V) + memory complexity of R
// Tested:
// https://dmoj.ca/problem/acc2p3
template <class R, const bool VALUES_ON_EDGES>
struct StaticPathQueries : public HLD {
  // down[v]: aggregate of the chain prefix from head[v] down to v;
  // up[v]: the same elements merged in the opposite (v up to head[v]) order
  using Data = typename R::Data; R ops; vector<Data> down, up;
  // Queries the path from v to w: qu accumulates the half walked up from v
  // (kept in path order), qd the half walked down into w
  Data queryPath(int v, int w) {
    Data qu = R::qdef(), qd = R::qdef(); while (head[v] != head[w]) {
      if (dep[head[v]] < dep[head[w]]) {
        // w's chain is entered from its head, so it prepends onto qd
        qd = R::merge(down[w], qd); w = par[head[w]];
      } else { qu = R::merge(qu, up[v]); v = par[head[v]]; }
    }
    if (v != w) {
      // v and w now share a chain; query the remaining contiguous segment,
      // skipping the shallower endpoint when values live on edges
      if (dep[v] < dep[w])
        qu = R::merge(qu, ops.query(pre[v] + VALUES_ON_EDGES, pre[w], false));
      else
        qd = R::merge(ops.query(pre[w] + VALUES_ON_EDGES, pre[v], true), qd);
    } else if (!VALUES_ON_EDGES) {
      int i = pre[dep[v] < dep[w] ? v : w];
      qu = R::merge(qu, ops.query(i, i, false));
    }
    return R::merge(qu, qd);
  }
  // Queries the subtree of v (v itself excluded when values are on edges)
  Data querySubtree(int v) {
    int l = pre[v] + VALUES_ON_EDGES, r = post[v];
    return l <= r ? ops.query(l, r, false) : R::qdef();
  }
  // Permutes A from vertex order into pre-order index order
  vector<Data> reorder(const vector<Data> &A) {
    // reserve by element count; the previous version reserved A.capacity(),
    // which can over-allocate when the caller's vector has spare capacity
    vector<Data> ret; ret.reserve(A.size());
    for (int i = 0; i < V; i++) ret.push_back(A[vert[i]]);
    return ret;
  }
  template <class Forest>
  StaticPathQueries(const Forest &G, const vector<Data> &A,
      const vector<int> &roots = vector<int>())
      : HLD(G, roots), ops(reorder(A)), down(A), up(A) {
    // build the per-chain prefix aggregates in pre order, so the parent on
    // the same chain is always finished before its child
    for (int i = 0; i < V; i++) {
      int v = vert[i]; if (head[v] != v) {
        down[v] = R::merge(down[par[v]], A[v]);
        up[v] = R::merge(A[v], up[par[v]]);
      }
    }
  }
  template <class Forest>
  StaticPathQueries(const Forest &G, const vector<Data> &A, int rt)
      : StaticPathQueries(G, A, vector<int>{rt}) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/FischerHeunStructure.h"
#include "../../datastructures/unionfind/WeightedUnionFind.h"
using namespace std;
// Converts a tree or forest into a line graph to query for the maximum edge
// value on a path between two vertices
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges
// Cmp: the comparator to compare two values,
// convention is same as std::priority_queue in STL
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// V: number of vertices in the graph
// edges: a vector of tuples in the form (v, w, weight) representing
// an undirected edge in the graph between vertices v and w with
// weight of weight
// cmp: an instance of the Cmp struct
// Functions:
// connected(v, w): returns true if and only if v and w are connected
// query(v, w): returns the maximum edge weight (based on the comparator) on
// the path from vertices v and w assuming v and w are connected
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V)
// connected, query: O(1)
// Memory Complexity: O(V)
// Tested:
// https://dmoj.ca/problem/acc2p3
// https://codeforces.com/problemsets/acmsguru/problem/99999/383
template <class T, class Cmp = less<T>> struct LineTree {
  using Edge = tuple<int, int, T>;
  // root[v]: representative of v's component; ind[v]: v's position on its
  // component's line; weights[i]: weight of the ith smallest edge; FHS:
  // range maximum (by Cmp) over the edge indices between line positions
  vector<int> root, ind; vector<T> weights; FischerHeunStructure<int> FHS;
  // Processes edges from smallest to largest, concatenating the endpoint
  // lines; the joining edge dominates every pair split by it, so a range
  // maximum over the line answers path maximum queries
  vector<int> init(int V, vector<Edge> edges, Cmp cmp) {
    sort(edges.begin(), edges.end(), [&] (const Edge &a, const Edge &b) {
      return cmp(get<2>(a), get<2>(b));
    });
    vector<int> nxt(V, -1), A(V, 0), ret; ret.reserve(V); int k = 0;
    // union find weight: (first, last) vertex of a component's line;
    // joining appends b's line after a's and records the edge index k
    auto op = [&] (const pair<int, int> &a, const pair<int, int> &b) {
      nxt[a.second] = b.first; A[a.second] = k;
      return make_pair(a.first, b.second);
    };
    vector<pair<int, int>> W(V);
    for (int v = 0; v < V; v++) W[v] = make_pair(v, v);
    WeightedUnionFind<pair<int, int>, decltype(op)> uf(move(W), op);
    weights.reserve(edges.size()); for (auto &&e : edges) {
      // the join must run even when NDEBUG strips assertions, so the call
      // is hoisted out of assert (the input is required to be a forest,
      // hence every join is expected to succeed)
      bool joined = uf.join(get<0>(e), get<1>(e));
      assert(joined); (void) joined;
      k++; weights.push_back(get<2>(e));
    }
    for (int v = 0; v < V; v++) if ((root[v] = uf.find(v)) == v)
      for (int w = uf.getWeight(v).first; w != -1; w = nxt[w]) {
        ind[w] = ret.size(); ret.push_back(A[w]);
      }
    return ret;
  }
  LineTree(int V, vector<Edge> edges, Cmp cmp = Cmp())
      : root(V), ind(V), weights(), FHS(init(V, move(edges), cmp)) {}
  // True iff v and w are in the same component
  bool connected(int v, int w) { return root[v] == root[w]; }
  // Maximum (by Cmp) edge weight on the path from v to w; requires v != w
  // and v, w connected
  T query(int v, int w) {
    assert(v != w); if (ind[v] > ind[w]) swap(v, w);
    return weights[FHS.query(ind[v], ind[w] - 1)];
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/BitPrefixSumArray.h"
#include "HeavyLightDecomposition.h"
using namespace std;
// Wavelet Matrix supporting 2D aggregation path queries using heavy light
// decomposition on a forest where the data can change, but not the keys
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the element of the array
// R: struct supporting point updates and prefix queries on indices
// Required Fields:
// Data: the data type
// Lazy: the lazy type
// Required Functions:
// static qdef(): returns the query default value
// static merge(l, r): returns the values l of type Data merged with
// r of type Data, must be associative and commutative
// constructor(A): takes a vector A of type Data with the initial
// value of each index
// update(i, val): updates the index i with the value val
// query(l, r): queries the range [l, r]
// Sample Struct: supporting point increments updates and
// prefix sum queries
// struct R {
// using Data = int;
// using Lazy = int;
// static Data qdef() { return 0; }
// static Data merge(const Data &l, const Data &r) { return l + r; }
// FenwickTree1D<Data> FT;
// R(const vector<Data> &A) : FT(A) {}
// void update(int i, const Lazy &val) { FT.update(i, val); }
// Data query(int l, int r) { return FT.query(l, r); }
// };
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Cmp: the comparator to compare two elements
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type T of the values in the array
// X: a vector of type R::Data containing the initial data of the array
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// cmp: an instance of the Cmp struct
// Functions:
//   update(v, val): updates the vertex v with the lazy value val
// query(v, w, hi): returns the aggregate value of the data associated with
// all elements with a key less than or equal to hi (using the comparator)
// on the path from v to w
// query(v, w, lo, hi): returns the aggregate value of the data associated
// with all elements with a key of not less than lo and not greater than hi
// (using the comparator) on the path from v to w
// bsearch(v, w, f): over all keys in the array, finds the first key k such
// that query(v, w, k) returns true
// In practice, has a moderate constant
// Time Complexity:
// constructor: O((V + C) log V) where C is the time complexity of
// R's constructor
// update: O(U log V) where U is the time complexity of R's update function
// query, bsearch: O(Q (log V)^2) where Q is the time complexity of
// R's query function
// Memory Complexity: O(V + (V log V) / 64 + M log V) where M is the memory
// complexity of R
// Tested:
// Fuzz and Stress Tested
template <class T, class R, const bool VALUES_ON_EDGES, class Cmp = less<T>>
struct WaveletMatrixHLD : public HLD {
#define clt [&] (const T &a, const T &b) { return cmp(a, b); }
#define cle [&] (const T &a, const T &b) { return !cmp(b, a); }
  using Data = typename R::Data; using Lazy = typename R::Lazy;
  // H: number of wavelet levels; mid[h]: boundary of the zero-bit (left)
  // partition on level h; S: keys in sorted order; B[h]: level-h bit
  // vector over pre-order positions; D[h]: level-h aggregation structure
  int H; vector<int> mid; vector<T> S; vector<BitPrefixSumArray> B;
  vector<R> D; Cmp cmp;
  template <class Forest>
  WaveletMatrixHLD(const Forest &G, const vector<T> &A, const vector<Data> &X,
                   const vector<int> &roots = vector<int>(), Cmp cmp = Cmp())
      : HLD(G, roots), H(V == 0 ? 0 : __lg(V) + 1), mid(H), S(A),
        B(H, BitPrefixSumArray(V)), cmp(cmp) {
    // C[i]: rank of the key at pre-order position i; Y: data permuted to
    // pre order; P: permutation of positions maintained as each level is
    // stably partitioned; Z: data in the current level's ordering
    sort(S.begin(), S.end(), cmp); vector<int> temp(V), C(V), P(V);
    vector<Data> Y = X, Z = X; D.reserve(H); for (int v = 0; v < V; v++)
      temp[v] = lower_bound(S.begin(), S.end(), A[v], cmp) - S.begin();
    for (int v = 0; v < V; v++) { C[pre[v]] = temp[v]; Y[pre[v]] = X[v]; }
    iota(P.begin(), P.end(), 0); for (int h = H - 1; h >= 0; h--) {
      int ph = 1 << h;
      for (int i = 0; i < V; i++) B[h].set(i, C[P[i]] <= ph - 1);
      mid[h] = stable_partition(P.begin(), P.end(), [&] (int i) {
        return C[i] <= ph - 1;
      }) - P.begin();
      B[h].build(); for (int i = 0; i < V; i++) Z[i] = Y[P[i]];
      D.emplace_back(Z); for (int i = mid[h]; i < V; i++) C[P[i]] -= ph;
    }
    // D was filled from the top level down; reverse so D[h] matches B[h]
    reverse(D.begin(), D.end());
  }
  template <class Forest>
  WaveletMatrixHLD(const Forest &G, const vector<T> &A,
                   const vector<Data> &X, int rt, Cmp cmp = Cmp())
      : WaveletMatrixHLD(G, A, X, vector<int>{rt}, cmp) {}
  // A range is (left, right, cached rank before left, cached rank at
  // right); together the ranges cover the v-w path in the pre-order array
  using Ranges = vector<tuple<int, int, int, int>>;
  Ranges getRanges(int v, int w) {
    Ranges ranges; while (head[v] != head[w]) {
      if (dep[head[v]] < dep[head[w]]) {
        ranges.emplace_back(pre[head[w]], pre[w], 0, 0); w = par[head[w]];
      } else {
        ranges.emplace_back(pre[head[v]], pre[v], 0, 0); v = par[head[v]];
      }
    }
    if (v != w) {
      if (dep[v] < dep[w])
        ranges.emplace_back(pre[v] + VALUES_ON_EDGES, pre[w], 0, 0);
      else ranges.emplace_back(pre[w] + VALUES_ON_EDGES, pre[v], 0, 0);
    } else if (!VALUES_ON_EDGES) {
      int i = pre[dep[v] < dep[w] ? v : w]; ranges.emplace_back(i, i, 0, 0);
    }
    return ranges;
  }
  // Point update: follow v's position down through every level, stepping
  // into the left or right partition according to the level's bit
  void update(int v, const Lazy &val) {
    for (int i = pre[v], h = H - 1; h >= 0; h--) {
      if (B[h].get(i)) i = B[h].query(i - 1);
      else i += mid[h] - B[h].query(i - 1);
      D[h].update(i, val);
    }
  }
  // Caches the level-h prefix ranks of every range's endpoints in
  // tuple slots 2 and 3 (used by qryAggLeft/qryAggRight/left/right)
  void qryRanges(int h, Ranges &ranges) {
    for (auto &&r : ranges) {
      get<2>(r) = B[h].query(get<0>(r) - 1); get<3>(r) = B[h].query(get<1>(r));
    }
  }
  // Aggregate of the zero-bit (left) portion of every range on level h
  Data qryAggLeft(int h, const Ranges &ranges) {
    Data ret = R::qdef(); for (auto &&r : ranges) {
      int a = get<2>(r), b = get<3>(r) - 1;
      if (a <= b) ret = R::merge(ret, D[h].query(a, b));
    }
    return ret;
  }
  // Aggregate of the one-bit (right) portion of every range on level h
  Data qryAggRight(int h, const Ranges &ranges) {
    Data ret = R::qdef(); for (auto &&r : ranges) {
      int a = get<0>(r) + mid[h] - get<2>(r);
      int b = get<1>(r) + mid[h] - get<3>(r);
      if (a <= b) ret = R::merge(ret, D[h].query(a, b));
    }
    return ret;
  }
  // Descends every range into the zero-bit partition (needs qryRanges)
  void left(Ranges &ranges) {
    for (auto &&r : ranges) {
      get<0>(r) = get<2>(r); get<1>(r) = get<3>(r) - 1;
    }
  }
  // Descends every range into the one-bit partition (needs qryRanges)
  void right(int h, Ranges &ranges) {
    for (auto &&r : ranges) {
      get<0>(r) += mid[h] - get<2>(r); get<1>(r) += mid[h] - get<3>(r);
    }
  }
  // Aggregates keys <= x (f = clt) or keys < x (f = cle) starting at level
  // h, with cur keys already known to be below the surviving ranges
  template <class F>
  Data qryPre(int h, int cur, Ranges &ranges, const T &x, F f) {
    Data ret = R::qdef(); for (; h >= 0; h--) {
      int ph = 1 << h; qryRanges(h, ranges);
      if (cur + ph - 1 >= V || f(x, S[cur + ph - 1])) left(ranges);
      else {
        ret = R::merge(ret, qryAggLeft(h, ranges));
        cur += ph; right(h, ranges);
      }
    }
    return ret;
  }
  // Mirror of qryPre: aggregates the keys on the other side of the bound v
  template <class F>
  Data qrySuf(int h, int cur, Ranges &ranges, const T &v, F f) {
    Data ret = R::qdef(); for (; h >= 0; h--) {
      int ph = 1 << h; qryRanges(h, ranges);
      if (cur + ph - 1 >= V || f(v, S[cur + ph - 1])) {
        ret = R::merge(ret, qryAggRight(h, ranges));
        if (h == 0) ret = R::merge(ret, qryAggLeft(h, ranges));
        left(ranges);
      } else {
        if (h == 0) ret = R::merge(ret, qryAggRight(h, ranges));
        cur += ph; right(h, ranges);
      }
    }
    return ret;
  }
  // Aggregate of data for keys <= hi on the v-w path
  Data query(int v, int w, const T &hi) {
    Ranges ranges = getRanges(v, w); return qryPre(H - 1, 0, ranges, hi, clt);
  }
  // Aggregate of data for keys in [lo, hi] on the v-w path
  Data query(int v, int w, const T &lo, const T &hi) {
    Ranges ranges = getRanges(v, w);
    // descend while lo and hi fall in the same half; at the first split
    // resolve the two sides independently with qrySuf and qryPre
    for (int cur = 0, h = H - 1; h >= 0; h--) {
      int ph = 1 << h; qryRanges(h, ranges);
      bool loLeft = cur + ph - 1 >= V || !cmp(S[cur + ph - 1], lo);
      bool hiLeft = cur + ph - 1 >= V || cmp(hi, S[cur + ph - 1]);
      if (loLeft != hiLeft) {
        Ranges leftRanges = ranges, rightRanges = ranges;
        left(leftRanges); right(h, rightRanges);
        Data ret = R::merge(qrySuf(h - 1, cur, leftRanges, lo, cle),
                            qryPre(h - 1, cur + ph, rightRanges, hi, clt));
        return h == 0 ? R::merge(ret, qryAggLeft(h, ranges)) : ret;
      } else if (loLeft) left(ranges);
      else { cur += ph; right(h, ranges); }
    }
    return R::qdef();
  }
  // First key k (in sorted order) for which f applied to the aggregate of
  // all data with keys <= k on the path is true; returns (found, pointer
  // into S or nullptr)
  template <class F> pair<bool, T *> bsearch(int v, int w, F f) {
    int cur = 0; Data agg = R::qdef(); Ranges ranges = getRanges(v, w);
    for (int h = H - 1; h >= 0; h--) {
      qryRanges(h, ranges); Data val = qryAggLeft(h, ranges);
      if (f(R::merge(agg, val))) left(ranges);
      else { cur += 1 << h; agg = R::merge(agg, val); right(h, ranges); }
    }
    return make_pair(cur < V, cur < V ? &S[cur] : nullptr);
  }
#undef clt
#undef cle
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "LowestCommonAncestor.h"
#include "SubtreeQueries.h"
using namespace std;
// Supports online queries for the number of distinct elements in a
// subtree for a static forest G with V vertices
// Vertices are 0-indexed
// Template Arguments:
// T: the type of each element
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type T of the values of each vertex
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// Functions:
// query(v): returns the number of distinct elements in the subtree of
// vertex v
// In practice, has a moderate constant, slightly faster than SmallToLargeTree
// Time Complexity:
// constructor: O(V log V)
// query: O(1)
// Memory Complexity: O(V)
// Tested:
// https://cses.fi/problemset/task/1139/
template <class T> struct CountDistinctSubtree {
  // Difference-array backed range structure: updates add into single
  // cells; after the constructor takes prefix sums, query returns the
  // difference of two prefix values
  struct R {
    using Data = int; using Lazy = int; vector<Data> A;
    static Data qdef() { return 0; }
    R(vector<Data> A) : A(move(A)) {}
    void update(int l, int, const Lazy &val) { A[l] += val; }
    Data query(int l, int r) { return A[r] - (l == 0 ? 0 : A[l - 1]); }
  };
  LCA<> lca; SubtreeQueries<R, false> sbtr;
  // Number of distinct values in the subtree of v
  int query(int v) { return sbtr.querySubtree(v); }
  template <class Forest>
  CountDistinctSubtree(const Forest &G, const vector<T> &A,
      const vector<int> &roots = vector<int>())
      : lca(G, roots), sbtr(G, vector<int>(A.size(), 0), roots) {
    // offline trick: for every value, add +1 at each occurrence and -1 at
    // the lca of consecutive (by tour order) occurrences, so every subtree
    // containing the value sums to exactly +1 for it
    int V = G.size(); vector<T> temp = A; sort(temp.begin(), temp.end());
    temp.erase(unique(temp.begin(), temp.end()), temp.end());
    // C[v]: compressed value of A[v]; st: counting-sort bucket boundaries
    vector<int> C(V), st(temp.size() + 1, 0), ind(V);
    for (int v = 0; v < V; v++)
      st[C[v] = lower_bound(temp.begin(), temp.end(), A[v]) - temp.begin()]++;
    partial_sum(st.begin(), st.end(), st.begin());
    // bucket the occurrences of each value by tour order (sbtr.vert)
    for (int v : sbtr.vert) ind[--st[C[v]]] = v;
    for (int i = 0; i < int(temp.size()); i++)
      for (int j = st[i]; j < st[i + 1]; j++) {
        sbtr.updateVertex(ind[j], 1); if (j + 1 < st[i + 1])
          sbtr.updateVertex(lca.lca(ind[j], ind[j + 1]), -1);
      }
    // convert the point increments into prefix sums for O(1) range queries
    partial_sum(sbtr.ops.A.begin(), sbtr.ops.A.end(), sbtr.ops.A.begin());
  }
  template <class Forest>
  CountDistinctSubtree(const Forest &G, const vector<T> &A, int rt)
      : CountDistinctSubtree(G, A, vector<int>{rt}) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/trees/fenwicktrees/FenwickTree1D.h"
#include "LowestCommonAncestor.h"
#include "SubtreeQueries.h"
using namespace std;
// Supports online queries for the number of distinct elements in a
// subtree for a forest G with V vertices with vertex updates
// Vertices are 0-indexed
// Template Arguments:
// T: the type of each element
// C: a container representing a mapping from type T to a set of integers
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
//   A: a vector of type T of the initial values of each vertex
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// ...args: arguments to pass to the constructor of an instance of type C
// Functions:
// update(v, val): update vertex v with the value val
// query(v): returns the number of distinct elements in the subtree of
// vertex v
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V log V)
// update, query: O(log V)
// Memory Complexity: O(V)
// Tested:
// https://dmoj.ca/problem/dmopc20c1p5
template <class T, class C = map<T, set<int>>>
struct CountDistinctSubtreeUpdates {
  // Fenwick-backed point update / range sum structure over tour indices
  struct R {
    using Data = int; using Lazy = int; FenwickTree1D<Data> FT;
    static Data qdef() { return 0; }
    R(vector<Data> A) : FT(move(A)) {}
    void update(int l, int, const Lazy &val) { FT.update(l, val); }
    Data query(int l, int r) { return FT.query(l, r); }
  };
  // A: current value of each vertex; M: for each value, the set of tour
  // (pre-order) indices where it currently occurs
  vector<T> A; C M; LCA<> lca; SubtreeQueries<R, false> sbtr;
  // Adds (delta == 1) or removes (delta == -1) an occurrence of val at v,
  // maintaining the invariant of +1 at each occurrence and -1 at the lca
  // of tour-order-adjacent occurrences of the same value
  void upd(int v, const T &val, int delta) {
    int u = -1, w = -1; set<int> &mv = M[val];
    auto it = delta == 1 ? mv.upper_bound(sbtr.pre[v])
                         : mv.erase(mv.find(sbtr.pre[v]));
    if (it != mv.begin())
      sbtr.updateVertex(lca.lca(u = sbtr.vert[*prev(it)], v), -delta);
    if (it != mv.end())
      sbtr.updateVertex(lca.lca(v, w = sbtr.vert[*it]), -delta);
    // v's former neighbours become adjacent on removal (and stop being
    // adjacent on insertion), so their lca gets the opposite adjustment
    if (u != -1 && w != -1) sbtr.updateVertex(lca.lca(u, w), delta);
    sbtr.updateVertex(v, delta); if (delta == 1) mv.insert(it, sbtr.pre[v]);
  }
  // Replaces the value at vertex v with val
  void update(int v, const T &val) { upd(v, A[v], -1); upd(v, A[v] = val, 1); }
  // Number of distinct values in the subtree of v
  int query(int v) { return sbtr.querySubtree(v); }
  vector<int> init(int V) {
    // NOTE(review): reserves V + 1 slots before sizing to V — presumably
    // so a downstream consumer can grow by one without reallocating;
    // confirm against FenwickTree1D / SubtreeQueries internals
    vector<int> ret; ret.reserve(V + 1); ret.resize(V, 0); return ret;
  }
  template <class Forest, class ...Args> CountDistinctSubtreeUpdates(
      const Forest &G, const vector<T> &A,
      const vector<int> &roots = vector<int>(), Args &&...args)
      : A(A), M(forward<Args>(args)...),
        lca(G, roots), sbtr(G, init(A.size()), roots) {
    for (int v = 0; v < int(G.size()); v++) upd(v, A[v], 1);
  }
  template <class Forest, class ...Args>
  CountDistinctSubtreeUpdates(const Forest &G, const vector<T> &A, int rt,
                              Args &&...args)
      : CountDistinctSubtreeUpdates(G, A, vector<int>{rt},
                                    forward<Args>(args)...) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/BitPrefixSumArray.h"
#include "LowestCommonAncestor.h"
using namespace std;
// Wavelet Matrix supporting rank and select operations for paths on a forest
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the element of the array
// VALUES_ON_EDGES: boolean indicating whether the values are on the edges
// (the largest depth vertex of the edge) or the vertices
// Cmp: the comparator to compare two elements
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// A: a vector of type T of the values in the array
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// cmp: an instance of the Cmp struct
// Functions:
//   rank(v, w, x): returns the number of values less than x (using the
// comparator) on the path from v to w
// count(v, w, lo, hi) returns the number of values not less than lo and
// not greater than hi (using the comparator) on the path from v to w
// select(v, w, k): returns the kth element (0-indexed) sorted by the
// comparator if the values on the the path from v to w were sorted
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V log V)
// rank, count, select: O(log V)
// Memory Complexity: O(V + (V log V) / 64)
// Tested:
// https://mcpt.ca/problem/hld2 (rank/count)
// https://www.acmicpc.net/problem/11932 (select)
template <class T, const bool VALUES_ON_EDGES, class Cmp = less<T>>
struct WaveletMatrixTree {
#define clt [&] (const T &a, const T &b) { return cmp(a, b); }
#define cle [&] (const T &a, const T &b) { return !cmp(b, a); }
int V, H, preInd, postInd; vector<int> par, pre, post, last, mid1, mid2;
vector<T> S; LCA<> lca; vector<BitPrefixSumArray> B1, B2; Cmp cmp;
  // Tour: records parent, pre and post order indices; last[pre[v]] is the
  // number of post indices assigned before entering v, so a vertex u is an
  // ancestor of v exactly when pre[u] <= pre[v] and post[u] > last[pre[v]]
  template <class Forest> void dfs(const Forest &G, int v, int prev) {
    par[v] = prev; last[pre[v] = preInd++] = postInd - 1;
    for (int w : G[v]) if (w != prev) dfs(G, w, v);
    post[v] = postInd++;
  }
void build(const vector<int> &A, const vector<int> &ind, vector<int> &mid,
vector<BitPrefixSumArray> &B) {
vector<int> C(V); for (int v = 0; v < V; v++) C[ind[v]] = A[v];
for (int h = H - 1; h >= 0; h--) {
int ph = 1 << h; for (int i = 0; i < V; i++) B[h].set(i, C[i] <= ph - 1);
mid[h] = stable_partition(C.begin(), C.end(), [&] (int v) {
return v <= ph - 1;
}) - C.begin();
B[h].build(); for (int i = mid[h]; i < V; i++) C[i] -= ph;
}
}
  template <class Forest>
  WaveletMatrixTree(const Forest &G, const vector<T> &A,
                    const vector<int> &roots = vector<int>(), Cmp cmp = Cmp())
      : V(A.size()), H(V == 0 ? 0 : __lg(V) + 1), preInd(0), postInd(0),
        par(V, -1), pre(V), post(V), last(V), mid1(H), mid2(H),
        S(A), lca(G, roots), B1(H, BitPrefixSumArray(V)), B2(B1), cmp(cmp) {
    // tour every component (all unvisited vertices when no roots given),
    // then build one wavelet matrix over pre order (B1/mid1) and one over
    // post order (B2/mid2)
    sort(S.begin(), S.end(), cmp); if (roots.empty()) {
      for (int v = 0; v < V; v++) if (par[v] == -1) dfs(G, v, -1);
    } else for (int v : roots) dfs(G, v, -1);
    // C[v]: rank of A[v] among the sorted keys S
    vector<int> C(V); for (int v = 0; v < V; v++)
      C[v] = lower_bound(S.begin(), S.end(), A[v], cmp) - S.begin();
    build(C, pre, mid1, B1); build(C, post, mid2, B2);
  }
template <class Forest>
WaveletMatrixTree(const Forest &G, const vector<T> &A, int rt,
Cmp cmp = Cmp())
: WaveletMatrixTree(G, A, vector<int>{rt}, cmp) {}
bool connected(int v, int w) { return lca.connected(v, w); }
using Ranges = vector<tuple<int, int, int, int, int>>;
Ranges getRanges(int v, int w) {
int u = lca.lca(v, w), rt = lca.root[v]; Ranges ranges; ranges.reserve(5);
int t = VALUES_ON_EDGES ? 1 : (par[u] != -1 ? 2 : 0);
if (t && pre[rt] - 1 > 0)
ranges.emplace_back(pre[rt] - 1, last[pre[rt] - 1], -1, 0, 0);
ranges.emplace_back(pre[v], last[pre[v]], 1, 0, 0);
ranges.emplace_back(pre[w], last[pre[w]], 1, 0, 0);
ranges.emplace_back(pre[u], last[pre[u]], t == 1 ? -2 : -1, 0, 0);
if (t == 2) ranges.emplace_back(pre[par[u]], last[pre[par[u]]], -1, 0, 0);
return ranges;
}
int query(int h, Ranges &ranges) {
int val = 0; for (auto &&r : ranges) {
get<3>(r) = B1[h].query(get<0>(r)); get<4>(r) = B2[h].query(get<1>(r));
val += get<2>(r) * (get<3>(r) - get<4>(r));
}
return val;
}
void left(Ranges &ranges) {
for (auto &&r : ranges) {
get<0>(r) = get<3>(r) - 1; get<1>(r) = get<4>(r) - 1;
}
}
void right(int h, Ranges &ranges) {
for (auto &&r : ranges) {
get<0>(r) += mid1[h] - get<3>(r); get<1>(r) += mid2[h] - get<4>(r);
}
}
template <class F> int cnt(int v, int w, const T &x, F f) {
Ranges ranges = getRanges(v, w);
int ret = 0; for (int cur = 0, h = H - 1; h >= 0; h--) {
int ph = 1 << h, val = query(h, ranges);
if (cur + ph - 1 >= V || f(x, S[cur + ph - 1])) left(ranges);
else { cur += ph; ret += val; right(h, ranges); }
}
return ret;
}
int rank(int v, int w, const T &x) { return cnt(v, w, x, cle); }
int count(int v, int w, const T &lo, const T &hi) {
return cnt(v, w, hi, clt) - cnt(v, w, lo, cle);
}
T select(int v, int w, int k) {
Ranges ranges = getRanges(v, w);
int cur = 0; for (int h = H - 1; h >= 0; h--) {
int val = query(h, ranges); if (k < val) left(ranges);
else { cur += 1 << h; k -= val; right(h, ranges); }
}
return S[cur];
}
#undef clt
#undef cle
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Supports queries on graphs that involve the set of vertices in a subtree
// Template Arguments:
// S: struct to maintain a multiset of the elements in a set
// Required Fields:
// T: the type of each element
// R: the type of the return value for each query
// Q: the query object that contains information for each query
// Required Fields:
// v: the vertex of the subtree to query
// Required Functions:
// constructor(A): takes a vector A of type T equal to the static array
// representing the values for each vertex of the graph
// add(v): adds the value v to the multiset
// remove(v): removes the value v from the multiset
// Sample Struct: supporting queries for whether a value c exists in
// the subtree of vertex v
// struct S {
// using T = int; using R = bool;
// struct Q { int v, c; };
// vector<T> cnt;
// S(const vector<T> &A)
// : cnt(*max_element(A.begin(), A.end()) + 1, 0) {}
// void add(const T &v) { cnt[v]++; }
// void remove(const T &v) { --cnt[v]; }
// R query(const Q &q) const {
// return 0 <= q.c && q.c < int(cnt.size()) && cnt[q.c] > 0;
// }
// };
// Constructor Arguments:
// G: a generic forest data structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
//   A: a vector of type S::T of the values for each vertex
// queries: a vector of type S::Q representing the queries
// rt: a single root vertex
// roots: a vector of root vertices for each connected component
// f: a function from S::T to any arithmetic type, f(A[v]) needs to return
// the weight of the vertex v with value A[v]
// Fields:
// ans: a vector of integers with the answer for each query
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(C + U V log V + K T)
// for K queries where C is the time complexity of S's constructor,
// U is the time complexity of S.add and S.remove,
// and T is the time complexity of S.query
// Memory Complexity: O(V + K) for K queries
// Tested:
// https://cses.fi/problemset/task/1139/
// https://codeforces.com/contest/600/problem/E
// https://codeforces.com/contest/375/problem/D
template <class S> struct SmallToLargeTree {
  using T = typename S::T; using R = typename S::R; using Q = typename S::Q;
  // ans[i]: the answer to the ith query added
  vector<R> ans;
  // G: the forest, A: the value of each vertex, queries: the offline
  // subtree queries, roots: the root of each component (all components are
  // discovered automatically if empty), f: maps a value to a vertex weight
  template <class Forest, class F = function<int(T)>> SmallToLargeTree(
      const Forest &G, const vector<T> &A, const vector<Q> &queries,
      const vector<int> &roots = vector<int>(), F f = [] (T) { return 1; } ) {
    using weight_t = decltype(f(T())); int V = G.size(), K = queries.size();
    // st/ind: counting sort buckets of the query indices by queried vertex
    vector<int> st(V + 1, 0), ind(K); S s(A);
    vector<weight_t> size(V, weight_t());
    // Computes the total weight of each subtree
    function<void(int, int)> getSize = [&] (int v, int prev) {
      size[v] = f(A[v]);
      for (int w : G[v]) if (w != prev) { getSize(w, v); size[v] += size[w]; }
    };
    // Adds (add == true) or removes the values of v's subtree to/from the
    // multiset, skipping the subtree rooted at heavy (already present)
    function<void(int, int, int, bool)> update = [&] (
        int v, int prev, int heavy, bool add) {
      if (add) s.add(A[v]);
      else s.remove(A[v]);
      for (int w : G[v]) if (w != prev && w != heavy) update(w, v, heavy, add);
    };
    // Small to large: solve light children and discard their values, solve
    // the heavy child and keep its values, re-add the light subtrees, then
    // answer the queries at v; keep indicates whether v's subtree values
    // should remain in the multiset for v's parent
    function<void(int, int, bool)> dfs = [&] (int v, int prev, bool keep) {
      int heavy = -1; for (int w : G[v])
        if (w != prev && (heavy == -1 || size[heavy] < size[w])) heavy = w;
      for (int w : G[v]) if (w != prev && w != heavy) dfs(w, v, 0);
      if (heavy != -1) dfs(heavy, v, 1);
      update(v, prev, heavy, 1); for (int i = st[v]; i < st[v + 1]; i++) {
        // ans is sized lazily from the first result since R may not be
        // default constructible
        R res = s.query(queries[ind[i]]); if (ans.empty()) ans.resize(K, res);
        ans[ind[i]] = res;
      }
      if (!keep) update(v, prev, -1, 0);
    };
    for (int i = 0; i < K; i++) st[queries[i].v]++;
    partial_sum(st.begin(), st.end(), st.begin());
    for (int i = 0; i < K; i++) ind[--st[queries[i].v]] = i;
    if (roots.empty()) {
      // NOTE(review): unprocessed components are detected by size[v] still
      // holding the default weight; assumes f of a processed subtree never
      // sums back to weight_t() -- confirm for custom f
      for (int v = 0; v < V; v++)
        if (size[v] == weight_t()) { getSize(v, -1); dfs(v, -1, 0); }
    } else for (int v : roots) { getSize(v, -1); dfs(v, -1, 0); }
  }
  // Convenience constructor for a tree with a single root rt
  template <class Forest, class F = function<int(T)>> SmallToLargeTree(
      const Forest &G, const vector<T> &A, const vector<Q> &queries, int rt,
      F f = [] (T) { return 1; })
      : SmallToLargeTree(G, A, queries, vector<int>{rt}, f) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Finds the centroid of each component of a tree,
// and recursively splits the component at that vertex
// Can be used to create a centroid tree, which has depth O(log V)
// Function Arguments:
// G: a generic forest structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the forest
// f(G, excl, c, p): a function to call on each centroid of its
// component where G is a reference to the graph, excl is a reference to
// an array of bools that indicates whether a vertex has been previously
// used as a centroid, c is the current centroid (excl[c] is false),
//   and p is its parent in the centroid tree
// Return Value: a vector of integers representing the parent of each vertex
// in the centroid tree, or -1 if it is a root
// In practice, has a moderate constant
// Time Complexity: O(V log V)
// Memory Complexity: O(V)
// Tested:
// https://codeforces.com/contest/321/problem/C
// https://codeforces.com/contest/161/problem/D
// Decomposes the forest: finds the centroid of each component, calls f on
// it, excludes it, and recurses on the remaining pieces (via a BFS queue);
// returns the parent of each vertex in the centroid tree (-1 for roots)
// Fix: the outer loop previously tested par[s] == -1, but a component's
// root centroid keeps par == -1 after the component is fully decomposed,
// so it was processed a second time and f was called again on a vertex
// with excl[c] == true (violating f's documented contract); testing
// !excl[s] instead is false exactly for already-processed vertices
template <class Forest, class F>
vector<int> centroidDecomposition(const Forest &G, F f) {
  int V = G.size(); vector<int> size(V), par(V, -1);
  vector<bool> excl(V, false);
  // Computes subtree sizes within the current component; excluded vertices
  // act as deleted
  function<int(int, int)> getSize = [&] (int v, int prev) {
    size[v] = 1;
    for (int w : G[v]) if (w != prev && !excl[w]) size[v] += getSize(w, v);
    return size[v];
  };
  // Walks toward any subtree heavier than half the component; the vertex
  // with no such subtree is the centroid
  function<int(int, int, int)> dfs = [&] (int v, int prev, int compSize) {
    for (int w : G[v]) if (w != prev && !excl[w] && size[w] > compSize / 2)
      return dfs(w, v, compSize);
    return v;
  };
  // q holds (entry vertex of a piece, centroid tree parent of that piece)
  vector<pair<int, int>> q(V); for (int s = 0; s < V; s++) if (!excl[s]) {
    int front = 0, back = 0; q[back++] = make_pair(s, -1);
    while (front < back) {
      int v = q[front].first, c = dfs(v, -1, getSize(v, -1));
      par[c] = q[front++].second; f(G, excl, c, par[c]); excl[c] = true;
      for (int w : G[c]) if (!excl[w]) q[back++] = make_pair(w, c);
    }
  }
  return par;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include <ext/pb_ds/priority_queue.hpp>
using namespace std;
using namespace __gnu_pbds;
// Computes the global minimum cut for a weighted graph
// A cut is a partition of the vertices into two nonempty subsets
// A crossing edge is an edge with endpoints in both subsets
// The cost of a cut is the sum of the weights of the crossing edges
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// V: the number of vertices in the graph
// INF: a value for infinity
// Fields:
// cut: vector of booleans representing which side of the cut each vertex
// is on
// cutWeight: the weight of the global minimum cut
// Functions:
// addEdge(v, w, weight): adds an undirected edge between vertices v
// and w, with a weight of weight
// globalMinCut(): returns the global minimum cut of the current graph
// and updates the cut array
// In practice, has a very small constant
// Time Complexity:
// constructor: O(V)
// addEdge: O(1)
// globalMinCut: O(V (V + E) log V)
// Memory Complexity: O(V^2 + E)
// Tested:
// https://dmoj.ca/problem/checkercut
// https://hackerrank.com/contests/w37/challenges/two-efficient-teams/problem
template <class T> struct StoerWagnerGlobalMinCut {
  // Adjacency list edge storing the index (rev) of its reverse edge
  struct Edge {
    int to, rev; T weight;
    Edge(int to, int rev, T weight) : to(to), rev(rev), weight(weight) {}
  };
  // Priority queue entry: vertex v keyed by its connection weight w to the
  // set of already selected vertices
  struct Node {
    T w; int v; Node(T w, int v) : w(w), v(v) {}
    bool operator < (const Node &o) const { return w < o.w; }
  };
  int V; vector<vector<Edge>> G; vector<bool> cut; T cutWeight, INF;
  // Adds an undirected edge; self loops are ignored (they never cross a cut)
  void addEdge(int v, int w, T weight) {
    if (v == w) return;
    G[v].emplace_back(w, int(G[w].size()), weight);
    G[w].emplace_back(v, int(G[v].size()) - 1, weight);
  }
  StoerWagnerGlobalMinCut(int V, T INF = numeric_limits<T>::max())
      : V(V), G(V), cut(V, false), cutWeight(INF), INF(INF) {}
  T globalMinCut() {
    // H: working copy of the graph, contracted between phases;
    // par[x]: the contracted representative that x currently belongs to
    vector<vector<Edge>> H = G; fill(cut.begin(), cut.end(), false);
    cutWeight = INF; vector<int> par(V); iota(par.begin(), par.end(), 0);
    for (int phase = V - 1; phase > 0; phase--) {
      // W[v]: total edge weight from v to the selected set; vertex 0 is
      // implicitly selected first, and only representatives participate
      vector<T> W(V, T()); __gnu_pbds::priority_queue<Node> PQ;
      vector<typename decltype(PQ)::point_iterator> ptr(V, PQ.end());
      for (int v = 1; v < V; v++) if (par[v] == v)
        ptr[v] = PQ.push(Node(W[v], v));
      for (auto &&e : H[0]) if (ptr[e.to] != PQ.end())
        PQ.modify(ptr[e.to], Node(W[e.to] += e.weight, e.to));
      // maximum adjacency order: repeatedly extract the most tightly
      // connected remaining vertex
      for (int i = 0, v, last = 0; i < phase; i++, last = v) {
        T w = PQ.top().w; v = PQ.top().v; PQ.pop(); ptr[v] = PQ.end();
        if (i == phase - 1) {
          // cut-of-the-phase: the cut separating the final vertex v from
          // the rest of the graph has weight w
          if (cutWeight > w) {
            cutWeight = w; for (int x = 0; x < V; x++) cut[x] = par[x] == v;
          }
          // contract v into last, merging parallel edges via the W scratch
          fill(W.begin(), W.end(), T());
          for (auto &&e : H[v]) W[e.to] += e.weight;
          for (auto &&e : H[last]) {
            e.weight += W[e.to]; H[e.to][e.rev].weight += W[e.to];
            W[e.to] = T();
          }
          // remaining neighbors of v (not already adjacent to last) are
          // re-pointed at last
          for (auto &&e : H[v]) if (W[e.to] != T()) {
            H[e.to][e.rev].to = last; H[e.to][e.rev].rev = H[last].size();
            H[last].emplace_back(e.to, e.rev, e.weight);
          }
          H[v].clear();
          for (int x = 0; x < V; x++) if (par[x] == v) par[x] = last;
        } else {
          for (auto &&e : H[v]) if (ptr[e.to] != PQ.end())
            PQ.modify(ptr[e.to], Node(W[e.to] += e.weight, e.to));
        }
      }
    }
    return cutWeight;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the global minimum cut for a weighted graph
// A cut is a partition of the vertices into two nonempty subsets
// A crossing edge is an edge with endpoints in both subsets
// The cost of a cut is the sum of the weights of the crossing edges
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// V: the number of vertices in the graph
// INF: a value for infinity
// Fields:
// cut: vector of booleans representing which side of the cut each vertex
// is on
// cutWeight: the weight of the global minimum cut
// Functions:
// addEdge(v, w, weight): adds an undirected edge between vertices v
// and w, with a weight of weight
// globalMinCut(): returns the global minimum cut of the current graph
// and updates the cut array
// In practice, has a very small constant
// Time Complexity:
// constructor: O(V^2)
// addEdge: O(1)
// globalMinCut: O(V^3)
// Memory Complexity: O(V^2)
// Tested:
// https://dmoj.ca/problem/checkercut
// https://hackerrank.com/contests/w37/challenges/two-efficient-teams/problem
// O(V^3) Stoer-Wagner on an adjacency matrix; see the header comment above
// for the field and function contracts
template <class T> struct ClassicalStoerWagnerGlobalMinCut {
  int V; vector<vector<T>> G; vector<bool> cut; T cutWeight, INF;
  // Adds an undirected edge; self loops never cross a cut and are ignored
  void addEdge(int v, int w, T weight) {
    if (v == w) return;
    G[v][w] += weight;
    G[w][v] += weight;
  }
  ClassicalStoerWagnerGlobalMinCut(int V, T INF = numeric_limits<T>::max())
      : V(V), G(V, vector<T>(V, T())), cut(V, false),
        cutWeight(INF), INF(INF) {}
  T globalMinCut() {
    // H is a working copy contracted in place; par[x] is the contracted
    // representative that x currently belongs to
    vector<vector<T>> H = G;
    fill(cut.begin(), cut.end(), false);
    cutWeight = INF;
    vector<int> par(V);
    iota(par.begin(), par.end(), 0);
    for (int phase = V - 1; phase > 0; phase--) {
      // W[v]: connection weight of v to the selected set; vertex 0 is
      // always selected first, merged vertices never participate
      vector<T> W = H[0];
      vector<bool> merged(V);
      for (int v = 0; v < V; v++) merged[v] = par[v] != v;
      int last = 0;
      for (int i = 0; i < phase; i++) {
        // pick the most tightly connected unmerged vertex (never vertex 0)
        int v = -1;
        for (int w = 1; w < V; w++)
          if (!merged[w] && (v == -1 || W[v] < W[w])) v = w;
        if (i == phase - 1) {
          // cut-of-the-phase: isolate the final vertex v from the rest
          if (cutWeight > W[v]) {
            cutWeight = W[v];
            for (int w = 0; w < V; w++) cut[w] = par[w] == v;
          }
          // contract v into the previously selected vertex
          for (int w = 0; w < V; w++) {
            H[last][w] += H[v][w];
            H[w][last] += H[v][w];
            if (par[w] == v) par[w] = last;
          }
        } else {
          merged[v] = true;
          for (int w = 0; w < V; w++) W[w] += H[v][w];
          last = v;
        }
      }
    }
    return cutWeight;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the strongly connected components of a directed graph using
// Tarjan's algorithm
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic directed graph structure which can be weighted or unweighted,
// though weights do not change the components
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints for an unweighted graph, or a list of
// pair<int, T> for a weighted graph with weights of type T)
// size() const: returns the number of vertices in the graph
// condensationEdges: a reference to a vector of pairs that will store the
// edges in the condensation graph when all vertices in an scc are
// condensed into a single vertex (with an index equal to its scc id),
// and it is guaranteed that this list of edges is sorted by the first
// element in the pair in non decreasing order
// Fields:
// id: a vector of the index of the scc each vertex is part of
// components: a vector of vectors containing the vertices in each scc and
// is sorted in reverse topological order
// In practice, has a moderate constant, faster than Kosaraju
// Time Complexity:
// constructor: O(V + E)
// Memory Complexity: O(V)
// Tested:
// Stress Tested
// https://judge.yosupo.jp/problem/scc
// https://dmoj.ca/problem/acc2p2
// https://open.kattis.com/problems/watchyourstep
// Tarjan's strongly connected components; see the header comment above.
// id[v] encodes the state of v: -2 = unvisited, -1 = on the explicit
// stack, >= 0 = index of its finished scc
struct SCC {
  int ind, top; vector<int> id, low, stk; vector<vector<int>> components;
  // Extracts the target vertex of an unweighted or weighted edge
  int getTo(int e) { return e; }
  template <class T> int getTo(const pair<int, T> &e) { return e.first; }
  template <class Digraph> void dfs(const Digraph &G, int v) {
    stk[top++] = v;
    id[v] = -1;
    low[v] = ind++;
    int lowest = low[v];
    for (auto &&e : G[v]) {
      int u = getTo(e);
      if (id[u] == -2) dfs(G, u);
      // finished sccs have low == INT_MAX so they never lower lowest
      lowest = min(lowest, low[u]);
    }
    if (lowest < low[v]) {
      low[v] = lowest;
      return;
    }
    // v is the root of an scc: pop the stack down to v
    components.emplace_back();
    int u;
    do {
      u = stk[--top];
      id[u] = int(components.size()) - 1;
      low[u] = INT_MAX;
      components.back().push_back(u);
    } while (u != v);
  }
  template <class Digraph> SCC(const Digraph &G)
      : ind(0), top(0), id(G.size(), -2), low(G.size()), stk(G.size()) {
    int V = G.size();
    for (int v = 0; v < V; v++)
      if (id[v] == -2) dfs(G, v);
  }
  // Also collects the condensation edges, deduplicated per source scc and
  // sorted by source in non decreasing order (components are iterated in
  // reverse topological order)
  template <class Digraph>
  SCC(const Digraph &G, vector<pair<int, int>> &condensationEdges) : SCC(G) {
    vector<int> lastFrom(components.size(), -1);
    for (auto &&comp : components)
      for (int v : comp)
        for (auto &&e : G[v]) {
          int w = getTo(e);
          if (id[v] == id[w] || lastFrom[id[w]] == id[v]) continue;
          lastFrom[id[w]] = id[v];
          condensationEdges.emplace_back(id[v], id[w]);
        }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFind.h"
using namespace std;
// Support online queries for the number of bridges in a graph, after edges
// have been added
// Constructor Arguments:
// V: the number of vertices in the graph
// Fields:
// bridgeCnt: the current number of bridges in the graph
// uf1: a UnionFind data structure representing the 1-edge connected
// components
// uf2: a UnionFind data structure representing the 2-edge connected
// components
// Functions:
// addEdge(v, w): adds an edge between vertices v and w
// Time Complexity:
// constructor: O(V)
// addEdge: O(log V) amortized
// Memory Complexity: O(V)
// Tested:
// https://codeforces.com/gym/100551/problem/B
struct IncrementalBridges {
  // uf1: 1-edge connected components; uf2: 2-edge connected components
  // par: parent pointers of a rooted spanning forest over the 2ecc
  //   representative vertices; mn[r]: the representative forest vertex for
  //   the uf2 component whose find root is r
  // vis/stamp: timestamped marking used by the lca walk in addEdge
  UnionFind uf1, uf2; vector<int> par, mn, vis; int stamp, bridgeCnt;
  IncrementalBridges(int V)
      : uf1(V), uf2(V), par(V, -1), mn(V), vis(V, -1),
        stamp(-1), bridgeCnt(0) {
    iota(mn.begin(), mn.end(), 0);
  }
  void addEdge(int v, int w) {
    // an edge inside an existing 2ecc changes nothing
    if (uf2.connected(v, w)) return;
    if (uf1.connected(v, w)) {
      // v and w already connected: the new edge closes a cycle, so every
      // bridge on the v-w forest path collapses into one 2ecc; find the
      // meeting point (lca) by alternately climbing from both endpoints
      stamp++; int lca = -1; for (int x = v, y = w;; swap(x, y)) if (x != -1) {
        if (vis[x = mn[uf2.find(x)]] == stamp) { lca = x; break; }
        vis[x] = stamp; x = par[x];
      }
      // merge both paths into the lca, one bridge removed per merge
      for (int h = 0; h < 2; h++, swap(v, w))
        for (v = mn[uf2.find(v)]; v != lca;) {
          int p = mn[uf2.find(par[v])], pp = par[p]; uf2.join(v, p);
          par[v = mn[uf2.find(v)] = p] = pp; bridgeCnt--;
        }
    } else {
      // new edge connects two trees and is itself a bridge; reroot the
      // smaller tree at w by reversing parent pointers along its root path
      if (uf1.getSize(v) < uf1.getSize(w)) swap(v, w);
      for (int p = -1, last = v, x = w; x != -1; last = x, x = p) {
        p = par[x = mn[uf2.find(x)]]; par[x] = last;
      }
      bridgeCnt++; uf1.join(v, w);
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Decomposes an undirected graph into 2-edge connected components and
// identifies bridges
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic undirected graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the graph
// Fields:
// id: a vector of the index of the 2-edge connected component each vertex
// is part of
// components: a vector of vectors containing the vertices in each 2-edge
// connected component
// bridges: a vector of pairs that stores the bridges in the graph
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V + E)
// Memory Complexity: O(V)
// Tested:
// Stress Tested
// https://judge.yosupo.jp/problem/two_edge_connected_components
// Bridges and 2-edge connected components via a lowlink dfs; see the
// header comment above. id[v] encodes the state of v: -2 = unvisited,
// -1 = on the explicit stack, >= 0 = index of its finished component
struct Bridges {
  int ind, top; vector<int> id, low, pre, stk; vector<vector<int>> components;
  vector<pair<int, int>> bridges;
  // Pops the stack down to (and including) v, forming one new 2-edge
  // connected component
  void makeComponent(int v) {
    components.emplace_back();
    int w;
    do {
      w = stk[--top];
      id[w] = int(components.size()) - 1;
      components.back().push_back(w);
    } while (w != v);
  }
  template <class Graph> void dfs(const Graph &G, int v, int prev) {
    stk[top++] = v;
    id[v] = -1;
    low[v] = pre[v] = ind++;
    // only the first copy of the parent edge is skipped, so a parallel
    // edge back to the parent correctly prevents a bridge
    bool parEdge = false;
    for (int w : G[v]) {
      if (w == prev && !parEdge) {
        parEdge = true;
      } else if (id[w] == -2) {
        dfs(G, w, v);
        low[v] = min(low[v], low[w]);
        // w's subtree never reaches above w: the tree edge is a bridge
        if (low[w] == pre[w]) {
          bridges.emplace_back(v, w);
          makeComponent(w);
        }
      } else {
        low[v] = min(low[v], pre[w]);
      }
    }
  }
  template <class Graph> Bridges(const Graph &G)
      : ind(0), top(0), id(G.size(), -2), low(G.size()), pre(G.size()),
        stk(G.size()) {
    int V = G.size();
    for (int v = 0; v < V; v++)
      if (id[v] == -2) {
        dfs(G, v, -1);
        if (top > 0) makeComponent(v);
      }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFindUndo.h"
#include "../../queries/LIFOSetDivAndConq.h"
using namespace std;
// Support offline queries on connected components, after edges have been
// added or removed, using divide and conquer
// Constructor Arguments:
// V: the number of vertices in the graph
// Fields:
// ans: a vector of integers with the answer for
// each query (1 is true, 0 is false for boolean queries)
// Functions:
// addEdge(v, w): adds an edge between vertices v and w
// removeEdge(v, w): removes an edge between vertices v and w, assuming
// an edge exists
// addConnectedQuery(v, w): adds a query asking whether v and w are in the
// same connected component
// addSizeQuery(v): adds a query asking for the number of vertices in the
// same connected component as vertex v
// addCntQuery(): adds a query asking for the number of connected components
// solveQueries(): solves all queries asked so far
// In practice, has a small constant, faster than DynamicConnectivityLCT and
// DynamicConnectivityLevelStructure
// Time Complexity:
// constructor: O(1)
// addEdge, removeEdge, addConnectedQuery, addSizeQuery, addCntQuery: O(1)
// solveQueries: O(V + Q log Q log V)
// Memory Complexity: O(V + Q) for Q edge additions/removals and queries
// Tested:
// https://codeforces.com/gym/100551/problem/A
// https://codeforces.com/gym/100551/problem/E
// Offline dynamic connectivity: divide and conquer over the query timeline
// with a rollback union find; see the header comment above
struct DynamicConnectivityDivAndConq {
  // Multiset interface consumed by LIFOSetDivAndConq; elements are edges,
  // queries carry a type tag (1 = connected, 2 = size, 3 = count)
  struct S {
    using T = pair<int, int>; using R = int;
    struct Q { int type, v, w; };
    UnionFindUndo uf;
    S(int V) : uf(V) {}
    // Adding an edge joins its endpoints; removal undoes the latest join
    void push(const T &e) { uf.join(e.first, e.second); }
    void pop() { uf.undo(); }
    R query(const Q &q) {
      switch (q.type) {
        case 1: return uf.connected(q.v, q.w);
        case 2: return uf.getSize(q.v);
        default: return uf.cnt;
      }
    }
  };
  int V; LIFOSetDivAndConq<S> s; vector<int> &ans = s.ans;
  DynamicConnectivityDivAndConq(int V) : V(V) {}
  // Endpoints are normalized to sorted order so an addition and its
  // matching removal compare equal
  void addEdge(int v, int w) {
    s.addElement(make_pair(min(v, w), max(v, w)));
  }
  void removeEdge(int v, int w) {
    s.removeElement(make_pair(min(v, w), max(v, w)));
  }
  void addConnectedQuery(int v, int w) { s.addQuery(S::Q{1, v, w}); }
  void addSizeQuery(int v) { s.addQuery(S::Q{2, v, v}); }
  void addCntQuery() { s.addQuery(S::Q{3, -1, -1}); }
  void solveQueries() { s.solveQueries(V); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../dynamictrees/LinkCutTree.h"
using namespace std;
// Support offline queries on connected components, after edges have been
// added or removed, using a Link Cut Tree
// Constructor Arguments:
// V: the number of vertices in the graph
// Fields:
// ans: a vector of integers with the answer for
// each query (1 is true, 0 is false for boolean queries)
// Functions:
// addEdge(v, w): adds an edge between vertices v and w
// removeEdge(v, w): removes an edge between vertices v and w, assuming
// an edge exists
// addConnectedQuery(v, w): adds a query asking whether v and w are in the
// same connected component
// addCntQuery(): adds a query asking for the number of connected components
// solveQueries(): solves all queries asked so far
// In practice, has a moderate constant, slower than
// DynamicConnectivityDivAndConq, faster than
// DynamicConnectivityLevelStructure
// Time Complexity:
// constructor: O(1)
// addEdge, removeEdge, addConnectedQuery, addCntQuery: O(1)
// solveQueries: O(V + Q (log Q + log V))
// Memory Complexity: O(V + Q) for Q edge additions/removals and queries
// Tested:
// https://codeforces.com/gym/100551/problem/A
// https://codeforces.com/gym/100551/problem/E
struct DynamicConnectivityLCT {
  // Link cut tree node maintaining the minimum (removal time, query index)
  // pair over the represented path
  struct Node {
    using Data = pair<int, int>; using Lazy = Data;
    static const bool RANGE_UPDATES = false, RANGE_QUERIES = true;
    static const bool RANGE_REVERSALS = true, HAS_PAR = true;
    bool rev; Node *l, *r, *p; Data val, sbtr;
    Node(const Data &v)
        : rev(false), l(nullptr), r(nullptr), p(nullptr), val(v), sbtr(v) {}
    // Pulls the path minimum up from the splay children
    void update() {
      sbtr = val;
      if (l) { sbtr = min(l->sbtr, sbtr); }
      if (r) { sbtr = min(sbtr, r->sbtr); }
    }
    void propagate() {
      if (rev) {
        swap(l, r); rev = false;
        if (l) l->reverse();
        if (r) r->reverse();
      }
    }
    void reverse() { rev = !rev; }
    static Data qdef() { return make_pair(INT_MAX, -1); }
  };
  // queries: (type, v, w, aux); for type 0 (add) aux becomes the removal
  // time of the edge, for type 1 (remove) it becomes the matching add index
  int V; vector<tuple<int, int, int, int>> queries; vector<int> ans;
  DynamicConnectivityLCT(int V) : V(V) {}
  void addEdge(int v, int w) {
    if (v > w) swap(v, w);
    queries.emplace_back(0, v, w, -1);
  }
  void removeEdge(int v, int w) {
    if (v > w) swap(v, w);
    queries.emplace_back(1, v, w, -1);
  }
  void addConnectedQuery(int v, int w) {
    queries.emplace_back(2, v, w, queries.size());
  }
  void addCntQuery() { queries.emplace_back(3, -1, -1, queries.size()); }
  void solveQueries() {
    // First pass: pair each addEdge with its matching removeEdge (Q if it
    // is never removed) so every edge knows its removal time; last[j]
    // tracks the pending occurrences of the jth distinct edge
    vector<pair<int, int>> edges; int Q = queries.size(); edges.reserve(Q);
    for (auto &&q : queries) if (get<0>(q) == 0)
      edges.emplace_back(get<1>(q), get<2>(q));
    sort(edges.begin(), edges.end()); vector<int> last(edges.size(), Q);
    for (int i = 0; i < Q; i++) {
      int t, v, w, _; tie(t, v, w, _) = queries[i]; if (t == 0) {
        int j = lower_bound(edges.begin(), edges.end(), make_pair(v, w))
            - edges.begin();
        get<3>(queries[i]) = last[j]; last[j] = i;
      } else if (t == 1) {
        int j = lower_bound(edges.begin(), edges.end(), make_pair(v, w))
            - edges.begin();
        int temp = get<3>(queries[get<3>(queries[i]) = last[j]]);
        get<3>(queries[last[j]]) = i; last[j] = temp;
      }
    }
    // LCT vertices 0..V-1 are graph vertices; vertex V+i represents the
    // edge added by query i, valued by its removal time so path queries
    // find the edge removed soonest
    vector<pair<int, int>> tmp(V + Q, make_pair(INT_MAX, -1));
    for (int i = 0; i < Q; i++) tmp[V + i] = make_pair(get<3>(queries[i]), i);
    LCT<Node> lct(tmp); ans.clear(); for (int i = 0, cnt = V; i < Q; i++) {
      int t, v, w, o; tie(t, v, w, o) = queries[i]; if (t == 0) {
        if (v == w) continue;
        // maintain a spanning forest maximizing removal times: if v and w
        // are already connected, replace the path edge removed soonest
        // when this edge outlives it; cnt tracks the component count
        int z, j; tie(z, j) = lct.queryPath(v, w); if (j != -1) {
          if (z >= o) continue;
          lct.cut(get<1>(queries[j]), V + j);
          lct.cut(get<2>(queries[j]), V + j); cnt++;
        }
        lct.link(v, V + i); lct.link(w, V + i); cnt--;
      } else if (t == 1) {
        if (v == w) continue;
        // only remove the edge if it is currently in the spanning forest
        if (lct.connected(v, V + o)) {
          lct.cut(v, V + o); lct.cut(w, V + o); cnt++;
        }
      } else if (t == 2) ans.push_back(lct.connected(v, w));
      else if (t == 3) ans.push_back(cnt);
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFind.h"
using namespace std;
// Computes the connected components of a graph using Union Find
// Vertices are 0-indexed
// Constructor Arguments:
// V: number of vertices in the graph
// Fields:
// id: a vector of the index of the component each vertex is part of
// components: a vector of vectors containing the vertices in each component
// uf: a UnionFind data structure representing the connected components
// Functions:
// addEdge: adds a bidirectional edge between vertices v and w
// assign: assigns each vertex to a connected component and populates the
// id and component vectors
// In practice, has a small constant, faster than bfs and dfs
// Time Complexity:
// constructor: O((V + E) alpha V)
// Memory Complexity: O(V)
// Tested:
// https://dmoj.ca/problem/ccc03s3
// https://codeforces.com/contest/1253/problem/D
// Connected components via union find; see the header comment above
struct CC {
  int V; vector<int> id; vector<vector<int>> components; UnionFind uf;
  CC(int V) : V(V), id(V), uf(V) {}
  void addEdge(int v, int w) { uf.join(v, w); }
  // Populates id and components from the current union find state
  void assign() {
    components.clear();
    components.reserve(uf.cnt);
    fill(id.begin(), id.end(), -1);
    // pass 1: every union find root opens its own component
    for (int v = 0; v < V; v++)
      if (uf.find(v) == v) {
        id[v] = int(components.size());
        components.emplace_back(1, v);
        components.back().reserve(uf.getSize(v));
      }
    // pass 2: attach the remaining vertices to their root's component
    for (int v = 0; v < V; v++)
      if (id[v] == -1) {
        id[v] = id[uf.find(v)];
        components[id[v]].push_back(v);
      }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFind.h"
#include "../../datastructures/unionfind/PartiallyPersistentUnionFind.h"
using namespace std;
// Supports persistent queries on 2-edge connected component after
// edges have been added
// Constructor Arguments:
// V: number of vertices in the graph
// edges: a vector of pairs in the form (v, w) representing
// an undirected edge in the graph between vertices v and w in the order
// of the list
// Functions:
// find(t, v): inherited from PartiallyPersistentUnionFind, finds an
// arbitrary root of the 2-edge connected component containing vertex v,
// after the edge at index t in edges is added
// connected(t, v, w): inherited from PartiallyPersistentUnionFind,
// returns true if v and w are in the 2-edge connected component, after the
// edge at index t in edges is added, returns false otherwise
// getSize(t, v): inherited from PartiallyPersistentUnionFind, returns the
// size of the 2-edge connected component containing vertex v, after the
// edge at index t in edges is added
// getCnt(t): inherited from PartiallyPersistentUnionFind, returns the number
// of 2-edge connected components, after the edge at index t in
// edges is added
// getFirst(v, w): inherited from PartiallyPersistentUnionFind, returns
// the index of the first edge in edges that vertices v and w are in the
// same 2-edge connected component, -1 if v == w and curTime + 1 if they
// are never in the same 2-edge connected component
// getBridgeCnt(t): returns the number of bridges in the graph, after the
// edge at index t in edges is added
// In practice, constructor, find, connected, getSize, getCnt,
// getBridgeCnt have a small constant, getFirst has a very small constant
// Time Complexity:
// constructor: O(V alpha V + E log V)
// find, join, connected, getSize, getCnt: O(log V)
// getFirst: O(log V log E)
// getBridgeCnt: O(1)
// Memory Complexity: O(V + E)
// Tested:
// https://dmoj.ca/problem/mcco17d1p3
struct PartiallyPersistentIncrementalBridges
    : public PartiallyPersistentUnionFind {
  // bridgeCnt[t]: number of bridges after edges[0..t] have been added
  vector<int> bridgeCnt;
  PartiallyPersistentIncrementalBridges(int V,
      const vector<pair<int, int>> &edges)
      : PartiallyPersistentUnionFind(V), bridgeCnt(edges.size()) {
    // par/dep: parent and depth in a rooted spanning forest of the final
    // graph; st/to: CSR adjacency lists of that forest;
    // mn[r]: representative forest vertex of the 2ecc with find root r
    int E = edges.size(); vector<bool> inForest(E);
    vector<int> par(V, -1), dep(V), st(V + 1, 0), mn(V), to(E * 2);
    // Roots the spanning forest, recording parent and depth
    function<void(int, int, int)> dfs = [&] (int v, int prev, int d) {
      par[v] = prev; dep[v] = d; for (int e = st[v]; e < st[v + 1]; e++) {
        int w = to[e]; if (w != prev) dfs(w, v, d + 1);
      }
    };
    // inForest[i]: true if edge i connects two previously disconnected
    // components, i.e. it is a spanning forest edge
    UnionFind forest(V); for (int i = 0; i < E; i++)
      inForest[i] = forest.join(edges[i].first, edges[i].second);
    // counting sort of the forest edges into the CSR arrays
    iota(mn.begin(), mn.end(), 0); for (int i = 0; i < E; i++)
      if (inForest[i]) { st[edges[i].first]++; st[edges[i].second]++; }
    partial_sum(st.begin(), st.end(), st.begin());
    for (int i = 0; i < E; i++) if (inForest[i]) {
      int v, w; tie(v, w) = edges[i]; to[--st[v]] = w; to[--st[w]] = v;
    }
    for (int v = 0; v < V; v++) if (par[v] == -1) dfs(v, -1, 0);
    for (int i = 0, curBridgeCnt = 0; i < E; i++) {
      // a forest edge is a new bridge; a non-forest edge closes a cycle
      // and collapses the tree path between its endpoints, removing one
      // bridge per 2ecc merge
      if (inForest[i]) curBridgeCnt++;
      else {
        int v, w; tie(v, w) = edges[i];
        for (v = mn[find(i, v)], w = mn[find(i, w)]; v != w;) {
          if (dep[v] < dep[w]) swap(v, w);
          int p = mn[find(i, par[v])]; join(v, p); v = mn[find(i, v)] = p;
          // all joins caused by one edge share a single time step, so
          // curTime is rolled back after each join and advanced once below
          curTime--; curBridgeCnt--;
        }
      }
      bridgeCnt[i] = curBridgeCnt; curTime++;
    }
  }
  // Returns the number of bridges after the edge at index t is added
  int getBridgeCnt(int t) { return bridgeCnt[t]; }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../dynamictrees/LinkCutTree.h"
using namespace std;
// Support offline queries for the number of bridges in a graph, after edges
// have been added or removed, using a Link Cut Tree
// Constructor Arguments:
// V: the number of vertices in the graph
// Fields:
// ans: a vector of integers with the answer for each query
// Functions:
// addEdge(v, w): adds an edge between vertices v and w
// removeEdge(v, w): removes an edge between vertices v and w, assuming
// an edge exists
// addBridgeQuery(): adds a query for the number of bridges in the graph
// solveQueries(): solves all queries asked so far
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(1)
// addEdge, removeEdge, addBridgeQuery: O(1)
// solveQueries: O(V + Q (log Q + log V))
// Memory Complexity: O(V + Q) for Q edge additions/removals and queries
// Tested:
// https://codeforces.com/gym/100551/problem/D
struct DynamicBridges {
  // Link Cut Tree node over a forest where each original vertex and each
  // tree edge is a node; a tree edge is a bridge iff it is not "covered" by
  // any non-tree edge; cover ids are removal times of the covering edges
  struct Node {
    // NO_COVER: sentinel for "not covered"; NO_DEL: removal time for
    // edges/vertices that are never removed
    static constexpr const int NO_COVER = INT_MIN, NO_DEL = INT_MAX;
    // a = min(a, b), treating NO_COVER as positive infinity
    static void check(int &a, const int &b) {
      if ((a > b && b != NO_COVER) || a == NO_COVER) a = b;
    }
    // Data is (removal time, query index) so path queries find the tree
    // edge with the earliest removal time on a path
    using Data = pair<int, int>; using Lazy = Data;
    static const bool RANGE_UPDATES = false, RANGE_QUERIES = true;
    static const bool RANGE_REVERSALS = true;
    // isEdge: whether this node represents a tree edge
    // edgeCnt: number of edge nodes in the splay subtree
    // coveredCntSub: number of covered edge nodes in the splay subtree
    // coverLazy: pending cover id to push to the subtree
    // covered / coveredSub: smallest active cover id on this node / subtree
    bool isEdge, rev; Data val, sbtr;
    int edgeCnt, coveredCntSub, coverLazy, covered, coveredSub;
    Node *l, *r, *p;
    Node(const Data &v)
        : isEdge(false), rev(false), val(v), sbtr(v), edgeCnt(0),
          coveredCntSub(0), coverLazy(NO_COVER), covered(NO_COVER),
          coveredSub(NO_COVER), l(nullptr), r(nullptr), p(nullptr) {}
    // if a cover is pending, every edge in the subtree is covered
    int getCoveredCnt() {
      return coverLazy == NO_COVER ? coveredCntSub : edgeCnt;
    }
    void update() {
      edgeCnt = isEdge; coveredCntSub = isEdge && (covered != NO_COVER);
      coveredSub = covered; sbtr = val;
      if (l) {
        check(coveredSub, l->coveredSub); check(coveredSub, l->coverLazy);
        edgeCnt += l->edgeCnt; coveredCntSub += l->getCoveredCnt();
        sbtr = min(l->sbtr, sbtr);
      }
      if (r) {
        check(coveredSub, r->coveredSub); check(coveredSub, r->coverLazy);
        edgeCnt += r->edgeCnt; coveredCntSub += r->getCoveredCnt();
        sbtr = min(r->sbtr, sbtr);
      }
    }
    void propagate() {
      if (rev) {
        if (l) l->reverse();
        if (r) r->reverse();
        rev = false;
      }
      if (coverLazy != NO_COVER) {
        // a node may be covered by multiple edges; keeping the maximum id
        // keeps it covered for the longest (latest removal time)
        covered = max(covered, coverLazy); check(coveredSub, coverLazy);
        if (l) l->coverLazy = max(l->coverLazy, coverLazy);
        if (r) r->coverLazy = max(r->coverLazy, coverLazy);
        coveredCntSub = edgeCnt; coverLazy = NO_COVER;
      }
    }
    // removes all covers with id <= cover from the subtree; recursion only
    // descends where coveredSub indicates such a cover exists
    void removeCover(int cover) {
      if (coverLazy <= cover) coverLazy = NO_COVER;
      if (coveredSub == NO_COVER || coveredSub > cover) return;
      if (covered <= cover) covered = NO_COVER;
      if (l) l->removeCover(cover);
      if (r) r->removeCover(cover);
      propagate(); update();
    }
    void reverse() { rev = !rev; swap(l, r); }
    static Data qdef() { return make_pair(NO_DEL, -1); }
  };
  // queries: (type, v, w, aux) where aux is filled in with the matching
  // removal/addition index during solveQueries
  int V; vector<tuple<int, int, int, int>> queries; vector<int> ans;
  DynamicBridges(int V) : V(V) {}
  void addEdge(int v, int w) {
    if (v > w) swap(v, w);
    queries.emplace_back(0, v, w, -1);
  }
  void removeEdge(int v, int w) {
    if (v > w) swap(v, w);
    queries.emplace_back(1, v, w, -1);
  }
  void addBridgeQuery() { queries.emplace_back(2, -1, -1, -1); }
  void solveQueries() {
    // collect and sort all added edges so duplicates can be matched by
    // binary search; last[j] is the most recent unmatched event for edge j
    vector<pair<int, int>> edges; int Q = queries.size(); edges.reserve(Q);
    for (auto &&q : queries) if (get<0>(q) == 0)
      edges.emplace_back(get<1>(q), get<2>(q));
    sort(edges.begin(), edges.end()); vector<int> last(edges.size(), Q);
    // pair each removal with the most recent unmatched addition of the same
    // edge; afterwards, an addition's aux field holds its removal time
    // (Q if never removed) and a removal's aux field holds the matching
    // addition's index
    for (int i = 0; i < Q; i++) {
      int t, v, w, _; tie(t, v, w, _) = queries[i]; if (t == 0) {
        int j = lower_bound(edges.begin(), edges.end(), make_pair(v, w))
            - edges.begin();
        get<3>(queries[i]) = last[j]; last[j] = i;
      } else if (t == 1) {
        int j = lower_bound(edges.begin(), edges.end(), make_pair(v, w))
            - edges.begin();
        int temp = get<3>(queries[get<3>(queries[i]) = last[j]]);
        get<3>(queries[last[j]]) = i; last[j] = temp;
      }
    }
    // nodes 0..V-1 are vertices (removal time NO_DEL); node V+i represents
    // the edge added at query i, keyed by its removal time
    vector<pair<int, int>> tmp(V + Q, make_pair(Node::NO_DEL, -1));
    for (int i = 0; i < Q; i++) tmp[V + i] = make_pair(get<3>(queries[i]), i);
    LCT<Node> lct(tmp); int bridgeCnt = 0; for (int i = 0; i < Q; i++)
      lct.TR[V + i].edgeCnt = int(lct.TR[V + i].isEdge = true);
    // cover/uncover adjust bridgeCnt by the change in covered edges on the
    // v-w path (bridges are exactly the uncovered tree edges)
    auto cover = [&] (int x, int y, int coverId) {
      lct.queryPath(x, y); bridgeCnt += lct.TR[y].getCoveredCnt();
      lct.TR[y].coverLazy = coverId;
      bridgeCnt -= lct.TR[y].getCoveredCnt();
    };
    auto uncover = [&] (int x, int y, int coverId) {
      lct.queryPath(x, y); bridgeCnt += lct.TR[y].getCoveredCnt();
      lct.TR[y].removeCover(coverId);
      bridgeCnt -= lct.TR[y].getCoveredCnt();
    };
    auto addTreeEdge = [&] (int v, int w, int i) {
      lct.link(v, V + i); lct.link(w, V + i); bridgeCnt++;
    };
    // a removed tree edge only decreases the bridge count if it was
    // uncovered (getCoveredCnt() of the lone edge node is 0)
    auto removeTreeEdge = [&] (int v, int w, int i) {
      lct.cut(v, V + i); lct.cut(w, V + i);
      bridgeCnt += lct.TR[V + i].getCoveredCnt() - 1;
    };
    // maintain a maximum spanning forest keyed by removal time so that a
    // tree edge is always the last of its cycle to be removed
    ans.clear(); for (int i = 0; i < Q; i++) {
      int t, v, w, o; tie(t, v, w, o) = queries[i]; if (t == 0) {
        if (v == w) continue;
        // (z, j): earliest removal time and index of the weakest tree edge
        // on the v-w path; j == -1 when v and w are disconnected
        int z, j; tie(z, j) = lct.queryPath(v, w);
        if (j == -1) addTreeEdge(v, w, i);
        else {
          if (z >= o) { cover(v, w, o); continue; }
          // the new edge outlives the weakest tree edge: swap them, and the
          // replaced edge becomes a covering non-tree edge
          int x = get<1>(queries[j]), y = get<2>(queries[j]);
          removeTreeEdge(x, y, j); addTreeEdge(v, w, i); cover(x, y, z);
        }
      } else if (t == 1) {
        if (v == w) continue;
        // o is the index of the matching addition; if its edge node is
        // attached it is a tree edge, otherwise remove its cover (id == i,
        // this removal's time)
        if (lct.connected(v, V + o)) removeTreeEdge(v, w, o);
        else uncover(v, w, i);
      } else if (t == 2) ans.push_back(bridgeCnt);
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFind.h"
using namespace std;
// Decomposes an undirected graph into 3-edge connected components
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic undirected graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the graph
// Fields:
// id: a vector of the index of the 3-edge connected component each vertex
// is part of
// components: a vector of vectors containing the vertices in each 3-edge
// connected component
// uf: a UnionFind data structure representing the 3-edge connected
// components
// In practice, has a moderate constant
// Time Complexity:
// constructor: O((V + E) alpha V)
// Memory Complexity: O(V)
// Tested:
// https://judge.yosupo.jp/problem/three_edge_connected_components
struct ThreeEdgeCC {
  // pre/post: preorder entry and exit times of each vertex
  // low: low-link value of each vertex
  // deg: degree of the vertex's contracted component in the auxiliary graph
  // path[v]: head of a linked list of descendants of v whose 3-edge
  //   connected component assignment is still pending (the "absorb path" of
  //   Tsin's linear-time algorithm — confirm against the paper)
  int V, ind; vector<int> id, pre, post, low, deg, path;
  vector<vector<int>> components; UnionFind uf;
  template <class Graph> void dfs(const Graph &G, int v, int prev) {
    // self loops (w == v) are ignored; exactly one parallel edge to the
    // parent is skipped by resetting prev after the first occurrence
    pre[v] = ++ind; for (int w : G[v]) if (w != v) {
      if (w == prev) { prev = -1; continue; }
      if (pre[w] != -1) {
        // back edge to an ancestor vs. forward/down edge to a descendant:
        // a down edge absorbs the part of v's path inside w's subtree
        if (pre[w] < pre[v]) { deg[v]++; low[v] = min(low[v], pre[w]); }
        else {
          deg[v]--; int &u = path[v];
          for (; u != -1 && pre[u] <= pre[w] && pre[w] <= post[u];) {
            uf.join(v, u); deg[v] += deg[u]; u = path[u];
          }
        }
        continue;
      }
      // tree edge: recurse, then either extend v's path with w's path
      // (when the connecting edge is part of a potential 3-edge cut) or
      // absorb w's entire path into v's component
      dfs(G, w, v); if (path[w] == -1 && deg[w] <= 1) {
        deg[v] += deg[w]; low[v] = min(low[v], low[w]); continue;
      }
      if (deg[w] == 0) w = path[w];
      if (low[v] > low[w]) { low[v] = min(low[v], low[w]); swap(w, path[v]); }
      for (; w != -1; w = path[w]) { uf.join(v, w); deg[v] += deg[w]; }
    }
    post[v] = ind;
  }
  template <class Graph> ThreeEdgeCC(const Graph &G)
      : V(G.size()), ind(-1), id(V, -1), pre(V, -1), post(V), low(V, INT_MAX),
        deg(V, 0), path(V, -1), uf(V) {
    for (int v = 0; v < V; v++) if (pre[v] == -1) dfs(G, v, -1);
    // materialize the components from the union-find: roots first (so each
    // component id is stable), then the remaining members
    components.reserve(uf.cnt);
    for (int v = 0; v < V; v++) if (uf.find(v) == v) {
      id[v] = components.size(); components.emplace_back(1, v);
      components.back().reserve(uf.getSize(v));
    }
    for (int v = 0; v < V; v++) if (id[v] == -1)
      components[id[v] = id[uf.find(v)]].push_back(v);
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../dynamictrees/LinkCutTree.h"
using namespace std;
// Support queries for the number of bridges in a graph, after edges have been
// added, using a Link Cut Tree, as well as undoing the last edge added
// Constructor Arguments:
// V: the number of vertices in the graph
// Fields:
// bridgeCnt: the current number of bridges in the graph
// history: a vector of tuples storing information of the last edge added
// Functions:
// addEdge(v, w): adds an edge between vertices v and w
//   undo(): undoes the last edge added to the graph
// twoEdgeConnected(v, w): queries whether v and w are in the
// same 2-edge connected component
// Time Complexity:
// constructor: O(V)
// addEdge, undo, twoEdgeConnected: O(log V) amortized
// Memory Complexity: O(V)
// Tested:
// https://codeforces.com/gym/100551/problem/D
struct IncrementalBridgesUndo {
  // Link Cut Tree node; Data is (coverage level, count of nodes at the
  // maximum coverage level in the subtree); tree-edge nodes start at level 1
  // and each cycle through an edge decrements its level, so an edge is a
  // bridge iff its level is still 1
  struct Node {
    using Data = pair<int, int>; using Lazy = int;
    static const bool RANGE_UPDATES = true, RANGE_QUERIES = true;
    static const bool RANGE_REVERSALS = true;
    bool rev; int sz; Node *l, *r, *p; Lazy lz; Data val, sbtr;
    Node(const Data &v)
        : rev(false), sz(1), l(nullptr), r(nullptr), p(nullptr),
          lz(0), val(v), sbtr(v) {}
    void update() {
      // sbtr aggregates (max level, number of nodes attaining the max)
      sz = 1; sbtr = val;
      if (l) {
        sz += l->sz;
        if (sbtr.first < l->sbtr.first) sbtr = l->sbtr;
        else if (sbtr.first == l->sbtr.first) sbtr.second += l->sbtr.second;
      }
      if (r) {
        sz += r->sz;
        if (sbtr.first < r->sbtr.first) sbtr = r->sbtr;
        else if (sbtr.first == r->sbtr.first) sbtr.second += r->sbtr.second;
      }
    }
    void propagate() {
      if (rev) {
        if (l) l->reverse();
        if (r) r->reverse();
        rev = false;
      }
      if (lz != 0) {
        if (l) l->apply(lz);
        if (r) r->apply(lz);
        lz = 0;
      }
    }
    // range add on the level component
    void apply(const Lazy &v) {
      lz += v; val.first += v; sbtr.first += v;
    }
    void reverse() { rev = !rev; swap(l, r); }
    static Data qdef() { return make_pair(0, 0); }
  };
  // treeEdges: number of tree-edge nodes in use (node V + i is tree edge i)
  // history: (v, w, delta) per added edge; delta == -1 for a new tree edge,
  //   otherwise the number of bridges the edge eliminated
  int V, treeEdges, bridgeCnt; LCT<Node> lct;
  vector<tuple<int, int, int>> history;
  // vertices get initial value (0, 1); the V - 1 potential tree-edge nodes
  // get (1, 1) so an untouched tree edge counts as a bridge
  vector<pair<int, int>> init(int V) {
    vector<pair<int, int>> ret(max(0, V * 2 - 1), make_pair(1, 1));
    fill(ret.begin(), ret.begin() + V, make_pair(0, 1)); return ret;
  }
  IncrementalBridgesUndo(int V)
      : V(V), treeEdges(0), bridgeCnt(0), lct(init(V)) {}
  void addEdge(int v, int w) {
    // q.second == 0 means v and w are disconnected (qdef), so the new edge
    // becomes a tree edge and, initially, a bridge
    pair<int, int> q = lct.queryPath(v, w); if (q.second == 0) {
      lct.link(v, V + treeEdges); lct.link(w, V + treeEdges++); bridgeCnt++;
      history.emplace_back(v, w, -1); return;
    }
    // otherwise the edge closes a cycle: decrement the level of every node
    // on the v-w path; if the path maximum was 1, q.second edges were
    // bridges and now are not
    lct.updatePathFromRoot(w, -1); if (q.first == 1) bridgeCnt -= q.second;
    history.emplace_back(v, w, q.first == 1 ? q.second : 0);
  }
  void undo() {
    int v, w, delta; tie(v, w, delta) = history.back(); history.pop_back();
    if (delta < 0) {
      // the last edge was a tree edge: unlink it and drop its bridge
      lct.cut(v, V + --treeEdges); lct.cut(w, V + treeEdges); bridgeCnt--;
      return;
    }
    // the last edge was a cycle edge: restore the path levels and the
    // bridges it had eliminated
    lct.updatePath(v, w, 1); bridgeCnt += delta;
  }
  // v and w are 2-edge connected iff no edge on their path is at level 1
  bool twoEdgeConnected(int v, int w) {
    return lct.queryPath(v, w).first == 0;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the biconnected components of an undirected graph
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic undirected graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the graph
// blockCutForestEdges: a reference to a vector of pairs that will store the
// edges in the block-cut forest with the articulation vertices having the
// same index in the original graph, non articulation vertices being
// isolated, and each bcc/block having an index offset by V
// Fields:
// ids: a vector of vectors of the indices of the bccs each vertex is part of
// components: a vector of vectors containing the vertices in each bcc
// articulation: a vector of booleans that indicates whether each vertex
// is an articulation point or not
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V + E)
// Memory Complexity: O(V + E)
// Tested:
// Stress Tested
// https://judge.yosupo.jp/problem/biconnected_components
struct BCC {
  // ind: preorder counter; low/pre: low-link and preorder numbers
  // stk: stack of edges of the bcc currently being built
  // edgesInComp: the edges belonging to each bcc, parallel to components
  int ind; vector<int> low, pre; vector<pair<int, int>> stk;
  vector<bool> articulation; vector<vector<int>> ids, components;
  vector<vector<pair<int, int>>> edgesInComp;
  // adds vertex x to component id unless it was just added (edges of one
  // component are popped consecutively, so checking the back suffices)
  void assign(int x, int id) {
    if (ids[x].empty() || ids[x].back() != id) {
      ids[x].push_back(id); components.back().push_back(x);
    }
  }
  // pops every edge above stack position s into a new component
  void makeComponent(int s) {
    int x, y, id = components.size(); components.emplace_back();
    edgesInComp.emplace_back(); while (int(stk.size()) > s) {
      tie(x, y) = stk.back(); stk.pop_back(); assign(x, id); assign(y, id);
      edgesInComp.back().emplace_back(x, y);
    }
  }
  template <class Graph> void dfs(const Graph &G, int v, int prev) {
    low[v] = pre[v] = ind++; bool parEdge = false; int deg = 0, children = 0;
    for (int w : G[v]) {
      // skip exactly one copy of the edge to the parent so parallel edges
      // to the parent still count as back edges
      deg++; if (w == prev && !parEdge) parEdge = true;
      else if (pre[w] == -1) {
        children++; int s = stk.size(); stk.emplace_back(v, w);
        dfs(G, w, v); low[v] = min(low[v], low[w]);
        if (low[w] >= pre[v]) {
          // v separates w's subtree from the rest of the graph; the DFS
          // root is an articulation point only if it has at least two tree
          // children (with one child, removing the root disconnects nothing)
          if (prev != -1 || children > 1) articulation[v] = true;
          makeComponent(s);
        }
      } else {
        low[v] = min(low[v], pre[w]);
        // push each non-tree edge once, from its later endpoint
        if (pre[w] < pre[v]) stk.emplace_back(v, w);
      }
    }
    // an isolated vertex forms a singleton component with no edges
    if (deg == 0) { makeComponent(0); assign(v, int(components.size()) - 1); }
  }
  template <class Graph> BCC(const Graph &G)
      : ind(0), low(G.size()), pre(G.size(), -1),
        articulation(G.size(), false), ids(G.size()) {
    for (int v = 0; v < int(G.size()); v++) if (pre[v] == -1) dfs(G, v, -1);
  }
  template <class Graph>
  BCC(const Graph &G, vector<pair<int, int>> &blockCutForestEdges) : BCC(G) {
    for (int v = 0; v < int(G.size()); v++) if (articulation[v])
      for (int id : ids[v])
        blockCutForestEdges.emplace_back(v, int(G.size()) + id);
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/trees/binarysearchtrees/Splay.h"
using namespace std;
// Support online queries on connected components, after edges have been
// added or removed, using a level structure (log V Euler Tour Trees)
// Constructor Arguments:
// V: the number of vertices in the graph
// Fields:
// cnt: the current number of connected components
// Functions:
// addEdge(v, w): adds an edge between vertices v and w, assuming
// no edges currently exists
// removeEdge(v, w): removes an edge between vertices v and w, assuming
// an edge exists
// connected(v, w): returns true if v and w are in the same
// connected component, false otherwise
// getSize(v): returns the size of the connected component containing
// vertex v
// In practice, has a moderate constant, slower than DynamicConnectivityLCT and
// DynamicConnectivityDivAndConq
// Time Complexity:
// constructor: O(V)
// addEdge, connected, getSize: O(log V) amortized
// removeEdge: O((log V)^2) amortized
// Memory Complexity: O((V + E) log V)
// Tested:
// https://www.acmicpc.net/problem/17465
struct DynamicConnectivityLevelStructure {
  // splay tree node over an Euler tour sequence; v is the vertex this tour
  // position refers to, sz is the splay subtree size
  struct Node {
    int v, sz; Node *l, *r, *p;
    Node(int v) : v(v), sz(1), l(nullptr), r(nullptr), p(nullptr) {}
    void update() {
      sz = 1;
      if (l) sz += l->sz;
      if (r) sz += r->sz;
    }
    void propagate() {}
  };
  // one level of the structure: an Euler tour forest of the tree edges at
  // this level plus the set G of all (tree and non-tree) edges at this level
  struct Level : public Splay<Node> {
    // T[v] maps a neighbor w to the tour node for the arc (v, w);
    // T[v] is empty iff v is isolated in this level's forest
    // vis/stamp: scratch space for deduplicating vertices in vertsInComp
    vector<map<int, Node *>> T; vector<set<int>> G; vector<int> vis; int stamp;
    Level(int V) : T(V), G(V), vis(V, -1), stamp(0) {}
    // splices node n into the tour at position i
    void insert(Node *&root, int i, Node *n) {
      applyToRange(root, i, i - 1, [&] (Node *&x) { x = n; });
    }
    // removes and returns positions [l, r] of the tour as their own tree
    Node *disconnect(Node *&root, int l, int r) {
      Node *ret = nullptr;
      applyToRange(root, l, r, [&] (Node *&x) { ret = x; x = nullptr; });
      return ret;
    }
    // rotates the Euler tour so that an occurrence of v comes first
    void makeRoot(int v) {
      if (!T[v].empty()) {
        Node *l = T[v].begin()->second; int i = index(l, l);
        Node *r = disconnect(l, i, l->sz - 1); insert(l, 0, r);
        splay(T[v].begin()->second);
      }
    }
    // returns the vertex at the front of v's tour (the component identifier)
    int findRoot(int v) {
      if (T[v].empty()) return v;
      Node *x = T[v].begin()->second; splay(x);
      for (x->propagate(); x->l; (x = x->l)->propagate());
      splay(x); return x->v;
    }
    bool connected(int v, int w) { return findRoot(v) == findRoot(w); }
    // a k-vertex tree has 2(k - 1) tour nodes, so k = sz / 2 + 1;
    // returns 0 for a vertex that is isolated at this level
    int getSize(int v) {
      if (T[v].empty()) return 0;
      Node *x = T[v].begin()->second; splay(x); return x->sz / 2 + 1;
    }
    // records the edge in G; if v and w were disconnected here, also makes
    // it a tree edge by concatenating the two tours with the arc nodes
    // (v, w) and (w, v); returns true iff it became a tree edge
    bool addEdge(int v, int w) {
      G[v].insert(w); G[w].insert(v); if (!connected(v, w)) {
        makeRoot(v); makeRoot(w);
        Node *l = T[v].empty() ? nullptr : T[v].begin()->second;
        insert(l, l ? l->sz : 0, T[v][w] = makeNode(v));
        if (!T[w].empty()) insert(l, l->sz, T[w].begin()->second);
        insert(l, l->sz, T[w][v] = makeNode(w)); return true;
      }
      return false;
    }
    // removes the edge from G; if it was a tree edge here, cuts the tour
    // between its two arc nodes and frees them; returns true iff it was a
    // tree edge at this level
    bool removeEdge(int v, int w) {
      G[v].erase(w); G[w].erase(v); auto it1 = T[v].find(w);
      if (it1 != T[v].end()) {
        auto it2 = T[w].find(v); Node *a = it1->second, *b = it2->second;
        int i = index(a, a), j = index(b, b); if (i > j) swap(i, j);
        Node *c = disconnect(b, i, j); clear(disconnect(c, 0, 0));
        clear(disconnect(c, c->sz - 1, c->sz - 1));
        T[v].erase(it1); T[w].erase(it2); return true;
      }
      return false;
    }
    // collects the distinct vertices appearing in the splay tree rooted at x
    void dfs(Node *x, vector<int> &verts) {
      if (!x) return;
      if (vis[x->v] != stamp) { vis[x->v] = stamp; verts.push_back(x->v); }
      dfs(x->l, verts); dfs(x->r, verts);
    }
    // returns all vertices in v's component at this level
    vector<int> vertsInComp(int v) {
      if (T[v].empty()) return vector<int>{v};
      vector<int> ret; Node *x = T[v].begin()->second; splay(x); dfs(x, ret);
      stamp++; return ret;
    }
  };
  int V, cnt; deque<Level> lvls;
  DynamicConnectivityLevelStructure(int V) : V(V), cnt(V), lvls(1, Level(V)) {}
  void addEdge(int v, int w) { cnt -= lvls[0].addEdge(v, w); }
  void removeEdge(int v, int w) {
    // remove the edge from every level; i becomes the deepest level where
    // it was a tree edge (-1 if it was a non-tree edge everywhere)
    int i = -1; for (int j = 0; j < int(lvls.size()); j++)
      if (lvls[j].removeEdge(v, w)) i = j;
    if (i == -1) return;
    // f will hold a replacement edge reconnecting the two halves, if any;
    // tentatively count the component as split
    pair<int, int> f = make_pair(-1, -1); cnt++;
    for (; f.first == -1 && i >= 0; i--) {
      if (i + 1 == int(lvls.size())) lvls.emplace_back(V);
      // push the smaller side's tree edges down a level (amortization),
      // then scan its incident edges for one reaching the other side
      if (lvls[i].getSize(v) > lvls[i].getSize(w)) swap(v, w);
      int r = lvls[i].findRoot(w); for (int x : lvls[i].vertsInComp(v)) {
        for (auto &&y : lvls[i].T[x]) lvls[i + 1].addEdge(x, y.first);
        for (int y : lvls[i].G[x]) {
          if (f.first == -1 && lvls[i].findRoot(y) == r) f = make_pair(x, y);
          else { lvls[i + 1].G[x].insert(y); lvls[i + 1].G[y].insert(x); }
        }
      }
    }
    // a replacement was found: the component did not split after all, and
    // the replacement edge becomes a tree edge on all shallower levels
    if (f.first != -1)
      for (cnt--, i++; i >= 0; i--) lvls[i].addEdge(f.first, f.second);
  }
  bool connected(int v, int w) { return lvls[0].connected(v, w); }
  int getSize(int v) { return lvls[0].getSize(v); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFind.h"
using namespace std;
// Computes the minimum spanning tree (or forest) using Kruskal's algorithm
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges
// Constructor Arguments:
// V: number of vertices in the graph
// edges: a vector of tuples in the form (v, w, weight) representing
// an undirected edge in the graph between vertices v and w with
// weight of weight
// Fields:
// mstWeight: the weight of the mst
// mstEdges: a vector of tuples of the edges in the mst
// In practice, has a small constant, faster than Prim and Boruvka
// Time Complexity:
// constructor: O(V + E log E)
// Memory Complexity: O(V + E)
// Tested:
// Stress Tested
// https://open.kattis.com/problems/minspantree
template <class T> struct KruskalMST {
  using Edge = tuple<int, int, T>;
  T mstWeight; vector<Edge> mstEdges; UnionFind uf;
  // Considers edges in nondecreasing order of weight and keeps each edge
  // that joins two different components; stops early once V - 1 edges have
  // been selected
  KruskalMST(int V, vector<Edge> edges) : mstWeight(), uf(V) {
    auto byWeight = [] (const Edge &a, const Edge &b) {
      return get<2>(a) < get<2>(b);
    };
    sort(edges.begin(), edges.end(), byWeight);
    for (auto &&e : edges) {
      if (int(mstEdges.size()) >= V - 1) break;
      int v = get<0>(e), w = get<1>(e);
      if (uf.join(v, w)) { mstEdges.push_back(e); mstWeight += get<2>(e); }
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the minimum spanning tree (or forest) using Prim's algorithm
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges
// Constructor Arguments:
// G: a generic weighted graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of pair<int, T> with weights of type T)
// size() const: returns the number of vertices in the graph
// INF: a value for infinity
// Fields:
// mstWeight: the weight of the mst
// mstEdges: a vector of tuples of the edges in the mst
// In practice, has a small constant, faster than Boruvka, slower than Kruskal
// Time Complexity:
// constructor: O((V + E) log E)
// Memory Complexity: O(V + E)
// Tested:
// Stress Tested
// https://open.kattis.com/problems/minspantree
template <class T> struct PrimMST {
  using Edge = tuple<int, int, T>; T mstWeight; vector<Edge> mstEdges;
  // priority queue entry; the comparison is inverted so the queue pops the
  // smallest distance first
  struct Node {
    T d; int v; Node(T d, int v) : d(d), v(v) {}
    bool operator < (const Node &o) const { return d > o.d; }
  };
  // Grows the tree from each unvisited vertex using lazy deletion: stale
  // queue entries are skipped when their vertex is already in the tree
  template <class WeightedGraph>
  PrimMST(const WeightedGraph &G, T INF = numeric_limits<T>::max())
      : mstWeight() {
    int V = G.size(); vector<bool> inMST(V, false);
    // best[v]: cheapest known edge weight connecting v to the tree
    // par[v]: the tree endpoint of that edge (-1 if v is a tree root)
    vector<T> best(V, INF); vector<int> par(V, -1); priority_queue<Node> pq;
    for (int src = 0; src < V; src++) {
      if (inMST[src]) continue;
      best[src] = T(); pq.emplace(best[src], src);
      while (!pq.empty()) {
        Node top = pq.top(); pq.pop();
        if (inMST[top.v]) continue;
        inMST[top.v] = true;
        for (auto &&adj : G[top.v]) {
          int to = adj.first; if (!inMST[to] && adj.second < best[to]) {
            par[to] = top.v; best[to] = adj.second; pq.emplace(best[to], to);
          }
        }
      }
    }
    for (int v = 0; v < V; v++) if (par[v] != -1) {
      mstEdges.emplace_back(v, par[v], best[v]); mstWeight += best[v];
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFind.h"
using namespace std;
// Computes the minimum spanning tree (or forest) using Boruvka's algorithm
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges
// Constructor Arguments:
// V: number of vertices in the graph
// edges: a vector of tuples in the form (v, w, weight) representing
// an undirected edge in the graph between vertices v and w with
// weight of weight
// Fields:
// mstWeight: the weight of the mst
// mstEdges: a vector of tuples of the edges in the mst
// In practice, has a small constant, slower than Prim and Kruskal
// Time Complexity:
// constructor: O(V + E log V)
// Memory Complexity: O(V)
// Tested:
// Stress Tested
// https://open.kattis.com/problems/minspantree
template <class T> struct BoruvkaMST {
using Edge = tuple<int, int, T>;
T mstWeight; vector<Edge> mstEdges; UnionFind uf;
BoruvkaMST(int V, const vector<Edge> &edges) : mstWeight(), uf(V) {
for (int t = 1; t < V && int(mstEdges.size()) < V - 1; t *= 2) {
vector<int> closest(V, -1); for (int e = 0; e < int(edges.size()); e++) {
int v = uf.find(get<0>(edges[e])), w = uf.find(get<1>(edges[e]));
if (v == w) continue;
if (closest[v] == -1 || get<2>(edges[e]) < get<2>(edges[closest[v]]))
closest[v] = e;
if (closest[w] == -1 || get<2>(edges[e]) < get<2>(edges[closest[w]]))
closest[w] = e;
}
for (int v = 0; v < V; v++) if (closest[v] != -1
&& uf.join(get<0>(edges[closest[v]]), get<1>(edges[closest[v]]))) {
mstEdges.push_back(edges[closest[v]]);
mstWeight += get<2>(edges[closest[v]]);
}
}
}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/trees/heaps/SkewHeapIncremental.h"
#include "../../datastructures/unionfind/UnionFindUndo.h"
using namespace std;
// Computes the minimum arborescence, or directed minimum spanning tree using
// Gabow's variant of Edmonds' algorithm
// The directed minimum spanning tree is a set of edges such that every vertex
// is reachable from the root and the sum of the edge weights is minimized
// Template Arguments:
// T: the type of the weight of the edges
// Constructor Arguments:
// V: number of vertices in the directed graph
// edges: a vector of tuples in the form (v, w, weight) representing
// a directed edge in the graph from vertex v to w with
// weight of weight
// root: the root of the directed mst to find
// INF: a value for infinity
// Fields:
// mstWeight: the weight of the directed mst, or INF if none exist
// mstEdges: a vector of tuples of the edges in the directed mst
// In practice, has a moderate constant
// Time Complexity:
// constructor: O((V + E) log V)
// Memory Complexity: O(V + E)
// Tested:
// https://open.kattis.com/contests/nwerc18open/problems/fastestspeedrun
// https://codeforces.com/contest/240/problem/E
// https://judge.yosupo.jp/problem/directedmst
template <class T> struct GabowMinArborescence {
  using Edge = tuple<int, int, T>;
  // heap entry: an edge together with its original index; ordered by weight,
  // and supporting lazy weight increments over an entire heap
  struct Pair {
    Edge e; int ind;
    Pair(const Edge &e, int ind) : e(e), ind(ind) {}
    bool operator > (const Pair &p) const { return get<2>(e) > get<2>(p.e); }
    Pair &operator += (const T &add) { get<2>(e) += add; return *this; }
  };
  using Heap = SkewHeapIncremental<Pair, greater<Pair>, T>;
  T INF, mstWeight; vector<Edge> mstEdges;
  GabowMinArborescence(int V, const vector<Edge> &edges, int root,
                       T INF = numeric_limits<T>::max())
      : INF(INF), mstWeight() {
    // vis[v]: pass index that last visited v (-1 if unvisited); in[v]: the
    // chosen incoming edge of (super)vertex v; H[v]: heap of incoming edges
    UnionFindUndo uf(V); vector<int> vis(V, -1); vis[root] = root;
    vector<Edge> in(V, Edge(-1, -1, T())); vector<Heap> H(V);
    for (int i = 0; i < int(edges.size()); i++)
      H[get<1>(edges[i])].push(Pair(edges[i], i));
    // cycs records each contraction: (contracted root, union-find history
    // size before contracting, edges chosen around the cycle)
    vector<tuple<int, int, vector<Edge>>> cycs; for (int s = 0; s < V; s++) {
      // walk backwards from s along cheapest incoming edges until hitting a
      // vertex visited earlier (by this or a previous pass)
      int v = s; vector<pair<int, Edge>> path; while (vis[v] < 0) {
        // no incoming edge: root cannot reach this vertex
        if (H[v].empty()) { mstWeight = INF; return; }
        vis[v] = s; Pair p = H[v].pop(); path.emplace_back(v, edges[p.ind]);
        // subtract the chosen weight from all remaining incoming edges so
        // future choices are priced relative to this one
        H[v].increment(-get<2>(p.e)); v = uf.find(get<0>(p.e));
        mstWeight += get<2>(p.e); if (vis[v] == s) {
          // closed a cycle within this pass: contract it in the union-find,
          // merging the heaps of all cycle vertices
          Heap h; vector<Edge> E; int w, t = uf.history.size(); do {
            h.merge(H[w = path.back().first]);
            E.push_back(path.back().second); path.pop_back();
          } while (uf.join(v, w));
          H[v = uf.find(v)] = move(h); vis[v] = -1; cycs.emplace_back(v, t, E);
        }
      }
      for (auto &&p : path) in[uf.find(get<1>(p.second))] = p.second;
    }
    // expand contractions in reverse: undo the unions, then keep the cycle's
    // edges except the one displaced by the cycle's external incoming edge
    while (!cycs.empty()) {
      int v, t; vector<Edge> E; tie(v, t, E) = cycs.back(); cycs.pop_back();
      while (int(uf.history.size()) > t) uf.undo();
      Edge inEdge = in[v]; for (auto &&e : E) in[uf.find(get<1>(e))] = e;
      in[uf.find(get<1>(inEdge))] = inEdge;
    }
    for (int v = 0; v < V; v++)
      if (get<1>(in[v]) != -1) mstEdges.push_back(in[v]);
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/unionfind/UnionFindUndo.h"
using namespace std;
// Support offline queries for the minimum spanning tree, after edges have been
// added or removed, using divide and conquer and UnionFindUndo
// Template Arguments:
// T: the type of the weight of the edges
// Constructor Arguments:
// V: number of vertices in the graph
// Fields:
// ans: a vector of type T with the minimum spanning tree for each query
// Functions:
// addEdge(v, w, weight): adds an edge between vertices v and w with
// a weight of weight
// removeEdge(v, w, weight): removes an edge between vertices v and w with
// a weight of weight, assuming such an edge exists
// addMstQuery(v, w): adds a query asking for the current minimum spanning
// tree (or forest)
// solveQueries(): solves all queries asked so far
// In practice, has a small constant
// Time Complexity:
// constructor: O(V)
// addEdge, removeEdge, addMstQuery: O(1)
// solveQueries: O(V + Q log Q (log Q + log V))
// Memory Complexity: O(V + Q) for Q edge additions/removals and queries
// Tested:
// https://dmoj.ca/problem/ccoprep4p3
template <class T> struct DynamicMSTDivAndConq {
  // queries: (type, v, w, weight, aux) with type 0 = add, 1 = remove,
  // 2 = mst query; aux is filled with a unique per-edge slot index later
  int V; vector<tuple<int, int, int, T, int>> queries; vector<T> ans;
  DynamicMSTDivAndConq(int V) : V(V) {}
  void addEdge(int v, int w, T weight) {
    if (v > w) swap(v, w);
    queries.emplace_back(0, v, w, weight, -1);
  }
  void removeEdge(int v, int w, T weight) {
    if (v > w) swap(v, w);
    queries.emplace_back(1, v, w, weight, -1);
  }
  void addMstQuery() {
    queries.emplace_back(2, -1, -1, T(), -1);
  }
  void solveQueries() {
    // collect every added edge, sorted so duplicates can be assigned
    // distinct slots by counting prior additions/removals of the same edge
    vector<tuple<int, int, T>> edges; int Q = queries.size(); edges.reserve(Q);
    for (auto &&q : queries) if (get<0>(q) == 0)
      edges.emplace_back(get<1>(q), get<2>(q), get<3>(q));
    sort(edges.begin(), edges.end());
    // qinds: indices of the mst queries (with a -1 sentinel in front)
    vector<int> cntAdd(edges.size(), 0), cntRem = cntAdd, qinds{-1};
    qinds.reserve(Q); for (int i = 0; i < Q; i++) {
      int t, v, w, _; T weight; tie(t, v, w, weight, _) = queries[i];
      tuple<int, int, T> e(v, w, weight); if (t == 0) {
        int j = lower_bound(edges.begin(), edges.end(), e) - edges.begin();
        get<4>(queries[i]) = j + cntAdd[j]++;
      } else if (t == 1) {
        int j = lower_bound(edges.begin(), edges.end(), e) - edges.begin();
        get<4>(queries[i]) = j + cntRem[j]++;
      } else if (t == 2) qinds.push_back(i);
    }
    UnionFindUndo uf(V); ans.clear(); ans.reserve(Q);
    // active[j]: edge slot j is currently present in the graph
    // changed[j]: edge slot j is toggled somewhere inside the current
    //   divide and conquer interval
    vector<bool> active(edges.size(), false), changed(edges.size(), false);
    // active edges first, then by weight, so Kruskal over this order
    // prefers edges that are certainly present
    auto cmpEdge = [&] (int i, int j) {
      return active[i] == active[j] ? get<2>(edges[i]) < get<2>(edges[j])
                                    : active[i];
    };
    // dc processes mst queries qinds[ql..qr]; maybe holds the edge slots
    // that can still be in the mst, curMST the weight of edges already
    // known to be in every mst of the interval (contracted via uf)
    function<void(int, int, vector<int> &, T)> dc
        = [&] (int ql, int qr, vector<int> &maybe, T curMST) {
      int curSize = uf.history.size(); if (ql == qr) {
        // base case: replay the events before this single query, then run
        // Kruskal over the surviving candidate edges
        for (int i = qinds[ql - 1] + 1; i <= qinds[qr]; i++) {
          int t = get<0>(queries[i]), j = get<4>(queries[i]);
          if (t == 0) active[j] = true;
          else if (t == 1) active[j] = false;
          else if (t == 2) {
            sort(maybe.begin(), maybe.end(), cmpEdge); for (int j : maybe)
              if (active[j] && uf.join(get<0>(edges[j]), get<1>(edges[j])))
                curMST += get<2>(edges[j]);
            ans.push_back(curMST);
            while (int(uf.history.size()) > curSize) uf.undo();
          }
        }
        return;
      }
      sort(maybe.begin(), maybe.end(), cmpEdge);
      // mark and contract every edge toggled inside this interval; an edge
      // that still joins components afterwards is in every mst ("must")
      for (int i = qinds[ql - 1] + 1; i <= qinds[qr]; i++)
        if (get<0>(queries[i]) <= 1) {
          int j = get<4>(queries[i]); changed[j] = true;
          uf.join(get<0>(edges[j]), get<1>(edges[j]));
        }
      vector<int> must; for (int j : maybe)
        if (!changed[j] && uf.join(get<0>(edges[j]), get<1>(edges[j])))
          must.push_back(j);
      while (int(uf.history.size()) > curSize) uf.undo();
      // contract the must edges for the whole interval and add their weight
      for (int j : must) if (uf.join(get<0>(edges[j]), get<1>(edges[j])))
        curMST += get<2>(edges[j]);
      int curSize2 = uf.history.size();
      // an unchanged edge that creates a cycle among the candidates can
      // never enter the mst in this interval, so drop it from maybe
      vector<int> newMaybe; for (int j : maybe)
        if (changed[j] || uf.join(get<0>(edges[j]), get<1>(edges[j])))
          newMaybe.push_back(j);
      while (int(uf.history.size()) > curSize2) uf.undo();
      for (int i = qinds[ql - 1] + 1; i <= qinds[qr]; i++)
        if (get<0>(queries[i]) <= 1) changed[get<4>(queries[i])] = false;
      int qm = ql + (qr - ql) / 2;
      dc(ql, qm, newMaybe, curMST); dc(qm + 1, qr, newMaybe, curMST);
      while (int(uf.history.size()) > curSize) uf.undo();
    };
    if (int(qinds.size()) > 1) {
      vector<int> maybe(edges.size()); iota(maybe.begin(), maybe.end(), 0);
      dc(1, int(qinds.size()) - 1, maybe, T());
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../dynamictrees/LinkCutTree.h"
using namespace std;
// Supports queries for the minimum spanning tree (or forest) after
// edges have been added, using a Link Cut Tree
// Template Arguments:
// T: the type of the weight of the edges
// Constructor Arguments:
// V: number of vertices in the graph
// NEG_INF: a value for negative infinity of type T
// Fields:
// mstWeight: the weight of the current mst
// mstEdges: a vector of tuples of the edges in the current mst in the form
// (v, w, weight) representing an undirected edge in the graph between
// vertices v and w with weight of weight
// lct: a Link Cut Tree of the current mst of the graph
// Functions:
// addEdge(v, w, weight): adds an undirected edge in the graph between
// vertices v and w with weight of weight
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V)
// addEdge: O(log V) amortized
// Memory Complexity: O(V)
// Tested:
// https://dmoj.ca/problem/noi14p2
// https://open.kattis.com/problems/minspantree
// https://codeforces.com/gym/101047/problem/I
template <class T> struct IncrementalMST {
  // Splay tree node for the LCT storing a (weight, edge index) pair, with
  // the maximum such pair aggregated over the node's subtree (a path)
  struct Node {
    using Data = pair<T, int>; using Lazy = Data;
    static const bool RANGE_UPDATES = false, RANGE_QUERIES = true;
    static const bool RANGE_REVERSALS = true;
    bool rev; Node *l, *r, *p; Data val, sbtr;
    Node(const Data &v)
        : rev(false), l(nullptr), r(nullptr), p(nullptr), val(v), sbtr(v) {}
    // recomputes the subtree aggregate (maximum) from the children
    void update() {
      sbtr = val;
      if (l) { sbtr = max(l->sbtr, sbtr); }
      if (r) { sbtr = max(sbtr, r->sbtr); }
    }
    // pushes the pending reversal flag down to the children
    void propagate() {
      if (rev) {
        if (l) l->reverse();
        if (r) r->reverse();
        rev = false;
      }
    }
    void apply(const Lazy &v) { val = sbtr = v; }
    void reverse() { rev = !rev; swap(l, r); }
    static Data qdef() { return make_pair(T(), -1); }
  };
  // mst edges are represented as extra LCT vertices V..V+top-1 so they can
  // carry a weight; stk holds the free edge slots (top is its size)
  using Edge = tuple<int, int, T>; int V, top; vector<int> stk;
  T mstWeight; vector<Edge> mstEdges; LCT<Node> lct;
  IncrementalMST(int V, T NEG_INF = numeric_limits<T>::lowest())
      : V(V), top(max(0, V - 1)), stk(top), mstWeight(T()),
        lct(vector<pair<T, int>>(V + top, make_pair(NEG_INF, -1))) {
    iota(stk.rbegin(), stk.rend(), 0); mstEdges.reserve(top);
  }
  // adds the undirected edge (v, w, weight); if v and w are already
  // connected, the maximum weight edge on the v-w path is replaced if it is
  // heavier than the new edge, otherwise the new edge is ignored
  void addEdge(int v, int w, T weight) {
    if (v == w) return; // a self loop can never be in an mst
    // (z, j) is the maximum (weight, edge index) pair on the v-w path as
    // aggregated by Node; j == -1 if v and w are not yet connected
    T z; int j; tie(z, j) = lct.queryPath(v, w); if (j != -1) {
      if (z <= weight) return; // new edge does not improve the mst
      // remove edge j from the mst and return its slot to the free stack
      lct.cut(get<0>(mstEdges[j]), V + j); lct.cut(get<1>(mstEdges[j]), V + j);
      stk[top++] = j; mstWeight -= z;
    }
    j = stk[--top]; Edge e(v, w, weight);
    if (j >= int(mstEdges.size())) mstEdges.push_back(e);
    else mstEdges[j] = e;
    // link the new edge through its dedicated LCT vertex V + j
    lct.updateVertex(V + j, make_pair(weight, j));
    lct.link(v, V + j); lct.link(w, V + j); mstWeight += weight;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../dynamictrees/LinkCutTree.h"
using namespace std;
// Supports queries for the minimum spanning tree (or forest) after
// edges have been added, using a Link Cut Tree, as well as undoing the
// last edge added
// Template Arguments:
// T: the type of the weight of the edges
// Constructor Arguments:
// V: number of vertices in the graph
// NEG_INF: a value for negative infinity of type T
// Fields:
// mstWeight: the weight of the current mst
// mstEdges: a vector of tuples of the edges in the current mst in the form
// (v, w, weight) representing an undirected edge in the graph between
// vertices v and w with weight of weight
// lct: a Link Cut Tree of the current mst of the graph
// history: a vector of tuples storing information of the last edge added
// Functions:
// addEdge(v, w, weight): adds an undirected edge in the graph between
// vertices v and w with weight of weight
// undo(): undoes the last edge added to the graph
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V)
// addEdge, undo: O(log V) amortized
// Memory Complexity: O(V)
// Tested:
// https://dmoj.ca/problem/ccoprep4p3
template <class T> struct IncrementalMSTUndo {
  // Splay tree node for the LCT storing a (weight, edge index) pair, with
  // the maximum such pair aggregated over the node's subtree (a path)
  struct Node {
    using Data = pair<T, int>; using Lazy = Data;
    static const bool RANGE_UPDATES = false, RANGE_QUERIES = true;
    static const bool RANGE_REVERSALS = true;
    bool rev; Node *l, *r, *p; Data val, sbtr;
    Node(const Data &v)
        : rev(false), l(nullptr), r(nullptr), p(nullptr), val(v), sbtr(v) {}
    // recomputes the subtree aggregate (maximum) from the children
    void update() {
      sbtr = val;
      if (l) { sbtr = max(l->sbtr, sbtr); }
      if (r) { sbtr = max(sbtr, r->sbtr); }
    }
    // pushes the pending reversal flag down to the children
    void propagate() {
      if (rev) {
        if (l) l->reverse();
        if (r) r->reverse();
        rev = false;
      }
    }
    void apply(const Lazy &v) { val = sbtr = v; }
    void reverse() { rev = !rev; swap(l, r); }
    static Data qdef() { return make_pair(T(), -1); }
  };
  // mst edges are represented as extra LCT vertices V..V+top-1; stk holds
  // the free edge slots (top is its size)
  using Edge = tuple<int, int, T>; int V, top; vector<int> stk;
  T mstWeight; vector<Edge> mstEdges; LCT<Node> lct;
  // each history entry is (index of the edge removed from the mst or -1,
  // the removed edge, index of the edge added to the mst or -1, the added
  // edge, whether mstEdges was extended by the addition)
  vector<tuple<int, Edge, int, Edge, bool>> history;
  IncrementalMSTUndo(int V, T NEG_INF = numeric_limits<T>::lowest())
      : V(V), top(max(0, V - 1)), stk(top), mstWeight(T()),
        lct(vector<pair<T, int>>(V + top, make_pair(NEG_INF, -1))) {
    iota(stk.rbegin(), stk.rend(), 0); mstEdges.reserve(top);
  }
  // adds the undirected edge (v, w, weight), replacing the maximum weight
  // edge on the v-w path if it is heavier, and records enough information
  // in history to undo the call
  void addEdge(int v, int w, T weight) {
    // a history entry is pushed even for no-op additions so that undo()
    // always has a matching entry to pop
    Edge e(v, w, weight); history.emplace_back(-1, e, -1, e, false);
    if (v == w) return;
    // (z, j) is the maximum (weight, edge index) pair on the v-w path,
    // j == -1 if v and w are not yet connected
    T z; int j; tie(z, j) = lct.queryPath(v, w); if (j != -1) {
      if (z <= weight) return;
      lct.cut(get<0>(mstEdges[j]), V + j); lct.cut(get<1>(mstEdges[j]), V + j);
      get<0>(history.back()) = stk[top++] = j; mstWeight -= z;
      get<1>(history.back()) = mstEdges[j];
    }
    get<2>(history.back()) = j = stk[--top]; if (j >= int(mstEdges.size())) {
      mstEdges.push_back(e); get<4>(history.back()) = true;
    } else mstEdges[j] = e;
    lct.updateVertex(V + j, make_pair(weight, j)); lct.link(v, V + j);
    lct.link(w, V + j); mstWeight += weight; get<3>(history.back()) = e;
  }
  // undoes the last addEdge call: first removes the edge it added (if any),
  // then restores the edge it displaced (if any)
  void undo() {
    int j = get<2>(history.back()); Edge e = get<3>(history.back());
    if (j != -1) {
      mstWeight -= get<2>(e); stk[top++] = j;
      lct.cut(get<0>(e), V + j); lct.cut(get<1>(e), V + j);
      if (get<4>(history.back())) mstEdges.pop_back();
    }
    j = get<0>(history.back()); e = get<1>(history.back()); if (j != -1) {
      mstWeight += get<2>(e); --top; mstEdges[j] = e;
      lct.updateVertex(V + j, make_pair(get<2>(e), j));
      lct.link(get<0>(e), V + j); lct.link(get<1>(e), V + j);
    }
    history.pop_back();
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the minimum spanning tree (or forest) using Classical Prims's
// algorithm
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic weighted graph structure
// with the [] operator (const) defined to iterate over the adjacency list
// (which is a list of pair<int, T> with weights of type T), as well as a
// member function size() (const) that returns the number of vertices
// in the graph
// INF: a value for infinity
// Fields:
// mstWeight: the weight of the mst
// mstEdges: a vector of tuples of the edges in the mst
// In practice, has a small constant
// Time Complexity:
// constructor: O(V^2 + E)
// Memory Complexity: O(V + E)
// Tested:
// Stress Tested
// https://mcpt.ca/problem/anmstproblem
// Classical O(V^2) Prim: repeatedly select the cheapest fringe vertex by a
// linear scan, add it to the tree, and relax its incident edges; run once
// per connected component so a forest is handled as well
template <class T> struct ClassicalPrimMST {
  using Edge = tuple<int, int, T>; T mstWeight; vector<Edge> mstEdges;
  template <class WeightedGraph>
  ClassicalPrimMST(const WeightedGraph &G, T INF = numeric_limits<T>::max())
      : mstWeight() {
    int V = G.size();
    vector<bool> inTree(V, false); // whether the vertex has been taken
    vector<T> best(V, INF);        // cheapest known edge into each vertex
    vector<int> par(V, -1);        // the other endpoint of that edge
    for (int s = 0; s < V; s++) {
      if (inTree[s]) continue;
      best[s] = T(); // seed this component with a free entry edge
      while (true) {
        // linear scan for the cheapest vertex not yet in the tree
        int v = -1;
        for (int w = 0; w < V; w++) {
          if (inTree[w]) continue;
          if (v == -1 || best[w] < best[v]) v = w;
        }
        if (v == -1 || best[v] >= INF) break; // component exhausted
        inTree[v] = true;
        for (auto &&e : G[v]) {
          if (!inTree[e.first] && e.second < best[e.first]) {
            par[e.first] = v; best[e.first] = e.second;
          }
        }
      }
    }
    // every non-root vertex contributes the edge to its chosen parent
    for (int v = 0; v < V; v++) {
      if (par[v] != -1) {
        mstEdges.emplace_back(v, par[v], best[v]); mstWeight += best[v];
      }
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "KruskalMST.h"
using namespace std;
// Computes the minimum spanning tree of a complete graph of N points where
// the edge weight between two points is equal to the Manhattan distance
// between them: |x_i - x_j| + |y_i - y_j|
// Generates up to 4N candidate edges with each point connected to its nearest
// neighbour in each octant
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the coordinates of the points
// Constructor Arguments:
// P: a vector of pairs of type T representing the points
// Fields:
// mstWeight: the weight of the mst
// mstEdges: a vector of tuples of the edges in the mst with the vertices
// corresponding to the original indices of P
// In practice, has a moderate constant
// Time Complexity: O(N log N)
// Memory Complexity: O(N)
// Tested:
// https://judge.yosupo.jp/problem/manhattanmst
template <class T> struct ManhattanMST : public KruskalMST<T> {
  using Edge = typename KruskalMST<T>::Edge;
  // Generates up to 4N candidate edges over 4 sweeps; each sweep connects
  // points to a nearest neighbour in one pair of octants, and the point set
  // is reflected/transposed between sweeps to cover the remaining octants
  static vector<Edge> generateCandidates(vector<pair<T, T>> P) {
    vector<int> id(P.size()); iota(id.begin(), id.end(), 0); vector<Edge> ret;
    ret.reserve(P.size() * 4); for (int h = 0; h < 4; h++) {
      // sorts by x + y ascending; the comparison is written as a difference
      // (x_i - x_j < y_j - y_i) presumably to reduce overflow risk
      sort(id.begin(), id.end(), [&] (int i, int j) {
        return P[i].first - P[j].first < P[j].second - P[i].second;
      });
      // M maps -y to the most recently processed point with that y; the
      // lower_bound finds candidates with y_j <= y_i (so dy >= 0)
      map<T, int> M; for (int i : id) {
        auto it = M.lower_bound(-P[i].second);
        for (; it != M.end(); it = M.erase(it)) {
          int j = it->second;
          T dx = P[i].first - P[j].first, dy = P[i].second - P[j].second;
          // only candidates within the octant dx >= dy >= 0 are connected;
          // once outside, the remaining (larger dy) candidates are too
          if (dy > dx) break;
          ret.emplace_back(i, j, dx + dy);
        }
        M[-P[i].second] = i;
      }
      // transform the plane for the next sweep (negate x on odd passes,
      // swap x and y on even passes)
      for (auto &&p : P) {
        if (h % 2) p.first = -p.first;
        else swap(p.first, p.second);
      }
    }
    return ret;
  }
  // runs Kruskal on the candidate edge set
  ManhattanMST(const vector<pair<T, T>> &P)
      : KruskalMST<T>(P.size(), generateCandidates(P)) {}
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "IncrementalBipartiteUndo.h"
#include "../../queries/LIFOSetDivAndConq.h"
using namespace std;
// Support offline queries on connected components and bipartiteness, after
// edges have been added or removed, using divide and conquer
// Constructor Arguments:
// V: the number of vertices in the graph
// Fields:
// ans: a vector of integers with the answer for
// each query (1 is true, 0 is false for boolean queries)
// Functions:
// addEdge(v, w): adds an edge between vertices v and w
// removeEdge(v, w): removes an edge between vertices v and w, assuming
// an edge exists
// addConnectedQuery(v, w): adds a query asking whether v and w are in the
// same connected component
// addSizeQuery(v): adds a query asking for the number of vertices in the
// same connected component as vertex v
// addCntQuery(): adds a query asking for the number of connected components
// addComponentBipartiteQuery(v): adds a query asking for whether the
// connected component containing vertex v is bipartite
// addBipartiteGraphQuery(): adds a query asking if the graph is bipartite
// addColorQuery(v): adds a query asking for the color of vertex v for one
// possible coloring of the graph, assuming the component is bipartite
// addPathParityQuery(v, w): adds a query asking for the parity of the path
// from v to w (false if even number of edges, true if odd), assuming the
// component is bipartite and v and w are connected
// solveQueries(): solves all queries asked so far
// In practice, has a small constant
// Time Complexity:
// constructor: O(1)
// addEdge, removeEdge, addConnectedQuery, addSizeQuery, addCntQuery: O(1)
// addComponentBipartiteQuery, addBipartiteGraphQuery: O(1)
// addColorQuery, addPathParity: O(1)
// solveQueries: O(V + Q log Q log V)
// Memory Complexity: O(V + Q) for Q edge additions/removals and queries
// Tested:
// https://codeforces.com/contest/813/problem/F
struct DynamicBipartiteDivAndConq {
  // Adapter struct plugged into LIFOSetDivAndConq: elements are edges,
  // queries are dispatched to the undoable bipartite union find
  struct S {
    using T = pair<int, int>; using R = int;
    struct Q { int type, v, w; };
    IncrementalBipartiteUndo uf;
    S(int V) : uf(V) {}
    void push(const T &e) { uf.addEdge(e.first, e.second); }
    void pop() { uf.undo(); }
    R query(const Q &q) {
      switch (q.type) {
        case 1: return uf.connected(q.v, q.w);
        case 2: return uf.getSize(q.v);
        case 3: return uf.cnt;
        case 4: return uf.componentBipartite(q.v);
        case 5: return uf.bipartiteGraph;
        case 6: return uf.color(q.v);
        default: return uf.pathParity(q.v, q.w);
      }
    }
  };
  int V; LIFOSetDivAndConq<S> s; vector<int> &ans = s.ans;
  DynamicBipartiteDivAndConq(int V) : V(V) {}
  // internal helper constructing and enqueueing a query record
  void addQ(int type, int v, int w) {
    S::Q q; q.type = type; q.v = v; q.w = w; s.addQuery(q);
  }
  // edges are normalized to v <= w so additions and removals match up
  void addEdge(int v, int w) {
    if (v > w) swap(v, w);
    s.addElement(make_pair(v, w));
  }
  void removeEdge(int v, int w) {
    if (v > w) swap(v, w);
    s.removeElement(make_pair(v, w));
  }
  void addConnectedQuery(int v, int w) { addQ(1, v, w); }
  void addSizeQuery(int v) { addQ(2, v, v); }
  void addCntQuery() { addQ(3, -1, -1); }
  void addComponentBipartiteQuery(int v) { addQ(4, v, v); }
  void addBipartiteGraphQuery() { addQ(5, -1, -1); }
  void addColorQuery(int v) { addQ(6, v, v); }
  void addPathParityQuery(int v, int w) { addQ(7, v, w); }
  void solveQueries() { s.solveQueries(V); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Supports queries for whether a graph component is bipartite
// after edges have been added using Union Find by size supporting undos
// Vertices are 0-indexed
// V: the number of vertices in the graph
// Fields:
// UF: a vector of integers representing the parent of each vertex in the
// tree, or the negative of the size of the connected component if that
// vertex is a root
// P: a vector of booleans representing the parity of the path to the root
// of that connected component
// B: a vector of booleans representing whether the component rooted at the
// vertex is bipartite for root vertices, undefined otherwise
// cnt: the current number of connected components
// bipartiteGraph: a boolean indicating whether the current graph is
// bipartite or not
// history: a vector of tuples storing the history of all addEdge calls
// Functions:
// find(v): returns a pair containing the root of the connected component and
// the parity of the path from the root to vertex v
// addEdge(v, w): adds an edge between vertices v and w
// undo(): undoes the last edge added by popping from the history stack
// connected(v, w): returns true if v and w are in the same
// connected component, false otherwise
// getSize(v): returns the size of the connected component containing
// vertex v
// componentBipartite(v): returns whether the connected component containing
// vertex v is bipartite or not
// color(v): returns the color of vertex v for one
// possible coloring of the graph, assuming the component is bipartite
// pathParity(v, w): return the parity of the path from
// v to w (false if even number of edges, true if odd), assuming the
// component is bipartite and v and w are connected
// Time Complexity:
// constructor: O(V)
// find, addEdge, connected, getSize: O(log V)
// componentBipartite, color, pathParity: O(log V)
// undo: O(1)
// Memory Complexity: O(V + Q) for Q calls to addEdge
// Tested:
// https://www.spoj.com/problems/BUGLIFE/
// https://cses.fi/problemset/task/1668
// https://codeforces.com/contest/813/problem/F
struct IncrementalBipartiteUndo {
  vector<int> UF; vector<bool> P, B; int cnt; bool bipartiteGraph;
  vector<tuple<int, int, int, bool, bool>> history;
  IncrementalBipartiteUndo(int V)
      : UF(V, -1), P(V, false), B(V, true), cnt(V), bipartiteGraph(true) {}
  // walks up to the root, accumulating the parity of the traversed path
  // (no path compression so that operations can be undone)
  pair<int, bool> find(int v) {
    bool parity = P[v];
    while (UF[v] >= 0) { v = UF[v]; parity = parity ^ P[v]; }
    return make_pair(v, parity);
  }
  void addEdge(int v, int w) {
    pair<int, bool> fv = find(v), fw = find(w);
    bool pv = fv.second, pw = fw.second; v = fv.first; w = fw.first;
    if (v == w) {
      // same component: an edge between equal parities closes an odd cycle
      history.emplace_back(v, w, 0, bipartiteGraph, B[v]);
      B[v] = B[v] && (pv != pw);
      bipartiteGraph = bipartiteGraph && B[v];
      return;
    }
    // union by size: make v the root of the larger component
    if (UF[v] > UF[w]) { swap(v, w); swap(pv, pw); }
    history.emplace_back(v, w, UF[w], P[w], B[v]);
    UF[v] += UF[w]; UF[w] = v;
    P[w] = !(pv != pw); // the new edge must flip parity across it
    B[v] = B[v] && B[w]; cnt--;
  }
  void undo() {
    int v, w, ufw; bool pw, bv; tie(v, w, ufw, pw, bv) = history.back();
    history.pop_back(); B[v] = bv;
    // ufw == 0 marks a same-component edge (roots always have UF < 0)
    if (ufw == 0) bipartiteGraph = pw;
    else { UF[w] = ufw; UF[v] -= ufw; P[w] = pw; cnt++; }
  }
  bool connected(int v, int w) { return find(v).first == find(w).first; }
  int getSize(int v) { return -UF[find(v).first]; }
  bool componentBipartite(int v) { return B[find(v).first]; }
  bool color(int v) { return find(v).second; }
  bool pathParity(int v, int w) { return color(v) != color(w); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Supports queries for whether a graph component is bipartite
// after edges have been added using Union Find by size with path compression
// Vertices are 0-indexed
// V: the number of vertices in the graph
// Fields:
// UF: a vector of integers representing the parent of each vertex in the
// tree, or the negative of the size of the connected component if that
// vertex is a root
// P: a vector of booleans representing the parity of the path to the root
// of that connected component
// B: a vector of booleans representing whether the component rooted at the
// vertex is bipartite for root vertices, undefined otherwise
// cnt: the current number of connected components
// bipartiteGraph: a boolean indicating whether the current graph is
// bipartite or not
// Functions:
// find(v): returns a pair containing the root of the connected component and
// the parity of the path from the root to vertex v
// addEdge(v, w): adds an edge between vertices v and w
// connected(v, w): returns true if v and w are in the same
// connected component, false otherwise
// getSize(v): returns the size of the connected component containing
// vertex v
// componentBipartite(v): returns whether the connected component containing
// vertex v is bipartite or not
// color(v): returns the color of vertex v for one
// possible coloring of the graph, assuming the component is bipartite
// pathParity(v, w): return the parity of the path from
// v to w (false if even number of edges, true if odd), assuming the
// component is bipartite and v and w are connected
// Time Complexity:
// constructor: O(V)
// find, addEdge, connected, getSize, componentBipartite, color, pathParity:
//     O(alpha V) amortized, O(log V) worst case
// Memory Complexity: O(V)
// Tested:
// Stress Tested
// https://www.spoj.com/problems/BUGLIFE/
// https://cses.fi/problemset/task/1668
// https://tlx.toki.id/problems/troc-16/D
struct IncrementalBipartite {
  vector<int> UF; vector<bool> P, B; int cnt; bool bipartiteGraph;
  IncrementalBipartite(int V)
      : UF(V, -1), P(V, false), B(V, true), cnt(V), bipartiteGraph(true) {}
  // walks up to the root accumulating path parity, halving the path
  // (repointing vertices at their grandparents) along the way
  pair<int, bool> find(int v) {
    bool parity = P[v];
    while (UF[v] >= 0) {
      int parent = UF[v];
      if (UF[parent] >= 0) {
        // path halving: reroute v past its parent, folding the parent's
        // parity into both the accumulator and P[v] so both stay correct
        parity = parity ^ P[parent];
        P[v] = P[v] ^ P[parent];
        UF[v] = UF[parent];
      }
      v = UF[v]; parity = parity ^ P[v];
    }
    return make_pair(v, parity);
  }
  void addEdge(int v, int w) {
    pair<int, bool> fv = find(v), fw = find(w);
    bool pv = fv.second, pw = fw.second; v = fv.first; w = fw.first;
    if (v == w) {
      // same component: an edge between equal parities closes an odd cycle
      B[v] = B[v] && (pv != pw);
      bipartiteGraph = bipartiteGraph && B[v];
      return;
    }
    // union by size: make v the root of the larger component
    if (UF[v] > UF[w]) { swap(v, w); swap(pv, pw); }
    UF[v] += UF[w]; UF[w] = v;
    P[w] = !(pv != pw); // the new edge must flip parity across it
    B[v] = B[v] && B[w]; cnt--;
  }
  bool connected(int v, int w) { return find(v).first == find(w).first; }
  int getSize(int v) { return -UF[find(v).first]; }
  bool componentBipartite(int v) { return B[find(v).first]; }
  bool color(int v) { return find(v).second; }
  bool pathParity(int v, int w) { return color(v) != color(w); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Determines whether an undirected graph is bipartite, or whether it has
// an odd cycle
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic undirected graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the graph
// Fields:
// bipartite: a boolean indicating whether the graph is bipartite or not
// color: a vector of booleans for one possible bipartite coloring
// oddCycle: a vector of the vertices in an odd cycle of the graph, empty
// if the graph is bipartite
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V + E)
// Memory Complexity: O(V)
// Tested:
// Stress Tested
// https://www.spoj.com/problems/BUGLIFE/
// https://cses.fi/problemset/task/1669
struct Bipartite {
  int V; bool bipartite; vector<bool> color; vector<int> oddCycle;
  // BFS 2-colouring; to[v] is the bfs tree parent (-1 for roots, -2 for
  // unvisited), q is the bfs queue, stk is scratch space used to reverse
  // one half of the odd cycle when one is found
  template <class Graph> Bipartite(const Graph &G)
      : V(G.size()), bipartite(true), color(V, false) {
    vector<int> to(G.size(), -2), q(G.size()), stk(G.size()); int top = 0;
    for (int s = 0; s < V && bipartite; s++) if (to[s] == -2) {
      int front = 0, back = 0; to[q[back++] = s] = -1; while (front < back) {
        int v = q[front++]; for (int w : G[v]) {
          if (to[w] == -2) color[q[back++] = w] = !color[to[w] = v];
          else if (color[w] == color[v]) {
            // v and w have the same colour, so the edge v-w closes an odd
            // cycle; climb both tree paths in lockstep (same colour implies
            // the paths meet at a common ancestor x == y)
            bipartite = false; int x = v, y = w; while (x != y) {
              x = to[stk[top++] = x]; oddCycle.push_back(y); y = to[y];
            }
            // oddCycle now holds w's side of the cycle; append the meeting
            // vertex followed by v's side (reversed via stk), then w again
            // to close the cycle
            stk[top++] = x; while (top > 0) oddCycle.push_back(stk[--top]);
            oddCycle.push_back(w); return;
          }
        }
      }
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the dominator tree rooted at a vertex
// A vertex dominates v another vertex w if all paths from the root to w must
// pass through v
// A vertex v is an immediate dominator of w if v dominates w and no other
// vertex dominates w
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic directed graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the graph
// Fields:
// idom: a vector of the immediate dominator of each vertex, or -1 if it
// is not connected to the root, or is the root vertex
// In practice, has a small constant
// Time Complexity:
// constructor: O(V + E log V)
// Memory Complexity: O(V + E)
// Tested:
// https://judge.yosupo.jp/problem/dominatortree
struct DominatorTree {
  // sdom[v]: the semidominator of v as a preorder index (initialized to
  //   v's own preorder number during the dfs, -1 if unvisited)
  // idom[v]: the immediate dominator of v (-1 for the root / unreachable)
  // par[v]: the dfs tree parent of v; ord[i]: the vertex with preorder i
  // U[v]: the result of eval(v) captured when v's bucket is flushed
  // UF, m: union find over processed vertices; m[v] is the vertex with
  //   minimum sdom on the path from v to its union find root
  // bucket[w]: vertices whose semidominator is w; H: the reverse graph
  int V, ind; vector<int> idom, sdom, par, ord, U, UF, m;
  vector<vector<int>> bucket, H;
  // union find with path compression that also keeps m[v] up to date
  int find(int v) {
    if (UF[v] == v) return v;
    int fv = find(UF[v]); if (sdom[m[v]] > sdom[m[UF[v]]]) m[v] = m[UF[v]];
    return UF[v] = fv;
  }
  // returns the vertex with minimum sdom on the path from v to its root
  int eval(int v) { find(v); return m[v]; }
  // dfs assigning preorder numbers and tree parents
  template <class Digraph> void dfs(const Digraph &G, int v) {
    ord[sdom[v] = ind++] = v;
    for (int w : G[v]) if (sdom[w] == -1) { par[w] = v; dfs(G, w); }
  }
  // Lengauer-Tarjan: compute semidominators in reverse preorder, then
  // resolve immediate dominators in two forward passes
  template <class Digraph> DominatorTree(const Digraph &G, int root)
      : V(G.size()), ind(0), idom(V, -1), sdom(V, -1), par(V, -1), ord(V, -1),
        U(V, -1), UF(V), m(V), bucket(V), H(V) {
    for (int v = 0; v < V; v++) {
      UF[v] = m[v] = v; for (int w : G[v]) H[w].push_back(v);
    }
    dfs(G, root); for (int i = ind - 1; i > 0; i--) {
      // sdom[w] = min over reverse edges (v, w) of sdom[eval(v)],
      // considering only v reachable from the root (sdom[v] >= 0)
      int w = ord[i]; for (int v : H[w]) if (sdom[v] >= 0)
        sdom[w] = min(sdom[w], sdom[eval(v)]);
      bucket[ord[sdom[w]]].push_back(w);
      // flush the parent's bucket now that the parent's subtree has been
      // processed, snapshotting eval for the second pass
      for (int v : bucket[par[w]]) U[v] = eval(v);
      bucket[UF[w] = par[w]].clear();
    }
    for (int i = 1; i < ind; i++) {
      // if eval found a vertex with a smaller semidominator on the path,
      // w's idom is deferred to that of U[w]; otherwise it is sdom[w]
      int w = ord[i], u = U[w];
      idom[w] = sdom[w] == sdom[u] ? sdom[w] : idom[u];
    }
    // translate idom from preorder indices back to vertex labels
    for (int i = 1; i < ind; i++) { int w = ord[i]; idom[w] = ord[idom[w]]; }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Recursive helper function
// Recursive helper function: Bron-Kerbosch with pivoting; R is the clique
// built so far, P the candidate vertices, X the already-excluded vertices
template <const int MAXV, class F>
void maximalCliques(const vector<bitset<MAXV>> &matrix, F &f,
                    bitset<MAXV> P, bitset<MAXV> X, bitset<MAXV> R) {
  if (P.none()) {
    // R is maximal only if no excluded vertex could still extend it
    if (X.none()) f(R);
    return;
  }
  // branch only on candidates outside the pivot's neighbourhood
  int pivot = (P | X)._Find_first();
  bitset<MAXV> cands = P & ~matrix[pivot];
  int V = matrix.size();
  for (int v = 0; v < V; v++) {
    if (!cands[v]) continue;
    R[v] = 1;
    maximalCliques<MAXV, F>(matrix, f, P & matrix[v], X & matrix[v], R);
    R[v] = P[v] = 0; X[v] = 1;
  }
}
// Runs a callback on the maximal cliques in a graph
// A clique is maximal if any vertex added to it would result in a non-clique
// Vertices are 0-indexed
// Template Arguments:
// MAXV: the maximum number of vertices in the graph
// F: the type of the function f
// Function Arguments:
// matrix: a matrix of bitsets representing the adjacency matrix of the
// graph, must be symmetric
// f(s): the function to run a callback on for each clique, where s is a
// bitset representing which vertices are in the clique
// In practice, has a very small constant
// Time Complexity: O(3^(V / 3)), much faster in practice
// Memory Complexity: O(V^2 / 64)
// Tested:
// https://open.kattis.com/problems/friends
template <const int MAXV, class F>
void maximalCliques(const vector<bitset<MAXV>> &matrix, F f) {
  // start with every vertex as a candidate, and empty excluded/current sets
  bitset<MAXV> P; P.set();
  bitset<MAXV> X, R;
  maximalCliques<MAXV, F>(matrix, f, P, X, R);
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../components/StronglyConnectedComponents.h"
using namespace std;
// Solves the two satisfiability problem and provides all possible values
// for each variable
// Given an implication graph, determine whether a consistent assignment exists
// Functions for creating an implication graph can be seen in ImplicationGraph.h
// Variables are 0-indexed
// Template Arguments:
// MAXN: the maximum number of variables
// Constructor Arguments:
// G: the implication graph with N * 2 vertices for N variables with vertex
// a * 2 + 1 representing the affirmative for variable a, and a * 2
// representing the negative for variable a
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
//       (which is a list of ints)
// size() const: returns the number of vertices in the graph
// Fields:
// scc: the SCC of the implication graph
// possibilities: a vector of integers representing the possible values for
// that variable; 0 if guaranteed to be false, 1 if guaranteed to be true,
// 2 otherwise
// In practice, has a very small constant
// Time Complexity:
// constructor: O(N + M + MAXN M / 32) for M equations
// Memory Complexity: O(N + M + MAXN M / 32)
// Tested:
// https://dmoj.ca/problem/wac1p5
template <const int MAXN> struct TwoSatExtended {
  // DAG: the edges of the SCC condensation; dp[i]: the set of SCC ids
  // reachable from SCC i (including i itself)
  vector<pair<int, int>> DAG; SCC scc; vector<bitset<MAXN * 2>> dp;
  vector<int> possibilities;
  template <class ImplicationGraph>
  TwoSatExtended(const ImplicationGraph &G) : DAG(), scc(G, DAG) {
    int N = G.size() / 2;
    // a variable sharing an SCC with its negation makes the instance
    // unsatisfiable; possibilities is left empty in that case
    for (int i = 0; i < N; i++) if (scc.id[i * 2] == scc.id[i * 2 + 1]) return;
    dp.resize(scc.components.size());
    for (int i = 0; i < int(dp.size()); i++) dp[i][i] = 1;
    // single-pass reachability over the condensation; relies on the edge /
    // id order produced by SCC (see StronglyConnectedComponents.h) so each
    // edge's target set is complete before it is merged -- verify if the
    // SCC implementation changes
    for (auto &&e : DAG) dp[e.first] |= dp[e.second];
    possibilities.assign(N, 2); for (int i = 0; i < N; i++) {
      // if the negation (i * 2) reaches the affirmative (i * 2 + 1), the
      // variable is forced true; the reverse forces it false
      if (dp[scc.id[i * 2]][scc.id[i * 2 + 1]]) possibilities[i] = 1;
      if (dp[scc.id[i * 2 + 1]][scc.id[i * 2]]) possibilities[i] = 0;
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Finds small minimum vertex covers
// Vertices are 0-indexed
// Constructor Arguments:
// V: the number of vertices in the graph
// e: a vector of pairs in the form (v, w) representing
// an undirected edge in the simple graph (no self loops or parallel edges)
// between vertices v and w
// K: the maximum size of the minimum vertex cover to find
// Fields:
// V: the number of vertices in the graph
//   E: the number of distinct edges in the graph
// minCover: the size of a minimum vertex cover with at most K vertices, or
// V if no such cover exists
// inCover: a vector of booleans representing whether each vertex is in the
// minimum vertex cover with at most K vertices or not
// In practice, has a very small constant
// Time Complexity:
// constructor: O(E log E + 2^K KV)
// Memory Complexity: O(V + E)
// Tested:
// https://dmoj.ca/problem/occ19g5
struct SmallMinVertexCover {
  // edges[1..E]: deduplicated edge list (edges[0] is a (-1, -1) sentinel)
  // curE: the number of currently uncovered edges
  // nxt: singly linked list threading the uncovered edges (0 is the head)
  // cur[e]: whether edge e is currently covered
  // history: stack of (edge, predecessor) pairs for undoing cover calls
  // temp: the best cover found so far (all vertices when none found)
  vector<pair<int, int>> edges; int V, E, curE, minCover; vector<int> nxt;
  vector<bool> cur, inCover, temp; vector<pair<int, int>> history;
  // normalizes each edge to v < w, sorts, removes duplicates, and prepends
  // a (-1, -1) sentinel so that real edges occupy indices 1..E
  vector<pair<int, int>> init(vector<pair<int, int>> e) {
    for (auto &&ei : e) if (ei.first > ei.second) swap(ei.first, ei.second);
    e.reserve(e.size() + 1); e.emplace_back(-1, -1); sort(e.begin(), e.end());
    e.erase(unique(e.begin(), e.end()), e.end()); return e;
  }
  // adds v to the cover, unlinking every uncovered edge incident to v and
  // recording each removal in history so it can be undone
  void cover(int v) {
    inCover[v] = 1; for (int e = nxt[0], last = 0; e <= E; e = nxt[e])
      if (!cur[e] && (edges[e].first == v || edges[e].second == v)) {
        cur[e] = 1; curE--; nxt[last] = nxt[e]; history.emplace_back(e, last);
      } else last = e;
  }
  // removes v from the cover, relinking (in reverse order) the edges
  // removed by the matching cover(v) call, i.e. those past revertSize
  void uncover(int v, int revertSize) {
    inCover[v] = 0;
    for (; int(history.size()) > revertSize; history.pop_back()) {
      cur[nxt[history.back().second] = history.back().first] = 0; curE++;
    }
  }
  // branch and bound with k vertices currently in the cover: any uncovered
  // edge must have an endpoint in the cover, so branch on both endpoints
  void solve(int K, int k) {
    if (k >= minCover) return; // cannot improve on the best cover found
    if (curE == 0) { minCover = k; temp = inCover; return; }
    // prune: each of the at most K - k remaining picks covers <= V edges
    if (curE > (K - k) * V) return;
    int v = -1, w = -1, curSize = history.size();
    for (int e = nxt[0]; e <= E; e = nxt[e])
      if (!cur[e]) { tie(v, w) = edges[e]; break; }
    cover(v); solve(K, k + 1); uncover(v, curSize);
    cover(w); solve(K, k + 1); uncover(w, curSize);
  }
  SmallMinVertexCover(int V, const vector<pair<int, int>> &e, int K)
      : edges(init(e)), V(V), E(edges.size() - 1), curE(E), minCover(V),
        nxt(E + 1), cur(E + 1, 0), inCover(V, 0), temp(V, 1) {
    // BUG FIX: copy the best cover found back into inCover; the original
    // assigned in the wrong direction (temp = inCover), overwriting the
    // saved answer with the backtracked all-false state
    iota(nxt.begin(), nxt.end(), 1); solve(K, 0); inCover = temp;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Functions for generating an implication graph
// Each function runs a callback on the created edges
// N variables will require a graph with N * 2 vertices, with vertex
// a * 2 + 1 representing the affirmative for variable a, and a * 2
// representing the negative for variable a
// Adds an implication (a -> b) with affA indicating if a is the affirmative
// (if true) or the negative (if false), and similarly with b
// Template Arguments:
// F: the type of f
// Functions Arguments:
// affA: whether a is affirmative
// a: the first variable
// affB: whether b is affirmative
// b: the second variable
// f(i, j): the function to run a callback on for the corresponding edge
// created by the implication
template <class F> void addImpl(bool affA, int a, bool affB, int b, F f) {
  // the literal for variable a is vertex 2a + 1 when affirmative and 2a
  // when negated (likewise for b); emit the single edge a -> b
  int from = a * 2 + (affA ? 1 : 0), to = b * 2 + (affB ? 1 : 0);
  f(from, to);
}
// Adds a disjunction (a | b) with affA indicating if a is the affirmative
// (if true) or the negative (if false), and similarly with b
// Template Arguments:
// F: the type of f
// Functions Arguments:
// affA: whether a is affirmative
// a: the first variable
// affB: whether b is affirmative
// b: the second variable
// f(i, j): the function to run a callback on for the corresponding edge
// created by the disjunction
template <class F> void addOr(bool affA, int a, bool affB, int b, F f) {
  // (a | b) is equivalent to the implications (!a -> b) and (!b -> a);
  // emit both edges directly (literal l of variable x is vertex 2x + 1
  // when affirmative, 2x when negated)
  f(a * 2 + (affA ? 0 : 1), b * 2 + (affB ? 1 : 0));
  f(b * 2 + (affB ? 0 : 1), a * 2 + (affA ? 1 : 0));
}
// Sets the variable a to true
// Template Arguments:
// F: the type of f
// Functions Arguments:
// a: the variable
// f(i, j): the function to run a callback on for the corresponding edge
// created by setting this variable to true
template <class F> void setTrue(int a, F f) {
  // forcing a to be true is the single implication (!a -> a), i.e. an edge
  // from the negative vertex 2a to the affirmative vertex 2a + 1
  f(a * 2, a * 2 + 1);
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../components/StronglyConnectedComponents.h"
using namespace std;
// Solves the two satisfiability problem
// Given an implication graph, determine whether a consistent assignment exists
// Functions for creating an implication graph can be seen in ImplicationGraph.h
// Variables are 0-indexed
// Constructor Arguments:
// G: the implication graph with N * 2 vertices for N variables with vertex
// a * 2 + 1 representing the affirmative for variable a, and a * 2
// representing the negative for variable a
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
//     (which is a list of ints)
// size() const: returns the number of vertices in the graph
// Fields:
// scc: the SCC of the implication graph
// x: a vector of booleans for one possible valid assignment, or empty if
// no valid assignment exists
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(N + M) for M equations
// Memory Complexity: O(N + M)
// Tested:
// https://judge.yosupo.jp/problem/two_sat
// https://codeforces.com/contest/1215/problem/F
// https://codeforces.com/contest/780/problem/D
struct TwoSat {
  SCC scc; vector<bool> x;
  // G must have 2 * N vertices for N variables: vertex a * 2 + 1 is the
  // affirmative of variable a and vertex a * 2 is its negation
  template <class ImplicationGraph>
  TwoSat(const ImplicationGraph &G) : scc(G) {
    assert(G.size() % 2 == 0);
    int N = G.size() / 2;
    // a variable in the same SCC as its negation makes the formula
    // unsatisfiable; x is left empty in that case
    bool satisfiable = true;
    for (int i = 0; i < N && satisfiable; i++)
      satisfiable = scc.id[i * 2] != scc.id[i * 2 + 1];
    if (!satisfiable) return;
    // variable i is assigned true exactly when the component id of its
    // negation (vertex i * 2) exceeds that of its affirmative
    x.resize(N);
    for (int i = 0; i < N; i++)
      x[i] = scc.id[i * 2] > scc.id[i * 2 + 1];
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Modified from https://github.com/kth-competitive-programming/kactl/blob/master/content/graph/MaximumClique.h,
// which itself is based on
// https://gitlab.com/janezkonc/mcqd/blob/master/mcqd.h,
// which has a GPL3 license
// Computes the clique with the maximum number of vertices in a graph
// Can be used to compute the maximum independent set by finding the maximum
// clique on the complement graph, with the minimum vertex cover being any
// vertex not in the maximum independent set
// Vertices are 0-indexed
// Constructor Arguments:
// matrix: a vector of vectors of booleans representing the adjacency matrix
// of the graph, must be symmetric
// Fields:
//   maximumClique: a vector of vertices in the maximum clique
// In practice, has a very small constant
// Time Complexity: exponential, much faster in practice
// Memory Complexity: O(V^2)
// Tested:
// https://dmoj.ca/problem/clique
// https://judge.yosupo.jp/problem/maximum_independent_set
struct MaximumClique {
  // threshold controlling how often candidate sets are re-sorted by degree;
  // tuning constant carried over from the MCQD implementation this is based on
  static constexpr const double limit = 0.025;
  // i: the vertex index; d: the vertex degree during init, later reused as
  // the colour-based pruning bound for that candidate
  struct Vertex { int i, d; Vertex(int i) : i(i), d(0) {} };
  vector<vector<bool>> matrix; vector<vector<int>> C;
  vector<int> maximumClique, q, S, old; double pk;
  // sorts the candidates r by decreasing degree within the candidate set and
  // assigns each the initial bound min(position, max degree) + 1
  void init(vector<Vertex> &r) {
    for (auto &&v: r) v.d = 0;
    for (auto &&v : r) for (auto &&w : r) v.d += matrix[v.i][w.i];
    sort(r.begin(), r.end(), [&] (const Vertex &v, const Vertex &w) {
      return v.d > w.d;
    });
    int mxD = r[0].d;
    for (int i = 0; i < int(r.size()); i++) r[i].d = min(i, mxD) + 1;
  }
  // branch and bound over candidate set R; q holds the clique being built,
  // lvl is the recursion depth (used by the re-sort heuristic counters S/old)
  void expand(vector<Vertex> &R, int lvl = 1) {
    S[lvl] += S[lvl - 1] - old[lvl]; old[lvl] = S[lvl - 1];
    while (!R.empty()) {
      // prune: even the bound d of the best remaining candidate cannot
      // beat the largest clique found so far
      if (int(q.size()) + R.back().d <= int(maximumClique.size())) return;
      q.push_back(R.back().i); vector<Vertex> T;
      // new candidate set: neighbours of the chosen vertex R.back()
      for (auto &&v : R) if (matrix[R.back().i][v.i]) T.emplace_back(v.i);
      if (!T.empty()) {
        // occasionally re-sort candidates by degree (see limit above)
        if (S[lvl]++ / ++pk < limit) init(T);
        // mnk: smallest colour class that could still improve the answer
        int mnk = max(int(maximumClique.size()) - int(q.size()) + 1, 1);
        // greedy colouring of T; C[k] collects the vertices of colour k
        int j = 0, mxk = 1; C[1].clear(); C[2].clear(); for (auto &&v : T) {
          int k = 1;
          auto f = [&] (int i) { return matrix[v.i][i]; };
          while (any_of(C[k].begin(), C[k].end(), f)) k++;
          if (k > mxk) C[(mxk = k) + 1].clear();
          // colours below mnk cannot extend the best clique; keep those
          // vertices at the front of T without a useful bound
          if (k < mnk) T[j++].i = v.i;
          C[k].push_back(v.i);
        }
        if (j > 0) T[j - 1].d = 0;
        // append the remaining vertices in colour order; colour k is the
        // pruning bound d for each
        for (int k = mnk; k <= mxk; k++)
          for (int i : C[k]) { T[j].i = i; T[j++].d = k; }
        expand(T, lvl + 1);
      } else if (q.size() > maximumClique.size()) maximumClique = q;
      q.pop_back(); R.pop_back();
    }
  }
  MaximumClique(const vector<vector<bool>> &matrix)
      : matrix(matrix), C(matrix.size() + 1), S(C.size()), old(S), pk(0) {
    vector<Vertex> V; V.reserve(matrix.size());
    for (int i = 0; i < int(matrix.size()); i++) V.emplace_back(i);
    init(V); expand(V);
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the maximum weighted matching in a general undirected weighted
// graph, such that each vertex is incident with at most one edge in the
// matching, and the selected edges have the maximum sum of weights
// Vertices are 0-indexed
// Template Arguments:
// T: the type of each edge weight
// Constructor Arguments:
// matrix: a V x V matrix containing the weight of the edge between two
// vertices, 0 if an edge doesn't exist, must be symmetric and
// non negative
// INF: a value for infinity
// Fields:
// V: the number of vertices in the graph
// cardinality: the cardinality of the maximum matching
// INF: a value for infinity
// cost: the maximum cost of the weighted matching
// mate: the other vertex in the matching, or -1 if unmatched
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V^3)
// Memory Complexity: O(V^2)
// Tested:
// https://uoj.ac/problem/81
// https://judge.yosupo.jp/problem/general_weighted_matching
template <class T> struct GeneralWeightedMaxMatch {
  struct Edge {
    int v, w; T weight; Edge() : v(0), w(0), weight(T()) {}
  };
  // Internally vertices are 1-indexed; indices V + 1 to VX denote blossoms
  // st[x]: the top-level blossom (or vertex) currently containing x
  // S[x]: label in the current search (-1 unlabeled, 0 outer, 1 inner)
  // lab[x]: dual variable of vertex/blossom x
  // slack[x]: outer vertex with the minimum reduced-cost edge into x
  // flo[b]: the cyclic list of sub-blossoms of blossom b
  // floFrom[b][x]: the sub-blossom of b containing original vertex x
  int V, VX, cardinality, curStamp; T INF, cost;
  vector<int> mate, match, slack, st, par, S, stamp; vector<T> lab;
  vector<vector<int>> flo, floFrom; vector<vector<Edge>> G; queue<int> q;
  // reduced cost of edge e; weights are doubled so duals stay integral
  // when T is an integer type
  T eDelta(const Edge &e) {
    return lab[e.v] + lab[e.w] - G[e.v][e.w].weight * T(2);
  }
  // outer vertex v may improve the best slack edge into x
  void updateSlack(int v, int x) {
    if (!slack[x] || eDelta(G[v][x]) < eDelta(G[slack[x]][x])) slack[x] = v;
  }
  // recomputes the best slack edge into x from scratch
  void setSlack(int x) {
    slack[x] = 0; for (int v = 1; v <= V; v++)
      if (G[v][x].weight > T() && st[v] != x && S[st[v]] == 0)
        updateSlack(v, x);
  }
  // pushes all original vertices contained in x onto the BFS queue
  void qPush(int x) {
    if (x <= V) q.push(x);
    else for (int t : flo[x]) qPush(t);
  }
  // sets the top-level blossom of x and all of its contents to b
  void setSt(int x, int b) {
    st[x] = b; if (x > V) for (int t : flo[x]) setSt(t, b);
  }
  // position of sub-blossom xr inside b's cycle; the cycle is reversed if
  // needed so the position is even, keeping the alternation valid
  int getPr(int b, int xr) {
    int pr = find(flo[b].begin(), flo[b].end(), xr) - flo[b].begin();
    if (pr % 2) {
      reverse(flo[b].begin() + 1, flo[b].end());
      return int(flo[b].size()) - pr;
    }
    return pr;
  }
  // matches v with w, recursively rematching inside blossoms and rotating
  // the blossom cycle so its base comes first
  void setMatch(int v, int w) {
    Edge &e = G[v][w]; match[v] = e.w; if (v <= V) return;
    int xr = floFrom[v][e.v], pr = getPr(v, xr);
    for (int i = 0; i < pr; i++) setMatch(flo[v][i], flo[v][i ^ 1]);
    setMatch(xr, w); rotate(flo[v].begin(), flo[v].begin() + pr, flo[v].end());
  }
  // augments the matching along the alternating tree path through (v, w)
  void augment(int v, int w) {
    while (true) {
      int xnw = st[match[v]]; setMatch(v, w); if (!xnw) return;
      setMatch(xnw, v = st[par[w = xnw]]);
    }
  }
  // lowest common ancestor of v and w in the alternating tree, located by
  // advancing both paths alternately using timestamps
  int lca(int v, int w) {
    for (curStamp++; v || w; swap(v, w)) {
      if (!v) continue;
      if (stamp[v] == curStamp) return v;
      stamp[v] = curStamp; v = st[match[v]]; if (v) v = st[par[v]];
    }
    return 0;
  }
  // contracts the odd cycle through v, w, and their common ancestor anc
  // into a new (or reused) blossom index b
  void addBlossom(int v, int anc, int w) {
    int b = V + 1; while (b <= VX && st[b]) b++;
    if (b > VX) VX++;
    lab[b] = T(); S[b] = 0; match[b] = match[anc]; flo[b] = vector<int>{anc};
    auto blossom = [&] (int x) {
      for (int y; x != anc; x = st[par[y]]) {
        flo[b].push_back(x); flo[b].push_back(y = st[match[x]]); qPush(y);
      }
    };
    blossom(v); reverse(flo[b].begin() + 1, flo[b].end()); blossom(w);
    setSt(b, b);
    for (int x = 1; x <= VX; x++) G[b][x].weight = G[x][b].weight = T();
    for (int x = 1; x <= V; x++) floFrom[b][x] = 0;
    // the new blossom inherits the best incident edges of its sub-blossoms
    for (int xs : flo[b]) {
      for (int x = 1; x <= VX; x++)
        if (G[b][x].weight == T(0) || eDelta(G[xs][x]) < eDelta(G[b][x])) {
          G[b][x] = G[xs][x]; G[x][b] = G[x][xs];
        }
      for (int x = 1; x <= V; x++) if (floFrom[xs][x]) floFrom[b][x] = xs;
    }
    setSlack(b);
  }
  // dissolves blossom b (its dual reached zero), relabeling its contents
  void expandBlossom(int b) {
    for (int t : flo[b]) setSt(t, t);
    int xr = floFrom[b][G[b][par[b]].v], pr = getPr(b, xr);
    for (int i = 0; i < pr; i += 2) {
      int xs = flo[b][i], xns = flo[b][i + 1]; par[xs] = G[xns][xs].v;
      S[xs] = 1; S[xns] = slack[xs] = slack[xns] = 0; qPush(xns);
    }
    S[xr] = 1; par[xr] = par[b];
    for (int i = pr + 1; i < int(flo[b].size()); i++) {
      int xs = flo[b][i]; S[xs] = -1; setSlack(xs);
    }
    st[b] = 0;
  }
  // processes a tight edge e; returns true if the matching was augmented
  bool onFoundEdge(const Edge &e) {
    int v = st[e.v], w = st[e.w]; if (S[w] == -1) {
      // grow the alternating tree: w becomes inner, its mate outer
      par[w] = e.v; S[w] = 1; slack[w] = 0;
      int nv = st[match[w]]; S[nv] = slack[nv] = 0; qPush(nv);
    } else if (S[w] == 0) {
      // both endpoints outer: either an augmenting path or a new blossom
      int anc = lca(v, w);
      if (!anc) { augment(v, w); augment(w, v); return true; }
      addBlossom(v, anc, w);
    }
    return false;
  }
  // one phase of the primal-dual method; returns true if an augmenting
  // path was found (the matched pair count increases by one)
  bool matching() {
    q = queue<int>(); for (int x = 1; x <= VX; x++) {
      S[x] = -1; slack[x] = 0;
      if (st[x] == x && !match[x]) { par[x] = S[x] = 0; qPush(x); }
    }
    if (q.empty()) return false;
    while (true) {
      while (!q.empty()) {
        int v = q.front(); q.pop(); if (S[st[v]] == 1) continue;
        for (int w = 1; w <= V; w++)
          if (G[v][w].weight > T() && st[v] != st[w]) {
            if (eDelta(G[v][w]) == T()) {
              if (onFoundEdge(G[v][w])) return true;
            } else updateSlack(v, st[w]);
          }
      }
      // the dual adjustment d is the minimum over all dual constraints
      T d = INF; for (int b = V + 1; b <= VX; b++)
        if (st[b] == b && S[b] == 1) d = min(d, lab[b] / T(2));
      for (int x = 1; x <= VX; x++) if (st[x] == x && slack[x]) {
        if (S[x] == -1) d = min(d, eDelta(G[slack[x]][x]));
        else if (S[x] == 0) d = min(d, eDelta(G[slack[x]][x]) / T(2));
      }
      // apply the dual adjustment to original vertices and blossoms
      for (int v = 1; v <= V; v++) {
        if (S[st[v]] == 0) {
          if (lab[v] <= d) return false;
          lab[v] -= d;
        } else if (S[st[v]] == 1) lab[v] += d;
      }
      for (int b = V + 1; b <= VX; b++) if (st[b] == b && S[b] != -1) {
        if (S[b] == 0) lab[b] += d * T(2);
        else lab[b] -= d * T(2);
      }
      // edges that just became tight may extend the tree or augment
      q = queue<int>(); for (int x = 1; x <= VX; x++)
        if (st[x] == x && slack[x] && st[slack[x]] != x
            && eDelta(G[slack[x]][x]) == T() && onFoundEdge(G[slack[x]][x]))
          return true;
      // inner blossoms whose dual reached zero must be expanded
      for (int b = V + 1; b <= VX; b++)
        if (st[b] == b && S[b] == 1 && lab[b] == T()) expandBlossom(b);
    }
    return false;
  }
  GeneralWeightedMaxMatch(const vector<vector<T>> &matrix,
                          T INF = numeric_limits<T>::max())
      : V(matrix.size()), VX(V), cardinality(0), curStamp(0), INF(INF),
        cost(T()), mate(V, -1), match(V * 2 + 1, 0), slack(V * 2 + 1, 0),
        st(V * 2 + 1, 0), par(V * 2 + 1, 0), S(V * 2 + 1, 0),
        stamp(V * 2 + 1, 0), lab(V * 2 + 1, T()), flo(V * 2 + 1),
        floFrom(V * 2 + 1, vector<int>(V + 1, 0)),
        G(V * 2 + 1, vector<Edge>(V * 2 + 1)) {
    iota(st.begin(), st.begin() + V + 1, 0); T mx = T();
    for (int v = 1; v <= V; v++) for (int w = 1; w <= V; w++) {
      G[v][w].v = v; G[v][w].w = w; floFrom[v][w] = (v == w ? v : 0);
      mx = max(mx, G[v][w].weight = matrix[v - 1][w - 1]);
      // check the documented non-negativity precondition; previously this
      // assert ran before the weight was copied from matrix, so it only
      // checked the default-constructed T() and always passed trivially
      assert(G[v][w].weight >= T());
    }
    // initialize all vertex duals to the maximum edge weight
    fill(lab.begin() + 1, lab.begin() + V + 1, mx);
    while (matching()) cardinality++;
    // translate the 1-indexed internal matching to the 0-indexed mate
    // array and accumulate the total cost once per matched pair
    for (int v = 1; v <= V; v++) if (match[v] && match[v] < v) {
      mate[mate[v - 1] = G[v][match[v]].w - 1] = v - 1;
      cost += G[v][match[v]].weight;
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Given N people of type A and M people of type B, and a list of their ranked
// preferences for partners, the goal is to arrange min(N, M) pairs such that
// if a person x of type A prefers a person y of type B more than their
// current partner, then person y prefers their current partner more than x
// It is guaranteed that a solution always exists
// Constructor Arguments:
// aPrefs: a matrix of size N x M with aPrefs[i][j] representing the jth
// preferred choice for the partner of type B for person i of type A;
// aPrefs[i] must be a permutation from 0 to M - 1
// bPrefs: a matrix of size M x N with bPrefs[j][i] representing the ith
// preferred choice for the partner of type A for person j of type B;
// bPrefs[j] must be a permutation from 0 to N - 1
// Fields:
// N: the number of people of type A
// M: the number of people of type B
// bForA: a vector representing the partner of type B for each person of
// type A, or -1 if that person of type A is unmatched
// aForB: a vector representing the partner of type A for each person of
// type B, or -1 if that person of type B is unmatched
// In practice, has a moderate constant
// Time Complexity:
//   constructor: O(NM)
// Memory Complexity: O(NM)
// Tested:
// https://www.spoj.com/problems/STABLEMP/
// https://open.kattis.com/problems/jealousyoungsters
struct StableMarriage {
  int N, M; vector<int> bForA, aForB;
  // Gale-Shapley: the smaller side proposes, so the roles are flipped
  // whenever N > M and flipped back before returning
  StableMarriage(const vector<vector<int>> &aPrefs,
                 const vector<vector<int>> &bPrefs)
      : N(aPrefs.size()), M(bPrefs.size()), bForA(N, -1), aForB(M, -1) {
    bool flipped = N > M;
    if (flipped) { swap(N, M); bForA.swap(aForB); }
    const vector<vector<int>> &P = flipped ? bPrefs : aPrefs;
    const vector<vector<int>> &Q = flipped ? aPrefs : bPrefs;
    // pos[b][a]: how highly b ranks proposer a (lower is better)
    vector<vector<int>> pos(M, vector<int>(N));
    for (int b = 0; b < M; b++)
      for (int i = 0; i < N; i++) pos[b][Q[b][i]] = i;
    // each unmatched proposer works down its list, resuming at nxt[a]
    // if it is displaced by a better proposal later
    vector<int> nxt(N, 0); queue<int> unmatched;
    for (int a = 0; a < N; a++) unmatched.push(a);
    while (!unmatched.empty()) {
      int a = unmatched.front(); unmatched.pop();
      for (bool engaged = false; !engaged;) {
        int b = P[a][nxt[a]++];
        if (aForB[b] == -1) { aForB[b] = a; engaged = true; }
        else if (pos[b][a] < pos[b][aForB[b]]) {
          // b prefers a over its current partner, who becomes unmatched
          unmatched.push(aForB[b]); aForB[b] = a; engaged = true;
        }
      }
    }
    for (int b = 0; b < M; b++) if (aForB[b] != -1) bForA[aForB[b]] = b;
    if (flipped) { swap(N, M); bForA.swap(aForB); }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Solves the assignment problem of matching N workers to M jobs with the
// minimum cost where each worker can only be assigned to at most 1 job and
// each job can only be assigned to at most 1 worker and there are exactly
// min(N, M) assignments
// Maximum cost assignment can be found by negating all non infinity values in
// the matrix and taking the negative of the minimum cost
// Maximum cost matching can be found by negating all non infinity values in
// the matrix, setting all infinity values to 0 and taking the negative of
// the minimum cost
// Template Arguments:
// T: the type of the cost
// Constructor Arguments:
// A: a matrix of size N x M with A[i][j] representing the cost of assigning
// worker i to job j
// INF: a value for infinity
// Fields:
// N: the number of workers
// M: the number of jobs
// cost: the minimum cost for a valid assignment
// jobForWorker: a vector representing the job assigned to each worker, or -1
// if that worker is not assigned any job
// workerForJob: a vector representing the worker assigned to each job, or -1
// if that job is not assigned any worker
// In practice, has a very small constant
// Time Complexity:
// constructor: O(N^2 M)
// Memory Complexity: O(NM)
// Tested:
// https://judge.yosupo.jp/problem/assignment
// https://open.kattis.com/problems/cordonbleu
// https://www.spoj.com/problems/BABY/
// https://dmoj.ca/problem/tle17c7p5
// https://open.kattis.com/problems/workers
template <class T> struct HungarianAlgorithm {
  int N, M; T cost; vector<int> jobForWorker, workerForJob;
  // A is taken by value since it may be transposed in place
  HungarianAlgorithm(vector<vector<T>> A, T INF = numeric_limits<T>::max())
      : N(A.size()), M(A.empty() ? 0 : A[0].size()), cost(T()) {
    // transpose the matrix if needed so that N <= M; undone at the end
    bool rev = N > M; if (rev) {
      swap(N, M); vector<vector<T>> B(N, vector<T>(M));
      for (int i = 0; i < N; i++) for (int j = 0; j < M; j++)
        B[i][j] = A[j][i];
      A.swap(B);
    }
    // job M is a virtual job that roots each augmenting search; worker
    // index N means "no worker assigned"
    jobForWorker.assign(N, -1); workerForJob.assign(M + 1, N);
    // costs and dual potentials are pairs (a, b) meaning a * INF + b,
    // compared lexicographically, so edges with cost >= INF are only ever
    // used when no finite assignment exists
    auto add = [&] (pair<int, T> &a, const pair<int, T> &b) {
      a.first += b.first; a.second += b.second;
    };
    auto sub = [&] (pair<int, T> &a, const pair<int, T> &b) {
      a.first -= b.first; a.second -= b.second;
    };
    // d1: dual potentials for workers; d2: dual potentials for jobs
    vector<pair<int, T>> d1(N + 1, make_pair(0, T()));
    vector<pair<int, T>> d2(M + 1, make_pair(0, T()));
    // assign each worker in turn via a shortest augmenting path (Dijkstra
    // on reduced costs, in the style of Jonker-Volgenant)
    for (int i = 0; i < N; i++) {
      int j0 = M; workerForJob[j0] = i;
      // dist[j]: best reduced distance to job j found so far; par[j]: the
      // previous job on that path; done[j]: j has been expanded
      vector<pair<int, T>> dist(M + 1, make_pair(1, T()));
      vector<int> par(M + 1, -1); vector<bool> done(M + 1, false); do {
        done[j0] = true; int i0 = workerForJob[j0], j1 = M;
        pair<int, T> delta = make_pair(1, T());
        for (int j = 0; j < M; j++) if (!done[j]) {
          // an edge with cost >= INF counts as one INF unit
          pair<int, T> d = A[i0][j] >= INF ? make_pair(1, T())
            : make_pair(0, A[i0][j]);
          sub(d, d1[i0]); sub(d, d2[j]);
          if (dist[j].first > 0 || d < dist[j]) { dist[j] = d; par[j] = j0; }
          if (dist[j] < delta) delta = dist[j1 = j];
        }
        // move to the closest unexpanded job and shift the potentials so
        // reduced costs of expanded vertices stay zero
        j0 = j1; for (int j = 0; j <= M; j++) {
          if (done[j]) { add(d1[workerForJob[j]], delta); sub(d2[j], delta); }
          else sub(dist[j], delta);
        }
      } while (workerForJob[j0] != N);
      // augment: walk the parent chain, shifting workers between jobs
      for (; j0 != M; j0 = par[j0]) workerForJob[j0] = workerForJob[par[j0]];
    }
    // drop the virtual job and translate sentinel N back to -1
    workerForJob.pop_back(); for (int j = 0; j < M; j++) {
      if (workerForJob[j] == N) workerForJob[j] = -1;
      else jobForWorker[workerForJob[j]] = j;
    }
    // -d2[M] accumulates the total assignment cost; a negative INF count
    // means an INF edge was forced, so no finite assignment exists
    cost = d2[M].first < 0 ? INF : -d2[M].second;
    if (rev) { swap(N, M); jobForWorker.swap(workerForJob); }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the maximum matching in a general undirected graph, such that each
// vertex is incident with at most one edge in the matching
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic undirected graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the graph
// Fields:
// V: the number of vertices in the graph
// cardinality: the cardinality of the maximum matching
// mate: the other vertex in the matching, or -1 if unmatched
// In practice, has a very small constant
// Time Complexity:
// constructor: O(VE log V)
// Memory Complexity: O(V)
// Tested:
// https://judge.yosupo.jp/problem/general_matching
// https://codeforces.com/contest/1089/problem/B
// https://uoj.ac/problem/79
struct GeneralUnweightedMaxMatching {
  // Labeling-based implementation in the style of Gabow's algorithm;
  // vertex V is a sentinel meaning "no mate"
  // label[v].first: -1 if unlabeled; otherwise v carries a vertex label
  // (second == -1) or an edge label (x, y) assigned when an odd cycle
  // (blossom) is discovered
  // first[v]: the first non-outer vertex on v's alternating path to the
  // root, maintained with path compression in find
  int V, cardinality, front, back; vector<int> mate, first, q;
  vector<pair<int, int>> label;
  // augments the matching along the alternating path encoded by the
  // labels, making w the new mate of v
  void rematch(int v, int w) {
    int t = mate[v]; mate[v] = w; if (mate[t] != v) return;
    if (label[v].second == -1) rematch(mate[t] = label[v].first, t);
    else { int x, y; tie(x, y) = label[v]; rematch(x, y); rematch(y, x); }
  }
  // first non-outer vertex on v's path, with path compression
  int find(int v) {
    return label[first[v]].first < 0 ? first[v] : first[v] = find(first[v]);
  }
  // edge (x, y) joins two outer paths: labels the vertices of the odd
  // cycle formed, up to the paths' common ancestor join
  void relabel(int x, int y) {
    int r = find(x), s = find(y), join = 0; if (r == s) return;
    // ~x makes the temporary marker negative so find treats the marked
    // vertices as non-outer while the common ancestor is being located
    pair<int, int> h = label[r] = label[s] = make_pair(~x, y); while (true) {
      if (s != V) swap(r, s);
      r = find(label[mate[r]].first); if (label[r] == h) { join = r; break; }
      else label[r] = h;
    }
    // give every vertex on both cycle halves the edge label (x, y) and
    // point its first pointer at the common ancestor
    for (int v : {first[x], first[y]})
      for (; v != join; v = first[label[mate[v]].first]) {
        label[v] = make_pair(x, y); first[q[back++] = v] = join;
      }
  }
  // BFS for an augmenting path from unmatched vertex s; returns true and
  // augments the matching if one is found
  template <class Graph> bool augment(const Graph &G, int s) {
    front = back = 0; label[q[back++] = s] = make_pair(first[s] = V, -1);
    while (front < back) {
      int v = q[front++]; for (int w : G[v]) {
        // w unmatched: augmenting path found
        if (mate[w] == V && w != s) { rematch(mate[w] = v, w); return true; }
        // w already outer: the edge closes an odd cycle
        else if (label[w].first >= 0) relabel(v, w);
        // w unlabeled and matched: grow the tree through w's mate
        else if (label[mate[w]].first == -1)
          label[mate[first[q[back++] = mate[w]] = w]].first = v;
      }
    }
    return false;
  }
  template <class Graph> GeneralUnweightedMaxMatching(const Graph &G)
      : V(G.size()), cardinality(0), front(0), back(0), mate(V + 1, V),
        first(V + 1, V), q(V), label(V + 1, make_pair(-1, -1)) {
    for (int v = 0; v < V; v++) if (mate[v] == V && augment(G, v)) {
      // reset only the labels touched during the successful search
      cardinality++; for (int i = 0; i < back; i++)
        label[q[i]] = label[mate[q[i]]] = make_pair(-1, -1);
      label[V] = make_pair(-1, -1);
    }
    // convert the sentinel V back to -1 for the public interface
    mate.pop_back(); for (auto &&m : mate) if (m == V) m = -1;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Given N people, and a list of their ranked preferences for roommates,
// the goal is to arrange N / 2 pairs such that if a person x prefers
// a person y more than their current roommate, then person y prefers
// their current roommate more than x
// Constructor Arguments:
// prefs: a matrix of size N x (N - 1) with prefs[i][j] representing the jth
// preferred choice for the person i
// prefs[i] must be a permutation of 0 to N - 1, excluding i
// Fields:
// N: the number of people
// mate: a vector representing the roommate of each person; all -1 if there
// is no stable matching
// In practice, has a moderate constant
// Time Complexity:
//   constructor: O(N^2)
// Memory Complexity: O(N^2)
// Tested:
// https://codeforces.com/contest/1423/problem/A
struct StableRoommates {
  // NoMatch is thrown internally when a preference list runs empty,
  // which means no stable matching exists
  struct NoMatch {}; int N; vector<int> mate;
  // prefs is taken by value since phase 2 compacts the lists in place
  StableRoommates(vector<vector<int>> prefs) : N(prefs.size()), mate(N, -1) {
    if (N % 2 == 1 || N <= 0) return;
    // rnk[i][j]: position of person j in person i's list (lower is better)
    vector<vector<int>> rnk(N, vector<int>(N, 0));
    // [fr[i], bk[i]): window of prefs[i] still under consideration;
    // proposed[i]: the person whose proposal i currently holds, or -1
    vector<int> fr(N, 0), bk(N, N - 1), proposed(N, -1);
    // active[i][j]: false once the pair (i, j) has been ruled out
    vector<vector<bool>> active(N, vector<bool>(N, true)); queue<int> q;
    auto rem = [&] (int i, int j) { active[i][j] = active[j][i] = false; };
    // shrinks i's window past inactive entries; an empty window means no
    // stable matching can exist
    auto clip = [&] (int i) {
      while (fr[i] < bk[i] && !active[i][prefs[i][fr[i]]]) fr[i]++;
      while (fr[i] < bk[i] && !active[i][prefs[i][bk[i] - 1]]) bk[i]--;
      if (fr[i] >= bk[i]) throw NoMatch();
    };
    // j accepts i's proposal; j then rules out everyone it likes less
    // than i (trimming the back of its list)
    auto add = [&] (int i, int j) {
      proposed[mate[i] = j] = i; while (true) {
        clip(j); if (prefs[j][bk[j] - 1] != i) rem(j, prefs[j][bk[j] - 1]);
        else break;
      }
    };
    // the proposal holder of the second active entry of i's list (used to
    // trace rotations in phase 2); compacts the front of the list as a
    // side effect but leaves the window unchanged
    auto nxt = [&] (int i) {
      clip(i); int j = prefs[i][fr[i]++];
      clip(i); prefs[i][--fr[i]] = j; return proposed[prefs[i][fr[i] + 1]];
    };
    for (int i = 0; i < N; i++) {
      q.push(i); for (int j = 0; j < N - 1; j++) rnk[i][prefs[i][j]] = j;
    }
    try {
      // phase 1: proposal rounds in the style of Gale-Shapley; a person
      // displaced from a proposal re-enters the queue
      while (!q.empty()) {
        int i = q.front(); q.pop(); while (true) {
          clip(i); int j = prefs[i][fr[i]], i2 = proposed[j];
          if (i2 != -1 && rnk[j][i2] < rnk[j][i]) { rem(i, j); continue; }
          if (i2 != -1) { mate[i2] = proposed[j] = -1; q.push(i2); }
          add(i, j); break;
        }
      }
      // phase 2: while someone's list has more than one entry, locate a
      // rotation (cycle detection with a slow and a fast pointer over
      // nxt) and eliminate it
      int cur = 0; while (true) {
        for (; cur < N; cur++) {
          clip(cur); if (bk[cur] - fr[cur] > 1) break;
        }
        if (cur == N) break;
        vector<int> cyc1, cyc2; int i = cur, j = i;
        do { i = nxt(i); j = nxt(nxt(j)); } while (i != j);
        do { cyc1.push_back(j); j = nxt(j); } while (i != j);
        // break the rotation's current pairs, then re-propose shifted by
        // one position around the cycle
        for (int k : cyc1) {
          j = mate[k]; cyc2.push_back(j); mate[k] = proposed[j] = -1;
          rem(k, j);
        }
        for (int k = 0; k < int(cyc1.size()); k++)
          add(cyc1[k], cyc2[(k + 1) % cyc2.size()]);
      }
    } catch (NoMatch &) { fill(mate.begin(), mate.end(), -1); }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the maximum matching (and minimum vertex cover) on
// an unweighted bipartite graph
// The maximum independent set is any vertex not in the minimum vertex cover
// Vertices are 0-indexed
// Constructor Arguments:
// G: a generic undirected bipartite graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints)
// size() const: returns the number of vertices in the graph
// color: a vector of booleans of size V indicating the color of each vertex
// Fields:
// V: the number of vertices in the graph
// cardinality: the cardinality of the maximum matching
// mate: the other vertex in the matching, or -1 if unmatched
// inCover: a vector of booleans indicating whether a vertex is in the
// minimum vertex cover or not
// In practice, has a very small constant
// Time Complexity:
// constructor: O((V + E) sqrt V)
// Memory Complexity: O(V)
// Tested:
// https://www.spoj.com/problems/MATCHING/
// https://judge.yosupo.jp/problem/bipartitematching
// https://dmoj.ca/problem/coci19c6p4
struct HopcroftKarpMaxMatch {
  int V, cardinality;
  vector<int> mate, lvl, q, type0; vector<bool> color, inCover, vis;
  // builds the level graph from every unmatched type 0 vertex; returns
  // true if at least one augmenting path exists in this phase
  template <class BipartiteGraph> bool bfs(const BipartiteGraph &G) {
    int head = 0, tail = 0;
    for (int u : type0) {
      if (mate[u] == -1) { lvl[u] = 0; q[tail++] = u; }
      else lvl[u] = -1;
    }
    while (head < tail) {
      int u = q[head++];
      for (int x : G[u]) {
        if (mate[x] == -1) return true;
        if (lvl[mate[x]] == -1) {
          lvl[mate[x]] = lvl[u] + 1; q[tail++] = mate[x];
        }
      }
    }
    return false;
  }
  // follows level-increasing edges to find one augmenting path from u,
  // flipping matched edges along it; lvl[u] is cleared on failure so u is
  // not explored again in this phase
  template <class BipartiteGraph> bool dfs(const BipartiteGraph &G, int u) {
    for (int x : G[u]) {
      bool usable = mate[x] == -1
          || (lvl[mate[x]] == lvl[u] + 1 && dfs(G, mate[x]));
      if (usable) { mate[u] = x; mate[x] = u; return true; }
    }
    lvl[u] = -1; return false;
  }
  // marks every vertex reachable from an unmatched type 0 vertex along
  // alternating paths (unmatched edge from type 0, matched edge from
  // type 1) for the Koenig vertex cover construction
  template <class BipartiteGraph>
  void dfsVertexCover(const BipartiteGraph &G, int u) {
    if (vis[u]) return;
    vis[u] = true;
    for (int x : G[u]) if ((mate[u] == x) == color[u]) dfsVertexCover(G, x);
  }
  template <class BipartiteGraph>
  HopcroftKarpMaxMatch(const BipartiteGraph &G, const vector<bool> &color)
      : V(G.size()), cardinality(0), mate(V, -1), lvl(V), q(V),
        color(color), inCover(V, false), vis(V, false) {
    for (int u = 0; u < V; u++) if (!color[u]) type0.push_back(u);
    // each phase: one BFS builds levels, then DFS extracts a maximal set
    // of vertex-disjoint shortest augmenting paths
    while (bfs(G))
      for (int u : type0)
        if (mate[u] == -1 && dfs(G, u)) cardinality++;
    // Koenig's theorem: the cover is the unvisited type 0 vertices plus
    // the visited type 1 vertices
    for (int u : type0) if (mate[u] == -1) dfsVertexCover(G, u);
    for (int u = 0; u < V; u++) inCover[u] = vis[u] == color[u];
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Floyd Warshall's all pairs shortest path algorithm for weighted graphs
// with negative weight
// Able to detect negative cycles
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// matrix: a V x V matrix containing the minimum weight of a directed edge
// between two vertices, INF if an edge doesn't exist
// INF: a value for infinity, must be negatable
// Fields:
// dist: a vector of vectors of the shortest distance between each pair
// of vertices, INF if there is no path, -INF if the shortest path
// has no lower bound
// par: a vector of vectors of the parent vertex for each vertex in the
// shortest path tree for each source vertex (par[v][w] is the parent
// of vertex w in the shortest path tree from vertex v), or -1 if there is
// no parent
// hasNegativeCycle: a boolean that is true if there is a negative cycle
// in the graph and false otherwise
// Functions:
// getPath(v, w): returns the list of directed edges on the path from
// vertex v to vertex w
// In practice, has a very small constant
// Time Complexity:
// constructor: O(V^3)
// getPath: O(V)
// Memory Complexity: O(V^2)
// Tested:
// Fuzz and Stress Tested
// https://open.kattis.com/problems/allpairspath
template <class T> struct FloydWarshallAPSP {
  using Edge = tuple<int, int, T>; vector<vector<T>> dist;
  vector<vector<int>> par; T INF; bool hasNegativeCycle;
  // matrix[v][w] is the minimum weight of a directed edge from v to w,
  // or INF if no such edge exists
  FloydWarshallAPSP(const vector<vector<T>> &matrix,
                    T INF = numeric_limits<T>::max())
      : dist(matrix), par(dist.size(), vector<int>(dist.size(), -1)),
        INF(INF), hasNegativeCycle(false) {
    int V = dist.size();
    // base case: direct edges, and zero-length paths to oneself
    for (int v = 0; v < V; v++) {
      for (int w = 0; w < V; w++) if (dist[v][w] < INF) par[v][w] = v;
      if (dist[v][v] > T()) dist[v][v] = T();
      par[v][v] = -1;
    }
    // relax every pair through each intermediate vertex k in turn
    for (int k = 0; k < V; k++) for (int v = 0; v < V; v++) {
      if (dist[v][k] >= INF) continue;
      for (int w = 0; w < V; w++) {
        if (dist[k][w] >= INF) continue;
        T cand = dist[v][k] + dist[k][w];
        if (cand < dist[v][w]) { dist[v][w] = cand; par[v][w] = par[k][w]; }
      }
    }
    // any pair routable through a vertex on a negative cycle has an
    // unbounded shortest path, marked with -INF
    for (int v = 0; v < V; v++) for (int w = 0; w < V; w++)
      for (int k = 0; k < V; k++)
        if (dist[k][k] < T() && dist[v][k] < INF && dist[k][w] < INF) {
          dist[v][w] = -INF; hasNegativeCycle = true; break;
        }
  }
  // reconstructs the directed edges on the shortest path from v to w
  vector<Edge> getPath(int v, int w) {
    vector<Edge> path;
    while (par[v][w] != -1) {
      int p = par[v][w];
      path.emplace_back(p, w, dist[v][w] - dist[v][p]);
      w = p;
    }
    reverse(path.begin(), path.end()); return path;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Bellman Ford's single source shortest path algorithm for weighted graphs
// with negative weights
// Able to detect negative cycles
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// V: the number of vertices in the graph
// edges: a vector of tuples in the form (v, w, weight) representing
// a directed edge in the graph from v to w with weight of weight
// s: a single source vertex
// src: a vector of source vertices
// INF: a value for infinity, must be negatable
// Fields:
// dist: vector of shortest distance from the closest source vertex to each
// vertex, or INF if there is no path, -INF if the shortest path
// has no lower bound
// par: the parent vertex for each vertex in the shortest path tree, or
// -1 if there is no parent
// hasNegativeCycle: a boolean that is true if there is a negative cycle
// in the graph and false otherwise
// Functions:
// getPath(v): returns the list of directed edges on the path from the
// closest source vertex to vertex v
// In practice, has a very small constant
// Time Complexity:
// constructor: O(E (V + log E))
// getPath: O(V)
// Memory Complexity: O(V + E)
// Tested:
// Stress Tested
// https://open.kattis.com/problems/shortestpath3
// https://dmoj.ca/problem/sssp
template <class T> struct BellmanFordSSSP {
  using Edge = tuple<int, int, T>; vector<T> dist; vector<int> par; T INF;
  bool hasNegativeCycle;
  // edges is taken by value since it is sorted (a cache-friendliness
  // heuristic); srcs lists all source vertices
  BellmanFordSSSP(int V, vector<Edge> edges, const vector<int> &srcs,
                  T INF = numeric_limits<T>::max())
      : dist(V, INF), par(V, -1), INF(INF), hasNegativeCycle(false) {
    sort(edges.begin(), edges.end());
    for (int s : srcs) dist[s] = T();
    // V - 1 rounds of relaxation over every edge
    for (int round = 1; round < V; round++) for (auto &&e : edges) {
      int v = get<0>(e), w = get<1>(e); T wt = get<2>(e);
      if (dist[v] < INF && dist[v] + wt < dist[w]) {
        par[w] = v; dist[w] = dist[v] + wt;
      }
    }
    // any vertex still relaxable lies past a negative cycle; propagate
    // -INF until no more vertices are affected
    bool changed = true;
    while (changed) {
      changed = false;
      for (auto &&e : edges) {
        int v = get<0>(e), w = get<1>(e); T wt = get<2>(e);
        if (dist[v] < INF && dist[w] > -INF
            && (dist[v] <= -INF || dist[w] > dist[v] + wt)) {
          dist[w] = -INF; changed = hasNegativeCycle = true;
        }
      }
    }
  }
  // single-source convenience constructor
  BellmanFordSSSP(int V, const vector<Edge> &edges, int s,
                  T INF = numeric_limits<T>::max())
      : BellmanFordSSSP(V, edges, vector<int>{s}, INF) {}
  // reconstructs the directed edges on the path from the closest source
  // vertex to v
  vector<Edge> getPath(int v) {
    vector<Edge> path;
    while (par[v] != -1) {
      path.emplace_back(par[v], v, dist[v] - dist[par[v]]);
      v = par[v];
    }
    reverse(path.begin(), path.end()); return path;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../datastructures/trees/heaps/PersistentRandomizedHeap.h"
using namespace std;
// Computes the K shortest walks from s to t in a directed graph
// Template Arguments:
// T: the type of the weight of the edges
// Function Arguments:
// V: number of vertices in the directed graph
// edges: a vector of tuples in the form (v, w, weight) representing
// a directed edge in the graph from vertex v to w with
// weight of weight
// K: the number of shortest walks to compute
// s: the source vertex
// t: the destination vertex
// INF: a value for infinity
// Return Value: a vector of type T of size K with the K shortest walks
// In practice, has a moderate constant
// Time Complexity: O((V + E + K) log V)
// Memory Complexity: O((V + E) log V + K)
// Tested:
// https://judge.yosupo.jp/problem/k_shortest_walk
template <class T>
vector<T> kShortestWalks(int V, const vector<tuple<int, int, T>> &edges, int K,
                         int s, int t, T INF = numeric_limits<T>::max()) {
  if (K == 0) return vector<T>();
  // cand[v]: persistent heap of the "sidetrack" edges usable along v's
  // shortest path to t, ordered by detour cost (Eppstein-style)
  using Heap = PersistentRandomizedHeap<pair<T, int>, greater<pair<T, int>>>;
  using ptr = typename Heap::ptr; vector<Heap> cand(V);
  // Node1: Dijkstra frontier entry (distance d to t, vertex v)
  struct Node1 {
    T d; int v; Node1(T d, int v) : d(d), v(v) {}
    bool operator < (const Node1 &o) const { return d > o.d; }
  };
  // Node2: best-first search entry: total walk length d, the vertex v
  // whose candidate heap is being explored, and the heap node p
  struct Node2 {
    T d; int v; ptr p; Node2(T d, int v, ptr p) : d(d), v(v), p(p) {}
    bool operator < (const Node2 &o) const { return d > o.d; }
  };
  // dist: shortest distance from each vertex to t; ord: vertices in the
  // order they were settled; par: (edge index, next vertex) on the
  // shortest path tree towards t
  vector<T> dist(V, INF), ret; ret.reserve(K); vector<int> ord; ord.reserve(V);
  vector<pair<int, int>> par(V, make_pair(-1, -1));
  vector<vector<tuple<int, T, int>>> G(V), H(V);
  std::priority_queue<Node1> PQ;
  std::priority_queue<Node2> ans;
  // G: forward adjacency lists; H: reversed adjacency lists
  for (int i = 0; i < int(edges.size()); i++) {
    int v, w; T weight; tie(v, w, weight) = edges[i];
    G[v].emplace_back(w, weight, i); H[w].emplace_back(v, weight, i);
  }
  // Dijkstra from t over the reversed graph H
  PQ.emplace(dist[t] = T(), t); while (!PQ.empty()) {
    T d = PQ.top().d; int v = PQ.top().v; PQ.pop(); if (d > dist[v]) continue;
    ord.push_back(v);
    for (auto &&e : H[v]) if (dist[get<0>(e)] > dist[v] + get<1>(e)) {
      PQ.emplace(dist[get<0>(e)] = dist[v] + get<1>(e), get<0>(e));
      par[get<0>(e)] = make_pair(get<2>(e), v);
    }
  }
  // build each vertex's candidate heap: its own sidetrack edges merged
  // (persistently) with the heap of its shortest path tree parent
  for (int v : ord) {
    for (auto &&e : G[v])
      if (get<2>(e) != par[v].first && dist[get<0>(e)] < INF) {
        // extra cost of leaving the shortest path tree through this edge
        T d = dist[get<0>(e)] - dist[v] + get<1>(e);
        cand[v].push(make_pair(d, get<0>(e)));
      }
    if (par[v].first != -1) cand[v].merge(cand[par[v].second]);
    if (v == s) {
      // the shortest walk itself, plus the first detour candidate
      ret.push_back(dist[v]); if (cand[v].root)
        ans.emplace(dist[v] + cand[v].top().first, v, cand[v].root);
    }
  }
  // best-first search over heap nodes: each pop yields one walk; its heap
  // children and the candidate heap of the sidetrack's endpoint are
  // pushed as successors
  while (int(ret.size()) < K) {
    if (ans.empty()) { ret.push_back(INF); continue; }
    T d = ans.top().d; int v = ans.top().v; ptr p = ans.top().p; ans.pop();
    ret.push_back(d);
    if (p->l) ans.emplace(d + p->l->val.first - p->val.first, v, p->l);
    if (p->r) ans.emplace(d + p->r->val.first - p->val.first, v, p->r);
    v = p->val.second;
    if (cand[v].root) ans.emplace(d + cand[v].top().first, v, cand[v].root);
  }
  return ret;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the shortest Hamiltonian cycle
// All vertices are visited exactly once before returning to the start vertex
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// matrix: a V x V matrix containing the minimum weight of a directed edge
// between two vertices, INF if an edge doesn't exist
// Fields:
// shortestCycleDist: the distance of the shortest Hamiltonian cycle, INF if
// no cycle exists (V == 0 implies shortestCycleDist == 0,
// V == 1 implies shortestCycleDist == INF)
// ord: the order the vertices are visited in the shortest Hamiltonian cycle,
// the start vertex is not repeated
// In practice, has a small constant
// Time Complexity:
// constructor: O(V^2 2^V)
// Memory Complexity: O(V 2^V)
// Tested:
// Fuzz Tested
// Bitmask DP: dp[mask][i] is the minimum cost of a path that starts at
// vertex 0, visits exactly the vertices in mask, and ends at vertex i;
// only masks containing vertex 0 (odd masks) are reachable states
template <class T> struct ShortestHamiltonianCycle {
  vector<vector<T>> dp; T INF, shortestCycleDist; vector<int> ord;
  ShortestHamiltonianCycle(const vector<vector<T>> &matrix,
                           T INF = numeric_limits<T>::max())
      : dp(1 << matrix.size(), vector<T>(matrix.size(), INF)), INF(INF),
        shortestCycleDist(matrix.empty() ? T() : INF), ord(matrix.size(), 0) {
    int V = matrix.size(); if (V > 0) dp[1][0] = T();
    for (int mask = 1; mask < (1 << V); mask += 2)
      for (int i = 1; i < V; i++) {
        if (!((mask >> i) & 1)) continue;
        int rest = mask ^ (1 << i);
        for (int j = 0; j < V; j++) {
          if (!((mask >> j) & 1)) continue;
          if (matrix[j][i] >= INF || dp[rest][j] >= INF) continue;
          T cost = dp[rest][j] + matrix[j][i];
          if (cost < dp[mask][i]) dp[mask][i] = cost;
        }
      }
    // close the cycle by adding the edge back to vertex 0
    for (int i = 1; i < V; i++)
      if (dp.back()[i] < INF && matrix[i][0] < INF)
        shortestCycleDist = min(shortestCycleDist,
                                dp.back()[i] + matrix[i][0]);
    if (shortestCycleDist >= INF) return;
    // reconstruct: ord[V - 1] stays 0 (the fixed start); walking backwards,
    // pick the end vertex of the remaining subset that attains the optimum
    int cur = (1 << V) - 1;
    auto cost = [&] (int mask, int j, int nxt) {
      if (dp[mask][j] >= INF || matrix[j][nxt] >= INF) return INF;
      return dp[mask][j] + matrix[j][nxt];
    };
    for (int nxt = 0, i = V - 2; i >= 0; i--) {
      int best = -1;
      for (int j = 1; j < V; j++)
        if ((cur >> j) & 1)
          if (best == -1 || cost(cur, j, nxt) < cost(cur, best, nxt))
            best = j;
      ord[i] = best; cur ^= 1 << best; nxt = best;
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../search/TransitiveClosureSCC.h"
using namespace std;
// Johnson's all pairs shortest path algorithm for weighted graphs
// with negative weights
// Able to detect negative cycles
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// G: a generic weighted graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of pair<int, T> with weights of type T)
// size() const: returns the number of vertices in the graph
// MAXV: the maximum number of vertices in the graph
// INF: a value for infinity, must be negatable
// Fields:
// dist: a vector of vectors of the shortest distance between each pair
// of vertices, INF if there is no path, -INF if the shortest path
// has no lower bound
// par: a vector of vectors of the parent vertex for each vertex in the
// shortest path tree for each source vertex (par[v][w] is the parent
// of vertex w in the shortest path tree from vertex v), or -1 if there is
// no parent
// hasNegativeCycle: a boolean that is true if there is a negative cycle
// in the graph and false otherwise
// Functions:
// getPath(v, w): returns the list of directed edges on the path from
// vertex v to vertex w
// In practice, has a small constant
// Time Complexity:
// constructor: O(VE log E)
// getPath: O(V)
// Memory Complexity: O(V + E + MAXV V / 64)
// Tested:
// https://dmoj.ca/problem/apsp
template <class T, const int MAXV> struct JohnsonAPSP {
  using Edge = tuple<int, int, T>; int V; vector<vector<T>> dist;
  vector<vector<int>> par; T INF; bool hasNegativeCycle;
  // min-heap entry for the Dijkstra phase; comparator inverted since
  // priority_queue is a max-heap
  struct Node {
    T d; int v; Node(T d, int v) : d(d), v(v) {}
    bool operator < (const Node &o) const { return d > o.d; }
  };
  template <class WeightedGraph>
  JohnsonAPSP(const WeightedGraph &G, T INF = numeric_limits<T>::max())
      : V(G.size()), dist(V, vector<T>(V, INF)), par(V, vector<int>(V, -1)),
        INF(INF), hasNegativeCycle(false) {
    // the negative-weight machinery below is skipped entirely when every
    // edge weight is non-negative
    bool hasNegativeWeight = false; for (int v = 0; v < V; v++)
      for (auto &&e : G[v]) hasNegativeWeight |= e.second < T();
    vector<bool> inNegCyc(V, false); vector<T> h(V, T());
    vector<int> id; vector<bitset<MAXV>> neg; if (hasNegativeWeight) {
      // id[v]: strongly connected component of v; any cycle (so any
      // negative cycle) lies entirely inside one SCC
      TransitiveClosureSCC<MAXV> tc(G); id = tc.scc.id;
      vector<bool> compInNegCyc(tc.dp.size(), false);
      // Bellman-Ford over intra-SCC edges only; an improvement still
      // possible after V - 1 rounds witnesses a negative cycle in that SCC
      for (int i = 0; i < V - 1; i++) for (int v = 0; v < V; v++)
        for (auto &&e : G[v]) if (id[v] == id[e.first])
          h[e.first] = min(h[e.first], h[v] + e.second);
      for (int v = 0; v < V; v++) for (auto &&e : G[v])
        if (id[v] == id[e.first] && h[e.first] > h[v] + e.second)
          compInNegCyc[id[v]] = true;
      for (int v = 0; v < V; v++)
        hasNegativeCycle |= (inNegCyc[v] = compInNegCyc[id[v]]);
      // recompute h as Johnson potentials with Bellman-Ford over all edges
      // avoiding negative-cycle vertices; reweighted cost
      // e.second + h[v] - h[w] is then non-negative, as Dijkstra requires
      fill(h.begin(), h.end(), T());
      for (int i = 0; i < V - 1; i++) for (int v = 0; v < V; v++)
        for (auto &&e : G[v]) if (!inNegCyc[v] && !inNegCyc[e.first])
          h[e.first] = min(h[e.first], h[v] + e.second);
      // neg[c]: bitset of SCCs reachable from SCC c via a walk passing
      // through a negative cycle (propagated over the condensation DAG);
      // those destinations have distance -INF
      neg.resize(tc.dp.size()); for (auto &&e : tc.DAG) {
        if (compInNegCyc[e.second]) neg[e.first] |= tc.dp[e.second];
        else neg[e.first] |= neg[e.second];
      }
      for (int i = 0; i < int(neg.size()); i++) if (compInNegCyc[i])
        neg[i] = tc.dp[i];
    }
    // one Dijkstra per source over the reweighted graph, skipping vertices
    // on negative cycles
    for (int s = 0; s < V; s++) {
      if (!inNegCyc[s]) {
        std::priority_queue<Node> PQ;
        PQ.emplace(dist[s][s] = T(), s); while (!PQ.empty()) {
          T d = PQ.top().d; int v = PQ.top().v; PQ.pop();
          if (d > dist[s][v]) continue;
          for (auto &&e : G[v]) {
            int w = e.first; T weight = e.second + h[v] - h[w];
            if (!inNegCyc[w] && dist[s][w] > dist[s][v] + weight)
              PQ.emplace(dist[s][w] = dist[s][par[s][w] = v] + weight, w);
          }
        }
      }
      // destinations whose SCC is reachable through a negative cycle are
      // unbounded below
      if (hasNegativeWeight) for (int v = 0; v < V; v++)
        if (neg[id[s]][id[v]]) dist[s][v] = -INF;
    }
    // undo the potential reweighting to recover true distances
    if (hasNegativeWeight) for (int v = 0; v < V; v++)
      for (int w = 0; w < V; w++) if (dist[v][w] != INF && dist[v][w] != -INF)
        dist[v][w] = dist[v][w] - h[v] + h[w];
  }
  // returns the directed edges (parent, child, weight) on the shortest path
  // from v to w by following parent links backwards from w
  vector<Edge> getPath(int v, int w) {
    vector<Edge> path; for (; par[v][w] != -1; w = par[v][w])
      path.emplace_back(par[v][w], w, dist[v][w] - dist[v][par[v][w]]);
    reverse(path.begin(), path.end()); return path;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../search/TopologicalOrder.h"
using namespace std;
// Computes the shortest path (based on the comparator) on a directed
// acyclic graph
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Cmp: the comparator to compare two distances; less<T> will compute
// the shortest path while greater<T> will compute the longest path
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// G: a generic directed acyclic graph structure (weighted or unweighted)
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of ints for an unweighted graph, or a list of
// pair<int, T> for a weighted graph with weights of type T)
// size() const: returns the number of vertices in the graph
// s: a single source vertex
// src: a vector of source vertices
// INF: a value for infinity for shortest path, or negative infinity
// for longest path; (Cmp()(INF, x)) must return false for all
// values of x
// cmp: an instance of the Cmp struct
// Fields:
// dist: vector of distance from the closest source vertex to each vertex,
// or INF if unreachable, and is also the shortest distance for
// an unweighted graph
// par: the parent vertex for each vertex in the shortest path tree,
// or -1 if there is no parent
// Functions:
// getPath(v): returns the list of directed edges on the path from the
// closest source vertex to vertex v
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(V + E)
// getPath: O(V)
// Memory Complexity: O(V)
// Tested:
// https://atcoder.jp/contests/dp/tasks/dp_g
// Single source shortest (or, with Cmp = greater<T>, longest) paths on a
// DAG by relaxing every edge once in topological order
template <class T, class Cmp = less<T>> struct DAGSP {
  using Edge = tuple<int, int, T>; vector<T> dist; vector<int> par; T INF;
  TopologicalOrder ord;
  // overload set so both unweighted (int) and weighted (pair<int, T>)
  // adjacency entries are handled uniformly; unweighted edges get weight 1
  int getTo(int e) { return e; }
  T getWeight(int) { return 1; }
  int getTo(const pair<int, T> &e) { return e.first; }
  T getWeight(const pair<int, T> &e) { return e.second; }
  template <class DAG>
  DAGSP(const DAG &G, const vector<int> &srcs, T INF, Cmp cmp = Cmp())
      : dist(G.size(), INF), par(G.size(), -1), INF(INF), ord(G) {
    for (int s : srcs) dist[s] = T();
    for (int v : ord.ord) for (auto &&e : G[v]) {
      // a vertex still at INF is unreachable; its edges must not relax
      if (!cmp(dist[v], INF)) continue;
      int w = getTo(e); T nd = dist[v] + getWeight(e);
      if (cmp(nd, dist[w])) { dist[w] = nd; par[w] = v; }
    }
  }
  template <class DAG> DAGSP(const DAG &G, int s, T INF, Cmp cmp = Cmp())
      : DAGSP(G, vector<int>{s}, INF, cmp) {}
  // walks parent links from v back to its source and returns the edges
  // (parent, child, weight) in path order
  vector<Edge> getPath(int v) {
    vector<Edge> path;
    while (par[v] != -1) {
      path.emplace_back(par[v], v, dist[v] - dist[par[v]]);
      v = par[v];
    }
    reverse(path.begin(), path.end()); return path;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Classical Dijkstra's single source shortest path algorithm for
// weighted graphs without negative weights
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// G: a generic weighted graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of pair<int, T> with weights of type T)
// size() const: returns the number of vertices in the graph
// s: a single source vertex
// src: a vector of source vertices
// INF: a value for infinity
// Fields:
// dist: vector of shortest distance from the closest source vertex to each
// vertex, or INF if unreachable
// par: the parent vertex for each vertex in the shortest path tree, or
// -1 if there is no parent
// Functions:
// getPath(v): returns the list of directed edges on the path from the
// closest source vertex to vertex v
// In practice, has a small constant
// Time Complexity:
// constructor: O(V^2 + E)
// getPath: O(V)
// Memory Complexity: O(V)
// Tested:
// Stress Tested
// https://dmoj.ca/problem/sssp
// Dijkstra without a priority queue: up to V - 1 rounds of selecting the
// closest unfinished vertex by linear scan, then relaxing its edges;
// O(V^2 + E), which beats the heap version on dense graphs
template <class T> struct ClassicalDijkstraSSSP {
  using Edge = tuple<int, int, T>; vector<T> dist; vector<int> par; T INF;
  template <class WeightedGraph>
  ClassicalDijkstraSSSP(const WeightedGraph &G, const vector<int> &srcs,
                        T INF = numeric_limits<T>::max())
      : dist(G.size(), INF), par(G.size(), -1), INF(INF) {
    int V = G.size(); vector<bool> finished(V, false);
    for (int s : srcs) dist[s] = T();
    for (int round = 0; round + 1 < V; round++) {
      int v = -1;
      for (int w = 0; w < V; w++)
        if (!finished[w] && (v == -1 || dist[w] < dist[v])) v = w;
      // remaining vertices are unreachable
      if (dist[v] >= INF) break;
      finished[v] = true;
      for (auto &&e : G[v]) {
        T nd = dist[v] + e.second;
        if (nd < dist[e.first]) { dist[e.first] = nd; par[e.first] = v; }
      }
    }
  }
  template <class WeightedGraph>
  ClassicalDijkstraSSSP(const WeightedGraph &G, int s,
                        T INF = numeric_limits<T>::max())
      : ClassicalDijkstraSSSP(G, vector<int>{s}, INF) {}
  // walks parent links from v back to its source and returns the edges
  // (parent, child, weight) in path order
  vector<Edge> getPath(int v) {
    vector<Edge> path;
    while (par[v] != -1) {
      path.emplace_back(par[v], v, dist[v] - dist[par[v]]);
      v = par[v];
    }
    reverse(path.begin(), path.end()); return path;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Computes the shortest Hamiltonian path
// All vertices are visited exactly once, does not return to the start
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// matrix: a V x V matrix containing the minimum weight of a directed edge
// between two vertices, INF if an edge doesn't exist
// Fields:
// shortestPathDist: the distance of the shortest Hamiltonian path, INF if
// no path exists (V <= 1 implies shortestPathDist == 0)
// ord: the order the vertices are visited in the shortest Hamiltonian path
// In practice, has a small constant
// Time Complexity:
// constructor: O(V^2 2^V)
// Memory Complexity: O(V 2^V)
// Tested:
// Fuzz Tested
// Bitmask DP: dp[mask][i] is the minimum cost of a path that visits
// exactly the vertices in mask and ends at vertex i (any start vertex)
template <class T> struct ShortestHamiltonianPath {
  vector<vector<T>> dp; T INF, shortestPathDist; vector<int> ord;
  ShortestHamiltonianPath(const vector<vector<T>> &matrix,
                          T INF = numeric_limits<T>::max())
      : dp(1 << matrix.size(), vector<T>(matrix.size(), INF)), INF(INF),
        shortestPathDist(matrix.empty() ? T() : INF), ord(matrix.size(), 0) {
    int V = matrix.size();
    // base case: a single-vertex path costs 0
    for (int i = 0; i < V; i++) dp[1 << i][i] = T();
    for (int mask = 0; mask < (1 << V); mask++)
      for (int i = 0; i < V; i++) {
        if (!((mask >> i) & 1)) continue;
        int rest = mask ^ (1 << i);
        for (int j = 0; j < V; j++) {
          if (!((mask >> j) & 1)) continue;
          if (matrix[j][i] >= INF || dp[rest][j] >= INF) continue;
          T cost = dp[rest][j] + matrix[j][i];
          if (cost < dp[mask][i]) dp[mask][i] = cost;
        }
      }
    // best full path may end at any vertex
    for (int i = 0; i < V; i++)
      shortestPathDist = min(shortestPathDist, dp.back()[i]);
    if (shortestPathDist >= INF) return;
    // reconstruct backwards: choose the end vertex of the remaining subset
    // attaining the optimal cost; nxt == -1 marks the final position
    int cur = (1 << V) - 1;
    auto cost = [&] (int mask, int j, int nxt) {
      T w = (nxt == -1 ? T() : matrix[j][nxt]);
      if (dp[mask][j] >= INF || w >= INF) return INF;
      return dp[mask][j] + w;
    };
    for (int nxt = -1, i = V - 1; i >= 0; i--) {
      int best = -1;
      for (int j = 0; j < V; j++)
        if ((cur >> j) & 1)
          if (best == -1 || cost(cur, j, nxt) < cost(cur, best, nxt))
            best = j;
      ord[i] = best; cur ^= 1 << best; nxt = best;
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Dijkstra's single source shortest path algorithm for weighted graphs
// without negative weights
// Vertices are 0-indexed
// Template Arguments:
// T: the type of the weight of the edges in the graph
// Constructor Arguments:
// G: a generic weighted graph structure
// Required Functions:
// operator [v] const: iterates over the adjacency list of vertex v
// (which is a list of pair<int, T> with weights of type T)
// size() const: returns the number of vertices in the graph
// s: a single source vertex
// src: a vector of source vertices
// INF: a value for infinity
// Fields:
// dist: vector of shortest distance from the closest source vertex to each
// vertex, or INF if unreachable
// par: the parent vertex for each vertex in the shortest path tree, or
// -1 if there is no parent
// Functions:
// getPath(v): returns the list of directed edges on the path from the
// closest source vertex to vertex v
// In practice, has a small constant
// Time Complexity:
// constructor: O((V + E) log E)
// getPath: O(V)
// Memory Complexity: O(V + E)
// Tested:
// Stress Tested
// https://judge.yosupo.jp/problem/shortest_path
// https://open.kattis.com/problems/shortestpath1
// https://dmoj.ca/problem/sssp
// Heap-based Dijkstra with lazy deletion: outdated queue entries are
// simply skipped when popped instead of being decreased in place
template <class T> struct DijkstraSSSP {
  using Edge = tuple<int, int, T>; vector<T> dist; vector<int> par; T INF;
  // min-heap entry; comparator inverted since priority_queue is a max-heap
  struct Node {
    T d; int v; Node(T d, int v) : d(d), v(v) {}
    bool operator < (const Node &o) const { return d > o.d; }
  };
  template <class WeightedGraph>
  DijkstraSSSP(const WeightedGraph &G, const vector<int> &srcs,
               T INF = numeric_limits<T>::max())
      : dist(G.size(), INF), par(G.size(), -1), INF(INF) {
    std::priority_queue<Node> PQ;
    for (int s : srcs) PQ.emplace(dist[s] = T(), s);
    while (!PQ.empty()) {
      Node cur = PQ.top(); PQ.pop();
      // stale entry: a shorter distance was already found
      if (cur.d > dist[cur.v]) continue;
      for (auto &&e : G[cur.v]) {
        T nd = dist[cur.v] + e.second;
        if (nd < dist[e.first]) {
          dist[e.first] = nd; par[e.first] = cur.v; PQ.emplace(nd, e.first);
        }
      }
    }
  }
  template <class WeightedGraph> DijkstraSSSP(const WeightedGraph &G, int s,
                                              T INF = numeric_limits<T>::max())
      : DijkstraSSSP(G, vector<int>{s}, INF) {}
  // walks parent links from v back to its source and returns the edges
  // (parent, child, weight) in path order
  vector<Edge> getPath(int v) {
    vector<Edge> path;
    while (par[v] != -1) {
      path.emplace_back(par[v], v, dist[v] - dist[par[v]]);
      v = par[v];
    }
    reverse(path.begin(), path.end()); return path;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point3D.h"
#include "Sphere3D.h"
using namespace std;
// Functions for a 3D polyhedron
// Determines twice the vector area of a face of coplanar points
// Function Arguments:
// face: the coplanar points of the face
// Return Value: twice the vector area of the face, points are in ccw order
// when looking from above (where up is the direction of the return vector)
// Time Complexity: O(N)
// Memory Complexity: O(1)
// Tested:
// https://www.spoj.com/problems/CH3D/
// https://open.kattis.com/problems/threedprinter
pt3 vectorArea2(const vector<pt3> &face) {
  // sum of cross products of consecutive vertices (shoelace in 3D)
  pt3 total(0, 0, 0); int n = face.size();
  for (int cur = 0; cur < n; cur++) {
    int nxt = cur + 1 == n ? 0 : cur + 1;
    total += face[cur] * face[nxt];
  }
  return total;
}
// Returns the absolute area of a face of coplanar points
// Function Arguments:
// face: the coplanar points of the face
// Return Value: the area of the face
// Time Complexity: O(N)
// Memory Complexity: O(1)
// Tested:
// https://www.spoj.com/problems/CH3D/
T faceArea(const vector<pt3> &face) { return abs(vectorArea2(face)) / T(2); }
// Flips some of the faces of a polyhedron such that they are oriented
// consistently (they will either all be pointed outwards or inwards)
// Function Arguments:
// faces: a reference to a vector of vectors of points representing
// the faces of the polyhedron to be reoriented
// Time Complexity: O(N) for N total points
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/threedprinter
void reorient(vector<vector<pt3>> &faces) {
int n = faces.size(); vector<vector<pair<int, bool>>> G(n);
map<pair<pt3, pt3>, int> seen; for (int v = 0; v < n; v++)
for (int i = 0, m = faces[v].size(); i < m; i++) {
pt3 a = faces[v][i], b = faces[v][(i + 1) % m];
auto it = seen.find(make_pair(a, b)); bool f = true;
if (it == seen.end()) { it = seen.find(make_pair(b, a)); f = false; }
if (it == seen.end()) seen[make_pair(a, b)] = v;
else {
int w = it->second; G[v].emplace_back(w, f); G[w].emplace_back(v, f);
}
}
vector<char> flip(n, -1); vector<int> q(n); int front = 0, back = 0;
flip[q[back++] = 0] = 0; while (front < back) {
int v = q[front++]; for (auto &&e : G[v]) if (flip[e.first] == -1)
flip[q[back++] = e.first] = flip[v] ^ e.second;
}
for (int v = 0; v < n; v++) {
assert(flip[v] != -1);
if (flip[v]) reverse(faces[v].begin(), faces[v].end());
}
}
// Returns the surface area of a polyhedron
// Function Arguments:
// faces: a vector of vectors of points representing
// the faces of the polyhedron with consistent orientation
// Return Value: the surface area of the polyhedron
// Time Complexity: O(N) for N total points
// Memory Complexity: O(1)
// Tested:
// https://www.spoj.com/problems/CH3D/
T getSurfaceArea(const vector<vector<pt3>> &faces) {
  // total surface area is the sum of the individual face areas
  T total = 0;
  for (auto &&f : faces) total += faceArea(f);
  return total;
}
// Returns 6 times the signed volume of a polyhedron
// Function Arguments:
// faces: a vector of vectors of points representing
// the faces of the polyhedron with consistent orientation
// Return Value: 6 times the signed volume of the polyhedron, positive if
// all vector areas point outwards, negative if inwards
// Time Complexity: O(N) for N total points
// Memory Complexity: O(1)
// Tested:
// https://www.spoj.com/problems/CH3D/
// https://open.kattis.com/problems/threedprinter
T getVolume6(const vector<vector<pt3>> &faces) {
  // divergence-theorem style sum: each face contributes its vector area
  // dotted with any one of its points
  T total = 0;
  for (auto &&f : faces) total += (vectorArea2(f) | f[0]);
  return total;
}
// Determines if a point is inside a polyhedron or not
// Function Arguments:
// faces: a vector of vectors of points representing
// the faces of the polyhedron with consistent orientation
// p: the point to check
// Return Value: 1 if inside the polyhedron, 0 if on the face, -1 if outside
// Time Complexity: O(N) for N total points
// Memory Complexity: O(1)
int isInPolyhedron(const vector<vector<pt3>> &faces, pt3 p) {
T sum = 0, PI = acos(T(-1)); Sphere3D s(p, 1); for (auto &&face : faces) {
pt3 a = face[0], b = face[1], c = face[2], n = (b - a) * (c - a);
if (eq((n | p) - (n | a), 0)) return 0;
sum += remainder(s.surfaceAreaOnSph(face), 4 * PI);
}
return eq(sum, 0) ? -1 : 1;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../utils/EpsCmp.h"
using namespace std;
// Functions for a 3D point
// * operator between 2 points is cross product
// | operator between 2 points is dot product
// OP expands to an arithmetic operator plus its compound assignment form
#define OP(op, U, a, x, y, z) \
  pt3 operator op (U a) const { return pt3(x, y, z); } \
  pt3 &operator op##= (U a) { return *this = *this op a; }
// CMP expands to a comparison operator defined via the epsilon-aware <
#define CMP(op, body) bool operator op (pt3 p) const { return body; }
struct pt3 {
  T x, y, z; constexpr pt3(T x = 0, T y = 0, T z = 0) : x(x), y(y), z(z) {}
  pt3 operator + () const { return *this; }
  pt3 operator - () const { return pt3(-x, -y, -z); }
  // componentwise addition and subtraction
  OP(+, pt3, p, x + p.x, y + p.y, z + p.z)
  OP(-, pt3, p, x - p.x, y - p.y, z - p.z)
  // scalar multiplication and division
  OP(*, T, a, x * a, y * a, z * a) OP(/, T, a, x / a, y / a, z / a)
  friend pt3 operator * (T a, pt3 p) { return pt3(a * p.x, a * p.y, a * p.z); }
  // lexicographic comparison by (x, y, z) with epsilon comparisons
  bool operator < (pt3 p) const {
    return eq(x, p.x) ? (eq(y, p.y) ? lt(z, p.z) : lt(y, p.y)) : lt(x, p.x);
  }
  CMP(<=, !(p < *this)) CMP(>, p < *this) CMP(>=, !(*this < p))
  CMP(==, !(*this < p) && !(p < *this)) CMP(!=, *this < p || p < *this)
  // dot product
  T operator | (pt3 p) const { return x * p.x + y * p.y + z * p.z; }
  // cross product
  OP(*, pt3, p, y * p.z - z * p.y, z * p.x - x * p.z, x * p.y - y * p.x)
};
#undef OP
#undef CMP
// reads three whitespace-separated coordinates into p
istream &operator >> (istream &stream, pt3 &p) {
  stream >> p.x >> p.y >> p.z;
  return stream;
}
// writes the coordinates separated by single spaces
ostream &operator << (ostream &stream, pt3 p) {
  stream << p.x << ' ' << p.y << ' ' << p.z;
  return stream;
}
// squared magnitude (dot product of p with itself)
T norm(pt3 p) { return p | p; }
// Euclidean magnitude
T abs(pt3 p) { return sqrt(norm(p)); }
// unit vector in the direction of p
pt3 unit(pt3 p) { return p / abs(p); }
// squared Euclidean distance between a and b
T distSq(pt3 a, pt3 b) { return norm(b - a); }
// Euclidean distance between a and b
T dist(pt3 a, pt3 b) { return abs(b - a); }
// angle at vertex b of the triangle a-b-c;
// returns an angle in the range [0, PI]
T ang(pt3 a, pt3 b, pt3 c) {
  // computed via the half-angle tangent form with atan2
  a = unit(a - b); c = unit(c - b); return 2 * atan2(abs(a - c), abs(a + c));
}
// rotates a by theta radians around the line through the origin in the
// direction of axis (Rodrigues' rotation formula)
pt3 rot(pt3 a, pt3 axis, T theta) {
  return a * cos(theta) + (unit(axis) * a * sin(theta))
      + (unit(axis) * (unit(axis) | a) * (1 - cos(theta)));
}
// sign of volume6 and above: 1 if d is above the plane abc with
// normal ab x ac, 0 if on the plane, -1 if below the plane
// 6 times the signed volume of the tetrahedron abcd
// (scalar triple product of ab, ac, ad)
T volume6(pt3 a, pt3 b, pt3 c, pt3 d) {
  return (b - a) * (c - a) | (d - a);
}
int above(pt3 a, pt3 b, pt3 c, pt3 d) { return sgn(volume6(a, b, c, d)); }
// Converts a position based on radius (r >= 0), inclination/latitude
// (-PI / 2 <= theta <= PI / 2), and azimuth/longitude (-PI < phi <= PI)
// Convention is that the x axis passes through the meridian (phi = 0), and
// the z axis passes through the North Pole (theta = Pi / 2)
// spherical to Cartesian: radius r, inclination theta, azimuth phi
pt3 sph(T r, T theta, T phi) {
  return pt3(r * cos(theta) * cos(phi), r * cos(theta) * sin(phi),
             r * sin(theta));
}
// inclination/latitude of p, in [-PI / 2, PI / 2]
T inc(pt3 p) { return atan2(p.z, T(sqrt(p.x * p.x + p.y * p.y))); }
// azimuth/longitude of p, in (-PI, PI]
T az(pt3 p) { return atan2(p.y, p.x); }
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../utils/Random.h"
#include "Point3D.h"
using namespace std;
// Computes the convex hull of a set of N points 3D points (convex set of
// minimum points with the minimum volume)
// Function Arguments:
// P: the vector of points
// Return Value: a vector of faces in the convex hull, with each face being a
// vector of exactly 3 points, all facing outwards
// In practice, has a large constant
// Time Complexity: O(N log N) expected
// Memory Complexity: O(N log N)
// Tested:
// https://www.spoj.com/problems/CH3D/
// https://open.kattis.com/problems/worminapple
// https://open.kattis.com/problems/starsinacan
vector<vector<pt3>> convexHull3D(vector<pt3> P) {
  vector<array<int, 3>> hullInd; shuffle(P.begin(), P.end(), rng);
  // move 4 points forming a non-degenerate tetrahedron to the front:
  // P[1] distinct from P[0], P[2] not collinear with P[0], P[1], and
  // P[3] not coplanar with P[0], P[1], P[2]
  int n = P.size(); for (int i = 1, num = 1; i < n; i++) {
    if (num == 1) {
      if (P[0] != P[i]) { swap(P[1], P[i]); num++; }
    } else if (num == 2) {
      if (!eq(norm((P[1] - P[0]) * (P[i] - P[0])), 0)) {
        swap(P[2], P[i]); num++;
      }
    } else if (num == 3) {
      if (above(P[0], P[1], P[2], P[i]) != 0) { swap(P[3], P[i]); num++; }
    }
  }
  // randomized incremental construction with conflict lists:
  // active[f]: face f is currently on the hull
  // vis[i]: faces visible from point i; rvis[f]: points that see face f
  // other[f][s]: the (face, side) glued to side s of face f
  // label[a]: index of the new face added this round whose first vertex is a
  vector<bool> active; vector<vector<int>> vis(n), rvis;
  vector<array<pair<int, int>, 3>> other; vector<int> label(n, -1);
  auto addFace = [&] (int a, int b, int c) {
    hullInd.push_back({a, b, c}); active.push_back(true);
    rvis.emplace_back(); other.emplace_back();
  };
  // record mutual visibility between face a and point b
  auto addEdge = [&] (int a, int b) {
    vis[b].push_back(a); rvis[a].push_back(b);
  };
  // abv(a, b): point b strictly sees face a (is on its positive side)
  auto abv = [&] (int a, int b) {
    array<int, 3> f = hullInd[a];
    return above(P[f[0]], P[f[1]], P[f[2]], P[b]) > 0;
  };
  // edge(f, s): directed edge on side s of face f
  auto edge = [&] (int f, int s) {
    return make_pair(hullInd[f][s], hullInd[f][(s + 1) % 3]);
  };
  // glue: side as of face af coincides with side bs of face bf
  auto glue = [&] (int af, int as, int bf, int bs) {
    other[af][as] = make_pair(bf, bs); other[bf][bs] = make_pair(af, as);
  };
  // initial hull: both orientations of triangle 0-1-2, oriented so that
  // P[3] is not above face 1
  addFace(0, 1, 2); addFace(0, 2, 1); if (abv(1, 3)) swap(P[1], P[2]);
  for (int i = 0; i < 3; i++) glue(0, i, 1, 2 - i);
  for (int i = 3; i < n; i++) addEdge(abv(1, i), i);
  for (int i = 3; i < n; i++) {
    // deactivate all faces visible from P[i]; each horizon edge (boundary
    // between removed and surviving faces) is capped with a new face to i
    vector<int> rem; for (auto &&t : vis[i])
      if (active[t]) { active[t] = false; rem.push_back(t); }
    if (rem.empty()) continue;
    int st = -1; for (auto &&r : rem) for (int j = 0; j < 3; j++) {
      int o = other[r][j].first; if (active[o]) {
        int a, b; tie(a, b) = edge(r, j); addFace(a, b, i); st = a;
        // the new face's conflict list comes from the union of the lists
        // of the two faces incident to the horizon edge
        int cur = int(rvis.size()) - 1; label[a] = cur; vector<int> tmp;
        set_union(rvis[r].begin(), rvis[r].end(),
                  rvis[o].begin(), rvis[o].end(), back_inserter(tmp));
        for (auto &&x : tmp) if (abv(cur, x)) addEdge(cur, x);
        glue(cur, 0, other[r][j].first, other[r][j].second);
      }
    }
    // walk around the horizon cycle gluing consecutive new faces together
    for (int x = st, y; ; x = y) {
      int lx = label[x]; glue(lx, 1, label[y = hullInd[lx][1]], 2);
      if (y == st) break;
    }
  }
  // collect the surviving faces as triples of points
  vector<vector<pt3>> hull;
  for (int i = 0; i < int(hullInd.size()); i++) if (active[i])
    hull.push_back(vector<pt3>{P[hullInd[i][0]], P[hullInd[i][1]],
                               P[hullInd[i][2]]});
  return hull;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point3D.h"
using namespace std;
// Affine Transformations in 3D
// Functions:
// prependMatrix(m2, b2): sets m = m2 * m and b = m2 * b + b2
// transform(t): applies the AffineTransformation t to this
// scale(p): scales the x, y, z coordinates by p.x, p.y, p.z respectively
// translate(p): translates the point by p
// rotate(axis, theta): rotates the point theta radians around the line from
// the origin in the direction of axis
// reflect(norm): reflects the point across the plane passing through the
// origin with the normal vector norm
// project(norm): projects the point across the plane passing through the
// origin with the normal vector norm
// applyTransform(p): applies the transformation to the point p
// inverse(): returns the inverse of this transformation,
// determinant of m must nonzero
// Time Complexity:
// constructor, prependMatrix, transform, scale, translate, rotate, reflect,
// project, applyTransform, inverse: O(1)
// Memory Complexity: O(1)
// Tested:
// https://codeforces.com/problemsets/acmsguru/problem/99999/265
struct AffineTransform3D {
  // inverse of a 3x3 matrix via the adjugate divided by the determinant;
  // asserts the determinant is nonzero
  static array<array<T, 3>, 3> inverse(const array<array<T, 3>, 3> &A) {
    T a = A[0][0], b = A[0][1], c = A[0][2];
    T d = A[1][0], e = A[1][1], f = A[1][2];
    T g = A[2][0], h = A[2][1], k = A[2][2];
    T det = a * (e * k - f * h) - b * (d * k - f * g) + c * (d * h - e * g);
    assert(!eq(det, 0)); array<array<T, 3>, 3> ret;
    ret[0][0] = (e * k - f * h) / det;
    ret[0][1] = -(b * k - c * h) / det;
    ret[0][2] = (b * f - c * e) / det;
    ret[1][0] = -(d * k - f * g) / det;
    ret[1][1] = (a * k - c * g) / det;
    ret[1][2] = -(a * f - c * d) / det;
    ret[2][0] = (d * h - e * g) / det;
    ret[2][1] = -(a * h - b * g) / det;
    ret[2][2] = (a * e - b * d) / det;
    return ret;
  }
  // the transformation maps p to m * p + b
  array<array<T, 3>, 3> m; array<T, 3> b;
  // identity transformation
  AffineTransform3D() {
    for (int i = 0; i < 3; i++) {
      b[i] = T(0); for (int j = 0; j < 3; j++) m[i][j] = i == j ? T(1) : T(0);
    }
  }
  // composes (m2, b2) to be applied AFTER the current transformation:
  // sets m = m2 * m and b = m2 * b + b2
  void prependMatrix(const array<array<T, 3>, 3> &m2, const array<T, 3> &b2) {
    array<array<T, 3>, 3> resm; array<T, 3> resb;
    for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) {
      resm[i][j] = T(0);
      for (int k = 0; k < 3; k++) resm[i][j] += m2[i][k] * m[k][j];
    }
    for (int i = 0; i < 3; i++) {
      resb[i] = b2[i]; for (int j = 0; j < 3; j++) resb[i] += m2[i][j] * b[j];
    }
    m = resm; b = resb;
  }
  // applies the AffineTransformation t after this one
  void transform(const AffineTransform3D &t) { prependMatrix(t.m, t.b); }
  // scales the x, y, z coordinates by p.x, p.y, p.z respectively
  void scale(pt3 p) {
    prependMatrix({array<T, 3>{p.x, T(0), T(0)},
                   {T(0), p.y, T(0)},
                   {T(0), T(0), p.z}},
                  {T(0), T(0), T(0)});
  }
  // translates by the vector p
  void translate(pt3 p) {
    prependMatrix({array<T, 3>{T(1), T(0), T(0)},
                   {T(0), T(1), T(0)},
                   {T(0), T(0), T(1)}},
                  {p.x, p.y, p.z});
  }
  // rotates theta radians around the line through the origin in the
  // direction of axis (Rodrigues' rotation matrix)
  void rotate(pt3 axis, T theta) {
    axis = unit(axis);
    T x = axis.x, y = axis.y, z = axis.z, c = cos(theta), s = sin(theta);
    prependMatrix({array<T, 3>{x * x * (1 - c) + c,
                               x * y * (1 - c) - z * s,
                               x * z * (1 - c) + y * s},
                   {y * x * (1 - c) + z * s,
                    y * y * (1 - c) + c,
                    y * z * (1 - c) - x * s},
                   {z * x * (1 - c) - y * s,
                    z * y * (1 - c) + x * s,
                    z * z * (1 - c) + c}},
                  {T(0), T(0), T(0)});
  }
  // reflects across the plane through the origin with normal norm
  // (Householder matrix I - 2 n n^T for the unit normal n)
  void reflect(pt3 norm) {
    norm = unit(norm); T a = norm.x, b = norm.y, c = norm.z;
    prependMatrix({array<T, 3>{1 - 2 * a * a, -2 * a * b, -2 * a * c},
                   {-2 * b * a, 1 - 2 * b * b, -2 * b * c},
                   {-2 * c * a, -2 * c * b, 1 - 2 * c * c}},
                  {T(0), T(0), T(0)});
  }
  // projects onto the plane through the origin with normal norm
  // (matrix I - n n^T for the unit normal n)
  void project(pt3 norm) {
    norm = unit(norm); T a = norm.x, b = norm.y, c = norm.z;
    prependMatrix({array<T, 3>{b * b + c * c, -a * b, -a * c},
                   {-b * a, a * a + c * c, -b * c},
                   {-c * a, -c * b, a * a + b * b}},
                  {T(0), T(0), T(0)});
  }
  // applies the transformation to p, returning m * p + b
  pt3 applyTransform(pt3 p) {
    return pt3(m[0][0] * p.x + m[0][1] * p.y + m[0][2] * p.z + b[0],
               m[1][0] * p.x + m[1][1] * p.y + m[1][2] * p.z + b[1],
               m[2][0] * p.x + m[2][1] * p.y + m[2][2] * p.z + b[2]);
  }
  // returns the inverse transformation p -> m^-1 * (p - b);
  // determinant of m must be nonzero
  AffineTransform3D inverse() const {
    AffineTransform3D ret;
    ret.prependMatrix({array<T, 3>{T(1), T(0), T(0)},
                       {T(0), T(1), T(0)},
                       {T(0), T(0), T(1)}},
                      {-b[0], -b[1], -b[2]});
    ret.prependMatrix(inverse(m), {T(0), T(0), T(0)}); return ret;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point3D.h"
#include "Line3D.h"
using namespace std;
// Functions for a 3D plane
struct Plane3D {
  // n: the plane's normal vector, d: the offset (ax + by + cz = d)
  pt3 n; T d;
  // ax + by + cz = d, above is ax + by + cz > d
  Plane3D(T a = 0, T b = 0, T c = 0, T d = 0) : n(a, b, c), d(d) {}
  // normal n, offset d
  Plane3D(pt3 n, T d) : n(n), d(d) {}
  // normal n, point p
  Plane3D(pt3 n, pt3 p) : n(n), d(n | p) {}
  // 3 non-collinear points p, q, r
  Plane3D(pt3 p, pt3 q, pt3 r) : Plane3D((q - p) * (r - p), p) {}
  // signed value proportional to the distance from p (scaled by abs(n))
  T eval(pt3 p) const { return (n | p) - d; }
  // sign of isAbove, dist: 1 if above plane, 0 if on plane,
  // -1 if below plane
  int isAbove(pt3 p) const { return sgn(eval(p)); }
  // signed distance from p to the plane
  T dist(pt3 p) const { return eval(p) / abs(n); }
  // squared distance from p to the plane (always non-negative)
  T distSq(pt3 p) const { T e = eval(p); return e * e / norm(n); }
  // this plane translated by the vector p
  Plane3D translate(pt3 p) const { return Plane3D(n, d + (n | p)); }
  // this plane shifted e units in the direction of its normal
  Plane3D shiftUp(T e) const { return Plane3D(n, d + e * abs(n)); }
  // orthogonal projection of p onto this plane
  pt3 proj(pt3 p) const { return p - n * eval(p) / norm(n); }
  // reflection of p across this plane
  pt3 refl(pt3 p) const { return p - n * T(2) * eval(p) / norm(n); }
  // returns 1 if the projections of a, b, c on this plane form a ccw turn
  // looking from above, 0 if collinear, -1 if cw
  int ccwProj(pt3 a, pt3 b, pt3 c) const { return sgn((b - a) * (c - a) | n); }
  // returns a tuple (a, b, c) of 3 non-collinear points on the plane,
  // guaranteed that ccwProj(a, b, c) == 1
  tuple<pt3, pt3, pt3> getPts() const {
    // pick any vector v not parallel to n; v1 and v2 then span the plane
    pt3 v = pt3(1, 0, 0); if (eq(abs(n * v), 0)) v = pt3(0, 1, 0);
    pt3 v1 = n * v, v2 = n * v1; pt3 a = proj(pt3(0, 0, 0));
    return make_tuple(a, a + v1, a + v2);
  }
};
// Returns the line perpendicular to the plane pi passing through the
// point o (the plane's normal is the line's direction)
Line3D perpThrough(Plane3D pi, pt3 o) {
  Line3D ret; ret.o = o; ret.d = pi.n; return ret;
}
// Returns the plane perpendicular to the line l passing through the point o
Plane3D perpThrough(Line3D l, pt3 o) { return Plane3D(l.d, o); }
// Transforms points to a new coordinate system where the x and y axes are
// on the plane, with the z axis being the normal vector (positive z is in
// the direction of the normal vector)
// Z coordinate is guaranteed to be the distance to the plane (positive if
// above plane, negative if below, 0 if on)
// Constructor Arguments:
// pi: a plane
// p, q, r: 3 non-collinear points
// Functions:
// transform(p): transforms p into the new coordinate system
// Time Complexity:
// constructor, transform: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/utso15p6
// https://open.kattis.com/problems/starsinacan
struct CoordinateTransformation {
  // o: origin of the new coordinate system
  // dx, dy, dz: the basis vectors of the new x, y, z axes
  pt3 o, dx, dy, dz;
  CoordinateTransformation(Plane3D pi) {
    // x axis along the first in-plane edge, z axis along the plane normal
    pt3 p, q, r; tie(p, q, r) = pi.getPts(); o = p;
    dx = unit(q - p); dz = unit(dx * (r - p)); dy = dz * dx;
  }
  CoordinateTransformation(pt3 p, pt3 q, pt3 r) : o(p) {
    dx = unit(q - p); dz = unit(dx * (r - p)); dy = dz * dx;
  }
  // expresses p in the new system via dot products with each basis vector
  pt3 transform(pt3 p) const {
    return pt3((p - o) | dx, (p - o) | dy, (p - o) | dz);
  }
};
// Intersection of plane and line
// Function Arguments:
// pi: the plane
// l: the line
// res: a reference to the point to store the intersection if it exists
// Return Value: 0 if no intersection, 1 if point of intersection, 2 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
int planeLineIntersection(Plane3D pi, Line3D l, pt3 &res) {
  // a is the dot product of the plane normal and the line direction; zero
  // means the line is parallel (2 if it lies on the plane, otherwise 0)
  T a = pi.n | l.d; if (eq(norm(a), 0)) return pi.isAbove(l.o) == 0 ? 2 : 0;
  // solve pi.eval(l.o + k * l.d) == 0 for the parameter k
  res = l.o - l.d * pi.eval(l.o) / a; return 1;
}
// Intersection of 2 planes
// Function Arguments:
// pi1: the first plane
// pi2: the second plane
// res: a reference to the line to store the intersection if it exists
// Return Value: 0 if no intersection, 1 if line of intersection, 2 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/utso15p6
int planePlaneIntersection(Plane3D pi1, Plane3D pi2, Line3D &res) {
  // d is the direction of the intersection line; zero means the planes are
  // parallel (2 if they are the same plane, otherwise 0)
  pt3 d = pi1.n * pi2.n; if (eq(norm(d), 0))
    return eq(abs(pi1.d / abs(pi1.n)), abs(pi2.d / abs(pi2.n))) ? 2 : 0;
  res.o = (pi2.n * pi1.d - pi1.n * pi2.d) * d / norm(d); res.d = d; return 1;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point3D.h"
#include "Line3D.h"
#include "Plane3D.h"
using namespace std;
// Functions for a 3D sphere
struct Sphere3D {
  // o: center, r: radius; the default center is the origin — spelled
  // pt3 z component explicitly for consistency with pt3(0, 0, 0) elsewhere
  pt3 o; T r; Sphere3D(T r = 0) : o(0, 0, 0), r(r) {}
  Sphere3D(pt3 o, T r) : o(o), r(r) {}
  // 1 if p is inside this sphere, 0 if on this sphere,
  // -1 if outside this sphere
  int contains(pt3 p) const { return sgn(r * r - distSq(o, p)); }
  // 1 if s is strictly inside this sphere, 0 if inside and touching this
  // sphere, -1 otherwise
  int contains(Sphere3D s) const {
    T dr = r - s.r; return lt(dr, 0) ? -1 : sgn(dr * dr - distSq(o, s.o));
  }
  // 1 if s is strictly outside this sphere, 0 if outside and touching this
  // sphere, -1 otherwise
  int disjoint(Sphere3D s) const {
    T sr = r + s.r; return sgn(distSq(o, s.o) - sr * sr);
  }
  // radial projection of p onto this sphere's surface
  pt3 proj(pt3 p) const { return o + (p - o) * r / dist(o, p); }
  // inversion of p with respect to this sphere
  pt3 inv(pt3 p) const { return o + (p - o) * r * r / distSq(o, p); }
  // Shortest distance on the sphere between the projections of a and b onto
  // this sphere
  T greatCircleDist(pt3 a, pt3 b) const { return r * ang(a, o, b); }
  // Is a-b a valid great circle segment (not on opposite sides)
  bool isGreatCircleSeg(pt3 a, pt3 b) const {
    assert(contains(a) == 0 && contains(b) == 0);
    a -= o; b -= o; return !eq(norm(a * b), 0) || lt(0, (a | b));
  }
  // Returns whether p is on the great circle segment a-b
  bool onSphSeg(pt3 p, pt3 a, pt3 b) const {
    assert(isGreatCircleSeg(a, b)); p -= o; a -= o; b -= o; pt3 n = a * b;
    // degenerate case: a and b are the same point on the sphere
    if (eq(norm(n), 0)) return eq(norm(a * p), 0) && lt(0, (a | p));
    return eq((n | p), 0) && !lt((n | a * p), 0) && !lt(0, (n | b * p));
  }
  // Returns the points of intersection (or segment of intersection) between
  // the great circle segments a-b and p-q
  vector<pt3> greatCircleSegIntersection(pt3 a, pt3 b, pt3 p, pt3 q) const {
    assert(isGreatCircleSeg(a, b) && isGreatCircleSeg(p, q));
    pt3 ab = (a - o) * (b - o), pq = (p - o) * (q - o);
    int oa = sgn(pq | (a - o)), ob = sgn(pq | (b - o));
    int op = sgn(ab | (p - o)), oq = sgn(ab | (q - o));
    // proper crossing: the endpoints of each segment are on opposite sides
    // of the other segment's great circle
    if (oa != ob && op != oq && oa != op)
      return vector<pt3>{proj(o + ab * pq * op)};
    // otherwise collect endpoints that lie on the other segment
    vector<pt3> ret; if (onSphSeg(p, a, b)) ret.push_back(p);
    if (onSphSeg(q, a, b)) ret.push_back(q);
    if (onSphSeg(a, p, q)) ret.push_back(a);
    if (onSphSeg(b, p, q)) ret.push_back(b);
    sort(ret.begin(), ret.end());
    ret.erase(unique(ret.begin(), ret.end()), ret.end()); return ret;
  }
  // Returns the angle between 3 points projected onto the sphere,
  // positive angle if a-b-c forms a ccw turn, negative if cw, 0 if collinear
  T angSph(pt3 a, pt3 b, pt3 c) const {
    a -= o; b -= o; c -= o; T theta = ang(b * a, pt3(0, 0, 0), b * c);
    return (a * b | c) < 0 ? -theta : theta;
  }
  // Returns the surface area of a polygon projected onto the sphere,
  // inside area if points are in ccw order, outside area if points are
  // in cw order
  T surfaceAreaOnSph(const vector<pt3> &poly) const {
    // spherical excess: sum of interior angles minus (n - 2) pi, times r^2
    int n = poly.size(); T PI = acos(T(-1)), a = -(n - 2) * PI;
    for (int i = 0; i < n; i++) {
      T ang = angSph(poly[i], poly[(i + 1) % n], poly[(i + 2) % n]);
      if (ang < 0) ang += 2 * PI;
      a += ang;
    }
    return r * r * a;
  }
};
// Determine the intersection of a sphere and a line
// Function Arguments:
// s: the sphere
// l: the line
// Return Value: the points of intersection (if any) of the sphere and
// the line, guaranteed to be sorted based on projection on the line
// Time Complexity: O(1)
// Memory Complexity: O(1)
vector<pt3> sphereLineIntersection(Sphere3D s, Line3D l) {
  // h2 is the squared half-chord length; p is the closest point on l to the
  // sphere's center
  vector<pt3> ret; T h2 = s.r * s.r - l.distSq(s.o); pt3 p = l.proj(s.o);
  if (eq(h2, 0)) ret.push_back(p);  // tangent: a single point
  else if (lt(0, h2)) {
    // secant: two points symmetric about the projection of the center
    pt3 h = l.d * sqrt(h2) / abs(l.d);
    ret.push_back(p - h); ret.push_back(p + h);
  }
  return ret;
}
// Determine the intersection of a sphere and a plane
// Function Arguments:
// s: the sphere
// pi: the plane
// res: a pair of pt3 and T, representing the center of the circle, and the
// radius of the circle of intersection if it exists, guaranteed to be on
// the plane pi
// Return Value: a boolean indicating whether an intersection exists or not
// Time Complexity: O(1)
// Memory Complexity: O(1)
bool spherePlaneIntersection(Sphere3D s, Plane3D pi, pair<pt3, T> &res) {
  // d2 is the squared radius of the circle of intersection
  T d2 = s.r * s.r - pi.distSq(s.o); if (lt(d2, 0)) return false;
  // the circle's center is the projection of the sphere's center onto pi
  res.first = pi.proj(s.o); res.second = sqrt(max(d2, T(0))); return true;
}
// Determine the surface area and volume of the sphere cap above the
// intersection of a sphere and a half-space defined by the space above
// a plane (surface area does not include the base of the cap)
// Function Arguments:
// s: the sphere
// pi: the plane with the half-space defined as the space above the plane
// Return Value: a pair containing the surface area and the volume of the
// sphere above the intersection of the sphere and the half-space
// Time Complexity: O(1)
// Memory Complexity: O(1)
pair<T, T> sphereHalfSpaceIntersectionSAV(Sphere3D s, Plane3D pi) {
  // h is the height of the spherical cap above the plane
  T d2 = s.r * s.r - pi.distSq(s.o);
  T h = lt(d2, 0) ? T(0) : s.r - abs(pi.dist(s.o));
  // if the center is above the plane, the cap is the larger portion
  if (pi.isAbove(s.o) > 0) h = s.r * 2 - h;
  T PI = acos(T(-1));
  // lateral cap area 2 pi r h; cap volume pi h^2 (3r - h) / 3
  return make_pair(PI * 2 * s.r * h, PI * h * h / 3 * (3 * s.r - h));
}
// Determine the intersection of two spheres
// Function Arguments:
//   s1: the first sphere
//   s2: the second sphere
// c: a tuple containing the plane the circle lies on (pointing away
// from s1), the center of the circle, and the radius
// Return Value: 0 if no intersection, 2 if identical spheres, 1 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
int sphereSphereIntersection(Sphere3D s1, Sphere3D s2,
                             tuple<Plane3D, pt3, T> &c) {
  pt3 d = s2.o - s1.o; T d2 = norm(d);
  // concentric spheres: identical if the radii match, otherwise disjoint
  if (eq(d2, 0)) return eq(s1.r, s2.r) ? 2 : 0;
  // pd / d2 is the fraction along d where the radical plane lies; h2 is the
  // squared radius of the circle of intersection
  T pd = (d2 + s1.r * s1.r - s2.r * s2.r) / 2, h2 = s1.r * s1.r - pd * pd / d2;
  if (lt(h2, 0)) return 0;
  pt3 o = s1.o + d * pd / d2;
  c = make_tuple(Plane3D(d, o), o, sqrt(max(h2, T(0)))); return 1;
}
// Determine the surface area and volume of the intersection of two spheres
// Function Arguments:
// s1: the first sphere
// s2: the second sphere
// Return Value: a pair containing the surface area and volume of the
// intersection of the two spheres
// Time Complexity: O(1)
// Memory Complexity: O(1)
pair<T, T> sphereSphereIntersectionSAV(Sphere3D s1, Sphere3D s2) {
  pt3 d = s2.o - s1.o; T d2 = norm(d), dr = abs(s1.r - s2.r), PI = acos(T(-1));
  // one sphere contains the other: the intersection is the smaller sphere
  if (!lt(dr * dr, d2)) {
    T r = min(s1.r, s2.r);
    return make_pair(PI * 4 * r * r, PI * 4 * r * r * r / 3);
  }
  // spheres too far apart: no intersection
  T sr = s1.r + s2.r; if (lt(sr * sr, d2)) return make_pair(T(0), T(0));
  // pi is the radical plane; the lens is the sum of the two spherical caps
  // on either side of it
  T pd = (d2 + s1.r * s1.r - s2.r * s2.r) / 2;
  Plane3D pi = Plane3D(d, s1.o + d * pd / d2);
  pair<T, T> a = sphereHalfSpaceIntersectionSAV(s1, pi);
  pair<T, T> b = sphereHalfSpaceIntersectionSAV(s2, Plane3D(-pi.n, -pi.d));
  a.first += b.first; a.second += b.second; return a;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point3D.h"
using namespace std;
// Functions for a 3D line (represented in parametric form o + kd for real k)
struct Line3D {
  // o: a point on the line, d: the line's direction vector
  pt3 o, d;
  // points p and q
  Line3D(pt3 p = pt3(0, 0, 0), pt3 q = pt3(0, 0, 0)) : o(p), d(q - p) {}
  // whether p lies on this line (cross product with d is the zero vector)
  bool onLine(pt3 p) const { return eq(norm(d * (p - o)), 0); }
  // squared distance from p to this line
  T distSq(pt3 p) const { return norm(d * (p - o)) / norm(d); }
  // distance from p to this line
  T dist(pt3 p) const { return sqrt(distSq(p)); }
  // this line translated by the vector p
  Line3D translate(pt3 p) const { return Line3D(o + p, d); }
  // orthogonal projection of p onto this line
  pt3 proj(pt3 p) const { return o + d * (d | (p - o)) / norm(d); }
  // reflection of p across this line
  pt3 refl(pt3 p) const { return proj(p) * T(2) - p; }
  // compares points by projection (3 way comparison)
  int cmpProj(pt3 p, pt3 q) const { return sgn((d | p) - (d | q)); }
};
// Closest distance between 2 lines
// Function Arguments:
// l1: the first line
// l2: the second line
// Return Value: the closest distance between 2 lines
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Computes the shortest distance between the two lines; for parallel lines
// this is the point-to-line distance, otherwise it is the length of the
// offset between the lines projected onto their common normal
T lineLineDist(Line3D l1, Line3D l2) {
  pt3 n = l1.d * l2.d;
  if (eq(norm(n), 0)) return l1.dist(l2.o);
  pt3 between = l2.o - l1.o;
  return abs(between | n) / abs(n);
}
// Closest point on the line l1 to the line l2
// If l1 and l2 are parallel, it returns the projection of (0, 0, 0) on l1
// Function Arguments:
// l1: the first line
// l2: the second line
// Return Value: the closest point on the line l1 to the line l2
// Time Complexity: O(1)
// Memory Complexity: O(1)
pt3 closestOnL1ToL2(Line3D l1, Line3D l2) {
  // parallel lines: fall back to projecting the origin onto l1
  pt3 n = l1.d * l2.d; if (eq(norm(n), 0)) return l1.proj(pt3(0, 0, 0));
  // n2 is perpendicular to both l2's direction and the common normal, so
  // intersecting l1 with the plane through l2 with normal n2 gives the point
  pt3 n2 = l2.d * n; return l1.o + l1.d * ((l2.o - l1.o) | n2) / (l1.d | n2);
}
// Intersection of 2 lines
// Function Arguments:
// l1: the first line
// l2: the second line
// res: a reference to the point to store the intersection if it exists
// Return Value: 0 if no intersection, 1 if point of intersection, 2 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Intersection of 2 lines
// Function Arguments:
//   l1: the first line
//   l2: the second line
//   res: a reference to the point to store the intersection if it exists
// Return Value: 0 if no intersection, 1 if point of intersection, 2 otherwise
int lineLineIntersection(Line3D l1, Line3D l2, pt3 &res) {
  // parallel: 2 if the lines coincide, 0 if they never meet
  pt3 n = l1.d * l2.d; if (eq(norm(n), 0)) return eq(l1.dist(l2.o), 0) ? 2 : 0;
  // skew lines (non-parallel, non-coplanar) do not intersect; the squared
  // distance between the lines along the common normal must be zero
  T sd = (l2.o - l1.o) | n; if (!eq(sd * sd / norm(n), 0)) return 0;
  res = closestOnL1ToL2(l1, l2); return 1;
}
// Determines if a point is on a line segment
// Function Arguments:
// p: the point to check if on the line segment
// a: one endpoint of the line segment
// b: the other endpoint of the line segment
// Return Value: true if p is on the line segment a-b, false otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Returns true if and only if p lies on the (closed) segment a-b
bool onSeg(pt3 p, pt3 a, pt3 b) {
  // degenerate segment: a single point
  if (a == b) return p == a;
  Line3D l(a, b);
  // p must lie on the supporting line ...
  if (!l.onLine(p)) return false;
  // ... and project between a and b (inclusive)
  return l.cmpProj(a, p) <= 0 && l.cmpProj(p, b) <= 0;
}
// Determine the intersection of two line segments
// Function Arguments:
// a: one endpoint of the first line segment
// b: the other endpoint of the first line segment
// p: one endpoint of the second line segment
// q: the other endpoint of the second line segment
// Return Value: if the line segments do not intersect, an empty vector
// of points; if the line segments intersect at a point, a vector containing
// the point of intersection; if the line segments have a line segment of
// intersection, a vector containing the two endpoints of the
// line segment intersection (it can return more if there is precision error)
// Determine the intersection of the two line segments a-b and p-q; returns
// an empty vector (no intersection), one point, or the endpoints of the
// overlap segment (possibly more under precision error)
vector<pt3> segSegIntersection(pt3 a, pt3 b, pt3 p, pt3 q) {
  vector<pt3> ret; if (a == b) {
    // first segment degenerates to a point
    if (onSeg(a, p, q)) ret.push_back(a);
  } else if (p == q) {
    // second segment degenerates to a point
    if (onSeg(p, a, b)) ret.push_back(p);
  } else {
    pt3 inter; Line3D l1(a, b), l2(p, q);
    int cnt = lineLineIntersection(l1, l2, inter);
    if (cnt == 1) {
      // the supporting lines meet at one point; it only counts if it lies
      // on both segments (the lines may cross outside the segment ranges)
      if (onSeg(inter, a, b) && onSeg(inter, p, q)) ret.push_back(inter);
    } else if (cnt == 2) {
      // collinear segments: the overlap is bounded by contained endpoints
      if (onSeg(p, a, b)) ret.push_back(p);
      if (onSeg(q, a, b)) ret.push_back(q);
      if (onSeg(a, p, q)) ret.push_back(a);
      if (onSeg(b, p, q)) ret.push_back(b);
    }
  }
  sort(ret.begin(), ret.end());
  ret.erase(unique(ret.begin(), ret.end()), ret.end()); return ret;
}
// Finds the closest point on a line segment to another point
// Function Arguments
// p: the reference point
// a: one endpoint of the line segment
// b: the other endpoint of the line segment
// Return Value: the closest point to p on the line segment a-b
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Returns the point on segment a-b closest to p: the projection of p when
// it falls strictly inside the segment, otherwise the nearer endpoint
pt3 closestPtOnSeg(pt3 p, pt3 a, pt3 b) {
  if (a != b) {
    Line3D l(a, b);
    bool between = l.cmpProj(a, p) < 0 && l.cmpProj(p, b) < 0;
    if (between) return l.proj(p);
  }
  T da = dist(p, a), db = dist(p, b);
  return lt(da, db) ? a : b;
}
// Finds the distance to the closest point on a line segment to another point
// Function Arguments
// p: the reference point
// a: one endpoint of the line segment
// b: the other endpoint of the line segment
// Return Value: the distance to the closest point to p on the line segment a-b
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Returns the distance from p to the closest point on segment a-b: the
// perpendicular distance when p projects strictly inside the segment,
// otherwise the distance to the nearer endpoint
T ptSegDist(pt3 p, pt3 a, pt3 b) {
  if (a != b) {
    Line3D l(a, b);
    bool between = l.cmpProj(a, p) < 0 && l.cmpProj(p, b) < 0;
    if (between) return l.dist(p);
  }
  T da = dist(p, a), db = dist(p, b);
  return min(da, db);
}
// Finds the closest distance between two line segments
// Function Arguments
// a: one endpoint of the first line segment
// b: the other endpoint of the first line segment
// p: one endpoint of the second line segment
// q: the other endpoint of the second line segment
// Return Value: the closest distance between the two line segments
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Returns the closest distance between segments a-b and p-q
T segSegDist(pt3 a, pt3 b, pt3 p, pt3 q) {
  // intersecting segments are at distance zero
  if (!segSegIntersection(a, b, p, q).empty()) return 0;
  // otherwise the closest pair involves an endpoint of one of the segments
  T d1 = ptSegDist(p, a, b), d2 = ptSegDist(q, a, b);
  T d3 = ptSegDist(a, p, q), d4 = ptSegDist(b, p, q);
  return min(min(d1, d2), min(d3, d4));
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../utils/Random.h"
#include "Point.h"
#include "Circle.h"
using namespace std;
// Determines the minimum enclosing circle of a set of points
// Function Arguments:
// P: the points
// Return Value: the minimum enclosing circle of P
// In practice, has a large constant
// Time Complexity: O(N) expected
// Memory Complexity: O(N)
// Tested:
// https://www.spoj.com/problems/QCJ4/
// https://open.kattis.com/problems/starsinacan
Circle minEnclosingCircle(vector<pt> P) {
  // Welzl-style incremental algorithm; the random order makes the expected
  // amount of rebuilding per point constant
  shuffle(P.begin(), P.end(), rng); Circle c(P[0], 0);
  for (int i = 0; i < int(P.size()); i++) if (lt(c.r, dist(P[i], c.o))) {
    // P[i] is outside the current circle, so it must lie on the boundary
    c = Circle(P[i], 0);
    for (int j = 0; j < i; j++) if (lt(c.r, dist(P[j], c.o))) {
      // P[i] and P[j] are both on the boundary; start from their diameter
      pt p = (P[i] + P[j]) / T(2); c = Circle(p, dist(P[i], p));
      for (int k = 0; k < j; k++) if (lt(c.r, dist(P[k], c.o)))
        // three boundary points determine the circle uniquely
        c = circumcircle(P[i], P[j], P[k]);
    }
  }
  return c;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Angle.h"
using namespace std;
// Computes the minimum area from 3 distinct points out of a set of N points
// Angle::pivot is modified
// Constructor Arguments:
// P: the vector of points
// Fields:
// PA: the first point in the minimum area triangle
// PB: the second point in the minimum area triangle
// PC: the third point in the minimum area triangle
// minArea2: twice the area of the minimum area triangle
// hull: the points in the convex hull of P
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(N^2 log N)
// Memory Complexity: O(N)
// Tested:
// Fuzz Tested
struct MinTriangleArea {
  // PA, PB, PC: vertices of the best triangle; minArea2: twice its area
  pt PA, PB, PC; T minArea2;
  MinTriangleArea(const vector<pt> &P) : minArea2(numeric_limits<T>::max()) {
    int n = P.size(); if (n < 3) return;
    vector<Angle> A(n); for (int i = 0; i < n; i++) A[i] = Angle(P[i]);
    for (int i = 0; i < n; i++) {
      // sort all points by angle around P[i]; for a fixed edge P[i]-b, the
      // point minimizing the triangle area is angularly adjacent to b
      Angle::setPivot(P[i]); sort(A.begin(), A.end());
      for (int j = 0; j < n - 1; j++) {
        // NOTE(review): the cyclic indices only cover A[0..n-2]; the pivot
        // P[i] is assumed to sort to A[n-1] — verify against Angle's ordering
        pt b = A[j].p, c = A[(j + 1) % (n - 1)].p;
        T a2 = area2(P[i], b, c); if (a2 < 0) { swap(b, c); a2 = -a2; }
        if (a2 < minArea2) { PA = P[i]; PB = b; PC = c; minArea2 = a2; }
      }
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Computes the Delaunay Triangulation of a set of distinct points
// If all points are collinear, there is no triangulation
// If there are 4 or more points on the same circle, the triangulation is
// ambiguous, otherwise it is unique
// Each circumcircle does not completely contain any of the input points
// Constructor Arguments:
// P: the distinct points
// Fields:
// tri: a vector of arrays of 3 points, representing a triangle in
// the triangulation, points given in ccw order
// In practice, has a moderate constant
// Time Complexity:
// construction: O(N log N)
// Memory Complexity: O(N)
// Tested:
// https://dmoj.ca/problem/cco08p6
// https://codeforces.com/problemsets/acmsguru/problem/99999/383
struct DelaunayTriangulation {
  // sentinel point used for unassigned quad-edge origins
  static constexpr const pt inf = pt(numeric_limits<T>::max(),
                                     numeric_limits<T>::max());
  // Quad-edge record (Guibas-Stolfi); each physical edge is stored as a
  // group of 4 directed edges linked by rot
  struct Quad {
    static vector<Quad> V;
    // rot: index of the dual (rotated) edge; o: next edge around the origin
    // (doubles as the free-list link when recycled); p: origin point
    int rot, o; pt p; Quad(int rot) : rot(rot), o(-1), p(inf) {}
    int &r() { return V[rot].rot; }        // the reversed edge
    pt &f() { return V[r()].p; }           // the destination point
    int prv() { return V[V[rot].o].rot; }  // previous edge around the origin
    int nxt() { return V[r()].prv(); }     // next edge along the left face
  };
  int head; vector<Quad> &V = Quad::V; vector<array<pt, 3>> tri;
  int makeQuad(int rot) { V.emplace_back(rot); return int(V.size()) - 1; }
  // whether p is strictly inside the circumcircle of a, b, c
  bool circ(pt p, pt a, pt b, pt c) {
    T p2 = norm(p), A = norm(a) - p2, B = norm(b) - p2, C = norm(c) - p2;
    return lt(0, area2(p, a, b) * C + area2(p, b, c) * A + area2(p, c, a) * B);
  }
  // allocates (or reuses from the free list rooted at head) an edge a -> b
  int makeEdge(pt a, pt b) {
    int r = ~head ? head : makeQuad(makeQuad(makeQuad(makeQuad(-1))));
    head = V[r].o; V[V[r].r()].r() = r; for (int i = 0; i < 4; i++) {
      r = V[r].rot; V[r].o = i & 1 ? r : V[r].r();
    }
    V[r].p = a; V[r].f() = b; return r;
  }
  // splices the origin rings of edges a and b (Guibas-Stolfi splice)
  void splice(int a, int b) {
    swap(V[V[V[a].o].rot].o, V[V[V[b].o].rot].o); swap(V[a].o, V[b].o);
  }
  // adds an edge from the destination of a to the origin of b
  int connect(int a, int b) {
    int q = makeEdge(V[a].f(), V[b].p);
    splice(q, V[a].nxt()); splice(V[q].r(), b); return q;
  }
  // whether edge a is above the base edge (candidate for the merge step)
  bool valid(int a, int base) {
    return ccw(V[a].f(), V[base].f(), V[base].p) > 0;
  }
  // divide and conquer over the x-sorted points P[lo..hi]
  pair<int, int> rec(const vector<pt> &P, int lo, int hi) {
    int k = hi - lo + 1; if (k <= 3) {
      // base case: a single edge (k == 2) or a triangle (k == 3)
      int a = makeEdge(P[lo], P[lo + 1]), b = makeEdge(P[lo + 1], P[hi]);
      if (k == 2) return make_pair(a, V[a].r());
      splice(V[a].r(), b); int side = sgn(ccw(P[lo], P[lo + 1], P[hi]));
      int c = side ? connect(b, a) : -1;
      return make_pair(side < 0 ? V[c].r() : a, side < 0 ? c : V[b].r());
    }
    int a, b, ra, rb, mid = lo + (hi - lo) / 2;
    tie(ra, a) = rec(P, lo, mid); tie(b, rb) = rec(P, mid + 1, hi);
    // advance a and b down to the common lower tangent of the two halves
    while ((ccw(V[b].p, V[a].f(), V[a].p) < 0 && (a = V[a].nxt()))
           || (ccw(V[a].p, V[b].f(), V[b].p) > 0 && (b = V[V[b].r()].o)));
    int base = connect(V[b].r(), a); if (V[a].p == V[ra].p) ra = V[base].r();
    if (V[b].p == V[rb].p) rb = base;
    // merge: rise from the base edge, deleting candidate edges that fail
    // the empty-circumcircle (Delaunay) test, recycling them via head
    while (true) {
      int l = V[V[base].r()].o; if (valid(l, base))
        while (circ(V[V[l].o].f(), V[base].f(), V[base].p, V[l].f())) {
          int t = V[l].o; splice(l, V[l].prv());
          splice(V[l].r(), V[V[l].r()].prv()); V[l].o = head; head = l; l = t;
        }
      int r = V[base].prv(); if (valid(r, base))
        while (circ(V[V[r].prv()].f(), V[base].f(), V[base].p, V[r].f())) {
          int t = V[r].prv(); splice(r, V[r].prv());
          splice(V[r].r(), V[V[r].r()].prv()); V[r].o = head; head = r; r = t;
        }
      if (!valid(l, base) && !valid(r, base)) break;
      // pick the candidate whose circumcircle does not contain the other
      if (!valid(l, base) || (valid(r, base)
          && circ(V[r].f(), V[r].p, V[l].f(), V[l].p)))
        base = connect(r, V[base].r());
      else base = connect(V[base].r(), V[l].r());
    }
    return make_pair(ra, rb);
  }
  DelaunayTriangulation(vector<pt> P) : head(-1) {
    sort(P.begin(), P.end()); assert(unique(P.begin(), P.end()) == P.end());
    V.reserve(P.size() * 16); if (int(P.size()) < 2) return;
    int e = rec(P, 0, int(P.size()) - 1).first, qi = 0, ind = 0;
    vector<bool> vis(V.size(), false); vector<int> q{e}; q.reserve(V.size());
    // walk to a hull edge, then flood fill faces to collect the triangles
    while (ccw(V[V[e].o].f(), V[e].f(), V[e].p) < 0) e = V[e].o;
    auto add = [&] {
      int c = e; do {
        if (ind % 3 == 0) tri.emplace_back();
        tri.back()[ind++ % 3] = V[c].p;
        vis[c] = true; q.push_back(V[c].r()); c = V[c].nxt();
      } while (c != e);
    };
    // the first face traversed is the outer face; discard its vertices
    add(); tri.clear(); ind = 0; tri.reserve(V.size() / 3 + 1);
    while (qi < int(q.size())) if (!vis[e = q[qi++]]) add();
    assert(ind % 3 == 0); V = vector<Quad>();
  }
};
vector<DelaunayTriangulation::Quad> DelaunayTriangulation::Quad::V
    = vector<DelaunayTriangulation::Quad>();
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Line.h"
using namespace std;
// Helper struct for bentleyOttmann
struct Seg {
  // X: current sweep-line x coordinate (shared by all Segs)
  // p, q: endpoints with p <= q; i: original segment index
  // isQuery: true for dummy segments used only to search the active set
  static T X; mutable pt p, q; mutable int i; bool isQuery;
  // y coordinate of the segment at the sweep line, kept as an unreduced
  // fraction (first / second) to avoid division: for v = q - p,
  // y = (cross(v, p) + v.y * X) / v.x, and v.x > 0 since p < q
  pair<T, T> y() const {
    pt v = q - p; return make_pair(cross(v, p) + v.y * X, v.x);
  }
  Seg(pt p, pt q, int i) : p(p), q(q), i(i), isQuery(false) {
    // vertical segments are handled separately by the sweep
    assert(!eq(p.x, q.x)); if (p > q) swap(this->p, this->q);
  }
  // query segment: a single y value at the current sweep position
  Seg(T Y) : p(X, Y), q(p), i(-1), isQuery(true) {}
  // orders segments by their y at the sweep line; comparisons are
  // cross-multiplied so no division is performed
  bool operator < (const Seg &s) const {
    if (isQuery) {
      pair<T, T> p2 = s.y(); return lt(p.y * p2.second, p2.first);
    } else if (s.isQuery) {
      pair<T, T> p1 = y(); return lt(p1.first, s.p.y * p1.second);
    }
    pair<T, T> p1 = y(), p2 = s.y();
    return lt(p1.first * p2.second, p2.first * p1.second);
  }
};
T Seg::X = 0;
// Helper struct for bentleyOttmann
// Helper struct for bentleyOttmann
// An event at point p of the given type involving segments i and j;
// operator > is defined so a priority_queue with greater pops events in
// increasing x, then increasing type, then increasing y
struct SegEvent {
  pt p; int type, i, j;
  SegEvent(pt p, int type, int i, int j) : p(p), type(type), i(i), j(j) {}
  bool operator > (const SegEvent &e) const {
    // coordinates use the epsilon comparators; type is a small exact int,
    // so plain integer comparison is used for it
    if (!eq(e.p.x, p.x)) return lt(e.p.x, p.x);
    if (e.type != type) return e.type < type;
    return lt(e.p.y, p.y);
  }
};
// Helper struct for bentleyOttmann
struct VerticalSeg {
  // a vertical segment spanning y in [lo, hi] with original index i
  T lo, hi; int i; VerticalSeg(T lo, T hi, int i) : lo(lo), hi(hi), i(i) {}
  // ordered by upper endpoint so lower_bound(lo) finds segments with hi >= lo
  bool operator < (const VerticalSeg &vs) const { return lt(hi, vs.hi); }
};
// Runs a callback on all pairs of intersecting line segments (proper or not)
// Template Arguments:
// F: the type of the function f
// Function Arguments:
// segs: a vector of pairs of the endpoints of the line segments
// f(i, j): the function to run a callback on for each pair of intersecting
// line segments, where i and j are the indices of the intersecting
// line segments; each unordered pair (i, j) is passed at most once
// In practice, has a moderate constant
// Time Complexity: O((N + K) log N) for N line segments and K intersections
// Memory Complexity: O(N + K)
// Tested:
// Fuzz Tested
// https://open.kattis.com/problems/polygon
// https://open.kattis.com/problems/scholarslawn
template <class F> void bentleyOttmann(const vector<pair<pt, pt>> &segs, F f) {
  // event types: 0 = left endpoint (non-vertical), 1 = vertical segment,
  // 2 = crossing of two active segments, 3 = end of the vertical segments
  // at this x (4 - isEq with isEq == 1), 4 = right endpoint (non-vertical);
  // events are processed in increasing x, breaking ties by type then y
  std::priority_queue<SegEvent, vector<SegEvent>, greater<SegEvent>> events;
  int n = segs.size(); for (int i = 0; i < n; i++) {
    pt a, b; tie(a, b) = segs[i]; if (a > b) swap(a, b);
    int isEq = eq(a.x, b.x);
    events.emplace(a, isEq, i, i); events.emplace(b, 4 - isEq, i, i);
  }
  // active: the non-vertical segments crossing the sweep line, ordered by
  // their y coordinate at the current x
  multiset<Seg> active; using iter = decltype(active)::iterator;
  vector<iter> ptr(n, active.end()); set<pair<int, int>> seen;
  // ensures each unordered pair is reported at most once
  auto checkSeen = [&] (int i, int j) {
    if (seen.count(make_pair(i, j)) || seen.count(make_pair(j, i)))
      return true;
    seen.emplace(i, j); return false;
  };
  // tests the adjacent active segments a and b for a crossing ahead of the
  // sweep line; a type 2 event is queued so their order can be swapped
  auto checkInter = [&] (iter a, iter b) {
    vector<pt> p = segSegIntersection(a->p, a->q, b->p, b->q);
    if (int(p.size()) == 1 && cross(a->q - a->p, b->q - b->p) < 0) {
      if (!checkSeen(a->i, b->i)) f(a->i, b->i);
      events.emplace(p[0], 2, a->i, b->i);
    }
  };
  // reports intersections between segment i and every active segment whose
  // y at the sweep line lies in [lo, hi]
  auto checkRange = [&] (int i, T lo, T hi) {
    auto it = active.lower_bound(Seg(lo));
    auto check = [&] {
      pair<T, T> y = it->y(); return !lt(hi * y.second, y.first);
    };
    for (; it != active.end() && check(); it++) {
      pt a, b; tie(a, b) = segs[i];
      if (segSegIntersects(a, b, it->p, it->q) && !checkSeen(i, it->i))
        f(i, it->i);
    }
  };
  multiset<VerticalSeg> vertical; while (!events.empty()) {
    SegEvent e = events.top(); events.pop(); Seg::X = e.p.x; if (e.type == 0) {
      // left endpoint: insert and test against the new neighbors
      checkRange(e.i, e.p.y, e.p.y);
      ptr[e.i] = active.emplace(segs[e.i].first, segs[e.i].second, e.i);
      if (ptr[e.i] != active.begin()) checkInter(prev(ptr[e.i]), ptr[e.i]);
      if (next(ptr[e.i]) != active.end()) checkInter(ptr[e.i], next(ptr[e.i]));
    } else if (e.type == 1) {
      // vertical segment: test against active segments in its y range and
      // against the other vertical segments overlapping it at this x
      T lo = segs[e.i].first.y, hi = segs[e.i].second.y;
      if (lo > hi) swap(lo, hi);
      checkRange(e.i, lo, hi);
      auto it = vertical.lower_bound(VerticalSeg(lo, lo, e.i));
      for (; it != vertical.end(); it++) if (!checkSeen(e.i, it->i))
        f(e.i, it->i);
      vertical.emplace(lo, hi, e.i);
    } else if (e.type == 2) {
      // crossing: swap the two segments if they are still adjacent, then
      // test the newly adjacent pairs
      if (next(ptr[e.i]) != ptr[e.j]) continue;
      if (cross(ptr[e.i]->q - ptr[e.i]->p, ptr[e.j]->q - ptr[e.j]->p) < 0) {
        swap(ptr[e.i], ptr[e.j]); swap(ptr[e.i]->i, ptr[e.j]->i);
        swap(ptr[e.i]->p, ptr[e.j]->p); swap(ptr[e.i]->q, ptr[e.j]->q);
        if (ptr[e.j] != active.begin()) checkInter(prev(ptr[e.j]), ptr[e.j]);
        if (next(ptr[e.i]) != active.end())
          checkInter(ptr[e.i], next(ptr[e.i]));
      }
    } else if (e.type == 4) {
      // right endpoint: the segment's neighbors become adjacent
      if (ptr[e.i] != active.begin() && next(ptr[e.i]) != active.end())
        checkInter(prev(ptr[e.i]), next(ptr[e.i]));
      active.erase(ptr[e.i]); ptr[e.i] = active.end();
      checkRange(e.i, e.p.y, e.p.y);
    } else vertical.clear();  // type 3: verticals at this x are finished
  }
}
// Transforms a set of line segments into another set of line segments
// such that the union of the line segments remains the same, and any
// intersection between any pair of segments is an endpoint of both segments
// Function Arguments:
// segs: a vector of pairs of the endpoints of the line segments
// Return Value: the resulting vector of pairs of the endpoints of the
// line segments after the transformation
// In practice, has a moderate constant
// Time Complexity: O((N + K) log N) for N line segments and K intersections
// Memory Complexity: O(N + K)
// Tested:
// https://open.kattis.com/problems/scholarslawn
vector<pair<pt, pt>> segArrangement(const vector<pair<pt, pt>> &segs) {
  int n = segs.size(); vector<vector<pt>> ptsOnSegs(n); set<pair<pt, pt>> ret;
  // collect every intersection point on each of the segments involved
  bentleyOttmann(segs, [&] (int i, int j) {
    for (auto &&p : segSegIntersection(segs[i].first, segs[i].second,
                                       segs[j].first, segs[j].second)) {
      ptsOnSegs[i].push_back(p);
      ptsOnSegs[j].push_back(p);
    }
  });
  for (int i = 0; i < n; i++) {
    // cut segment i at all of its intersection points, emitting each
    // resulting piece exactly once (in either orientation)
    ptsOnSegs[i].push_back(segs[i].first);
    ptsOnSegs[i].push_back(segs[i].second);
    sort(ptsOnSegs[i].begin(), ptsOnSegs[i].end());
    ptsOnSegs[i].erase(unique(ptsOnSegs[i].begin(), ptsOnSegs[i].end()),
                       ptsOnSegs[i].end());
    for (int j = 0; j < int(ptsOnSegs[i].size()) - 1; j++) {
      pt a = ptsOnSegs[i][j], b = ptsOnSegs[i][j + 1];
      if (!ret.count(make_pair(a, b)) && !ret.count(make_pair(b, a)))
        ret.emplace(a, b);
    }
  }
  return vector<pair<pt, pt>>(ret.begin(), ret.end());
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Supports adding lines in the form f(x) = mx + b, or line segments
// in the form of f(x) = mx + b over l <= x <= r, and finding
// the maximum value of f(x) where all possible value of x are
// known beforehand
// Template Arguments:
// T: the type of the slope (m) and intercept (b) of the line, as well as
// the type of the function argument (x)
// Cmp: the comparator to compare two f(x) values,
// convention is same as std::priority_queue in STL
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// xs: a vector of type T of the possible x values for each query
// cmp: an instance of the Cmp struct
// inf: a value for positive infinity, must be negatable
// Functions:
// addLine(m, b): adds a line in the form f(x) = mx + b to the set of lines
// addLine(m, b, l, r): adds a line segment in the form f(x) = mx + b
// where l <= x <= r, to the set of lines
// getMax(x): finds the maximum value of f(x) (based on the comparator)
// for all inserted lines, x must be in the vector xs
// clear(): removes all lines from the seg
// In practice, has a moderate constant, performance compared to
// IncrementalConvexHullTrick (which uses multiset) and
// IncrementalConvexHullTrickSqrtBuffer can vary, faster than
// SparseLiChaoTree
// Time Complexity:
// constructor: O(1)
// addLine, getMax: O(log N) for the range [0, N)
// addLineSegment: O((log N) ^ 2) for the range [0, N)
// clear: O(N) for the range [0, N)
// Memory Complexity: O(N) for the range [0, N)
// Tested:
// https://judge.yosupo.jp/problem/line_add_get_min
// https://judge.yosupo.jp/problem/segment_add_get_min
// https://open.kattis.com/problems/longestlife
// https://www.spoj.com/problems/CHTPRAC/
template <class T, class Cmp = less<T>> struct LiChaoTree {
  using Line = pair<T, T>; int N; Cmp cmp; T INF; vector<Line> TR; vector<T> X;
  // Evaluates the line ln at the i-th stored x coordinate
  T eval(Line ln, int i) const { return ln.first * X[i] + ln.second; }
  // True if line a is at least as good as line b (by cmp) at both index
  // endpoints l and r
  bool majorize(Line a, Line b, int l, int r) {
    bool winsLeft = !cmp(eval(a, l), eval(b, l));
    bool winsRight = !cmp(eval(a, r), eval(b, r));
    return winsLeft && winsRight;
  }
  // Index of the first coordinate in X that is >= x
  int cInd(T x) const {
    return lower_bound(X.begin(), X.end(), x) - X.begin();
  }
  // Index of the last coordinate in X that is <= x
  int fInd(T x) const {
    return upper_bound(X.begin(), X.end(), x) - X.begin() - 1;
  }
  // xs: the possible query x values; cmp: the comparator; inf: a (negatable)
  // value for positive infinity
  LiChaoTree(const vector<T> &xs, Cmp cmp = Cmp(),
             T inf = numeric_limits<T>::max())
      : cmp(cmp), INF(min(inf, -inf, cmp)), X(xs) {
    // sort and deduplicate the coordinates, then size the flat tree
    sort(X.begin(), X.end()); X.erase(unique(X.begin(), X.end()), X.end());
    N = X.size();
    TR.assign(N == 0 ? 0 : 1 << __lg(N * 4 - 1), Line(T(), INF));
  }
  // Inserts ln into node k, which covers indices [tl, tr]
  void addLine(int k, int tl, int tr, Line ln) {
    // keep whichever line dominates both endpoints at this node
    if (majorize(ln, TR[k], tl, tr)) swap(ln, TR[k]);
    if (majorize(TR[k], ln, tl, tr)) return;
    // normalize so the stored line wins at the left endpoint
    if (cmp(eval(TR[k], tl), eval(ln, tl))) swap(ln, TR[k]);
    int mid = tl + (tr - tl) / 2;
    if (!cmp(eval(ln, mid), eval(TR[k], mid))) {
      // the lines cross in the left half: keep the winner at mid here and
      // push the other line down to the left child
      swap(ln, TR[k]); addLine(k * 2, tl, mid, ln);
    } else {
      // otherwise the loser can only win somewhere in the right half
      addLine(k * 2 + 1, mid + 1, tr, ln);
    }
  }
  // Inserts ln over the index range [l, r] within node k covering [tl, tr]
  void addLineSegment(int k, int tl, int tr, int l, int r, Line ln) {
    if (r < tl || tr < l) return;
    if (l <= tl && tr <= r) { addLine(k, tl, tr, ln); return; }
    int mid = tl + (tr - tl) / 2;
    addLineSegment(k * 2, tl, mid, l, r, ln);
    addLineSegment(k * 2 + 1, mid + 1, tr, l, r, ln);
  }
  // Best value (by cmp) at index i among the nodes on the root-to-leaf path
  T getMax(int k, int tl, int tr, int i) const {
    T best = eval(TR[k], i); if (tl == tr) return best;
    int mid = tl + (tr - tl) / 2;
    T child = i <= mid ? getMax(k * 2, tl, mid, i)
                       : getMax(k * 2 + 1, mid + 1, tr, i);
    return max(best, child, cmp);
  }
  // Adds the line f(x) = mx + b over all x
  void addLine(T m, T b) { addLine(1, 0, N - 1, Line(m, b)); }
  // Adds the line f(x) = mx + b restricted to l <= x <= r
  void addLineSegment(T m, T b, T l, T r) {
    int li = cInd(l), ri = fInd(r);
    if (li <= ri) addLineSegment(1, 0, N - 1, li, ri, Line(m, b));
  }
  // Returns the maximum f(x) (by cmp) over all inserted lines; x must be
  // one of the coordinates supplied to the constructor
  T getMax(T x) const { return getMax(1, 0, N - 1, cInd(x)); }
  // Removes all lines
  void clear() { fill(TR.begin(), TR.end(), Line(T(), INF)); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Angle.h"
#include "Line.h"
using namespace std;
// Computes the intersection of half-planes defined by the left side of
// a set of lines (including the line itself)
// Points on the intersection are given in ccw order and may be identical
// Angle::pivot is set to (0, 0)
// Function Arguments:
// lines: a vector of lines representing the half-planes defined by the
// left side
// Return Value: a pair containing a return code and a vector of points with
// the return codes defined as follows:
// code == 0: the intersection is finite (possibly empty),
// with the vector containing the points on the intersection;
// code == 1: the intersection is infinite and bounded by a series of
// line segments and rays (possibly a single infinite line), with the
// vector P[0], P[1], ..., P[k] where the first ray is represented by
// the point P[1] in the direction towards P[0], and the second ray is
// represented by the point P[k - 1] in the direction towards P[k];
// the remaining points P[1], ..., P[k - 1] represent the intersections
// between the line segments and rays; if there are only 2 points, then
// the intersection is bounded by the left side of the line from
// P[0] to P[1]
// code == 2: the intersection is infinite and bounded by two lines
// (possibly coincident lines in opposite directions), with the vector
// containing exactly 4 points (P[0], P[1], P[2], P[3]), where P[0] is a
// point on the first bounding line, P[1] is another point on the first
// line in the positive direction (based on the direction vector of the
// line) from P[0], P[2] is a point on the second line, and P[3] is another
// point on the second line in its positive direction from P[2]
// code == 3: the intersection is the entire plane, with an empty vector
// In practice, has a moderate constant
// Time Complexity: O(N log N)
// Memory Complexity: O(N)
// Tested:
// https://open.kattis.com/problems/bigbrother
// https://open.kattis.com/problems/marshlandrescues
// https://dmoj.ca/problem/ccoprep3p3
pair<int, vector<pt>> halfPlaneIntersection(vector<Line> lines) {
  Angle::setPivot(pt(0, 0));
  // sort by direction angle; among lines with equal direction, order so the
  // most restrictive half-plane comes first (the unique() below keeps it)
  sort(lines.begin(), lines.end(), [&] (Line a, Line b) {
    Angle angA(a.v), angB(b.v);
    return angA == angB ? a.onLeft(b.proj(pt(0, 0))) < 0 : angA < angB;
  });
  // keep only the first line of each group of parallel same-direction lines
  lines.erase(unique(lines.begin(), lines.end(), [&] (Line a, Line b) {
    return Angle(a.v) == Angle(b.v);
  }), lines.end());
  // no constraints: the intersection is the entire plane (code 3)
  int N = lines.size(); if (N == 0) return make_pair(3, vector<pt>());
  // a single half-plane: unbounded, returned as a point plus direction
  if (N == 1) {
    pt p = lines[0].proj(pt(0, 0));
    return make_pair(1, vector<pt>{p, p + lines[0].v});
  }
  // look for a gap of less than a half turn between consecutive directions,
  // which makes the intersection unbounded (code 1, or code 2 below)
  int code = 0; for (int i = 0; code == 0 && i < N; i++) {
    Angle diff = Angle(lines[i].v) - Angle(lines[i == 0 ? N - 1 : i - 1].v);
    if (diff < Angle(pt(1, 0))) {
      // rotate so the direction gap sits at the ends of the array
      rotate(lines.begin(), lines.begin() + i, lines.end()); code = 1;
      // exactly two antiparallel lines: either empty (facing away) or an
      // infinite region bounded by both lines (code 2)
      if (N == 2 && diff == Angle(pt(-1, 0))) {
        pt p = lines[0].proj(pt(0, 0));
        if (lines[1].onLeft(p) < 0) return make_pair(0, vector<pt>());
        pt q = lines[1].proj(pt(0, 0));
        return make_pair(2, vector<pt>{p, p + lines[0].v, q, q + lines[1].v});
      }
    }
  }
  // monotone deque sweep: q holds the candidate bounding lines, ret the
  // intersection points between consecutive lines of q
  vector<Line> q(N + 1, lines[0]); vector<pt> ret(N); pt inter;
  int front = 0, back = 0; for (int i = 1; i <= N - code; i++) {
    // when bounded (code 0), close the loop by re-adding the front line
    if (i == N) lines.push_back(q[front]);
    // pop intersection points that the new line cuts off, from both ends
    while (front < back && lines[i].onLeft(ret[back - 1]) < 0) back--;
    while (i != N && front < back && lines[i].onLeft(ret[front]) < 0) front++;
    // no proper intersection with the current back line: skip this line
    if (lineLineIntersection(lines[i], q[back], inter) != 1) continue;
    ret[back++] = inter; q[back] = lines[i];
  }
  // a bounded intersection needs at least 3 vertices to be non-empty
  if (code == 0 && back - front < 3) return make_pair(code, vector<pt>());
  vector<pt> P(ret.begin() + front, ret.begin() + back); if (code == 1) {
    // unbounded: extend the two extreme bounding lines into rays
    P.insert(P.begin(), P[0] - q[front].v); P.push_back(P.back() + q[back].v);
  }
  return make_pair(code, P);
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Supports adding lines in the form f(x) = mx + b, or line segments
// in the form of f(x) = mx + b over l <= x <= r, and finding
// the maximum value of f(x) at an integral point x where MN <= x <= MX
// Template Arguments:
// IndexType: the type of x for queries (and l and r for addLineSegment)
// T: the type of the slope (m) and intercept (b) of the line, as well as
// the type of the function argument (x)
// Cmp: the comparator to compare two f(x) values,
// convention is same as std::priority_queue in STL
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// MN: the minimum bound (inclusive) for the value of x, allowing queries
// where MN <= x
// MX: the maximum bound (inclusive) for the value of x, allowing queries
// where x <= MX
// cmp: an instance of the Cmp struct
// INF: a value for positive infinity, must be negatable
// Functions:
// addLine(m, b): adds a line in the form f(x) = mx + b to the set of lines
// addLineSegment(m, b, l, r): adds a line segment in the form f(x) = mx + b
// where l <= x <= r, to the set of lines
// getMax(x): finds the maximum value of f(x) (based on the comparator)
// for all inserted lines
//   clear(): removes all lines from the segment tree
// In practice, has a moderate constant, performance compared to
// IncrementalConvexHullTrick (which uses multiset) and
// IncrementalConvexHullTrickSqrtBuffer can vary, slower than LiChaoTree
// Time Complexity:
// constructor: O(1)
//   addLine, getMax: O(log(MX - MN)) for the range [MN, MX]
//   addLineSegment: O(log(MX - MN) ^ 2) for the range [MN, MX]
//   clear: O(Q log(MX - MN) + U (log(MX - MN))^2) for the range
//     [MN, MX], Q addLine queries, and U addLineSegment queries
// Memory Complexity: O(Q log(MX - MN) + U (log(MX - MN))^2) for the range
//                    [MN, MX], Q addLine queries, and U addLineSegment
//                    queries
// Tested:
// https://judge.yosupo.jp/problem/line_add_get_min
// https://judge.yosupo.jp/problem/segment_add_get_min
// https://open.kattis.com/problems/longestlife
// https://www.spoj.com/problems/CHTPRAC/
// Sparse (implicit) Li Chao tree over the integer range [MN, MX]; nodes are
// created lazily so memory is proportional to the number of updates rather
// than the size of the range
template <class IndexType, class T, class Cmp = less<T>>
struct SparseLiChaoTree {
  // fixed typo in the diagnostic ("integeral" -> "integral")
  static_assert(is_integral<IndexType>::value, "IndexType must be integral");
  using Line = pair<T, T>;
  // one lazily-created node: a line plus indices of its children (-1 if
  // the child has not been created yet)
  struct Node {
    Line line; int l, r; Node(T m, T b) : line(m, b), l(-1), r(-1) {}
  };
  IndexType MN, MX; Cmp cmp; T INF; int root; vector<Node> TR;
  // evaluates f(x) = mx + b for the line l
  T eval(Line l, IndexType x) const { return l.first * x + l.second; }
  // returns true if line a is no worse than line b at both endpoints l, r
  bool majorize(Line a, Line b, IndexType l, IndexType r) {
    return !cmp(eval(a, l), eval(b, l)) && !cmp(eval(a, r), eval(b, r));
  }
  SparseLiChaoTree(IndexType MN, IndexType MX, Cmp cmp = Cmp(),
                   T inf = numeric_limits<T>::max())
      : MN(MN), MX(MX), cmp(cmp), INF(min(inf, -inf, cmp)), root(-1) {}
  // inserts line over the whole node range [tl, tr], creating the node on
  // demand; returns the (possibly newly created) node index
  int addLine(int k, IndexType tl, IndexType tr, Line line) {
    if (k == -1) { k = TR.size(); TR.emplace_back(T(), INF); }
    // keep the line that dominates at both endpoints in this node
    if (majorize(line, TR[k].line, tl, tr)) swap(line, TR[k].line);
    if (majorize(TR[k].line, line, tl, tr)) return k;
    // normalize so the stored line wins at the left endpoint
    if (cmp(eval(TR[k].line, tl), eval(line, tl))) swap(line, TR[k].line);
    IndexType m = tl + (tr - tl) / 2;
    // recurse into the half where the losing line can still win
    if (!cmp(eval(line, m), eval(TR[k].line, m))) {
      swap(line, TR[k].line);
      int nl = addLine(TR[k].l, tl, m, line); TR[k].l = nl;
    } else { int nr = addLine(TR[k].r, m + 1, tr, line); TR[k].r = nr; }
    return k;
  }
  // inserts line over [l, r] intersected with the node range [tl, tr]
  int addLineSegment(int k, IndexType tl, IndexType tr,
                     IndexType l, IndexType r, Line line) {
    if (r < tl || tr < l) return k;
    if (l <= tl && tr <= r) return addLine(k, tl, tr, line);
    if (k == -1) { k = TR.size(); TR.emplace_back(T(), INF); }
    IndexType m = tl + (tr - tl) / 2;
    int nl = addLineSegment(TR[k].l, tl, m, l, r, line); TR[k].l = nl;
    int nr = addLineSegment(TR[k].r, m + 1, tr, l, r, line); TR[k].r = nr;
    return k;
  }
  // best value at x over all lines stored on the root-to-leaf path; missing
  // nodes contribute the identity value INF
  T getMax(int k, IndexType tl, IndexType tr, IndexType x) const {
    if (k == -1) return INF;
    T ret = eval(TR[k].line, x); IndexType m = tl + (tr - tl) / 2;
    if (x <= m) return max(ret, getMax(TR[k].l, tl, m, x), cmp);
    else return max(ret, getMax(TR[k].r, m + 1, tr, x), cmp);
  }
  // adds the line f(x) = mx + b over the whole range [MN, MX]
  void addLine(T m, T b) { root = addLine(root, MN, MX, Line(m, b)); }
  // adds the line f(x) = mx + b valid only where l <= x <= r
  void addLineSegment(T m, T b, IndexType l, IndexType r) {
    root = addLineSegment(root, MN, MX, l, r, Line(m, b));
  }
  // best f(x) (per Cmp) over all inserted lines; INF if none cover x
  T getMax(IndexType x) const { return getMax(root, MN, MX, x); }
  // removes all lines and releases all nodes
  void clear() { root = -1; TR.clear(); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "ConvexHull.h"
using namespace std;
// Computes the maximum area from 3 distinct points out of a set of N points
// Constructor Arguments:
// P: the vector of points
// Fields:
// PA: the first point in the maximum area triangle
// PB: the second point in the maximum area triangle
// PC: the third point in the maximum area triangle
// maxArea2: twice the area of the maximum area triangle
// hull: the points in the convex hull of P
// In practice, has a small constant
// Time Complexity:
// constructor: O(N^2)
// Memory Complexity: O(N)
// Tested:
// https://dmoj.ca/problem/dmpg18g5
struct MaxTriangleArea {
  // PA, PB, PC: vertices of the best triangle found; maxArea2: twice its
  // area; hull: convex hull of P (the optimal triangle uses hull vertices)
  pt PA, PB, PC; T maxArea2; vector<pt> hull;
  MaxTriangleArea(const vector<pt> &P) : maxArea2(0), hull(convexHull(P)) {
    // fewer than 3 hull points: every triangle is degenerate, area stays 0
    int H = hull.size(), a = 0, b = 1, c = 2; if (H < 3) return;
    maxArea2 = area2(PA = hull[a], PB = hull[b], PC = hull[c]);
    // for each anchor a, advance b and c monotonically around the hull
    for (;; b = (a + 1) % H, c = (b + 1) % H) {
      for (; c != a; b = (b + 1) % H) {
        T A = area2(hull[a], hull[b], hull[c]), B = A;
        // advance c while the area does not decrease for this (a, b)
        while ((B = area2(hull[a], hull[b], hull[(c + 1) % H])) >= A) {
          c = (c + 1) % H; A = B;
        }
        if (maxArea2 < A) {
          maxArea2 = A; PA = hull[a]; PB = hull[b]; PC = hull[c];
        }
      }
      if ((a = (a + 1) % H) == 0) break;
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Angle.h"
#include "Line.h"
#include "Circle.h"
using namespace std;
// Functions for 2D polygons, represented by a vector of the N points in
// the polygon
// Returns i mod n if i is in the range [0, 2n)
int mod(int i, int n) { return i >= n ? i - n : i; }
// Determines twice the signed area of a simple polygon
// Function Arguments:
// poly: the points of the simple polygon
// Return Value: twice the signed area of the polygon, positive if
// ccw, negative if cw
// Time Complexity: O(N)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/polygonarea
// https://open.kattis.com/problems/crane
T getArea2(const vector<pt> &poly) {
  int n = poly.size(); T sum = 0;
  // shoelace formula: accumulate cross products of consecutive vertices
  for (int i = 0; i < n; i++) {
    sum += cross(poly[i], poly[mod(i + 1, n)]);
  }
  return sum;
}
// Determines centroid of a simple polygon
// Function Arguments:
// poly: the points of the simple polygon
// Return Value: the centroid of the polygon
// Time Complexity: O(N)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/crane
pt getCentroid(const vector<pt> &poly) {
  int n = poly.size(); T A2 = 0; pt cen(0, 0);
  // weight each edge's vertex sum by its signed shoelace contribution
  for (int i = 0; i < n; i++) {
    pt cur = poly[i], nxt = poly[mod(i + 1, n)];
    T a = cross(cur, nxt);
    A2 += a; cen += a * (cur + nxt);
  }
  return cen / A2 / T(3);
}
// Determines the orientation of a convex polygon
// Function Arguments:
// poly: the points of the convex polygon
// Return Value: 1 if ccw, -1 if cw, 0 if a point or a line
// Time Complexity: O(1)
// Memory Complexity: O(1)
int isCcwConvexPolygon(const vector<pt> &poly) {
  // for a convex polygon a single turn (at vertex 0) fixes the orientation
  int n = poly.size();
  return ccw(poly[n - 1], poly[0], poly[mod(1, n)]);
}
// Determines the orientation of a simple polygon
// Function Arguments:
// poly: the points of the simple polygon
// Return Value: 1 if ccw, -1 if cw, 0 if a point or a line
// Time Complexity: O(N)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/abstractart
int isCcwPolygon(const vector<pt> &poly) {
int n = poly.size();
int i = min_element(poly.begin(), poly.end()) - poly.begin();
return ccw(poly[mod(i + n - 1, n)], poly[i], poly[mod(i + 1, n)]);
}
// Determines whether a point is inside a convex polygon
// Function Arguments:
// poly: the points of the convex polygon in ccw order or cw order
// p: the point to check
// Return Value: 1 if inside the polygon, 0 if on the edge, -1 if outside
// Time Complexity: O(log N)
// Memory Complexity: O(1)
// Tested:
// https://codeforces.com/contest/166/problem/B
// Point-in-convex-polygon via binary search on the fan of triangles
// rooted at poly[0]
int isInConvexPolygon(const vector<pt> &poly, pt p) {
  int n = poly.size(), a = 1, b = n - 1;
  // degenerate polygon (single point or segment)
  if (n < 3) return onSeg(p, poly[0], poly.back()) ? 0 : -1;
  // orient so that poly[a] comes before poly[b] going ccw from poly[0]
  if (ccw(poly[0], poly[a], poly[b]) > 0) swap(a, b);
  int o1 = ccw(poly[0], p, poly[a]), o2 = ccw(poly[0], p, poly[b]);
  // outside the angular wedge formed at poly[0]
  if (o1 < 0 || o2 > 0) return -1;
  // collinear with one of the two edges incident to poly[0]: on the edge
  if (o1 == 0 || o2 == 0) return 0;
  // binary search for the fan triangle (poly[0], poly[a], poly[b])
  // containing p
  while (abs(a - b) > 1) {
    int c = (a + b) / 2; (ccw(poly[0], p, poly[c]) < 0 ? b : a) = c;
  }
  // sign relative to the far edge: 1 inside, 0 on the edge, -1 outside
  return ccw(poly[a], p, poly[b]);
}
// Determines whether a point is inside a simple polygon
// Function Arguments:
// poly: the points of the simple polygon in ccw order or cw order
// p: the point to check
// Return Value: 1 if inside the polygon, 0 if on the edge, -1 if outside
// Time Complexity: O(N)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/pointinpolygon
// Point-in-simple-polygon by crossing parity (despite the variable name,
// windingNumber is a parity bit, toggled once per ray crossing)
int isInPolygon(const vector<pt> &poly, pt p) {
  int n = poly.size(), windingNumber = 0; for (int i = 0; i < n; i++) {
    // orient each edge so that a is the lower endpoint
    pt a = poly[i], b = poly[mod(i + 1, n)]; if (lt(b.y, a.y)) swap(a, b);
    if (onSeg(p, a, b)) return 0;
    // toggle when the edge spans p's y-coordinate (half-open at the top)
    // and p lies strictly to one side of the edge
    windingNumber ^= (!lt(p.y, a.y) && lt(p.y, b.y) && ccw(p, a, b) > 0);
  }
  return windingNumber == 0 ? -1 : 1;
}
// Finds an extreme vertex of a convex polygon (a vertex that is the furthest
// point in that direction, selecting the rightmost vertex if there are
// multiple)
// Function Arguments:
// poly: the points of the convex polygon in ccw order
// dir: the direction
// Return Value: the index of an extreme vertex
// Time Complexity: O(log N)
// Memory Complexity: O(1)
// Tested:
// https://codeforces.com/contest/799/problem/G
// https://www.acmicpc.net/problem/4225
int extremeVertex(const vector<pt> &poly, pt dir) {
  int n = poly.size(), lo = 0, hi = n; pt pp = perp(dir);
  // sign of the advance from vertex j to vertex i measured against the
  // perpendicular of dir (exact convention depends on perp/cross in Point.h)
  auto cmp = [&] (int i, int j) {
    return sgn(cross(pp, poly[mod(i, n)] - poly[mod(j, n)]));
  };
  // vertex i is extreme when the next vertex does not advance past it
  // while the previous vertex strictly does
  auto extr = [&] (int i) {
    return cmp(i + 1, i) >= 0 && cmp(i, i + n - 1) < 0;
  };
  if (extr(0)) return 0;
  // binary search on the circular sequence of advance/retreat signs
  while (lo + 1 < hi) {
    int m = lo + (hi - lo) / 2; if (extr(m)) return m;
    // decide which half contains the sign change using the signs at lo, m
    int ls = cmp(lo + 1, lo), ms = cmp(m + 1, m);
    (ls < ms || (ls == ms && ls == cmp(lo, m)) ? hi : lo) = m;
  }
  return lo;
}
// Finds the intersection of a convex polygon and a line
// Function Arguments:
// poly: the points of the convex polygon in ccw order
// l: the line
// Return Value: (-1, -1) if no collision
// (i, -1) if touching corner i
// (i, i) if along side (i, i + 1)
// (i, j) if crossing sides (i, i + 1) and (j, j + 1)
// crossing corner i is treated as crossing side (i, i + 1)
// first index in pair is guaranteed to NOT be on the left side of the line
// Time Complexity: O(log N)
// Memory Complexity: O(1)
// Tested:
// Fuzz Tested
// https://codeforces.com/contest/799/problem/G
pair<int, int> convexPolygonLineIntersection(const vector<pt> &poly, Line l) {
  int n = poly.size();
  // degenerate polygons (single point / segment) handled explicitly
  if (n == 1) return make_pair(l.onLeft(poly[0]) == 0 ? 0 : -1, -1);
  if (n == 2) {
    int o0 = l.onLeft(poly[0]), o1 = l.onLeft(poly[1]);
    if (o0 == 0 && o1 == 0) return make_pair(0, 0);
    if (o0 == 0) return make_pair(0, -1);
    if (o1 == 0) return make_pair(1, -1);
    return o0 == o1 ? make_pair(-1, -1) : make_pair(0, 1);
  }
  // extreme vertices perpendicular to l, one on each side of the line
  int endA = extremeVertex(poly, -perp(l.v));
  int endB = extremeVertex(poly, perp(l.v));
  auto cmpL = [&] (int i) { return l.onLeft(poly[i]); };
  // both extremes strictly on the same side: no collision
  pair<int, int> ret(-1, -1); if (cmpL(endA) > 0 || cmpL(endB) < 0) return ret;
  // binary search on each of the two monotone chains between the extremes
  // for the edge where the side of l changes
  for (int i = 0; i < 2; i++) {
    int lo = endB, hi = endA; while (mod(lo + 1, n) != hi) {
      int m = mod((lo + hi + (lo < hi ? 0 : n)) / 2, n);
      (cmpL(m) == cmpL(endB) ? lo : hi) = m;
    }
    (i ? ret.second : ret.first) = mod(lo + !cmpL(hi), n); swap(endA, endB);
  }
  // collapse the special cases: touching at a single corner, or the line
  // lying along one side of the polygon
  if (ret.first == ret.second) return make_pair(ret.first, -1);
  if (!cmpL(ret.first) && !cmpL(ret.second)) {
    switch ((ret.first - ret.second + n + 1) % n) {
      case 0: return make_pair(ret.first, ret.first);
      case 2: return make_pair(ret.second, ret.second);
    }
  }
  return ret;
}
// Finds a single tangent of a convex polygon and a point strictly outside
// the polygon
// Function Arguments:
// poly: the points of the convex polygon in ccw order
// p: the point strictly outside the polygon
// left: whether the left or right tangent line is found if p is
// considered to be below the polygon
// Return Value: the tangent index, with the closest point to p being
// selected if there are multiple indices
// Time Complexity: O(log N)
// Memory Complexity: O(1)
// Tested:
// Fuzz Tested
// https://dmoj.ca/problem/coci19c2p5
int convexPolygonPointSingleTangent(const vector<pt> &poly, pt p, bool left) {
  int n = poly.size(), o = ccw(p, poly[0], poly.back());
  // whether poly[0] lies on the far side of the polygon as seen from p;
  // collinear ties are broken by comparing squared distances
  bool farSide = o ? o < 0 : lt(distSq(p, poly.back()), distSq(p, poly[0]));
  int lo = farSide != left, hi = lo + n - 2; while (lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    // orientation of poly[mid] relative to the ray p -> poly[0] determines
    // which angular half of the polygon mid falls in
    if (ccw(p, poly[0], poly[mid]) == (left ? -1 : 1)) {
      if (farSide == left) hi = mid - 1;
      else lo = mid + 1;
    } else {
      // otherwise, whether the edge (mid, mid + 1) turns towards or away
      // from p decides the search direction
      if ((ccw(poly[mid], poly[mod(mid + 1, n)], p) < 0) == left) hi = mid - 1;
      else lo = mid + 1;
    }
  }
  return mod(lo, n);
}
// Finds the tangent of a convex polygon and a point strictly outside
// the polygon
// Function Arguments:
// poly: the points of the convex polygon in ccw order
// p: the point strictly outside the polygon
// Return Value: a pair containing the tangent indices, with the first index
// being the left tangent point and the second index being the right tangent
// if p is considered to be below the polygon; all points strictly between
// the tangent indices are strictly within the tangent lines, while all other
// points are on or outside the tangent lines
// Time Complexity: O(log N)
// Memory Complexity: O(1)
// Tested:
// Fuzz Tested
// https://dmoj.ca/problem/coci19c2p5
pair<int, int> convexPolygonPointTangent(const vector<pt> &poly, pt p) {
  // query each side independently: left tangent first, then right
  int leftTangent = convexPolygonPointSingleTangent(poly, p, true);
  int rightTangent = convexPolygonPointSingleTangent(poly, p, false);
  return make_pair(leftTangent, rightTangent);
}
// Finds the tangent of a convex polygon and a circle strictly and completely
// outside the polygon
// Function Arguments:
// poly: the points of the convex polygon in ccw order
// c: the circle strictly and completely outside the polygon
// inner: whether to find the inner or outer tangents
// Return Value: a pair containing the tangent indices, with the first index
// being the left tangent point and the second index being the right tangent
// if c is considered to be below the polygon; all points strictly between
// the tangent indices are strictly within the tangent lines, while all other
// points are on or outside the tangent lines (same index means all points
// are inside or on the tangent lines)
// Time Complexity: O(log N)
// Memory Complexity: O(1)
// Tested:
// Fuzz Tested
pair<int, int> convexPolygonCircleTangent(const vector<pt> &poly,
    Circle c, bool inner) {
  // t receives tangent point pairs from circleCircleTangent, treating each
  // polygon vertex as a zero-radius circle; t[h].second is the tangent
  // point on c for side h
  int n = poly.size(), a = 0, b = 0; vector<pair<pt, pt>> t;
  for (int h = 0; h < 2; h++) {
    // h == 0 computes the left tangent index, h == 1 the right
    assert(circleCircleTangent(Circle(poly[0], 0), c, inner, t) == 1);
    pt q = t[h].second; int o = ccw(q, poly[0], poly.back());
    bool farSide = o ? o < 0 : lt(distSq(q, poly.back()), distSq(q, poly[0]));
    // binary search mirroring convexPolygonPointSingleTangent, with the
    // reference point recomputed from the tangent through each probe vertex
    int lo = farSide == h, hi = lo + n - 2; while (lo <= hi) {
      int mid = lo + (hi - lo) / 2; t.clear();
      assert(circleCircleTangent(Circle(poly[mid], 0), c, inner, t) == 1);
      q = t[h].second; if (ccw(q, poly[0], poly[mid]) == (h ? 1 : -1)) {
        if (farSide != h) hi = mid - 1;
        else lo = mid + 1;
      } else {
        if ((ccw(poly[mid], poly[mod(mid + 1, n)], q) < 0) != h) hi = mid - 1;
        else lo = mid + 1;
      }
    }
    (h ? b : a) = mod(lo, n); t.clear();
  }
  return make_pair(a, b);
}
// Finds the tangents of two convex polygons that do not intersect
// Function Arguments:
// poly1: the first convex polygon
// poly2: the second convex polygon that does not intersect with the first
// inner: whether to find the inner or outer tangents
// Return Value: a vector of pairs containing the tangent indices, with the
// first element in each pair being the index in the first polygon, and the
// second element being the index in the second polygon; first point in
// each pair is the left tangent point of the first polygon if the second
// polygon is considered to be below the first polygon
// Time Complexity: O(log N log M)
// Memory Complexity: O(1)
// Tested:
// Fuzz Tested
vector<pair<int, int>> convexPolygonConvexPolygonTangent(
    const vector<pt> &poly1, const vector<pt> &poly2, bool inner) {
  int n = poly1.size(), a = 0, b = 0, c = 0, d = 0; vector<pair<int, int>> ret;
  for (int h = 0; h < 2; h++) {
    // h == 0 finds the first tangent pair, h == 1 the second; q is the
    // tangent point on poly2 as seen from the current poly1 vertex
    pt q = poly2[convexPolygonPointSingleTangent(poly2, poly1[0], inner ^ h)];
    int o = ccw(q, poly1[0], poly1.back());
    bool farSide = o ? o < 0
                     : lt(distSq(q, poly1.back()), distSq(q, poly1[0]));
    // binary search over poly1 mirroring convexPolygonPointSingleTangent,
    // recomputing the poly2 tangent point for each probe vertex
    int lo = farSide == h, hi = lo + n - 2; while (lo <= hi) {
      int mid = lo + (hi - lo) / 2;
      q = poly2[convexPolygonPointSingleTangent(poly2, poly1[mid], inner ^ h)];
      if (ccw(q, poly1[0], poly1[mid]) == (h ? 1 : -1)) {
        if (farSide != h) hi = mid - 1;
        else lo = mid + 1;
      } else {
        if ((ccw(poly1[mid], poly1[mod(mid + 1, n)], q) < 0) != h)
          hi = mid - 1;
        else lo = mid + 1;
      }
    }
    // record the poly1 index and its matching tangent index on poly2
    (h ? b : a) = lo = mod(lo, n);
    (h ? d : c) = convexPolygonPointSingleTangent(poly2, poly1[lo], inner ^ h);
  }
  ret.emplace_back(a, c); ret.emplace_back(b, d); return ret;
}
// Finds the closest point on the edge of the polygon to a point strictly
// outside the polygon
// Function Arguments:
// poly: the points of the convex polygon in ccw order
// p: the point strictly outside the polygon
// Return Value: the closest point on the edge of the polygon to the point p
// Time Complexity: O(log N)
// Memory Complexity: O(1)
// Tested:
// Fuzz Tested
pt closestPointOnConvexPolygon(const vector<pt> &poly, pt p) {
  // restrict the search to the chain of edges facing p, delimited by the
  // two tangent points
  pair<int, int> tangent = convexPolygonPointTangent(poly, p);
  int n = poly.size(), len = tangent.second - tangent.first;
  if (len < 0) len += n;
  // both tangents at the same vertex: that vertex is the closest point
  if (len == 0) return poly[tangent.first];
  // the distance from p to consecutive edges along the facing chain is
  // unimodal, so binary search for where it stops decreasing
  int lo = 0, hi = len - 2; while (lo <= hi) {
    int mid = lo + (hi - lo) / 2, i = mod(tangent.first + mid, n);
    if (ptSegDist(p, poly[i], poly[mod(i + 1, n)])
        < ptSegDist(p, poly[mod(i + 1, n)], poly[mod(i + 2, n)]))
      hi = mid - 1;
    else lo = mid + 1;
  }
  int i = mod(tangent.first + lo, n);
  return closestPtOnSeg(p, poly[i], poly[mod(i + 1, n)]);
}
// Determines the intersection of a simple polygon and a half-plane defined
// by the left side of a line (including the line itself)
// Function Arguments:
// poly: the points of the simple polygon in ccw order
// l: the line with the half-plane defined by the left side
// Return Value: the polygon defined by the intersection of the simple polygon
// and the half-plane, assuming the result is a single simple polyon
// Time Complexity: O(N)
// Memory Complexity: O(N)
// Tested:
// https://dmoj.ca/problem/utso15p6
// https://open.kattis.com/problems/canyon
vector<pt> polygonHalfPlaneIntersection(const vector<pt> &poly, Line l) {
int n = poly.size(); vector<pt> ret; for (int i = 0; i < n; i++) {
int j = mod(i + 1, n), o1 = l.onLeft(poly[i]), o2 = l.onLeft(poly[j]);
if (o1 >= 0) ret.push_back(poly[i]);
if (o1 && o2 && o1 != o2) {
pt p; if (lineLineIntersection(l, Line(poly[i], poly[j]), p) == 1)
ret.push_back(p);
}
}
return ret;
}
// Computes the area of union of multiple polygons
// Function Arguments:
// polys: a vector of the polygons represented by a vector of points given in
// ccw order
// Return Value: the area of the union of all the polygons
// Time Complexity: O(N^2 log N) for N total points
// Memory Complexity: O(N) for N total points
// Tested:
// https://open.kattis.com/problems/abstractart
T polygonUnion(const vector<vector<pt>> &polys) {
  // parameter of point p along direction q (p = t * q), avoiding division
  // by zero when the direction is vertical
  auto rat = [&] (pt p, pt q) { return sgn(q.x) ? p.x / q.x : p.y / q.y; };
  // for each directed edge a -> b, compute the fraction of the edge not
  // covered by another polygon, and add its shoelace contribution
  T ret = 0; for (int i = 0; i < int(polys.size()); i++)
    for (int v = 0; v < int(polys[i].size()); v++) {
      pt a = polys[i][v], b = polys[i][mod(v + 1, polys[i].size())];
      // coverage events along the edge, parameterized over [0, 1]
      vector<pair<T, int>> segs{make_pair(0, 0), make_pair(1, 0)};
      for (int j = 0; j < int(polys.size()); j++) if (i != j)
        for (int w = 0; w < int(polys[j].size()); w++) {
          pt c = polys[j][w], d = polys[j][mod(w + 1, polys[j].size())];
          // proper crossing: record where edge cd crosses the line of ab
          int sc = ccw(a, b, c), sd = ccw(a, b, d); if (sc != sd) {
            if (min(sc, sd) < 0) {
              T sa = area2(c, d, a), sb = area2(c, d, b);
              segs.emplace_back(sa / (sa - sb), sgn(sc - sd));
            }
          } else if (j < i && !sc && !sd && sgn(dot(b - a, d - c)) > 0) {
            // collinear same-direction overlap: counted once (j < i)
            segs.emplace_back(rat(c - a, b - a), 1);
            segs.emplace_back(rat(d - a, b - a), -1);
          }
        }
      // sweep the events; sm accumulates the uncovered fraction of the edge
      sort(segs.begin(), segs.end()); T sm = 0;
      for (auto &&s : segs) s.first = min(max(s.first, T(0)), T(1));
      for (int j = 1, cnt = segs[0].second; j < int(segs.size()); j++) {
        if (!cnt) sm += segs[j].first - segs[j - 1].first;
        cnt += segs[j].second;
      }
      ret += cross(a, b) * sm;
    }
  return ret / 2;
}
// Determines the area of the intersection of a simple polygon and a circle
// Function Arguments:
// poly: the points of the simple polygon in ccw order
// c: the circle
// Return Value: the area of the intersection of the simple polygon and
// the circle
// Time Complexity: O(N)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/pizzacutting
// https://open.kattis.com/problems/birthdaycake
T polygonCircleIntersectionArea(const vector<pt> &poly, Circle c) {
  T r2 = c.r * c.r / 2;
  // signed area contribution of the triangle (origin, p, q) clipped to the
  // circle; coordinates are pre-translated so the circle center is origin
  auto tri = [&] (pt p, pt q) {
    pt d = q - p; T a = dot(d, p) / norm(d);
    T b = (norm(p) - c.r * c.r) / norm(d), det = a * a - b;
    // the segment's supporting line misses the circle: pure circular sector
    if (!lt(0, det)) return ang(q, pt(0, 0), p) * r2;
    // s, t: chord parameters clamped to the segment p -> q
    T s = max(T(0), -a - sqrt(det)), t = min(T(1), -a + sqrt(det));
    // chord entirely outside the segment: again a pure circular sector
    if (lt(t, 0) || !lt(s, 1)) return ang(q, pt(0, 0), p) * r2;
    // sector + inner triangle + sector, split at the chord endpoints u, v
    pt u = p + d * s, v = p + d * t;
    return ang(u, pt(0, 0), p) * r2 + cross(u, v) / 2
        + ang(q, pt(0, 0), v) * r2;
  };
  T ret = 0; for (int n = poly.size(), i = 0; i < n; i++)
    ret += tri(poly[i] - c.o, poly[mod(i + 1, n)] - c.o);
  return ret;
}
// Computes the area of union of multiple polygons and multiple circles
// Assertion failure likely means there is precision error
// Function Arguments:
// polys: a vector of the polygons represented by a vector of points given in
// ccw order
// circles: a vector of the circles
// Return Value: the area of the union of all the polygons and all the circles
// Time Complexity: O((N + M)^2 log (N + M)) for N total points and M circles
// Memory Complexity: O((N + M)^2) for N total points and M circles
// Tested:
// https://open.kattis.com/problems/abstractart
// https://www.spoj.com/problems/CIRU/
// https://dmoj.ca/problem/noi05p6
T polygonCircleUnionArea(const vector<vector<pt>> &polys,
    const vector<Circle> &circles) {
  int n = polys.size(), m = circles.size(); T ret = 0;
  // parameter of point p along direction q (p = t * q), avoiding division
  // by zero for vertical directions
  auto rat = [&] (pt p, pt q) { return sgn(q.x) ? p.x / q.x : p.y / q.y; };
  // phase 1: shoelace contribution of each polygon edge, restricted to the
  // fraction not covered by another polygon or by a circle (see polygonUnion)
  for (int i = 0; i < n; i++) for (int v = 0; v < int(polys[i].size()); v++) {
    pt a = polys[i][v], b = polys[i][mod(v + 1, polys[i].size())];
    if (a == b) continue;
    // coverage events along the edge, parameterized over [0, 1]
    vector<pair<T, int>> segs{make_pair(0, 0), make_pair(1, 0)};
    for (int j = 0; j < n; j++) if (i != j)
      for (int w = 0; w < int(polys[j].size()); w++) {
        pt c = polys[j][w], d = polys[j][mod(w + 1, polys[j].size())];
        // proper crossing of edge cd with the line of ab
        int sc = ccw(a, b, c), sd = ccw(a, b, d); if (sc != sd) {
          if (min(sc, sd) < 0) {
            T sa = area2(c, d, a), sb = area2(c, d, b);
            segs.emplace_back(sa / (sa - sb), sgn(sc - sd));
          }
        } else if (j < i && !sc && !sd && sgn(dot(b - a, d - c)) > 0) {
          // collinear same-direction overlap: counted once (j < i)
          segs.emplace_back(rat(c - a, b - a), 1);
          segs.emplace_back(rat(d - a, b - a), -1);
        }
      }
    // portions of the edge's line inside a circle count as covered (events
    // are clamped to [0, 1] before the sweep below)
    Line l(a, b); for (int j = 0; j < m; j++) {
      vector<pt> p = circleLineIntersection(circles[j], l);
      if (int(p.size()) == 2) {
        segs.emplace_back(rat(p[0] - a, b - a), 1);
        segs.emplace_back(rat(p[1] - a, b - a), -1);
      }
    }
    // sweep the events; sm is the uncovered fraction of the edge
    sort(segs.begin(), segs.end()); T sm = 0;
    for (auto &&s : segs) s.first = min(max(s.first, T(0)), T(1));
    for (int j = 1, cnt = segs[0].second; j < int(segs.size()); j++) {
      if (!cnt) sm += segs[j].first - segs[j - 1].first;
      cnt += segs[j].second;
    }
    ret += cross(a, b) * sm / 2;
  }
  // phase 2: arc contributions of each circle, restricted to arcs not
  // covered by another circle or by a polygon
  for (int i = 0; i < m; i++) {
    // angular coverage events around circle i; sentinel events mark the
    // wrap-around point at the leftmost point of the circle
    vector<pair<Angle, int>> segs; Angle::setPivot(circles[i].o);
    segs.emplace_back(Angle(circles[i].o - pt(circles[i].r, 0)), 0);
    segs.emplace_back(Angle(circles[i].o + pt(circles[i].r, 0)), 0);
    segs.emplace_back(Angle(circles[i].o), 0); bool covered = false;
    for (int j = 0; j < m; j++) if (i != j) {
      // circle i fully covered by circle j; exact containment ties are
      // broken by radius, then by index (see Circle::contains in Circle.h)
      int o = circles[j].contains(circles[i]);
      if (o > 0 || (o == 0 && (lt(circles[i].r, circles[j].r) || j < i))) {
        covered = true; break;
      }
      vector<pt> p; circleCircleIntersection(circles[i], circles[j], p);
      if (int(p.size()) == 2) {
        // arc a -> b is covered by circle j; split it if it wraps past
        // the sentinel angle
        Angle a(p[0]), b(p[1]);
        segs.emplace_back(a, 1); segs.emplace_back(b, -1); if (a >= b) {
          segs.emplace_back(Angle(circles[i].o - pt(circles[i].r, 0)), 1);
          segs.emplace_back(Angle(circles[i].o), -1);
        }
      }
    }
    // arcs of circle i lying inside a polygon count as covered too
    for (int j = 0; j < n && !covered; j++) {
      vector<pair<Angle, int>> tmp; bool hasInter = false;
      for (int w = 0; w < int(polys[j].size()); w++) {
        pt c = polys[j][w], d = polys[j][mod(w + 1, polys[j].size())];
        if (c == d) continue;
        // classify each polygon edge against the circle boundary; the sign
        // convention follows Circle::contains from Circle.h
        int sc = circles[i].contains(c), sd = circles[i].contains(d);
        vector<pt> p = circleSegIntersection(circles[i], c, d); if (sc != sd) {
          if (min(sc, sd) > 0 && int(p.size()) == 1) {
            hasInter = true; tmp.emplace_back(Angle(p[0]), sgn(sc - sd));
          } else if (min(sc, sd) < 0 && int(p.size()) == 2) {
            hasInter = true; tmp.emplace_back(Angle(p[sc == 0]), sgn(sc - sd));
          }
        } else if (int(p.size()) == 2) {
          hasInter = true; tmp.emplace_back(Angle(p[1]), 1);
          tmp.emplace_back(Angle(p[0]), -1);
        }
      }
      // sort polygon events with openings (+1) before closings at ties
      auto cmp = [&] (const pair<Angle, int> &a, const pair<Angle, int> &b) {
        return make_pair(a.first, -a.second) < make_pair(b.first, -b.second);
      };
      sort(tmp.begin(), tmp.end(), cmp);
      // an initial closing event means the covered arc wraps around
      if (!tmp.empty() && tmp[0].second < 0) {
        tmp.emplace_back(Angle(circles[i].o - pt(circles[i].r, 0)), 1);
        tmp.emplace_back(Angle(circles[i].o), -1);
      }
      segs.insert(segs.end(), tmp.begin(), tmp.end());
      // no boundary intersections with center inside polygon j: the circle
      // may be fully covered by the polygon — NOTE(review): relies on the
      // Circle::contains sign convention; verify against Circle.h
      if (!hasInter && isInPolygon(polys[j], circles[i].o) >= 0)
        for (auto &&p : polys[j])
          if (circles[i].contains(p) < 0) { covered = true; break; }
    }
    if (covered) continue;
    // sweep the angular events; each uncovered arc contributes its chord
    // triangle plus the circular-segment area beyond the chord
    sort(segs.begin(), segs.end()); for (auto &&s : segs)
      if (s.first.p == circles[i].o) s.first.p -= pt(circles[i].r, 0);
    for (int j = 1, cnt = segs[0].second; j < int(segs.size()); j++) {
      if (!cnt) {
        pt a = segs[j - 1].first.p, b = segs[j].first.p; if (a != b) {
          ret += cross(a, b) / 2;
          ret += circleHalfPlaneIntersectionArea(circles[i], Line(b, a));
        }
      }
      cnt += segs[j].second;
    }
  }
  return ret;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../search/BinarySearch.h"
using namespace std;
// Supports adding lines in the form f(x) = mx + b and finding
// the maximum value of f(x) at any given x; this version allows for
// updates and queries in arbitrary order
// Template Arguments:
// T: the type of the slope (m) and intercept (b) of the line, as well as
// the type of the function argument (x), must be able to store m * b
// Cmp: the comparator to compare two f(x) values,
// convention is same as std::priority_queue in STL
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// cmp: an instance of the Cmp struct
// SCALE: the value to scale sqrt by
// Functions:
// addLine(m, b): adds a line in the form f(x) = mx + b to the set of lines
// getMax(x): finds the maximum value of f(x) (based on the comparator)
// for all inserted lines
// size(): returns the number of lines in the convex hull
// reserve(N): reserves space for N lines in the convex hull
// In practice, has a very small constant, performance compared to
// IncrementalConvexHullTrick (which uses multiset) and SparseLiChao
// can vary, slower than LiChao
// Time Complexity:
// constructor: O(1)
// addLine: O(1) amortized
// getMax: O(sqrt(N) + log(N)) amortized for N lines in the convex hull
// size: O(1)
// reserve: O(N)
// Memory Complexity: O(N) for N lines in the convex hull
// Tested:
// https://judge.yosupo.jp/problem/line_add_get_min
// https://open.kattis.com/problems/longestlife
// https://www.spoj.com/problems/CHTPRAC/
template <class T, class Cmp = less<T>>
struct IncrementalConvexHullTrickSqrtBuffer {
  // A line f(x) = mx + b
  struct Line {
    T m, b; Line(T m, T b) : m(m), b(b) {}
    T eval(T x) const { return m * x + b; }
  };
  // large: the hull lines sorted by slope; small: an unsorted buffer of
  // recently added lines, merged into large once it exceeds
  // SCALE * sqrt(total number of lines)
  vector<Line> large, small; Cmp cmp; double SCALE;
  IncrementalConvexHullTrickSqrtBuffer(Cmp cmp = Cmp(), double SCALE = 4)
      : cmp(cmp), SCALE(SCALE) {}
  // Cross-multiplied turn check used during hull rebuilding; true if the
  // middle line b is unnecessary between a and c
  bool ccw(Line a, Line b, Line c) {
    return (b.m - a.m) * (c.b - a.b) <= (b.b - a.b) * (c.m - a.m);
  }
  // True if a and b have equal slopes (per Cmp) and a's intercept does not
  // beat b's, i.e. a is dominated by b
  bool slope(Line a, Line b) {
    return !cmp(a.m, b.m) && !cmp(b.m, a.m) && !cmp(a.b, b.b);
  }
  // Rebuilds the envelope of large in place; large must be sorted by slope
  void rebuildHull() {
    int back = 0; for (auto &&line : large) {
      while (back >= 1 && slope(line, large[back - 1])) back--;
      while (back >= 2 && ccw(line, large[back - 1], large[back - 2])) back--;
      large[back++] = line;
    }
    large.erase(large.begin() + back, large.end());
  }
  int size() const { return large.size() + small.size(); }
  // Merges the small buffer into large once it passes the sqrt threshold
  void rebuild() {
    if (int(small.size()) > SCALE * sqrt(size())) {
      auto lcmp = [&] (Line a, Line b) { return cmp(a.m, b.m); };
      int lSz = large.size(); sort(small.begin(), small.end(), lcmp);
      large.insert(large.end(), small.begin(), small.end()); small.clear();
      inplace_merge(large.begin(), large.begin() + lSz, large.end(), lcmp);
      rebuildHull();
    }
  }
  // Adds the line f(x) = mx + b; O(1) amortized
  void addLine(T m, T b) { small.emplace_back(m, b); }
  // Returns the maximum f(x) (per Cmp) over all inserted lines; requires at
  // least one line to have been added; binary searches the hull, then scans
  // the small buffer linearly
  T getMax(T x) {
    rebuild(); int ind = bsearch<FIRST>(0, int(large.size()) - 1, [&] (int i) {
      return cmp(large[i + 1].eval(x), large[i].eval(x));
    });
    T mx = (large.empty() ? small[0] : large[ind]).eval(x);
    for (auto &&line : small) mx = max(mx, line.eval(x), cmp);
    return mx;
  }
  // Reserves space for N lines; promised by the header documentation but
  // previously missing from this struct
  void reserve(int N) { large.reserve(N); small.reserve(N); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Angle.h"
#include "Line.h"
#include "Circle.h"
using namespace std;
// A ray represented by a point and line with a direction
// Fields:
// p: the point
// l: a line passing through p, representing the direction
// nxt: a pointer to the next ray in the circular set
struct Ray {
  // p: the point; l: a line through p giving the direction; nxt: pointer to
  // the next ray in the circular set (maintained by IncrementalConvexHull)
  pt p; Line l; mutable const Ray *nxt;
  Ray(pt p, Line l = Line()) : p(p), l(l), nxt(nullptr) {}
  // Default ordering: compares the angles of the direction vectors; note
  // the swapped operands here, which combined with the swap in operator <
  // below orders the set by increasing Angle(l.v)
  virtual bool cmp(const Ray &o) const {
    Angle::setPivot(pt(0, 0)); return Angle(o.l.v) < Angle(l.v);
  }
  // Dispatches to the virtual cmp of the RIGHT operand, so helper structs
  // deriving from Ray can customize set<Ray>::lower_bound queries
  bool operator < (const Ray &r) const { return r.cmp(*this); }
};
// Helper struct for isIn
// Helper struct for isIn; the Ray's p is the first hull point, q is the
// last hull point, r is the query point
struct IsInCmp : public Ray {
  pt q, r; IsInCmp(pt p, pt q, pt r) : Ray(p), q(q), r(r) {}
  // Binary search predicate: advance while o is not the last hull vertex
  // and r is strictly ccw of the ray from p to o's successor
  bool cmp(const Ray &o) const override {
    return q != o.p && ccw(p, o.nxt->p, r) > 0;
  }
};
// Helper struct for pointTangents
// Helper struct for singlePointTangent; the Ray's p is the first hull
// point, q is the last hull point, r is the query point (strictly outside
// the hull), left selects the left or right tangent, farSide is a flag
// precomputed by the caller from ccw(r, first, last) / distances
// (NOTE(review): farSide appears to encode which half of the hull contains
// the tangent vertex, steering the binary search -- confirm against
// singlePointTangent)
struct PointTangentCmp : public Ray {
  pt q, r; bool left, farSide;
  PointTangentCmp(pt p, pt q, pt r, bool left, bool farSide)
    : Ray(p), q(q), r(r), left(left), farSide(farSide) {}
  bool cmp(const Ray &o) const override {
    // endpoints of the hull's vertex sequence are handled explicitly
    if (farSide != left && o.p == p) return true;
    if (farSide == left && o.p == q) return false;
    // first test which half o.p lies in relative to r and p; within the
    // correct half, test whether the tangent lies past o's successor
    if (ccw(r, p, o.p) == (left ? -1 : 1)) return farSide != left;
    else return (ccw(o.p, o.nxt->p, r) < 0) != left;
  }
};
// Helper struct for circleTangents
// Helper struct for circleTangents; the Ray's p is the first hull point,
// q is the last hull point, c is the circle (strictly outside the hull),
// inner selects inner/outer tangents, h selects which of the two tangents,
// farSide is a flag precomputed by the caller (see circleTangents)
struct CircleTangentCmp : public Ray {
  pt q; Circle c; bool inner, h, farSide;
  CircleTangentCmp(pt p, pt q, Circle c, bool inner, bool h, bool farSide)
    : Ray(p), q(q), c(c), inner(inner), h(h), farSide(farSide) {}
  bool cmp(const Ray &o) const override {
    // hull endpoint cases handled explicitly
    if (farSide == h && o.p == p) return true;
    if (farSide != h && o.p == q) return false;
    // tangent point on c as seen from the candidate vertex o.p (a vertex is
    // modeled as a radius-0 circle); this local q shadows the member q
    vector<pair<pt, pt>> t;
    assert(circleCircleTangent(Circle(o.p, 0), c, inner, t) == 1);
    pt q = t[h].second;
    if (ccw(q, p, o.p) == (h ? 1 : -1)) return farSide == h;
    else return (ccw(o.p, o.nxt->p, q) < 0) == h;
  }
};
// Helper struct for closestPt
// Helper struct for closestPt; the Ray's p is the query point; [lo, hi] is
// the angular window of edge directions to search within
struct ClosestPointCmp : public Ray {
  Angle lo, hi;
  ClosestPointCmp(pt p, Angle lo, Angle hi) : Ray(p), lo(lo), hi(hi) {}
  bool cmp(const Ray &o) const override {
    // clamp the search to edges whose direction angle lies in [lo, hi]
    Angle::setPivot(pt(0, 0)); if (Angle(o.l.v) < lo) return true;
    if (hi < Angle(o.l.v)) return false;
    // inside the window, keep moving while the following edge is at least
    // as close to p as the current edge
    return ptSegDist(p, o.p, o.nxt->p)
      >= ptSegDist(p, o.nxt->p, o.nxt->nxt->p);
  }
};
// Helper struct for lineIntersection
// Helper struct for lineIntersection; l (the Ray's line) is the query
// line; [lo, hi] is the angular window of edge directions to search within
struct LineIntersectionCmp : public Ray {
  Angle lo, hi;
  LineIntersectionCmp(Line l, Angle lo, Angle hi)
    : Ray(pt(0, 0), l), lo(lo), hi(hi) {}
  bool cmp(const Ray &o) const override {
    // clamp the search to edges whose direction angle lies in [lo, hi]
    Angle::setPivot(pt(0, 0)); if (Angle(o.l.v) < lo) return true;
    if (hi < Angle(o.l.v)) return false;
    // advance while o's successor is still strictly right of the query line
    return l.onLeft(o.nxt->p) < 0;
  }
};
// Maintains the convex hull of points where points can be added at any time
// Points are stored as a circular ordered set of Rays, with an associated
// Line in the direction towards the next point
// Points are strictly convex
// Fields:
// a2: twice the area of the convex hull
// Functions:
// clear(): clears the points in the convex hull
// isIn(p): returns 1 if inside the polygon, 0 if on the edge, -1 if outside
// singlePointTangent(p, left): returns an iterator pointing to the left or
// right tangent, with the closest point to p being selected if there are
// multiple points, p must be strictly outside the polygon
// pointTangents(p): returns a pair of iterators, with the first iterator
// being the left tangent point and the second iterator being the right
// tangent if p is considered to be below the polygon; all points strictly
// between the tangent iterator are strictly within the tangent lines,
// while all other points are on or outside the tangent lines,
// p must be strictly outside the polygon
// circleTangents(c, inner): returns a pair of iterators, with the first
// iterator being the left inner or outer tangent point and the second
// iterator being the right tangent if c is considered to be below the
// polygon; all points strictly between the tangent iterator are strictly
// within the tangent lines, while all other points are on or outside the
// tangent lines (same iterator means all points are inside or on the
// tangent lines), the circle must be strictly and completely outside the
// polygon
// hullTangents(hull, inner): returns a vector of pair of iterators, with the
// first element in each pair being the iterator in the first polygon,
// and the second element being the iteraetor in the second polygon; first
// point in each pair is the left inner/outer tangent point of the first
// polygon if the second polygon is considered to be below the
// first polygon
// closestPt(p): returns the closest point on the edge of the polygon to
// the point p where p is strictly outside the polygon
// addPoint(p): adds the point p and removes any points that are no longer
// in the convex hull; returns true if p is in the hull, false otherwise;
// Angle::pivot is set to (0, 0)
// extremeVertex(dir): returns an iterator pointing to an extreme vertex
// in the direction dir (a vertex that is the furthest point in that
// direction, selecting the rightmost vertex if there are multiple);
// Angle::pivot is set to (0, 0)
// lineIntersection(l): if l does not intersect with this convex hull, an
// empty vector is returned; if there is one point of intersection, a
// vector containing the point of intersection is returned; if there is a
// segment of intersection, a vector containing two endpoints of the
// line segment intersection is returned
// halfPlaneIntersection(l): intersects this convex hull with the half-plane
// specified by the left side of l (including l itself); resulting hull
// is strictly convex; Angle::pivot is set to (0, 0)
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(1)
// clear: O(N)
// isIn, singlePointTangent, pointTangents, circleTangents,
// closestPt, extremeVertex, lineIntersection: O(log N)
// hullTangents: O(log N log M)
// addPoint, halfPlaneIntersection: O(log N + K) for K removed points
// Memory Complexity: O(1)
// Tested:
// Fuzz Tested
// https://codeforces.com/problemsets/acmsguru/problem/99999/277
// https://open.kattis.com/problems/bigbrother
// https://open.kattis.com/problems/marshlandrescues
// https://dmoj.ca/problem/ccoprep3p3
// https://www.acmicpc.net/problem/4225
struct IncrementalConvexHull : public set<Ray> {
  // a2 is twice the signed area of the current hull, maintained
  // incrementally by rem, addPoint, and halfPlaneIntersection
  using iter = set<Ray>::iterator; T a2; IncrementalConvexHull() : a2(0) {}
  // Circular iterator helpers: mod wraps end() to begin(), prv/nxt step
  // backwards/forwards around the circular vertex order
  iter mod(iter it) const { return !empty() && it == end() ? begin() : it; }
  iter prv(iter it) const { return prev(it == begin() ? end() : it); }
  iter nxt(iter it) const { return mod(next(it)); }
  // Erases a vertex and relinks the predecessor's nxt pointer
  void erase(iter it) {
    iter a = prv(it), b = nxt(it); set<Ray>::erase(it); if (empty()) return;
    a->nxt = &*b;
  }
  // Inserts a vertex with its outgoing direction line and fixes the nxt
  // pointers of both the new vertex and its predecessor
  void emplace(pt p, Line l) {
    iter it = set<Ray>::emplace(p, l).first;
    prv(it)->nxt = &*it; it->nxt = &*nxt(it);
  }
  // Removes a vertex while keeping the twice-area a2 up to date;
  // returns the iterator following the removed vertex
  iter rem(iter it) {
    iter a = prv(it), b = nxt(it); a2 += cross(a->p, b->p);
    a2 -= cross(a->p, it->p) + cross(it->p, b->p); erase(it); return b;
  }
  void clear() { set<Ray>::clear(); a2 = 0; }
  // Returns 1 if p is strictly inside, 0 if on the boundary, -1 if outside
  int isIn(pt p) const {
    if (empty()) return -1;
    // check the closing edge from the last vertex back to the first, then
    // binary search (via IsInCmp) for the wedge containing p
    pt a = begin()->p, b = prev(end())->p; if (onSeg(p, a, b)) return 0;
    auto it = lower_bound(IsInCmp(a, b, p)); pt q = nxt(it)->p;
    if (onSeg(p, it->p, q)) return 0;
    // degenerate hulls (1 or 2 distinct vertices) reduce to a segment test
    vector<pt> P{a, it->p, q}; sort(P.begin(), P.end());
    P.erase(unique(P.begin(), P.end()), P.end());
    if (int(P.size()) < 3) return onSeg(p, P[0], P.back()) ? 0 : -1;
    return ccw(it->p, q, p) >= 0 ? 1 : -1;
  }
  // Returns the left (left == true) or right tangent vertex from p, which
  // must be strictly outside the hull
  iter singlePointTangent(pt p, bool left) const {
    // farSide: which half of the hull (split at first/last vertex) the
    // tangent search should favor, from orientation or distance tie-break
    pt a = begin()->p, b = prev(end())->p; int o = ccw(p, a, b);
    bool farSide = o ? o < 0 : lt(distSq(p, b), distSq(p, a));
    return mod(lower_bound(PointTangentCmp(a, b, p, left, farSide)));
  }
  // Returns the (left, right) tangent vertices from p
  pair<iter, iter> pointTangents(pt p) const {
    return make_pair(singlePointTangent(p, 1), singlePointTangent(p, 0));
  }
  // Returns the two inner or outer tangent vertices to circle c, which must
  // be strictly and completely outside the hull; h iterates over the
  // two tangents
  pair<iter, iter> circleTangents(Circle c, bool inner) const {
    pair<iter, iter> ret; pt a = begin()->p, b = prev(end())->p;
    for (int h = 0; h < 2; h++) {
      // tangent point on c as seen from the first hull vertex (modeled as a
      // radius-0 circle) seeds the farSide flag
      vector<pair<pt, pt>> t;
      assert(circleCircleTangent(Circle(a, 0), c, inner, t) == 1);
      pt q = t[h].second; int o = ccw(q, a, b);
      bool farSide = o ? o < 0 : lt(distSq(q, b), distSq(q, a));
      (h ? ret.second : ret.first) = mod(lower_bound(CircleTangentCmp(
        a, b, c, inner, h, farSide)));
    }
    return ret;
  }
  // Returns the closest point on the hull boundary to p, which must be
  // strictly outside the hull
  pt closestPt(pt p) const {
    // f finds the closest point on the chain of edges between iterators
    // a and b (inclusive) by ternary-like binary search on edge distance
    auto f = [&] (iter a, iter b) {
      if (a == b) return a->p;
      if (a != (b = prv(b)))
        a = mod(lower_bound(ClosestPointCmp(
          p, Angle(a->l.v), Angle(prv(b)->l.v))));
      return closestPtOnSeg(p, a->p, a->nxt->p);
    };
    // if the tangent range wraps around the set order, search both pieces
    // and also the closing edge from last to first vertex
    iter a, b; tie(a, b) = pointTangents(p); if (*b < *a) {
      pt q = f(a, prev(end())), r = f(begin(), b);
      pt s = distSq(p, q) < distSq(p, r) ? q : r;
      pt t = closestPtOnSeg(p, prev(end())->p, begin()->p);
      return distSq(p, s) < distSq(p, t) ? s : t;
    } else return f(a, b);
  }
  // Defined after HullTangentCmp below
  vector<pair<iter, iter>> hullTangents(const IncrementalConvexHull &hull,
                                        bool inner) const;
  // Adds p, removing all vertices no longer on the hull; returns true if p
  // is outside the current hull (and was added), false otherwise
  bool addPoint(pt p) {
    if (empty()) { emplace(p, Line(p, p)); return true; }
    if (isIn(p) >= 0) return false;
    // find the tangent range, widen it over collinear vertices, then remove
    // every vertex strictly between the tangents
    iter l, r; tie(l, r) = pointTangents(p); l = prv(l);
    if (ccw(l->p, nxt(l)->p, p) > 0) l = nxt(l);
    if (ccw(p, r->p, nxt(r)->p) == 0) r = nxt(r);
    for (l = nxt(l); l != r; l = rem(l));
    // splice p in between the tangent vertices and update the area
    l = prv(l); pt a = l->p, b = r->p;
    a2 += cross(a, p) + cross(p, b) - cross(a, b);
    erase(l); emplace(a, Line(a, p)); emplace(p, Line(p, b)); return true;
  }
  // Returns a vertex furthest in direction dir (rightmost on ties);
  // searches by the angle of the edge perpendicular to dir
  iter extremeVertex(pt dir) const {
    return mod(lower_bound(Ray(pt(0, 0), Line(perp(dir), 0))));
  }
  // Returns the intersection of line l with the hull boundary: empty, one
  // point, or the two endpoints of a segment of intersection
  vector<pt> lineIntersection(Line l) const {
    vector<pt> ret;
    // check records any boundary intersection on the edge leaving a
    auto check = [&] (iter a) {
      pt p = a->p, q = a->nxt->p; if (p != q) {
        pt r; lineLineIntersection(l, a->l, r);
        if (onSeg(r, p, q)) ret.push_back(r);
      }
      if (l.onLeft(p) == 0) ret.push_back(p);
      if (l.onLeft(q) == 0) ret.push_back(q);
    };
    // f searches the chain [a, b] for the edge crossing l
    auto f = [&] (iter a, iter b) {
      if (a == b) check(a);
      else check(mod(lower_bound(LineIntersectionCmp(
        l, Angle(a->l.v), Angle(prv(b)->l.v)))));
    };
    // split the hull at the two extreme directions of l (l and l reversed)
    // and search each resulting chain
    Line l2(-l.v, -l.c); iter a = mod(lower_bound(Ray(pt(0, 0), l)));
    iter b = mod(lower_bound(Ray(pt(0, 0), l2)));
    if (*b < *a) { std::swap(a, b); std::swap(l, l2); }
    f(a, b); std::swap(l, l2); f(b, prev(end())); f(begin(), a);
    sort(ret.begin(), ret.end());
    ret.erase(unique(ret.begin(), ret.end()), ret.end()); return ret;
  }
  // Intersects the hull with the half-plane on the left of l (inclusive);
  // the result remains strictly convex
  void halfPlaneIntersection(Line l) {
    if (empty()) return;
    // b: a vertex in the direction of l; if it is already on or left of l,
    // the entire hull survives
    iter b = mod(lower_bound(Ray(pt(0, 0), l)));
    if (l.onLeft(b->p) >= 0) return;
    // walk backwards to the last surviving vertex a; if none survive,
    // the intersection is empty
    iter a = prv(b); while (a != b && l.onLeft(a->p) < 0) a = prv(a);
    if (a == b) { clear(); return; }
    // remove the cut-off vertices on both sides of the crossing
    iter c = nxt(a); while (c != b) c = rem(c);
    c = nxt(b); while (l.onLeft(c->p) < 0) c = nxt(b = rem(b));
    // p, q: intersection points of l with the two boundary edges it cuts;
    // if either fails (near-parallel), collapse both to one point
    pt p, q, r = a->p, s = c->p; Line la = a->l, lb = b->l;
    if (lineLineIntersection(la, l, p) != 1
        || lineLineIntersection(l, lb, q) != 1) {
      assert(lineLineIntersection(la, lb, p) == 1); q = p;
    }
    // rebuild the two cut corners and the new edge along l, updating a2
    a2 -= cross(r, b->p) + cross(b->p, s); erase(a); erase(b);
    a2 += cross(r, p) + cross(p, q) + cross(q, s);
    if (r != p) emplace(r, la);
    if (q != s) emplace(q, lb);
    if ((r == p && q == s) || p != q) emplace(p, l);
  }
};
// Helper struct for hullTangents
// Helper struct for hullTangents; the Ray's p is the first vertex of this
// hull, q is the last vertex, hull is the other polygon, inner selects
// inner/outer tangents, h selects which of the two tangents, farSide is a
// flag precomputed by the caller (see hullTangents)
struct HullTangentCmp : public Ray {
  pt q; const IncrementalConvexHull &hull; bool inner, h, farSide;
  HullTangentCmp(pt p, pt q, const IncrementalConvexHull &hull,
                 bool inner, bool h, bool farSide)
    : Ray(p), q(q), hull(hull), inner(inner), h(h), farSide(farSide) {}
  bool cmp(const Ray &o) const override {
    // hull endpoint cases handled explicitly
    if (farSide == h && o.p == p) return true;
    if (farSide != h && o.p == q) return false;
    // tangent point on the other hull from candidate vertex o.p; each step
    // of the outer search costs an O(log M) inner tangent query
    // (this local q shadows the member q)
    pt q = hull.singlePointTangent(o.p, inner ^ h)->p;
    if (ccw(q, p, o.p) == (h ? 1 : -1)) return farSide == h;
    else return (ccw(o.p, o.nxt->p, q) < 0) == h;
  }
};
// Out-of-class definition of hullTangents (declared in
// IncrementalConvexHull, deferred because it needs HullTangentCmp);
// returns the two tangent vertex pairs between this hull and the other
// hull, one pair per tangent; O(log N log M)
vector<pair<IncrementalConvexHull::iter, IncrementalConvexHull::iter>>
  IncrementalConvexHull::hullTangents(const IncrementalConvexHull &hull,
                                      bool inner) const {
  vector<pair<iter, iter>> ret(2); pt a = begin()->p, b = prev(end())->p;
  for (int h = 0; h < 2; h++) {
    // seed the farSide flag from the tangent as seen from the first vertex
    pt q = hull.singlePointTangent(a, inner ^ h)->p; int o = ccw(q, a, b);
    bool farSide = o ? o < 0 : lt(distSq(q, b), distSq(q, a));
    // binary search this hull for the tangent vertex, then query the other
    // hull for its matching tangent vertex
    ret[h].first = mod(lower_bound(HullTangentCmp(
      a, b, hull, inner, h, farSide)));
    ret[h].second = hull.singlePointTangent(ret[h].first->p, inner ^ h);
  }
  return ret;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../search/BinarySearch.h"
using namespace std;
// Supports adding lines in the form f(x) = mx + b and finding
// the maximum value of f(x) at any given x; this version only supports
// adding lines in sorted order
// Template Arguments:
// T: the type of the slope (m) and intercept (b) of the line, as well as
// the type of the function argument (x), must be able to store m * b
// Cmp: the comparator to compare two f(x) values,
// convention is same as std::priority_queue in STL
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// REVERSE: boolean to indicate whether the lines are added in reverse order
// of the comparator
// Constructor Arguments:
// cmp: an instance of the Cmp struct
// Functions:
// addLine(m, b): adds a line in the form f(x) = mx + b to the set of lines,
// lines must be added in the order of slope sorted by Cmp, or in reverse
// order if REVERSE is true
// getMax(x): finds the maximum x value (based on the comparator) for all
// inserted lines
// getMaxMonoInc(x): finds the maximum x value (based on the comparator)
// for all inserted lines, where all queries have non decreasing x values
// (based on the standard < operator)
// getMaxMonoDec(x): finds the maximum x value (based on the comparator)
// for all inserted lines, where all queries have non increasing x values
// (based on the standard < operator)
// size(): returns the number of lines in the convex hull
// reserve(N): reserves space for N lines in the convex hull
// In practice, has a moderate constant
// Time Complexity:
// addLine, getMaxMonoInc, getMaxMonoDec: O(1) amortized
// getMax: O(log N) for N lines in the convex hull
// size: O(1)
// reserve: O(N)
// Memory Complexity: O(N) for N lines in the convex hull
// Tested:
// https://www.spoj.com/problems/CHTPRAC/
// https://atcoder.jp/contests/dp/tasks/dp_z
template <class T, class Cmp = less<T>, const bool REVERSE = false>
struct ConvexHullTrick {
  // A line f(x) = mx + b
  struct Line {
    T m, b; Line(T m, T b) : m(m), b(b) {}
    T eval(T x) const { return m * x + b; }
  };
  // L: the hull lines in insertion order (sorted by slope); front and back
  // are the persistent pointers used by the monotonic query functions
  vector<Line> L; Cmp cmp; int front, back;
  ConvexHullTrick(Cmp cmp = Cmp()) : cmp(cmp), front(0), back(0) {}
  int size() const { return L.size(); }
  // Adds f(x) = mx + b; slopes must arrive in Cmp order (reversed if
  // REVERSE), popping lines from the back that the new line dominates
  void addLine(T m, T b) {
    // cross-multiplied turn check: true if the last hull line becomes
    // redundant between the new line and the one before it
    auto ccw = [&] {
      T c1 = (L.back().m - m) * (L[size() - 2].b - b);
      T c2 = (L.back().b - b) * (L[size() - 2].m - m);
      return REVERSE ? c1 >= c2 : c1 <= c2;
    };
    // drop a line of equal slope whose intercept is not better
    while (!L.empty() && !cmp(m, L.back().m) && !cmp(L.back().m, m)
        && !cmp(b, L.back().b))
      L.pop_back();
    while (size() >= 2 && ccw()) L.pop_back();
    // keep back one past the end if it pointed at the old end, then clamp
    // both pointers to the new size after the insertion
    if (size() == back) back++;
    L.emplace_back(m, b);
    front = min(front, size() - 1); back = min(back, size());
  }
  // Advances front rightwards while the next line is at least as good;
  // valid only when query x values are non-decreasing
  T moveFront(T x) {
    while (front + 1 < size() && !cmp(L[front + 1].eval(x), L[front].eval(x)))
      front++;
    return L[front].eval(x);
  }
  // Advances back leftwards while the previous line is at least as good;
  // valid only when query x values are non-increasing
  T moveBack(T x) {
    while (back - 2 >= 0 && !cmp(L[back - 2].eval(x), L[back - 1].eval(x)))
      back--;
    return L[back - 1].eval(x);
  }
  T getMaxMonoInc(T x) { return REVERSE ? moveBack(x) : moveFront(x); }
  T getMaxMonoDec(T x) { return REVERSE ? moveFront(x) : moveBack(x); }
  // Arbitrary-order query: binary search for the first hull index where
  // the next line evaluates worse
  T getMax(T x) const {
    return L[bsearch<FIRST>(0, size() - 1, [&] (int i) {
      return cmp(L[i + 1].eval(x), L[i].eval(x));
    })].eval(x);
  }
  void reserve(int N) { L.reserve(N); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Angle.h"
#include "Line.h"
#include "../../datastructures/IntervalUnion.h"
using namespace std;
// Functions for a 2D circle
struct Circle {
  // o: the center, r: the radius
  pt o; T r; Circle(T r = 0) : o(0, 0), r(r) {}
  Circle(pt o, T r) : o(o), r(r) {}
  // 1 if p is inside this circle, 0 if on this circle,
  // -1 if outside this circle
  int contains(pt p) const { return sgn(r * r - distSq(o, p)); }
  // 1 if c is strictly inside this circle, 0 if inside and touching this
  // circle, -1 otherwise
  int contains(Circle c) const {
    T dr = r - c.r; return lt(dr, 0) ? -1 : sgn(dr * dr - distSq(o, c.o));
  }
  // 1 if c is strictly outside this circle, 0 if outside and touching this
  // circle, -1 otherwise
  int disjoint(Circle c) const {
    T sr = r + c.r; return sgn(distSq(o, c.o) - sr * sr);
  }
  // radial projection of p onto the circle boundary (undefined if p == o)
  pt proj(pt p) const { return o + (p - o) * r / dist(o, p); }
  // inversion of p about this circle: same ray from o, with
  // dist(o, p) * dist(o, inv(p)) == r^2 (undefined if p == o)
  pt inv(pt p) const { return o + (p - o) * r * r / distSq(o, p); }
};
// Determine the intersection of a circle and a line
// Function Arguments:
// c: the circle
// l: the line
// Return Value: the points of intersection, if any, of the circle and
// the line, guaranteed to be sorted based on projection on the line
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/noi05p6
// Computes the points where circle c meets line l
// h2 is the squared half-chord length (r^2 minus the squared distance from
// the center to the line): negative means no intersection, zero means the
// line is tangent; results are sorted by projection along l
vector<pt> circleLineIntersection(Circle c, Line l) {
  vector<pt> res;
  T h2 = c.r * c.r - l.distSq(c.o);
  pt mid = l.proj(c.o);
  if (eq(h2, 0)) {
    res.push_back(mid);
  } else if (lt(0, h2)) {
    pt off = l.v * sqrt(h2) / abs(l.v);
    res.push_back(mid - off);
    res.push_back(mid + off);
  }
  return res;
}
// Determine the intersection of a circle and a line segment
// Function Arguments:
// c: the circle
// a: the first point on the line segment
// b: the second point on the line segment
// Return Value: the points of intersection, if any, of the circle and
// the line segment, guaranteed to be sorted based on projection on the line
// from a to b
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/noi05p6
// Computes the points where circle c meets the closed segment from a to b
// A degenerate segment (a == b) intersects only if a is exactly on c;
// results are sorted by projection along the line from a to b
vector<pt> circleSegIntersection(Circle c, pt a, pt b) {
  vector<pt> res;
  if (a == b) {
    if (c.contains(a) == 0) res.push_back(a);
    return res;
  }
  Line l(a, b);
  // keep only line intersections whose projection falls within [a, b]
  for (auto &&q : circleLineIntersection(c, l))
    if (l.cmpProj(a, q) <= 0 && l.cmpProj(q, b) <= 0) res.push_back(q);
  return res;
}
// Determine the area of the intersection of a circle and a half-plane defined
// by the left side of a line
// Function Arguments:
// c: the circle
// l: the line with the half-plane defined by the left side
// Return Value: the area of the intersection of the circle and the half-plane
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/noi05p6
// Computes the area of the intersection of circle c with the half-plane on
// the left side of line l
// Starts with the circular segment cut off by l; if the center lies
// strictly left of l, the answer is the full disk minus that segment
T circleHalfPlaneIntersectionArea(Circle c, Line l) {
  T area = 0;
  T h2 = c.r * c.r - l.distSq(c.o);
  if (!lt(h2, 0)) {
    pt mid = l.proj(c.o);
    pt off = l.v * sqrt(max(h2, T(0))) / abs(l.v);
    // central angle subtended by the chord, giving the segment area
    T theta = abs(ang(mid - off, c.o, mid + off));
    area = c.r * c.r * (theta - sin(theta)) / 2;
  }
  if (l.onLeft(c.o) > 0) area = acos(T(-1)) * c.r * c.r - area;
  return area;
}
// Determine the intersection of two circles
// Function Arguments:
// c1: the first circle
// c2: the second circle
// res: the points of intersection, if any, of the two circles;
// the first point is guaranteed to not be on the left side of the
// line from c1.o to c2.o
// Return Value: 0 if no intersection, 2 if identical circles, 1 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://codeforces.com/contest/420/problem/E
// https://open.kattis.com/problems/drawingcircles
// https://dmoj.ca/problem/noi05p6
int circleCircleIntersection(Circle c1, Circle c2, vector<pt> &res) {
  pt d = c2.o - c1.o; T d2 = norm(d);
  // concentric circles: identical (2) or no intersection (0)
  if (eq(d2, 0)) return eq(c1.r, c2.r) ? 2 : 0;
  // pd / d2 is the fraction of the center line from c1.o to the chord;
  // h2 is the squared half-chord length, p the foot on the center line
  T pd = (d2 + c1.r * c1.r - c2.r * c2.r) / 2;
  T h2 = c1.r * c1.r - pd * pd / d2; pt p = c1.o + d * pd / d2;
  if (eq(h2, 0)) res.push_back(p);
  else if (lt(0, h2)) {
    pt h = perp(d) * sqrt(h2 / d2); res.push_back(p - h); res.push_back(p + h);
  }
  // points are appended to res; the return value assumes res was empty
  // on entry
  return !res.empty();
}
// Determine the area of the intersection of two circles
// Function Arguments:
// c1: the first circle
// c2: the second circle
// Return Value: the area of the intersection of the two circles
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://codeforces.com/contest/600/problem/D
// Computes the area of the intersection of circles c1 and c2
// Handles the disjoint and fully-contained cases first, then sums one
// circular segment from each circle, split by the common chord
T circleCircleIntersectionArea(Circle c1, Circle c2) {
  T PI = acos(T(-1));
  T d = dist(c1.o, c2.o);
  if (!lt(d, c1.r + c2.r)) return 0;                 // disjoint or tangent
  if (!lt(c2.r, d + c1.r)) return PI * c1.r * c1.r;  // c1 inside c2
  if (!lt(c1.r, d + c2.r)) return PI * c2.r * c2.r;  // c2 inside c1
  // circular segment of the circle with radius r1 cut by the common chord
  auto segArea = [&] (T r1, T r2) {
    T cosHalf = (d * d + r1 * r1 - r2 * r2) / (2 * d * r1);
    T theta = 2 * acos(max(T(-1), min(T(1), cosHalf)));
    return r1 * r1 * (theta - sin(theta)) / 2;
  };
  return segArea(c1.r, c2.r) + segArea(c2.r, c1.r);
}
// Determine the tangents of two circles
// Function Arguments:
// c1: the first circle
// c2: the second circle
// inner: whether to find the inner or outer tangents
// res: a vector of pairs of size 2 of the tangents, with each pair
// representing a point on the first circle and the second circle;
// the first point in each pair is guaranteed to not be on the left side of
// the line from c1.o to c2.o
// Return Value: 0 if no tangents, 2 if identical circles, 1 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/nccc7s4
// https://dmoj.ca/problem/noi05p6
int circleCircleTangent(Circle c1, Circle c2, bool inner,
    vector<pair<pt, pt>> &res) {
  // inner tangents are computed by negating the second radius, reducing
  // both cases to the outer-tangent formula
  pt d = c2.o - c1.o; T r2 = inner ? -c2.r : c2.r, dr = c1.r - r2;
  // h2 < 0: circles overlap too much for this tangent type; h2 == 0 with
  // no valid configuration is reported as 2 (identical circles)
  T d2 = norm(d), h2 = d2 - dr * dr;
  if (eq(d2, 0) || lt(h2, 0)) return eq(h2, 0) ? 2 : 0;
  // one tangent per sign of the perpendicular component; v is the unit
  // direction from each center to its tangent point
  for (T sign : {T(-1), T(1)}) {
    pt v = (d * dr + perp(d) * sqrt(max(h2, T(0))) * sign) / d2;
    res.emplace_back(c1.o + v * c1.r, c2.o + v * r2);
  }
  return 1;
}
// Determines the circumcircle from 3 non-collinear points
// Function Arguments:
// a: the first point
// b: the second point
// c: the third point
// Return Value: the circumcircle of the 3 points
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://www.spoj.com/problems/QCJ4/
// Computes the circumcircle of 3 non-collinear points a, b, c
// Translates so a sits at the origin, applies the complex-number
// circumcenter formula, then translates the center back by a
Circle circumcircle(pt a, pt b, pt c) {
  pt u = b - a, v = c - a;
  pt ctr = u * v * (conj(v) - conj(u)) / (u * conj(v) - conj(u) * v);
  return Circle(a + ctr, abs(ctr));
}
// Computes the area of union of multiple circles
// Function Arguments:
// circles: a vector of the circles
// Return Value: the area of the union of all the circles
// Time Complexity: O(N^2 log N)
// Memory Complexity: O(N)
// Tested:
// https://www.spoj.com/problems/CIRU/
T circleUnionArea(const vector<Circle> &circles) {
  int n = circles.size(); T ret = 0; for (int i = 0; i < n; i++) {
    // intervals: angular arcs of circle i's boundary covered by other
    // circles, measured around circle i's center
    vector<pair<Angle, Angle>> intervals; Angle::setPivot(circles[i].o);
    bool inside = false; for (int j = 0; j < n; j++) if (i != j) {
      int o = circles[j].contains(circles[i]);
      // skip circle i if contained in j; ties between touching/identical
      // circles broken by radius then by index so exactly one survives
      if (o > 0 || (o == 0 && (lt(circles[i].r, circles[j].r) || j < i))) {
        inside = true; break;
      }
      vector<pt> p; circleCircleIntersection(circles[i], circles[j], p);
      if (int(p.size()) == 2) {
        // record the covered arc; if it wraps past the reference angle,
        // split it into two intervals
        // (NOTE(review): Angle(circles[i].o) is the pivot itself -- appears
        // to act as a sentinel endpoint; confirm against Angle's ordering)
        Angle a(p[0]), b(p[1]); if (a < b) intervals.emplace_back(a, b);
        else {
          intervals.emplace_back(a, Angle(circles[i].o));
          Angle c(circles[i].o - pt(circles[i].r, 0));
          intervals.emplace_back(c, b);
        }
      }
    }
    if (inside) continue;
    // a fully uncovered circle contributes its whole disk area
    if (intervals.empty()) ret += acos(T(-1)) * circles[i].r * circles[i].r;
    else {
      // merge the covered arcs, rejoining a pair that was split at the
      // sentinel wrap point
      intervalUnion(intervals); if (intervals.back().second == circles[i].o) {
        intervals.front().first = intervals.back().first; intervals.pop_back();
      }
      // each uncovered arc between consecutive covered intervals adds a
      // chord triangle (cross term) plus a circular segment
      for (int j = 0, k = int(intervals.size()); j < k; j++) {
        pt a = intervals[j].second.p;
        pt b = intervals[j + 1 == k ? 0 : j + 1].first.p;
        ret += cross(a, b) / 2;
        ret += circleHalfPlaneIntersectionArea(circles[i], Line(b, a));
      }
    }
  }
  return ret;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Helper struct for IncrementalConvexHullTrick
// Helper struct for IncrementalConvexHullTrick; holds either a real line
// f(x) = m * x + b (isQuery false) or a query marker carrying only an x
// coordinate (isQuery true); for real lines, x caches the intersection
// point with the following hull line, hence mutable
template <class T> struct CHTLine {
  bool isQuery;
  T m, b;
  mutable T x;
  CHTLine(T m, T b) : isQuery(false), m(m), b(b), x(T()) {}
  CHTLine(T x) : isQuery(true), m(T()), b(T()), x(x) {}
};
// Supports adding lines of the form f(x) = mx + b and querying the maximum
// value of f(x) (based on Cmp) at any x, with updates and queries in
// arbitrary order; the upper envelope is kept in a multiset ordered by
// slope, with each line caching where its successor overtakes it
// Template Arguments:
//   T: the type of the slope (m), intercept (b), and query argument (x)
//   Cmp: the comparator over f(x) values, same convention as
//     std::priority_queue in STL
// Constructor Arguments:
//   cmp: an instance of the Cmp struct
//   INF: a value for positive infinity, must be negatable
// Functions (in addition to std::multiset):
//   addLine(m, b): adds the line f(x) = mx + b
//   getMax(x): returns the maximum f(x) over all inserted lines (requires
//     at least one line to have been added)
// In practice, has a moderate constant
// Time Complexity:
//   constructor: O(1)
//   addLine, getMax: O(log(N)) amortized for N lines in the convex hull
// Memory Complexity: O(N) for N lines in the convex hull
// Tested:
//   https://judge.yosupo.jp/problem/line_add_get_min
//   https://open.kattis.com/problems/longestlife
//   https://www.spoj.com/problems/CHTPRAC/
#define FUN function<bool(const CHTLine<T> &, const CHTLine<T> &)>
template <class T, class Cmp = less<T>>
struct IncrementalConvexHullTrick : public multiset<CHTLine<T>, FUN> {
  using MS = multiset<CHTLine<T>, FUN>;
  using iter = typename MS::iterator;
  using MS::begin; using MS::end; using MS::emplace; using MS::erase;
  using MS::lower_bound;
  Cmp cmp; T INF;
  // Multiset ordering: any query marker compares by its x coordinate
  // against each line's cached intersection x; real lines order by slope
  FUN makeFun(Cmp cmp) {
    return [=] (const CHTLine<T> &a, const CHTLine<T> &b) {
      return a.isQuery || b.isQuery ? a.x < b.x : cmp(a.m, b.m);
    };
  }
  IncrementalConvexHullTrick(Cmp cmp = Cmp(), T INF = numeric_limits<T>::max())
      : MS(makeFun(cmp)), cmp(cmp), INF(INF) {}
  // Division that floors toward negative infinity for integral types so
  // cached intersection x coordinates stay consistent; exact otherwise
  template <const bool _ = is_integral<T>::value
      || is_same<__int128_t, T>::value>
  typename enable_if<!_, T>::type div(T a, T b) { return a / b; }
  template <const bool _ = is_integral<T>::value
      || is_same<__int128_t, T>::value>
  typename enable_if<_, T>::type div(T a, T b) {
    return a / b - T((a < T()) != (b < T()) && T() != a % b);
  }
  // Caches in a->x the x coordinate from which line b beats line a;
  // returns true if a never wins before b's own cached intersection,
  // meaning a is redundant on the envelope
  bool intersect(iter a, iter b) {
    if (b == end()) { a->x = INF; return false; }
    if (!cmp(a->m, b->m) && !cmp(b->m, a->m))
      a->x = cmp(b->b, a->b) ? INF : -INF;
    else a->x = div(b->b - a->b, a->m - b->m);
    return a->x >= b->x;
  }
  // Inserts f(x) = mx + b, then erases every neighbor the new line makes
  // redundant, repairing the cached intersections along the way
  void addLine(T m, T b) {
    auto aft = emplace(m, b), cur = aft++, bef = cur;
    while (intersect(cur, aft)) aft = erase(aft);
    if (bef != begin() && intersect(--bef, cur))
      intersect(bef, cur = erase(cur));
    while ((cur = bef) != begin() && (--bef)->x >= cur->x)
      intersect(bef, erase(cur));
  }
  // Locates the envelope line active at x with a query marker, then
  // evaluates it
  T getMax(T x) const {
    CHTLine<T> ln = *lower_bound(CHTLine<T>(x));
    return ln.m * x + ln.b;
  }
};
#undef FUN
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Angle.h"
#include "Line.h"
using namespace std;
// Helper struct for polygonTriangulation
// Helper struct for polygonTriangulation; represents either a polygon edge
// or a horizontal query line, ordered by y coordinate at the current sweep
// x coordinate Chain::X
struct Chain {
  static T X; mutable Line l;
  // query chain at height Y (NOTE(review): relies on the Line(a, b, c)
  // coefficient constructor -- confirm against Line.h)
  Chain(T Y) : l(0, 1, Y) {}
  // edge chain through points p and q
  Chain(pt p, pt q) : l(p, q) {}
  // Compares the y values of the two chains at x == X; from the line
  // representation, y at X is (l.c + l.v.y * X) / l.v.x, and the division
  // is avoided by cross-multiplying (assumes consistent signs of v.x,
  // i.e. edges oriented with increasing x -- NOTE(review): confirm)
  bool operator < (const Chain &c) const {
    return lt((l.c + l.v.y * X) * c.l.v.x, (c.l.c + c.l.v.y * X) * l.v.x);
  }
};
// Helper struct for polygonTriangulation
// Helper struct for polygonTriangulation; bookkeeping for one monotone
// chain during the sweep: its id, the most recently appended vertex, and
// the vertex that future diagonals should link back to (initially the
// same as last)
struct ChainInfo {
  int chainId;
  int last;
  int link;
  ChainInfo(int chainId, int last)
      : chainId(chainId), last(last), link(last) {}
};
// Out-of-class definition of the sweep x coordinate shared by all Chains
T Chain::X = 0;
// Triangulates a polygon such that all N - 2 triangles are disjoint, with the
// union of the triangles being equal to the original polygon
// Function Arguments:
// P: the points of the simple polygon in ccw order, no two points can
// be identical
// Return Value: a vector of arrays of 3 points, representing a triangle in
// the triangulation, points given in ccw order
// In practice, has a moderate constant
// Time Complexity: O(N log N)
// Memory Complexity: O(N)
// Tested:
// Fuzz Tested
vector<array<pt, 3>> polygonTriangulation(const vector<pt> &P) {
  vector<array<pt, 3>> ret;
  // triangulates one x-monotone piece of the polygon; ind lists the
  // indices into P of the piece's vertices in ccw order
  auto monotone = [&] (const vector<int> &ind) {
    // records the ccw triangle at positions i, j, k of ind
    auto add = [&] (int i, int j, int k) {
      ret.push_back(array<pt, 3>{P[ind[i]], P[ind[j]], P[ind[k]]});
    };
    auto pccw = [&] (int i, int j, int k) {
      return ccw(P[ind[i]], P[ind[j]], P[ind[k]]);
    };
    // st: position (in ind) of the leftmost vertex of this piece
    int st = min_element(ind.begin(), ind.end(), [&] (int i, int j) {
      return P[i].x < P[j].x;
    }) - ind.begin();
    auto prv = [&] (int i) { return i == 0 ? int(ind.size()) - 1 : i - 1; };
    auto nxt = [&] (int i) { return i + 1 == int(ind.size()) ? 0 : i + 1; };
    // lo and hi hold the not-yet-triangulated vertices of the lower and
    // upper boundary chains; each step consumes the next vertex in x order
    // from whichever chain comes first, emitting triangles while the turn
    // test (pccw == 1) allows it
    deque<int> lo{st}, hi{st}; for (int k = 1; k < int(ind.size()); k++) {
      int a = nxt(lo.back()), b = prv(hi.back());
      if (P[ind[a]].x < P[ind[b]].x) {
        while (int(lo.size()) >= 2
               && pccw(lo[lo.size() - 2], lo.back(), a) == 1) {
          add(lo[lo.size() - 2], lo.back(), a); lo.pop_back();
        }
        while (int(hi.size()) >= 2 && pccw(a, hi[1], hi[0]) == 1) {
          add(a, hi[1], hi[0]); hi.pop_front(); lo.front() = hi.front();
        }
        lo.push_back(a);
      } else {
        while (int(hi.size()) >= 2
               && pccw(b, hi.back(), hi[hi.size() - 2]) == 1) {
          add(b, hi.back(), hi[hi.size() - 2]); hi.pop_back();
        }
        while (int(lo.size()) >= 2 && pccw(lo[0], lo[1], b) == 1) {
          add(lo[0], lo[1], b); lo.pop_front(); hi.front() = lo.front();
        }
        hi.push_back(b);
      }
    }
  };
  // ord: vertex indices sorted by x for the plane sweep below
  int n = P.size(); vector<int> ord(n); iota(ord.begin(), ord.end(), 0);
  sort(ord.begin(), ord.end(), [&] (int a, int b) { return P[a].x < P[b].x; });
  // isAdj: whether i and j are neighbours on the polygon boundary
  auto isAdj = [&] (int i, int j) {
    int d = abs(i - j); return d == 1 || d == n - 1;
  };
  auto prv = [&] (int i) { return i == 0 ? n - 1 : i - 1; };
  auto nxt = [&] (int i) { return i + 1 == n ? 0 : i + 1; };
  // G: planar subdivision adjacency; each directed edge gets a unique id
  // (curEdge) so faces can be traced exactly once later
  vector<vector<pair<int, int>>> G(n); int curEdge = 0, curChain = 0;
  auto addBiEdge = [&] (int i, int j) {
    if (i == -1 || j == -1 || isAdj(i, j)) return;
    G[i].emplace_back(j, curEdge++); G[j].emplace_back(i, curEdge++);
  };
  for (int i = 0; i < n; i++) G[i].emplace_back(nxt(i), curEdge++);
  // plane sweep left to right over groups of equal x; lo/hi store the
  // active lower/upper boundaries of open monotone pieces, ordered by their
  // y position at the sweep line (Chain::X); diagonals added via addBiEdge
  // split the polygon into x-monotone pieces
  multimap<Chain, ChainInfo> lo, hi; for (int l = 0, r = 0; l < n; l = r) {
    for (r = l + 1; r < n && !lt(P[ord[l]].x, P[ord[r]].x); r++);
    sort(ord.begin() + l, ord.begin() + r, [&] (int a, int b) {
      return P[a].y < P[b].y;
    });
    Chain::X = P[ord[l]].x; for (int d = l, u = l; d < r; d = u) {
      // [d, u) is a maximal run of boundary-adjacent vertices at this x
      for (u = d + 1; u < r && isAdj(ord[u - 1], ord[u]); u++);
      auto it1 = lo.upper_bound(Chain(P[ord[u - 1]].y));
      auto it2 = hi.lower_bound(Chain(P[ord[d]].y));
      // NOTE(review): the branches below assign through it->first.l, which
      // requires Chain's line member to be mutable (Chain is declared
      // earlier in this file) since multimap keys are const -- confirm
      if (it1 == lo.begin() || it2 == hi.end()
          || ((--it1)->second.chainId != it2->second.chainId
              && (nxt(it1->second.last) != ord[u - 1]
                  || prv(it2->second.last) != ord[d]))) {
        // start vertex: open a new monotone piece
        lo.emplace(Chain(P[ord[d]], P[nxt(ord[d])]),
                   ChainInfo(curChain, ord[d]));
        hi.emplace(Chain(P[ord[u - 1]], P[prv(ord[u - 1])]),
                   ChainInfo(curChain++, ord[u - 1]));
      } else if (it1->second.chainId != it2->second.chainId) {
        // merge vertex: two pieces join into one
        auto it3 = next(it2), it4 = prev(it1);
        addBiEdge(ord[u - 1], it1->second.link);
        addBiEdge(ord[d], it2->second.link);
        addBiEdge(ord[u - 1], it3->second.link);
        addBiEdge(ord[d], it4->second.link);
        lo.erase(it1); hi.erase(it2);
        it3->second.chainId = it4->second.chainId;
        it3->second.link = -1; it4->second.link = ord[u - 1];
      } else {
        // same piece: end vertex, continuation, or split, depending on
        // which boundary the run connects to
        bool conLo = nxt(it1->second.last) == ord[d];
        bool conHi = prv(it2->second.last) == ord[u - 1];
        if (conLo && conHi) {
          addBiEdge(ord[d], it1->second.link);
          addBiEdge(ord[d], it2->second.link);
          lo.erase(it1); hi.erase(it2);
        } else if (conLo) {
          addBiEdge(ord[d], it1->second.link);
          addBiEdge(ord[d], it2->second.link);
          it1->first.l = Line(P[ord[u - 1]], P[nxt(ord[u - 1])]);
          it1->second.last = it1->second.link = ord[u - 1];
          it2->second.link = -1;
        } else if (conHi) {
          addBiEdge(ord[d], it1->second.link);
          addBiEdge(ord[d], it2->second.link);
          it2->first.l = Line(P[ord[d]], P[prv(ord[d])]);
          it2->second.last = it2->second.link = ord[d];
          it1->second.link = -1;
        } else if (nxt(it1->second.last) == ord[u - 1]) {
          addBiEdge(ord[u - 1], it1->second.link);
          addBiEdge(ord[u - 1], it2->second.link);
          it1->first.l = Line(P[ord[d]], P[nxt(ord[d])]);
          it1->second.last = ord[d]; it1->second.link = ord[u - 1];
          it2->second.link = -1;
        } else if (prv(it2->second.last) == ord[d]) {
          addBiEdge(ord[d], it1->second.link);
          addBiEdge(ord[d], it2->second.link);
          it2->first.l = Line(P[ord[u - 1]], P[prv(ord[u - 1])]);
          it2->second.last = it2->second.link = ord[u - 1];
          it1->second.link = -1;
        } else {
          // split vertex: one piece becomes two
          addBiEdge(ord[d], it1->second.link);
          addBiEdge(ord[d], it2->second.link);
          hi.emplace(Chain(P[ord[d]], P[prv(ord[d])]),
                     ChainInfo(it1->second.chainId, ord[d]));
          it1->second.link = it2->second.link = -1;
          it2->second.chainId = curChain;
          lo.emplace(Chain(P[ord[u - 1]], P[nxt(ord[u - 1])]),
                     ChainInfo(curChain++, ord[u - 1]));
        }
      }
    }
  }
  // sort each vertex's incident edges by angle (deduplicating parallel
  // edges), then walk the faces of the planar subdivision; each traced
  // face is an x-monotone piece, which is handed to monotone above
  auto cmpAng = [&] (const pair<int, int> &a, const pair<int, int> &b) {
    return Angle(P[a.first]) < Angle(P[b.first]);
  };
  auto eqAng = [&] (const pair<int, int> &a, const pair<int, int> &b) {
    return Angle(P[a.first]) == Angle(P[b.first]);
  };
  for (int i = 0; i < n; i++) {
    Angle::setPivot(P[i]); sort(G[i].begin(), G[i].end(), cmpAng);
    G[i].erase(unique(G[i].begin(), G[i].end(), eqAng), G[i].end());
  }
  vector<bool> vis(curEdge, false);
  for (int i = 0; i < n; i++) for (auto &&e : G[i]) if (!vis[e.second]) {
    vector<int> ind; int cur = i, nxt = e.first, eid = e.second;
    while (!vis[eid]) {
      ind.push_back(cur); vis[eid] = true; Angle::setPivot(P[nxt]);
      // continue the face: take the edge just before the reverse edge in
      // angular order around nxt (wrapping from the first to the last)
      auto it = lower_bound(G[nxt].begin(), G[nxt].end(),
                            make_pair(cur, -1), cmpAng);
      it = prev(it == G[nxt].begin() ? G[nxt].end() : it);
      cur = nxt; nxt = it->first; eid = it->second;
    }
    monotone(ind);
  }
  return ret;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Computes the convex hull of a set of N points
// Function Arguments:
// P: the vector of points
// Return Value: a vector of points in the convex hull in ccw order
// In practice, has a small constant
// Time Complexity: O(N log N)
// Memory Complexity: O(N)
// Tested:
// https://open.kattis.com/problems/convexhull
vector<pt> convexHull(vector<pt> P) {
  // guard: with an empty input the first pass pushes nothing and the
  // unconditional pop_back below would be undefined behavior
  if (P.empty()) return vector<pt>();
  vector<pt> hull; sort(P.begin(), P.end());
  // monotone chain: the first pass over the x-sorted points builds one
  // chain, the second pass over the reversed points builds the other
  for (int h = 0; h < 2; h++) {
    int st = hull.size(); for (auto &&p : P) {
      // pop while the last two chain points and p fail to make a strict
      // ccw turn (collinear points are discarded)
      while (int(hull.size()) >= st + 2
             && ccw(hull[hull.size() - 2], hull.back(), p) <= 0)
        hull.pop_back();
      hull.push_back(p);
    }
    // drop the chain's final point: it is the first point of the next
    // chain (and, after the second pass, duplicates the hull's start)
    hull.pop_back(); reverse(P.begin(), P.end());
  }
  // degenerate cases: all points equal collapse to one point, and a
  // single-point input yields an empty hull after the pops above
  if (int(hull.size()) == 2 && hull[0] == hull[1]) hull.pop_back();
  if (hull.empty()) hull.push_back(P[0]);
  return hull;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Computes the closest pair of points out of a set of N points
// Constructor Arguments:
// P: the vector of points
// Fields:
// best1: the first point in the pair of closest points
// best2: the second point in the pair of closest points
// bestDistSq: the distance squared between best1 and best2
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(N log N)
// Memory Complexity: O(N)
// Tested:
// https://open.kattis.com/problems/closestpair2
struct ClosestPair {
  // orders points by y coordinate only (used for the per-level merge)
  static bool yOrdLt(pt p, pt q) { return lt(p.y, q.y); }
  pt best1, best2; T bestDistSq;
  // divide and conquer on P[lo..hi] (x-sorted on entry); on return the
  // subrange is sorted by y, and best1/best2/bestDistSq account for any
  // closer pair inside the subrange or straddling the dividing line
  void closest(vector<pt> &P, int lo, int hi) {
    if (hi <= lo) return;
    // median defines the vertical dividing line; recurse on both halves,
    // then merge the two y-sorted halves into aux
    int mid = lo + (hi - lo) / 2; pt median = P[mid];
    closest(P, lo, mid); closest(P, mid + 1, hi); vector<pt> aux;
    merge(P.begin() + lo, P.begin() + mid + 1, P.begin() + mid + 1,
          P.begin() + hi + 1, back_inserter(aux), yOrdLt);
    copy(aux.begin(), aux.end(), P.begin() + lo);
    // aux is reused in place as the strip of points whose x distance to
    // the dividing line is below the current best, kept in y order; each
    // new strip point is compared against previous strip points until the
    // y gap alone already exceeds the best distance
    for (int i = lo, k = 0; i <= hi; i++) {
      T dx = P[i].x - median.x, dx2 = dx * dx; if (lt(dx2, bestDistSq)) {
        for (int j = k - 1; j >= 0; j--) {
          T dy = P[i].y - aux[j].y, dy2 = dy * dy;
          if (!lt(dy2, bestDistSq)) break;
          T dSq = distSq(P[i], aux[j]); if (lt(dSq, bestDistSq)) {
            bestDistSq = dSq; best1 = P[i]; best2 = aux[j];
          }
        }
        aux[k++] = P[i];
      }
    }
  }
  // sorts by x (operator < on pt) and runs the recursion over all points
  ClosestPair(vector<pt> P) : bestDistSq(numeric_limits<T>::max()) {
    sort(P.begin(), P.end()); closest(P, 0, int(P.size()) - 1);
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "ConvexHull.h"
using namespace std;
// Computes the farthest pair of points out of a set of N points
// Constructor Arguments:
// P: the vector of points
// Fields:
// best1: the first point in the pair of farthest points
// best2: the second point in the pair of farthest points
// bestDistSq: the distance squared between best1 and best2
// hull: the points in the convex hull of P
// In practice, has a small constant
// Time Complexity:
// constructor: O(N log N)
// Memory Complexity: O(N)
// Tested:
// Fuzz Tested
struct FarthestPair {
  pt best1, best2; T bestDistSq; vector<pt> hull;
  // rotating calipers over the convex hull: for each hull vertex i, j is
  // advanced around the hull while the cross product of the edge vectors
  // at i and j stays positive, tracking the maximum squared distance
  FarthestPair(const vector<pt> &P) : bestDistSq(0), hull(convexHull(P)) {
    int H = hull.size(); pt o(0, 0);
    // the outer condition i < j keeps hull[i + 1] in bounds; for H < 2 the
    // body never runs and bestDistSq stays 0
    for (int i = 0, j = H < 2 ? 0 : 1; i < j; i++) for (;; j = (j + 1) % H) {
      T dSq = distSq(hull[i], hull[j]); if (lt(bestDistSq, dSq)) {
        bestDistSq = dSq; best1 = hull[i]; best2 = hull[j];
      }
      // stop advancing j once edge j is no longer ccw of edge i
      pt a = hull[i + 1] - hull[i], b = hull[(j + 1) % H] - hull[j];
      if (ccw(o, a, b) <= 0) break;
    }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Functions for a 2D line
struct Line {
  // v: direction vector; c: offset, so the line is the set of points p
  // with cross(v, p) == c (eval(p) == 0)
  pt v; T c;
  // ax + by = c, left side is ax + by > c
  Line(T a = 0, T b = 0, T c = 0) : v(b, -a), c(c) {}
  // direction vector v with offset c
  Line(pt v, T c) : v(v), c(c) {}
  // points p and q; relies on members initializing in declaration order
  // (v is fully constructed before c reads it)
  Line(pt p, pt q) : v(q - p), c(cross(v, p)) {}
  // signed value proportional to the distance from the line; 0 if on it
  T eval(pt p) const { return cross(v, p) - c; }
  // sign of onLeft, dist: 1 if left of line, 0 if on line, -1 if right of line
  int onLeft(pt p) const { return sgn(eval(p)); }
  T dist(pt p) const { return eval(p) / abs(v); }
  T distSq(pt p) const { T e = eval(p); return e * e / norm(v); }
  // rotated 90 degrees ccw
  Line perpThrough(pt p) const { return Line(p, p + perp(v)); }
  Line translate(pt p) const { return Line(v, c + cross(v, p)); }
  // shifts the line a distance of d towards its left side
  Line shiftLeft(T d) const { return Line(v, c + d * abs(v)); }
  // orthogonal projection of p onto the line
  pt proj(pt p) const { return p - perp(v) * eval(p) / norm(v); }
  // reflection of p across the line
  pt refl(pt p) const { return p - perp(v) * T(2) * eval(p) / norm(v); }
  // compares points by orthogonal projection (3 way comparison)
  int cmpProj(pt p, pt q) const { return sgn(dot(v, p) - dot(v, q)); }
};
// Bisector of 2 lines
// Function Arguments:
// l1: the first line
// l2: the second line
// interior: whether the interior or exterior bisector is used
// Return Value: a line representing the bisector
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/secret
Line bisector(Line l1, Line l2, bool interior) {
  // normalize both lines by the length of their direction vectors, then
  // add (interior) or subtract (exterior) the normalized forms of l1
  T len1 = abs(l1.v), len2 = abs(l2.v), s = interior ? 1 : -1;
  pt dir = l2.v / len2 + (l1.v / len1) * s;
  T off = l2.c / len2 + (l1.c / len1) * s;
  return Line(dir, off);
}
// Intersection of 2 lines
// Function Arguments:
// l1: the first line
// l2: the second line
// res: a reference to a point to store the intersection if it exists
// Return Value: 0 if no intersection, 1 if proper intersection, 2 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/nccc7s5
int lineLineIntersection(Line l1, Line l2, pt &res) {
  // parallel when the direction vectors have (approximately) zero cross
  T d = cross(l1.v, l2.v);
  if (eq(d, 0)) {
    // coincident when the scaled offsets match exactly, else disjoint
    if (l2.v * l1.c == l1.v * l2.c) return 2;
    return 0;
  }
  res = (l2.v * l1.c - l1.v * l2.c) / d;
  return 1;
}
// Determines if a point is on a line segment
// Function Arguments:
// p: the point to check if on the line segment
// a: one endpoint of the line segment
// b: the other endpoint of the line segment
// Return Value: true if p is on the line segment a-b, false otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/segmentintersection
bool onSeg(pt p, pt a, pt b) {
  // p must be collinear with a-b, and the vectors from p towards the two
  // endpoints must not point the same way (non-positive dot product)
  if (ccw(p, a, b) != 0) return false;
  return !lt(0, dot(a - p, b - p));
}
// Determine if two line segments intersect
// Function Arguments:
// a: one endpoint of the first line segment
// b: the other endpoint of the first line segment
// p: one endpoint of the second line segment
// q: the other endpoint of the second line segment
// Return Value: 0 if no intersection, 1 if proper intersection (a single
// point and not an endpoint), 2 otherwise
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/segmentintersection
int segSegIntersects(pt a, pt b, pt p, pt q) {
  // proper crossing: p and q strictly on opposite sides of a-b, and a and
  // b strictly on opposite sides of p-q
  int oP = ccw(a, b, p), oQ = ccw(a, b, q);
  int oA = ccw(p, q, a), oB = ccw(p, q, b);
  if (oP * oQ < 0 && oA * oB < 0) return 1;
  // otherwise they can only meet at an endpoint or overlap along a line
  bool touching = onSeg(p, a, b) || onSeg(q, a, b)
               || onSeg(a, p, q) || onSeg(b, p, q);
  return touching ? 2 : 0;
}
// Determine the intersection of two line segments
// Function Arguments:
// a: one endpoint of the first line segment
// b: the other endpoint of the first line segment
// p: one endpoint of the second line segment
// q: the other endpoint of the second line segment
// Return Value: if the line segments do not intersect, an empty vector
// of points; if the line segments intersect at a point, a vector containing
// the point of intersection; if the line segments have a line segment of
// intersection, a vector containing the two endpoints of the
// line segment intersection (it can return more if there is precision error)
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/segmentintersection
vector<pt> segSegIntersection(pt a, pt b, pt p, pt q) {
  vector<pt> ret; int type = segSegIntersects(a, b, p, q);
  if (type == 0) return ret;
  if (type == 1) {
    // single proper intersection: interpolate between p and q with
    // weights given by their signed areas relative to a-b
    T c1 = cross(p - a, b - a), c2 = cross(q - a, b - a);
    ret.push_back((c1 * q - c2 * p) / (c1 - c2));
    return ret;
  }
  // touching/overlapping: gather every endpoint lying on the other
  // segment, then sort and deduplicate
  if (onSeg(p, a, b)) ret.push_back(p);
  if (onSeg(q, a, b)) ret.push_back(q);
  if (onSeg(a, p, q)) ret.push_back(a);
  if (onSeg(b, p, q)) ret.push_back(b);
  sort(ret.begin(), ret.end());
  ret.erase(unique(ret.begin(), ret.end()), ret.end());
  return ret;
}
// Finds the closest point on a line segment to another point
// Function Arguments
// p: the reference point
// a: one endpoint of the line segment
// b: the other endpoint of the line segment
// Return Value: the closest point to p on the line segment a-b
// Time Complexity: O(1)
// Memory Complexity: O(1)
pt closestPtOnSeg(pt p, pt a, pt b) {
  // degenerate segment: the only candidate is the single point
  if (a == b) return a;
  // project p onto the segment's supporting line, clamping the (scaled)
  // parameter t into [0, d] so the result stays on the segment
  T d = distSq(a, b);
  T t = dot(p - a, b - a);
  if (t < T(0)) t = T(0);
  if (t > d) t = d;
  return a + (b - a) * t / d;
}
// Finds the distance to the closest point on a line segment to another point
// Function Arguments
// p: the reference point
// a: one endpoint of the line segment
// b: the other endpoint of the line segment
// Return Value: the distance to the closest point to p on the line segment a-b
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/segmentdistance
T ptSegDist(pt p, pt a, pt b) {
  // degenerate segment: plain point-to-point distance
  if (a == b) return dist(a, p);
  // clamp the (scaled) projection parameter onto the segment, then take
  // the distance to the clamped point without dividing until the end
  T d = distSq(a, b);
  T t = dot(p - a, b - a);
  if (t < T(0)) t = T(0);
  else if (t > d) t = d;
  return abs((p - a) * d - (b - a) * t) / d;
}
// Finds the closest distance between two line segments
// Function Arguments
// a: one endpoint of the first line segment
// b: the other endpoint of the first line segment
// p: one endpoint of the second line segment
// q: the other endpoint of the second line segment
//   Return Value: the closest distance between the two line segments
// Time Complexity: O(1)
// Memory Complexity: O(1)
// Tested:
// https://open.kattis.com/problems/segmentdistance
T segSegDist(pt a, pt b, pt p, pt q) {
  // if the segments touch or cross, the distance is zero
  if (segSegIntersects(a, b, p, q) > 0) return 0;
  // otherwise the closest approach involves an endpoint of one segment
  // against the other segment
  T best = ptSegDist(p, a, b);
  best = min(best, ptSegDist(q, a, b));
  best = min(best, ptSegDist(a, p, q));
  best = min(best, ptSegDist(b, p, q));
  return best;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Kd Tree for randomized 2d points
// Constructor Arguments:
// xmin: the minimum x value
// ymin: the minimum y value
// xmax: the maximum x value
// ymax: the maximum y value
// P: the points
// Functions:
// empty(): returns true if the tree is empty, false otherwise
// size(): the number of points in the tree
// insert(p): inserts p into the tree
// contains(p): returns true if the tree contains p, false otherwise
// range(rect): returns a vector of the points in the rectangle rect
// nearest(p, nearest): sets nearest to the nearest point in the tree to p,
// returns true if a point exists, false otherwise (tree is empty)
// In practice, has a moderate constant
// Time Complexity:
// constructor: O(N) for N randomized points
// empty, size: O(1)
// insert, contains: O(log N) for N randomized points
// range: O(sqrt N + K) for N randomized points and K points in the rectangle
// nearest: O(log N) for N randomized points
// Memory Complexity: O(N)
// Tested:
// https://open.kattis.com/problems/closestpair1
struct KdTree {
  // axis-aligned bounding box of a subtree
  struct Rectangle {
    T xmin, ymin, xmax, ymax;
    Rectangle(T xmin = 0, T ymin = 0, T xmax = 0, T ymax = 0)
        : xmin(xmin), ymin(ymin), xmax(xmax), ymax(ymax) {
      assert(xmin <= xmax); assert(ymin <= ymax);
    }
    bool intersects(const Rectangle &that) const {
      return !lt(xmax, that.xmin) && !lt(ymax, that.ymin)
          && !lt(that.xmax, xmin) && !lt(that.ymax, ymin);
    }
    bool contains(pt p) const {
      return !lt(p.x, xmin) && !lt(p.y, ymin)
          && !lt(xmax, p.x) && !lt(ymax, p.y);
    }
    // squared distance from p to the nearest point of this rectangle
    // (0 if p is inside); note this uses raw comparisons, not lt
    T distSq(pt p) const {
      T dx = 0, dy = 0;
      if (p.x < xmin) dx = p.x - xmin;
      else if (p.x > xmax) dx = p.x - xmax;
      if (p.y < ymin) dy = p.y - ymin;
      else if (p.y > ymax) dy = p.y - ymax;
      return dx * dx + dy * dy;
    }
  };
  // lu: left/up child (smaller coordinate); rd: right/down child
  struct Node {
    pt p; Rectangle r; Node *lu, *rd;
    Node(pt p, const Rectangle &r) : p(p), r(r), lu(nullptr), rd(nullptr) {}
  };
  // deque storage keeps node addresses stable as new nodes are appended
  deque<Node> TR;
  static bool xOrdLt(pt p, pt q) { return lt(p.x, q.x); }
  static bool yOrdLt(pt p, pt q) { return lt(p.y, q.y); }
  Node *makeNode(pt p, const Rectangle &r) {
    TR.emplace_back(p, r); return &TR.back();
  }
  T XMIN, YMIN, XMAX, YMAX; int cnt; Node *root;
  // builds a balanced subtree over the index range [lo, hi]; partition is
  // true when splitting on x, false when splitting on y; nth_element
  // places the median without fully sorting the range
  template <class It>
  Node *build(Node *n, It st, int lo, int hi, bool partition,
              T xmin, T ymin, T xmax, T ymax) {
    if (lo > hi) return nullptr;
    int mid = lo + (hi - lo) / 2; if (partition)
      nth_element(st + lo, st + mid, st + hi + 1, xOrdLt);
    else nth_element(st + lo, st + mid, st + hi + 1, yOrdLt);
    pt p = *(st + mid); n = makeNode(p, Rectangle(xmin, ymin, xmax, ymax));
    if (partition) {
      n->lu = build(n->lu, st, lo, mid - 1, !partition,
                    xmin, ymin, n->p.x, ymax);
      n->rd = build(n->rd, st, mid + 1, hi, !partition,
                    n->p.x, ymin, xmax, ymax);
    } else {
      n->lu = build(n->lu, st, lo, mid - 1, !partition,
                    xmin, ymin, xmax, n->p.y);
      n->rd = build(n->rd, st, mid + 1, hi, !partition,
                    xmin, n->p.y, xmax, ymax);
    }
    return n;
  }
  // descends along the alternating partition coordinate; inserting a
  // point already present is a no-op
  Node *insert(Node *n, pt p, bool partition,
               T xmin, T ymin, T xmax, T ymax) {
    if (!n) { cnt++; return makeNode(p, Rectangle(xmin, ymin, xmax, ymax)); }
    if (n->p == p) return n;
    if (partition) {
      if (xOrdLt(p, n->p)) n->lu = insert(n->lu, p, !partition,
                                          xmin, ymin, n->p.x, ymax);
      else n->rd = insert(n->rd, p, !partition,
                          n->p.x, ymin, xmax, ymax);
    } else {
      if (yOrdLt(p, n->p)) n->lu = insert(n->lu, p, !partition,
                                          xmin, ymin, xmax, n->p.y);
      else n->rd = insert(n->rd, p, !partition,
                          xmin, n->p.y, xmax, ymax);
    }
    return n;
  }
  bool contains(Node *n, pt p, bool partition) {
    if (!n) return false;
    if (n->p == p) return true;
    if (partition) {
      if (xOrdLt(p, n->p)) return contains(n->lu, p, !partition);
      else return contains(n->rd, p, !partition);
    } else {
      if (yOrdLt(p, n->p)) return contains(n->lu, p, !partition);
      else return contains(n->rd, p, !partition);
    }
  }
  // collects all points inside rect, pruning subtrees whose bounding
  // rectangle misses rect entirely
  void range(Node *n, vector<pt> &ret, const Rectangle &rect) {
    if (!n || !rect.intersects(n->r)) return;
    if (rect.contains(n->p)) ret.push_back(n->p);
    range(n->lu, ret, rect); range(n->rd, ret, rect);
  }
  // prunes subtrees whose bounding rectangle cannot beat the current
  // best; visits the child whose rectangle contains p first so the best
  // distance shrinks early
  bool findNearest(Node *n, pt p, bool hasNearest, pt &nearest) {
    if (!n || (hasNearest && lt(distSq(nearest, p), n->r.distSq(p))))
      return hasNearest;
    if (!hasNearest || lt(distSq(n->p, p), distSq(nearest, p))) {
      hasNearest = true; nearest = n->p;
    }
    if (n->lu && n->lu->r.contains(p)) {
      hasNearest = findNearest(n->lu, p, hasNearest, nearest);
      hasNearest = findNearest(n->rd, p, hasNearest, nearest);
    } else {
      hasNearest = findNearest(n->rd, p, hasNearest, nearest);
      hasNearest = findNearest(n->lu, p, hasNearest, nearest);
    }
    return hasNearest;
  }
  // empty tree over the bounding box [xmin, xmax] x [ymin, ymax]
  KdTree(T xmin, T ymin, T xmax, T ymax)
      : XMIN(xmin), YMIN(ymin), XMAX(xmax), YMAX(ymax),
        cnt(0), root(nullptr) {}
  // bulk-builds a balanced tree from P (P is reordered locally)
  KdTree(T xmin, T ymin, T xmax, T ymax, vector<pt> P)
      : XMIN(xmin), YMIN(ymin), XMAX(xmax), YMAX(ymax), cnt(P.size()) {
    root = build(root, P.begin(), 0, cnt - 1, true, XMIN, YMIN, XMAX, YMAX);
  }
  bool empty() { return cnt == 0; }
  int size() { return cnt; }
  void insert(pt p) { root = insert(root, p, true, XMIN, YMIN, XMAX, YMAX); }
  bool contains(pt p) { return contains(root, p, true); }
  vector<pt> range(const Rectangle &rect) {
    vector<pt> ret; range(root, ret, rect); return ret;
  }
  bool findNearest(pt p, pt &nearest) {
    return findNearest(root, p, false, nearest);
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Struct to compare angles in the range [-PI, PI) around a pivot based
// on arg(p - pivot)
// Points directly to the left have an angle of -PI, points equal to the pivot
// are placed after all points
// Constructor Arguments:
// p: the point
// Fields:
// static pivot: the pivot point
// Functions:
// static setPivot(p): sets p as the pivot point
// <, <=, >, >=, ==, !=: compares 2 angles
// +, +=, -, -=: arithmetic operators
// Time Complexity:
// constructor, setPivot, <, <=, >, >=, ==, !=, +, +=, -, -=: O(1)
// Memory Complexity: O(1)
// Tested:
// https://judge.yosupo.jp/problem/sort_points_by_argument
#define OP(op, body) Angle operator op (Angle a) const { return body; } \
Angle &operator op##= (Angle a) { return *this = *this op a; }
#define CMP(op, body) bool operator op (Angle a) const { return body; }
struct Angle {
  // pivot: the reference point that angles are measured around
  static pt pivot; static void setPivot(pt p) { pivot = p; }
  pt p; Angle(pt p = pt(0, 0)) : p(p) {}
  // 0 for angles in [-PI, 0), 1 for angles in [0, PI), and 2 for points
  // equal to the pivot so they sort after everything else
  int half() const {
    if (eq(p.x, pivot.x) && eq(p.y, pivot.y)) return 2;
    return int(!lt(p.y, pivot.y) && (!eq(p.y, pivot.y) || !lt(p.x, pivot.x)));
  }
  // compares by half plane first, then by orientation within the same half
  bool operator < (Angle a) const {
    int h = half() - a.half(); return h == 0 ? ccw(pivot, p, a.p) > 0 : h < 0;
  }
  CMP(<=, !(a < *this)) CMP(>, a < *this) CMP(>=, !(*this < a))
  CMP(==, !(*this < a) && !(a < *this)) CMP(!=, *this < a || a < *this)
  Angle operator + () const { return *this; }
  // negation conjugates the offset, reflecting across the horizontal line
  // through the pivot
  Angle operator - () const { return Angle(pivot + conj(p - pivot)); }
  // addition multiplies the offsets (complex-style multiplication on pt,
  // under which arguments add -- see Point.h)
  OP(+, Angle(pivot + (p - pivot) * (a.p - pivot))) OP(-, *this + (-a))
};
#undef OP
#undef CMP
pt Angle::pivot = pt(0, 0);
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
#include "Circle.h"
#include "DelaunayTriangulation.h"
using namespace std;
// Computes the Voronoi Diagram and Delaunay Triangulation of a set of
// distinct points
// If all points are collinear, there is no triangulation or diagram
// If there are 4 or more points on the same circle, the triangulation is
// ambiguous, otherwise it is unique
// Each circumcircle does not completely contain any of the input points
// The Voronoi Diagram is represented as a graph with the first tri.size()
// vertices being the circumcenters of each triangle in the triangulation
// and additional vertices being added to represent infinity points in a
// certain direction
// An edge is added between (v, w) if either triangles
// v and w share an edge, or if v is a triangle that has an edge that is
// not shared with any other triangle and rayDir[w - tri.size()] is the
// direction of the ray from the circumcenter of v passing perpendicular
// to the edge of the triangle of v that is not shared
// Constructor Arguments:
// P: the distinct points
// Fields:
// tri: a vector of arrays of 3 points, representing a triangle in
// the triangulation, points given in ccw order
// circumcircles: the circumcircles of each triangle
// rayDir: for edges in the Voronoi Diagram that are infinity, this vector
// contains the direction vector of those edges
// G: the adjacency list of each vertex in the Voronoi Diagram
// In practice, has a moderate constant
// Time Complexity:
// construction: O(N log N)
// Memory Complexity: O(N log N)
// Tested:
// https://open.kattis.com/problems/pandapreserve
struct VoronoiDiagram {
  vector<array<pt, 3>> tri; vector<Circle> circumcircles; vector<pt> rayDir;
  vector<vector<int>> G;
  VoronoiDiagram(const vector<pt> &P)
      : tri(DelaunayTriangulation(P).tri), G(tri.size()) {
    // seen maps a directed triangle edge to the triangle it came from;
    // an edge shared by two triangles is matched (in either direction)
    // and erased, so unmatched entries are boundary edges
    map<pair<pt, pt>, int> seen; circumcircles.reserve(tri.size());
    for (auto &&t : tri)
      circumcircles.push_back(circumcircle(t[0], t[1], t[2]));
    // pass h == 0: connect circumcenters of triangles sharing an edge;
    // pass h == 1: each edge still in seen gets a new infinity vertex in
    // the direction perpendicular to that edge
    for (int h = 0; h < 2; h++) for (int i = 0; i < int(tri.size()); i++)
      for (int k = 0; k < 3; k++) {
        pt p1 = tri[i][k], p2 = tri[i][(k + 1) % 3];
        auto it = seen.find(make_pair(p1, p2)); if (h == 0) {
          if (it == seen.end()) it = seen.find(make_pair(p2, p1));
          if (it == seen.end()) seen[make_pair(p1, p2)] = i;
          else {
            int j = it->second; G[i].push_back(j); G[j].push_back(i);
            seen.erase(it);
          }
        } else if (it != seen.end()) {
          int j = G.size(); G.emplace_back(); rayDir.push_back(perp(p1 - p2));
          G[i].push_back(j); G[j].push_back(i);
        }
      }
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Affine Transformations in 2D
// Functions:
// prependMatrix(m2, b2): sets m = m2 * m and b = m2 * b + b2
// transform(t): applies the AffineTransformation t to this
// scale(p): scales the x and y coordinates by p.x and p.y respectively
// translate(p): translates the point by p
// rotate(theta): rotates the point theta radians around the origin
// reflect(dir): reflects the point across the line passing through the
// origin with direction dir
// project(dir): projects the point onto the line passing through the
// origin with direction dir
// applyTransform(p): applies the transformation to the point p
// inverse(): returns the inverse of this transformation,
//     determinant of m must be nonzero
// Time Complexity:
// constructor, prependMatrix, transform, scale, translate, rotate, reflect,
// project, applyTransform, inverse: O(1)
// Memory Complexity: O(1)
// Tested:
// https://dmoj.ca/problem/tree3
struct AffineTransform {
  // the transformation maps a point p to m * p + b
  array<array<T, 2>, 2> m; array<T, 2> b;
  // identity transformation
  AffineTransform() {
    for (int i = 0; i < 2; i++) {
      b[i] = T(0); for (int j = 0; j < 2; j++) m[i][j] = i == j ? T(1) : T(0);
    }
  }
  // composes (m2, b2) after the current transformation:
  // m = m2 * m, b = m2 * b + b2
  void prependMatrix(const array<array<T, 2>, 2> &m2, const array<T, 2> &b2) {
    array<array<T, 2>, 2> resm; array<T, 2> resb;
    for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) {
      resm[i][j] = T(0);
      for (int k = 0; k < 2; k++) resm[i][j] += m2[i][k] * m[k][j];
    }
    for (int i = 0; i < 2; i++) {
      resb[i] = b2[i]; for (int j = 0; j < 2; j++) resb[i] += m2[i][j] * b[j];
    }
    m = resm; b = resb;
  }
  void transform(const AffineTransform &t) { prependMatrix(t.m, t.b); }
  void scale(pt p) {
    prependMatrix({array<T, 2>{p.x, T(0)}, {T(0), p.y}}, {T(0), T(0)});
  }
  void translate(pt p) {
    prependMatrix({array<T, 2>{T(1), T(0)}, {T(0), T(1)}}, {p.x, p.y});
  }
  // rotation by theta radians about the origin
  void rotate(T theta) {
    T c = cos(theta), s = sin(theta);
    prependMatrix({array<T, 2>{c, -s}, {s, c}}, {T(0), T(0)});
  }
  // reflection across the line through the origin with direction dir
  void reflect(pt dir) {
    T a = dir.x * dir.x - dir.y * dir.y, b = T(2) * dir.x * dir.y;
    T n = norm(dir); a /= n; b /= n;
    prependMatrix({array<T, 2>{a, b}, {b, -a}}, {T(0), T(0)});
  }
  // orthogonal projection onto the line through the origin with
  // direction dir
  void project(pt dir) {
    T a = dir.x * dir.x, b = dir.x * dir.y, c = dir.y * dir.y;
    T n = norm(dir); a /= n; b /= n; c /= n;
    prependMatrix({array<T, 2>{a, b}, {b, c}}, {T(0), T(0)});
  }
  pt applyTransform(pt p) {
    return pt(m[0][0] * p.x + m[0][1] * p.y + b[0],
              m[1][0] * p.x + m[1][1] * p.y + b[1]);
  }
  // inverse: undo the translation first, then apply the inverse of m;
  // asserts m is invertible
  AffineTransform inverse() const {
    AffineTransform ret;
    ret.prependMatrix({array<T, 2>{T(1), T(0)}, {T(0), T(1)}}, {-b[0], -b[1]});
    T det = m[0][0] * m[1][1] - m[0][1] * m[1][0]; assert(!eq(det, 0));
    ret.prependMatrix({array<T, 2>{m[1][1] / det, -m[0][1] / det},
                       {-m[1][0] / det, m[0][0] / det}},
                      {T(0), T(0)});
    return ret;
  }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "Point.h"
using namespace std;
// Helper struct to maintain upper hull
struct DecrementalUpperHull {
  struct Link; using ptr = Link *;
  // doubly linked list node for one input point; prv/nxt chain together
  // the portion of a hull currently stored at some segment tree node
  struct Link {
    int id; pt p; ptr prv, nxt;
    Link(int id, pt p) : id(id), p(p), prv(nullptr), nxt(nullptr) {}
  };
  // segment tree node: chain/back are the first/last links of the hull
  // piece stored here; tangent is the left endpoint of the bridge that
  // joins the two children's hulls (null when no bridge is stored)
  struct Node {
    ptr chain, back, tangent;
    Node() : chain(nullptr), back(nullptr), tangent(nullptr) {}
  };
  int N; vector<Node> TR; vector<Link> links;
  // advances l forward and r backward (via the step function f) until the
  // common tangent (bridge) between the two hull pieces is found; g is
  // the orientation predicate that decides whether to keep advancing
  template <class F, class G>
  pair<ptr, ptr> findBridge(ptr l, ptr r, F f, G g) {
    while (f(l) || f(r)) {
      if (!f(r) || (f(l) && g(pt(0, 0), f(l)->p - l->p, f(r)->p - r->p))) {
        if (g(l->p, f(l)->p, r->p)) l = f(l);
        else break;
      } else {
        if (!g(l->p, r->p, f(r)->p)) r = f(r);
        else break;
      }
    }
    return make_pair(l, r);
  }
  // recomputes the bridge between the chains of x's children, starting
  // the search from candidate endpoints l and r (rev flips the search
  // direction); then splices: node x owns the merged chain, while the
  // children keep only the parts hidden below the bridge
  void fixChain(int x, ptr l, ptr r, bool rev) {
    if (rev) {
      tie(r, l) = findBridge(r, l, [&] (ptr q) { return q->prv; },
                             [&] (pt a, pt b, pt c) {
                               return ccw(a, b, c) >= 0;
                             });
    } else {
      tie(l, r) = findBridge(l, r, [&] (ptr q) { return q->nxt; },
                             [&] (pt a, pt b, pt c) {
                               return ccw(a, b, c) <= 0;
                             });
    }
    TR[x].tangent = l;
    TR[x].chain = TR[x * 2].chain; TR[x].back = TR[x * 2 + 1].back;
    TR[x * 2].chain = l->nxt; TR[x * 2 + 1].back = r->prv;
    if (l->nxt) l->nxt->prv = nullptr;
    else TR[x * 2].chain = nullptr;
    if (r->prv) r->prv->nxt = nullptr;
    else TR[x * 2 + 1].chain = nullptr;
    l->nxt = r; r->prv = l;
  }
  void build(int x, int tl, int tr) {
    if (tl == tr) { TR[x].chain = TR[x].back = &links[tl]; return; }
    int m = tl + (tr - tl) / 2;
    build(x * 2, tl, m); build(x * 2 + 1, m + 1, tr);
    fixChain(x, TR[x * 2].chain, TR[x * 2 + 1].chain, false);
  }
  // moves the chain stored at node y up to node x, leaving y empty
  void rob(int x, int y) {
    TR[x].chain = TR[y].chain; TR[x].back = TR[y].back;
    TR[y].chain = TR[y].back = nullptr;
  }
  // removes point i from the subtree of x: the merged chain at x is
  // pushed back down to the children, the child containing i is updated
  // recursively, then the bridge is recomputed (y is that child)
  void rem(int x, int tl, int tr, int i) {
    if (i < tl || tr < i) return;
    int m = tl + (tr - tl) / 2, y = x * 2 + int(i > m); if (!TR[x].tangent) {
      // no bridge stored: the whole chain lives in one child
      TR[y].chain = TR[x].chain; TR[y].back = TR[x].back;
      if (i <= m) rem(x * 2, tl, m, i);
      else rem(x * 2 + 1, m + 1, tr, i);
      rob(x, y); return;
    }
    // undo the splice: give each child back its full sub-chain
    ptr l = TR[x].tangent, r = l->nxt; l->nxt = TR[x * 2].chain;
    if (TR[x * 2].chain) TR[x * 2].chain->prv = l;
    else TR[x * 2].back = l;
    TR[x * 2].chain = TR[x].chain; r->prv = TR[x * 2 + 1].back;
    if (TR[x * 2 + 1].back) TR[x * 2 + 1].back->nxt = r;
    else TR[x * 2 + 1].chain = r;
    TR[x * 2 + 1].back = TR[x].back;
    if (TR[y].chain == TR[y].back && TR[y].chain->id == i) {
      // i was the last point in that child; the other child's chain is
      // the whole answer and no bridge remains
      TR[y].chain = TR[y].back = nullptr;
      rob(x, y ^ 1); TR[x].tangent = nullptr; return;
    }
    // keep bridge-search start points valid if i was one of them
    if (i <= m) {
      if (l->id == i) l = l->nxt;
      rem(x * 2, tl, m, i); if (!l) l = TR[x * 2].back;
    } else {
      if (r->id == i) r = r->prv;
      rem(x * 2 + 1, m + 1, tr, i); if (!r) r = TR[x * 2 + 1].chain;
    }
    fixChain(x, l, r, i <= m);
  }
  // removes point i from the structure (special case: only one point left)
  void rem(int i) {
    if (TR[1].chain == TR[1].back) {
      TR[1].chain = TR[1].back = nullptr; return;
    }
    rem(1, 0, N - 1, i);
  }
  // TR is sized to a power of two of at least 2N (1 << __lg(4N - 1))
  DecrementalUpperHull(const vector<pt> &P)
      : N(P.size()), TR(N == 0 ? 0 : 1 << __lg(N * 4 - 1)) {
    links.reserve(N); for (int i = 0; i < N; i++) links.emplace_back(i, P[i]);
    build(1, 0, N - 1);
  }
  // ids of the points currently on the upper hull, left to right
  vector<int> getHull() {
    vector<int> ret;
    for (ptr x = TR[1].chain; x; x = x->nxt) ret.push_back(x->id);
    return ret;
  }
};
// Computes the convex layer each point is on
// Function Arguments:
// P: a vector of points
// Return Value: a vector of integers representing the convex layer each
// point is on, with 0 being the outermost layer, and increasing when
// moving inwards
// In practice, has a small constant
// Time Complexity: O(N (log N)^2)
// Memory Complexity: O(N)
// Tested:
// https://judge.yosupo.jp/problem/convex_layers
vector<int> convexLayers(const vector<pt> &P) {
  if (P.empty()) return vector<int>();
  // sort point indices by pt's ordering so both hull structures see the
  // points in the same sorted order
  int N = P.size(); vector<int> ind(N); iota(ind.begin(), ind.end(), 0);
  sort(ind.begin(), ind.end(), [&] (int i, int j) { return P[i] < P[j]; });
  vector<pt> tmp(N); for (int i = 0; i < N; i++) tmp[i] = P[ind[i]];
  DecrementalUpperHull up(tmp);
  // the lower hull is maintained as the upper hull of the negated points
  // in reverse order; index i in down corresponds to index N - i - 1 in up
  for (int i = 0; i < N; i++) tmp[i] = P[ind[N - i - 1]] * T(-1);
  DecrementalUpperHull down(move(tmp)); vector<int> ret(N, -1);
  // repeatedly peel the current hull (upper chain plus mapped lower
  // chain), assign the layer number, and remove those points from both
  // structures until every point is labelled
  for (int layer = 0, done = 0; done < N; layer++) {
    vector<int> hull; for (int i : up.getHull()) hull.push_back(i);
    for (int i : down.getHull()) hull.push_back(N - i - 1);
    for (int i : hull) if (ret[ind[i]] == -1) {
      ret[ind[i]] = layer; done++; up.rem(i); down.rem(N - i - 1);
    }
  }
  return ret;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../search/BinarySearch.h"
using namespace std;
// Supports adding lines in the form f(x) = mx + b, finding
// the maximum value of f(x) at any given x, and removing the last line added
// Template Arguments:
// T: the type of the slope (m) and intercept (b) of the line, as well as
// the type of the function argument (x), must be able to store m * b
// Cmp: the comparator to compare two f(x) values,
// convention is same as std::priority_queue in STL
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Constructor Arguments:
// cmp: an instance of the Cmp struct
// Functions:
// addLine(m, b): adds a line in the form f(x) = mx + b to the set of lines,
// lines must be added in the order of slope sorted by Cmp
// undo(): removes the last line added
// getMax(x): finds the maximum value of f(x) (based on the comparator) over
// all inserted lines
// size(): returns the number of lines in the convex hull
// reserve(N): reserves space for N lines in the convex hull
// In practice, has a moderate constant
// Time Complexity:
// addLine: O(log N) if reserve is called beforehand,
// O(log N) amortized otherwise
// getMax: O(log N)
// undo, size: O(1)
// reserve: O(N)
// Memory Complexity: O(N) for N lines in the convex hull
// Tested:
// https://oj.uz/problem/view/CEOI09_harbingers
template <class T, class Cmp = less<T>> struct ConvexHullTrickUndo {
  // A single line f(x) = mx + b
  struct Line {
    T m, b; Line(T m = T(), T b = T()) : m(m), b(b) {}
    T eval(T x) const { return m * x + b; }
  };
  // L[0, back) is the current hull; history records, for each addLine call,
  // the previous hull size and the line that was overwritten
  vector<pair<int, Line>> history; vector<Line> L; Cmp cmp; int back;
  ConvexHullTrickUndo(Cmp cmp = Cmp()) : cmp(cmp), back(0) {}
  int size() const { return back; }
  // Adds the line f(x) = mx + b; lines must be added in slope order
  // sorted by Cmp
  void addLine(T m, T b) {
    int pos = back;
    // binary search past any line with an equivalent slope and an
    // intercept at least as good as b
    if (pos >= 1) {
      pos = bsearch<LAST>(1, pos + 1, [&] (int j) {
        return cmp(m, L[j - 1].m) || cmp(L[j - 1].m, m) || cmp(b, L[j - 1].b);
      });
    }
    // binary search past hull lines made redundant by the new line
    // (intersection test done multiplicatively to avoid division)
    if (pos >= 2) {
      pos = bsearch<LAST>(2, pos + 1, [&] (int j) {
        T c1 = (L[j - 1].m - m) * (L[j - 2].b - b);
        T c2 = (L[j - 1].b - b) * (L[j - 2].m - m);
        return c1 > c2;
      });
    }
    if (pos == int(L.size())) L.emplace_back();
    // save state for undo, then place the new line at slot pos
    history.emplace_back(back, L[pos]);
    L[pos] = Line(m, b); back = pos + 1;
  }
  // Rolls back the most recent addLine: restore the overwritten slot
  // (slot back - 1 with the current back), then the previous size
  void undo() {
    L[back - 1] = history.back().second;
    back = history.back().first;
    history.pop_back();
  }
  // Returns the maximum f(x) value (based on Cmp) over all inserted lines
  T getMax(T x) const {
    int i = bsearch<FIRST>(0, back - 1, [&] (int j) {
      return cmp(L[j + 1].eval(x), L[j].eval(x));
    });
    return L[i].eval(x);
  }
  void reserve(int N) { L.reserve(N); history.reserve(N); }
};
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
#include "../../utils/EpsCmp.h"
using namespace std;
// Functions for a 2D point
// * operator between 2 points is complex number multiplication
// / operator between 2 points is complex number division
// OP expands to a binary operator and its compound-assignment form, given
// the expressions for the resulting x and y components
#define OP(op, U, a, x, y) pt operator op (U a) const { return pt(x, y); } \
  pt &operator op##= (U a) { return *this = *this op a; }
// CMP expands to a comparison operator with the given body expression
#define CMP(op, body) bool operator op (pt p) const { return body; }
struct pt {
  T x, y; constexpr pt(T x = 0, T y = 0) : x(x), y(y) {}
  // unary plus and negation
  pt operator + () const { return *this; }
  pt operator - () const { return pt(-x, -y); }
  // componentwise addition and subtraction
  OP(+, pt, p, x + p.x, y + p.y) OP(-, pt, p, x - p.x, y - p.y)
  // scalar multiplication and division (scalar on either side for *)
  OP(*, T, a, x * a, y * a) OP(/, T, a, x / a, y / a)
  friend pt operator * (T a, pt p) { return pt(a * p.x, a * p.y); }
  // lexicographic order by x then y, using the epsilon comparators from
  // EpsCmp.h; all other comparisons are derived from operator <
  bool operator < (pt p) const { return eq(x, p.x) ? lt(y, p.y) : lt(x, p.x); }
  CMP(<=, !(p < *this)) CMP(>, p < *this) CMP(>=, !(*this < p))
  CMP(==, !(*this < p) && !(p < *this)) CMP(!=, *this < p || p < *this)
  // complex number multiplication and division (see header comment)
  OP(*, pt, p, x * p.x - y * p.y, y * p.x + x * p.y)
  OP(/, pt, p, (x * p.x + y * p.y) / (p.x * p.x + p.y * p.y),
     (y * p.x - x * p.y) / (p.x * p.x + p.y * p.y))
};
#undef OP
#undef CMP
// reads a point as two whitespace-separated coordinates "x y"
istream &operator >> (istream &stream, pt &p) { return stream >> p.x >> p.y; }
// writes a point as "x y"
ostream &operator << (ostream &stream, pt p) {
  return stream << p.x << ' ' << p.y;
}
// complex conjugate (reflection over the x-axis)
pt conj(pt a) { return pt(a.x, -a.y); }
// dot product
T dot(pt a, pt b) { return a.x * b.x + a.y * b.y; }
// cross product (z-component of the 3D cross product)
T cross(pt a, pt b) { return a.x * b.y - a.y * b.x; }
// squared magnitude
T norm(pt a) { return dot(a, a); }
// magnitude (Euclidean length)
T abs(pt a) { return sqrt(norm(a)); }
// angle from the positive x-axis, via atan2
T arg(pt a) { return atan2(a.y, a.x); }
// the point at distance r from the origin at angle theta
pt polar(T r, T theta) { return r * pt(cos(theta), sin(theta)); }
// squared distance between a and b
T distSq(pt a, pt b) { return norm(b - a); }
// distance between a and b
T dist(pt a, pt b) { return abs(b - a); }
// angle of the vector from a to b
T ang(pt a, pt b) { return arg(b - a); }
// sign of ang, area2, ccw: 1 if counterclockwise, 0 if collinear,
// -1 if clockwise
// signed angle at vertex b formed by the segments b->a and b->c
T ang(pt a, pt b, pt c) {
  a -= b; c -= b; return arg(pt(dot(c, a), cross(c, a)));
}
// twice the signed area of triangle a, b, c
T area2(pt a, pt b, pt c) { return cross(b - a, c - a); }
// orientation of the triple a, b, c (sign convention above)
int ccw(pt a, pt b, pt c) { return sgn(area2(a, b, c)); }
// a rotated theta radians around p
pt rot(pt a, pt p, T theta) { return (a - p) * pt(polar(T(1), theta)) + p; }
// rotated 90 degrees ccw
pt perp(pt a) { return pt(-a.y, a.x); }
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Binary search over a range for the first or last occurrence of
// a return value for a boolean function
// Template Arguments:
// ISFIRST: boolean of whether or not the first of last occurrence is being
// searched for
// T: the type of the range to search over, must be integral
// F: the type of the function that is being searched over
// Function Arguments:
// lo: the inclusive lower bound
// hi: the exclusive upper bound
// f: the function to search over
// Return Value:
// If ISFIRST is true:
// Returns the first value in the range [lo, hi) where f(x) is true
// if no value in [lo, hi) satisfies f(x), then it returns hi
// assumes that all values where f(x) is true are greater than all values
// where f(x) is false
// If ISFIRST is false:
// Returns the last value in the range [lo, hi) where f(x) is true
// if no value in [lo, hi) satisfies f(x), then it returns lo - 1
// assumes that all values where f(x) is true are less than all values
// where f(x) is false
// In practice, has a small constant
// Time Complexity: O(log (hi - lo)) * (cost to compute f(x))
// Memory Complexity: O(1)
// Tested:
// https://mcpt.ca/problem/lcc18c5s3
// https://dmoj.ca/problem/apio19p3
// https://dmoj.ca/problem/pib20p2
const bool FIRST = true, LAST = false;
// Binary search over the integer range [lo, hi) for the first (ISFIRST) or
// last (!ISFIRST) value where f returns true; see header comment for the
// monotonicity assumptions and the out-of-range return values (hi / lo - 1)
template <const bool ISFIRST, class T, class F> T bsearch(T lo, T hi, F f) {
  static_assert(std::is_integral<T>::value, "T must be integral");
  T l = lo, r = hi - 1;
  while (l <= r) {
    T m = l + (r - l) / 2;
    bool pred = f(m);
    // when searching for the first true, a true at m narrows the upper half;
    // when searching for the last true, a false at m does
    if (pred == ISFIRST) r = m - 1;
    else l = m + 1;
  }
  return ISFIRST ? l : r;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |
#pragma once
#include <bits/stdc++.h>
using namespace std;
// Ternary search for the maximum of a function
// Template Arguments:
// T: the type of the range to search over, must be a floating point type
// F: the type of the function that is being searched over
// Cmp: the comparator to compare two f(x) values,
// convention is same as std::priority_queue in STL
// Required Functions:
// operator (a, b): returns true if and only if a compares less than b
// Function Arguments:
// lo: the inclusive lower bound
// hi: the inclusive upper bound
// f: the function to search over
// iters: the number of iterations to run the ternary search
// cmp: an instance of the Cmp class
// Return Value: the value x in the range [lo, hi] such that f(x) is
// maximum based on the comparator
// In practice, has a small constant
// Time Complexity: O(iters) * (cost to compute f(x))
// Memory Complexity: O(1)
// Tested:
// https://codeforces.com/contest/578/problem/C
// Ternary search for the x in [lo, hi] maximizing f(x) under cmp; runs for
// a fixed number of iterations, each shrinking the interval by a factor 2/3
// Fix: the default argument for cmp is Cmp() rather than less<T>(), so the
// function can be instantiated with a custom comparator type without
// explicitly passing a comparator instance
template <class T, class F, class Cmp = less<T>>
T tsearch(T lo, T hi, F f, int iters, Cmp cmp = Cmp()) {
  static_assert(is_floating_point<T>::value,
                "T must be a floating point type");
  for (int it = 0; it < iters; it++) {
    // probe at the two interior third points and discard the third of the
    // interval that cannot contain the maximum
    T m1 = lo + (hi - lo) / 3, m2 = hi - (hi - lo) / 3;
    if (cmp(f(m1), f(m2))) lo = m1;
    else hi = m2;
  }
  // midpoint of the final (tiny) interval
  return lo + (hi - lo) / 2;
}
| {
"repo_name": "wesley-a-leung/Resources",
"stars": "34",
"repo_language": "C++",
"file_name": "SkewHeapIncremental.h",
"mime_type": "text/x-c++"
} |