name | code_snippet | score
---|---|---|
graphhopper_Polygon_contains_rdh
|
/**
 * Performs the point-in-polygon check.
*
* @param lat
* Latitude of the point to be checked
* @param lon
* Longitude of the point to be checked
* @return true if point is inside polygon
*/
public boolean contains(double lat, double lon) {
return prepPolygon.contains(factory.createPoint(new Coordinate(lon, lat)));
}
| 3.26 |
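A minimal, hedged usage sketch (not part of the dataset): the snippet above delegates to a JTS PreparedGeometry, which caches a spatial index over the polygon so repeated contains checks are fast. Note the (lon, lat) coordinate order. Class name and setup below are illustrative only.

```java
import org.locationtech.jts.geom.Coordinate;
import org.locationtech.jts.geom.GeometryFactory;
import org.locationtech.jts.geom.prep.PreparedGeometry;
import org.locationtech.jts.geom.prep.PreparedGeometryFactory;

public class PointInPolygonDemo {
    public static void main(String[] args) {
        GeometryFactory factory = new GeometryFactory();
        // a closed ring: the first and last coordinate must be identical
        Coordinate[] ring = {
                new Coordinate(0, 0), new Coordinate(10, 0),
                new Coordinate(10, 10), new Coordinate(0, 10), new Coordinate(0, 0)
        };
        PreparedGeometry prepPolygon = PreparedGeometryFactory.prepare(factory.createPolygon(ring));
        // JTS coordinates are (x, y) = (lon, lat), mirroring the snippet above
        System.out.println(prepPolygon.contains(factory.createPoint(new Coordinate(5, 5))));  // true
        System.out.println(prepPolygon.contains(factory.createPoint(new Coordinate(15, 5)))); // false
    }
}
```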
graphhopper_MultiSourceElevationProvider_setBaseURL_rdh
|
/**
 * For the MultiSourceElevationProvider you have to specify two base URLs separated by a ';':
 * the first for cgiar, the second for gmted.
*/
@Override
public MultiSourceElevationProvider setBaseURL(String baseURL) {
String[] urls = baseURL.split(";");
if (urls.length != 2) {
throw new IllegalArgumentException("The base url must consist of two urls separated by a ';'. The first for cgiar, the second for gmted");
}
srtmProvider.setBaseURL(urls[0]);
globalProvider.setBaseURL(urls[1]);
return this;
}
| 3.26 |
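A hedged usage sketch; the URLs are placeholders (not the real endpoints) and the no-arg constructor is an assumption, but it illustrates the expected ';'-separated format:

```java
// hypothetical endpoints for illustration only
MultiSourceElevationProvider provider = new MultiSourceElevationProvider();
provider.setBaseURL("https://cgiar.example.org/srtm/;https://gmted.example.org/topo/");
// anything that does not split into exactly two URLs throws IllegalArgumentException
```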
graphhopper_PathDetail_getValue_rdh
|
/**
*
* @return the value of this PathDetail. Can be null
*/
public Object getValue() {
return value;
}
| 3.26 |
graphhopper_IntFloatBinaryHeap_percolateDownMinHeap_rdh
|
/**
* Percolates element down heap from the array position given by the index.
*/
final void percolateDownMinHeap(final int index) {
final int element = elements[index];
final float key = keys[index];
int hole = index;
while ((hole * 2) <= size) {
int child = hole * 2;
// if we have a right child and that child can not be percolated
// up then move onto other child
if ((child != size) && (keys[child + 1] < keys[child])) {
child++;
}
// if we found resting place of bubble then terminate search
if (keys[child] >= key) {
break;
}
elements[hole] = elements[child];
keys[hole] = keys[child];
hole = child;
}
elements[hole] = element;
keys[hole] = key;
}
| 3.26 |
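For intuition, here is a standalone sketch of the same 1-indexed min-heap layout: the children of slot i live at 2*i and 2*i+1, and percolate-down moves the smaller child up until the displaced key finds its resting place. Names and values below are illustrative, not from the dataset:

```java
public class PercolateDownDemo {
    static void percolateDown(float[] keys, int size, int index) {
        float key = keys[index];
        int hole = index;
        while (hole * 2 <= size) {
            int child = hole * 2;
            if (child != size && keys[child + 1] < keys[child])
                child++; // pick the smaller of the two children
            if (keys[child] >= key)
                break;   // resting place found
            keys[hole] = keys[child];
            hole = child;
        }
        keys[hole] = key;
    }

    public static void main(String[] args) {
        // slot 0 is unused; the root key 9.0 violates the heap property
        float[] keys = {0f, 9f, 2f, 3f, 4f, 5f};
        percolateDown(keys, 5, 1);
        // heap order restored: 2 at the root, 9 sunk to a leaf
        System.out.println(java.util.Arrays.toString(keys)); // [0.0, 2.0, 4.0, 3.0, 9.0, 5.0]
    }
}
```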
graphhopper_Downloader_m0_rdh
|
/**
* This method initiates a connect call of the provided connection and returns the response
 * stream. It only returns the error stream if it is available and readErrorStreamNoException is
 * true; otherwise an IOException is thrown if an error happens. Furthermore, it wraps the stream
 * to decompress it if the connection specifies a content encoding.
*/
public InputStream m0(HttpURLConnection connection, boolean readErrorStreamNoException) throws IOException {
    // create the connection, but before reading pick the correct input stream based on compression and error state
    connection.connect();
    InputStream v0;
    if ((readErrorStreamNoException && (connection.getResponseCode() >= 400)) && (connection.getErrorStream() != null))
        v0 = connection.getErrorStream();
    else
        v0 = connection.getInputStream();
    if (v0 == null)
        throw new IOException("Stream is null. Message: " + connection.getResponseMessage());
    // wrap the stream if the content is compressed
    try {
        String encoding = connection.getContentEncoding();
        if ((encoding != null) && encoding.equalsIgnoreCase("gzip"))
            v0 = new GZIPInputStream(v0);
        else if ((encoding != null) && encoding.equalsIgnoreCase("deflate"))
            v0 = new InflaterInputStream(v0, new Inflater(true));
    } catch (IOException ex) {
        // best-effort decompression: fall back to the raw stream if wrapping fails
    }
    return v0;
}
| 3.26 |
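A hedged calling sketch (the Downloader constructor argument and the URL are assumptions): advertising gzip/deflate via Accept-Encoding lets the method above transparently unwrap the response:

```java
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class DownloaderDemo {
    public static void main(String[] args) throws Exception {
        Downloader downloader = new Downloader("demo-agent"); // assumed constructor
        HttpURLConnection conn = (HttpURLConnection) new URL("https://example.org/data").openConnection();
        conn.setRequestProperty("Accept-Encoding", "gzip, deflate");
        // readErrorStreamNoException=true: on HTTP >= 400 the error body is returned instead of throwing
        try (InputStream in = downloader.m0(conn, true);
             BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            System.out.println(reader.readLine());
        }
    }
}
```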
graphhopper_Distributions_logNormalDistribution_rdh
|
/**
* Use this function instead of Math.log(normalDistribution(sigma, x)) to avoid an
* arithmetic underflow for very small probabilities.
 */
public static double logNormalDistribution(double sigma, double x) {
    return Math.log(1.0 / (sqrt(2.0 * PI) * sigma)) + ((-0.5) * pow(x / sigma, 2));
}
| 3.26 |
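Written out, the method evaluates the log-density of a zero-mean normal distribution term by term, so no underflowing exp(...) is ever computed:

$$\log \mathcal{N}(x \mid 0,\sigma) \;=\; \log\frac{1}{\sqrt{2\pi}\,\sigma} \;-\; \frac{1}{2}\left(\frac{x}{\sigma}\right)^{2}$$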
graphhopper_Distributions_logExponentialDistribution_rdh
|
/**
* Use this function instead of Math.log(exponentialDistribution(beta, x)) to avoid an
* arithmetic underflow for very small probabilities.
*
* @param beta
* =1/lambda with lambda being the standard exponential distribution rate parameter
*/
static double logExponentialDistribution(double beta, double x) {
return log(1.0 / beta) - (x / beta);
}
| 3.26 |
graphhopper_Distributions_exponentialDistribution_rdh
|
/**
*
* @param beta
* =1/lambda with lambda being the standard exponential distribution rate parameter
*/
static double exponentialDistribution(double beta, double x)
{
return (1.0 / beta) * exp((-x) / beta);
}
| 3.26 |
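For reference, the two exponential-distribution helpers above implement the density and its logarithm with β = 1/λ:

$$f(x) \;=\; \frac{1}{\beta}\, e^{-x/\beta}, \qquad \log f(x) \;=\; \log\frac{1}{\beta} \;-\; \frac{x}{\beta}$$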
graphhopper_ConditionalExpressionVisitor_isValidIdentifier_rdh
|
// allow only methods and other identifiers (constants and encoded values)
boolean isValidIdentifier(String identifier) {
if (variableValidator.isValid(identifier)) {
if (!Character.isUpperCase(identifier.charAt(0)))
result.guessedVariables.add(identifier);
        return true;
    }
    return false;
}
| 3.26 |
graphhopper_GraphHopperConfig_putObject_rdh
|
// We can add explicit configuration properties to GraphHopperConfig (for example to allow lists or nested objects),
// everything else is stored in a HashMap
@JsonAnySetter
public GraphHopperConfig putObject(String key, Object value) {
map.putObject(key, value);
return this;
}
| 3.26 |
graphhopper_LandmarkStorage_estimateMaxWeight_rdh
|
/**
* This method returns the maximum weight for the graph starting from the landmarks
*/
private double estimateMaxWeight(List<IntArrayList> graphComponents, EdgeFilter accessFilter) {
double maxWeight = 0;
int searchedSubnetworks = 0;
Random random = new Random(0);
// the maximum weight can only be an approximation so there is only a tiny improvement when we would do this for
// all landmarks. See #2027 (1st commit) where only 1 landmark was sufficient when multiplied with 1.01 at the end
// TODO instead of calculating the landmarks again here we could store them in landmarkIDs and do this for all here
int[] tmpLandmarkNodeIds = new int[3];
for (IntArrayList subnetworkIds : graphComponents) {
if (subnetworkIds.size() < minimumNodes)
continue;
searchedSubnetworks++;
int maxRetries = Math.max(subnetworkIds.size(), 100);
for (int retry = 0; retry < maxRetries; retry++) {
int index = random.nextInt(subnetworkIds.size());
int nextStartNode = subnetworkIds.get(index);
LandmarkExplorer explorer = findLandmarks(tmpLandmarkNodeIds, nextStartNode, accessFilter, "estimate " + index);
if (explorer.getFromCount() < minimumNodes) {
LOGGER.error((((((((("method findLandmarks for " + createPoint(graph, nextStartNode)) + " (") + nextStartNode) + ")") + " resulted in too few visited nodes: ") + explorer.getFromCount()) + " vs expected minimum ") + minimumNodes) + ", see #2256");
continue;
}
// starting
for (int lmIdx = 0; lmIdx < tmpLandmarkNodeIds.length; lmIdx++) {
    int lmNodeId = tmpLandmarkNodeIds[lmIdx];
    explorer = new LandmarkExplorer(graph, this, weighting, traversalMode, accessFilter, false);
    explorer.setStartNode(lmNodeId);
    explorer.runAlgo();
    maxWeight = Math.max(maxWeight, explorer.getLastEntry().weight);
}
break;
}
}
if ((maxWeight <= 0) && (searchedSubnetworks > 0))
    throw new IllegalStateException("max weight wasn't set although " + searchedSubnetworks + " subnetworks were searched (total " + graphComponents.size() + "), minimumNodes:" + minimumNodes);
// we have to increase maxWeight slightly as it is only an approximation towards the maximum weight,
// especially when external landmarks are provided, but also because we do not traverse all landmarks
return maxWeight * 1.008;
}
| 3.26 |
graphhopper_LandmarkStorage_getToWeight_rdh
|
/**
*
* @return the weight from the specified node to the landmark (specified *as index*)
*/
int getToWeight(int landmarkIndex, int node) {
int res = ((int) (landmarkWeightDA.getShort(((((long) (node)) * LM_ROW_LENGTH) + (landmarkIndex * 4)) + TO_OFFSET))) & 0xffff;
if (res == SHORT_INFINITY)
return SHORT_MAX;
return res;
}
| 3.26 |
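A sketch of the unsigned-short trick used above: Java shorts are signed, so masking with 0xffff reinterprets the stored 16 bits as a value in [0, 65535]. The sentinel constants below are illustrative, mirroring the SHORT_INFINITY/SHORT_MAX convention suggested by getToWeight and setWeight:

```java
public class UnsignedShortWeightDemo {
    static final int SHORT_INFINITY = 0xffff; // 65535, "not computed yet" (assumed convention)
    static final int SHORT_MAX = 0xfffe;      // largest storable finite weight (assumed convention)

    public static void main(String[] args) {
        int weight = 40000;                    // does not fit a signed short
        short stored = (short) weight;         // narrowing keeps the low 16 bits
        System.out.println(stored);            // -25536 (signed reinterpretation)
        int decoded = stored & 0xffff;         // back to the unsigned value
        System.out.println(decoded == weight); // true
    }
}
```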
graphhopper_LandmarkStorage_getWeighting_rdh
|
/**
* This method returns the weighting for which the landmarks are originally created
*/
public Weighting getWeighting() {
    return weighting;
}
| 3.26 |
graphhopper_LandmarkStorage_setLandmarkSuggestions_rdh
|
/**
 * This method forces the landmark preparation to skip the landmark search and use the specified landmark list instead.
 * Useful for manual tuning of larger areas to save import time or improve quality.
*/
public LandmarkStorage setLandmarkSuggestions(List<LandmarkSuggestion> landmarkSuggestions) {
if (landmarkSuggestions == null)
throw new IllegalArgumentException("landmark suggestions cannot be null");
this.landmarkSuggestions = landmarkSuggestions;
return this;
}
| 3.26 |
graphhopper_LandmarkStorage_createLandmarks_rdh
|
/**
* This method calculates the landmarks and initial weightings to & from them.
*/
public void createLandmarks() {
if (isInitialized())
throw new IllegalStateException("Initialize the landmark storage only once!");
// fill 'from' and 'to' weights with maximum value
long maxBytes = ((long) (graph.getNodes())) * LM_ROW_LENGTH;
this.landmarkWeightDA.create(2000);
this.landmarkWeightDA.ensureCapacity(maxBytes);
for (long pointer = 0; pointer < maxBytes; pointer += 2) {
landmarkWeightDA.setShort(pointer, ((short) (SHORT_INFINITY)));
}
int[] empty = new int[landmarks];
Arrays.fill(empty, UNSET_SUBNETWORK);
f0.add(empty);
byte[] subnetworks = new byte[graph.getNodes()];
Arrays.fill(subnetworks, ((byte) (UNSET_SUBNETWORK)));
String snKey = Subnetwork.key(lmConfig.getName());
// TODO We could use EdgeBasedTarjanSCC instead of node-based TarjanSCC here to get the small networks directly,
// instead of using the subnetworkEnc from PrepareRoutingSubnetworks.
if (!encodedValueLookup.hasEncodedValue(snKey))
    throw new IllegalArgumentException("EncodedValue '" + snKey + "' does not exist. For Landmarks this is " + "currently required (also used in PrepareRoutingSubnetworks). See #2256");
// Exclude edges that we previously marked in PrepareRoutingSubnetworks to avoid problems like "connection not found".
final BooleanEncodedValue edgeInSubnetworkEnc = encodedValueLookup.getBooleanEncodedValue(snKey);
final IntHashSet blockedEdges;
// We use the areaIndex to split certain areas from each other but do not permanently change the base graph
// so that other algorithms still can route through these regions. This is done to increase the density of
// landmarks for an area like Europe+Asia, which improves the query speed.
if (areaIndex != null) {
StopWatch sw = new StopWatch().start();
blockedEdges = findBorderEdgeIds(areaIndex);
if (logDetails)
LOGGER.info((((("Made " + blockedEdges.size()) + " edges inaccessible. Calculated country cut in ") + sw.stop().getSeconds()) + "s, ") + Helper.getMemInfo());
} else {
blockedEdges = new IntHashSet();
}
EdgeFilter accessFilter = edge -> (!edge.get(edgeInSubnetworkEnc)) && (!blockedEdges.contains(edge.getEdge()));
EdgeFilter tarjanFilter = edge -> accessFilter.accept(edge) && Double.isFinite(weighting.calcEdgeWeight(edge, false));
StopWatch sw = new StopWatch().start();
ConnectedComponents graphComponents = TarjanSCC.findComponents(graph, tarjanFilter, true);
if (logDetails)
    LOGGER.info("Calculated " + graphComponents.getComponents().size() + " subnetworks via tarjan in " + sw.stop().getSeconds() + "s, " + Helper.getMemInfo());
String additionalInfo = "";
// guess the factor
if (factor <= 0) {
// A 'factor' is necessary to store the weight in just a short value but without losing too much precision.
// This factor is rather delicate to pick, we estimate it from an exploration with some "test landmarks",
// see estimateMaxWeight. If we pick the distance too big for small areas this could lead to (slightly)
// suboptimal routes as there will be too big rounding errors. But picking it too small is bad for performance
// e.g. for Germany at least 1500km is very important otherwise speed is at least twice as slow e.g. for 1000km
double maxWeight = estimateMaxWeight(graphComponents.getComponents(), accessFilter);
setMaximumWeight(maxWeight);
additionalInfo = (", maxWeight:" + maxWeight) + " from quick estimation";
}
if (logDetails)
LOGGER.info(((("init landmarks for subnetworks with node count greater than " + minimumNodes) + " with factor:") + factor) + additionalInfo);
int nodes = 0;
for (IntArrayList subnetworkIds : graphComponents.getComponents()) {
nodes += subnetworkIds.size();
if (subnetworkIds.size() < minimumNodes)
continue;
if (factor <= 0)
    throw new IllegalStateException("factor wasn't initialized " + factor + ", subnetworks:" + graphComponents.getComponents().size() + ", minimumNodes:" + minimumNodes + ", current size:" + subnetworkIds.size());
int index = subnetworkIds.size() - 1;
// ensure start node is reachable from both sides and no subnetwork is associated
for (; index >= 0; index--) {
int nextStartNode = subnetworkIds.get(index);
if (subnetworks[nextStartNode] == UNSET_SUBNETWORK) {
        if (logDetails) {
            GHPoint p = createPoint(graph, nextStartNode);
            LOGGER.info("start node: " + nextStartNode + " (" + p + ") subnetwork " + index + ", subnetwork size: " + subnetworkIds.size() + ", " + Helper.getMemInfo() + (areaIndex == null ? "" : " area:" + areaIndex.query(p.lat, p.lon)));
        }
        if (createLandmarksForSubnetwork(nextStartNode, subnetworks, accessFilter))
            break;
    }
}
if (index < 0)
    LOGGER.warn("next start node not found in big enough network of size " + subnetworkIds.size() + ", first element is " + subnetworkIds.get(0) + ", " + createPoint(graph, subnetworkIds.get(0)));
}
int subnetworkCount = f0.size();
// store all landmark node IDs and one int for the factor itself.
this.landmarkWeightDA.ensureCapacity(maxBytes /* landmark weights */ + ((long) subnetworkCount) * landmarks /* landmark mapping per subnetwork */);
// calculate offset to point into landmark mapping
long bytePos = maxBytes;
for (int[] landmarks : f0) {
for (int lmNodeId : landmarks) {
landmarkWeightDA.setInt(bytePos, lmNodeId);
        bytePos += 4L;
    }
}
landmarkWeightDA.setHeader(0 * 4, graph.getNodes());
landmarkWeightDA.setHeader(1 * 4, landmarks);
landmarkWeightDA.setHeader(2 * 4, subnetworkCount);
if ((factor * DOUBLE_MLTPL) > Integer.MAX_VALUE)
    throw new UnsupportedOperationException("landmark weight factor cannot be bigger than Integer.MAX_VALUE " + (factor * DOUBLE_MLTPL));
landmarkWeightDA.setHeader(3 * 4, (int) Math.round(factor * DOUBLE_MLTPL));
// serialize fast byte[] into DataAccess
subnetworkStorage.create(graph.getNodes());
for (int nodeId = 0; nodeId < subnetworks.length; nodeId++) {
subnetworkStorage.setSubnetwork(nodeId, subnetworks[nodeId]);
}
if (logDetails)
LOGGER.info((("Finished landmark creation. Subnetwork node count sum " + nodes) + " vs. nodes ") + graph.getNodes());
initialized = true;
}
| 3.26 |
graphhopper_LandmarkStorage_getLandmarksAsGeoJSON_rdh
|
/**
*
* @return the calculated landmarks as GeoJSON string.
*/
String getLandmarksAsGeoJSON() {
String str = "";
for (int subnetwork = 1; subnetwork < f0.size(); subnetwork++) {
int[] lmArray = f0.get(subnetwork);
for (int lmIdx = 0; lmIdx < lmArray.length; lmIdx++) {
int index = lmArray[lmIdx];
        if (!str.isEmpty())
            str += ",";
        str += "{ \"type\": \"Feature\", \"geometry\": {\"type\": \"Point\", \"coordinates\": [" + na.getLon(index) + ", " + na.getLat(index) + "]},";
        str += " \"properties\":{\"node_index\":" + index + "," + "\"subnetwork\":" + subnetwork + "," + "\"lm_index\":" + lmIdx + "}" + "}";
}
}
return ("{ \"type\": \"FeatureCollection\", \"features\": [" + str) + "]}";
}
| 3.26 |
graphhopper_LandmarkStorage_setWeight_rdh
|
/**
*
 * @return false if the value capacity was reached and SHORT_MAX was stored instead of the real value.
*/
final boolean setWeight(long pointer, double value) {
double tmpVal = value / factor;
if (tmpVal > Integer.MAX_VALUE)
throw new UnsupportedOperationException((((("Cannot store infinity explicitly, pointer=" + pointer) + ", value=") + value) + ", factor=") + factor);
if (tmpVal >= SHORT_MAX) {
landmarkWeightDA.setShort(pointer, ((short) (SHORT_MAX)));
return false;
} else {
landmarkWeightDA.setShort(pointer, ((short) (tmpVal)));
return true;
}
}
| 3.26 |
graphhopper_LandmarkStorage_getSubnetworksWithLandmarks_rdh
|
/**
*
* @return the number of subnetworks that have landmarks
 */
public int getSubnetworksWithLandmarks() {
return f0.size();
}
| 3.26 |
graphhopper_LandmarkStorage_setAreaIndex_rdh
|
/**
* This method specifies the polygons which should be used to split the world wide area to improve performance and
* quality in this scenario.
*/
public void setAreaIndex(AreaIndex<SplitArea> areaIndex) {
    this.areaIndex = areaIndex;
}
| 3.26 |
graphhopper_LandmarkStorage_setLMSelectionWeighting_rdh
|
/**
 * This weighting is used for the selection heuristic and is by default not the weighting specified in the constructor.
* The special weighting leads to a much better distribution of the landmarks and results in better response times.
*/
public void setLMSelectionWeighting(Weighting lmSelectionWeighting) {
this.lmSelectionWeighting = lmSelectionWeighting;
}
| 3.26 |
graphhopper_LandmarkStorage_setLogDetails_rdh
|
/**
* By default do not log many details.
*/
public void setLogDetails(boolean logDetails) {
    this.logDetails = logDetails;
}
| 3.26 |
graphhopper_LandmarkStorage_createLandmarksForSubnetwork_rdh
|
/**
 * This method creates landmarks for the specified subnetwork (integer list).
 *
 * @return true if landmarks were created for this subnetwork, false if it was skipped (e.g. too small)
 */
private boolean createLandmarksForSubnetwork(final int startNode, final byte[] subnetworks, EdgeFilter accessFilter) {
final int subnetworkId = f0.size();
int[] tmpLandmarkNodeIds = new int[landmarks];
int logOffset = Math.max(1, landmarks / 2);
boolean pickedPrecalculatedLandmarks = false;
if (!landmarkSuggestions.isEmpty()) {
double lat = na.getLat(startNode);
double lon = na.getLon(startNode);
LandmarkSuggestion selectedSuggestion = null;
for (LandmarkSuggestion lmsugg : landmarkSuggestions) {
if (lmsugg.getBox().contains(lat, lon)) {
            selectedSuggestion = lmsugg;
            break;
}
}
if (selectedSuggestion != null) {
        if (selectedSuggestion.getNodeIds().size() < tmpLandmarkNodeIds.length)
            throw new IllegalArgumentException("landmark suggestions are too few " + selectedSuggestion.getNodeIds().size() + " for requested landmarks " + landmarks);
        pickedPrecalculatedLandmarks = true;
for (int i = 0; i < tmpLandmarkNodeIds.length; i++) {
int lmNodeId = selectedSuggestion.getNodeIds().get(i);
tmpLandmarkNodeIds[i] = lmNodeId;
}
}
}
if (pickedPrecalculatedLandmarks) {
    LOGGER.info("Picked " + tmpLandmarkNodeIds.length + " landmark suggestions, skip finding landmarks");
} else {
LandmarkExplorer explorer = findLandmarks(tmpLandmarkNodeIds, startNode, accessFilter, "create");
if (explorer.getFromCount() < minimumNodes) {
// too small subnetworks are initialized with special id==0
explorer.setSubnetworks(subnetworks, UNCLEAR_SUBNETWORK);
return false;
}
if (logDetails)
LOGGER.info((("Finished searching landmarks for subnetwork " + subnetworkId) + " of size ") + explorer.getVisitedNodes());
}
// 2) calculate weights for all landmarks -> 'from' and 'to' weight
for (int lmIdx = 0; lmIdx < tmpLandmarkNodeIds.length; lmIdx++) {
if (Thread.currentThread().isInterrupted()) {
throw new RuntimeException("Thread was interrupted for landmark " + lmIdx);
}
int lmNodeId = tmpLandmarkNodeIds[lmIdx];
LandmarkExplorer explorer = new LandmarkExplorer(graph, this, weighting, traversalMode, accessFilter, false);
explorer.setStartNode(lmNodeId);
explorer.runAlgo();
explorer.initLandmarkWeights(lmIdx, lmNodeId, LM_ROW_LENGTH, FROM_OFFSET);
// set subnetwork id to all explored nodes, but do this only for the first landmark
if (lmIdx == 0) {
if (explorer.setSubnetworks(subnetworks, subnetworkId))
return false;
}
explorer = new LandmarkExplorer(graph, this, weighting, traversalMode, accessFilter, true);
explorer.setStartNode(lmNodeId);
explorer.runAlgo();
explorer.initLandmarkWeights(lmIdx, lmNodeId, LM_ROW_LENGTH, TO_OFFSET);
if (lmIdx == 0) {
    if (explorer.setSubnetworks(subnetworks, subnetworkId))
        return false;
}
if (logDetails && ((lmIdx % logOffset) == 0))
LOGGER.info((((("Set landmarks weights [" + weighting) + "]. ") + "Progress ") + ((int) ((100.0 * lmIdx) / tmpLandmarkNodeIds.length))) + "%");
}
// TODO set weight to SHORT_MAX if entry has either no 'from' or no 'to' entry
f0.add(tmpLandmarkNodeIds);
return true;
}
| 3.26 |
graphhopper_LandmarkStorage_m1_rdh
|
/**
* For testing only
*/
DataAccess m1() {
return landmarkWeightDA;
}
| 3.26 |
graphhopper_LandmarkStorage_chooseActiveLandmarks_rdh
|
// From all available landmarks pick just a few active ones
boolean chooseActiveLandmarks(int fromNode, int toNode, int[] activeLandmarkIndices, boolean reverse) {
if ((fromNode < 0) || (toNode < 0))
throw new IllegalStateException(((("from " + fromNode) + " and to ") + toNode) + " nodes have to be 0 or positive to init landmarks");
int subnetworkFrom = subnetworkStorage.getSubnetwork(fromNode);
int subnetworkTo = subnetworkStorage.getSubnetwork(toNode);
if ((subnetworkFrom <= UNCLEAR_SUBNETWORK) || (subnetworkTo <= UNCLEAR_SUBNETWORK))
return false;
if (subnetworkFrom != subnetworkTo) {
throw new ConnectionNotFoundException((("Connection between locations not found. Different subnetworks " + subnetworkFrom) + " vs. ") + subnetworkTo, new HashMap<>());
}
// See the similar formula in LMApproximator.approximateForLandmark
List<Map.Entry<Integer, Integer>> list = new ArrayList<>(landmarks);
for (int lmIndex = 0; lmIndex < landmarks; lmIndex++) {
    int fromWeight = getFromWeight(lmIndex, toNode) - getFromWeight(lmIndex, fromNode);
    int toWeight = getToWeight(lmIndex, fromNode) - getToWeight(lmIndex, toNode);
    list.add(new MapEntry<>(reverse ? Math.max(-fromWeight, -toWeight) : Math.max(fromWeight, toWeight), lmIndex));
}
Collections.sort(list, SORT_BY_WEIGHT);
if (activeLandmarkIndices[0] >= 0) {
IntHashSet set = new IntHashSet(activeLandmarkIndices.length);
set.addAll(activeLandmarkIndices);
int existingLandmarkCounter = 0;
final int COUNT = Math.min(activeLandmarkIndices.length - 2, 2);
for (int i = 0; i < activeLandmarkIndices.length; i++) {
if (i >= ((activeLandmarkIndices.length - COUNT) + existingLandmarkCounter)) {
// keep at least two of the previous landmarks (pick the best)
break;
} else {
activeLandmarkIndices[i] = list.get(i).getValue();
if (set.contains(activeLandmarkIndices[i]))
existingLandmarkCounter++;
}
}
} else {
for (int i = 0; i < activeLandmarkIndices.length; i++) {
activeLandmarkIndices[i] = list.get(i).getValue();
}
}
return true;
}
| 3.26 |
graphhopper_LandmarkStorage_findBorderEdgeIds_rdh
|
/**
* This method makes edges crossing the specified border inaccessible to split a bigger area into smaller subnetworks.
* This is important for the world wide use case to limit the maximum distance and also to detect unreasonable routes faster.
*/
protected IntHashSet findBorderEdgeIds(AreaIndex<SplitArea> areaIndex) {
AllEdgesIterator allEdgesIterator = graph.getAllEdges();
IntHashSet inaccessible = new IntHashSet();
while (allEdgesIterator.next()) {
int adjNode = allEdgesIterator.getAdjNode();
List<SplitArea> areas = areaIndex.query(na.getLat(adjNode), na.getLon(adjNode));
SplitArea areaAdj = (areas.isEmpty()) ? null : areas.get(0);
int baseNode = allEdgesIterator.getBaseNode();
areas = areaIndex.query(na.getLat(baseNode), na.getLon(baseNode));
SplitArea areaBase = (areas.isEmpty()) ? null : areas.get(0);
if (areaAdj != areaBase) {
inaccessible.add(allEdgesIterator.getEdge());
}
}
    return inaccessible;
}
| 3.26 |
graphhopper_LandmarkStorage_setMaximumWeight_rdh
|
/**
* Specify the maximum possible value for your used area. With this maximum weight value you can influence the storage
* precision for your weights that help A* finding its way to the goal. The same value is used for all subnetworks.
 * Note: if you pick this value too big, then very similar weights are stored
 * (some bits of the storage capacity will be left unused);
 * if too low, then faraway values will all be clamped to the same maximum value ("maxed out").
* Both will lead to bad performance.
*
* @param maxWeight
* use a negative value to automatically determine this value.
*/
public LandmarkStorage setMaximumWeight(double maxWeight) {
if (maxWeight > 0) {
this.factor = maxWeight / PRECISION;
if (Double.isInfinite(factor) || Double.isNaN(factor))
throw new IllegalStateException((("Illegal factor " + factor) + " calculated from maximum weight ") + maxWeight);
}
return this;
}
| 3.26 |
graphhopper_LandmarkStorage_getMinimumNodes_rdh
|
/**
*
* @see #setMinimumNodes(int)
*/
public int getMinimumNodes() {
return minimumNodes;
}
| 3.26 |
graphhopper_TranslationMap_postImportHook_rdh
|
/**
 * This method does some checks and fills missing translations from English (en).
*/
private void postImportHook() {
Map<String, String> enMap = get("en").asMap();
StringBuilder sb = new StringBuilder();
    for (Translation tr : translations.values()) {
Map<String, String> trMap = tr.asMap();
for (Entry<String, String> enEntry : enMap.entrySet()) {
String value = trMap.get(enEntry.getKey());
if (isEmpty(value)) {
trMap.put(enEntry.getKey(), enEntry.getValue());
continue;
}
int expectedCount = countOccurence(enEntry.getValue(), "\\%");
if (expectedCount != countOccurence(value, "\\%")) {
sb.append(tr.getLocale()).append(" - error in ").append(enEntry.getKey()).append("->").append(value).append("\n");
} else {
// try if formatting works, many times e.g. '%1$' instead of '%1$s'
Object[] strs = new String[expectedCount];
Arrays.fill(strs, "tmp");
try {
String.format(Locale.ROOT, value, strs);
                } catch (Exception ex) {
                    sb.append(tr.getLocale()).append(" - error ").append(ex.getMessage()).append(" in ").append(enEntry.getKey()).append("->").append(value).append("\n");
}
}
}
    }
    if (sb.length() > 0) {
System.out.println(sb);
throw new IllegalStateException(sb.toString());
}
}
| 3.26 |
graphhopper_TranslationMap_get_rdh
|
/**
 * Returns the Translation object for the specified locale, or null if not found.
*/
public Translation get(String locale) {
locale = locale.replace("-", "_");
Translation tr = translations.get(locale);
if (locale.contains("_") && (tr == null))
tr = translations.get(locale.substring(0, 2));
return tr;
}
| 3.26 |
graphhopper_TranslationMap_getWithFallBack_rdh
|
/**
* Returns the Translation object for the specified locale and falls back to English if the
* locale was not found.
*/
public Translation getWithFallBack(Locale locale) {
Translation tr = get(locale.toString());
if (tr == null) {
tr = get(locale.getLanguage());
        if (tr == null)
            tr = get("en");
}
return tr;
}
| 3.26 |
graphhopper_TranslationMap_doImport_rdh
|
/**
* This loads the translation files from classpath.
*/
public TranslationMap doImport() {
try {
        for (String locale : LOCALES) {
            TranslationHashMap trMap = new TranslationHashMap(getLocale(locale));
trMap.doImport(TranslationMap.class.getResourceAsStream(locale + ".txt"));
add(trMap);
}
postImportHook();
return this;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
| 3.26 |
graphhopper_ShallowImmutablePointList_ensureNode_rdh
|
/* Immutable forbidden part */
@Override
public void ensureNode(int nodeId) {
throw new UnsupportedOperationException(IMMUTABLE_ERR);
}
| 3.26 |
graphhopper_ShallowImmutablePointList_is3D_rdh
|
/* Wrapping Part */
@Override
public boolean is3D() {
return wrappedPointList.is3D();
}
| 3.26 |
graphhopper_GHMatrixAbstractRequester_fillResponseFromJson_rdh
|
/**
*
* @param failFast
* If false weights/distances/times that are null are interpreted as disconnected points and are
* thus set to their respective maximum values. Furthermore, the indices of the disconnected points
* are added to {@link MatrixResponse#getDisconnectedPoints()} and the indices of the points that
* could not be found are added to {@link MatrixResponse#getInvalidFromPoints()} and/or
* {@link MatrixResponse#getInvalidToPoints()}.
 */
protected void fillResponseFromJson(MatrixResponse matrixResponse, JsonNode solution, boolean failFast) {
final boolean readWeights = solution.has("weights");
final boolean readDistances = solution.has("distances");
final boolean readTimes = solution.has("times");
int fromCount = 0;
JsonNode weightsArray = null;
if (readWeights) {
    weightsArray = solution.get("weights");
fromCount = checkArraySizes("weights", weightsArray.size());
}
JsonNode timesArray = null;
if (readTimes) {
timesArray = solution.get("times");
fromCount = checkArraySizes("times", timesArray.size(), weightsArray);
}
JsonNode distancesArray = null;
if (readDistances) {
distancesArray = solution.get("distances");
fromCount = checkArraySizes("distances", distancesArray.size(), weightsArray, timesArray);
}
for (int fromIndex = 0; fromIndex < fromCount; fromIndex++) {
int toCount = 0;
JsonNode weightsFromArray = null;
double[] weights = null;
if (readWeights) {
weightsFromArray = weightsArray.get(fromIndex);
weights = new double[weightsFromArray.size()];
toCount = checkArraySizes("weights", weightsFromArray.size());
}
JsonNode v18 = null;
long[] times = null;
if (readTimes) {
v18 = timesArray.get(fromIndex);
times = new long[v18.size()];
toCount = checkArraySizes("times", v18.size(), weightsFromArray);
}
    JsonNode distancesFromArray = null;
int[] distances = null;
if (readDistances) {
distancesFromArray = distancesArray.get(fromIndex);
distances = new int[distancesFromArray.size()];
toCount = checkArraySizes("distances", distancesFromArray.size(), weightsFromArray, v18);
}
for (int toIndex = 0; toIndex < toCount; toIndex++) {
    if (readWeights) {
        if (weightsFromArray.get(toIndex).isNull() && (!failFast)) {
            weights[toIndex] = Double.MAX_VALUE;
        } else {
            weights[toIndex] = weightsFromArray.get(toIndex).asDouble();
        }
    }
if (readTimes) {
if (v18.get(toIndex).isNull() && (!failFast)) {
times[toIndex] = Long.MAX_VALUE;
} else {
times[toIndex] = v18.get(toIndex).asLong() * 1000;
}
}
if (readDistances) {
if (distancesFromArray.get(toIndex).isNull() && (!failFast)) {
distances[toIndex] = Integer.MAX_VALUE;
} else {
            distances[toIndex] = (int) Math.round(distancesFromArray.get(toIndex).asDouble());
}
}
}
if (readWeights) {
matrixResponse.setWeightRow(fromIndex, weights);
}
if (readTimes) {
    matrixResponse.setTimeRow(fromIndex, times);
}
if (readDistances) {
    matrixResponse.setDistanceRow(fromIndex, distances);
}
}
if ((!failFast) && solution.has("hints")) {
    addProblems(matrixResponse, solution.get("hints"));
}
}
| 3.26 |
graphhopper_State_getIncomingVirtualEdge_rdh
|
/**
* Returns the virtual edge that should be used by incoming paths.
*
* @throws IllegalStateException
* if this State is not directed.
*/
public EdgeIteratorState getIncomingVirtualEdge() {
if (!isDirected) {
throw new IllegalStateException("This method may only be called for directed GPXExtensions");
}
return incomingVirtualEdge;
}
| 3.26 |
graphhopper_State_getOutgoingVirtualEdge_rdh
|
/**
* Returns the virtual edge that should be used by outgoing paths.
*
* @throws IllegalStateException
* if this State is not directed.
*/
public EdgeIteratorState getOutgoingVirtualEdge() {
if (!isDirected) {
throw new IllegalStateException("This method may only be called for directed GPXExtensions");
}
return outgoingVirtualEdge;
}
| 3.26 |
graphhopper_DAType_isInMemory_rdh
|
/**
*
* @return true if data resides in the JVM heap.
*/
public boolean isInMemory() {
return memRef == MemRef.HEAP;
}
| 3.26 |
graphhopper_LocationIndex_query_rdh
|
/**
 * This method explores the LocationIndex with the specified Visitor. It visits only the stored edges (and each only once),
 * limited by the queryBBox. A few edges slightly outside of queryBBox may also be
 * returned; you can avoid these with an explicit BBox check of the coordinates.
*/
default void query(BBox queryBBox, Visitor function) {
query(createBBoxTileFilter(queryBBox), function);
}
| 3.26 |
graphhopper_GHSortedCollection_pollKey_rdh
|
/**
*
 * @return the smallest key; the corresponding entry (key and value) is removed from this collection
*/
public int pollKey() {
size--;
if (size < 0) {
    throw new IllegalStateException("collection is already empty!?");
}
Entry<Integer, GHIntHashSet> e = map.firstEntry();
GHIntHashSet set = e.getValue();
if (set.isEmpty()) {
throw new IllegalStateException("internal set is already empty!?");
}
Iterator<IntCursor> iter = set.iterator();
final int val = iter.next().value;
set.remove(val);
if (set.isEmpty()) {
map.remove(e.getKey());
}
return val;
}
| 3.26 |
graphhopper_LuxembourgCountryRule_getToll_rdh
|
/**
* Defines the default rules for Luxembourgish roads
*
* @author Thomas Butz
 */
public class LuxembourgCountryRule implements CountryRule {
    @Override
    public Toll getToll(ReaderWay readerWay, Toll currentToll) {
        if (currentToll != Toll.MISSING) {
            return currentToll;
        }
        RoadClass roadClass = RoadClass.find(readerWay.getTag("highway", ""));
        if (RoadClass.MOTORWAY == roadClass)
            return Toll.HGV;
        return currentToll;
    }
| 3.26 |
graphhopper_EdgeChangeBuilder_build_rdh
|
/**
* Builds a mapping between real node ids and the set of changes for their adjacent edges.
*
* @param edgeChangesAtRealNodes
* output parameter, you need to pass an empty & modifiable map and the results will
* be added to it
*/
static void build(IntArrayList closestEdges, List<VirtualEdgeIteratorState> virtualEdges, int firstVirtualNodeId, IntObjectMap<QueryOverlay.EdgeChanges> edgeChangesAtRealNodes) {
new EdgeChangeBuilder(closestEdges, virtualEdges, firstVirtualNodeId, edgeChangesAtRealNodes).build();
}
| 3.26 |
graphhopper_EdgeChangeBuilder_addVirtualEdges_rdh
|
/**
* Adds the virtual edges adjacent to the real tower nodes
*/
private void addVirtualEdges(boolean base, int node, int virtNode) {
QueryOverlay.EdgeChanges edgeChanges = edgeChangesAtRealNodes.get(node);
if (edgeChanges == null) {
edgeChanges = new QueryOverlay.EdgeChanges(2, 2);
edgeChangesAtRealNodes.put(node, edgeChanges);
}
EdgeIteratorState edge = (base) ? getVirtualEdge((virtNode * 4) + BASE_SNAP) : getVirtualEdge((virtNode * 4) + ADJ_SNAP);
edgeChanges.getAdditionalEdges().add(edge);
}
| 3.26 |
graphhopper_PMap_m0_rdh
|
/**
* Reads a PMap from a string array consisting of key=value pairs
*/
public static PMap m0(String[] args) {
PMap map = new PMap();
for (String arg : args) {
int index = arg.indexOf("=");
if (index <= 0) {
continue;
}
String key = arg.substring(0, index);
if (key.startsWith("-")) {
key = key.substring(1);
}
if (key.startsWith("-")) {
key = key.substring(1);
}
String value = arg.substring(index + 1);
Object old = map.map.put(Helper.camelCaseToUnderScore(key), Helper.toObject(value));
    if (old != null)
        throw new IllegalArgumentException("Pair '" + Helper.camelCaseToUnderScore(key) + "'='" + value + "' not possible to " + "add to the PMap-object as the key already exists with '" + old + "'");
}
return map;
}
| 3.26 |
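A hedged, JShell-style usage sketch of the parser above: a leading '-' or '--' is stripped, keys are normalized via Helper.camelCaseToUnderScore, entries without '=' are skipped, and a duplicate key throws IllegalArgumentException. Key names here are illustrative:

```java
PMap args = PMap.m0(new String[]{"--graph.location=./graph-cache", "-someFlag=true", "ignored-entry"});
// "ignored-entry" has no '=' and is silently skipped
```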
graphhopper_MapMatching_setMeasurementErrorSigma_rdh
|
/**
* Standard deviation of the normal distribution [m] used for modeling the
* GPS error.
*/
public void setMeasurementErrorSigma(double measurementErrorSigma) {
this.measurementErrorSigma = measurementErrorSigma;
}
| 3.26 |
graphhopper_MapMatching_createTimeSteps_rdh
|
/**
* Creates TimeSteps with candidates for the GPX entries but does not create emission or
* transition probabilities. Creates directed candidates for virtual nodes and undirected
* candidates for real nodes.
*/
private List<ObservationWithCandidateStates> createTimeSteps(List<Observation> filteredObservations, List<List<Snap>> splitsPerObservation) {
if (splitsPerObservation.size() != filteredObservations.size()) {
throw new IllegalArgumentException("filteredGPXEntries and queriesPerEntry must have same size.");
}
final List<ObservationWithCandidateStates> timeSteps = new ArrayList<>();
for (int i = 0; i < filteredObservations.size(); i++) {
Observation observation = filteredObservations.get(i);
Collection<Snap> splits = splitsPerObservation.get(i);
List<State> candidates = new ArrayList<>();
        for (Snap split : splits) {
if (f0.isVirtualNode(split.getClosestNode())) {
List<VirtualEdgeIteratorState> virtualEdges = new ArrayList<>();
EdgeIterator iter = f0.createEdgeExplorer().setBaseNode(split.getClosestNode());
                while (iter.next()) {
                    if (!f0.isVirtualEdge(iter.getEdge())) {
                        throw new RuntimeException("Virtual nodes must only have virtual edges " + "to adjacent nodes.");
                    }
                    virtualEdges.add((VirtualEdgeIteratorState) f0.getEdgeIteratorState(iter.getEdge(), iter.getAdjNode()));
                }
                if (virtualEdges.size() != 2) {
                    throw new RuntimeException("Each virtual node must have exactly 2 " + "virtual edges (reverse virtual edges are not returned by the " + "EdgeIterator).");
                }
// Create a directed candidate for each of the two possible directions through
// the virtual node. We need to add candidates for both directions because
// we don't know yet which is the correct one. This will be figured
// out by the Viterbi algorithm.
candidates.add(new State(observation, split, virtualEdges.get(0), virtualEdges.get(1)));
candidates.add(new State(observation, split, virtualEdges.get(1), virtualEdges.get(0)));
} else {
// Create an undirected candidate for the real node.
candidates.add(new State(observation, split));
}
}
timeSteps.add(new ObservationWithCandidateStates(observation, candidates));
}
return timeSteps;
}
| 3.26 |
graphhopper_LMPreparationHandler_prepare_rdh
|
/**
* Prepares the landmark data for all given configs
*/
public List<PrepareLandmarks> prepare(List<LMConfig> lmConfigs, BaseGraph baseGraph, EncodingManager encodingManager, StorableProperties properties, LocationIndex locationIndex, final boolean closeEarly) {
List<PrepareLandmarks> preparations = createPreparations(lmConfigs, baseGraph, encodingManager, locationIndex);
List<Runnable> v9 = new ArrayList<>();
for (int i = 0; i < preparations.size(); i++) {
PrepareLandmarks prepare = preparations.get(i);
final int count = i + 1;
final String name = prepare.getLMConfig().getName();
        v9.add(() -> {
            LOGGER.info(count + "/" + lmConfigs.size() + " calling LM prepare.doWork for " + prepare.getLMConfig().getName() + " ... (" + getMemInfo() + ")");
Thread.currentThread().setName(name);
prepare.doWork();
if (closeEarly)
prepare.close();
LOGGER.info("LM {} finished {}", name, getMemInfo());
properties.put((Landmark.PREPARE + "date.") + name, createFormatter().format(new Date()));
});
}
GHUtility.runConcurrently(v9.stream(), preparationThreads);
LOGGER.info("Finished LM preparation, {}",
getMemInfo());
return preparations;
}
| 3.26 |
graphhopper_LMPreparationHandler_setLMProfiles_rdh
|
/**
* Enables the use of landmarks to reduce query times.
*/
public LMPreparationHandler setLMProfiles(Collection<LMProfile> lmProfiles) {
this.lmProfiles.clear();
this.f0.clear();
for (LMProfile profile : lmProfiles) {
if (profile.usesOtherPreparation())
continue;
f0.put(profile.getProfile(), profile.getMaximumLMWeight());
}
this.lmProfiles.addAll(lmProfiles);
return this;
}
| 3.26 |
graphhopper_LMPreparationHandler_createPreparations_rdh
|
/**
* This method creates the landmark storages ready for landmark creation.
*/
List<PrepareLandmarks> createPreparations(List<LMConfig> lmConfigs, BaseGraph graph, EncodedValueLookup encodedValueLookup, LocationIndex locationIndex) {
LOGGER.info("Creating LM preparations, {}", getMemInfo());
    List<LandmarkSuggestion> lmSuggestions = new ArrayList<>(lmSuggestionsLocations.size());
if (!lmSuggestionsLocations.isEmpty()) {
try {
            for (String loc : lmSuggestionsLocations) {
                lmSuggestions.add(LandmarkSuggestion.readLandmarks(loc, locationIndex));
            }
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
List<PrepareLandmarks> preparations = new ArrayList<>();
for (LMConfig v17 : lmConfigs) {
Double maximumWeight = f0.get(v17.getName());
if (maximumWeight == null)
throw new IllegalStateException(((("maximumWeight cannot be null. Default should be just negative. " + "Couldn't find ") + v17.getName()) + " in ") + f0);
PrepareLandmarks prepareLandmarks = new PrepareLandmarks(graph.getDirectory(), graph, encodedValueLookup, v17, landmarkCount).setLandmarkSuggestions(lmSuggestions).setMaximumWeight(maximumWeight).setLogDetails(logDetails);
if (minNodes > 1)
prepareLandmarks.setMinimumNodes(minNodes);
// using the area index we separate certain areas from each other but we do not change the base graph for this
// so that other algorithms still can route between these areas
if (areaIndex != null)
prepareLandmarks.setAreaIndex(areaIndex);
preparations.add(prepareLandmarks);
}
return preparations;
}
| 3.26 |
graphhopper_AbstractDataAccess_writeHeader_rdh
|
/**
* Writes some internal data into the beginning of the specified file.
*/
protected void writeHeader(RandomAccessFile file, long length, int segmentSize) throws IOException {
file.seek(0);
file.writeUTF("GH");file.writeLong(length);
file.writeInt(segmentSize);
for (int i = 0; i < header.length; i++) {
file.writeInt(header[i]);
    }
}
| 3.26 |
graphhopper_RamerDouglasPeucker_subSimplify_rdh
|
// keep the points of fromIndex and lastIndex
int subSimplify(PointList points, int fromIndex, int lastIndex) {
    if ((lastIndex - fromIndex) < 2) {
        return 0;
    }
    int indexWithMaxDist = -1;
    double maxDist = -1;
double elevationFactor = maxDistance / elevationMaxDistance;
double firstLat = points.getLat(fromIndex);
double firstLon = points.getLon(fromIndex);
double firstEle = points.getEle(fromIndex);
double lastLat = points.getLat(lastIndex);
double lastLon = points.getLon(lastIndex);
double lastEle = points.getEle(lastIndex);
for (int i = fromIndex + 1; i < lastIndex; i++) {
double lat = points.getLat(i);
if (Double.isNaN(lat)) {
continue;
}
double lon = points.getLon(i);
    double ele = points.getEle(i);
    double dist = (points.is3D() && (elevationMaxDistance < Double.MAX_VALUE) && !Double.isNaN(firstEle) && !Double.isNaN(lastEle) && !Double.isNaN(ele))
            ? calc.calcNormalizedEdgeDistance3D(lat, lon, ele * elevationFactor, firstLat, firstLon, firstEle * elevationFactor, lastLat, lastLon, lastEle * elevationFactor)
            : calc.calcNormalizedEdgeDistance(lat, lon, firstLat, firstLon, lastLat, lastLon);
    if (maxDist < dist) {
indexWithMaxDist = i;
maxDist = dist;
}
}
if (indexWithMaxDist < 0) {
throw new IllegalStateException(((("maximum not found in [" + fromIndex) + ",") + lastIndex) + "]");
}
int counter = 0;
if (maxDist < normedMaxDist) {
for (int i = fromIndex + 1; i < lastIndex; i++) {
        points.set(i, Double.NaN, Double.NaN, Double.NaN);
        counter++;
}
} else {
    counter = subSimplify(points, fromIndex, indexWithMaxDist);
    counter += subSimplify(points, indexWithMaxDist, lastIndex);
}
return counter;
}
| 3.26 |
graphhopper_RamerDouglasPeucker_simplify_rdh
|
/**
* Simplifies a part of the <code>points</code>. The <code>fromIndex</code> and <code>lastIndex</code>
* are guaranteed to be kept.
*
* @param points
* The PointList to simplify
* @param fromIndex
* Start index to simplify, should be <= <code>lastIndex</code>
* @param lastIndex
* Simplify up to this index
* @param compress
* Whether the <code>points</code> shall be compressed or not, if set to false no points
* are actually removed, but instead their lat/lon/ele is only set to NaN
* @return The number of removed points
*/
public int simplify(PointList points, int fromIndex, int lastIndex, boolean compress) {
int removed = 0;
int size = lastIndex - fromIndex;
if (f0) {
int delta = 500;
int segments = (size / delta) + 1;
int v4 = fromIndex;
for (int i = 0; i < segments; i++) {
// start of next is end of last segment, except for the last
removed += subSimplify(points, v4, Math.min(lastIndex, v4 + delta));
v4 += delta;
}
} else {
    removed = subSimplify(points, fromIndex, lastIndex);
}
if ((removed > 0) && compress)
removeNaN(points);
return removed;
}
| 3.26 |
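A hedged, JShell-style usage sketch of the two methods above (assuming an existing PointList named points): keep the first and last point and drop every point that lies within roughly 1m of the simplified line; compress=true physically removes the NaN-marked slots afterwards:

```java
RamerDouglasPeucker rdp = new RamerDouglasPeucker().setMaxDistance(1.0); // meters of allowed discrepancy
int removed = rdp.simplify(points, 0, points.size() - 1, true);
```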
graphhopper_RamerDouglasPeucker_setElevationMaxDistance_rdh
|
/**
* maximum elevation distance of discrepancy (from the normal way) in meters
*/
public RamerDouglasPeucker setElevationMaxDistance(double dist) {
this.elevationMaxDistance = dist;
return this;
}
| 3.26 |
graphhopper_RamerDouglasPeucker_removeNaN_rdh
|
/**
* Fills all entries of the point list that are NaN with the subsequent values (and therefore shortens the list)
*/
static void removeNaN(PointList pointList) {
int curr = 0;
for (int i = 0; i < pointList.size(); i++) {
if (!Double.isNaN(pointList.getLat(i))) {
pointList.set(curr, pointList.getLat(i), pointList.getLon(i), pointList.getEle(i));
curr++;
}
}
    pointList.trimToSize(curr);
}
| 3.26 |
graphhopper_RamerDouglasPeucker_setMaxDistance_rdh
|
/**
 * maximum distance of discrepancy (from the normal way) in meters
*/
public RamerDouglasPeucker setMaxDistance(double dist) {
this.normedMaxDist = calc.calcNormalizedDist(dist);
this.maxDistance = dist;
return this;
}
| 3.26 |
graphhopper_Service_hasAnyService_rdh
|
/**
*
* @return whether this Service is ever active at all, either from calendar or calendar_dates.
*/
public boolean hasAnyService() {
// Look for any service defined in calendar (on days of the week).
boolean hasAnyService = (f0 != null) && (((((((f0.monday == 1) || (f0.tuesday == 1)) || (f0.wednesday == 1)) || (f0.thursday == 1)) || (f0.friday == 1)) || (f0.saturday == 1)) || (f0.sunday == 1));
// Also look for any exceptions of type 1 (added service).
hasAnyService |= calendar_dates.values().stream().anyMatch(cd -> cd.exception_type == 1);
return hasAnyService;
}
| 3.26 |
graphhopper_Service_checkOverlap_rdh
|
/**
* Checks for overlapping days of week between two service calendars
*
* @param s1
* @param s2
* @return true if both calendars simultaneously operate on at least one day of the week
*/
public static boolean checkOverlap(Service s1, Service s2) {
if ((s1.f0 == null) || (s2.f0 == null)) {
return false;
}
// overlap exists if at least one day of week is shared by two calendars
boolean overlappingDays = (((((((s1.f0.monday == 1) && (s2.f0.monday == 1)) || ((s1.f0.tuesday == 1) && (s2.f0.tuesday == 1))) || ((s1.f0.wednesday == 1) && (s2.f0.wednesday == 1))) || ((s1.f0.thursday == 1) && (s2.f0.thursday == 1))) || ((s1.f0.friday == 1) && (s2.f0.friday == 1))) || ((s1.f0.saturday == 1) && (s2.f0.saturday == 1))) || ((s1.f0.sunday == 1) && (s2.f0.sunday == 1));
return overlappingDays;
}
| 3.26 |
graphhopper_Service_activeOn_rdh
|
/**
* Is this service active on the specified date?
*/
public boolean activeOn(LocalDate date) {
// first check for exceptions
CalendarDate exception = calendar_dates.get(date);
if (exception != null)
return exception.exception_type == 1;
        else if (f0 == null)
            return false;
        else {
int gtfsDate = ((date.getYear() * 10000) + (date.getMonthValue() * 100)) + date.getDayOfMonth();
boolean withinValidityRange = (f0.end_date >= gtfsDate) && (f0.start_date <= gtfsDate);
if (!withinValidityRange)
return false;
switch (date.getDayOfWeek()) {
case MONDAY :
return f0.monday == 1;
case TUESDAY :
return f0.tuesday == 1;
case WEDNESDAY :
                    return f0.wednesday == 1;
                case THURSDAY :
return f0.thursday == 1;
case FRIDAY :
return f0.friday == 1;
case SATURDAY :
return f0.saturday == 1;
case SUNDAY :
                    return f0.sunday == 1;
                default :
throw new IllegalArgumentException("unknown day of week constant!");
}
}
}
| 3.26 |
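A worked example of the decimal date packing used in activeOn: 2024-03-15 becomes 2024 * 10000 + 3 * 100 + 15 = 20240315, so the start_date <= gtfsDate <= end_date range check is a plain integer comparison. Class name below is illustrative:

```java
import java.time.LocalDate;

public class GtfsDateDemo {
    public static void main(String[] args) {
        LocalDate date = LocalDate.of(2024, 3, 15);
        int gtfsDate = (date.getYear() * 10000) + (date.getMonthValue() * 100) + date.getDayOfMonth();
        System.out.println(gtfsDate); // 20240315
    }
}
```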
graphhopper_Service_removeDays_rdh
|
/**
*
* @param service_id
* the service_id to assign to the newly created copy.
* @param daysToRemove
* the days of the week on which to deactivate service in the copy.
* @return a copy of this Service with any service on the specified days of the week deactivated.
 */
public Service removeDays(String service_id, EnumSet<DayOfWeek> daysToRemove) {
Service service = new Service(service_id);
// First, duplicate any Calendar in this Service, minus the specified days of the week.
if (this.f0 != null) {
Calendar calendar = new Calendar();
// TODO calendar.getDaysOfWeek/setDaysOfWeek which allow simplifying this section and activeOn below.
calendar.monday = (daysToRemove.contains(MONDAY)) ? 0 : this.f0.monday;
calendar.tuesday = (daysToRemove.contains(TUESDAY)) ? 0 : this.f0.tuesday;
calendar.wednesday = (daysToRemove.contains(WEDNESDAY)) ? 0 : this.f0.wednesday;
        calendar.thursday = (daysToRemove.contains(THURSDAY)) ? 0 : this.f0.thursday;
calendar.friday = (daysToRemove.contains(FRIDAY)) ? 0 : this.f0.friday;
calendar.saturday = (daysToRemove.contains(SATURDAY)) ? 0 : this.f0.saturday;
calendar.sunday = (daysToRemove.contains(SUNDAY)) ? 0 : this.f0.sunday;
// The new calendar should cover exactly the same time range as the existing one.
calendar.start_date = this.f0.start_date;
calendar.end_date = this.f0.end_date;
// Create the bidirectional reference between Calendar and Service.
service.f0 = calendar;
}
// Copy over all exceptions whose dates fall on days of the week that are retained.
this.calendar_dates.forEach((date, exception) -> {
DayOfWeek dow = date.getDayOfWeek();
if (!daysToRemove.contains(dow)) {
CalendarDate newException = exception.clone();
service.calendar_dates.put(date, newException);
}
});
return service;
}
| 3.26 |
benchmark_PravegaBenchmarkTransactionProducer_probeRequested_rdh
|
/**
 * Indicates whether a producer probe has been requested by the OpenMessaging benchmark.
 *
 * @param key
 * - key provided to the probe.
 * @return true if the requested event was created in the context of a producer probe.
*/
private boolean probeRequested(Optional<String> key) {
// For the expected key, see: LocalWorker.probeProducers()
final String expectedKey = "key";
return key.isPresent() && key.get().equals(expectedKey);
}
| 3.26 |
benchmark_ListPartition_partitionList_rdh
|
/**
 * Partitions a list into the specified number of sublists, distributing items round-robin.
 *
 * @param originList
 * the list to partition
 * @param size
 * the number of target sublists
 * @param <T>
 * the element type
 * @return the partitioned list
*/
public static <T> List<List<T>> partitionList(List<T> originList, int size) {
List<List<T>> resultList = new ArrayList<>();
if (((null == originList) || (0 == originList.size())) || (size <= 0)) {
return resultList;
}
    if (originList.size() <= size) {
for (T item : originList) {
List<T> v2 = new ArrayList<>();
v2.add(item);
            resultList.add(v2);
        }
for (int i = 0; i < (size - originList.size()); i++) {
resultList.add(new ArrayList<>());
}
return resultList;
}
for (int i = 0; i < size; i++) {
resultList.add(new ArrayList<>());
    }
    int count = 0;
for (T item : originList) {
int index = count % size;
resultList.get(index).add(item);
count++;
}
return resultList;
}
| 3.26 |
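A hedged, JShell-style sketch of the round-robin behavior above: with more items than sublists, item i lands in bucket (i % size); with fewer items than sublists, empty buckets pad the result to exactly size lists:

```java
import java.util.Arrays;
import java.util.List;

List<List<Integer>> parts = ListPartition.partitionList(Arrays.asList(1, 2, 3, 4, 5), 3);
System.out.println(parts); // [[1, 4], [2, 5], [3]]
```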
open-banking-gateway_PsuAuthService_tryAuthenticateUser_rdh
|
/**
 * Tries to authenticate a PSU given login and password.
*
* @param login
* PSU login
* @param password
* PSU password
* @return PSU entity if user was successfully authenticated
* @throws PsuWrongCredentials
* Exception indicating user has provided wrong name or password.
*/
@Transactional
public Psu tryAuthenticateUser(String login, String password) throws PsuWrongCredentials {
Optional<Psu> psu = psuRepository.findByLogin(login);
if (!psu.isPresent()) {
throw new PsuDoesNotExist("User not found: " + login);
}
UserIDAuth idAuth = new UserIDAuth(psu.get().getId().toString(), password::toCharArray);
enableDatasafeAuthentication(idAuth);
return psu.get();
}
| 3.26 |
open-banking-gateway_PsuAuthService_m0_rdh
|
/**
 * Creates a new PSU if it does not exist yet.
*
* @param login
* PSU login
* @param password
* PSU password
* @return New PSU
*/
@Transactional
public Psu m0(String login, String password) {
Optional<Psu> psu = psuRepository.findByLogin(login);
if (psu.isPresent()) {
    throw new PsuRegisterException("Psu already exists: " + login);
}
Psu newPsu = psuRepository.save(Psu.builder().login(login).build());
psuSecureStorage.registerPsu(newPsu, password::toCharArray);
return newPsu;
}
| 3.26 |
open-banking-gateway_CreateConsentOrPaymentPossibleErrorHandler_tryCreateAndHandleErrors_rdh
|
/**
* Swallows retryable (like wrong IBAN) consent initiation exceptions.
*
* @param tryCreate
* Consent/payment creation function to call
 */
public <T> T tryCreateAndHandleErrors(DelegateExecution execution, Supplier<T> tryCreate) {
try {
return tryCreate.get();
    } catch (ErrorResponseException ex) {
        log.debug("Trying to handle ErrorResponseException", ex);
tryHandleWrongIbanOrCredentialsExceptionOrOauth2(execution, ex);
return null;
    } catch (OAuthException ex) {
        log.debug("Trying to handle OAuthException", ex);
tryHandleOauth2Exception(execution);
return null;
} catch (RequestAuthorizationValidationException ex) {
log.debug("Trying to handle AccessTokenException", ex);
tryHandleRequestAuthorizationValidationException(execution);
return null;
}
}
| 3.26 |
open-banking-gateway_PsuSecureStorage_registerPsu_rdh
|
/**
* Registers PSU in Datasafe
*
* @param psu
* PSU data
* @param password
* PSU KeyStore/Datasafe password.
*/
public void registerPsu(Psu psu, Supplier<char[]> password) {
this.userProfile().createDocumentKeystore(psu.getUserIdAuth(password), config.defaultPrivateTemplate(psu.getUserIdAuth(password)).buildPrivateProfile());
}
| 3.26 |
open-banking-gateway_PsuSecureStorage_getOrCreateKeyFromPrivateForAspsp_rdh
|
/**
 * Gets or generates the key for PSU-to-ASPSP consent protection.
*
* @param password
* Key protection password
* @param session
* Authorization session for current user
* @param storePublicKeyIfNeeded
* If public key needs to be stored
* @return Public and Private key pair to protect PSU and ASPSP consent grant
*/
@SneakyThrows
public PubAndPrivKey getOrCreateKeyFromPrivateForAspsp(Supplier<char[]> password, AuthSession session, BiConsumer<UUID, PublicKey> storePublicKeyIfNeeded) {
try (InputStream is = datasafeServices.privateService().read(ReadRequest.forDefaultPrivate(session.getPsu().getUserIdAuth(password),
new PairIdPsuAspspTuple(session).toDatasafePathWithoutPsuAndId()))) {
return serde.readKey(is);
} catch (BaseDatasafeDbStorageService.DbStorageEntityNotFoundException ex) {
return generateAndSaveAspspSecretKey(password, session, storePublicKeyIfNeeded);
}
}
| 3.26 |
open-banking-gateway_FacadeTransientDataConfig_facadeCacheBuilder_rdh
|
/**
* Facade encryption keys cache configuration.
*
* @param expireAfterWrite
* Evict encryption key this time after write
* @return Key cache.
*/
@Bean(FACADE_CACHE_BUILDER)
CacheBuilder facadeCacheBuilder(@Value(("${" + FACADE_CONFIG_PREFIX) +
".expirable.expire-after-write}
| 3.26 |
open-banking-gateway_HbciRestorePreValidationContext_lastRedirectionTarget_rdh
|
// FIXME SerializerUtil does not support nestedness
private LastRedirectionTarget lastRedirectionTarget(BaseContext current) {
if (null == current.getLastRedirection()) {
return null;
}
    LastRedirectionTarget target = current.getLastRedirection();
    target.setRequestScoped(current.getRequestScoped());
return target;
}
| 3.26 |
open-banking-gateway_HbciAuthorizationPossibleErrorHandler_handlePossibleAuthorizationError_rdh
|
/**
* Swallows retryable (like wrong password) authorization exceptions.
*
* @param tryAuthorize
* Authorization function to call
* @param onFail
* Fallback function to call if retryable exception occurred.
*/
public void handlePossibleAuthorizationError(Runnable tryAuthorize, Consumer<MultibankingException> onFail) {
    try {
        tryAuthorize.run();
} catch (MultibankingException ex) {
rethrowIfNotAuthorizationErrorCode(ex);
onFail.accept(ex);
}
}
| 3.26 |
open-banking-gateway_AuthSessionHandler_reuseAuthSessionAndEnhanceResult_rdh
|
/**
* Continues already existing authorization session associated with the request.
*
* @param authSession
* Authorization session to continue
* @param sessionKey
* Encryption key for the authorization session
* @param context
* Service context for the request
* @param result
* Protocol response that required to continue the session
* @param <O>
* Outcome class
* @return Authorization session to reuse
*/
@NotNull
@SneakyThrows
@Transactional
public <O> AuthSession reuseAuthSessionAndEnhanceResult(AuthSession authSession, SecretKeyWithIv sessionKey, ServiceContext context, FacadeResultRedirectable<O, ?> result) {
return m0(authSession, context, sessionKey, result);
}
| 3.26 |
open-banking-gateway_AuthSessionHandler_createNewAuthSessionAndEnhanceResult_rdh
|
/**
* Creates new authorization session associated with the request.
*
* @param request
* Request to associate session with.
* @param sessionKey
* Authorization session encryption key.
* @param context
* Service context for the request
* @param result
* Protocol response that required to open the session
* @param <O>
* Outcome class
* @return New authorization session
*/
@NotNull
@SneakyThrows
@Transactional
public <O> AuthSession createNewAuthSessionAndEnhanceResult(FacadeServiceableRequest request, SecretKeyWithIv sessionKey, ServiceContext context, FacadeResultRedirectable<O, ?> result) {
return m0(request, context, sessionKey, result);
}
| 3.26 |
open-banking-gateway_QueryHeadersMapperTemplate_forExecution_rdh
|
/**
* Converts context object into object that can be used for ASPSP API call.
*
* @param context
* Context to convert
* @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls
*/
public ValidatedQueryHeaders<Q, H> forExecution(C context) {
    return new ValidatedQueryHeaders<>(toQuery.map(context), toHeaders.map(context));
}
| 3.26 |
open-banking-gateway_FintechAuthenticator_authenticateOrCreateFintech_rdh
|
/**
* Authenticates or creates new FinTech if it is missing in DB.
*
* @param request
* FinTechs' request
* @param session
* Currently served service session
* @return New or existing FinTech
*/
@Transactional
public Fintech authenticateOrCreateFintech(FacadeServiceableRequest request, ServiceSession session) {
    String v0 = request.getAuthorization();
    if (((null != session.getAuthSession()) && (null != session.getAuthSession().getFintechUser())) && (!session.getAuthSession().getFintechUser().getFintech().getGlobalId().equals(v0))) {
        log.error("[SECURITY] Fintech [{}] has requested data belonging to [{}] fintech", v0, session.getAuthSession().getFintechUser().getFintech().getGlobalId());
        throw new IllegalStateException("Security violation");
    }
    Supplier<char[]> finTechPassword = () -> request.getSessionPassword().toCharArray();
    var dbFintech = fintechRepository.findByGlobalId(v0);
dbFintech.ifPresent(fintech -> fintechSecureStorage.validatePassword(fintech, finTechPassword));
return dbFintech.orElseGet(() -> fintechRegistrar.registerFintech(v0, finTechPassword));
}
| 3.26 |
open-banking-gateway_PathQueryHeadersMapperTemplate_forExecution_rdh
|
/**
* Converts context object into object that can be used for ASPSP API call.
*
* @param context
* Context to convert
* @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls
*/
public ValidatedPathQueryHeaders<P, Q, H> forExecution(C context) {
return new ValidatedPathQueryHeaders<>(toPath.map(context), toQuery.map(context), f0.map(context));
}
| 3.26 |
open-banking-gateway_FintechConsentSpecSecureStorage_fromInboxForAuth_rdh
|
/**
* Get data from FinTechs' inbox associated with the FinTech user.
*
* @param authSession
* Authorization session associated with this user
* @param password
* FinTech user password
 * @return FinTech user's keys to access the consent, its specification, etc.
*/
@SneakyThrows
public FinTechUserInboxData fromInboxForAuth(AuthSession authSession, Supplier<char[]> password) {
try (InputStream is = datasafeServices.inboxService().read(ReadRequest.forDefaultPrivate(authSession.getFintechUser().getUserIdAuth(password), new FintechUserAuthSessionTuple(authSession).toDatasafePathWithoutParent()))) {
        return mapper.readValue(is, FintechConsentSpecSecureStorage.FinTechUserInboxData.class);
    }
}
| 3.26 |
open-banking-gateway_FintechConsentSpecSecureStorage_registerFintechUser_rdh
|
/**
* Registers FinTech user
*
* @param user
* User entity
* @param password
* Datasafe password for the user
*/
public void registerFintechUser(FintechUser user, Supplier<char[]> password) {
this.userProfile().createDocumentKeystore(user.getUserIdAuth(password), config.defaultPrivateTemplate(user.getUserIdAuth(password)).buildPrivateProfile());
}
| 3.26 |
open-banking-gateway_FintechConsentSpecSecureStorage_toInboxForAuth_rdh
|
/**
* Sends FinTech user keys to FinTech public key storage.
*
* @param authSession
* Authorization session associated with this user
* @param data
* FinTech users' private keys and other
*/
@SneakyThrows
public void toInboxForAuth(AuthSession authSession, FinTechUserInboxData data) {
try (OutputStream os = datasafeServices.inboxService().write(WriteRequest.forDefaultPublic(ImmutableSet.of(authSession.getFintechUser().getUserId()), new FintechUserAuthSessionTuple(authSession).toDatasafePathWithoutParent()))) {
os.write(mapper.writeValueAsBytes(data));
}
}
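
// Inbox round-trip sketch (hypothetical session, data and password; the two methods above and
// fromInboxForAuth are assumed to be symmetric over the same Datasafe path):
storage.toInboxForAuth(authSession, inboxData);                                  // keys prepared by the consent flow
FinTechUserInboxData restored = storage.fromInboxForAuth(authSession, () -> password.toCharArray());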
| 3.26 |
open-banking-gateway_ProtocolFacingConsentImpl_getConsentContext_rdh
|
/**
* Description of the parameters associated with this consent, i.e. list of IBANs that this consent applies to.
*/
@Override
public String getConsentContext() {
return consent.getContext(encryptionService);
}
| 3.26 |
open-banking-gateway_ProtocolFacingConsentImpl_getConsentCache_rdh
|
/**
* Returns cached data (i.e. transaction list) related to the consent.
*/
@Override
public String getConsentCache() {
    return consent.getCache(encryptionService);
}
| 3.26 |
open-banking-gateway_ConsentAccess_getFirstByCurrentSession_rdh
|
/**
 * Returns the first available consent for the current session execution, throwing an exception if none is found.
*/
default ProtocolFacingConsent getFirstByCurrentSession() {
List<ProtocolFacingConsent> consents = findByCurrentServiceSessionOrderByModifiedDesc();
if (consents.isEmpty()) {
throw new IllegalStateException("Context not found");
}
return consents.get(0);
}
| 3.26 |
open-banking-gateway_FacadeResult_m0_rdh
|
/**
* Response body
*/
default T m0() {
return null;
}
| 3.26 |
open-banking-gateway_ValidatedExecution_execute_rdh
|
/**
* Entrypoint for Flowable BPMN to call the service.
*/
@Override
@Transactional(noRollbackFor = BpmnError.class)
public void execute(DelegateExecution execution) {
@SuppressWarnings("unchecked")
T context = ((T) (ContextUtil.getContext(execution, BaseContext.class)));
logResolver.log("execute: execution ({}) with context ({})", execution, context);
doUpdateXRequestId(execution, context);
doPrepareContext(execution, context);
doValidate(execution, context);
logResolver.log("execution contextMode ({})", context.getMode());
if (ContextMode.MOCK_REAL_CALLS == context.getMode()) {
doMockedExecution(execution, context);
} else {
        doRealExecution(execution, context);
}
doAfterCall(execution, context);
logResolver.log("done execution ({}) with context ({})", execution, context);
}
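
// Subclass sketch (hypothetical class and context type; hook signatures follow the calls in
// execute() above) showing the template-method hooks this entrypoint drives:
public class CreateConsentService extends ValidatedExecution<Xs2aContext> {
    @Override
    protected void doRealExecution(DelegateExecution execution, Xs2aContext context) {
        // real ASPSP call goes here
    }

    @Override
    protected void doMockedExecution(DelegateExecution execution, Xs2aContext context) {
        // canned response when the context runs in ContextMode.MOCK_REAL_CALLS
    }
}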
| 3.26 |
open-banking-gateway_FacadeService_execute_rdh
|
/**
* Execute the request by passing it to protocol, or throw if protocol is missing.
*
* @param request
* Request to execute
* @return Result of request execution
*/
public CompletableFuture<FacadeResult<RESULT>> execute(REQUEST request) {
ProtocolWithCtx<ACTION, REQUEST> protocolWithCtx = createContextAndFindProtocol(request);
    if (protocolWithCtx == null || protocolWithCtx.getProtocol() == null) {
        throw new NoProtocolRegisteredException("can't create service context or determine protocol");
    }
    CompletableFuture<Result<RESULT>> result = execute(protocolWithCtx.getProtocol(), protocolWithCtx.getServiceContext());
return handleProtocolResult(request, protocolWithCtx, result);
}
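
// Caller sketch (hypothetical service, request and body types; the body accessor is the
// decompiled m0() from the FacadeResult snippet above):
CompletableFuture<FacadeResult<AccountListBody>> accounts = listAccountsService.execute(listAccountsRequest);
accounts.thenAccept(result -> log.info("protocol finished, body: {}", result.m0()));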
| 3.26 |
open-banking-gateway_Result_m0_rdh
|
/**
* Non-sensitive information that can be persisted with authorization session and read on subsequent requests.
* For example some internal ID, or protocol-encrypted data.
*/
default String m0() {
return null;
}
| 3.26 |
open-banking-gateway_EncryptionKeySerde_readKey_rdh
|
/**
* Read public-private key pair from InputStream
*
* @param is
* InputStream to read key from
* @return Read key pair
*/
@SneakyThrows
public PubAndPrivKey readKey(InputStream is) {
    PubAndPrivKeyContainer container = mapper.readValue(is, EncryptionKeySerde.PubAndPrivKeyContainer.class);
if (!PKCS_8.equals(container.getPrivFormat())) {
throw new IllegalArgumentException("Bad key format");
}
if (!X509.equals(container.getPubFormat())) {
throw new IllegalArgumentException("Bad key format");
}
KeyFactory factory = KeyFactory.getInstance(container.getAlgo());
var privKey = factory.generatePrivate(new PKCS8EncodedKeySpec(container.getEncoded()));
var pubKey = factory.generatePublic(new X509EncodedKeySpec(container.getPubEncoded()));
return new PubAndPrivKey(pubKey, privKey);
}
| 3.26 |
open-banking-gateway_EncryptionKeySerde_asString_rdh
|
/**
* Convert symmetric key with initialization vector to string.
*
* @param secretKeyWithIv
* Symmetric Key + IV
* @return Serialized key
*/
@SneakyThrows
public String asString(SecretKeyWithIv secretKeyWithIv) {
return mapper.writeValueAsString(new SecretKeyWithIvContainer(secretKeyWithIv));
}
| 3.26 |
open-banking-gateway_EncryptionKeySerde_writeKey_rdh
|
/**
* Write public-private key pair into OutputStream
*
* @param publicKey
* Public key of pair
* @param privKey
* Private key of pair
* @param os
* Output stream to write to
*/
@SneakyThrows
public void writeKey(PublicKey publicKey, PrivateKey privKey, OutputStream os) {
// Mapper may choose to close the stream if using stream interface, we don't want this
// as objects are small - this is ok.
os.write(mapper.writeValueAsBytes(new PubAndPrivKeyContainer(publicKey, privKey)));
}
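
// Round-trip sketch (hypothetical: RSA pair generated locally, in-memory streams; assumes
// writeKey(...) stores the PKCS#8/X.509 encodings that readKey(...) above validates):
KeyPair pair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
ByteArrayOutputStream os = new ByteArrayOutputStream();
serde.writeKey(pair.getPublic(), pair.getPrivate(), os);
PubAndPrivKey restored = serde.readKey(new ByteArrayInputStream(os.toByteArray()));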
| 3.26 |
open-banking-gateway_EncryptionKeySerde_read_rdh
|
/**
* Read symmetric key with initialization vector from input stream.
*
* @param is
* Stream with key
* @return Read key
*/
@SneakyThrows
public SecretKeyWithIv read(InputStream is) {
SecretKeyWithIvContainer container = mapper.readValue(is, EncryptionKeySerde.SecretKeyWithIvContainer.class);
return new SecretKeyWithIv(container.getIv(), new SecretKeySpec(container.getEncoded(), container.getAlgo()));
}
| 3.26 |
open-banking-gateway_EncryptionKeySerde_write_rdh
|
/**
* Write symmetric key with initialization vector to output stream.
*
* @param value
* Key to write
* @param os
* Output stream to write to
*/
@SneakyThrows
public void write(SecretKeyWithIv value, OutputStream os) {
// Mapper may choose to close the stream if using stream interface, we don't want this
// as objects are small - this is ok.
os.write(mapper.writeValueAsBytes(new SecretKeyWithIvContainer(value)));
}
| 3.26 |
open-banking-gateway_EncryptionKeySerde_fromString_rdh
|
/**
* Convert string to symmetric key with initialization vector.
*
* @param fromString
 *             String to build key from
 * @return Deserialized key
 */
@SneakyThrows
public SecretKeyWithIv fromString(String fromString) {
    SecretKeyWithIvContainer container = mapper.readValue(fromString, EncryptionKeySerde.SecretKeyWithIvContainer.class);
    return new SecretKeyWithIv(container.getIv(), new SecretKeySpec(container.getEncoded(), container.getAlgo()));
}
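
// Round-trip sketch (hypothetical: AES-256 key and 16-byte IV generated locally; serde wiring assumed):
KeyGenerator gen = KeyGenerator.getInstance("AES");
gen.init(256);
byte[] iv = new byte[16];
new SecureRandom().nextBytes(iv);
SecretKeyWithIv original = new SecretKeyWithIv(iv, gen.generateKey());
SecretKeyWithIv restored = serde.fromString(serde.asString(original));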
| 3.26 |
open-banking-gateway_WebDriverBasedPaymentInitiation_sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack_rdh
|
/* Caused by FIXME https://github.com/adorsys/XS2A-Sandbox/issues/42, should be sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only */
public SELF sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack(WebDriver driver) {
    acc.sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack(driver, authSessionCookie);
    return self();
}
| 3.26 |
open-banking-gateway_WebDriverBasedPaymentInitiation_sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only_rdh
|
// Sending cookie with last request as it doesn't exist in browser for API tests
// null for cookieDomain is the valid value for localhost tests. This works correctly for localhost.
public SELF sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only(WebDriver driver) {
acc.sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only(driver, authSessionCookie);
    return self();
}
| 3.26 |
open-banking-gateway_ConsentAccessFactory_consentForFintech_rdh
|
/**
* Consent for Fintech (executed on i.e. ListAccounts).
*
* @param fintech
 *             FinTech that wants to access the consents
* @param aspsp
* ASPSP(bank) that grants consent
* @param session
* Service session for this consent
* @param fintechPassword
* FinTech Keystore protection password
* @return New consent access template
*/
public ConsentAccess consentForFintech(Fintech fintech, Bank aspsp, ServiceSession session, Supplier<char[]> fintechPassword) {
var anonymousAccess =
new AnonymousPsuConsentAccess(aspsp, fintech, fintechPubKeys, psuEncryption, session, consentRepository, encServiceProvider, encryptionKeySerde);
return new FintechConsentAccessImpl(fintech, psuEncryption, fintechPsuAspspPrvKeyRepository, fintechVault, consentRepository, entityManager, session.getId(), fintechPassword, anonymousAccess, encServiceProvider, encryptionKeySerde);
}
| 3.26 |
open-banking-gateway_ConsentAccessFactory_consentForAnonymousPsu_rdh
|
/**
 * Consent access for an anonymous PSU (one that does not require login to OBG) and ASPSP tuple.
 *
 * @param fintech
 *             FinTech on whose behalf the consent is accessed
 * @param aspsp
 *             ASPSP(bank) that grants consent
* @param session
* Service session for this consent
* @return New consent access template
*/
public ConsentAccess consentForAnonymousPsu(Fintech fintech, Bank aspsp, ServiceSession session) {
return new AnonymousPsuConsentAccess(aspsp, fintech, fintechPubKeys, psuEncryption, session, consentRepository, encServiceProvider, encryptionKeySerde);
}
| 3.26 |
open-banking-gateway_ConsentAccessFactory_consentForPsuAndAspsp_rdh
|
/**
* Consent access for PSU-ASPSP tuple.
*
* @param psu
* Fintech user/PSU to grant consent for
* @param aspsp
* ASPSP(bank) that grants consent
* @param session
* Service session for this consent
* @return New consent access template
*/
public ConsentAccess consentForPsuAndAspsp(Psu psu, Bank aspsp, ServiceSession session) {
    PsuAspspPrvKey prvKey = prvKeyRepository.findByPsuIdAndAspspId(psu.getId(), aspsp.getId())
            .orElseThrow(() -> new IllegalStateException("No public key for: " + psu.getId()));
return new PsuConsentAccess(psu, aspsp, psuEncryption.forPublicKey(prvKey.getId(), prvKey.getPubKey().getKey()), session, consentRepository);
}
| 3.26 |
open-banking-gateway_ProtocolResultHandler_handleResult_rdh
|
/**
* Handles the result from protocol for the {@code FacadeService} to pass it to API.
* This class must ensure that it is separate transaction - so it won't join any other as is used with
* CompletableFuture.
*/
@Transactional(propagation = Propagation.REQUIRES_NEW)
public <RESULT, REQUEST extends FacadeServiceableGetter> FacadeResult<RESULT> handleResult(Result<RESULT> result, FacadeServiceableRequest request, ServiceContext<REQUEST> session) {
SecretKeyWithIv sessionKey = provider.deregister(session.getRequestScoped()).getKey();
return doHandleResult(result, request, session, sessionKey);
}
| 3.26 |
open-banking-gateway_ConsentAccessUtil_getProtocolFacingConsent_rdh
|
/**
 * Retrieves exactly one consent out of the available ones; throws if more are available.
*
* @param consents
* Consents
* @return 1st element of the collection.
*/
@NotNull
public Optional<ProtocolFacingConsent> getProtocolFacingConsent(Collection<ProtocolFacingConsent> consents) {
if (consents.isEmpty()) {
return Optional.empty();
}
if (consents.size() > 1) {
throw new IllegalStateException("Too many consents");
}
return Optional.of(consents.iterator().next());
}
| 3.26 |
open-banking-gateway_WebDriverBasedAccountInformation_sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only_rdh
|
// Sending cookie with last request as it doesn't exist in browser for API tests
// null for cookieDomain is the valid value for localhost tests. This works correctly for localhost.
public SELF sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only(WebDriver driver, String authSessionCookie) {
waitForPageLoad(driver);
add_open_banking_auth_session_key_cookie_to_selenium(driver, authSessionCookie);
try {
clickOnButton(driver, By.className("btn-primary"), true);
    } finally {
        driver.manage().deleteCookieNamed(AUTHORIZATION_SESSION_KEY);
}
return self();
}
| 3.26 |
open-banking-gateway_WebDriverBasedAccountInformation_sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack_rdh
|
/* Caused by FIXME https://github.com/adorsys/XS2A-Sandbox/issues/42, should be sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only */
public SELF sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack(WebDriver driver, String authSessionCookie) {
    waitForPageLoad(driver);
    add_open_banking_auth_session_key_cookie_to_selenium(driver, authSessionCookie);
try {
clickOnButton(driver, By.className("btn-primary"), true);
    } finally {
        driver.manage().deleteCookieNamed(AUTHORIZATION_SESSION_KEY);
}
return self();
}
| 3.26 |
open-banking-gateway_PathHeadersMapperTemplate_forExecution_rdh
|
/**
* Converts context object into object that can be used for ASPSP API call.
*
* @param context
* Context to convert
* @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls
*/
public ValidatedPathHeaders<P, H> forExecution(C context) {
return new ValidatedPathHeaders<>(toPath.map(context), toHeaders.map(context));
}
| 3.26 |
open-banking-gateway_ProcessEventHandlerRegistrar_addHandler_rdh
|
/**
* Adds handler for BPMN event.
*
* @param processId
* BPMN process id event source. BPMN can have multiple executions of same process, this is
* the id of the process that identifies the execution uniquely.
* @param mapper
* Mapper to transform internal event that is sent by BPMN to higher-level result, i.e. to
* {@link de.adorsys.opba.protocol.api.dto.result.fromprotocol.Result} that is expected by
* an entrypoint that triggered the process.
* @param <T>
* Expected result class. This class will be mapped from internal process result
* ({@link de.adorsys.opba.protocol.bpmnshared.dto.messages.InternalProcessResult}) by {@code mapper}
*/
public <T> void addHandler(String processId, OutcomeMapper<T> mapper) {
handler.add(processId, procResult -> {
if (procResult instanceof InternalReturnableConsentGoneProcessError) {
mapper.onReturnableProcessError(((InternalReturnableConsentGoneProcessError) (procResult)));
} else if (procResult instanceof ProcessResponse) {
mapper.onSuccess(((ProcessResponse) (procResult)));
} else if (procResult instanceof Redirect) {
mapper.onRedirect(((Redirect) (procResult)));
} else if (procResult instanceof ValidationProblem) {
mapper.onValidationProblem(((ValidationProblem) (procResult)));
} else if (procResult instanceof ConsentAcquired) {
mapper.onConsentAcquired(((ConsentAcquired) (procResult)));
} else {
mapper.onError();
        }
    });
}
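
// Registration sketch (hypothetical ids and mapper; OutcomeMapper<T> is built by the entrypoint
// that started the BPMN process and completes the caller's future):
registrar.addHandler(processInstanceId, outcomeMapper);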
| 3.26 |