| name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
graphhopper_ArrayUtil_merge_rdh
|
/**
*
* @param a
* sorted array
* @param b
* sorted array
* @return sorted array consisting of the elements of a and b, duplicates get removed
*/
public static int[] merge(int[] a, int[] b) {
if ((a.length + b.length) == 0)
return new int[]{ };
int[] v30 = new int[a.length + b.length];
int size = 0;
int i = 0;
int j = 0;
while ((i < a.length) && (j < b.length)) {
if (a[i]
< b[j])
v30[size++] = a[i++];
else
v30[size++] = b[j++];
}
if (i == a.length) {
System.arraycopy(b, j, v30, size, b.length - j);
size += b.length - j;
} else {
System.arraycopy(a, i, v30, size, a.length - i);
size += a.length - i;
}
int sizeWithoutDuplicates = removeConsecutiveDuplicates(v30, size);
return Arrays.copyOf(v30, sizeWithoutDuplicates);
}
| 3.26 |
graphhopper_ArrayUtil_removeConsecutiveDuplicates_rdh
|
/**
* Removes all duplicate elements of the given array in the range [0, end[ in place
*
* @return the size of the new range that contains no duplicates (smaller or equal to end).
*/
public static int removeConsecutiveDuplicates(int[] arr, int end) {
int curr = 0;
for
(int i = 1; i < end; ++i) {
if (arr[i] != arr[curr])
arr[++curr] = arr[i];
}
return curr + 1;
}
| 3.26 |
graphhopper_ArrayUtil_constant_rdh
|
/**
* Creates an IntArrayList of a given size where each element is set to the given value
*/
public static IntArrayList constant(int size, int value) {
IntArrayList result = new IntArrayList(size);
Arrays.fill(result.buffer, value);
result.elementsCount = size;
return result;
}
| 3.26 |
graphhopper_GraphHopperWeb_setPostRequest_rdh
|
/**
* Use new endpoint 'POST /route' instead of 'GET /route'
*/
public GraphHopperWeb setPostRequest(boolean postRequest)
{
this.postRequest = postRequest; return this;
}
| 3.26 |
graphhopper_GraphHopperWeb_setOptimize_rdh
|
/**
*
* @param optimize
* "false" if the order of the locations should be left
* unchanged, this is the default. Or if "true" then the order of the
* location is optimized according to the overall best route and returned
* this way i.e. the traveling salesman problem is solved under the hood.
* Note that in this case the request takes longer and costs more credits.
* For more details see:
* https://github.com/graphhopper/directions-api/blob/master/FAQ.md#what-is-one-credit
*/
public GraphHopperWeb setOptimize(String optimize) {
this.optimize = optimize;
return this;
}
| 3.26 |
graphhopper_CHStorageBuilder_addShortcutEdgeBased_rdh
|
/**
*
* @param origKeyFirst
* The first original edge key that is skipped by this shortcut *in the direction of the shortcut*.
* This definition assumes that edge-based shortcuts are one-directional, and they are.
* For example for the following shortcut edge from x to y: x->u->v->w->y ,
* which skips the shortcuts x->v and v->y the first original edge key would be the one of the edge x->u
* @param origKeyLast
* like origKeyFirst, but the last orig edge key, i.e. the key of w->y in above example
*/
public int addShortcutEdgeBased(int a, int b, int accessFlags, double weight, int skippedEdge1, int skippedEdge2, int origKeyFirst,
int origKeyLast) {
checkNewShortcut(a, b);
int shortcut = storage.shortcutEdgeBased(a, b, accessFlags, weight, skippedEdge1, skippedEdge2, origKeyFirst, origKeyLast);
setLastShortcut(a, shortcut); return shortcut;
}
| 3.26 |
graphhopper_ValueExpressionVisitor_isValidIdentifier_rdh
|
// allow only methods and other identifiers (constants and encoded values)
boolean isValidIdentifier(String identifier) {
if (variableValidator.isValid(identifier)) {
if (!Character.isUpperCase(identifier.charAt(0)))
result.guessedVariables.add(identifier);
return
true;
}
return false;
}
| 3.26 |
graphhopper_UrbanDensityCalculator_calcUrbanDensity_rdh
|
/**
* Calculates the urban density (rural/residential/city) for all edges of the graph.
* First a weighted road density is calculated for every edge to determine whether it belongs to a residential area.
* In a second step very dense residential areas are classified as 'city'.
*
* @param residentialAreaRadius
* radius used for residential area calculation in meters
* @param residentialAreaSensitivity
* Use higher values if there are too many residential areas that are not recognized. Use
* smaller values if there are too many non-residential areas that are classified as residential.
* @param cityAreaRadius
* in meters, see residentialAreaRadius
* @param cityAreaSensitivity
* similar to residentialAreaSensitivity, but for the city classification
* @param threads
* number of threads used to calculate the road densities
*/ public static void calcUrbanDensity(Graph graph, EnumEncodedValue<UrbanDensity> urbanDensityEnc, EnumEncodedValue<RoadClass> roadClassEnc, BooleanEncodedValue roadClassLinkEnc, double residentialAreaRadius, double residentialAreaSensitivity, double cityAreaRadius, double cityAreaSensitivity, int threads) {
logger.info("Calculating residential areas ..., radius={}, sensitivity={}, threads={}", residentialAreaRadius, residentialAreaSensitivity, threads);
StopWatch sw = StopWatch.started();
calcResidential(graph, urbanDensityEnc, roadClassEnc, roadClassLinkEnc,
residentialAreaRadius, residentialAreaSensitivity, threads);
logger.info(("Finished calculating residential areas, took: " + sw.stop().getSeconds()) + "s");
if (cityAreaRadius > 1) {
logger.info("Calculating city areas ..., radius={}, sensitivity={}, threads={}", cityAreaRadius, cityAreaSensitivity, threads);
sw = StopWatch.started();
calcCity(graph, urbanDensityEnc, cityAreaRadius, cityAreaSensitivity, threads);
logger.info(("Finished calculating city areas, took: " + sw.stop().getSeconds()) + "s");}
}
| 3.26 |
graphhopper_BBox_createInverse_rdh
|
/**
* Prefills BBox with minimum values so that it can increase.
*/public static BBox createInverse(boolean elevation) {
if (elevation) {
return new BBox(Double.MAX_VALUE, -Double.MAX_VALUE, Double.MAX_VALUE, -Double.MAX_VALUE, Double.MAX_VALUE, -Double.MAX_VALUE, true);
} else {
return new BBox(Double.MAX_VALUE, -Double.MAX_VALUE, Double.MAX_VALUE, -Double.MAX_VALUE, Double.NaN, Double.NaN, false);
}
}
| 3.26 |
graphhopper_BBox_calculateIntersection_rdh
|
/**
* Calculates the intersecting BBox between this and the specified BBox
*
* @return the intersecting BBox or null if not intersecting
*/
public BBox calculateIntersection(BBox bBox) {
if (!this.intersects(bBox))
return null;
double minLon = Math.max(this.minLon, bBox.minLon);
double maxLon =
Math.min(this.maxLon, bBox.maxLon);
double minLat = Math.max(this.minLat, bBox.minLat);
double maxLat = Math.min(this.maxLat, bBox.maxLat);
return new BBox(minLon, maxLon, minLat, maxLat);
}
| 3.26 |
graphhopper_BBox_parseBBoxString_rdh
|
/**
* This method creates a BBox out of a string in format lon1,lon2,lat1,lat2
*/
public static BBox parseBBoxString(String objectAsString) {
String[] splittedObject = objectAsString.split(",");
if (splittedObject.length != 4)
throw new IllegalArgumentException("BBox should have 4 parts but was " + objectAsString);
double minLon = Double.parseDouble(splittedObject[0]);
double v21 = Double.parseDouble(splittedObject[1]);
double minLat = Double.parseDouble(splittedObject[2]);
double maxLat =
Double.parseDouble(splittedObject[3]);
return new BBox(minLon,
v21, minLat, maxLat);
}
| 3.26 |
graphhopper_BBox_intersects_rdh
|
/**
* This method calculates if this BBox intersects with the specified BBox
*/
public boolean intersects(double minLon, double maxLon, double minLat, double maxLat) {
return (((this.minLon < maxLon) && (this.minLat < maxLat)) && (minLon < this.maxLon)) && (minLat < this.maxLat);
}
| 3.26 |
graphhopper_BBox_parseTwoPoints_rdh
|
/**
* This method creates a BBox out of a string in format lat1,lon1,lat2,lon2
*/
public static BBox parseTwoPoints(String objectAsString) {
String[] splittedObject = objectAsString.split(",");
if (splittedObject.length != 4)
throw new IllegalArgumentException("BBox should have 4 parts but was " + objectAsString);
double minLat = Double.parseDouble(splittedObject[0]);double minLon = Double.parseDouble(splittedObject[1]);
double maxLat = Double.parseDouble(splittedObject[2]);
double maxLon = Double.parseDouble(splittedObject[3]);return BBox.fromPoints(minLat, minLon, maxLat, maxLon);
}
| 3.26 |
graphhopper_PrepareRoutingSubnetworks_setMinNetworkSize_rdh
|
/**
* All components of the graph with less than 2*{@link #minNetworkSize} directed edges (edge keys) will be marked
* as subnetworks. The biggest component will never be marked as subnetwork, even when it is below this size.
*/
public PrepareRoutingSubnetworks setMinNetworkSize(int minNetworkSize) {
this.minNetworkSize = minNetworkSize;
return this;
}
| 3.26 |
graphhopper_PrepareRoutingSubnetworks_doWork_rdh
|
/**
* Finds and marks all subnetworks according to {@link #setMinNetworkSize(int)}
*
* @return the total number of marked edges
*/
public int doWork() {
if (minNetworkSize <= 0) {
f0.info("Skipping subnetwork search: prepare.min_network_size: " + minNetworkSize);
return 0;
}
StopWatch sw = new StopWatch().start();
f0.info((((((((((("Start marking subnetworks, prepare.min_network_size: " + minNetworkSize) + ", threads: ")
+ threads)
+ ", nodes: ") + Helper.nf(graph.getNodes())) + ", edges: ") + Helper.nf(graph.getEdges())) + ", jobs: ") + prepareJobs) + ", ") + Helper.getMemInfo());
AtomicInteger total = new AtomicInteger(0);
List<BitSet> flags = Stream.generate(() -> new BitSet(graph.getEdges())).limit(prepareJobs.size()).collect(Collectors.toList());
Stream<Runnable> runnables = IntStream.range(0, prepareJobs.size()).mapToObj(i -> () -> {
PrepareJob job = prepareJobs.get(i);
total.addAndGet(setSubnetworks(job.weighting, job.subnetworkEnc.getName().replaceAll("_subnetwork", ""), flags.get(i)));
});
GHUtility.runConcurrently(runnables, threads);
AllEdgesIterator iter = graph.getAllEdges();
while (iter.next()) {
for (int i = 0; i < prepareJobs.size(); i++) {
PrepareJob prepareJob = prepareJobs.get(i); iter.set(prepareJob.subnetworkEnc, flags.get(i).get(iter.getEdge()));
}
}
f0.info((((("Finished finding and marking subnetworks for " + prepareJobs.size()) + " jobs, took: ") + sw.stop().getSeconds()) + "s, ") + Helper.getMemInfo());
return total.get();
}
| 3.26 |
graphhopper_BitUtil_toSignedInt_rdh
|
/**
* Converts the specified long back into a signed int (reverse method for toUnsignedLong)
*/
public static int toSignedInt(long x) {
return ((int) (x));
}
| 3.26 |
graphhopper_BitUtil_toBitString_rdh
|
/**
* Higher order bits comes first in the returned string.
*/
public String toBitString(byte[] bytes) {
StringBuilder sb = new StringBuilder(bytes.length * 8);
byte lastBit = ((byte) (1 << 7));
for (int bIndex = bytes.length - 1; bIndex >= 0; bIndex--) {
byte b = bytes[bIndex];
for (int i = 0; i < 8; i++) {
if ((b & lastBit) == 0)
sb.append('0');
else
sb.append('1');
b <<= 1;
}
}
return sb.toString();
}
| 3.26 |
graphhopper_BitUtil_toLong_rdh
|
/**
* See the counterpart {@link #fromLong(long)}
*/public final long toLong(byte[] b) {
return
toLong(b, 0);
}
| 3.26 |
graphhopper_BitUtil_toUnsignedLong_rdh
|
/**
* This method handles the specified (potentially negative) int as unsigned bit representation
* and returns the positive converted long.
*/
public static long toUnsignedLong(int
x) {
return ((long) (x)) & 0xffffffffL;
}
| 3.26 |
graphhopper_BikeCommonAverageSpeedParser_applyMaxSpeed_rdh
|
/**
*
* @param way
* needed to retrieve tags
* @param speed
* speed guessed e.g. from the road type or other tags
* @return The assumed average speed.
*/
double applyMaxSpeed(ReaderWay way, double speed, boolean bwd) {
double maxSpeed = getMaxSpeed(way, bwd);
// We strictly obey speed limits, see #600
return
isValidSpeed(maxSpeed) && (speed > maxSpeed) ? maxSpeed : speed;
}
| 3.26 |
graphhopper_WayToEdgesMap_reserve_rdh
|
/**
* We need to reserve a way before we can put the associated edges into the map.
* This way we can define a set of keys/ways for which we shall add edges later.
*/
public void reserve(long way) {
offsetIndexByWay.put(way, RESERVED);
}
| 3.26 |
graphhopper_FindMinMax_splitIntoBlocks_rdh
|
/**
* Splits the specified list into several list of statements starting with if
*/
private static List<List<Statement>> splitIntoBlocks(List<Statement> statements) {
List<List<Statement>> result = new ArrayList<>();
List<Statement> block = null;
for (Statement st : statements) {
if (IF.equals(st.getKeyword()))
result.add(block = new ArrayList<>());
if (block == null)
throw new IllegalArgumentException("Every block must start with an if-statement");
block.add(st);
}
return result;
}
| 3.26 |
graphhopper_FindMinMax_checkLMConstraints_rdh
|
/**
* This method throws an exception when this CustomModel would decrease the edge weight compared to the specified
* baseModel as in such a case the optimality of A* with landmarks can no longer be guaranteed (as the preparation
* is based on baseModel).
*/
public static void checkLMConstraints(CustomModel baseModel, CustomModel queryModel, EncodedValueLookup lookup) {
if (queryModel.isInternal())
throw new IllegalArgumentException("CustomModel of query cannot be internal");
if (queryModel.getDistanceInfluence() != null) {
double bmDI = (baseModel.getDistanceInfluence() == null) ? 0 :
baseModel.getDistanceInfluence();
if (queryModel.getDistanceInfluence() < bmDI)throw new IllegalArgumentException((("CustomModel in query can only use distance_influence bigger or equal to " + bmDI) + ", but was: ") + queryModel.getDistanceInfluence());
}
checkMultiplyValue(queryModel.getPriority(), lookup);
checkMultiplyValue(queryModel.getSpeed(), lookup);
}
| 3.26 |
graphhopper_FindMinMax_findMinMax_rdh
|
/**
* This method returns the smallest value possible in "min" and the smallest value that cannot be
* exceeded by any edge in max.
*/
static MinMax findMinMax(Set<String> createdObjects, MinMax minMax, List<Statement> statements, EncodedValueLookup lookup) {
// 'blocks' of the statements are applied one after the other. A block consists of one (if) or more statements (elseif+else)
List<List<Statement>> blocks = splitIntoBlocks(statements);
for (List<Statement> block : blocks)
findMinMaxForBlock(createdObjects, minMax, block, lookup);
return minMax;
}
| 3.26 |
graphhopper_OSMReader_addEdge_rdh
|
/**
* This method is called for each segment an OSM way is split into during the second pass of {@link WaySegmentParser}.
*
* @param fromIndex
* a unique integer id for the first node of this segment
* @param toIndex
* a unique integer id for the last node of this segment
* @param pointList
* coordinates of this segment
* @param way
* the OSM way this segment was taken from
* @param nodeTags
* node tags of this segment. there is one map of tags for each point.
*/
protected void addEdge(int fromIndex, int toIndex, PointList pointList, ReaderWay way, List<Map<String, Object>> nodeTags) {
// sanity checks
if ((fromIndex < 0) || (toIndex < 0))
throw
new AssertionError((((("to or from index is invalid for this edge " + fromIndex) + "->") +
toIndex) + ", points:") + pointList);
if (pointList.getDimension() != nodeAccess.getDimension())
throw new AssertionError((("Dimension does not match for pointList vs. nodeAccess " + pointList.getDimension()) + " <-> ") + nodeAccess.getDimension());
if (pointList.size() != nodeTags.size())
throw new AssertionError((("there should be as many maps of node tags as there are points. node tags: " + nodeTags.size()) + ", points: ") + pointList.size());
// todo: in principle it should be possible to delay elevation calculation so we do not need to store
// elevations during import (saves memory in pillar info during import). also note that we already need to
// to do some kind of elevation processing (bridge+tunnel interpolation in GraphHopper class, maybe this can
// go together
if (pointList.is3D()) {
// sample points along long edges
if (config.getLongEdgeSamplingDistance() < Double.MAX_VALUE)
pointList = EdgeSampling.sample(pointList, config.getLongEdgeSamplingDistance(), distCalc, eleProvider);
// smooth the elevation before calculating the distance because the distance will be incorrect if calculated afterwards
if (config.getElevationSmoothing().equals("ramer"))
EdgeElevationSmoothingRamer.smooth(pointList, config.getElevationSmoothingRamerMax());
else if (config.getElevationSmoothing().equals("moving_average"))
EdgeElevationSmoothingMovingAverage.smooth(pointList, config.getSmoothElevationAverageWindowSize());else if (!config.getElevationSmoothing().isEmpty())
throw new AssertionError(("Unsupported elevation smoothing algorithm: '" + config.getElevationSmoothing()) + "'");
}
if ((config.getMaxWayPointDistance() > 0) && (pointList.size()
> 2))
simplifyAlgo.simplify(pointList);
double distance = distCalc.calcDistance(pointList);
if (distance < 0.001) {
// As investigation shows often two paths should have crossed via one identical point
// but end up in two very close points.
zeroCounter++;
distance = 0.001;
}
double maxDistance = (Integer.MAX_VALUE - 1)
/ 1000.0;
if (Double.isNaN(distance)) {
LOGGER.warn((("Bug in OSM or GraphHopper. Illegal tower node distance " + distance) + " reset to 1m, osm way ") + way.getId());
distance = 1;
}
if (Double.isInfinite(distance) || (distance > maxDistance)) {
// Too large is very rare and often the wrong tagging. See #435
// so we can avoid the complexity of splitting the way for now (new towernodes would be required, splitting up geometry etc)
// For example this happens here: https://www.openstreetmap.org/way/672506453 (Cape Town - Tristan da Cunha ferry)
LOGGER.warn((("Bug in OSM or GraphHopper. Too big tower node distance " + distance) + " reset to large value, osm way ") + way.getId());distance = maxDistance;
}
setArtificialWayTags(pointList, way, distance, nodeTags);
IntsRef relationFlags = getRelFlagsMap(way.getId());
EdgeIteratorState
v19 = baseGraph.edge(fromIndex, toIndex).setDistance(distance);osmParsers.handleWayTags(v19.getEdge(), edgeIntAccess, way, relationFlags);
List<KVStorage.KeyValue> list = way.getTag("key_values", Collections.emptyList());
if (!list.isEmpty())
v19.setKeyValues(list);
// If the entire way is just the first and last point, do not waste space storing an empty way geometry
if (pointList.size() > 2)
{
// the geometry consists only of pillar nodes, but we check that the first and last points of the pointList
// are equal to the tower node coordinates
checkCoordinates(fromIndex, pointList.get(0));checkCoordinates(toIndex, pointList.get(pointList.size() - 1));
v19.setWayGeometry(pointList.shallowCopy(1, pointList.size() - 1, false));
}
checkDistance(v19);
restrictedWaysToEdgesMap.putIfReserved(way.getId(), v19.getEdge());
}
| 3.26 |
graphhopper_OSMReader_getDataDate_rdh
|
/**
*
* @return the timestamp given in the OSM file header or null if not found
*/
public Date getDataDate() {
return osmDataDate;
}
| 3.26 |
graphhopper_OSMReader_processRelation_rdh
|
/**
* This method is called for each relation during the second pass of {@link WaySegmentParser}
* We use it to save the relations and process them afterwards.
*/
protected void processRelation(ReaderRelation relation, LongToIntFunction getIdForOSMNodeId) {
if (turnCostStorage != null)
if (RestrictionConverter.isTurnRestriction(relation)) {
long osmViaNode = RestrictionConverter.getViaNodeIfViaNodeRestriction(relation);
if (osmViaNode >= 0) {
int
viaNode = getIdForOSMNodeId.applyAsInt(osmViaNode);
// only include the restriction if the corresponding node wasn't excluded
if (viaNode >= 0) {
relation.setTag("graphhopper:via_node", viaNode);
restrictionRelations.add(relation);
}
} else// not a via-node restriction -> simply add it as is
restrictionRelations.add(relation);
}
}
| 3.26 |
graphhopper_OSMReader_preprocessWay_rdh
|
/**
* This method is called for each way during the second pass and before the way is split into edges.
* We currently use it to parse road names and calculate the distance of a way to determine the speed based on
* the duration tag when it is present. The latter cannot be done on a per-edge basis, because the duration tag
* refers to the duration of the entire way.
*/
protected void preprocessWay(ReaderWay way, WaySegmentParser.CoordinateSupplier coordinateSupplier) {
// storing the road name does not yet depend on the flagEncoder so manage it directly
List<KVStorage.KeyValue> list = new ArrayList<>();
if (config.isParseWayNames()) {
// http://wiki.openstreetmap.org/wiki/Key:name
String name = "";
if (!config.getPreferredLanguage().isEmpty())
name = fixWayName(way.getTag("name:" + config.getPreferredLanguage()));
if (name.isEmpty())
name = fixWayName(way.getTag("name"));
if (!name.isEmpty())
list.add(new KVStorage.KeyValue(STREET_NAME, name));
// http://wiki.openstreetmap.org/wiki/Key:ref
String refName = fixWayName(way.getTag("ref"));
if (!refName.isEmpty())
list.add(new KVStorage.KeyValue(STREET_REF, refName));
if (way.hasTag("destination:ref")) {
list.add(new KVStorage.KeyValue(STREET_DESTINATION_REF, fixWayName(way.getTag("destination:ref"))));
} else {
if (way.hasTag("destination:ref:forward"))list.add(new KVStorage.KeyValue(STREET_DESTINATION_REF, fixWayName(way.getTag("destination:ref:forward")), true, false));
if (way.hasTag("destination:ref:backward"))
list.add(new KVStorage.KeyValue(STREET_DESTINATION_REF, fixWayName(way.getTag("destination:ref:backward")), false, true));
}
if (way.hasTag("destination")) {
list.add(new KVStorage.KeyValue(STREET_DESTINATION, fixWayName(way.getTag("destination"))));
} else {
if (way.hasTag("destination:forward"))
list.add(new KVStorage.KeyValue(STREET_DESTINATION, fixWayName(way.getTag("destination:forward")), true, false));
if (way.hasTag("destination:backward"))
list.add(new KVStorage.KeyValue(STREET_DESTINATION, fixWayName(way.getTag("destination:backward")), false, true));
}
}
way.setTag("key_values", list);
if (!isCalculateWayDistance(way))
return;
double distance = calcDistance(way, coordinateSupplier);
if (Double.isNaN(distance)) {
// Some nodes were missing, and we cannot determine the distance. This can happen when ways are only
// included partially in an OSM extract. In this case we cannot calculate the speed either, so we return.
LOGGER.warn("Could not determine distance for OSM way: " + way.getId());
return;
}
way.setTag("way_distance", distance);
// For ways with a duration tag we determine the average speed. This is needed for e.g. ferry routes, because
// the duration tag is only valid for the entire way, and it would be wrong to use it after splitting the way
// into edges.
String durationTag = way.getTag("duration");
if (durationTag == null) {
// no duration tag -> we cannot derive speed. happens very frequently for short ferries, but also for some long ones, see: #2532
if (isFerry(way) && (distance > 500000))
OSM_WARNING_LOGGER.warn(((("Long ferry OSM way without duration tag: " + way.getId()) + ", distance: ") + Math.round(distance /
1000.0)) + " km");
return; }
long
durationInSeconds;
try {
durationInSeconds = OSMReaderUtility.parseDuration(durationTag);
} catch (Exception e) {
OSM_WARNING_LOGGER.warn((("Could not parse duration tag '" + durationTag) + "' in OSM way: ") + way.getId());
return;
}
double speedInKmPerHour = (distance / 1000) / ((durationInSeconds / 60.0) / 60.0);
if (speedInKmPerHour < 0.1) {
// Often there are mapping errors like duration=30:00 (30h) instead of duration=00:30 (30min). In this case we
// ignore the duration tag. If no such cases show up anymore, because they were fixed, maybe raise the limit to find some more.
OSM_WARNING_LOGGER.warn(((((((("Unrealistic low speed calculated from duration. Maybe the duration is too long, or it is applied to a way that only represents a part of the connection? OSM way: " + way.getId()) + ". duration=") + durationTag) + " (= ") + Math.round(durationInSeconds / 60.0)) + " minutes), distance=") + distance) + " m");
return;
}
// tag will be present if 1) isCalculateWayDistance was true for this way, 2) no OSM nodes were missing
// such that the distance could actually be calculated, 3) there was a duration tag we could parse, and 4) the
// derived speed was not unrealistically slow.
way.setTag("speed_from_duration", speedInKmPerHour);
}
| 3.26 |
graphhopper_OSMReader_preprocessRelations_rdh
|
/**
* This method is called for each relation during the first pass of {@link WaySegmentParser}
*/
protected void preprocessRelations(ReaderRelation relation) {
if ((!relation.isMetaRelation()) && relation.hasTag("type", "route"))
{
// we keep track of all route relations, so they are available when we create edges later
for (ReaderRelation.Member member : relation.getMembers()) {
if (member.getType() != Type.WAY)
continue;
IntsRef oldRelationFlags = getRelFlagsMap(member.getRef());
IntsRef newRelationFlags = osmParsers.handleRelationTags(relation, oldRelationFlags);
putRelFlagsMap(member.getRef(), newRelationFlags);
}
}
Arrays.stream(RestrictionConverter.getRestrictedWayIds(relation)).forEach(restrictedWaysToEdgesMap::reserve);
}
| 3.26 |
graphhopper_OSMReader_calcDistance_rdh
|
/**
*
* @return the distance of the given way or NaN if some nodes were missing
*/
private double calcDistance(ReaderWay way, WaySegmentParser.CoordinateSupplier coordinateSupplier)
{
LongArrayList nodes = way.getNodes();
// every way has at least two nodes according to our acceptWay function
GHPoint3D prevPoint = coordinateSupplier.getCoordinate(nodes.get(0));
if (prevPoint == null)
return Double.NaN;
boolean is3D = !Double.isNaN(prevPoint.ele);
double distance = 0;
for (int i = 1; i < nodes.size(); i++) {
GHPoint3D point = coordinateSupplier.getCoordinate(nodes.get(i));
if (point == null)
return Double.NaN;
if (Double.isNaN(point.ele) == is3D)
throw
new IllegalStateException("There should be elevation data for either all points or no points at all. OSM way: " + way.getId());
distance += (is3D) ? distCalc.calcDist3D(prevPoint.lat, prevPoint.lon, prevPoint.ele, point.lat, point.lon, point.ele) : distCalc.calcDist(prevPoint.lat, prevPoint.lon, point.lat, point.lon);
prevPoint = point;
}
return distance;}
| 3.26 |
graphhopper_OSMReader_acceptWay_rdh
|
/**
* This method is called for each way during the first and second pass of the {@link WaySegmentParser}. All OSM
* ways that are not accepted here and all nodes that are not referenced by any such way will be ignored.
*/
protected boolean acceptWay(ReaderWay way) {
// ignore broken geometry
if (way.getNodes().size() < 2)
return false;
// ignore multipolygon geometry
if (!way.hasTags())
return false;
return osmParsers.acceptWay(way);
}
| 3.26 |
graphhopper_OSMReader_setArtificialWayTags_rdh
|
/**
* This method is called during the second pass of {@link WaySegmentParser} and provides an entry point to enrich
* the given OSM way with additional tags before it is passed on to the tag parsers.
*/
protected void setArtificialWayTags(PointList pointList, ReaderWay way, double distance, List<Map<String, Object>> nodeTags) {
way.setTag("node_tags", nodeTags);
way.setTag("edge_distance", distance);
way.setTag("point_list", pointList);
// we have to remove existing artificial tags, because we modify the way even though there can be multiple edges
// per way. sooner or later we should separate the artificial ('edge') tags from the way, see discussion here:
// https://github.com/graphhopper/graphhopper/pull/2457#discussion_r751155404
way.removeTag("country");
way.removeTag("country_rule");
way.removeTag("custom_areas");
List<CustomArea> customAreas;
if (areaIndex != null) {
double middleLat;
double middleLon;
if (pointList.size() > 2) {
middleLat = pointList.getLat(pointList.size() / 2);
middleLon = pointList.getLon(pointList.size() / 2);
} else {
double firstLat = pointList.getLat(0);
double
firstLon = pointList.getLon(0);
double lastLat = pointList.getLat(pointList.size() - 1);
double lastLon = pointList.getLon(pointList.size() - 1);
middleLat = (firstLat + lastLat) / 2;
middleLon = (firstLon + lastLon) / 2;
}
customAreas = areaIndex.query(middleLat, middleLon);
} else {
customAreas = emptyList();
}
// special handling for countries: since they are built-in with GraphHopper they are always fed to the EncodingManager
Country country = Country.MISSING;
State state = State.MISSING;
double countryArea = Double.POSITIVE_INFINITY;
for (CustomArea customArea : customAreas) {
// ignore areas that aren't countries
if (customArea.getProperties() == null)
continue;
String alpha2WithSubdivision = ((String) (customArea.getProperties().get(State.ISO_3166_2)));
if (alpha2WithSubdivision == null)
continue;
// the country string must be either something like US-CA (including subdivision) or just DE
String[] strs = alpha2WithSubdivision.split("-");
if ((strs.length == 0) || (strs.length > 2))
throw new IllegalStateException("Invalid alpha2: " + alpha2WithSubdivision);
Country v14 = Country.find(strs[0]);
if (v14 == null)
throw new IllegalStateException("Unknown country: " + strs[0]);
// countries with subdivision overrule those without subdivision as well as bigger ones with subdivision
if (((strs.length == 2) && ((state == State.MISSING) || (customArea.getArea() < countryArea))) || // countries without subdivision only overrule bigger ones without subdivision
((strs.length
== 1) && ((state == State.MISSING) && (customArea.getArea() < countryArea)))) {
country = v14;
state = State.find(alpha2WithSubdivision);
countryArea = customArea.getArea();
}
}
way.setTag("country", country);
way.setTag("country_state", state);
if (countryRuleFactory != null) {
CountryRule countryRule = countryRuleFactory.getCountryRule(country);
if (countryRule != null)
way.setTag("country_rule", countryRule);
}
// also add all custom areas as artificial tag
way.setTag("custom_areas", customAreas);
}
| 3.26 |
graphhopper_OSMReader_setFile_rdh
|
/**
* Sets the OSM file to be read. Supported formats include .osm.xml, .osm.gz and .xml.pbf
*/
public OSMReader setFile(File osmFile) {
this.osmFile = osmFile;
return this;
}
| 3.26 |
graphhopper_OSMReader_isCalculateWayDistance_rdh
|
/**
*
* @return true if the length of the way shall be calculated and added as an artificial way tag
*/
protected boolean isCalculateWayDistance(ReaderWay way)
{
return isFerry(way);
}
| 3.26 |
graphhopper_WayToEdgeConverter_convertForViaNode_rdh
|
/**
* Finds the edge IDs associated with the given OSM ways that are adjacent to the given via-node.
* For each way there can be multiple edge IDs and there should be exactly one that is adjacent to the via-node
* for each way. Otherwise we throw {@link OSMRestrictionException}
*/
public NodeResult convertForViaNode(LongArrayList fromWays, int viaNode, LongArrayList toWays) throws OSMRestrictionException { if (fromWays.isEmpty() || toWays.isEmpty())
throw new IllegalArgumentException("There must be at least one from- and to-way");
if ((fromWays.size() > 1) && (toWays.size() > 1))
throw new IllegalArgumentException("There can only be multiple from- or to-ways, but not both");
NodeResult result = new NodeResult(fromWays.size(), toWays.size());
for (LongCursor fromWay : fromWays)
edgesByWay.apply(fromWay.value).forEachRemaining(e -> {
if (baseGraph.isAdjacentToNode(e.value, viaNode))
result.fromEdges.add(e.value);
});
if (result.fromEdges.size() < fromWays.size())
throw new OSMRestrictionException("has from member ways that aren't adjacent to the via member node");
else if (result.fromEdges.size() > fromWays.size())
throw new OSMRestrictionException("has from member ways that aren't split at the via member node");
for (LongCursor
toWay : toWays)
edgesByWay.apply(toWay.value).forEachRemaining(e -> {
if (baseGraph.isAdjacentToNode(e.value, viaNode))
result.toEdges.add(e.value);
});
if (result.toEdges.size() < toWays.size())
throw new OSMRestrictionException("has to member ways that aren't adjacent to the via member node");
else if (result.toEdges.size() > toWays.size())
throw
new OSMRestrictionException("has to member ways that aren't split at the via member node");
return result;
}
| 3.26 |
graphhopper_WayToEdgeConverter_convertForViaWays_rdh
|
/**
 * Resolves the edge IDs for the given from-, via- and to-ways of a via-way turn restriction.
 * Since each OSM way can be split into multiple edges, for every (from, to) pair we search
 * (via DFS/backtracking in findEdgeChain) for a chain of edges leading from one of the
 * from-edges through the via-edges to one of the to-edges.
 * There can be multiple from- or to-ways (as for no_entry/no_exit restrictions), but never
 * both at once, and each (from, to) pair must yield exactly one edge chain.
 * Besides the edge IDs the result also contains the node IDs connecting the edges, so turn
 * restrictions can be added at these nodes later.
 */
public EdgeResult convertForViaWays(LongArrayList fromWays, LongArrayList viaWays, LongArrayList toWays) throws OSMRestrictionException {
    if (fromWays.isEmpty() || toWays.isEmpty() || viaWays.isEmpty())
        throw new IllegalArgumentException("There must be at least one from-, via- and to-way");
    if (fromWays.size() > 1 && toWays.size() > 1)
        throw new IllegalArgumentException("There can only be multiple from- or to-ways, but not both");
    List<IntArrayList> solutions = new ArrayList<>();
    for (LongCursor from : fromWays)
        for (LongCursor to : toWays)
            findEdgeChain(from.value, viaWays, to.value, solutions);
    // every (from, to) pair must produce exactly one chain
    final int expectedSolutions = fromWays.size() * toWays.size();
    if (solutions.size() < expectedSolutions)
        throw new OSMRestrictionException("has from/to member ways that aren't connected with the via member way(s)");
    if (solutions.size() > expectedSolutions)
        throw new OSMRestrictionException("has from/to member ways that aren't split at the via member way(s)");
    return buildResult(solutions, new EdgeResult(fromWays.size(), viaWays.size(), toWays.size()));
}
| 3.26 |
graphhopper_WayToEdgeConverter_getNodes_rdh
|
/**
 * Returns the interior nodes of the edge chain, i.e. the nodes shared by consecutive edges.
 * For a chain
 * <pre>
 *   a   b   c   d
 * 0---1---2---3---4
 * </pre>
 * with from-edge 'a' and to-edge 'd' this is [1,2,3].
 */
public IntArrayList getNodes() {
    return this.nodes;
}
| 3.26 |
graphhopper_VectorTileEncoder_clipCovers_rdh
|
/**
 * Fast "covers" check against the tile extent (tile boundary + buffer), used as a short
 * circuit for points to improve performance. Override to change clipping behavior, see
 * also {@link #clipGeometry(Geometry)}.
 *
 * @param geom a {@link Geometry} to test for "covers"
 * @return true when the current clip geometry covers the given geometry
 */
protected boolean clipCovers(Geometry geom) {
    // points get the exact envelope-covers test on their single coordinate
    if (geom instanceof Point)
        return clipGeometry.getEnvelopeInternal().covers(((Point) geom).getCoordinate());
    // everything else is tested envelope against envelope
    return clipEnvelope.covers(geom.getEnvelopeInternal());
}
| 3.26 |
graphhopper_VectorTileEncoder_addFeature_rdh
|
/**
 * Add a feature with layer name (typically feature type name), some attributes
 * and a Geometry. The Geometry must be in "pixel" space 0,0 upper left and
 * 256,256 lower right.
 * <p>
 * For optimization, geometries will be clipped and simplified. Features with
 * geometries outside of the tile will be skipped.
 *
 * @param layerName
 * 		a {@link String} with the vector tile layer name.
 * @param attributes
 * 		a {@link Map} with the vector tile feature attributes; entries with a null value are skipped.
 * @param geometry
 * 		a {@link Geometry} for the vector tile feature.
 * @param id
 * 		a long with the vector tile feature id field.
 */
public void addFeature(String layerName, Map<String, ?> attributes, Geometry geometry, long id) {
    // skip small Polygon/LineString.
    if ((geometry instanceof MultiPolygon) && (geometry.getArea() < minimumArea)) {
        return;
    }
    if ((geometry instanceof Polygon) && (geometry.getArea() < minimumArea)) {
        return;}
    if ((geometry instanceof LineString) && (geometry.getLength() < minimumLength)) {
        return;
    }
    // special handling of GeometryCollection. subclasses are not handled here.
    // NOTE: exact class check on purpose — subclasses (MultiPolygon etc.) are treated as
    // single geometries, only a plain GeometryCollection is unpacked recursively.
    if (geometry.getClass().equals(GeometryCollection.class)) {
        for (int i = 0; i < geometry.getNumGeometries(); i++) {
            Geometry subGeometry = geometry.getGeometryN(i);
            // keeping the id. any better suggestion?
            addFeature(layerName, attributes, subGeometry, id);
        }
        return;
    }
    // About to simplify and clip. Looks like simplification before clipping is
    // faster than clipping before simplification
    // simplify non-points
    if ((simplificationDistanceTolerance > 0.0) && (!(geometry instanceof Point))) {
        if ((geometry instanceof LineString) || (geometry instanceof MultiLineString)) {geometry = DouglasPeuckerSimplifier.simplify(geometry, simplificationDistanceTolerance);
        } else if ((geometry instanceof Polygon) || (geometry instanceof MultiPolygon)) {Geometry simplified = DouglasPeuckerSimplifier.simplify(geometry, simplificationDistanceTolerance);
            // extra check to prevent polygon converted to line
            if ((simplified instanceof Polygon) || (simplified instanceof MultiPolygon)) {
                geometry = simplified;
            } else {
                // Douglas-Peucker degraded the polygon; fall back to the topology-preserving variant
                geometry =
                        TopologyPreservingSimplifier.simplify(geometry, simplificationDistanceTolerance);
            }
        } else {
            geometry = TopologyPreservingSimplifier.simplify(geometry, simplificationDistanceTolerance);
        }
    }
    // clip geometry
    if (geometry instanceof Point) {
        // points are only checked against the clip area, never cut
        if (!clipCovers(geometry)) {
            return;
        }
    } else {
        geometry =
                clipGeometry(geometry);
    }
    // no need to add empty geometry
    if ((geometry == null) || geometry.isEmpty()) {
        return;
    }
    // lazily create the target layer on first use
    Layer layer = layers.get(layerName);
    if (layer == null) {
        layer = new Layer();
        layers.put(layerName, layer);
    }
    Feature feature = new Feature();
    feature.geometry = geometry;
    feature.id = id;
    // keep the auto-id counter ahead of any explicitly assigned id
    this.autoincrement = Math.max(this.autoincrement, id + 1);
    for (Map.Entry<String, ?> e : attributes.entrySet()) {
        // skip attribute without value
        if (e.getValue() == null) {
            continue;
        }
        // tags are stored as alternating key-index/value-index pairs into the layer tables
        feature.tags.add(layer.key(e.getKey()));
        feature.tags.add(layer.value(e.getValue()));
    }
    layer.features.add(feature);
}
| 3.26 |
graphhopper_VectorTileEncoder_commands_rdh
|
/**
 * Encodes the given coordinates as vector tile geometry commands (MoveTo/LineTo and,
 * when requested, ClosePath). Commands are encoded as uint32 varints, vertex parameters
 * as zigzag-encoded sint32 deltas relative to the previous position, starting at (0,0).
 * <p>
 * Example: MoveTo(3, 6), LineTo(8, 12), LineTo(20, 34), ClosePath is encoded as
 * [9 3 6 18 5 6 12 22 15]: [00001 001] = MoveTo length 1, then relative MoveTo(+3, +6);
 * [00010 010] = LineTo length 2, then relative LineTo(+5, +6) and LineTo(+12, +22);
 * finally command type 7 (ClosePath) with length 1.
 *
 * @param cs the coordinates to encode
 * @param closePathAtEnd whether to append a ClosePath command
 * @return the encoded command/parameter integers
 */
List<Integer> commands(Coordinate[] cs, boolean closePathAtEnd) {
    // delegate to the three-argument variant; the multi-point flag is off here
    return commands(cs, closePathAtEnd, false);
}
| 3.26 |
graphhopper_VectorTileEncoder_encode_rdh
|
/**
 * Serializes all layers and features added so far into a Mapbox vector tile.
 *
 * @return a byte array with the vector tile
 */
public byte[] encode() {
    VectorTile.Tile.Builder tile = Tile.newBuilder();
    for (Map.Entry<String, Layer> e : layers.entrySet()) {
        String layerName = e.getKey();
        Layer layer = e.getValue();
        VectorTile.Tile.Layer.Builder tileLayer = Tile.Layer.newBuilder();
        tileLayer.setVersion(2);
        tileLayer.setName(layerName);
        tileLayer.addAllKeys(layer.keys());
        // map each attribute value to the matching typed protobuf Value field
        for (Object value : layer.values()) {
            VectorTile.Tile.Value.Builder
                    v17 = Tile.Value.newBuilder();
            if (value instanceof String) {
                v17.setStringValue(((String) (value)));
            } else if (value instanceof Integer) {
                v17.setSintValue(((Integer) (value)).intValue());
            } else if (value instanceof Long) {
                v17.setSintValue(((Long) (value)).longValue());
            } else if (value instanceof Float) {
                v17.setFloatValue(((Float) (value)).floatValue());
            } else if (value instanceof Double) {
                v17.setDoubleValue(((Double) (value)).doubleValue());
            } else if (value instanceof BigDecimal) {
                // BigDecimal goes through its string form to avoid losing precision
                v17.setStringValue(value.toString());} else if (value instanceof Number) {
                v17.setDoubleValue(((Number) (value)).doubleValue());
            } else if (value instanceof Boolean) {
                v17.setBoolValue(((Boolean) (value)).booleanValue());} else
            {
                // unknown types fall back to their string representation
                v17.setStringValue(value.toString());
            }
            tileLayer.addValues(v17.build());
        }
        tileLayer.setExtent(extent);
        for
        (Feature feature : layer.features) {
            Geometry geometry = feature.geometry;
            VectorTile.Tile.Feature.Builder featureBuilder = Tile.Feature.newBuilder();
            featureBuilder.addAllTags(feature.tags);
            if (feature.id >= 0) {
                featureBuilder.setId(feature.id);
            }
            GeomType geomType =
                    toGeomType(geometry);
            // x/y are the encoder's delta cursor; reset before encoding each feature
            x = 0;
            y = 0;List<Integer> commands = commands(geometry);
            // skip features with no geometry commands
            if (commands.isEmpty()) {
                continue;
            }
            // Extra step to parse and check validity and try to repair. Probably expensive.
            if ((simplificationDistanceTolerance > 0.0) && (geomType == GeomType.POLYGON)) {
                double scale =
                        (autoScale) ? extent / 256.0 : 1.0;
                Geometry decodedGeometry = VectorTileDecoder.decodeGeometry(gf, geomType, commands, scale);
                if (!isValid(decodedGeometry)) {
                    // Invalid. Try more simplification and without preserving topology.
                    geometry = DouglasPeuckerSimplifier.simplify(geometry, simplificationDistanceTolerance * 2.0); if (geometry.isEmpty()) {
                        continue;
                    }
                    // re-encode with the repaired geometry, resetting the cursor again
                    geomType = toGeomType(geometry);
                    x = 0;
                    y = 0;
                    commands = commands(geometry);}
            }
            featureBuilder.setType(geomType);
            featureBuilder.addAllGeometry(commands);
            tileLayer.addFeatures(featureBuilder.build());
        }
        tile.addLayers(tileLayer.build());
    }
    return tile.build().toByteArray();
}
| 3.26 |
graphhopper_VectorTileEncoder_clipGeometry_rdh
|
/**
 * Clip geometry according to buffer given at construct time. This method
 * can be overridden to change clipping behavior. See also
 * {@link #clipCovers(Geometry)}.
 *
 * @param geometry
 * 		a {@link Geometry} to clip against the current clip geometry
 * @return the clipped {@link Geometry}; the original geometry is returned unchanged when it
 * lies entirely inside the clip envelope or when the intersection fails
 */
protected Geometry clipGeometry(Geometry geometry) {
    try {
        // fast path: fully inside the clip envelope, nothing to cut
        if (clipEnvelope.contains(geometry.getEnvelopeInternal())) {
            return geometry;
        }
        Geometry original = geometry;
        geometry = clipGeometry.intersection(original);
        // some times a intersection is returned as an empty geometry.
        // going via wkt fixes the problem.
        if (geometry.isEmpty() && clipGeometryPrepared.intersects(original)) {
            Geometry originalViaWkt = new WKTReader().read(original.toText());
            geometry = clipGeometry.intersection(originalViaWkt);
        }
        return
                geometry;
    } catch (TopologyException e) {
        // could not intersect. original geometry will be used instead.
        return geometry;
    } catch (ParseException e1) {
        // could not encode/decode WKT. original geometry will be used
        // instead.
        return geometry;
    }
}
| 3.26 |
graphhopper_FootAccessParser_getAccess_rdh
|
/**
 * Determines whether and how a pedestrian may use the given OSM way.
 * Some ways are okay but not separate for pedestrians.
 *
 * @param way the OSM way with its tags
 * @return {@code WAY}, {@code FERRY} or {@code CAN_SKIP} for inaccessible ways
 */
public WayAccess getAccess(ReaderWay way) {
    String highwayValue = way.getTag("highway");
    if (highwayValue == null) {
        // non-highway ways: only ferries, railway platforms and piers are candidates
        WayAccess acceptPotentially = WayAccess.CAN_SKIP;
        if (FerrySpeedCalculator.isFerry(way)) {
            String footTag = way.getTag("foot");
            if ((footTag == null) || intendedValues.contains(footTag))
                acceptPotentially = WayAccess.FERRY;
        }
        // special case not for all acceptedRailways, only platform
        if (way.hasTag("railway", "platform"))
            acceptPotentially = WayAccess.WAY;
        if (way.hasTag("man_made", "pier"))
            acceptPotentially = WayAccess.WAY;
        if (!acceptPotentially.canSkip()) {
            // a candidate can still be blocked by an access restriction tag
            if (way.hasTag(restrictions, restrictedValues) && (!getConditionalTagInspector().isRestrictedWayConditionallyPermitted(way)))
                return WayAccess.CAN_SKIP;
            return acceptPotentially;
        }
        return WayAccess.CAN_SKIP;
    }
    // other scales are too dangerous, see http://wiki.openstreetmap.org/wiki/Key:sac_scale
    if ((way.getTag("sac_scale") != null) && (!way.hasTag("sac_scale", allowedSacScale)))
        return WayAccess.CAN_SKIP;
    boolean permittedWayConditionallyRestricted = getConditionalTagInspector().isPermittedWayConditionallyRestricted(way);
    boolean restrictedWayConditionallyPermitted = getConditionalTagInspector().isRestrictedWayConditionallyPermitted(way);
    // evaluate the highest-priority access tag; it may hold multiple ';'-separated values
    String firstValue = way.getFirstPriorityTag(restrictions);
    if (!firstValue.isEmpty()) {String[] restrict = firstValue.split(";");
        for (String v7 : restrict) {if (restrictedValues.contains(v7) && (!restrictedWayConditionallyPermitted))
            return WayAccess.CAN_SKIP;
            if (intendedValues.contains(v7) && (!permittedWayConditionallyRestricted))
                return WayAccess.WAY;
        }
    }
    // a sidewalk makes the way usable regardless of the highway type
    if (way.hasTag("sidewalk", sidewalkValues))
        return WayAccess.WAY;
    if (!allowedHighwayTags.contains(highwayValue))
        return WayAccess.CAN_SKIP;
    if (way.hasTag("motorroad", "yes"))
        return WayAccess.CAN_SKIP;
    // optionally block fords (both highway=ford and the ford tag)
    if (isBlockFords() && ("ford".equals(highwayValue) || way.hasTag("ford")))
        return WayAccess.CAN_SKIP;
    if (permittedWayConditionallyRestricted)
        return WayAccess.CAN_SKIP;
    return
            WayAccess.WAY;
}
| 3.26 |
graphhopper_DijkstraOneToMany_getMemoryUsageAsString_rdh
|
/**
 * Returns the approximate amount of memory currently used by this instance, in MB.
 */
public String getMemoryUsageAsString() {
    long len = weights.length;
    // per node: 8 bytes weight + 2 * 4 bytes; plus 4 bytes per changedNodes entry
    // and 4 + 4 bytes per heap entry
    long bytes = (8L + 4L + 4L) * len + changedNodes.getCapacity() * 4L + heap.getCapacity() * (4L + 4L);
    return bytes / Helper.MB + "MB";
}
| 3.26 |
graphhopper_DijkstraOneToMany_clear_rdh
|
/**
 * Marks the internal cache as stale. Call this when starting from a different node.
 */
public DijkstraOneToMany clear() {
    this.doClear = true;
    return this;
}
| 3.26 |
graphhopper_EdgeBasedTarjanSCC_findComponentsForStartEdges_rdh
|
/**
 * Like {@link #findComponents(Graph, EdgeTransitionFilter, boolean)}, but the search starts
 * only at the given edges. The search may still expand to other edges, which can be
 * controlled via the edgeTransitionFilter. Single edge components are never returned by
 * this method (excludeSingleEdgeComponents is set to true).
 */
public static ConnectedComponents findComponentsForStartEdges(Graph graph, EdgeTransitionFilter edgeTransitionFilter, IntContainer edges) {
    EdgeBasedTarjanSCC tarjan = new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, true);
    return tarjan.findComponentsForStartEdges(edges);
}
| 3.26 |
graphhopper_EdgeBasedTarjanSCC_findComponentsRecursive_rdh
|
/**
 * Runs Tarjan's algorithm recursively. This needs a large stack size for large graphs
 * (e.g. `-Xss1024M`), so the explicit-stack version ({@link #findComponents}) is usually
 * preferred; this variant exists because it is easier to understand.
 *
 * @see #findComponents(Graph, EdgeTransitionFilter, boolean)
 */
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
    EdgeBasedTarjanSCC tarjan = new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents);
    return tarjan.findComponentsRecursive();
}
| 3.26 |
graphhopper_EdgeBasedTarjanSCC_findComponents_rdh
|
/**
 * Runs Tarjan's algorithm using an explicit stack.
 *
 * @param edgeTransitionFilter only edge transitions accepted by this filter are considered
 *                             while exploring the graph; paths containing a rejected turn are
 *                             ignored, so edges connected only through such a turn do not end
 *                             up in the same component
 * @param excludeSingleEdgeComponents when true, components containing a single edge are
 *                                    omitted from the result, which can save memory
 */
public static ConnectedComponents findComponents(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
    EdgeBasedTarjanSCC tarjan = new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents);
    return tarjan.m0();
}
| 3.26 |
graphhopper_EdgeBasedTarjanSCC_getSingleEdgeComponents_rdh
|
/**
 * Returns the edge-keys that each form their own single-edge component. This set is empty
 * when single-edge components were excluded via excludeSingleEdgeComponents.
 */
public BitSet getSingleEdgeComponents() {
    return this.singleEdgeComponents;
}
| 3.26 |
graphhopper_ReaderRelation_getRef_rdh
|
/**
 * Returns the OSM ID this relation member refers to.
 */
public long getRef() {
    return this.f0;
}
| 3.26 |
graphhopper_PointList_copy_rdh
|
/**
 * Creates a deep copy of this point list for the given range.
 *
 * @param from first index to copy (inclusive)
 * @param end  index after the last point to copy (exclusive)
 * @throws IllegalArgumentException when the interval is invalid for this list
 */
public PointList copy(int from, int end) {
    if (from > end)
        throw new IllegalArgumentException("from must be smaller or equal to end");
    if (from < 0 || end > size())
        throw new IllegalArgumentException("Illegal interval: " + from + ", " + end + ", size:" + size());
    // resolve shallow views to the underlying list and shift the interval accordingly
    PointList source = this;
    if (this instanceof ShallowImmutablePointList) {
        ShallowImmutablePointList shallow = (ShallowImmutablePointList) this;
        source = shallow.wrappedPointList;
        from += shallow.fromOffset;
        end += shallow.fromOffset;
    }
    int len = end - from;
    PointList result = new PointList(len, is3D());
    result.size = len;
    result.isImmutable = isImmutable();
    System.arraycopy(source.latitudes, from, result.latitudes, 0, len);
    System.arraycopy(source.longitudes, from, result.longitudes, 0, len);
    if (is3D())
        System.arraycopy(source.elevations, from, result.elevations, 0, len);
    return result;
}
| 3.26 |
graphhopper_PointList_m2_rdh
|
/**
 * Creates a deep, mutable copy of this point list, optionally reversed. An immutable or
 * {@link ShallowImmutablePointList} source yields a plain mutable PointList.
 */
public PointList m2(boolean reverse) {
    PointList result = new PointList(size(), is3D());
    boolean withElevation = is3D();
    for (int i = 0; i < size(); i++) {
        if (withElevation)
            result.add(getLat(i), getLon(i), getEle(i));
        else
            result.add(getLat(i), getLon(i));
    }
    if (reverse)
        result.reverse();
    return result;
}
| 3.26 |
graphhopper_PointList_makeImmutable_rdh
|
/**
 * Permanently marks this list as immutable; there is no way back. This guarantees the
 * consistency of shallow copies — to modify the points again, create a deep copy.
 */
public PointList makeImmutable() {
    isImmutable = true;
    return this;
}
| 3.26 |
graphhopper_PointList_parse2DJSON_rdh
|
/**
 * Fills this list from a JSON-array-like string of [lon,lat] pairs, e.g.
 * "[lon1,lat1], [lon2,lat2], ...". Elevation is set to NaN for every point.
 */
public void parse2DJSON(String str) {
    for (String entry : str.split("\\[")) {
        // skip the (possibly empty) fragment before the first '[' and any blanks
        if (entry.trim().length() == 0)
            continue;
        String[] lonLat = entry.split(",");
        double lon = Double.parseDouble(lonLat[0].trim());
        double lat = Double.parseDouble(lonLat[1].replace("]", "").trim());
        add(lat, lon, Double.NaN);
    }
}
| 3.26 |
graphhopper_PointList_shallowCopy_rdh
|
/**
 * Creates a shallow copy of this point list for the range [from, end).
 *
 * @param makeImmutable when true this list is made immutable first; otherwise you must
 *                      ensure consistency yourself, since later changes to this list would
 *                      corrupt the shallow copy
 */
public PointList shallowCopy(final int from, final int end, boolean makeImmutable) {
    if (makeImmutable)
        makeImmutable();
    return new ShallowImmutablePointList(from, end, this);
}
| 3.26 |
graphhopper_GHMatrixBatchRequester_setMaxIterations_rdh
|
/**
 * Internal tuning parameter; increase only for very large matrices.
 */
public GHMatrixBatchRequester setMaxIterations(int maxIterations) {
    this.maxIterations = maxIterations;
    return this;
}
| 3.26 |
graphhopper_LandmarkSuggestion_readLandmarks_rdh
|
/**
 * Reads landmark suggestions from a file with one "lon,lat" pair per line; lines starting
 * with an alphabetic character are ignored. You can create such a file manually via
 * geojson.io -> Save as CSV. Optionally add a line
 * <pre>#BBOX:minLat,minLon,maxLat,maxLon</pre>
 * <p>
 * to specify an explicit bounding box. TODO: support GeoJSON instead.
 *
 * @throws RuntimeException if a line cannot be parsed or no close node was found for a point
 */
public static LandmarkSuggestion readLandmarks(String file, LocationIndex locationIndex) throws IOException {
    // landmarks should be suited for all vehicles
    EdgeFilter edgeFilter = EdgeFilter.ALL_EDGES;
    List<String> lines = Helper.readFile(file);
    List<Integer> landmarkNodeIds = new ArrayList<>();
    BBox bbox = BBox.createInverse(false);
    int lmSuggestionIdx = 0;
    // collect all snapping errors so the user can fix them in one go
    StringBuilder errors = new StringBuilder();
    for (String lmStr : lines) {
        if (lmStr.startsWith("#BBOX:")) {
            bbox = BBox.parseTwoPoints(lmStr.substring("#BBOX:".length()));
            continue;
        } else if (lmStr.isEmpty() || Character.isAlphabetic(lmStr.charAt(0))) {
            // skip blank lines and header/comment lines
            continue;
        }
        GHPoint point = GHPoint.fromStringLonLat(lmStr);
        if (point == null)
            throw new RuntimeException("Invalid format " + lmStr + " for point " + lmSuggestionIdx);
        lmSuggestionIdx++;
        Snap result = locationIndex.findClosest(point.lat, point.lon, edgeFilter);
        if (!result.isValid()) {
            errors.append("Cannot find close node found for landmark suggestion[")
                    .append(lmSuggestionIdx).append("]=").append(point).append(".\n");
            continue;
        }
        bbox.update(point.lat, point.lon);
        landmarkNodeIds.add(result.getClosestNode());
    }
    if (errors.length() > 0)
        throw new RuntimeException(errors.toString());
    return new LandmarkSuggestion(landmarkNodeIds, bbox);
}
| 3.26 |
graphhopper_TourStrategy_m0_rdh
|
/**
 * Randomly varies the given distance by up to +-10%.
 */
protected double m0(double distance) {
    // nextDouble must be drawn before nextBoolean to keep the random sequence identical
    double delta = random.nextDouble() * 0.1 * distance;
    return random.nextBoolean() ? distance - delta : distance + delta;
}
| 3.26 |
graphhopper_MiniPerfTest_getMin_rdh
|
/**
 * Returns the minimum time over all measured calls, in ms.
 */
public double getMin() {
    return this.min / NS_PER_MS;
}
| 3.26 |
graphhopper_MiniPerfTest_getMax_rdh
|
/**
 * Returns the maximum time over all measured calls, in ms.
 */
public double getMax() {
    return this.max / NS_PER_MS;
}
| 3.26 |
graphhopper_MiniPerfTest_getMean_rdh
|
/**
 * Returns the mean time per call, in ms.
 */
public double getMean() {
    double totalMs = getSum();
    return totalMs / counts;
}
| 3.26 |
graphhopper_MiniPerfTest_getSum_rdh
|
/**
 * Returns the accumulated time over all measured calls, in ms.
 */
public double getSum() {
    return this.fullTime / NS_PER_MS;
}
| 3.26 |
graphhopper_MiniPerfTest_start_rdh
|
/**
 * Runs the given task: first a warmup phase, then {@code counts} timed iterations.
 * Important: use the dummy sum somewhere in your program (via {@link #getDummySum()} or
 * {@link #getReport()}) so the JVM cannot skip the calculation.
 */
public MiniPerfTest start(Task m) {
    // warmup results feed dummySum too, so the JIT cannot eliminate the work
    int warmupCount = Math.max(1, counts / 3);
    for (int i = 0; i < warmupCount; i++)
        dummySum += m.doCalc(true, i);
    long fullStart = System.nanoTime();
    for (int i = 0; i < counts; i++) {
        long iterStart = System.nanoTime();
        dummySum += m.doCalc(false, i);
        long elapsed = System.nanoTime() - iterStart;
        min = Math.min(min, elapsed);
        max = Math.max(max, elapsed);
    }
    fullTime = System.nanoTime() - fullStart;
    return this;
}
| 3.26 |
graphhopper_PbfFieldDecoder_decodeString_rdh
|
/**
 * Decodes a raw string reference into a String.
 *
 * @param rawString the PBF string-table index
 * @return the referenced String
 */
public String decodeString(int rawString) {
    return this.strings[rawString];
}
| 3.26 |
graphhopper_PbfFieldDecoder_decodeLongitude_rdh
|
/**
 * Decodes a raw longitude value into degrees.
 *
 * @param rawLongitude the PBF encoded value
 * @return the longitude in degrees
 */
public double decodeLongitude(long rawLongitude) {
    // apply block offset and granularity before scaling to degrees
    long offsetValue = coordLongitudeOffset + coordGranularity * rawLongitude;
    return COORDINATE_SCALING_FACTOR * offsetValue;
}
| 3.26 |
graphhopper_PbfFieldDecoder_m0_rdh
|
/**
 * Decodes a raw timestamp value into a Date.
 *
 * @param rawTimestamp the PBF encoded timestamp
 * @return the timestamp as a Date
 */
public Date m0(long rawTimestamp) {
    // the raw value is expressed in units of dateGranularity milliseconds
    return new Date(rawTimestamp * dateGranularity);
}
| 3.26 |
graphhopper_PbfFieldDecoder_decodeLatitude_rdh
|
/**
 * Decodes a raw latitude value into degrees.
 *
 * @param rawLatitude the PBF encoded value
 * @return the latitude in degrees
 */
public double decodeLatitude(long rawLatitude) {
    // apply block offset and granularity before scaling to degrees
    long offsetValue = coordLatitudeOffset + coordGranularity * rawLatitude;
    return COORDINATE_SCALING_FACTOR * offsetValue;
}
| 3.26 |
graphhopper_BaseGraph_copyProperties_rdh
|
/**
 * Copies the properties of one {@link EdgeIteratorState} to another.
 *
 * @return the target iterator the properties were copied to
 */
EdgeIteratorState copyProperties(EdgeIteratorState from, EdgeIteratorStateImpl to) {
    // the raw flags are written directly via the low-level store API
    store.writeFlags(store.toEdgePointer(to.getEdge()), from.getFlags());
    // the remaining properties are copied via the higher-level API
    to.m2(from.getDistance())
            .setKeyValues(from.getKeyValues())
            .setWayGeometry(from.fetchWayGeometry(FetchMode.PILLAR_ONLY));
    return to;
}
| 3.26 |
graphhopper_BaseGraph_flushAndCloseGeometryAndNameStorage_rdh
|
/**
 * Flush and free resources that are not needed for post-processing (way geometries and KVStorage for edges).
 * The header is written before flushing, then each storage is flushed and closed in turn.
 */
public void flushAndCloseGeometryAndNameStorage() {
    setWayGeometryHeader();wayGeometry.flush();
    wayGeometry.close();
    edgeKVStorage.flush();
    edgeKVStorage.close();
}
| 3.26 |
graphhopper_BaseGraph_init_rdh
|
/**
 * Similar to {@link #init(int edgeId, int adjNode)}, but retrieves the edge in a specific
 * direction directly from an edge key.
 */
final void init(int edgeKey) {
    if (edgeKey < 0)
        throw new IllegalArgumentException("edge keys must not be negative, given: " + edgeKey);
    edgeId = GHUtility.getEdgeFromEdgeKey(edgeKey);
    edgePointer = store.toEdgePointer(edgeId);
    int nodeA = store.getNodeA(edgePointer);
    int nodeB = store.getNodeB(edgePointer);
    // odd keys denote the reverse direction: base and adjacent node are swapped
    reverse = (edgeKey % 2) != 0;
    baseNode = reverse ? nodeB : nodeA;
    adjNode = reverse ? nodeA : nodeB;
}
| 3.26 |
graphhopper_BaseGraph_m1_rdh
|
// todo: maybe rename later, but for now this makes it easier to replace GraphBuilder
public Builder m1(boolean withTurnCosts) {
    this.withTurnCosts = withTurnCosts;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_process_rdh
|
/**
 * Creates the graph from OSM data: builds the encoding manager, parsers and base graph,
 * then imports, cleans up and post-processes under a write lock (for storing directories).
 *
 * @param closeEarly release resources as early as possible during post-processing
 */
protected void process(boolean closeEarly) {
    GHDirectory directory = new GHDirectory(ghLocation, dataAccessDefaultType);
    directory.configure(dataAccessConfig);
    // optional import-time features enabled via their configuration
    boolean withUrbanDensity = urbanDensityCalculationThreads > 0;
    boolean withMaxSpeedEstimation = maxSpeedCalculator != null;
    Map<String, String> vehiclesByName = getVehiclesByName(vehiclesString, profilesByName.values());
    List<String> encodedValueStrings = getEncodedValueStrings(encodedValuesString);
    encodingManager = buildEncodingManager(vehiclesByName, encodedValueStrings, withUrbanDensity, withMaxSpeedEstimation, profilesByName.values());
    osmParsers = buildOSMParsers(vehiclesByName, encodedValueStrings, osmReaderConfig.getIgnoredHighways(), dateRangeParserString);
    baseGraph = new BaseGraph.Builder(getEncodingManager()).setDir(directory).set3D(hasElevation()).withTurnCosts(encodingManager.needsTurnCostsSupport()).setSegmentSize(defaultSegmentSize).build();
    properties = new StorableProperties(directory);
    checkProfilesConsistency();
    GHLock lock = null;
    try {
        // only storing directories need a file lock to guard against concurrent writers
        if (directory.getDefaultType().isStoring()) {lockFactory.setLockDir(new File(ghLocation));
            lock = lockFactory.create(fileLockName, true);
            if (!lock.tryLock())
                throw new RuntimeException("To avoid multiple writers we need to obtain a write lock but it failed. In " + ghLocation, lock.getObtainFailedReason());
        }
        ensureWriteAccess();
        importOSM();
        cleanUp();
        postImport();
        postProcessing(closeEarly);
        flush();
    } finally {
        // always release the lock, even when the import failed
        if (lock != null)
            lock.release();
    }
}
| 3.26 |
graphhopper_GraphHopper_initLocationIndex_rdh
|
/**
 * Creates the location index once the import is done. Must not be called twice.
 */
protected void initLocationIndex() {
    if (locationIndex != null)
        throw new IllegalStateException("Cannot initialize locationIndex twice!");
    locationIndex = createLocationIndex(baseGraph.getDirectory());
}
| 3.26 |
graphhopper_GraphHopper_close_rdh
|
/**
 * Releases all associated resources like memory or files. But it does not remove them. To
 * remove the files created in graphhopperLocation you have to call clean().
 */
public void close() {
    // close in order: graph, properties, CH graphs, landmarks, location index
    if (baseGraph != null)baseGraph.close();
    if (properties != null)
        properties.close();
    chGraphs.values().forEach(RoutingCHGraph::close);
    landmarks.values().forEach(LandmarkStorage::close);
    if (locationIndex != null)
        locationIndex.close();
    try {
        lockFactory.forceRemove(fileLockName, true);
    } catch (Exception ex) {// silently fail e.g. on Windows where we cannot remove an unreleased native lock
    }
}
| 3.26 |
graphhopper_GraphHopper_getBaseGraph_rdh
|
/**
 * Returns the underlying graph used in algorithms.
 *
 * @throws IllegalStateException if the graph is not instantiated
 */
public BaseGraph getBaseGraph() {
    if (baseGraph == null)
        throw new IllegalStateException("GraphHopper storage not initialized");
    return this.baseGraph;
}
| 3.26 |
graphhopper_GraphHopper_setProfiles_rdh
|
/**
 * Sets the routing profiles supported by this GraphHopper instance. Only the given profiles
 * can be used for routing without preparation and for CH/LM preparation.
 * <p>
 * Example setting up two CH profiles and one LM profile via the Java API:
 *
 * <pre>
 * {@code hopper.setProfiles(
 *   new Profile("my_car").setVehicle("car"),
 *   new Profile("your_bike").setVehicle("bike")
 * );
 * hopper.getCHPreparationHandler().setCHProfiles(
 *   new CHProfile("my_car"),
 *   new CHProfile("your_bike")
 * );
 * hopper.getLMPreparationHandler().setLMProfiles(
 *   new LMProfile("your_bike")
 * );}
 * </pre>
 * <p>
 * See also https://github.com/graphhopper/graphhopper/pull/1922.
 *
 * @see CHPreparationHandler#setCHProfiles
 * @see LMPreparationHandler#setLMProfiles
 */
public GraphHopper setProfiles(Profile... profiles) {
    List<Profile> profileList = Arrays.asList(profiles);
    return setProfiles(profileList);
}
| 3.26 |
graphhopper_GraphHopper_setAllowWrites_rdh
|
/**
 * Controls whether GraphHopper is allowed to write. For read-only filesystems no lock file
 * can be created, so disabling writes avoids write locks altogether.
 */
public GraphHopper setAllowWrites(boolean allowWrites) {
    this.allowWrites = allowWrites;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_postProcessing_rdh
|
/**
 * Runs both after the import and when loading an existing Graph. Initializes the location
 * index, imports public transit and runs the LM and CH preparations (in that order).
 *
 * @param closeEarly
 * 		release resources as early as possible
 */
protected void postProcessing(boolean closeEarly) {
    initLocationIndex();
    importPublicTransit();
    if (closeEarly) {
        boolean includesCustomProfiles = profilesByName.values().stream().anyMatch(p -> CustomWeighting.NAME.equals(p.getWeighting()));
        // when there are custom profiles we must not close way geometry or KVStorage, because
        // they might be needed to evaluate the custom weightings for the following preparations
        if (!includesCustomProfiles)baseGraph.flushAndCloseGeometryAndNameStorage();
    }
    if (lmPreparationHandler.isEnabled())
        m5(closeEarly);
    // we needed the location index for the LM preparation, but we don't need it for CH
    if (closeEarly)
        locationIndex.close();
    if (chPreparationHandler.isEnabled())
        loadOrPrepareCH(closeEarly);
}
| 3.26 |
graphhopper_GraphHopper_importAndClose_rdh
|
/**
 * Imports and processes the data unless a graph was already imported, storing it to disk
 * when complete, then releases all resources.
 */
public void importAndClose() {
    if (load()) {
        printInfo();
        logger.info("Graph already imported into " + ghLocation);
    } else {
        printInfo();
        process(true);
    }
    close();
}
| 3.26 |
graphhopper_GraphHopper_setUrbanDensityCalculation_rdh
|
/**
 * Configures the urban density classification that labels each edge as 'rural',
 * 'residential' or 'city', see {@link UrbanDensity}.
 *
 * @param residentialAreaRadius      radius in meters of the area whose road density is used to
 *                                   identify residential areas; larger values take longer to compute
 * @param residentialAreaSensitivity trade-off knob: too high classifies too many roads as
 *                                   residential, too low not enough
 * @param cityAreaRadius             radius in meters of the area whose road density is used to
 *                                   identify city areas; larger values take longer to compute,
 *                                   zero skips the city classification entirely
 * @param cityAreaSensitivity        trade-off knob: too high classifies too many roads as city,
 *                                   too low not enough
 * @param threads                    number of threads for the calculation; zero skips the urban
 *                                   density calculation entirely
 */
public GraphHopper setUrbanDensityCalculation(double residentialAreaRadius, double residentialAreaSensitivity, double cityAreaRadius, double cityAreaSensitivity, int threads) {
    // changing this after the graph was loaded is not supported
    ensureNotLoaded();
    this.residentialAreaRadius = residentialAreaRadius;
    this.residentialAreaSensitivity = residentialAreaSensitivity;
    this.cityAreaRadius = cityAreaRadius;
    this.cityAreaSensitivity = cityAreaSensitivity;
    this.urbanDensityCalculationThreads = threads;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_cleanUp_rdh
|
/**
 * Internal cleanup step run after the import: removes small subnetworks from the graph and
 * stores the configured profiles in the properties.
 */
protected void cleanUp() {
    PrepareRoutingSubnetworks preparation = new PrepareRoutingSubnetworks(baseGraph.getBaseGraph(), buildSubnetworkRemovalJobs());
    preparation.setMinNetworkSize(minNetworkSize);
    preparation.setThreads(subnetworksThreads);
    preparation.doWork();
    properties.put("profiles", getProfilesString());
    logger.info("nodes: " + Helper.nf(baseGraph.getNodes()) + ", edges: " + Helper.nf(baseGraph.getEdges()));
}
| 3.26 |
graphhopper_GraphHopper_getLocationIndex_rdh
|
/**
 * Returns the location index created from the graph.
 *
 * @throws IllegalStateException
 *         if the index has not been initialized yet
 */
public LocationIndex getLocationIndex() {
    if (locationIndex != null)
        return locationIndex;
    throw new IllegalStateException("LocationIndex not initialized");
}
| 3.26 |
graphhopper_GraphHopper_m5_rdh
|
/**
 * For landmarks it is required to always call this method: either it creates the landmark data or it loads it.
 *
 * After this call the {@code landmarks} map contains, for every LM profile, the
 * {@code LandmarkStorage} to be used for that profile - either one loaded from
 * disk or one freshly prepared.
 *
 * @param closeEarly passed through to the preparation step (see m6)
 */
protected void m5(boolean closeEarly) {
    // guard: a stored LM preparation must match the currently configured profile version,
    // otherwise the stale data on disk would silently produce wrong approximations
    for (LMProfile profile : lmPreparationHandler.getLMProfiles())if ((!getLMProfileVersion(profile.getProfile()).isEmpty()) && (!getLMProfileVersion(profile.getProfile()).equals("" + profilesByName.get(profile.getProfile()).getVersion())))
        throw new
        IllegalArgumentException(("LM preparation of " + profile.getProfile()) + " already exists in storage and doesn't match configuration");
    // we load landmark storages that already exist and prepare the other ones
    List<LMConfig> lmConfigs = createLMConfigs(lmPreparationHandler.getLMProfiles());
    List<LandmarkStorage> loaded = lmPreparationHandler.load(lmConfigs, baseGraph, encodingManager);
    List<LMConfig> loadedConfigs
     = loaded.stream().map(LandmarkStorage::getLMConfig).collect(Collectors.toList());
    // configs that could not be loaded from disk still need to be prepared now
    List<LMConfig> configsToPrepare = lmConfigs.stream().filter(c -> !loadedConfigs.contains(c)).collect(Collectors.toList());
    List<PrepareLandmarks> prepared = m6(closeEarly, configsToPrepare);
    // we map all profile names for which there is LM support to the according LM storages
    landmarks = new LinkedHashMap<>();
    for (LMProfile lmp : lmPreparationHandler.getLMProfiles()) {
        // cross-querying
        // a profile may reuse the preparation of another profile ("preparation profile")
        String prepProfile = (lmp.usesOtherPreparation()) ? lmp.getPreparationProfile() : lmp.getProfile();
        Optional<LandmarkStorage> loadedLMS = loaded.stream().filter(lms -> lms.getLMConfig().getName().equals(prepProfile)).findFirst();
        Optional<PrepareLandmarks> v101 = prepared.stream().filter(pl -> pl.getLandmarkStorage().getLMConfig().getName().equals(prepProfile)).findFirst();
        // each config went through exactly one of the two paths above, never both
        if (loadedLMS.isPresent() && v101.isPresent())
            throw new IllegalStateException("LM should be either loaded or prepared, but not both: " + prepProfile);
        else if (v101.isPresent()) {
            // freshly prepared: record the profile version so the guard above works on the next load
            setLMProfileVersion(lmp.getProfile(), profilesByName.get(lmp.getProfile()).getVersion());
            landmarks.put(lmp.getProfile(), v101.get().getLandmarkStorage());
        } else
            loadedLMS.ifPresent(landmarkStorage -> landmarks.put(lmp.getProfile(), landmarkStorage));
    }
}
| 3.26 |
graphhopper_GraphHopper_setGraphHopperLocation_rdh
|
/**
 * Sets the folder where the graph data is stored and loaded from.
 *
 * @throws IllegalArgumentException if the given location is null
 */
public GraphHopper setGraphHopperLocation(String ghLocation) {
    ensureNotLoaded();
    if (ghLocation != null) {
        this.ghLocation = ghLocation;
        return this;
    }
    throw new IllegalArgumentException("graphhopper location cannot be null");
}
| 3.26 |
graphhopper_GraphHopper_importOrLoad_rdh
|
/**
 * Loads the graph from disk if it already exists at the configured location,
 * otherwise imports the provided data and creates the graph. Depending on the
 * settings the freshly created graph is stored to disk, so a second call only
 * loads it, which is usually much faster.
 */
public GraphHopper importOrLoad() {
    boolean alreadyExists = load();
    printInfo();
    if (!alreadyExists)
        process(false);
    return this;
}
| 3.26 |
graphhopper_GraphHopper_load_rdh
|
/**
 * Load from existing graph folder.
 *
 * @return true if an existing, fully imported graph was found and loaded;
 *         false if there is nothing to load yet (fresh import required)
 * @throws IllegalStateException if no location was configured or the graph is already loaded
 * @throws IllegalArgumentException if the location points to an existing file instead of a folder
 */
public boolean load() {
    if (isEmpty(ghLocation))
        throw new IllegalStateException("GraphHopperLocation is not specified. Call setGraphHopperLocation or init before");
    if (fullyLoaded)
        throw new IllegalStateException("graph is already successfully loaded");
    File tmpFileOrFolder = new File(ghLocation);
    if ((!tmpFileOrFolder.isDirectory()) && tmpFileOrFolder.exists()) {
        throw new IllegalArgumentException("GraphHopperLocation cannot be an existing file. Has to be either non-existing or a folder.");
    } else {
        // a compressed graph archive (<location>.ghz) is transparently extracted first
        File compressed = new File(ghLocation + ".ghz");
        if (compressed.exists() && (!compressed.isDirectory())) {
            try {
                new Unzipper().unzip(compressed.getAbsolutePath(), ghLocation, removeZipped);
            } catch (IOException ex) {
                throw new RuntimeException((("Couldn't extract file " + compressed.getAbsolutePath()) + " to ") + ghLocation, ex);
            }
        }
    }
    // todo: this does not really belong here, we abuse the load method to derive the dataAccessDefaultType setting from others
    if ((!allowWrites) && dataAccessDefaultType.isMMap())
        dataAccessDefaultType = DAType.MMAP_RO;
    // there is just nothing to load
    if (!new File(ghLocation).exists())
        return false;
    GHDirectory directory = new GHDirectory(ghLocation, dataAccessDefaultType);
    directory.configure(dataAccessConfig);
    GHLock lock = null;
    try {
        // create locks only if writes are allowed, if they are not allowed a lock cannot be created
        // (e.g. on a read only filesystem locks would fail)
        if (directory.getDefaultType().isStoring() && isAllowWrites()) {
            lockFactory.setLockDir(new File(ghLocation));
            lock = lockFactory.create(fileLockName, false);if (!lock.tryLock())
                throw new RuntimeException("To avoid reading partial data we need to obtain the read lock but it failed. In " + ghLocation, lock.getObtainFailedReason());
        }
        properties = new StorableProperties(directory);
        // the -gh folder exists, but there is no properties file. it might be just empty, so let's act as if
        // the import did not run yet or is not complete for some reason
        if (!properties.loadExisting())
            return false;
        // the encoding manager is reconstructed from the stored properties, not from the config
        encodingManager = EncodingManager.fromProperties(properties);
        baseGraph = new BaseGraph.Builder(encodingManager).setDir(directory).set3D(hasElevation()).withTurnCosts(encodingManager.needsTurnCostsSupport()).setSegmentSize(defaultSegmentSize).build();
        baseGraph.loadExisting();
        // refuse to use a graph that was imported with a different set of profiles
        String storedProfiles = properties.get("profiles");
        String configuredProfiles = getProfilesString();
        if (!storedProfiles.equals(configuredProfiles))
            throw new IllegalStateException(((((("Profiles do not match:" + "\nGraphhopper config: ") + configuredProfiles) + "\nGraph: ") + storedProfiles) + "\nChange configuration to match the graph or delete ") + baseGraph.getDirectory().getLocation());
        checkProfilesConsistency();
        postProcessing(false);
        directory.loadMMap();
        setFullyLoaded();return true;
    } finally {
        // the lock only guards against reading partial data and is always released here
        if (lock != null)
            lock.release();
    }
}
| 3.26 |
graphhopper_GraphHopper_getProfile_rdh
|
/**
 * Looks up a configured profile by name.
 *
 * @param profileName the name of the profile
 * @return the matching profile, or null if no profile with that name exists
 */
public Profile getProfile(String profileName) {
    return profilesByName.get(profileName);
}
| 3.26 |
graphhopper_GraphHopper__getOSMFile_rdh
|
/**
 * Currently we use this for a few tests where the dataReaderFile is loaded from the classpath
 *
 * @return the configured OSM input file as a {@link File}
 */
protected File _getOSMFile() {
    return new File(osmFile);
}
| 3.26 |
graphhopper_GraphHopper_setSortGraph_rdh
|
/**
 * Enables sorting of the graph during import, which requires more RAM. See #12.
 */
public GraphHopper setSortGraph(boolean sortGraph) {
    // configuration must not change after the graph was loaded
    ensureNotLoaded();
    this.sortGraph = sortGraph;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_setCountryRuleFactory_rdh
|
/**
 * Sets the factory used to create country rules. Passing {@code null} disables
 * country rules entirely.
 */
public GraphHopper setCountryRuleFactory(CountryRuleFactory countryRuleFactory) {
    this.countryRuleFactory = countryRuleFactory;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_setElevation_rdh
|
/**
 * Enables storing and fetching of elevation data. Disabled by default.
 */
public GraphHopper setElevation(boolean includeElevation) {
    this.elevation = includeElevation;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_setPreciseIndexResolution_rdh
|
/**
 * Sets the tile width (in meters) of the location index. A more precise index
 * consumes more disk/RAM and can slow down queries, which makes high precision
 * unsuitable for e.g. Android.
 */
public GraphHopper setPreciseIndexResolution(int precision) {
    // configuration must not change after the graph was loaded
    ensureNotLoaded();
    this.preciseIndexResolution = precision;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_setStoreOnFlush_rdh
|
/**
 * Only relevant for the in-memory graph, e.g. to disable store-on-flush in unit
 * tests. With storeOnFlush=true existing data is loaded FROM disk and all
 * in-memory data is flushed TO disk when flush is called (e.g. during OSM import).
 *
 * @param storeOnFlush
 *        true by default
 */
public GraphHopper setStoreOnFlush(boolean storeOnFlush) {
    ensureNotLoaded();
    dataAccessDefaultType = storeOnFlush ? DAType.RAM_STORE : DAType.RAM;
    return this;
}
| 3.26 |
graphhopper_GraphHopper_hasElevation_rdh
|
/**
 * Reports whether elevation data is stored and fetched. Disabled by default.
 *
 * @return true if elevation support is enabled
 */
public boolean hasElevation() {
    return elevation;
}
| 3.26 |
graphhopper_GraphHopper_clean_rdh
|
/**
 * Removes the on-disc routing files. Call only after calling close or before
 * importOrLoad or load.
 *
 * @throws IllegalStateException if no graphhopper location was configured
 */
public void clean() {
    String location = getGraphHopperLocation();
    if (location.isEmpty())
        throw new IllegalStateException("Cannot clean GraphHopper without specified graphHopperLocation");
    removeDir(new File(location));
}
| 3.26 |
graphhopper_EdgeBasedNodeContractor_findAndHandlePrepareShortcuts_rdh
|
/**
 * This method performs witness searches between all nodes adjacent to the given node and calls the
 * given handler for all required shortcuts.
 *
 * @param node            the node being contracted
 * @param shortcutHandler invoked once per shortcut that is required to preserve shortest paths
 * @param maxPolls        poll budget for each witness search
 * @param wpsStats        accumulator for witness-path-search statistics
 */
private void findAndHandlePrepareShortcuts(int node, PrepareShortcutHandler shortcutHandler, int maxPolls, EdgeBasedWitnessPathSearcher.Stats wpsStats) {
    stats().nodes++;
    addedShortcuts.clear();sourceNodes.clear();
    // traverse incoming edges/shortcuts to find all the source nodes
    PrepareGraphEdgeIterator incomingEdges = inEdgeExplorer.setBaseNode(node);
    while (incomingEdges.next()) {
        final int sourceNode = incomingEdges.getAdjNode();
        // skip loops at the contracted node itself
        if (sourceNode == node)
            continue;
        // make sure we process each source node only once
        if (!sourceNodes.add(sourceNode))
            continue;
        // for each source node we need to look at every incoming original edge and check which target edges are reachable
        PrepareGraphOrigEdgeIterator origInIter = sourceNodeOrigInEdgeExplorer.setBaseNode(sourceNode);
        while (origInIter.next()) {
            // the search starts on the reversed key of the source node's incoming original edge
            int origInKey = reverseEdgeKey(origInIter.getOrigEdgeKeyLast());
            // we search 'bridge paths' leading to the target edges
            IntObjectMap<BridgePathFinder.BridePathEntry> bridgePaths = bridgePathFinder.find(origInKey, sourceNode, node);
            if (bridgePaths.isEmpty())
                continue;
            witnessPathSearcher.initSearch(origInKey, sourceNode, node, wpsStats);
            for (IntObjectCursor<BridgePathFinder.BridePathEntry> bridgePath : bridgePaths) {
                if (!Double.isFinite(bridgePath.value.weight))
                    throw new IllegalStateException("Bridge entry weights should always be finite");
                int targetEdgeKey = bridgePath.key;
                dijkstraSW.start();
                // witness search: look for an alternative path (not via 'node') with weight
                // no larger than the bridge path's weight
                double weight = witnessPathSearcher.runSearch(bridgePath.value.chEntry.adjNode, targetEdgeKey, bridgePath.value.weight, maxPolls);
                dijkstraSW.stop();
                // we found a witness, nothing to do
                if (weight <= bridgePath.value.weight)
                    continue;
                // walk up the parent chain to the first edge of the bridge path
                PrepareCHEntry root = bridgePath.value.chEntry;
                while (EdgeIterator.Edge.isValid(root.parent.prepareEdge))
                    root = root.getParent();
                // we make sure to add each shortcut only once. when we are actually adding shortcuts we check for existing
                // shortcuts anyway, but at least this is important when we *count* shortcuts.
                long addedShortcutKey = BitUtil.LITTLE.toLong(root.firstEdgeKey, bridgePath.value.chEntry.incEdgeKey);
                if (!addedShortcuts.add(addedShortcutKey))
                    continue;
                // the entry weight includes the initial turn cost, which must not be part of the shortcut weight
                double initialTurnCost = prepareGraph.getTurnWeight(origInKey, sourceNode, root.firstEdgeKey);
                bridgePath.value.chEntry.weight -= initialTurnCost;
                LOGGER.trace("Adding shortcuts for target entry {}", bridgePath.value.chEntry);
                // todo: re-implement loop-avoidance heuristic as it existed in GH 1.0? it did not work the
                // way it was implemented so it was removed at some point
                shortcutHandler.handleShortcut(root, bridgePath.value.chEntry, bridgePath.value.chEntry.origEdges);
            }
            witnessPathSearcher.finishSearch();
        }
    }
}
| 3.26 |
graphhopper_GHMRequest_putHint_rdh
|
// catch-all setter: Jackson routes any JSON field it does not recognize here,
// so unknown properties end up in the hints map instead of failing deserialization
@JsonAnySetter
public GHMRequest putHint(String fieldName, Object value) {
    f1.putObject(fieldName, value);
    return this;
}
| 3.26 |
graphhopper_GHMRequest_setFailFast_rdh
|
/**
 * Controls whether the matrix calculation aborts early on unconnected points.
 *
 * @param failFast
 *        if false the matrix calculation continues even when some points are not connected
 */
@JsonProperty("fail_fast")
public void setFailFast(boolean failFast) {
    this.failFast = failFast;
}
| 3.26 |
graphhopper_GHMRequest_setOutArrays_rdh
|
/**
 * Selects which result arrays the matrix response should contain.
 * Possible values are 'weights', 'times', 'distances'.
 */
public GHMRequest setOutArrays(List<String> outArrays) {
    this.outArrays = outArrays;
    return this;
}
| 3.26 |
graphhopper_LMApproximator_initCollections_rdh
|
// We only expect a very short search
// NOTE: the requested 'size' is deliberately ignored - a fixed capacity of 2 is
// passed to the superclass to avoid over-allocating for these tiny searches.
@Override
protected void initCollections(int size) {
    super.initCollections(2);}
| 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.