public int read(long offset) throws IOException { int b; if (maxBufferSize <= 0) { // old-school (no buffer optimization) b = input.read(); } else { checkBuffer(offset); b = buffer[(int) (offset - bufferOffset)]; } // map a signed byte value (-128..-1) to its unsigned equivalent (128..255) if (b < 0) return 256 + b; return b; }
Reads the byte at the given offset. @param offset the offset @return the byte as an unsigned value (0-255) @throws IOException Signals that an I/O exception has occurred.
public static void main(String[] args) { Interval1D<Integer> a = new Interval1D<>(15, 20); Interval1D<Integer> b = new Interval1D<>(25, 30); Interval1D<Integer> c = new Interval1D<>(10, 40); Interval1D<Integer> d = new Interval1D<>(40, 50); System.out.println("a = " + a); System.out.println("b = " + b); System.out.println("c = " + c); System.out.println("d = " + d); System.out.println("b intersects a = " + b.intersects(a)); System.out.println("a intersects b = " + a.intersects(b)); System.out.println("a intersects c = " + a.intersects(c)); System.out.println("a intersects d = " + a.intersects(d)); System.out.println("b intersects c = " + b.intersects(c)); System.out.println("b intersects d = " + b.intersects(d)); System.out.println("c intersects d = " + c.intersects(d)); }
test client
public boolean intersects(Interval1D<V> other) { if (other.hi.compareTo(this.lo) < 0) { return false; } if (this.hi.compareTo(other.lo) < 0) { return false; } return true; }
Does this interval intersect the given one? @param other the interval to check for intersection @return true, if the intervals share at least one point
public boolean select(int ID) { // verify that the ID occurs if(!all.contains(ID)){ throw new SolutionModificationException("Error while modifying subset solution: " + "unable to select ID " + ID + " (no entity with this ID).", this); } // verify that ID is currently not selected if(selected.contains(ID)){ // already selected: return false return false; } // currently unselected, existing ID: select it selected.add(ID); unselected.remove(ID); return true; }
Select the given ID. If there is no entity with the given ID, a {@link SolutionModificationException} is thrown. If the ID is currently already selected, the subset solution is not modified and false is returned. Finally, true is returned if the ID has been successfully selected. @param ID ID to be selected @throws SolutionModificationException if there is no entity with this ID @return true if the ID has been successfully selected, false if it was already selected
public boolean selectAll(Collection<Integer> IDs) { boolean modified = false; for(int ID : IDs){ if(select(ID)){ modified = true; } } return modified; }
Select all IDs contained in the given collection. Returns true if the subset solution was modified by this operation, i.e. if at least one previously unselected ID has been selected. @param IDs collection of IDs to be selected @throws SolutionModificationException if the given collection contains at least one ID which does not correspond to an entity @throws NullPointerException if <code>null</code> is passed or the given collection contains at least one <code>null</code> element @return true if the subset solution was modified
public boolean deselectAll(Collection<Integer> IDs) { boolean modified = false; for(int ID : IDs){ if(deselect(ID)){ modified = true; } } return modified; }
Deselect all IDs contained in the given collection. Returns true if the subset solution was modified by this operation, i.e. if at least one previously selected ID has been deselected. @param IDs collection of IDs to be deselected @throws SolutionModificationException if the given collection contains at least one ID which does not correspond to an entity @throws NullPointerException if <code>null</code> is passed or the given collection contains at least one <code>null</code> element @return true if the subset solution was modified
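A minimal usage sketch of the selection contract described above; the SubsetSolution constructor taking the full set of IDs is an assumption:

SubsetSolution sol = new SubsetSolution(new HashSet<>(Arrays.asList(1, 2, 3, 4, 5))); // assumed constructor
sol.select(2);                        // true: 2 was newly selected
sol.select(2);                        // false: already selected, solution unchanged
sol.selectAll(Arrays.asList(2, 3));   // true: at least one ID (3) was newly selected
sol.deselectAll(Arrays.asList(1, 2)); // true: 2 was deselected (1 was never selected)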
@XmlElementDecl(namespace = "http://psi.hupo.org/ms/mzml", name = "mzML") public JAXBElement<MzMLType> createMzML(MzMLType value) { return new JAXBElement<MzMLType>(_MzML_QNAME, MzMLType.class, null, value); }
Create an instance of {@link JAXBElement }{@code <}{@link MzMLType }{@code >}
public static final void setJavolutionLogLevel(org.slf4j.Logger log) { // disable Javolution info-level logging if (log.isTraceEnabled()) { javolution.context.LogContext.enter().setLevel(LogContext.Level.DEBUG); } else if (log.isDebugEnabled()) { javolution.context.LogContext.enter().setLevel(LogContext.Level.DEBUG); } else if (log.isInfoEnabled()) { javolution.context.LogContext.enter().setLevel(LogContext.Level.INFO); } else if (log.isWarnEnabled()) { javolution.context.LogContext.enter().setLevel(LogContext.Level.WARNING); } else if (log.isErrorEnabled()) { javolution.context.LogContext.enter().setLevel(LogContext.Level.ERROR); } else { javolution.context.LogContext.enter().setLevel(LogContext.Level.FATAL); } }
Set Javolution log level according to the provided logger. @param log SLF4J wrapper for many logging frameworks
public static final void configureJavaUtilLogging() { try (InputStream is = LogHelper.class.getResourceAsStream("logging.properties")) { if (is == null) { throw new IOException("Can't find/open logging.properties"); } LogManager logMan = LogManager.getLogManager(); logMan.readConfiguration(is); } catch (final IOException e) { java.util.logging.Logger.getAnonymousLogger().severe( "Could not load development logging.properties file using " + "LogHelper.class.getResourceAsStream(\"logging.properties\")"); java.util.logging.Logger.getAnonymousLogger().severe(e.getMessage()); } }
Configures JUL (java.util.logging) using the logging.properties file located in this package. Only use this method for testing purposes; clients should configure logging themselves, i.e. provide a logging bridge from SLF4J to their own logging infrastructure, or use the SLF4J no-op logger.
public void setNeighbourhoods(List<? extends Neighbourhood<? super SolutionType>> neighs){ // synchronize with status updates synchronized(getStatusLock()){ // assert idle assertIdle("Cannot set list of neighbourhoods."); // check not null if(neighs == null){ throw new NullPointerException("Can not set neighbourhoods: received null."); } // check that neighs does not contain any null elements for(Neighbourhood<?> n : neighs){ if(n == null){ throw new NullPointerException("Can not set neighbourhoods: neighbourhood list can not" + " contain any null elements."); } } // check not empty if(neighs.isEmpty()){ throw new IllegalArgumentException("Can not set neighbourhoods: received empty list."); } // go ahead this.neighs = Collections.unmodifiableList(neighs); } }
Sets the list of neighbourhoods used to modify the current solution. Note that <code>neighs</code> cannot be <code>null</code> or empty and cannot contain any <code>null</code> elements. This method may only be called when the search is idle. It should be used with care for searches that have already been run, as updating the neighbourhoods might break the execution of a restarted search that tries to continue from where it left off. <p> An unmodifiable view of the given list is stored. @throws NullPointerException if <code>neighs</code> is <code>null</code> or contains any <code>null</code> elements @throws IllegalArgumentException if <code>neighs</code> is empty @throws SearchException if the search is currently not idle @param neighs list of neighbourhoods used to modify the current solution
@Override public boolean isTabu(Move<? super SubsetSolution> move, SubsetSolution currentSolution) { // check move type if(move instanceof SubsetMove){ // cast SubsetMove sMove = (SubsetMove) move; // check if any involved ID is tabu return containsTabuID(sMove.getAddedIDs()) || containsTabuID(sMove.getDeletedIDs()); } else { // wrong move type throw new IncompatibleTabuMemoryException("ID based subset tabu memory can only be used in combination with " + "neighbourhoods that generate moves of type SubsetMove. Received: " + move.getClass().getSimpleName()); } }
A move is considered tabu if any involved ID (added or deleted) is currently contained in the tabu memory. If not, the move is allowed. It is required that the given move is of type {@link SubsetMove}, else an {@link IncompatibleTabuMemoryException} will be thrown. Note that the argument <code>currentSolution</code> is not used here and may be <code>null</code>, because the move itself contains all necessary information. @param move subset move to be applied to the current solution (required to be of type {@link SubsetMove}) @param currentSolution current solution (not used here, allowed to be <code>null</code>) @return <code>true</code> if the current memory contains any ID which is added or deleted by the given move @throws IncompatibleTabuMemoryException if the given move is not of type {@link SubsetMove}
@Override public void registerVisitedSolution(SubsetSolution visitedSolution, Move<? super SubsetSolution> appliedMove) { // don't do anything if move is null if(appliedMove != null){ // check move type if(appliedMove instanceof SubsetMove){ // cast SubsetMove sMove = (SubsetMove) appliedMove; // store involved IDs memory.addAll(sMove.getAddedIDs()); memory.addAll(sMove.getDeletedIDs()); } else { // wrong move type throw new IncompatibleTabuMemoryException("ID based subset tabu memory can only be used in combination with " + "neighbourhoods that generate moves of type SubsetMove. Received: " + appliedMove.getClass().getName()); } } }
Registers an applied subset move by storing all involved IDs (added or deleted) in the tabu memory. It is required that the given move is of type {@link SubsetMove}, else an {@link IncompatibleTabuMemoryException} will be thrown. The argument <code>visitedSolution</code> is ignored, as the applied move contains all necessary information, and may be <code>null</code>. If <code>appliedMove</code> is <code>null</code>, calling this method does not have any effect. @param visitedSolution newly visited solution (not used here, allowed to be <code>null</code>) @param appliedMove applied move of which all involved IDs are stored in the tabu memory @throws IncompatibleTabuMemoryException if the given move is not of type {@link SubsetMove}
public void onResume() { if (requestPermissionOnStart && firstTime) { checkPermission(false); firstTime = false; } else if (pendingAppInfoDialog) { pendingAppInfoDialog = false; showAppInfoDialog(); } }
This method must be called from the fragment's or activity's <code>onResume()</code> method.
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) { if (requestCode == permissionRequestCode) { if (grantResults.length == 1 && grantResults[0] == PackageManager.PERMISSION_GRANTED) { if (onRequestPermissionsResultListener != null) { onRequestPermissionsResultListener.onRequestPermissionsGranted(); } } else { if (onRequestPermissionsResultListener != null) { onRequestPermissionsResultListener.onRequestPermissionsDenied(); } if (previouslyShouldShowRequestPermissionRationale != null && !previouslyShouldShowRequestPermissionRationale && !permissionDelegate.shouldShowRequestPermissionRationale(permission)) { //Note: onRequestPermissionsResult(...) is called immediately before onResume() (like onActivityResult(...)), // if the dialog is show immediately in this case an exception occurs ("java.lang.IllegalStateException: Can not perform this action after onSaveInstanceState"). pendingAppInfoDialog = true; } } } }
This method must be called from the fragment's or activity's <code>onRequestPermissionsResult(...)</code> method.
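A hedged wiring sketch for the two lifecycle hooks above; the permissionHelper field and its type are assumptions about the enclosing project:

@Override
protected void onResume() {
    super.onResume();
    // forward the lifecycle callback, as required above
    permissionHelper.onResume();
}

@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
    super.onRequestPermissionsResult(requestCode, permissions, grantResults);
    // forward the result so listeners are notified and pending dialogs are scheduled
    permissionHelper.onRequestPermissionsResult(requestCode, permissions, grantResults);
}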
public boolean checkPermission(boolean showAppInfoDialogEnabled) { boolean hasPermission; if (permissionRationaleMessageResId != 0) { hasPermission = checkPermission(permissionDelegate, getPermissionRationaleTitleResId(), permissionRationaleMessageResId, permission, permissionRequestCode); } else { hasPermission = checkPermission(permissionDelegate, permission, permissionRequestCode); } if (!hasPermission && showAppInfoDialogEnabled) { previouslyShouldShowRequestPermissionRationale = permissionDelegate.shouldShowRequestPermissionRationale(permission); } else { previouslyShouldShowRequestPermissionRationale = null; } return hasPermission; }
Similar to {@link #checkPermission()}, but allows the AppInfoDialog to be enabled or disabled for this call. @param showAppInfoDialogEnabled true if the AppInfoDialog is enabled, false to disable it. @return true if the permission has already been granted
public Map<String, String> getChildrenMap(String parentId) { Map<String, String> map = new HashMap<>(); List<StringEntity> children = getByField(Column.PARENT_ID, parentId); for (StringEntity stringEntity : children) { map.put(stringEntity.getId(), stringEntity.getValue()); } return map; }
Returns the map of children associated with the given parent id. @param parentId id of the parent entity. @return map from child entity id to its value
public void replaceMapChildren(Map<String, String> map, String parentId) { ArrayList<StringEntity> entities = new ArrayList<>(); for (String key : map.keySet()) { StringEntity entity = new StringEntity(); entity.setParentId(parentId); entity.setId(key); entity.setValue(map.get(key)); entities.add(entity); } replaceChildren(entities, parentId); }
Replaces all children of a given parent: children not present in the map are removed, new entries are added, and existing ones are updated. @param map map of children (id to value) that replaces the current children. @param parentId id of the parent entity.
@Override protected void searchStep() { // stop if no more neighbourhoods available if(k >= getNeighbourhoods().size()){ stop(); } else { // use k-th neighbourhood to get best valid move with positive delta, if any Neighbourhood<? super SolutionType> neigh = getNeighbourhoods().get(k); Move<? super SolutionType> move = getBestMove( neigh.getAllMoves(getCurrentSolution()), // generate all moves true); // only improvements // found improvement ? if(move != null){ // improvement: accept move and reset k accept(move); k = 0; } else { // switch to next neighbourhood (to be used in next step) k++; } } }
Investigates all neighbours of the current solution, using the k-th neighbourhood, and adopts the best one as the new current solution, if it is an improvement. If no improvement is found, k is increased. Upon each improvement, k is reset to 0, and when k has reached the number of available neighbourhoods, the search stops. @throws JamesRuntimeException if depending on malfunctioning components (problem, neighbourhood, ...)
public static BarcodeIntentResult parseActivityResult(int requestCode, int resultCode, Intent intent) { if (requestCode == REQUEST_CODE && resultCode == Activity.RESULT_OK) { String contents = intent.getStringExtra(SCAN_RESULT); String formatName = intent.getStringExtra(SCAN_RESULT_FORMAT); return new BarcodeIntentResult(contents, formatName); } return null; }
<p> Call this from your {@link Activity}'s {@link Activity#onActivityResult(int, int, Intent)} method. </p> @param requestCode the request code passed to <code>onActivityResult(...)</code> @param resultCode the result code passed to <code>onActivityResult(...)</code> @param intent the intent passed to <code>onActivityResult(...)</code> @return null if the event handled here was not related to this class or the scan was cancelled, or else a {@link BarcodeIntentResult} containing the contents and format of the scan.
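A usage sketch from an activity; the hosting class name BarcodeIntent and the handleScan callback are assumptions:

@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);
    BarcodeIntentResult scan = BarcodeIntent.parseActivityResult(requestCode, resultCode, data);
    if (scan != null) {
        // a completed scan; contents and format name are available on the result
        handleScan(scan);
    }
}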
@Override public SubsetMove getRandomMove(SubsetSolution solution, Random rnd) { // get set of candidate IDs for addition (fixed IDs are discarded) Set<Integer> addCandidates = getAddCandidates(solution); // compute maximum number of adds int curMaxAdds = maxAdditions(addCandidates, solution); // return null if no additions are possible if(curMaxAdds == 0){ return null; } // pick number of additions (in [1, curMaxAdds]) int numAdds = rnd.nextInt(curMaxAdds) + 1; // pick random IDs to add to selection Set<Integer> add = SetUtilities.getRandomSubset(addCandidates, numAdds, rnd); // create and return move return new GeneralSubsetMove(add, Collections.emptySet()); }
<p> Generates a move for the given subset solution that adds a random subset of currently unselected IDs to the selection. Possible fixed IDs are not considered to be selected. The maximum number of additions \(k\) and maximum allowed subset size are respected. If no items can be added, <code>null</code> is returned. </p> <p> Note that first, a random number of additions is picked (uniformly distributed) from the valid range and then, a random subset of this size is sampled from the currently unselected IDs, to be added (again, all possible subsets are uniformly distributed, within the fixed size). Because the amount of possible moves increases with the number of performed additions, the probability of generating each specific move thus decreases with the number of additions. In other words, randomly generated moves are <b>not</b> uniformly distributed across different numbers of performed additions, but each specific move performing fewer additions is more likely to be selected than each specific move performing more additions. </p> @param solution solution for which a random multi addition move is generated @param rnd source of randomness used to generate random move @return random multi addition move, <code>null</code> if no items can be added
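An illustrative, self-contained sketch of the two-stage sampling described above, in plain Java rather than the library's API: first a size is drawn uniformly from the valid range, then a uniform random subset of that size is taken.

import java.util.*;

public class TwoStageSamplingDemo {

    // draw a size uniformly from [1, maxAdds], then a uniform subset of that size
    static Set<Integer> randomAddition(Set<Integer> candidates, int maxAdds, Random rnd) {
        int numAdds = rnd.nextInt(maxAdds) + 1;         // uniform in [1, maxAdds]
        List<Integer> pool = new ArrayList<>(candidates);
        Collections.shuffle(pool, rnd);                 // first numAdds elements form a uniform subset
        return new HashSet<>(pool.subList(0, numAdds));
    }

    public static void main(String[] args) {
        Set<Integer> unselected = new HashSet<>(Arrays.asList(1, 2, 3, 4, 5));
        System.out.println(randomAddition(unselected, 3, new Random(42)));
    }
}

Since there is a single uniform draw per size but many more subsets of the larger sizes, each specific larger move is indeed less likely to be generated than each specific smaller one.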
@Override protected boolean accept(Move<? super SolutionType> move){ // call super if(super.accept(move)){ // update tabu memory tabuMemory.registerVisitedSolution(getCurrentSolution(), move); // update successful return true; } else { return false; } }
Overrides acceptance of a move to update the tabu memory by registering the newly visited solution. @param move move to be accepted @return <code>true</code> if the update was successful, <code>false</code> if the move is invalid
@Override public void setCurrentSolution(SolutionType solution){ // call super (also verifies search status) super.setCurrentSolution(solution); // update tabu memory (no move has been applied to obtain this solution, pass null) tabuMemory.registerVisitedSolution(solution, null); }
Updates the tabu memory when a custom current/initial solution is set. Note that this method may only be called when the search is idle. @param solution manually specified current solution @throws SearchException if the search is not idle @throws NullPointerException if <code>solution</code> is <code>null</code>
protected Predicate<Move<? super SolutionType>> getTabuFilter(){ return m -> !tabuMemory.isTabu(m, getCurrentSolution()) || (validate(m).passed() && computeDelta(evaluate(m), getBestSolutionEvaluation()) > 0); }
Returns the filter used to discard tabu moves, in the form of a predicate. As an exception, tabu moves that improve the currently known best solution are admitted anyway (aspiration criterion). The returned predicate is <code>false</code> for all tabu moves that have to be discarded, else it is <code>true</code>. @return filter discarding tabu moves
public Collection<T> next(Collection<T> subset){ // check if there is a next subset to generate if(!hasNext()){ throw new NoSuchElementException("No more subsets to be generated."); } // fill collection with currently selected items (returned at the end of the method) for(int i=0; i<t.length-1; i++){ // skip last element (= dummy) subset.add(items[t[i]]); } // set indices of items to be selected in next subset, if any, according to kSubsetRevDoorSuccessor // algorithm by Kreher and Stinson (p. 52), modified so that // - it is detected when all subsets of the current size have been generated // - in the latter case, the generation continues with the next size, if still valid // - indices and values in t are counted from 0 to k-1 instead of 1 to k // - special cases (size 1 and full size) also work // k indicates current subset size (account for dummy element!) int k = t.length-1; // search for first index j where t[j] is different from j int j=0; while(j < k && t[j] == j){ j++; } // if j = k-1 and t[j] = |items|-1, or k = |items| or k = 0, all subsets of the current size have been generated if (j == k-1 && t[j] == items.length-1 || k == items.length || k == 0){ // go to next size, if still within bounds int nextSize = k+1; if(nextSize <= maxSubsetSize && nextSize <= items.length){ // set first subset of next size (t = {0,1,...,nextSize-1}) t = new int[nextSize+1]; for(int i=0; i<nextSize; i++){ t[i] = i; } // set dummy t[nextSize] = items.length; } else { // next size is no longer within bounds t = null; } } else { // generate next subset of current size // (according to revolving door successor algorithm) if((k - (j+1)) % 2 != 0){ if(j == 0){ t[0] = t[0]-1; } else { t[j-1] = j; if(j-2 >= 0){ t[j-2] = j-1; } } } else { if(t[j+1] != t[j]+1){ if(j-1 >= 0){ t[j-1] = t[j]; } t[j] = t[j] + 1; } else { t[j+1] = t[j]; t[j] = j; } } } // return current subset return subset; }
<p> Fill the given collection with the items from the next subset. The collection is <b>not</b> cleared so already contained items will be retained. A reference to this same collection is returned after it has been modified. </p> <p> To store the next subset in a newly allocated {@link LinkedHashSet} the alternative method {@link #next()} may also be used. </p> @param subset collection to fill with items from next generated subset @return reference to given collection, after it has been filled with the items from the next subset @throws NoSuchElementException if there is no next subset to be generated
@Override public Set<T> next() { Set<T> subset = new LinkedHashSet<>(); next(subset); return subset; }
Generate the next subset in a newly allocated {@link LinkedHashSet}. @return next subset stored in newly allocated {@link LinkedHashSet}. @throws NoSuchElementException if there is no next subset to be generated
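A hedged usage sketch; hasNext() and the two next variants are shown above, while the class name SubsetIterator and its constructor (item set plus subset size bounds) are assumptions:

Set<String> items = new LinkedHashSet<>(Arrays.asList("a", "b", "c"));
SubsetIterator<String> it = new SubsetIterator<>(items, 1, 2); // assumed constructor
while (it.hasNext()) {
    System.out.println(it.next()); // all subsets of size 1, then all subsets of size 2
}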
public void addPenalizingValidation(Object key, PenalizingValidation penalizingValidation){ initMapOnce(); penalties.put(key, penalizingValidation); // update penalized value if(!penalizingValidation.passed()){ assignedPenalties = true; double p = penalizingValidation.getPenalty(); penalizedValue += minimizing ? p : -p; } }
Add a penalty expressed by a penalizing validation object. A key is required that can be used to retrieve the validation object later. @param key key used to retrieve the validation object later @param penalizingValidation penalizing validation that indicates the assigned penalty
public PenalizingValidation getPenalizingValidation(Object key){ return penalties == null ? null : penalties.get(key); }
Retrieve the penalizing validation object corresponding to the given key. If no penalty has been added with this key, <code>null</code> is returned. @param key key specified when adding the penalizing validation @return retrieved validation object, or <code>null</code> if no validation object was added with this key
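A short sketch of the penalty bookkeeping, using only the methods shown here; the key and the sizeValidation object are hypothetical:

evaluation.addPenalizingValidation("sizeConstraint", sizeValidation);
PenalizingValidation v = evaluation.getPenalizingValidation("sizeConstraint");
if (v != null && !v.passed()) {
    // the penalty is added to the value when minimizing, subtracted when maximizing
    double penalty = v.getPenalty();
}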
public double distance(Double lat, Double lon) { return calculateDistance(latitude, longitude, lat, lon); }
Returns the distance in kilometers between this location and the specified coordinates. @param lat latitude of the other location @param lon longitude of the other location @return distance in kilometers
public double calculateDistance(double srcLat, double srcLong, double destLat, double destLong) { float[] results = new float[1]; Location.distanceBetween(srcLat, srcLong, destLat, destLong, results); return results[0] / 1000; }
Returns the distance in kilometers between the given source and destination coordinates. @param srcLat source latitude @param srcLong source longitude @param destLat destination latitude @param destLong destination longitude @return distance in kilometers
public static LCMSDataSource<?> create(Path path) { path = path.toAbsolutePath(); String lowerCaseName = path.getFileName().toString().toLowerCase(); if (lowerCaseName.endsWith(".mzxml")) { return new MZXMLFile(path.toString()); } else if (lowerCaseName.endsWith(".mzml")) { return new MZMLFile(path.toString()); } return null; }
Try to create a data source from the given file path, based on its file extension. @param path path to the data file @return the data source, or null if the provided path was not recognized
@XmlElementDecl(namespace = "http://psidev.info/psi/pi/mzIdentML/1.2", name = "MzIdentML") public JAXBElement<MzIdentMLType> createMzIdentML(MzIdentMLType value) { return new JAXBElement<MzIdentMLType>(_MzIdentML_QNAME, MzIdentMLType.class, null, value); }
Create an instance of {@link JAXBElement }{@code <}{@link MzIdentMLType }{@code >}
public static double[] decode( String cvAccession, byte[] data, int dataSize ) { switch (cvAccession) { case ACC_NUMPRESS_LINEAR: { double[] buffer = new double[dataSize * 2]; int nbrOfDoubles = MSNumpress.decodeLinear(data, dataSize, buffer); double[] result = new double[nbrOfDoubles]; System.arraycopy(buffer, 0, result, 0, nbrOfDoubles); return result; } case ACC_NUMPRESS_SLOF: { double[] result = new double[dataSize / 2]; MSNumpress.decodeSlof(data, dataSize, result); return result; } case ACC_NUMPRESS_PIC: { double[] buffer = new double[dataSize * 2]; int nbrOfDoubles = MSNumpress.decodePic(data, dataSize, buffer); double[] result = new double[nbrOfDoubles]; System.arraycopy(buffer, 0, result, 0, nbrOfDoubles); return result; } } throw new IllegalArgumentException("'" + cvAccession + "' is not a numpress compression term"); }
Convenience function for decoding binary data encoded by MSNumpress. If the passed cvAccession is one of <p> ACC_NUMPRESS_LINEAR = "MS:1002312" ACC_NUMPRESS_PIC = "MS:1002313" ACC_NUMPRESS_SLOF = "MS:1002314" <p> the corresponding decode function will be called. @param cvAccession The PSI-MS obo CV accession of the encoded data. @param data array of bytes to be decoded @param dataSize number of bytes from data to decode @return The decoded doubles
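A round-trip sketch using the methods from this section; it assumes encodeLinear and optimalLinearFixedPoint (shown below) are static members of MSNumpress, as the calls above suggest, and that the decode convenience method is in scope (qualify with its declaring class as needed):

double[] mz = {100.0, 100.5, 101.0, 101.5};
double fp = MSNumpress.optimalLinearFixedPoint(mz, mz.length);
byte[] encoded = new byte[8 + mz.length * 5];                           // worst case, per encodeLinear
int encodedBytes = MSNumpress.encodeLinear(mz, mz.length, encoded, fp);
double[] restored = decode(ACC_NUMPRESS_LINEAR, encoded, encodedBytes); // "MS:1002312"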
protected static int encodeInt( long x, byte[] res, int resOffset ) { byte i, l; long m; long mask = 0xf0000000; long init = x & mask; if (init == 0) { l = 8; for (i = 0; i < 8; i++) { m = mask >> (4 * i); if ((x & m) != 0) { l = i; break; } } res[resOffset] = l; for (i = l; i < 8; i++) { res[resOffset + 1 + i - l] = (byte) (0xf & (x >> (4 * (i - l)))); } return 1 + 8 - l; } else if (init == mask) { l = 7; for (i = 0; i < 8; i++) { m = mask >> (4 * i); if ((x & m) != m) { l = i; break; } } res[resOffset] = (byte) (l | 8); for (i = l; i < 8; i++) { res[resOffset + 1 + i - l] = (byte) (0xf & (x >> (4 * (i - l)))); } return 1 + 8 - l; } else { res[resOffset] = 0; for (i = 0; i < 8; i++) { res[resOffset + 1 + i] = (byte) (0xf & (x >> (4 * i))); } return 9; } }
This encoding works on a 4 byte integer, by truncating initial zeros or ones. If the initial (most significant) half byte is 0x0 or 0xf, the number of such halfbytes starting from the most significant is stored in a halfbyte. This initial count is then followed by the rest of the int's halfbytes, in little-endian order. A count halfbyte c of 0 &lt;= c &lt;= 8 is interpreted as an initial c 0x0 halfbytes, and 9 &lt;= c &lt;= 15 as an initial (c-8) 0xf halfbytes. <p> Ex: 0 =&gt; 0x8; -1 =&gt; 0xf 0xf; 23 =&gt; 0x6 0x7 0x1 @param x the int to be encoded @param res the byte array where halfbytes are stored @param resOffset position in res where halfbytes are written @return the number of resulting halfbytes
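A standalone illustration of the halfbyte scheme, covering only the 0x0-truncation branch (the 0xf branch for negative values is analogous); it reproduces the 23 => 0x6 0x7 0x1 example above:

public class HalfByteDemo {

    // count leading 0x0 halfbytes of a 32-bit value, then emit the rest little-endian
    static String encode(int x) {
        int l = 8;
        for (int i = 0; i < 8; i++) {
            if (((x >>> (4 * (7 - i))) & 0xf) != 0) { l = i; break; }
        }
        StringBuilder sb = new StringBuilder(String.format("0x%x", l)); // count halfbyte c
        for (int i = l; i < 8; i++) {                                   // remaining halfbytes
            sb.append(String.format(" 0x%x", (x >>> (4 * (i - l))) & 0xf));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(encode(23)); // 0x6 0x7 0x1
        System.out.println(encode(0));  // 0x8
    }
}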
public static double optimalLinearFixedPoint( double[] data, int dataSize ) { if (dataSize == 0) { return 0; } if (dataSize == 1) { return Math.floor(0xFFFFFFFFl / data[0]); } double maxDouble = Math.max(data[0], data[1]); for (int i = 2; i < dataSize; i++) { double extrapol = data[i - 1] + (data[i - 1] - data[i - 2]); double diff = data[i] - extrapol; maxDouble = Math.max(maxDouble, Math.ceil(Math.abs(diff) + 1)); } return Math.floor(0x7FFFFFFFl / maxDouble); }
Computes the optimal fixed point scaling factor for encodeLinear, i.e. the largest factor for which the fixed point values and the linear prediction residuals still fit the 4 byte integer representation. @param data array of doubles to be encoded @param dataSize number of doubles from data to consider @return the optimal fixed point, or 0 if dataSize is 0
public static int encodeLinear( double[] data, int dataSize, byte[] result, double fixedPoint ) { long[] ints = new long[3]; int i, ri, halfByteCount, hbi; byte halfBytes[] = new byte[10]; long extrapol, diff; encodeFixedPoint(fixedPoint, result); if (dataSize == 0) { return 8; } ints[1] = (long) (data[0] * fixedPoint + 0.5); for (i = 0; i < 4; i++) { result[8 + i] = (byte) ((ints[1] >> (i * 8)) & 0xff); } if (dataSize == 1) { return 12; } ints[2] = (long) (data[1] * fixedPoint + 0.5); for (i = 0; i < 4; i++) { result[12 + i] = (byte) ((ints[2] >> (i * 8)) & 0xff); } halfByteCount = 0; ri = 16; for (i = 2; i < dataSize; i++) { ints[0] = ints[1]; ints[1] = ints[2]; ints[2] = (long) (data[i] * fixedPoint + 0.5); extrapol = ints[1] + (ints[1] - ints[0]); diff = ints[2] - extrapol; halfByteCount += encodeInt(diff, halfBytes, halfByteCount); for (hbi = 1; hbi < halfByteCount; hbi += 2) { result[ri++] = (byte) ((halfBytes[hbi - 1] << 4) | (halfBytes[hbi] & 0xf)); } if (halfByteCount % 2 != 0) { halfBytes[0] = halfBytes[halfByteCount - 1]; halfByteCount = 1; } else { halfByteCount = 0; } } if (halfByteCount == 1) { result[ri++] = (byte) (halfBytes[0] << 4); } return ri; }
Encodes the doubles in data by first using a - lossy conversion to a 4 byte, 5 decimal fixed point representation - storing the residuals from a linear prediction after the first two values - encoding by encodeInt (see above) <p> The resulting binary is maximally 8 + dataSize * 5 bytes, but much less if the data is reasonably smooth on the first order. <p> This encoding is suitable for typical m/z or retention time binary arrays. On a test set, the encoding was empirically shown to be accurate to at least 0.002 ppm. @param data array of doubles to be encoded @param dataSize number of doubles from data to encode @param result array where resulting bytes should be stored @param fixedPoint the scaling factor used for getting the fixed point representation. This is stored in the binary and automatically extracted on decoding. @return the number of encoded bytes
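A worked example of the fixed point and prediction steps: smooth, evenly spaced data yields zero residuals after the first two values, which is why such data compresses well.

double fixedPoint = 100000;                  // 5-decimal fixed point
long i0 = (long) (100.0 * fixedPoint + 0.5); // 10000000
long i1 = (long) (100.5 * fixedPoint + 0.5); // 10050000
long i2 = (long) (101.0 * fixedPoint + 0.5); // 10100000
long extrapol = i1 + (i1 - i0);              // 10100000
long diff = i2 - extrapol;                   // 0: encodeInt emits a single count halfbyte (0x8)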
public static int decodeLinear( byte[] data, int dataSize, double[] result ) { int ri = 2; long[] ints = new long[3]; long extrapol; long y; IntDecoder dec = new IntDecoder(data, 16); if (dataSize < 8) { return -1; } double fixedPoint = decodeFixedPoint(data); if (dataSize < 12) { return -1; } ints[1] = 0; for (int i = 0; i < 4; i++) { ints[1] = ints[1] | ((0xFFl & data[8 + i]) << (i * 8)); } result[0] = ints[1] / fixedPoint; if (dataSize == 12) { return 1; } if (dataSize < 16) { return -1; } ints[2] = 0; for (int i = 0; i < 4; i++) { ints[2] = ints[2] | ((0xFFl & data[12 + i]) << (i * 8)); } result[1] = ints[2] / fixedPoint; while (dec.pos < dataSize) { if (dec.pos == (dataSize - 1) && dec.half) { if ((data[dec.pos] & 0xf) != 0x8) { break; } } ints[0] = ints[1]; ints[1] = ints[2]; ints[2] = dec.next(); extrapol = ints[1] + (ints[1] - ints[0]); y = extrapol + ints[2]; result[ri++] = y / fixedPoint; ints[2] = y; } return ri; }
Decodes data encoded by encodeLinear. <p> The result vector is guaranteed to be shorter than or equal to (|data| - 8) * 2. <p> Note that this method may throw an ArrayIndexOutOfBoundsException if it deems the input data to be corrupt, i.e. if the last encoded int does not use the last byte in the data. In addition, the last encoded int needs to use either the last halfbyte, or the second last followed by a 0x0 halfbyte. @param data array of bytes to be decoded @param dataSize number of bytes from data to decode @param result array where resulting doubles should be stored @return the number of decoded doubles, or -1 if dataSize is too small to hold the fixed point and the first encoded values
public static int encodePic( double[] data, int dataSize, byte[] result ) { long count; int ri = 0; int hbi; byte halfBytes[] = new byte[10]; int halfByteCount = 0; //printf("Encoding %d doubles\n", (int)dataSize); for (int i = 0; i < dataSize; i++) { count = (long) (data[i] + 0.5); halfByteCount += encodeInt(count, halfBytes, halfByteCount); for (hbi = 1; hbi < halfByteCount; hbi += 2) { result[ri++] = (byte) ((halfBytes[hbi - 1] << 4) | (halfBytes[hbi] & 0xf)); } if (halfByteCount % 2 != 0) { halfBytes[0] = halfBytes[halfByteCount - 1]; halfByteCount = 1; } else { halfByteCount = 0; } } if (halfByteCount == 1) { result[ri++] = (byte) (halfBytes[0] << 4); } return ri; }
Encodes ion counts by simply rounding to the nearest 4 byte integer, and compressing each integer with encodeInt. <p> The handleable range is therefore 0 -&gt; 4294967294. The resulting binary is maximally dataSize * 5 bytes, but much less if the data is close to 0 on average. @param data array of doubles to be encoded @param dataSize number of doubles from data to encode @param result array where resulting bytes should be stored @return the number of encoded bytes
public static int decodePic( byte[] data, int dataSize, double[] result ) { int ri = 0; long count; IntDecoder dec = new IntDecoder(data, 0); while (dec.pos < dataSize) { if (dec.pos == (dataSize - 1) && dec.half) { if ((data[dec.pos] & 0xf) != 0x8) { break; } } count = dec.next(); result[ri++] = count; } return ri; }
Decodes data encoded by encodePic. <p> The result vector is guaranteed to be shorter than or equal to |data| * 2. <p> Note that this method may throw an ArrayIndexOutOfBoundsException if it deems the input data to be corrupt, i.e. if the last encoded int does not use the last byte in the data. In addition, the last encoded int needs to use either the last halfbyte, or the second last followed by a 0x0 halfbyte. @param data array of bytes to be decoded (needs a memory-contiguous representation) @param dataSize number of bytes from data to decode @param result array where resulting doubles should be stored @return the number of decoded doubles
public static double optimalSlofFixedPoint( double[] data, int dataSize ) { if (dataSize == 0) { return 0; } double maxDouble = 1; double x; double fp; for (int i = 0; i < dataSize; i++) { x = Math.log(data[i] + 1); maxDouble = Math.max(maxDouble, x); } fp = Math.floor(0xFFFF / maxDouble); return fp; }
Computes the optimal fixed point scaling factor for encodeSlof, i.e. the largest factor for which log(d + 1) * fixedPoint still fits an unsigned short for all values in data. @param data array of doubles to be encoded @param dataSize number of doubles from data to consider @return the optimal fixed point, or 0 if dataSize is 0
public static int encodeSlof( double[] data, int dataSize, byte[] result, double fixedPoint ) { int x; int ri = 8; encodeFixedPoint(fixedPoint, result); for (int i = 0; i < dataSize; i++) { x = (int) (Math.log(data[i] + 1) * fixedPoint + 0.5); result[ri++] = (byte) (0xff & x); result[ri++] = (byte) (x >> 8); } return ri; }
Encodes ion counts by taking the natural logarithm, and storing a fixed point representation of this. This is calculated as <p> unsigned short fp = log(d+1) * fixedPoint + 0.5 <p> the result vector is exactly |data| * 2 + 8 bytes long @param data array of doubles to be encoded @param dataSize number of doubles from data to encode @param result array where resulting bytes should be stored @param fixedPoint the scaling factor used for getting the fixed point representation. This is stored in the binary and automatically extracted on decoding. @return the number of encoded bytes
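A numeric sketch of the slof transform and its inverse; optimalSlofFixedPoint is shown above, and its placement on the MSNumpress class is an assumption:

double[] intensities = {1000.0};
double fixedPoint = MSNumpress.optimalSlofFixedPoint(intensities, 1); // floor(0xFFFF / log(1001))
int fp = (int) (Math.log(1000.0 + 1) * fixedPoint + 0.5);             // the stored unsigned short
double restored = Math.exp(fp / fixedPoint) - 1;                      // ~1000: relative, not absolute, precision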
public static int decodeSlof( byte[] data, int dataSize, double[] result ) { int x; int ri = 0; if (dataSize < 8) { return -1; } double fixedPoint = decodeFixedPoint(data); if (dataSize % 2 != 0) { return -1; } for (int i = 8; i < dataSize; i += 2) { x = (0xff & data[i]) | ((0xff & data[i + 1]) << 8); result[ri++] = Math.exp(((double) (0xffff & x)) / fixedPoint) - 1; } return ri; }
Decodes data encoded by encodeSlof. <p> The result vector will be exactly (|data| - 8) / 2 doubles. @param data array of bytes to be decoded (needs a memory-contiguous representation) @param dataSize number of bytes from data to decode @param result array where resulting doubles should be stored @return the number of decoded doubles, or -1 if there is a problem decoding
@Override public final void cacheMoveEvaluation(Move<?> move, Evaluation evaluation) { evaluatedMove = move; this.evaluation = evaluation; }
Cache the given evaluation, discarding any previously cached evaluations. @param move move applied to the current solution @param evaluation evaluation of obtained neighbour
@Override public final Evaluation getCachedMoveEvaluation(Move<?> move) { if(evaluatedMove == null || !evaluatedMove.equals(move)){ // cache miss return null; } else { // cache hit return evaluation; } }
Retrieve a cached evaluation, if still available. If the evaluation of any other move has been cached at a later point in time, the value for this move will have been overwritten. @param move move applied to the current solution @return cached evaluation of the obtained neighbour, if available, <code>null</code> if not
@Override public final void cacheMoveValidation(Move<?> move, Validation validation) { validatedMove = move; this.validation = validation; }
Cache validation of the given move, discarding any previously cached value. @param move move applied to the current solution @param validation validation of obtained neighbour
@Override public final Validation getCachedMoveValidation(Move<?> move) { if(validatedMove == null || !validatedMove.equals(move)){ // cache miss return null; } else { // cache hit return validation; } }
Retrieve a cached validation, if still available. If the validation of any other move has been cached at a later point in time, the value for this move will have been overwritten. @param move move applied to the current solution @return cached validation of the obtained neighbour, if available, <code>null</code> if not
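A sketch of the single-slot semantics shared by the evaluation and validation caches above (variable names are illustrative):

cache.cacheMoveEvaluation(moveA, evalA);
cache.getCachedMoveEvaluation(moveA); // evalA: cache hit
cache.cacheMoveEvaluation(moveB, evalB);
cache.getCachedMoveEvaluation(moveA); // null: slot was overwritten when moveB was cached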
@Override public final void clear() { evaluatedMove = null; evaluation = null; validatedMove = null; validation = null; }
Clear all cached values.
@Override protected void updateCurrentSolution(SolutionType solution, Evaluation evaluation, Validation validation){ // call super super.updateCurrentSolution(solution, evaluation, validation); // clear evaluated move cache if(cache != null){ cache.clear(); } }
When updating the current solution in a neighbourhood search, the evaluated move cache is cleared because it is no longer valid for the new current solution. @param solution new current solution @param evaluation evaluation of new current solution @param validation validation of new current solution
protected Evaluation evaluate(Move<? super SolutionType> move){ Evaluation eval = null; // check cache if(cache != null){ eval = cache.getCachedMoveEvaluation(move); } if(eval != null){ // cache hit: return cached value return eval; } else { // cache miss: evaluate and cache eval = getProblem().evaluate(move, getCurrentSolution(), getCurrentSolutionEvaluation()); if(cache != null){ cache.cacheMoveEvaluation(move, eval); } return eval; } }
Evaluates a move to be applied to the current solution. If this move has been evaluated before and the obtained evaluation is still available in the cache, the cached evaluation will be returned. Else, the evaluation will be computed and offered to the cache. @param move move to be applied to the current solution @return evaluation of obtained neighbour, possibly retrieved from the evaluated move cache
protected Validation validate(Move<? super SolutionType> move){ Validation val = null; // check cache if(cache != null){ val = cache.getCachedMoveValidation(move); } if(val != null){ // cache hit: return cached value return val; } else { // cache miss: validate and cache val = getProblem().validate(move, getCurrentSolution(), getCurrentSolutionValidation()); if(cache != null){ cache.cacheMoveValidation(move, val); } return val; } }
Validates a move to be applied to the current solution. If this move has been validated before and the obtained validation is still available in the cache, the cached validation will be returned. Else, the validation will be computed and offered to the cache. @param move move to be applied to the current solution @return validation of obtained neighbour, possibly retrieved from the evaluated move cache
protected boolean isImprovement(Move<? super SolutionType> move){ return move != null && validate(move).passed() && (!getCurrentSolutionValidation().passed() || computeDelta(evaluate(move), getCurrentSolutionEvaluation()) > 0); }
<p> Checks whether applying the given move to the current solution yields a valid improvement. An improvement is made if and only if (1) the given move is not <code>null</code>, (2) the move is valid, and (3) the obtained neighbour has a better evaluation than the current solution or the current solution is invalid. </p> <p> Note that computed values are cached to prevent multiple evaluations or validations of the same move. </p> @param move move to be applied to the current solution @return <code>true</code> if applying this move yields a valid improvement
@SafeVarargs protected final Move<? super SolutionType> getBestMove(Collection<? extends Move<? super SolutionType>> moves, boolean requireImprovement, Predicate<? super Move<? super SolutionType>>... filters){ return this.getBestMove(moves, requireImprovement, false, filters); }
<p> Get the best valid move among a collection of possible moves. The best move is the one yielding the largest delta (see {@link #computeDelta(Evaluation, Evaluation)}) when being applied to the current solution, from all valid moves. </p> <p> If <code>requireImprovement</code> is set to <code>true</code>, only moves that improve the current solution are considered, i.e. moves that yield a positive delta (unless the current solution is invalid, then all valid moves are improvements). Any number of additional filters can be specified, where moves are only admitted if they pass through all filters. Each filter is a predicate that should return <code>true</code> if a given move is to be considered. If any filter returns <code>false</code> for a specific move, this move is discarded. </p> <p> Returns <code>null</code> if no move is found that satisfies all conditions. </p> <p> Note that all computed evaluations and validations are cached. Before returning the chosen move, if any, its evaluation and validation are re-cached to maximize the probability that these values will remain available in the cache for later retrieval. </p> @param moves collection of possible moves @param requireImprovement if set to <code>true</code>, only improving moves are considered @param filters additional move filters @return best valid move, may be <code>null</code>
@SafeVarargs protected final Move<? super SolutionType> getBestMove(Collection<? extends Move<? super SolutionType>> moves, boolean requireImprovement, boolean acceptFirstImprovement, Predicate<? super Move<? super SolutionType>>... filters){ // track the chosen move Move<? super SolutionType> chosenMove = null; // track evaluation, validation and delta of chosen move double chosenMoveDelta = -Double.MAX_VALUE; Evaluation chosenMoveEvaluation = null; Validation chosenMoveValidation = null; // define variables for metadata of current move double curMoveDelta; Evaluation curMoveEvaluation; Validation curMoveValidation; // iterate over all moves Iterator<? extends Move<? super SolutionType>> it = moves.iterator(); while ( it.hasNext() // continue as long as there are more moves && !(acceptFirstImprovement && isImprovement(chosenMove)) // if requested, accept first improvement ){ Move<? super SolutionType> curMove = it.next(); if (Arrays.stream(filters).allMatch(filter -> filter.test(curMove))) { curMoveValidation = validate(curMove); if (curMoveValidation.passed()) { curMoveEvaluation = evaluate(curMove); curMoveDelta = computeDelta(curMoveEvaluation, getCurrentSolutionEvaluation()); if (curMoveDelta > chosenMoveDelta // found better move? && (!requireImprovement || isImprovement(curMove)) // if requested, ensure improvement ) { chosenMove = curMove; chosenMoveDelta = curMoveDelta; chosenMoveEvaluation = curMoveEvaluation; chosenMoveValidation = curMoveValidation; } } } } // re-cache the chosen move, if any if(cache != null && chosenMove != null){ cache.cacheMoveEvaluation(chosenMove, chosenMoveEvaluation); cache.cacheMoveValidation(chosenMove, chosenMoveValidation); } // return the chosen move return chosenMove; }
<p> Get the best valid move among a collection of possible moves. The best move is the one yielding the largest delta (see {@link #computeDelta(Evaluation, Evaluation)}) when being applied to the current solution, from all valid moves. </p> <p> If <code>requireImprovement</code> is set to <code>true</code>, only moves that improve the current solution are considered, i.e. moves that yield a positive delta (unless the current solution is invalid, then all valid moves are improvements). Any number of additional filters can be specified, where moves are only admitted if they pass through all filters. Each filter is a predicate that should return <code>true</code> if a given move is to be considered. If any filter returns <code>false</code> for a specific move, this move is discarded. </p> <p> If <code>acceptFirstImprovement</code> is <code>true</code>, the first encountered admissible move that yields an improvement is returned. If there are no admissible improvements, as usual, the best admissible move is returned, which, in this case, always yields a non-positive delta. This option is used for first descent strategies, as opposed to steepest descent strategies. </p> <p> Returns <code>null</code> if no move is found that satisfies all conditions. </p> <p> Note that all computed evaluations and validations are cached. Before returning the chosen move, if any, its evaluation and validation are re-cached to maximize the probability that these values will remain available in the cache for later retrieval. </p> @param moves collection of possible moves @param requireImprovement if set to <code>true</code>, only improving moves are considered @param acceptFirstImprovement if set to <code>true</code>, the first improvement is returned, if any @param filters additional move filters @return selected move, may be <code>null</code>
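A hedged sketch of a tabu search step assembled from the pieces in this section; neigh is a hypothetical neighbourhood:

Move<? super SolutionType> move = getBestMove(
        neigh.getAllMoves(getCurrentSolution()), // all candidate moves
        false,                                   // tabu search may also accept non-improving moves
        getTabuFilter());                        // discard tabu moves, with aspiration (see above)
if (move != null) {
    accept(move); // also registers the visited solution in the tabu memory
}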
protected boolean accept(Move<? super SolutionType> move){ // validate move (often retrieved from cache) Validation newValidation = validate(move); if(newValidation.passed()){ // evaluate move (often retrieved from cache) Evaluation newEvaluation = evaluate(move); // apply move to current solution (IMPORTANT: after evaluation/validation of the move!) move.apply(getCurrentSolution()); // update current solution and best solution updateCurrentAndBestSolution(getCurrentSolution(), newEvaluation, newValidation); // increase accepted move counter incNumAcceptedMoves(1); // update successful return true; } else { // update cancelled: invalid neighbour return false; } }
Accept the given move by applying it to the current solution. Updates the evaluation and validation of the current solution and checks whether a new best solution has been found. The updates only take place if the applied move yields a valid neighbour, else calling this method does not have any effect and <code>false</code> is returned. <p> After updating the current solution, the evaluated move cache is cleared as this cache is no longer valid for the new current solution. Furthermore, any local search listeners are informed and the number of accepted moves is updated. @param move accepted move to be applied to the current solution @return <code>true</code> if the update has been successfully performed, <code>false</code> if the update was canceled because the obtained neighbour is invalid
@Override public void validate() { int n = 0; try { for (TiffObject o : model.getImageIfds()) { IFD ifd = (IFD) o; IfdTags metadata = ifd.getMetadata(); validateMetadata(metadata); checkImage(ifd, n, metadata); n++; } } catch (Exception ex) { } }
Validates all image IFDs of the TIFF model.
public void validateMetadata(IfdTags metadata) { int prevTagId = 0; try { TiffTags.getTiffTags(); } catch (ReadTagsIOException e) { } for (TagValue ie : metadata.getTags()) { if (!TiffTags.tagMap.containsKey(ie.getId())) { validation.addWarning("Ignoring undefined tag id " + ie.getId(), "", "Metadata"); } else if (!TiffTags.tagTypes.containsKey(ie.getType())) { validation.addWarning("Ignoring unknown tag type " + ie.getType(), "", "Metadata"); } else { Tag t = TiffTags.getTag(ie.getId()); String stype = TiffTags.tagTypes.get(ie.getType()); if (ie.getId() == 320) { // Colormap length check long bps = 0; if (metadata.containsTagId(258)) bps = metadata.get(258).getFirstNumericValue(); long calc = 3 * (long) Math.pow(2, bps); if (calc != ie.getCardinality()) { validation.addError("Invalid cardinality for tag " + TiffTags.getTag(ie.getId()).getName() + "[" + ie.getCardinality() + "]", "Metadata", stype); } } try { // Cardinality check int card = Integer.parseInt(t.getCardinality()); if (card != ie.getCardinality()) validation.addError("Cardinality for tag " + TiffTags.getTag(ie.getId()).getName() + " must be " + card, "Metadata", ie.getCardinality()); } catch (Exception e) { // TODO: Deal with formulas? } } if (ie.getId() < prevTagId) { if (tagOrderTolerance > 0) validation.addWarning("Tags are not in ascending order", "", "Metadata"); else validation.addErrorLoc("Tags are not in ascending order", "Metadata"); } prevTagId = ie.getId(); } }
Validates that the IFD entries have correct types and cardinalities, as defined in the JSON tag configuration files. @param metadata the ifd metadata
public void checkImage(IFD ifd, int n, IfdTags metadata) { CheckCommonFields(ifd, n, metadata); if (!metadata.containsTagId(TiffTags.getTagId("PhotometricInterpretation"))) { validation.addErrorLoc("Missing Photometric Interpretation", "IFD" + n); } else if (metadata.get(TiffTags.getTagId("PhotometricInterpretation")).getValue().size() != 1) { validation.addErrorLoc("Invalid Photometric Interpretation", "IFD" + n); } else { photometric = (int) metadata.get(TiffTags.getTagId("PhotometricInterpretation")).getFirstNumericValue(); switch (photometric) { case 0: case 1: if (!metadata.containsTagId(TiffTags.getTagId("BitsPerSample")) || metadata.get(TiffTags.getTagId("BitsPerSample")).getFirstNumericValue() == 1) { type = ImageType.BILEVEL; CheckBilevelImage(metadata, n); } else { type = ImageType.GRAYSCALE; CheckGrayscaleImage(metadata, n); } break; case 2: type = ImageType.RGB; CheckRGBImage(metadata, n); break; case 3: type = ImageType.PALETTE; CheckPalleteImage(metadata, n); break; case 4: type = ImageType.TRANSPARENCY_MASK; CheckTransparencyMask(metadata, n); break; case 5: type = ImageType.CMYK; CheckCMYK(metadata, n); break; case 6: type = ImageType.YCbCr; CheckYCbCr(metadata, n); break; case 8: case 9: case 10: type = ImageType.CIELab; CheckCIELab(metadata, n); break; default: validation.addWarning("Unknown Photometric Interpretation", "" + photometric, "IFD" + n); break; } } }
Check if the tags that define the image are correct and consistent. @param ifd the ifd @param n the ifd number @param metadata the ifd metadata
private void CheckBilevelImage(IfdTags metadata, int n) { // Compression long comp = metadata.get(TiffTags.getTagId("Compression")).getFirstNumericValue(); // if (comp != 1 && comp != 2 && comp != 32773) if (comp < 1) validation.addError("Invalid Compression", "IFD" + n, comp); }
Check Bilevel Image. @param metadata the metadata @param n the IFD number
private void CheckGrayscaleImage(IfdTags metadata, int n) { // Bits per Sample long bps = metadata.get(TiffTags.getTagId("BitsPerSample")).getFirstNumericValue(); // if (bps != 4 && bps != 8) if (bps < 1) validation.addError("Invalid Bits per Sample", "IFD" + n, bps); // Compression long comp = metadata.get(TiffTags.getTagId("Compression")).getFirstNumericValue(); // if (comp != 1 && comp != 32773) if (comp < 1) validation.addError("Invalid Compression", "IFD" + n, comp); }
Check Grayscale Image. @param metadata the metadata @param n the IFD number
private void CheckPalleteImage(IfdTags metadata, int nifd) { // Color Map if (!metadata.containsTagId(TiffTags.getTagId("ColorMap"))) { validation.addErrorLoc("Missing Color Map", "IFD" + nifd); } else { int n = metadata.get(TiffTags.getTagId("ColorMap")).getCardinality(); if (n != 3 * (int) Math.pow(2, metadata.get(TiffTags.getTagId("BitsPerSample")) .getFirstNumericValue())) validation.addError("Incorrect Color Map Cardinality", "IFD" + nifd, metadata.get(320) .getCardinality()); } // Bits per Sample long bps = metadata.get(TiffTags.getTagId("BitsPerSample")).getFirstNumericValue(); if (bps != 4 && bps != 8) validation.addError("Invalid Bits per Sample", "IFD" + nifd, bps); // Compression long comp = metadata.get(TiffTags.getTagId("Compression")).getFirstNumericValue(); // if (comp != 1 && comp != 32773) if (comp < 1) validation.addError("Invalid Compression", "IFD" + nifd, comp); }
Check Palette Color Image. @param metadata the metadata @param nifd the IFD number
private void CheckTransparencyMask(IfdTags metadata, int n) { // Samples per pixel if (!metadata.containsTagId(TiffTags.getTagId("SamplesPerPixel"))) { validation.addErrorLoc("Missing Samples Per Pixel", "IFD" + n); } else { long spp = metadata.get(TiffTags.getTagId("SamplesPerPixel")).getFirstNumericValue(); if (spp != 1) { validation.addError("Invalid Samples Per Pixel", "IFD" + n, spp); } } // BitsPerSample if (!metadata.containsTagId(TiffTags.getTagId("BitsPerSample"))) { validation.addErrorLoc("Missing BitsPerSample", "IFD" + n); } else { long bps = metadata.get(TiffTags.getTagId("BitsPerSample")).getFirstNumericValue(); if (bps != 1) { validation.addError("Invalid BitsPerSample", "IFD" + n, bps); } } }
Check transparency mask. @param metadata the metadata @param n the ifd number
private void CheckCMYK(IfdTags metadata, int n) { // Samples per pixel if (!metadata.containsTagId(TiffTags.getTagId("SamplesPerPixel"))) { validation.addErrorLoc("Missing Samples Per Pixel", "IFD" + n); } // BitsPerSample if (!metadata.containsTagId(TiffTags.getTagId("BitsPerSample"))) { validation.addErrorLoc("Missing BitsPerSample", "IFD" + n); } }
Check CMYK. @param metadata the metadata @param n the ifd number
private void CheckYCbCr(IfdTags metadata, int n) { // Samples per pixel if (!metadata.containsTagId(TiffTags.getTagId("SamplesPerPixel"))) { validation.addErrorLoc("Missing Samples Per Pixel", "IFD" + n); } else { long spp = metadata.get(TiffTags.getTagId("SamplesPerPixel")).getFirstNumericValue(); if (spp != 3) { validation.addError("Invalid Samples Per Pixel", "IFD" + n, spp); } } // BitsPerSample if (!metadata.containsTagId(TiffTags.getTagId("BitsPerSample"))) { validation.addErrorLoc("Missing BitsPerSample", "IFD" + n); } else { for (abstractTiffType vi : metadata.get(TiffTags.getTagId("BitsPerSample")).getValue()) { if (vi.toInt() != 8) { validation.addError("Invalid BitsPerSample", "IFD" + n, vi.toInt()); break; } } } // Compression // long comp = metadata.get(TiffTags.getTagId("Compression")).getFirstNumericValue(); // if (comp != 1 && comp != 5 && comp != 6) // validation.addError("Invalid Compression", comp); // if (!metadata.containsTagId(TiffTags.getTagId("ReferenceBlackWhite"))) // validation.addError("Missing ReferenceBlackWhite"); // if (!metadata.containsTagId(TiffTags.getTagId("YCbCrCoefficients"))) // validation.addError("Missing YCbCr Coefficients"); // if (!metadata.containsTagId(TiffTags.getTagId("YCbCrSubSampling"))) // validation.addError("Missing YCbCr SubSampling"); // if (!metadata.containsTagId(TiffTags.getTagId("YCbCrPositioning"))) // validation.addError("Missing YCbCr Positioning"); }
Check YCbCr. @param metadata the metadata @param n the IFD number
private void CheckCIELab(IfdTags metadata, int n) { // BitsPerSample if (!metadata.containsTagId(TiffTags.getTagId("BitsPerSample"))) { validation.addErrorLoc("Missing BitsPerSample", "IFD" + n); } else { for (abstractTiffType vi : metadata.get(TiffTags.getTagId("BitsPerSample")).getValue()) { if (vi.toInt() != 8) { validation.addError("Invalid BitsPerSample", "IFD" + n, vi.toInt()); break; } } } }
Check CIELab. @param metadata the metadata @param n the IFD number
private void CheckRGBImage(IfdTags metadata, int n) { // Samples per Pixel long samples = metadata.get(TiffTags.getTagId("SamplesPerPixel")).getFirstNumericValue(); if (samples < 3) validation.addError("Invalid Samples per Pixel", "IFD" + n, samples); // Compression long comp = metadata.get(TiffTags.getTagId("Compression")).getFirstNumericValue(); // if (comp != 1 && comp != 32773) if (comp < 1) validation.addError("Invalid Compression", "IFD" + n, comp); }
Check RGB Image. @param metadata the metadata @param n the IFD number
private void CheckCommonFields(IFD ifd, int n, IfdTags metadata) { int id; // Width tag is mandatory id = TiffTags.getTagId("ImageWidth"); if (!metadata.containsTagId(id)) validation.addError("Missing required field", "IFD" + n, TiffTags.getTag(id).getName()); else { long val = metadata.get(id).getFirstNumericValue(); if (val <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, val); } // Height tag is mandatory id = TiffTags.getTagId("ImageLength"); if (!metadata.containsTagId(id)) validation.addError("Missing required field", "IFD" + n, TiffTags.getTag(id).getName()); else { long val = metadata.get(id).getFirstNumericValue(); if (val <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, val); } // Check Resolution Unit id = TiffTags.getTagId("ResolutionUnit"); if (!metadata.containsTagId(id)) { // validation.addError("Missing required field", TiffTags.getTag(id).getName()); } else { long val = metadata.get(id).getFirstNumericValue(); if (val != 1 && val != 2 && val != 3) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, val); } // Check XResolution id = TiffTags.getTagId("XResolution"); if (!metadata.containsTagId(id)) { // validation.addError("Missing required field", TiffTags.getTag(id).name); } else { float val = ((Rational) metadata.get(id).getValue().get(0)).getFloatValue(); if (val <= 0f) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, val); } // Check YResolution id = TiffTags.getTagId("YResolution"); if (!metadata.containsTagId(id)) { // validation.addError("Missing required field", TiffTags.getTag(id).name); } else { float val = ((Rational) metadata.get(id).getValue().get(0)).getFloatValue(); if (val <= 0f) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, val); } // Check Planar Configuration id = TiffTags.getTagId("PlanarConfiguration"); if (!metadata.containsTagId(id)) { // validation.addError("Missing required field", TiffTags.getTag(id).name); } else { long val = metadata.get(id).getFirstNumericValue(); if (val != 1 && val != 2) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, val); } // Check Orientation id = TiffTags.getTagId("Orientation"); if (!metadata.containsTagId(id)) { // validation.addError("Missing required field", TiffTags.getTag(id).name); } else { long val = metadata.get(id).getFirstNumericValue(); if (val <= 0 || val > 8) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, val); } // Check whether the image is stored in tiles or strips strips = ifd.hasStrips(); tiles = ifd.hasTiles(); if (!strips && !tiles) { validation.addErrorLoc("Missing image organization tags", "IFD" + n); validation.setFatalError(true, "Missing image organization tags"); } else if (strips && tiles) validation.addErrorLoc("Image in both strips and tiles", "IFD" + n); else if (strips) { CheckStrips(metadata, n); } else if (tiles) { CheckTiles(ifd, metadata, n); } // Check pixel samples bits if (metadata.containsTagId(TiffTags.getTagId("BitsPerSample")) && metadata.containsTagId(TiffTags.getTagId("SamplesPerPixel"))) { long spp = metadata.get(TiffTags.getTagId("SamplesPerPixel")).getFirstNumericValue(); int bps = metadata.get(TiffTags.getTagId("BitsPerSample")).getValue().size(); if (spp != bps) { validation.addErrorLoc("Samples per Pixel and Bits per Sample count do not match", "IFD" + n); if (bps == 1) { // TODO: Tolerate and proceed as if the BitsPerSample tag had a count equal to the SamplesPerPixel tag value, and with all values equal to the single value actually given } } if (metadata.containsTagId(TiffTags.getTagId("ExtraSamples"))) { int ext = metadata.get(TiffTags.getTagId("ExtraSamples")).getValue().size(); if (ext + 3 != bps) { validation.addError("Incorrect Extra Samples Count", "IFD" + n, ext); } else if (ext > 0 && bps <= 3) { validation.addError("Unnecessary Extra Samples", "IFD" + n, ext); } } if (bps > 1) { TagValue lbps = metadata.get(TiffTags.getTagId("BitsPerSample")); if (lbps == null || lbps.getValue() == null) { validation.addErrorLoc("Invalid Bits per Sample", "IFD" + n); } else { boolean distinct_bps_samples = false; for (int i = 1; i < lbps.getCardinality(); i++) { if (lbps.getValue().get(i).toInt() != lbps.getValue().get(i - 1).toInt()) distinct_bps_samples = true; } if (distinct_bps_samples) validation.addErrorLoc("Distinct Bits per Sample values", "IFD" + n); } } } }
Checks the common baseline TIFF fields of an IFD: image dimensions, resolution, planar configuration, orientation, strip/tile organization, and per-sample bit depths. @param ifd the IFD @param n the IFD number @param metadata the IFD metadata
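The TODO in the method above leaves the tolerant recovery for a single-valued BitsPerSample tag unimplemented. A minimal sketch of what that recovery could look like, using plain arrays because the exact TagValue mutation API is not shown here (expandBitsPerSample is a hypothetical helper):

import java.util.Arrays;

// Hypothetical helper: proceed as if BitsPerSample had SamplesPerPixel
// entries, all equal to the single value actually given.
static int[] expandBitsPerSample(int samplesPerPixel, int[] bitsPerSample) {
    if (bitsPerSample.length != 1 || samplesPerPixel < 1) {
        return bitsPerSample; // only the single-value case is tolerated
    }
    int[] expanded = new int[samplesPerPixel];
    Arrays.fill(expanded, bitsPerSample[0]);
    return expanded;
}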
private void CheckStrips(IfdTags metadata, int n) { long offset; int id; // Strip offsets id = TiffTags.getTagId("StripOffsets"); offset = metadata.get(id).getFirstNumericValue(); int nso = metadata.get(id).getCardinality(); if (offset <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, offset); // Strip Byte Counts id = TiffTags.getTagId("StripBYTECount"); offset = metadata.get(id).getFirstNumericValue(); int nsc = metadata.get(id).getCardinality(); if (offset <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, offset); if (nso != nsc) { validation.addErrorLoc("Inconsistent strip lengths", "IFD" + n); } int pixelSize = 0; for (int i = 0; i < metadata.get("BitsPerSample").getCardinality(); i++) { pixelSize += metadata.get("BitsPerSample").getValue().get(i).toInt(); } if (metadata.get("Compression").getFirstNumericValue() == 1 && pixelSize >= 8) { int calculatedImageLength = 0; for (int i = 0; i < nsc; i++) { calculatedImageLength += metadata.get(id).getValue().get(i).toInt(); } if (calculatedImageLength != metadata.get("ImageLength").getFirstNumericValue() * metadata.get("ImageWidth").getFirstNumericValue() * pixelSize / 8) { // validation.toString(); validation.addErrorLoc("Calculated and declared image size do not match", "IFD" + n); } } // Rows per Strip id = TiffTags.getTagId("RowsPerStrip"); if (!metadata.containsTagId(id)) { if (rowsPerStripTolerance > 0) validation.addWarning("Missing required field", TiffTags.getTag(id).getName(), "IFD" + n); else validation.addError("Missing required field", "IFD" + n, TiffTags.getTag(id).getName()); } else { offset = metadata.get(id).getFirstNumericValue(); if (offset <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, offset); } }
Check that the strips containing the image are well-formed. @param metadata the metadata @param n the IFD number
private void CheckTiles(IFD ifd, IfdTags metadata, int n) { long offset; int id; // Check Tile Offsets id = TiffTags.getTagId("TileOffsets"); offset = metadata.get(id).getFirstNumericValue(); int no = metadata.get(id).getCardinality(); if (offset <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, offset); // Check Tile Byte Counts id = TiffTags.getTagId("TileBYTECounts"); offset = metadata.get(id).getFirstNumericValue(); int nc = metadata.get(id).getCardinality(); if (offset <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, offset); if (no != nc) { validation.addErrorLoc("Inconsistent tile lengths", "IFD" + n); } // Check Tile Width long tileWidth = 0; id = TiffTags.getTagId("TileWidth"); if (!metadata.containsTagId(id)) validation.addErrorLoc("Missing required field for tiles " + TiffTags.getTag(id).getName(), "IFD" + n); else { tileWidth = metadata.get(id).getFirstNumericValue(); if (tileWidth <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, tileWidth); } // Check Tile Length id = TiffTags.getTagId("TileLength"); long tileLength = 0; if (!metadata.containsTagId(id)) validation.addErrorLoc("Missing required field for tiles " + TiffTags.getTag(id).getName(), "IFD" + n); else { tileLength = metadata.get(id).getFirstNumericValue(); if (tileLength <= 0) validation.addError("Invalid value for field " + TiffTags.getTag(id).getName(), "IFD" + n, tileLength); } long tilesPerImage = ((metadata.get(TiffTags.getTagId("ImageWidth")).getFirstNumericValue() + tileWidth - 1) / tileWidth) * ((metadata.get(TiffTags.getTagId("ImageLength")).getFirstNumericValue() + tileLength - 1) / tileLength); // Check Planar Configuration id = TiffTags.getTagId("PlanarConfiguration"); int idspp = TiffTags.getTagId("SamplesPerPixel"); if (metadata.containsTagId(id) && metadata.containsTagId(idspp)) { long planar = metadata.get(id).getFirstNumericValue(); long spp = metadata.get(idspp).getFirstNumericValue(); if (planar == 2) { long spp_tpi = spp * tilesPerImage; if (ifd.getImageTiles().getTiles().size() < spp_tpi) { validation.addErrorLoc("Insufficient tiles", "IFD" + n); } } } }
Check that the tiles containing the image are well-formed. @param ifd the ifd @param metadata the metadata @param n the ifd number
public String loadPreference(String key, String defaultValue) { String value = getSharedPreferences().getString(key, defaultValue); logLoad(key, value); return value; }
Retrieve a string value from the preferences. @param key The name of the preference to retrieve @param defaultValue Value to return if this preference does not exist @return the preference value if it exists, or defaultValue.
public Boolean loadPreferenceAsBoolean(String key, Boolean defaultValue) { Boolean value = defaultValue; if (hasPreference(key)) { value = getSharedPreferences().getBoolean(key, false); } logLoad(key, value); return value; }
Retrieve a boolean value from the preferences. @param key The name of the preference to retrieve @param defaultValue Value to return if this preference does not exist @return the preference value if it exists, or defaultValue.
public Long loadPreferenceAsLong(String key, Long defaultValue) { Long value = defaultValue; if (hasPreference(key)) { value = getSharedPreferences().getLong(key, 0L); } logLoad(key, value); return value; }
Retrieve a long value from the preferences. @param key The name of the preference to retrieve @param defaultValue Value to return if this preference does not exist @return the preference value if it exists, or defaultValue.
public Integer loadPreferenceAsInteger(String key, Integer defaultValue) { Integer value = defaultValue; if (hasPreference(key)) { value = getSharedPreferences().getInt(key, 0); } logLoad(key, value); return value; }
Retrieve an Integer value from the preferences. @param key The name of the preference to retrieve @param defaultValue Value to return if this preference does not exist @return the preference value if it exists, or defaultValue.
public Float loadPreferenceAsFloat(String key, Float defaultValue) { Float value = defaultValue; if (hasPreference(key)) { value = getSharedPreferences().getFloat(key, 0); } logLoad(key, value); return value; }
Retrieve a Float value from the preferences. @param key The name of the preference to retrieve @param defaultValue Value to return if this preference does not exist @return the preference value if it exists, or defaultValue.
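Taken together, these typed accessors form a null-safe facade over Android's SharedPreferences. A hedged usage sketch; the helper instance 'prefs' and all key names are illustrative, only the loader signatures above are taken as given:

// Illustrative keys; defaults are returned whenever a key is absent.
String userName = prefs.loadPreference("user_name", "anonymous");
Boolean tutorialSeen = prefs.loadPreferenceAsBoolean("tutorial_seen", Boolean.FALSE);
Long lastSyncMillis = prefs.loadPreferenceAsLong("last_sync_millis", 0L);
Integer launchCount = prefs.loadPreferenceAsInteger("launch_count", 0);
Float zoomLevel = prefs.loadPreferenceAsFloat("zoom_level", 1.0f);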
protected int mapIdRefToInternalScanNum(CharArray id) throws FileParsingException { String idStr = id.toString(); MZMLIndexElement byId = index.getById(idStr); if (byId == null) { String msg = String.format("Could not find a mapping from spectrum id" + " ref to an internal scan number for" + "\n\t file: %s" + "\n\t spectrum index of the spectrum in which the error occurred: #%d" + "\n\t idRef searched for: %s", source.getPath(), vars.spectrumIndex, idStr); throw new FileParsingException(msg); } return byId.getNumber(); }
Given a spectrum id reference (idRef), goes to the index and tries to find a mapping to an internal scan number. @param id the idRef of the spectrum @return the internal scan number @throws umich.ms.fileio.exceptions.FileParsingException in case the mapping can't be done
protected int mapRawNumToInternalScanNum(int spectrumIndex) throws FileParsingException { MZMLIndexElement byRawNum = index.getByRawNum(spectrumIndex); if (byRawNum == null) { String msg = String.format("Could not find a mapping from spectrum index" + " ref to an internal scan number for" + "\n\t file: %s" + "\n\t spectrum index searched for: #%d" + "\n\t spectrum index of the spectrum in which the error occurred: #%d" , source.getPath(), spectrumIndex, vars.spectrumIndex); throw new FileParsingException(msg); } return byRawNum.getNumber(); }
Given a raw scan number (spectrum index), goes to the index and tries to find a mapping to an internal scan number. @param spectrumIndex the raw spectrum index @return the internal scan number @throws umich.ms.fileio.exceptions.FileParsingException in case the mapping can't be done
public boolean refactorVariable(SymbolDefinition n, final String newName) { Map<String, SymbolDefinition> scope = n.getVariableDefinitions(); if (!scope.containsKey(newName)) { if (n.getUsages() != null) { List<SymbolReference> usages = new LinkedList<SymbolReference>(n.getUsages()); VoidVisitorAdapter<?> visitor = new VoidVisitorAdapter<Object>() { @Override public void visit(NameExpr nexpr, Object ctx) { Map<String, SymbolDefinition> innerScope = nexpr.getVariableDefinitions(); if (innerScope.containsKey(newName)) { nexpr.getParentNode().replaceChildNode(nexpr, new FieldAccessExpr(new ThisExpr(), newName)); } else { nexpr.getParentNode().replaceChildNode(nexpr, new NameExpr(newName)); } } @Override public void visit(FieldAccessExpr nexpr, Object ctx) { nexpr.getParentNode().replaceChildNode(nexpr, new FieldAccessExpr(nexpr.getScope(), nexpr.getTypeArgs(), newName)); } }; for (SymbolReference usage : usages) { Node aux = (Node) usage; aux.accept(visitor, null); } } return true; } return false; }
Generic method to rename a SymbolDefinition variable/parameter. @param n variable to rename. @param newName new name to set. @return if the rename procedure has been applied successfully.
public static List<Long> locate(List<byte[]> targets, List<POSITION> locations, InputStream is, long maxOffset) throws IOException { if (targets.isEmpty()) { throw new IllegalArgumentException("Targets argument can't be empty"); } if (locations.size() != targets.size()) { throw new IllegalArgumentException("Targets and Locations arguments must be of equal length"); } for (byte[] target : targets) { if (target.length == 0) { throw new IllegalArgumentException("Input Targets must be non-zero length"); } } if (maxOffset <= 0) { maxOffset = Long.MAX_VALUE; } long posSource = -1; int iRead; byte bRead; List<Long> result = new ArrayList<>(targets.size()); for (int i = 0; i < targets.size(); i++) { byte[] target = targets.get(i); int posTarget = 0; byte bTarget = target[posTarget]; while ((iRead = is.read()) >= 0) { posSource++; if (posSource > maxOffset) { // reached max allowed offset, returning what we have so far return result; } bRead = (byte) iRead; if (bRead != bTarget) { if (posTarget > 0) { posTarget = 0; bTarget = target[posTarget]; } continue; } else { posTarget++; if (posTarget == target.length) { // the end of target has been reached, add it to result switch (locations.get(i)) { case START: result.add(posSource - target.length + 1); break; case END: result.add(posSource); break; default: throw new IllegalArgumentException("Unsupported ELEMENT_LOCATION"); } // move to next target break; // break out of while(is.read()) } bTarget = target[posTarget]; continue; } } if (iRead < 0 && result.size() != targets.size()) { // reached EOF without finding all the targets in the input stream return Collections.emptyList(); } } return result; }
Locates specific sequences of bytes in the input stream. @param targets Sequences of bytes to be searched for. The returned list will be of the same size if all are found. @param locations For each target, whether to report the offset of its first (START) or last (END) byte. @param is The stream to search in; it won't be buffered, and it is left open, positioned right after the last occurrence of the last found target. @param maxOffset How far down the stream to search. If maxOffset <= 0, the search continues indefinitely, up to Long.MAX_VALUE bytes. @return Empty list if the exact sub-sequence was not found in the stream. An incomplete list of matches (possibly empty) if 'maxOffset' has been reached. Otherwise the byte offsets of all targets in the input stream. @throws IOException In case IO errors occur.
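A small self-contained usage sketch, assuming a static import of locate(...) and the POSITION enum from this class:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

static void locateDemo() throws IOException {
    byte[] data = "header<run>payload</run>trailer".getBytes(StandardCharsets.US_ASCII);
    List<byte[]> targets = Arrays.asList(
        "<run>".getBytes(StandardCharsets.US_ASCII),
        "</run>".getBytes(StandardCharsets.US_ASCII));
    List<POSITION> locations = Arrays.asList(POSITION.START, POSITION.END);
    List<Long> offsets = locate(targets, locations, new ByteArrayInputStream(data), -1);
    // offsets -> [6, 23]: "<run>" starts at byte 6, "</run>" ends at byte 23
}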
@Deprecated public static OffsetLength locate(String tag1, TAG_TYPE tag1Type, POSITION tag1Loc, String tag2, TAG_TYPE tag2Type, POSITION tag2Loc, long maxOffset, InputStream is) throws FileParsingException { if (tag1Type == TAG_TYPE.SELF_CLOSING || tag2Type == TAG_TYPE.SELF_CLOSING) { throw new IllegalArgumentException("Self-closing tags are not supported."); } LogHelper.setJavolutionLogLevelFatal(); long offsetLo = -1; long offsetHi = -1; try (BufferedInputStream bis = new BufferedInputStream(is)) { final XMLStreamReaderImpl xsr = new XMLStreamReaderImpl(); xsr.setInput(bis, StandardCharsets.UTF_8.name()); final XMLStreamReaderImpl.LocationImpl loc = xsr.getLocation(); int eventType; CharArray localName; // we're looking for the first tag1 and tag2 occurrences. do { eventType = xsr.next(); switch (eventType) { case XMLStreamConstants.START_ELEMENT: localName = xsr.getLocalName(); switch (tag1Type) { case OPENING: if (localName.equals(tag1)) { offsetLo = calcOffset(loc, tag1Loc); } break; default: break; } switch (tag2Type) { case OPENING: if (localName.equals(tag2)) { offsetHi = calcOffset(loc, tag2Loc); } break; default: break; } break; case XMLStreamConstants.END_ELEMENT: localName = xsr.getLocalName(); switch (tag1Type) { case CLOSING: if (localName.equals(tag1)) { offsetLo = calcOffset(loc, tag1Loc); } break; default: break; } switch (tag2Type) { case CLOSING: if (localName.equals(tag2)) { offsetHi = calcOffset(loc, tag2Loc); } break; default: break; } break; case XMLStreamConstants.CHARACTERS: break; } if (loc.getTotalCharsRead() > maxOffset) { throw new FileParsingException(String.format( "Could not locate tags '%s' and '%s' within first %d characters", tag1, tag2, maxOffset)); } } while (eventType != XMLStreamConstants.END_DOCUMENT && (offsetLo == -1 || offsetHi == -1)); } catch (XMLStreamException | IOException e) { throw new RunHeaderParsingException("Error when parsing MS run header info", e); } if (offsetLo == -1 || offsetHi == -1) { throw new FileParsingException(String.format( "Could not locate tags '%s' and '%s'.", tag1, tag2)); } return new OffsetLength(offsetLo, (int) (offsetHi - offsetLo)); }
Reads the run header from the file, locating the <b>character (!)</b> positions of the first {@code tag1} and {@code tag2} occurrences. @param tag1 The first tag, after which the run header starts @param tag1Type Whether the first tag is an opening or a closing tag @param tag1Loc Whether to take the location of the beginning or the end of the tag @param tag2 The tag up to which we need to parse @param tag2Type Whether the last tag is an opening or a closing tag @param tag2Loc Whether to take the location of the beginning or the end of the tag @param maxOffset How far in the file should we look? Values <= 0 are treated as infinity. @param is A stream to read from. It will be buffered internally @return The CHARACTER offset and length in CHARACTERS in the file. @throws umich.ms.fileio.exceptions.FileParsingException if the tags could not be found or for any IOException @deprecated Try using {@link #locate(List, List, InputStream, long)}. This method uses an XML parser internally, so it's 1) slower than the other method 2) its reporting of locations is in Characters, not Bytes!
public static boolean advanceReaderToNext(XMLStreamReader xsr, String tag) throws javax.xml.stream.XMLStreamException { if (tag == null) { throw new IllegalArgumentException("Tag name can't be null"); } if (xsr == null) { throw new IllegalArgumentException("Stream Reader can't be null"); } do { if (xsr.next() == javax.xml.stream.XMLStreamConstants.END_DOCUMENT) { return false; } } while (!(xsr.isStartElement() && xsr.getLocalName().equals(tag))); return true; }
Advances the Stream Reader to the next occurrence of a user-specified tag. @param xsr The reader to advance. @param tag The tag to advance to. No brackets, just the name. @return True if advanced successfully, false when the end of document was successfully reached. @throws javax.xml.stream.XMLStreamException In all cases other than described by 'return'.
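A hedged usage sketch, assuming a static import of advanceReaderToNext; the element and attribute names ("scan", "num") are illustrative only:

import java.io.InputStream;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;

// Visit every <scan> element in a stream.
static void forEachScan(InputStream is) throws XMLStreamException {
    XMLStreamReader xsr = XMLInputFactory.newFactory().createXMLStreamReader(is);
    while (advanceReaderToNext(xsr, "scan")) {
        String num = xsr.getAttributeValue(null, "num"); // attribute name assumed
        // ... process the scan element ...
    }
}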
public static boolean isIntentAvailable(Intent intent) { List<ResolveInfo> list = AbstractApplication.get().getPackageManager().queryIntentActivities(intent, PackageManager.MATCH_DEFAULT_ONLY); return !list.isEmpty(); }
Indicates whether the specified intent can be used. This method queries the package manager for installed packages that can respond to the specified intent. If no suitable package is found, this method returns false. @param intent The Intent to check for availability. @return True if the specified Intent can be sent and responded to, false otherwise.
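A hedged usage sketch: only offer a share action if some installed app can handle it. The enclosing helper method and chooser title are illustrative:

import android.content.Context;
import android.content.Intent;

static void shareText(Context context, String text) {
    Intent share = new Intent(Intent.ACTION_SEND);
    share.setType("text/plain");
    share.putExtra(Intent.EXTRA_TEXT, text);
    if (isIntentAvailable(share)) {
        context.startActivity(Intent.createChooser(share, "Share via"));
    }
}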
protected Set<Integer> getAddCandidates(SubsetSolution currentSolution){ // get set of candidate IDs for addition Set<Integer> addCandidates = currentSolution.getUnselectedIDs(); // remove fixed IDs, if any, from candidates if(fixedIDs != null && !fixedIDs.isEmpty()){ addCandidates = new LinkedHashSet<>(addCandidates); addCandidates.removeAll(fixedIDs); } return addCandidates; }
Infer the set of IDs that may be added to the selection in the given subset solution. If no IDs were fixed at construction this method simply returns the set of currently unselected IDs obtained by calling {@link SubsetSolution#getUnselectedIDs()}. Else, it returns a copy of this (immutable) set from which all fixed IDs have been removed. @param currentSolution current subset solution @return set of IDs that may be added
protected Set<Integer> getRemoveCandidates(SubsetSolution currentSolution){ // get set of candidate IDs for removal Set<Integer> removeCandidates = currentSolution.getSelectedIDs(); // remove fixed IDs, if any, from candidates if(fixedIDs != null && !fixedIDs.isEmpty()){ removeCandidates = new LinkedHashSet<>(removeCandidates); removeCandidates.removeAll(fixedIDs); } return removeCandidates; }
Infer the set of IDs that may be removed from the selection in the given subset solution. If no IDs were fixed at construction this method simply returns the set of currently selected IDs obtained by calling {@link SubsetSolution#getSelectedIDs()}. Else, it returns a copy of this (immutable) set from which all fixed IDs have been removed. @param currentSolution current subset solution @return set of IDs that may be removed
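A minimal sketch of how a neighbourhood could combine both candidate sets to propose a random swap; the enclosing instance and the Random source are assumed, and only select/deselect and the two candidate methods above are taken as given:

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.Set;

void randomSwap(SubsetSolution solution, Random rnd) {
    Set<Integer> add = getAddCandidates(solution);
    Set<Integer> remove = getRemoveCandidates(solution);
    if (!add.isEmpty() && !remove.isEmpty()) {
        List<Integer> addList = new ArrayList<>(add);
        List<Integer> removeList = new ArrayList<>(remove);
        // deselect one removable ID, then select one addable ID
        solution.deselect(removeList.get(rnd.nextInt(removeList.size())));
        solution.select(addList.get(rnd.nextInt(addList.size())));
    }
}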
public static String getString(int resId, Object... args) { return AbstractApplication.get().getString(resId, args); }
Returns a formatted string, using the localized resource as format and the supplied arguments @param resId The resource id to obtain the format @param args arguments to replace format specifiers @return The localized and formatted string
@Override public Result<MZXMLIndexElement> buildIndex(final IndexBuilder.Info info) throws Exception { Result<MZXMLIndexElement> result = new IndexBuilder.Result<>(info); int numOpeningScanTagsFound = 0; vars.reset(); XMLStreamReaderImpl reader = (pool == null) ? new XMLStreamReaderImpl() : pool.borrowObject(); try { reader.setInput(info.is, StandardCharsets.UTF_8.name()); LogHelper.setJavolutionLogLevelFatal(); final XMLStreamReaderImpl.LocationImpl location = reader.getLocation(); int eventType = XMLStreamConstants.END_DOCUMENT; CharArray localName, attr; Attributes attrs; do { // Read the next XML element try { eventType = reader.next(); } catch (XMLStreamException e) { if (e instanceof XMLUnexpectedEndTagException) { // it's ok to have unexpected closing tags eventType = reader.getEventType(); } else if (e instanceof XMLUnexpectedEndOfDocumentException) { // as we're reading arbitrary chunks of file, we will almost always finish parsing by hitting this condition break; } else { throw new FileParsingException(e); } } // Process the read event switch (eventType) { case XMLStreamConstants.START_ELEMENT: localName = reader.getLocalName(); attrs = reader.getAttributes(); if (localName.contentEquals(MZXMLMultiSpectraParser.TAG.SCAN.name)) { if (vars.offsetLo != null) { // this means we've encountered nested Spectrum tags long lastStartTagPos = location.getLastStartTagPos(); vars.length = (int) (vars.offsetLo - lastStartTagPos); addAndFlush(result, info.offsetInFile); } //tagScanStart(reader); vars.offsetLo = location.getLastStartTagPos(); try { vars.scanNumRaw = attrs.getValue(MZXMLMultiSpectraParser.ATTR.SCAN_NUM.name) .toInt(); } catch (NumberFormatException e) { throw new FileParsingException("Malformed scan number while building index", e); } } break; case XMLStreamConstants.CHARACTERS: break; case XMLStreamConstants.END_ELEMENT: localName = reader.getLocalName(); if (localName.contentEquals(MZXMLMultiSpectraParser.TAG.SCAN.name)) { vars.offsetHi = location.getTotalCharsRead(); addAndFlush(result, info.offsetInFile); } break; } } while (eventType != XMLStreamConstants.END_DOCUMENT); } finally { addAndFlush(result, info.offsetInFile); // we need to return the reader to the pool, if we borrowed it from there if (pool != null && reader != null) { pool.returnObject(reader); } } return result; }
Builds the index for one chunk of the file. For use with Executors; prefer submitting it as a task over calling this method directly. @param info info about offsets in the file and in the currently read buffer @return the result holding the index elements found in the chunk
public void setObjective(Objective<? super SolutionType, ? super DataType> objective) { // check not null if(objective == null){ throw new NullPointerException("Error while setting objective: null is not allowed."); } this.objective = objective; }
Set the objective function. Any objective designed for the solution and data types of the problem, or more general types, is accepted. The objective can not be <code>null</code>. @param objective objective function @throws NullPointerException if <code>objective</code> is <code>null</code>
public void setRandomSolutionGenerator(RandomSolutionGenerator<? extends SolutionType, ? super DataType> randomSolutionGenerator){ // check not null if(randomSolutionGenerator == null){ throw new NullPointerException("Error while setting random solution generator: null is not allowed"); } this.randomSolutionGenerator = randomSolutionGenerator; }
Set random solution generator. It is allowed for the generator to produce subtypes of the problem's solution type, requiring any supertype of the problem's data type. The generator can not be <code>null</code>. @param randomSolutionGenerator random solution generator @throws NullPointerException if <code>randomSolutionGenerator</code> is <code>null</code>
@Override public Validation validate(SolutionType solution){ if(mandatoryConstraints.isEmpty()){ // CASE 1: no mandatory constraints return SimpleValidation.PASSED; } else if (mandatoryConstraints.size() == 1){ // CASE 2: single mandatory constraint return mandatoryConstraints.get(0).validate(solution, data); } else { // CASE 3 (default): aggregate multiple constraint validations UnanimousValidation val = new UnanimousValidation(); mandatoryConstraints.stream() .allMatch(c -> { // validate solution against constraint c Validation cval = c.validate(solution, data); // add to unanimous validation val.addValidation(c, cval); // continue until one constraint is not satisfied return cval.passed(); }); return val; } }
<p> Validate a solution by checking all mandatory constraints. The solution will only pass validation if all mandatory constraints are satisfied. </p> <p> In case there are no mandatory constraints, this method always returns {@link SimpleValidation#PASSED}. If a single mandatory constraint has been specified, the corresponding validation is returned. In case of two or more constraints, an aggregated validation is constructed that only passes if all constraints are satisfied. Short-circuiting is applied: as soon as one violated constraint is found, the remaining constraints are not checked. </p> @param solution solution to validate @return aggregated validation
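To make the scheme above concrete, here is a minimal sketch of a mandatory constraint. The Constraint shape is inferred from the validate calls in this class, the delta-validation method is omitted, and a boolean SimpleValidation constructor is assumed alongside the PASSED constant:

// Hypothetical mandatory constraint: require at least 'min' selected IDs.
public class MinSelectionConstraint implements Constraint<SubsetSolution, Object> {
    private final int min;
    public MinSelectionConstraint(int min) { this.min = min; }
    @Override
    public Validation validate(SubsetSolution solution, Object data) {
        // passes only if enough IDs are currently selected
        return new SimpleValidation(solution.getSelectedIDs().size() >= min);
    }
}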
@Override public Validation validate(Move<? super SolutionType> move, SolutionType curSolution, Validation curValidation){ if(mandatoryConstraints.isEmpty()){ // CASE 1: no mandatory constraints return SimpleValidation.PASSED; } else if (mandatoryConstraints.size() == 1){ // CASE 2: single mandatory constraint return mandatoryConstraints.get(0).validate(move, curSolution, curValidation, data); } else { // CASE 3 (default): aggregate multiple constraint validations UnanimousValidation curUnanimousVal = (UnanimousValidation) curValidation; UnanimousValidation newUnanimousVal = new UnanimousValidation(); mandatoryConstraints.stream() .allMatch(c -> { // retrieve original validation produced by constraint c Validation curval = curUnanimousVal.getValidation(c); if(curval == null){ // current validation unknown: perform full validation // (can happen due to short-circuiting behaviour) curval = c.validate(curSolution, data); } // validate move against constraint c Validation newval = c.validate(move, curSolution, curval, data); // add to unanimous validation newUnanimousVal.addValidation(c, newval); // continue until one constraint is not satisfied return newval.passed(); }); return newUnanimousVal; } }
<p> Validate a move by checking all mandatory constraints (delta validation). The move will only pass validation if all mandatory constraints are satisfied. </p> <p> In case there are no mandatory constraints, this method always returns {@link SimpleValidation#PASSED}. If a single mandatory constraint has been specified, the corresponding delta validation is returned. In case of two or more constraints, an aggregated validation is constructed that only passes if all constraints are satisfied. Short-circuiting is applied: as soon as one violated constraint is found, the remaining constraints are not checked. </p> @param move move to validate @param curSolution current solution of a local search @param curValidation validation of current solution @throws IncompatibleDeltaValidationException if the provided delta validation of any mandatory constraint is not compatible with the received move type @return aggregated delta validation
public Collection<Constraint<? super SolutionType, ? super DataType>> getViolatedConstraints(SolutionType solution){ // return set with all violated constraints return Stream.concat(mandatoryConstraints.stream(), penalizingConstraints.stream()) .filter(c -> !c.validate(solution, data).passed()) .collect(Collectors.toSet()); }
Returns a collection of all violated constraints (both mandatory and penalizing). @param solution solution for which all violated constraints are determined @return collection of all violated constraints (mandatory and penalizing); possibly empty
@Override public Evaluation evaluate(SolutionType solution) { if(penalizingConstraints.isEmpty()){ // CASE 1: no penalizing constraints return objective.evaluate(solution, data); } else { // CASE 2 (default): aggregate evaluation and penalties Evaluation eval = objective.evaluate(solution, data); // initialize penalized evaluation object PenalizedEvaluation penEval = new PenalizedEvaluation(eval, isMinimizing()); // add penalties penalizingConstraints.forEach(pc -> penEval.addPenalizingValidation(pc, pc.validate(solution, data))); // return aggregated evaluation return penEval; } }
Evaluates a solution by taking into account both the evaluation calculated by the objective function and the penalizing constraints (if any). Penalties are assigned for any violated penalizing constraint, which are subtracted from the evaluation in case of maximization, and added to it in case of minimization. <p> If there are no penalizing constraints, this method returns the evaluation object obtained from applying the objective function to the given solution. If one or more penalizing constraints have been specified, a penalized evaluation is constructed taking into account both the main objective function evaluation and assigned penalties. @param solution solution to be evaluated @return aggregated evaluation taking into account both the objective function and penalizing constraints
@Override public Evaluation evaluate(Move<? super SolutionType> move, SolutionType curSolution, Evaluation curEvaluation){ if(penalizingConstraints.isEmpty()){ // CASE 1: no penalizing constraints -- directly apply delta return objective.evaluate(move, curSolution, curEvaluation, data); } else { // CASE 2 (default): penalizing constraint(s) -- extract components and apply deltas PenalizedEvaluation curPenalizedEval = (PenalizedEvaluation) curEvaluation; // retrieve current evaluation without penalties Evaluation curEval = curPenalizedEval.getEvaluation(); // perform delta evaluation Evaluation newEval = objective.evaluate(move, curSolution, curEval, data); // initialize new penalized evaluation PenalizedEvaluation newPenalizedEval = new PenalizedEvaluation(newEval, isMinimizing()); // perform delta validation for each penalizing constraint penalizingConstraints.forEach(pc -> { // retrieve current penalizing validation PenalizingValidation curVal = curPenalizedEval.getPenalizingValidation(pc); // delta validation PenalizingValidation newVal = pc.validate(move, curSolution, curVal, data); // add penalty newPenalizedEval.addPenalizingValidation(pc, newVal); }); return newPenalizedEval; } }
Evaluate a move (delta evaluation) by taking into account both the evaluation of the modified solution and the penalizing constraints (if any). Penalties are assigned for any violated penalizing constraint, which are subtracted from the evaluation in case of maximization, and added to it in case of minimization. <p> If there are no penalizing constraints, this method returns the delta evaluation obtained from the objective function. If one or more penalizing constraints have been specified, a penalized delta evaluation is constructed taking into account both the main objective function evaluation and assigned penalties. @param move move to evaluate @param curSolution current solution @param curEvaluation current evaluation @throws IncompatibleDeltaEvaluationException if the provided delta evaluation of the objective is not compatible with the received move type @throws IncompatibleDeltaValidationException if the provided delta validation of any penalizing constraint is not compatible with the received move type @return aggregated evaluation of modified solution, taking into account both the objective function and penalizing constraints
public void connect() { readyState = ReadyState.CONNECTING; try { if (webSocketHandler == null) { webSocketHandler = new WebSocketHandlerAdapter(); } // connectToServer throws DeploymentException, IOException container.connectToServer(new SimpleWebSocketClientEndpoint(), ClientEndpointConfig.Builder.create().build(), websocketURI); } catch (Exception e) { readyState = ReadyState.CLOSED; // keep the original exception as the cause throw new RuntimeException("could not establish connection", e); } }
Establishes the connection to the given WebSocket Server Address.
public void close() { readyState = ReadyState.CLOSING; try { webSocketSession.close(new CloseReason(CloseReason.CloseCodes.NORMAL_CLOSURE, null)); } catch (IOException e) { e.printStackTrace(); } }
Shuts down the current connection with a normal-closure close code.
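A hedged lifecycle sketch; the client class name and constructor are hypothetical, only connect() and close() are shown above:

import java.net.URI;

SimpleWebSocketClient client = new SimpleWebSocketClient(URI.create("ws://localhost:8080/ws"));
client.connect();
// ... exchange messages via the webSocketHandler callbacks ...
client.close();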
public void add(LCMSRange range) { Integer msLevel = range.getMsLevel(); DoubleRange mzRange = range.getMzRange(); Range<Integer> scanRange = range.getScanRange(); if (msLevel == null) { // add to the top level range set anyLvlSet.add(scanRange); // remove from all underlying sets for (Map.Entry<Integer, MsLevelRangeSet> lvlMapEntry : lvlMap.entrySet()) { MsLevelRangeSet msLevelRangeSet = lvlMapEntry.getValue(); msLevelRangeSet.removeFromAll(range); } return; } TreeRangeSet<Integer> scanRangeLeft = TreeRangeSet.create(); scanRangeLeft.add(scanRange); // start from the requested scan range scanRangeLeft.removeAll(anyLvlSet); if (scanRangeLeft.isEmpty()) { return; // if there's nothing left in the scan range, just stop } MsLevelRangeSet msLvlRanges = lvlMap.get(msLevel); if (msLvlRanges == null) { msLvlRanges = new MsLevelRangeSet(); lvlMap.put(msLevel, msLvlRanges); } if (mzRange == null) { // add to any-mz-range range set msLvlRanges.anyPrecursorSet.addAll(scanRangeLeft); // remove from all specific mz-range sets msLvlRanges.removeFromSpecific(range); return; } scanRangeLeft.removeAll(msLvlRanges.anyPrecursorSet); if (scanRangeLeft.isEmpty()) { return; } RangeSet<Integer> rangeSetForMzRange = msLvlRanges.rngMap.get(mzRange); if (rangeSetForMzRange == null) { rangeSetForMzRange = TreeRangeSet.create(); msLvlRanges.rngMap.put(mzRange, rangeSetForMzRange); } rangeSetForMzRange.addAll(scanRangeLeft); }
Adds a range to the set. The method gradually subtracts already-covered scan ranges from the input as it descends down the hierarchy (any ms level, then specific ms level, then specific precursor m/z range), so only the part that is not yet covered is stored.
public void subtract(LCMSRangeSet other) { anyLvlSet.removeAll(other.anyLvlSet); for (Map.Entry<Integer, MsLevelRangeSet> lvlMapEntry : lvlMap.entrySet()) { Integer msLevel = lvlMapEntry.getKey(); MsLevelRangeSet msLevelRangeSet = lvlMapEntry.getValue(); // remove any top level scan ranges from the "any precursor" map at // this ms level msLevelRangeSet.anyPrecursorSet.removeAll(other.anyLvlSet); // if the other range set had this level, remove entries contained in it MsLevelRangeSet otherMsLevelRangeSet = other.lvlMap.get(msLevel); if (otherMsLevelRangeSet != null) { msLevelRangeSet.anyPrecursorSet.removeAll(otherMsLevelRangeSet.anyPrecursorSet); } for (Map.Entry<DoubleRange, RangeSet<Integer>> rngMapEntry : msLevelRangeSet.rngMap .entrySet()) { if (rngMapEntry.getValue().isEmpty()) { continue; } DoubleRange mzRange = rngMapEntry.getKey(); RangeSet<Integer> rangeSet = rngMapEntry.getValue(); rangeSet.removeAll(other.anyLvlSet); if (!rangeSet.isEmpty() && otherMsLevelRangeSet != null) { rangeSet.removeAll(otherMsLevelRangeSet.anyPrecursorSet); } if (!rangeSet.isEmpty() && otherMsLevelRangeSet != null) { RangeSet<Integer> otherRangeSetAtMsLevelForPrecursorRange = otherMsLevelRangeSet.rngMap .get(mzRange); if (otherRangeSetAtMsLevelForPrecursorRange != null) { rangeSet.removeAll(otherRangeSetAtMsLevelForPrecursorRange); } } } } }
Will modify the set. Intended usage - when unloading data in LCMSData, create one range set for data loaded by other users, then create a separate range set for the LCMSRange that you want to unload. Subtract the "loaded by others" from the range set you want to unload. Use this range set for unloading.
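A hedged sketch of that unloading workflow; the two LCMSRange variables are illustrative stand-ins for ranges obtained elsewhere:

// Ranges other users still need vs. the range we would like to unload.
LCMSRangeSet loadedByOthers = new LCMSRangeSet();
loadedByOthers.add(rangeStillInUse);
LCMSRangeSet toUnload = new LCMSRangeSet();
toUnload.add(rangeRequestedForUnload);
toUnload.subtract(loadedByOthers); // keep only what nobody else needs
// drive the actual unloading with 'toUnload'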
public void write(String filename) throws Exception { data = new TiffOutputStream(input); data.setByteOrder(byteOrder); try { data.create(filename); writeHeader(); writeIfds(); data.close(); } catch (Exception ex) { ex.printStackTrace(); if (data != null) { data.close(); } throw ex; } }
Writes the TIFF file (header and IFDs) to the given destination. @param filename the output filename @throws Exception if writing fails
public void writeHeader() throws IOException { if (byteOrder == ByteOrder.LITTLE_ENDIAN) { data.put((byte) 'I'); data.put((byte) 'I'); } else if (byteOrder == ByteOrder.BIG_ENDIAN) { data.put((byte) 'M'); data.put((byte) 'M'); } data.putShort((short) 42); }
Writes the header. @throws IOException Signals that an I/O exception has occurred.
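The header written here is completed by the first-IFD offset emitted in writeIfds() below, giving the classic 8-byte TIFF preamble. A quick way to sanity-check the output is to read the first bytes back; a hedged sketch, with an illustrative file path:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

static void checkTiffHeader(String path) throws IOException {
    byte[] h = Files.readAllBytes(Paths.get(path));
    if (h.length < 8) {
        throw new IOException("File too short to be a TIFF: " + path);
    }
    boolean le = h[0] == 'I' && h[1] == 'I'; // "II" = little-endian, "MM" = big-endian
    int magic = le ? (h[2] & 0xFF) | ((h[3] & 0xFF) << 8)
                   : ((h[2] & 0xFF) << 8) | (h[3] & 0xFF);
    if (magic != 42) { // 42 is the TIFF magic number
        throw new IOException("Not a classic TIFF file: " + path);
    }
}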
public void writeIfds() throws IOException { IFD first = model.getFirstIFD(); IFD current = first; if (current != null) { // First IFD offset data.putInt((int) data.position() + 4); } while (current != null) { writeIFD(current); current = current.getNextIFD(); } }
Writes all IFDs, starting with the offset of the first IFD. @throws IOException Signals that an I/O exception has occurred.
private int classifyTags(IFD ifd, ArrayList<TagValue> oversized, ArrayList<TagValue> undersized) { int tagValueSize = 4; int n = 0; for (TagValue tag : ifd.getMetadata().getTags()) { int tagsize = getTagSize(tag); if (tagsize > tagValueSize) { oversized.add(tag); } else { undersized.add(tag); } n++; } return n; }
Classifies the IFD tags into oversized ones (value does not fit into the 4-byte tag entry) and undersized ones. @param ifd the ifd @param oversized output list of oversized tags @param undersized output list of undersized tags @return the number of tags
public void writeIFD(IFD ifd) throws IOException { ArrayList<TagValue> oversizedTags = new ArrayList<TagValue>(); ArrayList<TagValue> undersizedTags = new ArrayList<TagValue>(); int ntags = classifyTags(ifd, oversizedTags, undersizedTags); HashMap<Integer, Integer> pointers = new HashMap<Integer, Integer>(); // Write IFD entries data.putShort((short) ntags); ArrayList<TagValue> ltags = ifd.getTags().getTags(); Collections.sort(ltags, new Comparator<TagValue>() { @Override public int compare(TagValue a1, TagValue a2) { return a1.getId()-a2.getId(); } }); for (TagValue tv : ltags) { int n = tv.getCardinality(); int id = tv.getId(); int tagtype = tv.getType(); data.putShort((short) id); data.putShort((short) tagtype); if (id == 700) { if (tv.getValue().size() > 0) n = ((XMP) tv.getValue().get(0)).getLength(); } if (id == 34675) n = tv.getReadlength(); if (id == 33723) { if (tv.getValue().size() > 0) { abstractTiffType att = tv.getValue().get(0); if (att instanceof IPTC) { IPTC iptc = (IPTC) att; n = (iptc).getLength(); //n = iptc.getOriginal().size(); } else n = tv.getCardinality(); } } data.putInt(n); pointers.put(id, (int) data.position()); int startpos = (int) data.position(); if (oversizedTags.contains(tv)) { data.putInt(1); // Any number, later we will update the pointer } else { writeTagValue(tv); while ((int) data.position() - startpos < 4) data.put((byte) 0); } } long positionNextIfdOffset = data.position(); data.putInt(0); // No next IFD (later we will update this value if there is a next IFD) // Update pointers and write tag values for (TagValue tv : oversizedTags) { // Update pointer of the tag entry int currentPosition = (int) data.position(); if (currentPosition % 2 != 0) currentPosition++; // Word alignment check data.seek(pointers.get(tv.getId())); data.putInt(currentPosition); data.seek(currentPosition); writeTagValue(tv); } if (ifd.hasStrips()) { long stripOffsetsPointer = data.position(); if (stripOffsetsPointer % 2 != 0) { // Correct word alignment data.put((byte) 0); stripOffsetsPointer = (int) data.position(); } // Write strips and return their offsets ArrayList<Integer> offsets = writeStripData(ifd); if (offsets.size() > 1) { // Write offsets stripOffsetsPointer = data.position(); for (int off : offsets) { data.putInt(off); } } // Update pointer of the strip offsets int currentPosition = (int) data.position(); data.seek(pointers.get(273)); data.putInt((int) stripOffsetsPointer); data.seek(currentPosition); } else if (ifd.hasTiles()) { long tilesOffsetsPointer = data.position(); if (tilesOffsetsPointer % 2 != 0) { // Correct word alignment data.put((byte) 0); tilesOffsetsPointer = (int) data.position(); } ArrayList<Integer> offsets = writeTileData(ifd); if (offsets.size() > 1) { // Write offsets tilesOffsetsPointer = data.position(); for (int off : offsets) { data.putInt(off); } } // Update pointer of the tag entry int currentPosition = (int) data.position(); data.seek(pointers.get(324)); data.putInt((int) tilesOffsetsPointer); data.seek(currentPosition); } if (ifd.hasNextIFD()) { // Update pointer of the next IFD offset int currentPosition = (int) data.position(); if (currentPosition % 2 != 0) currentPosition++; // Word alignment check data.seek((int)positionNextIfdOffset); data.putInt(currentPosition); data.seek(currentPosition); } }
Write IFD data. @param ifd the ifd @throws IOException Signals that an I/O exception has occurred.
private int getTagSize(TagValue tag) { int n = tag.getCardinality(); int id = tag.getId(); // Calculate tag size int type = tag.getType(); if (id == 330) { // SubIFD n = 1000; } if (id == 700) { // XMP if (tag.getValue().size() > 0) n = tag.getValue().get(0).toString().length(); } if (id == 33723) { // IPTC n = tag.getReadlength(); } if (id == 34665) { // EXIF n = 1000; } if (id == 34675) { // ICC n = tag.getReadlength(); } int typeSize = TiffTags.getTypeSize(type); int tagSize = typeSize * n; return tagSize; }
Gets the tag size. @param tag the tag @return the tag size