file_name
stringlengths 6
86
| file_path
stringlengths 45
249
| content
stringlengths 47
6.26M
| file_size
int64 47
6.26M
| language
stringclasses 1
value | extension
stringclasses 1
value | repo_name
stringclasses 767
values | repo_stars
int64 8
14.4k
| repo_forks
int64 0
1.17k
| repo_open_issues
int64 0
788
| repo_created_at
stringclasses 767
values | repo_pushed_at
stringclasses 767
values |
---|---|---|---|---|---|---|---|---|---|---|---|
ExtractBiome.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/ExtractBiome.java | /*
* ExtractBiome.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.director.ProjectManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.parse.NexusStreamParser;
import jloda.util.progress.ProgressSilent;
import megan.commands.SaveCommand;
import megan.commands.algorithms.ComputeBiomeCommand;
import megan.core.Director;
import megan.core.Document;
import megan.core.MeganFile;
import megan.main.MeganProperties;
import megan.viewer.TaxonomyData;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
/**
 * Extracts a biome (total, core or rare) from a MEGAN comparison file.
 * Daniel Huson, 8.2018
 */
public class ExtractBiome {
    /** the supported biome types */
    public enum Mode {total, core, rare}

    /**
     * Main program: runs the biome extraction and reports total time and peak memory usage.
     * Exits with status 0 on success, 1 on any failure.
     *
     * @param args command-line arguments (see {@link #run(String[])})
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("ExtractBiome");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            PeakMemoryUsageMonitor.start();
            (new ExtractBiome()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1); // non-zero exit status signals failure to calling scripts
        }
    }

    /**
     * Parses the command line, loads the input comparison file, computes the requested
     * biome by executing the corresponding MEGAN command, and saves the result.
     *
     * @param args command-line arguments
     * @throws Exception if argument parsing fails, a file cannot be read or written,
     *                   or the biome computation fails
     */
    private void run(String[] args) throws Exception {
        final var options = new ArgsOptions(args, this, "Extracts the total, core or rare biome from a MEGAN comparison file");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and Output:");
        final var inputFile = options.getOptionMandatory("-i", "in", "Input MEGAN comparison file (.megan file)", "");
        final var outputFile = options.getOption("-o", "out", "Output file", "biome.megan");

        options.comment("Options:");
        final Mode mode = StringUtils.valueOfIgnoreCase(Mode.class, options.getOption("-b", "biome", "Biome type to compute", Mode.values(), Mode.total.toString()));
        final String[] samplesToUseOption = options.getOption("-s", "samples", "Samples to use or 'ALL'", new String[]{"ALL"});
        final float sampleThreshold = (float) options.getOption("-stp", "sampleThresholdPercent", "Min or max percent of samples that class must be present in to be included in core or rare biome, resp.", 50.0);
        final float classThreshold = (float) options.getOption("-ctp", "classThresholdPercent", "Min percent of sample that reads assigned to class must achieve for class to be considered present in sample", 0.1);
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        if (mode == null) // valueOfIgnoreCase() returns null for an unrecognized mode name
            throw new UsageException("Unknown compare mode");
        if ((new File(inputFile)).equals(new File(outputFile)))
            throw new UsageException("Input file equals output file");

        TaxonomyData.load();

        // load the comparison document:
        final var doc = new Document();
        final var dir = new Director(doc);
        ProjectManager.addProject(dir, null);
        doc.setProgressListener(new ProgressSilent());
        doc.getMeganFile().setFile(inputFile, MeganFile.Type.MEGAN_SUMMARY_FILE);
        doc.loadMeganFile();

        if (doc.getDataTable().getTotalReads() > 0)
            doc.setNumberReads(doc.getDataTable().getTotalReads());
        else
            throw new IOException("File is either empty or format is too old: " + inputFile);

        // determine the samples to use: either every sample in the document, or the explicitly named ones
        final var selectedSamples = new ArrayList<String>();
        if (samplesToUseOption.length == 1 && samplesToUseOption[0].equalsIgnoreCase("ALL"))
            selectedSamples.addAll(doc.getSampleNames());
        else
            selectedSamples.addAll(Arrays.asList(samplesToUseOption));
        if (selectedSamples.isEmpty())
            throw new UsageException("No valid samples-to-use specified");

        // compute the biome by executing the corresponding MEGAN command:
        final var computeBiomeCommand = new ComputeBiomeCommand();
        computeBiomeCommand.setDir(dir);
        final var command = "compute biome=" + mode + " classThreshold=" + classThreshold + " sampleThreshold=" + sampleThreshold + " samples='"
                + StringUtils.toString(selectedSamples, "' '") + "';";
        computeBiomeCommand.apply(new NexusStreamParser(new StringReader(command)));

        // save the computed biome to the output file:
        final var newDir = computeBiomeCommand.getNewDir();
        final var newDoc = newDir.getDocument();
        newDoc.getMeganFile().setFile(outputFile, MeganFile.Type.MEGAN_SUMMARY_FILE);
        final var saveCommand = new SaveCommand();
        saveCommand.setDir(newDir);
        System.err.println("Saving to file: " + outputFile);
        saveCommand.apply(new NexusStreamParser(new StringReader("save file='" + outputFile + "';")));
    }
}
| 6,302 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Blast2LCA.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/Blast2LCA.java | /*
* Blast2LCA.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.seq.BlastMode;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.algorithms.ActiveMatches;
import megan.algorithms.TaxonPathAssignment;
import megan.algorithms.TopAssignment;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.IdParser;
import megan.core.Document;
import megan.data.IReadBlock;
import megan.main.MeganProperties;
import megan.parsers.blast.BlastFileFormat;
import megan.parsers.blast.BlastModeUtils;
import megan.rma6.BlastFileReadBlockIterator;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.BitSet;
/**
 * Program that parses Blast input and computes a taxonomy classification and also a KEGG mapping, if desired
 * Daniel Huson, 3.2012
 */
@Deprecated
public class Blast2LCA {
    /**
     * Main program: runs the LCA-based classification and reports the elapsed wall-clock time.
     * Exits with status 0 on success, 1 on any failure.
     *
     * @param args command-line arguments
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("Blast2LCA");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            long start = System.currentTimeMillis();
            (new Blast2LCA()).run(args);
            System.err.println("Time: " + ((System.currentTimeMillis() - start) / 1000) + "s");
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * Parses the command line, loads the requested classification mapping files and then
     * classifies each read of the BLAST file, writing one taxonomy line per read (and,
     * if requested, one KEGG line per read to a separate file).
     *
     * @param args command-line arguments
     * @throws UsageException    if the command line is invalid
     * @throws IOException       if a file cannot be read or written
     * @throws CanceledException if the computation is canceled
     */
    private void run(String[] args) throws UsageException, IOException, CanceledException {
        final ArgsOptions options = new ArgsOptions(args, this, "Applies the LCA alignment to reads and produce a taxonomic classification");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input");
        String blastFile = options.getOptionMandatory("-i", "input", "Input BLAST file", "foo.blast");
        String blastFormat = options.getOption("-f", "format", "BLAST format", BlastFileFormat.values(), BlastFileFormat.Unknown.toString());
        String blastMode = options.getOption("-m", "mode", "BLAST mode", BlastMode.values(), BlastMode.Unknown.toString());
        options.comment("Output");
        String outputFile = options.getOption("-o", "output", "Taxonomy output file", FileUtils.getFileBaseName(FileUtils.getFileNameWithoutZipOrGZipSuffix(blastFile)) + "-taxonomy.txt");
        String keggOutputFile = options.getOption("-ko", "keggOutput", "KEGG output file", FileUtils.getFileBaseName(FileUtils.getFileNameWithoutZipOrGZipSuffix(blastFile)) + "-kegg.txt");
        options.comment("Functional classification:");
        final boolean doKegg = options.getOption("-k", "kegg", "Map reads to KEGG KOs?", false);
        options.comment("Output options:");
        final boolean showRank = options.getOption("-sr", "showRanks", "Show taxonomic ranks", true);
        final boolean useOfficialRanksOnly = options.getOption("-oro", "officialRanksOnly", "Report only taxa that have an official rank", true);
        final boolean showTaxonIds = options.getOption("-tid", "showTaxIds", "Report taxon ids rather than taxon names", false);
        options.comment("Parameters");
        // todo: implement long reads
        final boolean longReads = false;
        // final boolean longReads=options.getOption("-lg","longReads","Parse and analyse as long reads",Document.DEFAULT_LONG_READS);
        final float minScore = options.getOption("-ms", "minScore", "Min score", Document.DEFAULT_MINSCORE);
        final float maxExpected = options.getOption("-me", "maxExpected", "Max expected", 0.01f);
        float topPercent = options.getOption("-top", "topPercent", "Top percent", Document.DEFAULT_TOPPERCENT);
        final float minPercentIdentity = options.getOption("-mid", "minPercentIdentity", "Min percent identity", Document.DEFAULT_MIN_PERCENT_IDENTITY);
        final double minComplexity = 0; //options.getOption("-c","Minimum complexity (between 0 and 1)",0.0);
        final int keggRanksToReport = options.getOption("-kr", "maxKeggPerRead", "Maximum number of KEGG assignments to report for a read", 4);
        final boolean applyTopPercentFilterToKEGGAnalysis = options.getOption("+ktp", "applyTopPercentKegg", "Apply top percent filter in KEGG KO analysis", true);
        options.comment("Classification support:");
        final boolean parseTaxonNames = options.getOption("-tn", "parseTaxonNames", "Parse taxon names", true);
        final String mapDBFile = options.getOption("-mdb", "mapDB", "MEGAN mapping db (file megan-map.db)", "");
        final String acc2TaxaFile = options.getOption("-a2t", "acc2taxa", "Accession-to-Taxonomy mapping file", "");
        final String synonyms2TaxaFile = options.getOption("-s2t", "syn2taxa", "Synonyms-to-Taxonomy mapping file", "");
        final String acc2KeggFile = options.getOption("-a2kegg", "acc2kegg", "Accession-to-KEGG mapping file", "");
        final String synonyms2KeggFile = options.getOption("-s2kegg", "syn2kegg", "Synonyms-to-KEGG mapping file", "");
        options.comment(ArgsOptions.OTHER);
        ProgramProperties.put(IdParser.PROPERTIES_FIRST_WORD_IS_ACCESSION, options.getOption("-fwa", "firstWordIsAccession", "First word in reference header is accession number (set to 'true' for NCBI-nr downloaded Sep 2016 or later)", true));
        ProgramProperties.put(IdParser.PROPERTIES_ACCESSION_TAGS, options.getOption("-atags", "accessionTags", "List of accession tags", ProgramProperties.get(IdParser.PROPERTIES_ACCESSION_TAGS, IdParser.ACCESSION_TAGS)));
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        // the mapping db already bundles all mappings, so it is mutually exclusive with the individual files:
        if (mapDBFile.length() > 0 && (acc2TaxaFile.length() > 0 || synonyms2TaxaFile.length() > 0 || acc2KeggFile.length() > 0 || synonyms2KeggFile.length() > 0))
            throw new UsageException("Illegal to use both --mapDB and ---acc2... or --syn2... options");
        if (mapDBFile.length() > 0)
            ClassificationManager.setMeganMapDBFile(mapDBFile);

        // a top percent of exactly 0 would disable the filter; use a tiny positive value instead:
        if (topPercent == 0)
            topPercent = 0.0001f;

        // auto-detect BLAST format and mode if not explicitly specified:
        if (blastFormat.equalsIgnoreCase(BlastFileFormat.Unknown.toString())) {
            blastFormat = BlastFileFormat.detectFormat(null, blastFile, true).toString();
        }
        if (blastMode.equalsIgnoreCase(BlastMode.Unknown.toString()))
            blastMode = BlastModeUtils.detectMode(null, blastFile, false).toString();

        final IdMapper taxonIdMapper = ClassificationManager.get(Classification.Taxonomy, true).getIdMapper();
        // load taxonomy (and KEGG mappings, if requested), then process the file;
        // note that this block deliberately encloses the rest of the method:
        {
            taxonIdMapper.setUseTextParsing(parseTaxonNames);
            if (mapDBFile.length() > 0) {
                taxonIdMapper.loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
            }
            if (acc2TaxaFile.length() > 0) {
                taxonIdMapper.loadMappingFile(acc2TaxaFile, IdMapper.MapType.Accession, false, new ProgressPercentage());
            }
            if (synonyms2TaxaFile.length() > 0) {
                taxonIdMapper.loadMappingFile(synonyms2TaxaFile, IdMapper.MapType.Synonyms, false, new ProgressPercentage());
            }
            final IdMapper keggMapper = ClassificationManager.get("KEGG", true).getIdMapper();
            if (doKegg) {
                if (mapDBFile.length() > 0) {
                    keggMapper.loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
                }
                if (acc2KeggFile.length() > 0) {
                    keggMapper.loadMappingFile(acc2KeggFile, IdMapper.MapType.Accession, false, new ProgressPercentage());
                }
                if (synonyms2KeggFile.length() > 0) {
                    keggMapper.loadMappingFile(synonyms2KeggFile, IdMapper.MapType.Synonyms, false, new ProgressPercentage());
                }
            }
            int totalIn = 0;  // number of reads seen
            int totalOut = 0; // number of reads classified and written
            System.err.println("Reading file: " + blastFile);
            System.err.println("Writing file: " + outputFile);

            try (var it = new BlastFileReadBlockIterator(blastFile, null, BlastFileFormat.valueOfIgnoreCase(blastFormat), BlastMode.valueOfIgnoreCase(blastMode), new String[]{"Taxonomy", "KEGG"}, 100, longReads)) {
                final ProgressPercentage progressListener = new ProgressPercentage();
                progressListener.setMaximum(it.getMaximumProgress());
                try (BufferedWriter w = new BufferedWriter(new FileWriter(outputFile))) {
                    // keggW stays null when KEGG output was not requested; closed in the finally below
                    final BufferedWriter keggW;
                    if (doKegg) {
                        keggW = new BufferedWriter(new FileWriter(keggOutputFile));
                        System.err.println("Writing file: " + keggOutputFile);
                    } else
                        keggW = null;
                    try {
                        while (it.hasNext()) {
                            final IReadBlock readBlock = it.next();
                            totalIn++;
                            final BitSet activeMatchesForTaxa = new BitSet();
                            final BitSet activeMatchesForGenes = (keggW == null ? null : new BitSet());
                            // low-complexity reads are reported with a special id instead of being classified
                            boolean hasLowComplexity = readBlock.getComplexity() > 0 && readBlock.getComplexity() + 0.01 < minComplexity;
                            if (hasLowComplexity) {
                                w.write(readBlock.getReadName() + "; ; " + IdMapper.LOW_COMPLEXITY_ID + " " + readBlock.getComplexity() + "\n");
                            } else {
                                if (keggW != null) {
                                    ActiveMatches.compute(minScore, applyTopPercentFilterToKEGGAnalysis ? topPercent : 0, maxExpected, minPercentIdentity, readBlock, "KEGG", activeMatchesForGenes);
                                    keggW.write(readBlock.getReadName() + "; ;" + TopAssignment.compute("KEGG", activeMatchesForGenes, readBlock, keggRanksToReport) + "\n");
                                }
                                ActiveMatches.compute(minScore, topPercent, maxExpected, minPercentIdentity, readBlock, Classification.Taxonomy, activeMatchesForTaxa);
                                w.write(readBlock.getReadName() + "; ;" + TaxonPathAssignment.getPathAndPercent(readBlock, activeMatchesForTaxa, showTaxonIds, showRank, useOfficialRanksOnly, true) + "\n");
                                totalOut++;
                                progressListener.setProgress(it.getProgress());
                            }
                        }
                    } finally {
                        if (keggW != null)
                            keggW.close();
                    }
                }
                progressListener.close();
            }
            System.err.printf("Reads in: %,11d%n", totalIn);
            System.err.printf("Reads out:%,11d%n", totalOut);
        }
    }
}
| 12,260 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
SAM2RMA6.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/SAM2RMA6.java | /*
* SAM2RMA6.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.fx.util.ProgramExecutorService;
import jloda.swing.commands.CommandManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.accessiondb.AccessAccessionMappingDatabase;
import megan.accessiondb.ConfigRequests;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.IdParser;
import megan.classification.data.ClassificationCommandHelper;
import megan.core.ContaminantManager;
import megan.core.Document;
import megan.core.SampleAttributeTable;
import megan.main.Megan6;
import megan.main.MeganProperties;
import megan.parsers.blast.BlastFileFormat;
import megan.parsers.blast.BlastModeUtils;
import megan.rma6.RMA6Connector;
import megan.rma6.RMA6FromBlastCreator;
import megan.util.SAMFileFilter;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.sql.SQLException;
import java.util.*;
/**
 * compute an RMA6 file from a SAM file generated by DIAMOND or MALT
 * Daniel Huson, 3.2012
 */
public class SAM2RMA6 {
    /**
     * Main program: computes one RMA6 file per input SAM file and reports time and peak memory usage.
     * Exits with status 0 on success, 1 on any failure.
     *
     * @param args command-line arguments
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("SAM2RMA6");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            PeakMemoryUsageMonitor.start();
            (new SAM2RMA6()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * Parses the command line, validates all input/output files, loads classification
     * mapping files, and then converts each SAM file into an RMA6 file (optionally
     * attaching metadata read from the given metadata files).
     *
     * @param args command-line arguments
     * @throws UsageException if the command line is invalid
     * @throws IOException    if a file cannot be read or written
     * @throws SQLException   if accessing the mapping database fails
     */
    private void run(String[] args) throws UsageException, IOException, SQLException {
        CommandManager.getGlobalCommands().addAll(ClassificationCommandHelper.getGlobalCommands());

        final ArgsOptions options = new ArgsOptions(args, this, "Computes a MEGAN RMA (.rma) file from a SAM (.sam) file that was created by DIAMOND or MALT");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input");
        final String[] samFiles = options.getOptionMandatory("-i", "in", "Input SAM file[s] generated by DIAMOND or MALT (gzipped ok)", new String[0]);
        String[] readsFiles = options.getOption("-r", "reads", "Reads file(s) (fasta or fastq, gzipped ok)", new String[0]);
        final String[] metaDataFiles = options.getOption("-mdf", "metaDataFile", "Files containing metadata to be included in RMA6 files", new String[0]);
        options.comment("Output");
        String[] outputFiles = options.getOptionMandatory("-o", "out", "Output file(s), one for each input file, or a directory", new String[0]);
        boolean useCompression = options.getOption("-c", "useCompression", "Compress reads and matches in RMA file (smaller files, longer to generate", true);
        options.comment("Reads");
        final boolean pairedReads = options.getOption("-p", "paired", "Reads are paired", false);
        final int pairedReadsSuffixLength = options.getOption("-ps", "pairedSuffixLength", "Length of name suffix used to distinguish between name of read and its mate", 0);
        options.comment("Parameters");
        boolean longReads = options.getOption("-lg", "longReads", "Parse and analyse as long reads", Document.DEFAULT_LONG_READS);
        final int maxMatchesPerRead = options.getOption("-m", "maxMatchesPerRead", "Max matches per read", 100);
        final boolean runClassifications = options.getOption("-class", "classify", "Run classification algorithm", true);
        final float minScore = options.getOption("-ms", "minScore", "Min score", Document.DEFAULT_MINSCORE);
        final float maxExpected = options.getOption("-me", "maxExpected", "Max expected", Document.DEFAULT_MAXEXPECTED);
        final float topPercent = options.getOption("-top", "topPercent", "Top percent", Document.DEFAULT_TOPPERCENT);
        final float minSupportPercent = options.getOption("-supp", "minSupportPercent", "Min support as percent of assigned reads (0==off)", Document.DEFAULT_MINSUPPORT_PERCENT);
        final int minSupport = options.getOption("-sup", "minSupport", "Min support", Document.DEFAULT_MINSUPPORT);
        final float minPercentReadToCover = options.getOption("-mrc", "minPercentReadCover", "Min percent of read length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_READ_TO_COVER);
        final float minPercentReferenceToCover = options.getOption("-mrefc", "minPercentReferenceCover", "Min percent of reference length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_REFERENCE_TO_COVER);
        final int minReadLength=options.getOption("-mrl","minReadLength","Minimum read length",0);
        final Document.LCAAlgorithm lcaAlgorithm = Document.LCAAlgorithm.valueOfIgnoreCase(options.getOption("-alg", "lcaAlgorithm", "Set the LCA algorithm to use for taxonomic assignment",
                Document.LCAAlgorithm.values(), longReads ? Document.DEFAULT_LCA_ALGORITHM_LONG_READS.toString() : Document.DEFAULT_LCA_ALGORITHM_SHORT_READS.toString()));
        final float lcaCoveragePercent = options.getOption("-lcp", "lcaCoveragePercent", "Set the percent for the LCA to cover", Document.DEFAULT_LCA_COVERAGE_PERCENT_SHORT_READS);

        // the default read-assignment mode depends on whether we are in long-read mode;
        // when only printing help, describe both defaults:
        final String readAssignmentModeDefaultValue;
        if (options.isDoHelp()) {
            readAssignmentModeDefaultValue = (Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS + " in long read mode, " + Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS + " else");
        } else if (longReads)
            readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS.toString();
        else
            readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS.toString();
        final Document.ReadAssignmentMode readAssignmentMode = Document.ReadAssignmentMode.valueOfIgnoreCase(options.getOption("-ram", "readAssignmentMode", "Set the read assignment mode", readAssignmentModeDefaultValue));
        final String contaminantsFile = options.getOption("-cf", "conFile", "File of contaminant taxa (one Id or name per line)", "");
        options.comment("Classification support:");
        final String mapDBFile = options.getOption("-mdb", "mapDB", "MEGAN mapping db (file megan-map.db)", "");
        final Set<String> selectedClassifications = new HashSet<>(Arrays.asList(options.getOption("-on", "only", "Use only named classifications (if not set: use all)", new String[0])));
        options.comment("Deprecated classification support:");
        final boolean parseTaxonNames = options.getOption("-tn", "parseTaxonNames", "Parse taxon names", true);
        final String acc2TaxaFile = options.getOption("-a2t", "acc2taxa", "Accession-to-Taxonomy mapping file", "");
        final String synonyms2TaxaFile = options.getOption("-s2t", "syn2taxa", "Synonyms-to-Taxonomy mapping file", "");
        {
            final String tags = options.getOption("-t4t", "tags4taxonomy", "Tags for taxonomy id parsing (must set to activate id parsing)", "").trim();
            ProgramProperties.preset("TaxonomyTags", tags);
            ProgramProperties.preset("TaxonomyParseIds", tags.length() > 0);
        }
        // register per-classification accession/synonyms mapping-file options:
        final HashMap<String, String> class2AccessionFile = new HashMap<>();
        final HashMap<String, String> class2SynonymsFile = new HashMap<>();
        for (String cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
            class2AccessionFile.put(cName, options.getOption("-a2" + cName.toLowerCase(), "acc2" + cName.toLowerCase(), "Accession-to-" + cName + " mapping file", ""));
            class2SynonymsFile.put(cName, options.getOption("-s2" + cName.toLowerCase(), "syn2" + cName.toLowerCase(), "Synonyms-to-" + cName + " mapping file", ""));
            final String tags = options.getOption("-t4" + cName.toLowerCase(), "tags4" + cName.toLowerCase(), "Tags for " + cName + " id parsing (must set to activate id parsing)", "").trim();
            ProgramProperties.preset(cName + "Tags", tags);
            ProgramProperties.preset(cName + "ParseIds", tags.length() > 0);
        }
        ProgramProperties.preset(IdParser.PROPERTIES_FIRST_WORD_IS_ACCESSION, options.getOption("-fwa", "firstWordIsAccession", "First word in reference header is accession number (set to 'true' for NCBI-nr downloaded Sep 2016 or later)", true));
        ProgramProperties.preset(IdParser.PROPERTIES_ACCESSION_TAGS, options.getOption("-atags", "accessionTags", "List of accession tags", ProgramProperties.get(IdParser.PROPERTIES_ACCESSION_TAGS, IdParser.ACCESSION_TAGS)));
        options.comment(ArgsOptions.OTHER);
        ProgramExecutorService.setNumberOfCoresToUse(options.getOption("-t", "threads", "Number of threads", 8));
        ConfigRequests.setCacheSize(options.getOption("-cs","cacheSize","Cache size for SQLITE (use with care)", ConfigRequests.getCacheSize()));
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        // minSupport and minSupportPercent are alternative filters, not to be combined:
        if (minSupport > 0 && minSupportPercent > 0)
            throw new IOException("Please specify a positive value for either --minSupport or --minSupportPercent, but not for both");

        // validate all input files up front, before any expensive work:
        for (String fileName : samFiles) {
            FileUtils.checkFileReadableNonEmpty(fileName);
            if (!SAMFileFilter.getInstance().accept(fileName))
                throw new IOException("File not in SAM format (or incorrect file suffix?): " + fileName);
        }
        for (String fileName : metaDataFiles) {
            FileUtils.checkFileReadableNonEmpty(fileName);
        }
        for (String fileName : readsFiles) {
            FileUtils.checkFileReadableNonEmpty(fileName);
        }
        if (StringUtils.notBlank(contaminantsFile))
            FileUtils.checkFileReadableNonEmpty(contaminantsFile);

        // the mapping db already bundles all mappings, so it is mutually exclusive with the individual files:
        final Collection<String> mapDBClassifications = AccessAccessionMappingDatabase.getContainedClassificationsIfDBExists(mapDBFile);
        if (mapDBClassifications.size() > 0 && (StringUtils.hasPositiveLengthValue(class2AccessionFile) || StringUtils.hasPositiveLengthValue(class2SynonymsFile)))
            throw new UsageException("Illegal to use both --mapDB and ---acc2... or --syn2... options");
        if (mapDBClassifications.size() > 0)
            ClassificationManager.setMeganMapDBFile(mapDBFile);

        // determine which functional classifications will actually be run:
        final ArrayList<String> cNames = new ArrayList<>();
        for (String cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
            if ((selectedClassifications.size() == 0 || selectedClassifications.contains(cName))
                && (mapDBClassifications.contains(cName) || class2AccessionFile.get(cName).length() > 0 || class2SynonymsFile.get(cName).length() > 0))
                cNames.add(cName);
        }
        if (cNames.size() > 0)
            System.err.println("Functional classifications to use: " + StringUtils.toString(cNames, ", "));

        // map the given output argument(s) to one output file per input SAM file:
        if (outputFiles.length == 1) {
            if (samFiles.length == 1) {
                if ((new File(outputFiles[0]).isDirectory()))
                    outputFiles[0] = (new File(outputFiles[0], FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutPath(FileUtils.getFileNameWithoutZipOrGZipSuffix(samFiles[0])), ".rma6"))).getPath();
            } else if (samFiles.length > 1) {
                if (!(new File(outputFiles[0]).isDirectory()))
                    throw new IOException("Multiple files given, but given single output is not a directory");
                // NOTE(review): getParent() yields the PARENT of the given output directory, so with
                // multiple inputs the .rma6 files land next to, not inside, the directory the user named.
                // This looks inconsistent with the single-input branch above — confirm intended behavior.
                String outputDirectory = (new File(outputFiles[0])).getParent();
                outputFiles = new String[samFiles.length];
                for (int i = 0; i < samFiles.length; i++)
                    outputFiles[i] = new File(outputDirectory, FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutZipOrGZipSuffix(FileUtils.getFileNameWithoutPath(samFiles[i])), ".rma6")).getPath();
            }
        } else // output.length >1
        {
            if (samFiles.length != outputFiles.length)
                throw new IOException("Number of input and output files do not match");
        }

        // either one metadata file for all inputs, or one per input:
        if (metaDataFiles.length > 1 && metaDataFiles.length != samFiles.length) {
            throw new IOException("Number of metadata files (" + metaDataFiles.length + ") doesn't match number of SAM files (" + samFiles.length + ")");
        }

        // when no reads files are given, use an empty name per SAM file:
        if (readsFiles.length == 0) {
            readsFiles = new String[samFiles.length];
            Arrays.fill(readsFiles, "");
        } else if (readsFiles.length != samFiles.length)
            throw new IOException("Number of reads files must equal number of SAM files");

        final IdMapper taxonIdMapper = ClassificationManager.get(Classification.Taxonomy, true).getIdMapper();
        final IdMapper[] idMappers = new IdMapper[cNames.size()];

        // Load all mapping files:
        if (runClassifications) {
            ClassificationManager.get(Classification.Taxonomy, true);
            taxonIdMapper.setUseTextParsing(parseTaxonNames);
            if (mapDBFile.length() > 0) {
                taxonIdMapper.loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
            }
            if (acc2TaxaFile.length() > 0) {
                taxonIdMapper.loadMappingFile(acc2TaxaFile, IdMapper.MapType.Accession, false, new ProgressPercentage());
            }
            if (synonyms2TaxaFile.length() > 0) {
                taxonIdMapper.loadMappingFile(synonyms2TaxaFile, IdMapper.MapType.Synonyms, false, new ProgressPercentage());
            }
            for (int i = 0; i < cNames.size(); i++) {
                final String cName = cNames.get(i);
                idMappers[i] = ClassificationManager.get(cName, true).getIdMapper();
                if (mapDBClassifications.contains(cName))
                    idMappers[i].loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
                if (class2AccessionFile.get(cName).length() > 0)
                    idMappers[i].loadMappingFile(class2AccessionFile.get(cName), IdMapper.MapType.Accession, false, new ProgressPercentage());
                if (class2SynonymsFile.get(cName).length() > 0)
                    idMappers[i].loadMappingFile(class2SynonymsFile.get(cName), IdMapper.MapType.Synonyms, false, new ProgressPercentage());
            }
        }

        /*
         * process each set of files:
         */
        for (int i = 0; i < samFiles.length; i++) {
            System.err.println("Current SAM file: " + samFiles[i]);
            if (i < readsFiles.length)
                System.err.println("Reads file: " + readsFiles[i]);
            System.err.println("Output file: " + outputFiles[i]);

            ProgressListener progressListener = new ProgressPercentage();

            // configure a fresh document with all analysis parameters for this input:
            final Document doc = new Document();
            doc.getActiveViewers().add(Classification.Taxonomy);
            doc.getActiveViewers().addAll(cNames);
            doc.setLongReads(longReads);
            doc.setMinScore(minScore);
            doc.setMaxExpected(maxExpected);
            doc.setTopPercent(topPercent);
            doc.setMinSupportPercent(minSupportPercent);
            doc.setMinSupport(minSupport);
            doc.setPairedReads(pairedReads);
            doc.setPairedReadSuffixLength(pairedReadsSuffixLength);
            doc.setMinReadLength(minReadLength);
            doc.setBlastMode(BlastModeUtils.determineBlastModeSAMFile(samFiles[i]));
            doc.setLcaAlgorithm(lcaAlgorithm);
            doc.setLcaCoveragePercent(lcaCoveragePercent);
            doc.setMinPercentReadToCover(minPercentReadToCover);
            doc.setMinPercentReferenceToCover(minPercentReferenceToCover);
            doc.setReadAssignmentMode(readAssignmentMode);

            if (contaminantsFile.length() > 0) {
                ContaminantManager contaminantManager = new ContaminantManager();
                contaminantManager.read(contaminantsFile);
                System.err.printf("Contaminants profile: %,d input, %,d total%n", contaminantManager.inputSize(), contaminantManager.size());
                doc.getDataTable().setContaminants(contaminantManager.getTaxonIdsString());
                doc.setUseContaminantFilter(contaminantManager.size() > 0);
            }

            createRMA6FileFromSAM("SAM2RMA6", samFiles[i], readsFiles[i], outputFiles[i], useCompression, doc, maxMatchesPerRead, progressListener);
            progressListener.close();

            final RMA6Connector connector = new RMA6Connector(outputFiles[i]);

            // attach metadata, if provided; a single metadata file is reused for every input:
            if (metaDataFiles.length > 0) {
                try {
                    System.err.println("Saving metadata:");
                    SampleAttributeTable sampleAttributeTable = new SampleAttributeTable();
                    sampleAttributeTable.read(new FileReader(metaDataFiles[Math.min(i, metaDataFiles.length - 1)]),
                            Collections.singletonList(FileUtils.getFileBaseName(FileUtils.getFileNameWithoutPath(outputFiles[i]))), false);
                    Map<String, byte[]> label2data = new HashMap<>();
                    label2data.put(SampleAttributeTable.SAMPLE_ATTRIBUTES, sampleAttributeTable.getBytes());
                    connector.putAuxiliaryData(label2data);
                    System.err.println("done");
                } catch (Exception ex) {
                    // best-effort: a metadata failure is reported but does not abort the conversion
                    Basic.caught(ex);
                }
            }
            progressListener.incrementProgress();
        }
    }

    /**
     * create an RMA6 file from a SAM file (generated by DIAMOND or MALT)
     *
     * @param creator           name recorded as the creating program
     * @param samFile           input SAM file
     * @param queryFile         reads file matching the SAM file (may be empty)
     * @param rma6FileName      output RMA6 file
     * @param useCompression    whether to compress reads and matches
     * @param doc               document carrying all analysis parameters
     * @param maxMatchesPerRead maximum number of matches to keep per read
     * @param progressListener @throws CanceledException
     */
    private static void createRMA6FileFromSAM(String creator, String samFile, String queryFile, String rma6FileName, boolean useCompression, Document doc,
                                              int maxMatchesPerRead, ProgressListener progressListener) throws IOException, SQLException {
        final RMA6FromBlastCreator rma6Creator =
                new RMA6FromBlastCreator(creator, BlastFileFormat.SAM, doc.getBlastMode(), new String[]{samFile}, new String[]{queryFile}, rma6FileName, useCompression, doc, maxMatchesPerRead);
        rma6Creator.parseFiles(progressListener);
    }
}
| 19,623 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
GCAssembler.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/GCAssembler.java | /*
* GCAssembler.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.commands.CommandManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import jloda.util.progress.ProgressSilent;
import megan.assembly.ReadAssembler;
import megan.assembly.ReadDataCollector;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.data.ClassificationCommandHelper;
import megan.core.Document;
import megan.data.IConnector;
import megan.data.IReadBlockIterator;
import megan.main.MeganProperties;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;
/**
* performs gene-centric assemblies
* Daniel Huson, 8/2016
*/
public class GCAssembler {
    /**
     * Command-line entry point: performs gene-centric assemblies.
     * Sets up program metadata and resource roots, runs the assembler, reports
     * wall-clock time and peak memory on stderr, and exits with 0 on success, 1 on failure.
     *
     * @param args command-line arguments, parsed by {@link #run(String[])}
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("GCAssembler");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            // start sampling memory so the peak-usage report below is meaningful
            PeakMemoryUsageMonitor.start();
            (new GCAssembler()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }
/**
* parse arguments the program
*
*/
private void run(String[] args) throws UsageException, IOException {
CommandManager.getGlobalCommands().addAll(ClassificationCommandHelper.getGlobalCommands());
final var options = new ArgsOptions(args, this, "Gene-centric assembly");
options.setVersion(ProgramProperties.getProgramVersion());
options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
options.setAuthors("Daniel H. Huson");
options.comment("Input and output");
final var inputFile = options.getOptionMandatory("-i", "input", "Input DAA or RMA6 file", "");
final var outputFileTemplate = options.getOption("-o", "output", "Output filename template, use %d or %s to represent class id or name, respectively",
FileUtils.replaceFileSuffix(inputFile.length() == 0 ? "input" : inputFile, "-%d.fasta"));
options.comment("Classification");
final var classificationName = options.getOptionMandatory("-fun", "function", "Name of functional classification (choices: "
+ StringUtils.toString(ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy(), ", ") + ", none)", "");
final var selectedClassIds = options.getOptionMandatory("-id", "ids", "Names or ids of classes to assemble, or keyword ALL for all", new String[0]);
options.comment("Options");
final var minOverlapReads = options.getOption("-mor", "minOverlapReads", "Minimum overlap for two reads", 20);
final var minLength = options.getOption("-len", "minLength", "Minimum contig length", 200);
final var minReads = options.getOption("-reads", "minReads", "Minimum number of reads", 2);
final var minAvCoverage = options.getOption("-mac", "minAvCoverage", "Minimum average coverage", 1);
final var doOverlapContigs = options.getOption("-c", "overlapContigs", "Attempt to overlap contigs", true);
final var minOverlapContigs = options.getOption("-moc", "minOverlapContigs", "Minimum overlap for two contigs", 20);
final var minPercentIdentityContigs = (float) options.getOption("-mic", "minPercentIdentityContigs", "Mininum percent identity to merge contigs", 98.0);
options.comment(ArgsOptions.OTHER);
final var desiredNumberOfThreads = options.getOption("-t", "threads", "Number of worker threads", 8);
final var veryVerbose = options.getOption("-vv", "veryVerbose", "Report program is very verbose detail", false);
final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
options.done();
MeganProperties.initializeProperties(propertiesFile);
final var doAllClasses = selectedClassIds.length == 1 && selectedClassIds[0].equalsIgnoreCase("all");
final var supportedClassifications = ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy();
if (!supportedClassifications.contains(classificationName) && !classificationName.equalsIgnoreCase("none")) {
throw new UsageException("--function: Must be one of: " + StringUtils.toString(supportedClassifications, ",") + ", none");
}
// todo; fun=none mode does not work
if (classificationName.equalsIgnoreCase("none") && !(selectedClassIds.length == 1 && selectedClassIds[0].equalsIgnoreCase("all")))
throw new UsageException("--function 'none': --ids must be 'all' ");
if (options.isVerbose())
System.err.println("Opening file: " + inputFile);
final var document = new Document();
document.getMeganFile().setFileFromExistingFile(inputFile, true);
if (!(document.getMeganFile().isDAAFile() || document.getMeganFile().isRMA6File()))
throw new IOException("Input file has wrong type: must be meganized DAA file or RMA6 file");
if (document.getMeganFile().isDAAFile() && document.getConnector() == null)
throw new IOException("Input DAA file: Must first be meganized");
final Classification classification;
final List<Integer> classIdsList;
if (classificationName.equalsIgnoreCase("none")) {
classification = null; // all reads!
classIdsList = Collections.singletonList(0); // all reads!
} else {
classification = ClassificationManager.get(classificationName, true);
final var connector = document.getConnector();
final var classificationBlock = connector.getClassificationBlock(classificationName);
if (doAllClasses) {
classIdsList = new ArrayList<>(classificationBlock.getKeySet().size());
for (Integer id : classificationBlock.getKeySet()) {
if (id > 0 && classificationBlock.getSum(id) > 0)
classIdsList.add(id);
classIdsList.sort(Integer::compareTo);
}
} else {
classIdsList = new ArrayList<>(selectedClassIds.length);
for (String str : selectedClassIds) {
if (NumberUtils.isInteger(str))
classIdsList.add(NumberUtils.parseInt(str));
else {
if (classification != null) {
int id = classification.getName2IdMap().get(str);
if (id != 0)
classIdsList.add(NumberUtils.parseInt(str));
else
System.err.println("Unknown class: " + str);
}
}
}
}
}
if (options.isVerbose())
System.err.println("Number of classes to assemble: " + classIdsList.size());
if (classIdsList.size() == 0)
throw new UsageException("No valid classes specified");
final var numberOfThreads = Math.min(classIdsList.size(), desiredNumberOfThreads);
var numberOfFilesProduced = new LongAdder();
var totalContigs = new LongAdder();
final var executorService = Executors.newFixedThreadPool(Math.max(1,doOverlapContigs?numberOfThreads/2:numberOfThreads));
try (ProgressListener totalProgress = (veryVerbose ? new ProgressSilent() : new ProgressPercentage("Progress:", classIdsList.size()))) {
var exception = new Single<Exception>();
final var doc = new Document();
doc.getMeganFile().setFileFromExistingFile(inputFile, true);
doc.loadMeganFile();
final var connector = doc.getConnector();
for (var classId : classIdsList) {
if (exception.isNull()) {
try (final var it = getIterator(connector, classificationName, classId)) {
final var readAssembler = new ReadAssembler(veryVerbose);
final var readData = ReadDataCollector.apply(it, veryVerbose ? new ProgressPercentage() : new ProgressSilent());
executorService.submit(() -> {
try {
final var progress = (veryVerbose ? new ProgressPercentage() : new ProgressSilent());
final var className = classification != null ? classification.getName2IdMap().get(classId) : "none";
if (veryVerbose)
System.err.println("++++ Assembling class " + classId + ": " + className + ": ++++");
final var outputFile = createOutputFileName(outputFileTemplate, classId, className, classIdsList.size());
final var label = classificationName + ". Id: " + classId;
readAssembler.computeOverlapGraph(label, minOverlapReads, readData, progress);
var count = readAssembler.computeContigs(minReads, minAvCoverage, minLength, progress);
if (veryVerbose)
System.err.printf("Number of contigs:%6d%n", count);
if (doOverlapContigs) {
count = ReadAssembler.mergeOverlappingContigs(4, progress, minPercentIdentityContigs, minOverlapContigs, readAssembler.getContigs(), veryVerbose);
if (veryVerbose)
System.err.printf("Remaining contigs:%6d%n", count);
}
try (var w = new BufferedWriter(new FileWriter(outputFile))) {
readAssembler.writeContigs(w, progress);
if (veryVerbose) {
System.err.println("Contigs written to: " + outputFile);
readAssembler.reportContigStats();
}
numberOfFilesProduced.increment();
totalContigs.add(readAssembler.getContigs().size());
}
synchronized (totalProgress) {
totalProgress.incrementProgress();
}
} catch (Exception ex) {
exception.setIfCurrentValueIsNull(ex);
}
});
}
}
}
executorService.shutdown();
try {
executorService.awaitTermination(1000, TimeUnit.DAYS);
} catch (InterruptedException e) {
exception.set(e);
}
if (exception.isNotNull())
throw new IOException(exception.get());
} finally {
executorService.shutdownNow();
}
if (options.isVerbose()) {
System.err.println("Number of files produced: " + numberOfFilesProduced.intValue());
System.err.println("Total number of contigs: " + totalContigs.intValue());
}
}
/**
* create the output file name
*
* @return output file name
*/
private String createOutputFileName(String outputFileTemplate, int classId, String className, int numberOfIds) {
String outputFile = null;
if (outputFileTemplate.contains("%d"))
outputFile = outputFileTemplate.replaceAll("%d", "" + classId);
if (outputFileTemplate.contains("%s"))
outputFile = (outputFile == null ? outputFileTemplate : outputFile).replaceAll("%s", StringUtils.toCleanName(className));
if (outputFile == null && numberOfIds > 1)
outputFile = FileUtils.replaceFileSuffix(outputFileTemplate, "-" + classId + ".fasta");
if (outputFile == null)
outputFile = outputFileTemplate;
return outputFile;
}
/**
* get the iterator. It will be an interator over all reads in a given class, if classificationName and classId given, otherwise, over all reads
*
* @return iterator
*/
private IReadBlockIterator getIterator(IConnector connector, String classificationName, int classId) throws IOException {
if (classificationName.equalsIgnoreCase("none"))
return connector.getAllReadsIterator(0, 10, true, true);
else
return connector.getReadsIterator(classificationName, classId, 0, 10, true, true);
}
} | 12,194 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReadExtractorTool.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/ReadExtractorTool.java | /*
* ReadExtractorTool.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.seq.BlastMode;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.classification.ClassificationManager;
import megan.core.Document;
import megan.daa.io.DAAParser;
import megan.dialogs.export.ReadsExporter;
import megan.dialogs.export.analysis.FrameShiftCorrectedReadsExporter;
import megan.dialogs.extractor.ReadsExtractor;
import megan.main.MeganProperties;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.TreeSet;
/**
* extracts reads from a DAA or RMA file, by taxa
* Daniel Huson, 1.2019
*/
public class ReadExtractorTool {
    /**
     * Command-line entry point: extracts reads from DAA/RMA files by classification.
     * Sets up program metadata and resource roots, runs the tool, reports peak memory
     * usage on stderr, and exits with 0 on success, 1 on failure.
     *
     * @param args command-line arguments, parsed by {@link #run(String[])}
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("ReadExtractorTool");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            // start sampling memory so the peak-usage report below is meaningful
            PeakMemoryUsageMonitor.start();
            (new ReadExtractorTool()).run(args);
            PeakMemoryUsageMonitor.report();
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }
/**
* run
*/
private void run(String[] args) throws UsageException, IOException {
final var options = new ArgsOptions(args, this, "Extracts reads from a DAA or RMA file by classification");
options.setVersion(ProgramProperties.getProgramVersion());
options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
options.setAuthors("Daniel H. Huson");
options.comment("Input and Output");
final var inputFiles = new ArrayList<>(Arrays.asList(options.getOptionMandatory("-i", "input", "Input DAA and/or RMA file(s)", new String[0])));
final var outputFiles = new ArrayList<>(Arrays.asList(options.getOption("-o", "output", "Output file(s). Use %f for input file name, %t for class name and %i for class id. (Directory, stdout, .gz ok)", new String[]{"stdout"})));
options.comment("Options");
final var extractCorrectedReads = options.getOption("-fsc", "frameShiftCorrect", "Extract frame-shift corrected reads", false);
final var classificationName = options.getOption("-c", "classification", "The classification to use", ClassificationManager.getAllSupportedClassifications(), "");
final var classNames = new ArrayList<>(Arrays.asList(options.getOption("-n", "classNames", "Names (or ids) of classes to extract reads from (default: extract all classes)", new String[0])));
final var allBelow = options.getOption("-b", "allBelow", "Report all reads assigned to or below a named class", false);
final var all = options.getOption("-a", "all", "Extract all reads (not by class)", false);
options.comment(ArgsOptions.OTHER);
final var ignoreExceptions = options.getOption("-IE", "ignoreExceptions", "Ignore exceptions and continue processing", false);
final var gzOutputFiles = options.getOption("-gz", "gzipOutputFiles", "If output directory is given, gzip files written to directory", true);
final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
options.done();
MeganProperties.initializeProperties(propertiesFile);
if (classificationName.equals("") != all)
throw new UsageException("Must specific either option --classification or --all, but not both");
if(allBelow && all)
throw new UsageException("Must specific either option --allBelow or --all, but not both");
if(allBelow && classNames.size()==0)
throw new UsageException("When using --allBelow, must specify --classNames");
if(allBelow && extractCorrectedReads)
throw new UsageException("Option --allBelow is not implemented for --extractCorrectedReads");
if (outputFiles.size() == 1 && outputFiles.get(0).equals("stdout")) {
outputFiles.clear();
for (var i = 0; i < inputFiles.size(); i++)
outputFiles.add("stdout");
} else if (outputFiles.size() == 1 && FileUtils.isDirectory(outputFiles.get(0))) {
final var directory = outputFiles.get(0);
outputFiles.clear();
for (var name : inputFiles) {
if (all)
outputFiles.add(new File(directory, FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutPath(name), "-all.txt" + (gzOutputFiles ? ".gz" : ""))).getPath());
else
outputFiles.add(new File(directory, FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutPath(name), "-%i-%t.txt" + (gzOutputFiles ? ".gz" : ""))).getPath());
}
} else if (inputFiles.size() != outputFiles.size()) {
throw new UsageException("Number of input and output files must be equal, or output must be 'stdout' or a directory");
}
int totalReads = 0;
for (int i = 0; i < inputFiles.size(); i++) {
final var inputFile = inputFiles.get(i);
final var outputFile = outputFiles.get(i);
try {
if (inputFile.toLowerCase().endsWith("daa") && !DAAParser.isMeganizedDAAFile(inputFile, true)) {
throw new IOException("Warning: non-meganized DAA file: " + inputFile);
} else {
totalReads += extract(extractCorrectedReads, classificationName, classNames, all,allBelow, inputFile, outputFile);
}
} catch (Exception ex) {
if (ignoreExceptions)
System.err.println(Basic.getShortName(ex.getClass()) + ": " + ex.getMessage() + ", while processing file: " + inputFile);
else
throw ex;
}
}
System.err.printf("Reads extracted: %,d%n", totalReads);
}
    /**
     * Extracts reads from one input file: either every read, or the reads assigned
     * to the specified classes of the given classification.
     *
     * @param extractCorrectedReads if true, write frame-shift corrected reads (requires BLASTX mode)
     * @param classificationName    classification to extract by (unused when all is true)
     * @param classNames            names or numeric ids of classes to extract; empty means every class
     * @param all                   if true, extract all reads regardless of classification
     * @param allBelow              if true, also include reads assigned below each named class
     * @param inputFile             DAA or RMA6 input file
     * @param outputFile            destination (file, "stdout", .gz ok)
     * @return number of reads extracted
     * @throws IOException if the file lacks the requested classification, or frame-shift
     *                     correction is requested for a non-BLASTX file
     */
    private long extract(boolean extractCorrectedReads, String classificationName, Collection<String> classNames,
                         boolean all, boolean allBelow, String inputFile, String outputFile) throws IOException {
        final var doc = new Document();
        doc.getMeganFile().setFileFromExistingFile(inputFile, true);
        doc.loadMeganFile();
        final var connector = doc.getConnector();

        // frame-shift correction only makes sense for BLASTX-mode alignments
        if (extractCorrectedReads && doc.getBlastMode() != BlastMode.BlastX)
            throw new IOException("Frame-shift correction only possible when BlastMode is BLASTX");

        if (all) {
            // extract every read in the file, ignoring the classification entirely
            try (ProgressPercentage progress = new ProgressPercentage("Processing file: " + inputFile)) {
                if (extractCorrectedReads) {
                    return FrameShiftCorrectedReadsExporter.exportAll(connector, outputFile, progress);
                } else {
                    return ReadsExporter.exportAll(connector, outputFile, progress);
                }
            }
        } else {
            if (!Arrays.asList(connector.getAllClassificationNames()).contains(classificationName)) {
                throw new IOException("Input file does not contain the requested classification '" + classificationName + "'");
            }

            // resolve the requested class names/ids into a sorted set of class ids
            final var classIds = new TreeSet<Integer>();
            final var classification = ClassificationManager.get(classificationName, true);
            final var classificationBlock = connector.getClassificationBlock(classificationName);
            if (classNames.size() == 0)// no class names given, use all
            {
                for (Integer classId : classificationBlock.getKeySet()) {
                    if (classId > 0)
                        classIds.add(classId);
                }
            } else {
                var warnings = 0;
                for (var name : classNames) {
                    if (NumberUtils.isInteger(name))
                        classIds.add(NumberUtils.parseInt(name));
                    else {
                        var id = classification.getName2IdMap().get(name);
                        if (id > 0)
                            classIds.add(id);
                        else {
                            // report at most five unknown-class warnings, then stay silent
                            if (warnings++ < 5) {
                                System.err.println("Warning: unknown class: '" + name + "'");
                                if (warnings == 5)
                                    System.err.println("No further warnings");
                            }
                        }
                    }
                }
            }
            try (var progress = new ProgressPercentage("Processing file: " + inputFile)) {
                if (!extractCorrectedReads) {
                    return ReadsExtractor.extractReadsByFViewer(classificationName, progress, classIds, "", outputFile, doc, allBelow);
                } else {
                    return FrameShiftCorrectedReadsExporter.export(classificationName, classIds, connector, outputFile, progress);
                }
            }
        }
    }
}
| 10,030 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
BLAST2RMA6.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/BLAST2RMA6.java | /*
* BLAST2RMA6.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.fx.util.ProgramExecutorService;
import jloda.seq.BlastMode;
import jloda.swing.commands.CommandManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.accessiondb.AccessAccessionMappingDatabase;
import megan.accessiondb.ConfigRequests;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.IdParser;
import megan.classification.data.ClassificationCommandHelper;
import megan.core.ContaminantManager;
import megan.core.Document;
import megan.core.SampleAttributeTable;
import megan.main.MeganProperties;
import megan.parsers.blast.BlastFileFormat;
import megan.parsers.blast.BlastModeUtils;
import megan.rma6.RMA6Connector;
import megan.rma6.RMA6FromBlastCreator;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.sql.SQLException;
import java.util.*;
/**
* compute an RMA6 file from a SAM file generated by DIAMOND or MALT
* Daniel Huson, 3.2012
*/
public class BLAST2RMA6 {
    /**
     * Command-line entry point: computes RMA6 files from BLAST (or similar) files.
     * (The previous comment said "merge RMA files", which described a different tool.)
     * Sets up program metadata and resource roots, runs the converter, reports
     * wall-clock time and peak memory on stderr, and exits with 0 on success, 1 on failure.
     *
     * @param args command-line arguments, parsed by {@link #run(String[])}
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("Blast2RMA");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            // start sampling memory so the peak-usage report below is meaningful
            PeakMemoryUsageMonitor.start();
            (new BLAST2RMA6()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }
/**
* run
*
*/
private void run(String[] args) throws UsageException, IOException, SQLException {
CommandManager.getGlobalCommands().addAll(ClassificationCommandHelper.getGlobalCommands());
final ArgsOptions options = new ArgsOptions(args, this, "Computes MEGAN RMA files from BLAST (or similar) files");
options.setVersion(ProgramProperties.getProgramVersion());
options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
options.setAuthors("Daniel H. Huson");
options.comment("Input");
final String[] blastFiles = options.getOptionMandatory("-i", "in", "Input BLAST file[s] (.gz ok)", new String[0]);
final BlastFileFormat blastFormat = BlastFileFormat.valueOfIgnoreCase(options.getOptionMandatory("-f", "format", "Input file format", BlastFileFormat.values(), BlastFileFormat.Unknown.toString()));
final BlastMode blastMode = BlastMode.valueOfIgnoreCase(options.getOption("-bm", "blastMode", "Blast mode", BlastMode.values(), BlastMode.Unknown.toString()));
String[] readsFiles = options.getOption("-r", "reads", "Reads file(s) (fasta or fastq, .gz ok)", new String[0]);
final String[] metaDataFiles = options.getOption("-mdf", "metaDataFile", "Files containing metadata to be included in RMA6 files", new String[0]);
options.comment("Output");
String[] outputFiles = options.getOptionMandatory("-o", "out", "Output file(s), one for each input file, or a directory", new String[0]);
boolean useCompression = options.getOption("-c", "useCompression", "Compress reads and matches in RMA file (smaller files, longer to generate", true);
options.comment("Reads");
final boolean pairedReads = options.getOption("-p", "paired", "Reads are paired", false);
final int pairedReadsSuffixLength = options.getOption("-ps", "pairedSuffixLength", "Length of name suffix used to distinguish between name of read and its mate", 0);
final boolean pairsInSingleFile = options.getOption("-pof", "pairedReadsInOneFile", "Are paired reads in one file (usually they are in two)", false);
options.comment("Parameters");
final boolean longReads = options.getOption("-lg", "longReads", "Parse and analyse as long reads", Document.DEFAULT_LONG_READS);
final int maxMatchesPerRead = options.getOption("-m", "maxMatchesPerRead", "Max matches per read", 100);
final boolean runClassifications = options.getOption("-class", "classify", "Run classification algorithm", true);
final float minScore = options.getOption("-ms", "minScore", "Min score", Document.DEFAULT_MINSCORE);
final float maxExpected = options.getOption("-me", "maxExpected", "Max expected", Document.DEFAULT_MAXEXPECTED);
final float minPercentIdentity = options.getOption("-mpi", "minPercentIdentity", "Min percent identity", Document.DEFAULT_MIN_PERCENT_IDENTITY);
final float topPercent = options.getOption("-top", "topPercent", "Top percent", Document.DEFAULT_TOPPERCENT);
final int minSupport;
final float minSupportPercent;
{
final float minSupportPercent0 = options.getOption("-supp", "minSupportPercent", "Min support as percent of assigned reads (0==off)", Document.DEFAULT_MINSUPPORT_PERCENT);
final int minSupport0 = options.getOption("-sup", "minSupport", "Min support (0==off)", Document.DEFAULT_MINSUPPORT);
if (minSupportPercent0 != Document.DEFAULT_MINSUPPORT_PERCENT && minSupport0 == Document.DEFAULT_MINSUPPORT) {
minSupportPercent = minSupportPercent0;
minSupport = 0;
} else if (minSupportPercent0 == Document.DEFAULT_MINSUPPORT_PERCENT && minSupport0 != Document.DEFAULT_MINSUPPORT) {
minSupportPercent = 0;
minSupport = minSupport0;
} else if (minSupportPercent0 != Document.DEFAULT_MINSUPPORT_PERCENT && minSupport0 != Document.DEFAULT_MINSUPPORT) {
throw new IOException("Please specify a value for either --minSupport or --minSupportPercent, but not for both");
} else {
minSupportPercent = minSupportPercent0;
minSupport = minSupport0;
}
}
final float minPercentReadToCover = options.getOption("-mrc", "minPercentReadCover", "Min percent of read length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_READ_TO_COVER);
final float minPercentReferenceToCover = options.getOption("-mrefc", "minPercentReferenceCover", "Min percent of reference length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_REFERENCE_TO_COVER);
final int minReadLength=options.getOption("-mrl","minReadLength","Minimum read length",0);
final Document.LCAAlgorithm lcaAlgorithm = Document.LCAAlgorithm.valueOfIgnoreCase(options.getOption("-alg", "lcaAlgorithm", "Set the LCA algorithm to use for taxonomic assignment",
Document.LCAAlgorithm.values(), longReads ? Document.DEFAULT_LCA_ALGORITHM_LONG_READS.toString() : Document.DEFAULT_LCA_ALGORITHM_SHORT_READS.toString()));
final float lcaCoveragePercent = options.getOption("-lcp", "lcaCoveragePercent", "Set the percent for the LCA to cover",
lcaAlgorithm == Document.LCAAlgorithm.longReads ? Document.DEFAULT_LCA_COVERAGE_PERCENT_LONG_READS : (lcaAlgorithm == Document.LCAAlgorithm.weighted ? Document.DEFAULT_LCA_COVERAGE_PERCENT_WEIGHTED_LCA : Document.DEFAULT_LCA_COVERAGE_PERCENT_SHORT_READS));
final String readAssignmentModeDefaultValue;
if (options.isDoHelp()) {
readAssignmentModeDefaultValue = (Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS + " in long read mode, " + Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS + " else");
} else if (longReads)
readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS.toString();
else
readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS.toString();
final Document.ReadAssignmentMode readAssignmentMode = Document.ReadAssignmentMode.valueOfIgnoreCase(options.getOption("-ram", "readAssignmentMode", "Set the read assignment mode", readAssignmentModeDefaultValue));
final String contaminantsFile = options.getOption("-cf", "conFile", "File of contaminant taxa (one Id or name per line)", "");
options.comment("Classification support:");
final String mapDBFile = options.getOption("-mdb", "mapDB", "MEGAN mapping db (file megan-map.db)", "");
final Set<String> selectedClassifications = new HashSet<>(Arrays.asList(options.getOption("-on", "only", "Use only named classifications (if not set: use all)", new String[0])));
options.comment("Deprecated classification support:");
final boolean parseTaxonNames = options.getOption("-tn", "parseTaxonNames", "Parse taxon names", true);
final String acc2TaxaFile = options.getOption("-a2t", "acc2taxa", "Accessopm-to-Taxonomy mapping file", "");
final String synonyms2TaxaFile = options.getOption("-s2t", "syn2taxa", "Synonyms-to-Taxonomy mapping file", "");
{
final String tags = options.getOption("-t4t", "tags4taxonomy", "Tags for taxonomy id parsing (must set to activate id parsing)", "").trim();
ProgramProperties.preset("TaxonomyTags", tags);
ProgramProperties.preset("TaxonomyParseIds", tags.length() > 0);
}
final HashMap<String, String> class2AccessionFile = new HashMap<>();
final HashMap<String, String> class2SynonymsFile = new HashMap<>();
for (String cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
class2AccessionFile.put(cName, options.getOption("-a2" + cName.toLowerCase(), "acc2" + cName.toLowerCase(), "Accession-to-" + cName + " mapping file", ""));
class2SynonymsFile.put(cName, options.getOption("-s2" + cName.toLowerCase(), "syn2" + cName.toLowerCase(), "Synonyms-to-" + cName + " mapping file", ""));
final String tags = options.getOption("-t4" + cName.toLowerCase(), "tags4" + cName.toLowerCase(), "Tags for " + cName + " id parsing (must set to activate id parsing)", "").trim();
ProgramProperties.preset(cName + "Tags", tags);
ProgramProperties.preset(cName + "ParseIds", tags.length() > 0);
}
ProgramProperties.preset(IdParser.PROPERTIES_FIRST_WORD_IS_ACCESSION, options.getOption("-fwa", "firstWordIsAccession", "First word in reference header is accession number (set to 'true' for NCBI-nr downloaded Sep 2016 or later)", true));
ProgramProperties.preset(IdParser.PROPERTIES_ACCESSION_TAGS, options.getOption("-atags", "accessionTags", "List of accession tags", ProgramProperties.get(IdParser.PROPERTIES_ACCESSION_TAGS, IdParser.ACCESSION_TAGS)));
options.comment(ArgsOptions.OTHER);
ProgramExecutorService.setNumberOfCoresToUse(options.getOption("-t", "threads", "Number of threads", 8));
ConfigRequests.setCacheSize(options.getOption("-cs","cacheSize","Cache size for SQLITE (use with care)", ConfigRequests.getCacheSize()));
final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
options.done();
MeganProperties.initializeProperties(propertiesFile);
for (String fileName : metaDataFiles) {
FileUtils.checkFileReadableNonEmpty(fileName);
}
for (String fileName : readsFiles) {
FileUtils.checkFileReadableNonEmpty(fileName);
}
if (StringUtils.notBlank(contaminantsFile))
FileUtils.checkFileReadableNonEmpty(contaminantsFile);
final Collection<String> mapDBClassifications = AccessAccessionMappingDatabase.getContainedClassificationsIfDBExists(mapDBFile);
if (mapDBClassifications.size() > 0 && (StringUtils.hasPositiveLengthValue(class2AccessionFile) || StringUtils.hasPositiveLengthValue(class2SynonymsFile)))
throw new UsageException("Illegal to use both --mapDB and ---acc2... or --syn2... options");
if (mapDBClassifications.size() > 0)
ClassificationManager.setMeganMapDBFile(mapDBFile);
final ArrayList<String> cNames = new ArrayList<>();
for (String cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
if ((selectedClassifications.size() == 0 || selectedClassifications.contains(cName))
&& (mapDBClassifications.contains(cName) || class2AccessionFile.get(cName).length() > 0 || class2SynonymsFile.get(cName).length() > 0))
cNames.add(cName);
}
if (cNames.size() > 0)
System.err.println("Functional classifications to use: " + StringUtils.toString(cNames, ", "));
final boolean processInPairs = (pairedReads && !pairsInSingleFile);
if (outputFiles.length == 1) {
if (blastFiles.length == 1 || (processInPairs && blastFiles.length == 2)) {
if ((new File(outputFiles[0]).isDirectory()))
outputFiles[0] = (new File(outputFiles[0], FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutPath(FileUtils.getFileNameWithoutZipOrGZipSuffix(blastFiles[0])), ".rma6"))).getPath();
} else if (blastFiles.length > 1) {
if (!(new File(outputFiles[0]).isDirectory()))
throw new IOException("Multiple files given, but given single output is not a directory");
String outputDirectory = (new File(outputFiles[0])).getParent();
if (!processInPairs) {
outputFiles = new String[blastFiles.length];
for (int i = 0; i < blastFiles.length; i++)
outputFiles[i] = new File(outputDirectory, FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutZipOrGZipSuffix(FileUtils.getFileNameWithoutPath(blastFiles[i])), ".rma6")).getPath();
} else {
outputFiles = new String[blastFiles.length / 2];
for (int i = 0; i < blastFiles.length; i += 2)
outputFiles[i / 2] = new File(outputDirectory, FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutZipOrGZipSuffix(FileUtils.getFileNameWithoutPath(blastFiles[i])), ".rma6")).getPath();
}
}
} else // output.length >1
{
if ((!processInPairs && blastFiles.length != outputFiles.length) || (processInPairs && blastFiles.length != 2 * outputFiles.length))
throw new IOException("Number of input and output files do not match");
}
if (metaDataFiles.length > 1 && metaDataFiles.length != outputFiles.length) {
throw new IOException("Number of metadata files (" + metaDataFiles.length + ") doesn't match number of output files (" + outputFiles.length + ")");
}
if (readsFiles.length == 0) {
readsFiles = new String[blastFiles.length];
Arrays.fill(readsFiles, "");
} else if (readsFiles.length != blastFiles.length)
throw new IOException("Number of reads files must equal number of BLAST files");
final IdMapper taxonIdMapper = ClassificationManager.get(Classification.Taxonomy, true).getIdMapper();
final IdMapper[] idMappers = new IdMapper[cNames.size()];
// Load all mapping files:
if (runClassifications) {
ClassificationManager.get(Classification.Taxonomy, true);
taxonIdMapper.setUseTextParsing(parseTaxonNames);
if (mapDBFile.length() > 0) {
taxonIdMapper.loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
}
if (acc2TaxaFile.length() > 0) {
taxonIdMapper.loadMappingFile(acc2TaxaFile, IdMapper.MapType.Accession, false, new ProgressPercentage());
}
if (synonyms2TaxaFile.length() > 0) {
taxonIdMapper.loadMappingFile(synonyms2TaxaFile, IdMapper.MapType.Synonyms, false, new ProgressPercentage());
}
for (int i = 0; i < cNames.size(); i++) {
final String cName = cNames.get(i);
idMappers[i] = ClassificationManager.get(cName, true).getIdMapper();
if (mapDBClassifications.contains(cName))
idMappers[i].loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
if (class2AccessionFile.get(cName).length() > 0)
idMappers[i].loadMappingFile(class2AccessionFile.get(cName), IdMapper.MapType.Accession, false, new ProgressPercentage());
if (class2SynonymsFile.get(cName).length() > 0)
idMappers[i].loadMappingFile(class2SynonymsFile.get(cName), IdMapper.MapType.Synonyms, false, new ProgressPercentage());
}
}
/*
* process each set of files:
*/
for (int i = 0; i < blastFiles.length; i++) {
final int iOutput;
if (processInPairs) {
if ((i % 2) == 1)
continue; // skip odd numbers
iOutput = i / 2;
System.err.println("Processing " + blastFormat + " files: " + blastFiles[i] + ", " + blastFiles[i + 1]);
System.err.println("Output file: " + outputFiles[iOutput]);
} else {
iOutput = i;
System.err.println("Processing " + blastFormat + " file: " + blastFiles[i]);
System.err.println("Output file: " + outputFiles[i]);
}
ProgressListener progressListener = new ProgressPercentage();
final Document doc = new Document();
doc.getActiveViewers().add(Classification.Taxonomy);
doc.getActiveViewers().addAll(cNames);
doc.setMinScore(minScore);
doc.setMinPercentIdentity(minPercentIdentity);
doc.setMaxExpected(maxExpected);
doc.setTopPercent(topPercent);
doc.setMinSupportPercent(minSupportPercent);
doc.setMinSupport(minSupport);
doc.setPairedReads(pairedReads);
doc.setPairedReadSuffixLength(pairedReadsSuffixLength);
if (blastMode == BlastMode.Unknown)
doc.setBlastMode(BlastModeUtils.getBlastMode(blastFiles[0]));
else
doc.setBlastMode(blastMode);
doc.setLcaAlgorithm(lcaAlgorithm);
doc.setLcaCoveragePercent(lcaCoveragePercent);
doc.setMinPercentReadToCover(minPercentReadToCover);
doc.setMinPercentReferenceToCover(minPercentReferenceToCover);
doc.setMinReadLength(minReadLength);
doc.setLongReads(longReads);
doc.setReadAssignmentMode(readAssignmentMode);
if (contaminantsFile.length() > 0) {
ContaminantManager contaminantManager = new ContaminantManager();
contaminantManager.read(contaminantsFile);
System.err.printf("Contaminants profile: %,d input, %,d total%n", contaminantManager.inputSize(), contaminantManager.size());
doc.getDataTable().setContaminants(contaminantManager.getTaxonIdsString());
doc.setUseContaminantFilter(contaminantManager.size() > 0);
}
if (!processInPairs)
createRMA6FileFromBLAST("BLAST2RMA6", blastFiles[i], blastFormat, readsFiles[i], outputFiles[iOutput], useCompression, doc, maxMatchesPerRead, progressListener);
else
createRMA6FileFromBLASTPair("BLAST2RMA6", blastFiles[i], blastFiles[i + 1], blastFormat, readsFiles[i], readsFiles[i + 1], outputFiles[iOutput], useCompression, doc, maxMatchesPerRead, progressListener);
progressListener.close();
final RMA6Connector connector = new RMA6Connector(outputFiles[i]);
if (metaDataFiles.length > 0) {
try {
System.err.println("Saving metadata:");
SampleAttributeTable sampleAttributeTable = new SampleAttributeTable();
sampleAttributeTable.read(new FileReader(metaDataFiles[Math.min(iOutput, metaDataFiles.length - 1)]),
Collections.singletonList(FileUtils.getFileBaseName(FileUtils.getFileNameWithoutPath(outputFiles[iOutput]))), false);
Map<String, byte[]> label2data = new HashMap<>();
label2data.put(SampleAttributeTable.SAMPLE_ATTRIBUTES, sampleAttributeTable.getBytes());
connector.putAuxiliaryData(label2data);
System.err.println("done");
} catch (Exception ex) {
Basic.caught(ex);
}
}
progressListener.incrementProgress();
}
}
/**
* create an RMA6 file from a BLAST file
*
*/
private static void createRMA6FileFromBLAST(String creator, String blastFile, BlastFileFormat format, String queryFile, String rma6FileName, boolean useCompression, Document doc,
int maxMatchesPerRead, ProgressListener progressListener) throws IOException, CanceledException, SQLException {
final RMA6FromBlastCreator rma6Creator = new RMA6FromBlastCreator(creator, format, doc.getBlastMode(), new String[]{blastFile}, new String[]{queryFile}, rma6FileName, useCompression, doc, maxMatchesPerRead);
rma6Creator.parseFiles(progressListener);
}
/**
* create an RMA6 file from a pair of BLAST files
*
*/
private static void createRMA6FileFromBLASTPair(String creator, String blastFile1, String blastFile2, BlastFileFormat format, String queryFile1, String queryFile2, String rma6FileName, boolean useCompression, Document doc,
int maxMatchesPerRead, ProgressListener progressListener) throws IOException, CanceledException, SQLException {
final RMA6FromBlastCreator rma6Creator = new RMA6FromBlastCreator(creator, format, doc.getBlastMode(), new String[]{blastFile1, blastFile2}, new String[]{queryFile1, queryFile2}, rma6FileName, useCompression, doc, maxMatchesPerRead);
rma6Creator.parseFiles(progressListener);
}
}
| 23,104 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
CompareProteinAlignments.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/CompareProteinAlignments.java | /*
* CompareProteinAlignments.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.interval.Interval;
import jloda.util.interval.IntervalTree;
import jloda.util.progress.ProgressPercentage;
import megan.core.MeganFile;
import megan.data.*;
import megan.main.MeganProperties;
import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
 * Compares the protein alignments contained in two or more analyses (DAA/RMA files)
 * of the same reads or contigs. For every pair of input files and every read, reports
 * which matches occur only in one file, are longer in one file, or are identical in both,
 * measured in aligned amino acids on the reference.
 */
public class CompareProteinAlignments {
    /**
     * main program: runs the comparison and reports wall-clock time
     *
     * @param args command-line arguments
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("Compare Protein Alignments");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);

            long start = System.currentTimeMillis();
            (new CompareProteinAlignments()).run(args);
            System.err.println("Time: " + ((System.currentTimeMillis() - start) / 1000) + "s");
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * parses options and compares every pair (i,j), i&lt;j, of the given input files,
     * writing one line per read plus a "total" summary line per pair
     *
     * @param args command-line arguments
     * @throws Exception if option parsing, file access or the comparison fails
     */
    private void run(String[] args) throws Exception {
        final ArgsOptions options = new ArgsOptions(args, this, "Compares protein alignments for different analyses of the same sequences");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and output");
        final String[] inputFiles = options.getOptionMandatory("-i", "Input DAA or RMA files", "Input files", new String[0]);
        final String outputFileName = options.getOption("-o", "output", "Output file (stdout ok)", "");
        options.comment("Options");
        // normalizer maps read names so that the same read can be located across files
        final NameNormalizer normalizer = new NameNormalizer(options.getOption("-e", "nameEdit", "Command A/B applied as replaceAll(A,B) to all read/contig names", ""));
        final boolean onlyCompareDominatingMatches = options.getOption("-d", "dominatingOnly", "Compare only dominating matches", false);
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        if (inputFiles.length < 2)
            throw new UsageException("--input '" + StringUtils.toString(inputFiles, " ") + "': must specify at least 2 input files");
        if (onlyCompareDominatingMatches)
            throw new UsageException("--dominatingOnly: not implemented");

        final Writer w = new BufferedWriter(outputFileName.equalsIgnoreCase("stdout") ? new OutputStreamWriter(System.out) : new FileWriter(outputFileName));
        try {
            w.write("# " + new ComparisonResult().getFormatString() + "\n");
            // compare every unordered pair of input files exactly once
            for (int i = 0; i < inputFiles.length; i++) {
                final MeganFile file1 = new MeganFile();
                file1.setFileFromExistingFile(inputFiles[i], true);
                final IConnector connector1 = file1.getConnector();

                for (int j = i + 1; j < inputFiles.length; j++) {
                    final MeganFile file2 = new MeganFile();
                    file2.setFileFromExistingFile(inputFiles[j], true);
                    final IConnector connector2 = file2.getConnector();

                    // index of file2 by normalized read name, so reads in file1 can be looked up directly
                    final Map<String, Long> name2Uid = getName2Uid(connector2, normalizer);
                    // NOTE(review): getter2 is never closed; if IReadBlockGetter holds resources this leaks — TODO confirm
                    final IReadBlockGetter getter2 = connector2.getReadBlockGetter(0, 10, true, true);

                    final ComparisonResult total = new ComparisonResult("total", 0, 0);
                    int count = 0;
                    try (IReadBlockIterator it = connector1.getAllReadsIterator(0, 10, true, true);
                         ProgressPercentage progress = new ProgressPercentage("Comparing files " + inputFiles[i] + " and " + inputFiles[j], it.getMaximumProgress())) {
                        w.write("# Comparison " + FileUtils.getFileNameWithoutPath(inputFiles[i]) + " and " + FileUtils.getFileNameWithoutPath(inputFiles[j]) + ":\n");
                        while (it.hasNext()) {
                            final IReadBlock readBlock1 = it.next();
                            final String name1 = normalizer.apply(readBlock1.getReadName());
                            final Long uid2 = name2Uid.get(name1);
                            if (uid2 == null)
                                throw new IOException("Read '" + name1 + "' not found, uid=null");
                            final IReadBlock readBlock2 = getter2.getReadBlock(uid2);
                            final ComparisonResult comparison = computeComparison(normalizer.apply(name1), readBlock1, readBlock2);
                            total.add(comparison);
                            w.write(comparison + "\n");
                            progress.setProgress(it.getProgress());
                            count++;
                        }
                    }
                    // only emit the summary line when more than one read was compared
                    if (count > 1)
                        w.write(total + "\n");
                }
            }
            w.flush();
        } finally {
            if (!outputFileName.equalsIgnoreCase("stdout"))
                w.close();
        }
    }

    /**
     * compares the matches of the same read in two files, reference accession by reference accession
     *
     * @param name       normalized read name, used as the label of the result row
     * @param readBlock1 the read and its matches in file A
     * @param readBlock2 the read and its matches in file B
     * @return per-read comparison counts
     */
    private ComparisonResult computeComparison(final String name, IReadBlock readBlock1, IReadBlock readBlock2) {
        final Map<String, ArrayList<IMatchBlock>> accession2Matches1 = computeAccession2Matches(readBlock1);
        final Map<String, ArrayList<IMatchBlock>> accession2Matches2 = computeAccession2Matches(readBlock2);

        final ComparisonResult comparison = new ComparisonResult(name, readBlock1.getReadLength(), readBlock2.getReadLength());
        // covered bases on the query, per file
        comparison.coveredInA = computeIntervalTreeOnQuery(getAllMatches(readBlock1)).getCovered();
        comparison.coveredInB = computeIntervalTreeOnQuery(getAllMatches(readBlock2)).getCovered();

        for (String accession : accession2Matches1.keySet()) {
            final ArrayList<IMatchBlock> matches1 = accession2Matches1.get(accession);
            if (!accession2Matches2.containsKey(accession)) {
                // reference only hit in A: all of A's matches count as A-only
                comparison.matchesOnlyInA += matches1.size();
                comparison.alignedAAOnlyInA += computeAlignedBases(matches1);
            } else {
                // reference hit in both: classify matches by overlap on the reference
                final IntervalTree<IMatchBlock> intervalTree1 = computeIntervalTreeOnReference(matches1);
                final ArrayList<IMatchBlock> matches2 = accession2Matches2.get(accession);
                final IntervalTree<IMatchBlock> intervalTree2 = computeIntervalTreeOnReference(matches2);
                {
                    int[] count = computeOnlyInFirst(matches1, intervalTree2);
                    comparison.matchesOnlyInA += count[0];
                    comparison.alignedAAOnlyInA += count[1];
                }
                {
                    int[] count = computeLongerInFirst(matches1, intervalTree2);
                    comparison.matchesLongerInA += count[0];
                    comparison.alignedAALongerInA += count[1];
                    comparison.diffAALongerInA += count[2];
                }
                {
                    int[] count = computeLongerInFirst(matches2, intervalTree1);
                    comparison.matchesLongerInB += count[0];
                    comparison.alignedAALongerInB += count[1];
                    comparison.diffAALongerInB += count[2];
                }
                {
                    int[] count = computeSameInBoth(matches2, intervalTree1);
                    comparison.matchesInBoth += count[0];
                    comparison.alignedAAInBoth += count[1];
                }
            }
        }
        // references only hit in B
        for (String accession : accession2Matches2.keySet()) {
            final ArrayList<IMatchBlock> matches2 = accession2Matches2.get(accession);
            if (!accession2Matches1.containsKey(accession)) {
                comparison.matchesOnlyInB += matches2.size();
                comparison.alignedAAOnlyInB += computeAlignedBases(matches2);
            }
        }
        return comparison;
    }

    /**
     * collects all available match blocks of a read into a list
     *
     * @return list of match blocks
     */
    private ArrayList<IMatchBlock> getAllMatches(IReadBlock readBlock) {
        final ArrayList<IMatchBlock> list = new ArrayList<>(readBlock.getNumberOfAvailableMatchBlocks());
        for (int i = 0; i < readBlock.getNumberOfAvailableMatchBlocks(); i++)
            list.add(readBlock.getMatchBlock(i));
        return list;
    }

    /**
     * computes the number of matches not present in the tree (no overlap on the reference at all)
     *
     * @return {number of alignments, aligned amino acids}
     */
    private static int[] computeOnlyInFirst(ArrayList<IMatchBlock> matches, IntervalTree<IMatchBlock> tree) {
        int[] count = {0, 0};
        for (IMatchBlock matchBlock : matches) {
            int a = getSubjStart(matchBlock);
            int b = getSubjEnd(matchBlock);
            if (tree.getIntervals(a, b).size() == 0) {
                count[0]++;
                count[1] += Math.abs(a - b) + 1;
            }
        }
        return count;
    }

    /**
     * gets the number of matches that are longer than their best-overlapping counterpart in the tree
     *
     * @return {number of alignments, aligned amino acids, total length surplus}
     */
    private static int[] computeLongerInFirst(ArrayList<IMatchBlock> matches, IntervalTree<IMatchBlock> tree) {
        int[] count = {0, 0, 0};
        for (IMatchBlock matchBlock : matches) {
            int a = getSubjStart(matchBlock);
            int b = getSubjEnd(matchBlock);
            final Interval<IMatchBlock>[] overlappers = tree.getIntervalsSortedByDecreasingIntersectionLength(a, b);
            if (overlappers.length > 0) {
                // compare against the counterpart with the largest intersection
                final int diff = Math.abs(a - b) + 1 - overlappers[0].length();
                if (diff > 0) {
                    count[0]++;
                    count[1] += Math.abs(a - b) + 1;
                    count[2] += diff;
                }
            }
        }
        return count;
    }

    /**
     * gets the number of matches whose length equals that of their best-overlapping counterpart in the tree
     *
     * @return {number of alignments, aligned amino acids}
     */
    private static int[] computeSameInBoth(ArrayList<IMatchBlock> matches, IntervalTree<IMatchBlock> tree) {
        int[] count = {0, 0};
        for (IMatchBlock matchBlock : matches) {
            int a = getSubjStart(matchBlock);
            int b = getSubjEnd(matchBlock);
            final Interval<IMatchBlock>[] overlappers = tree.getIntervalsSortedByDecreasingIntersectionLength(a, b);
            if (overlappers.length > 0) {
                final int diff = Math.abs(a - b) + 1 - overlappers[0].length();
                if (diff == 0) {
                    count[0]++;
                    count[1] += Math.abs(a - b) + 1;
                }
            }
        }
        return count;
    }

    /**
     * computes the interval tree over reference (subject) coordinates for a set of matches
     *
     * @return interval tree
     */
    private static IntervalTree<IMatchBlock> computeIntervalTreeOnReference(ArrayList<IMatchBlock> matches) {
        final IntervalTree<IMatchBlock> tree = new IntervalTree<>();
        for (IMatchBlock matchBlock : matches) {
            tree.add(getSubjStart(matchBlock), getSubjEnd(matchBlock), matchBlock);
        }
        return tree;
    }

    /**
     * computes the interval tree over query coordinates for a set of matches
     *
     * @return interval tree
     */
    private static IntervalTree<IMatchBlock> computeIntervalTreeOnQuery(ArrayList<IMatchBlock> matches) {
        final IntervalTree<IMatchBlock> tree = new IntervalTree<>();
        for (IMatchBlock matchBlock : matches) {
            tree.add(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd(), matchBlock);
        }
        return tree;
    }

    /**
     * computes the total number of aligned amino acids (on the reference) in a list of matches
     *
     * @return number of aligned positions
     */
    private static int computeAlignedBases(ArrayList<IMatchBlock> matches) {
        int count = 0;
        for (IMatchBlock matchBlock : matches) {
            count += Math.abs(getSubjStart(matchBlock) - getSubjEnd(matchBlock)) + 1;
        }
        return count;
    }

    /**
     * gets the subject start coordinate by parsing the first "Sbjct" line of the match text
     *
     * @return start coordinate, or 0 if no "Sbjct" line is present
     */
    private static int getSubjStart(IMatchBlock matchBlock) {
        final String text = matchBlock.getText();
        int pos = text.indexOf("Sbjct");
        return (pos != -1 ? NumberUtils.parseInt(text.substring(pos + 5)) : 0);
    }

    /**
     * gets the subject end coordinate by parsing the last "Sbjct" line of the match text
     *
     * @return end coordinate, or 0 if it cannot be parsed
     */
    private static int getSubjEnd(IMatchBlock matchBlock) {
        final String text = matchBlock.getText();
        int pos = text.lastIndexOf("Sbjct");
        if (pos == -1)
            return 0;
        pos = StringUtils.skipNonWhiteSpace(text, pos); // Sjbct:
        pos = StringUtils.skipWhiteSpace(text, pos);
        pos = StringUtils.skipNonWhiteSpace(text, pos); // number
        // NOTE(review): two consecutive skipNonWhiteSpace calls; the second appears to be a no-op
        // when pos already sits on whitespace — TODO confirm intended
        pos = StringUtils.skipNonWhiteSpace(text, pos);
        pos = StringUtils.skipWhiteSpace(text, pos); // sequence
        pos = StringUtils.skipNonWhiteSpace(text, pos);
        if (pos >= text.length())
            return 0;
        return NumberUtils.parseInt(text.substring(pos));
    }

    /**
     * groups a read's matches by the accession of the reference they align to
     * (the accession is taken to be the first word of the match text)
     *
     * @return accession to matches mapping
     */
    private static Map<String, ArrayList<IMatchBlock>> computeAccession2Matches(IReadBlock readBlock) {
        final Map<String, ArrayList<IMatchBlock>> map = new HashMap<>();
        for (int m = 0; m < readBlock.getNumberOfAvailableMatchBlocks(); m++) {
            final IMatchBlock matchBlock = readBlock.getMatchBlock(m);
            final String accession = matchBlock.getTextFirstWord();
            ArrayList<IMatchBlock> matches = map.computeIfAbsent(accession, k -> new ArrayList<>());
            matches.add(matchBlock);
        }
        return map;
    }

    /**
     * computes a normalized-read-name to UId mapping by scanning all reads of the connector
     *
     * @return name to UId mapping
     */
    private Map<String, Long> getName2Uid(IConnector connector, final NameNormalizer normalizer) throws IOException {
        final Map<String, Long> name2uid = new HashMap<>();
        for (IReadBlockIterator it = connector.getAllReadsIterator(0, 10, false, false); it.hasNext(); ) {
            final IReadBlock readBlock = it.next();
            name2uid.put(normalizer.apply(readBlock.getReadName()), readBlock.getUId());
        }
        return name2uid;
    }

    /**
     * holds the result of comparing one read's alignments between file A and file B;
     * instances are also used as accumulators for the per-pair "total" row via {@link #add}
     */
    public static class ComparisonResult {
        String name;
        int lengthA;            // query length in file A
        int coveredInA;         // query positions covered by any match in A
        int matchesOnlyInA;
        int alignedAAOnlyInA;
        int matchesLongerInA;
        int alignedAALongerInA;
        int diffAALongerInA;    // total surplus length of A's longer matches
        int matchesInBoth;
        int alignedAAInBoth;
        int matchesLongerInB;
        int alignedAALongerInB;
        int diffAALongerInB;
        int matchesOnlyInB;
        int alignedAAOnlyInB;
        int lengthB;            // query length in file B
        int coveredInB;         // query positions covered by any match in B

        public ComparisonResult() {
        }

        public ComparisonResult(String name, int lengthA, int lengthB) {
            this.name = name;
            this.lengthA = lengthA;
            this.lengthB = lengthB;
        }

        /**
         * accumulates another result into this one (used to build the "total" row)
         */
        void add(ComparisonResult that) {
            this.lengthA += that.lengthA;
            this.coveredInA += that.coveredInA;
            this.matchesOnlyInA += that.matchesOnlyInA;
            this.alignedAAOnlyInA += that.alignedAAOnlyInA;
            this.matchesLongerInA += that.matchesLongerInA;
            this.alignedAALongerInA += that.alignedAALongerInA;
            this.diffAALongerInA += that.diffAALongerInA;
            this.matchesInBoth += that.matchesInBoth;
            this.alignedAAInBoth += that.alignedAAInBoth;
            this.matchesLongerInB += that.matchesLongerInB;
            this.alignedAALongerInB += that.alignedAALongerInB;
            this.diffAALongerInB += that.diffAALongerInB;
            this.matchesOnlyInB += that.matchesOnlyInB;
            this.alignedAAOnlyInB += that.alignedAAOnlyInB;
            this.lengthB += that.lengthB;
            this.coveredInB += that.coveredInB; // fixed: was '=', which discarded all but the last read's B-coverage
        }

        public String toString() {
            // NOTE(review): if totalAA or lengthA/lengthB is 0 the percentages become NaN/Infinity — TODO confirm acceptable
            final int totalAA = alignedAAOnlyInA + alignedAALongerInA + alignedAAInBoth + alignedAALongerInB + alignedAAOnlyInB;
            return String.format("%s\t%,d (%,d %.1f%%) %d (%,d %.1f%%) %,d (%,d +%d, %.1f%%) %,d (%,d %.1f%%) %,d (%,d +%,d %.1f%%) %,d (%,d %.1f%%) %,d (%,d %.1f%%)",
                    name, lengthA,
                    coveredInA, (100.0 * coveredInA) / lengthA,
                    matchesOnlyInA, alignedAAOnlyInA,
                    (100.0 * alignedAAOnlyInA) / totalAA,
                    matchesLongerInA, alignedAALongerInA, diffAALongerInA,
                    (100.0 * alignedAALongerInA) / totalAA,
                    matchesInBoth, alignedAAInBoth,
                    (100.0 * alignedAAInBoth) / totalAA,
                    matchesLongerInB, alignedAALongerInB, diffAALongerInB,
                    (100.0 * alignedAALongerInB) / totalAA,
                    matchesOnlyInB, alignedAAOnlyInB,
                    (100.0 * alignedAAOnlyInB) / totalAA,
                    lengthB,
                    coveredInB, (100.0 * coveredInB) / lengthB);
        }

        /**
         * describes the column layout of {@link #toString}, written as a header comment
         */
        String getFormatString() {
            return "name length-A (covered-A %) only-A (aa %) longer-A (aa + %) both (aa %) longer-B (aa + %) only-B (a %) length-B (covered-B %)";
        }
    }
}
| 18,300 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
RMA2Info.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/RMA2Info.java | /*
* RMA2Info.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.graph.Node;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.data.ClassificationFullTree;
import megan.classification.data.Name2IdMap;
import megan.core.Document;
import megan.data.*;
import megan.dialogs.export.CSVExportCViewer;
import megan.main.MeganProperties;
import megan.viewer.TaxonomicLevels;
import megan.viewer.TaxonomyData;
import java.io.*;
import java.util.*;
import java.util.function.Function;
/**
* provides info on a RMA files
* Daniel Huson, 11.2016
*/
public class RMA2Info {
/**
* RMA 2 info
*
*/
public static void main(String[] args) {
try {
ResourceManager.insertResourceRoot(megan.resources.Resources.class);
ProgramProperties.setProgramName("RMA2Info");
ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
PeakMemoryUsageMonitor.start();
(new RMA2Info()).run(args);
System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
System.exit(0);
} catch (Exception ex) {
Basic.caught(ex);
System.exit(1);
}
}
    /**
     * Parses the command-line options and writes the requested reports:
     * general file info, class-to-count listings, read-to-class listings,
     * and optionally a MEGAN summary file extracted from the RMA file.
     *
     * @param args command-line arguments
     * @throws UsageException if incompatible options are given (e.g. both -bo and -vo)
     * @throws IOException    if the input is not an RMA2/3/6 file or reading/writing fails
     */
    private void run(String[] args) throws UsageException, IOException {
        final var options = new ArgsOptions(args, this, "Analyses an RMA file");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and Output");
        final var daaFile = options.getOptionMandatory("-i", "in", "Input RMA file", "");
        final var outputFile = options.getOption("-o", "out", "Output file (stdout or .gz ok)", "stdout");

        options.comment("Commands");
        final var listGeneralInfo = options.getOption("-l", "list", "List general info about file", false);
        final var listMoreStuff = options.getOption("-m", "listMore", "List more info about file (if meganized)", false);

        final var listClass2Count = new HashSet<>(options.getOption("-c2c", "class2count", "List class to count for named classification(s) (Possible values: " + StringUtils.toString(ClassificationManager.getAllSupportedClassifications(), " ") + ")", new ArrayList<>()));
        final var listRead2Class = new HashSet<>(options.getOption("-r2c", "read2class", "List read to class assignments for named classification(s) (Possible values: " + StringUtils.toString(ClassificationManager.getAllSupportedClassifications(), " ") + ")", new ArrayList<>()));
        final var reportNames = options.getOption("-n", "names", "Report class names rather than class Id numbers", false);
        final var reportPaths = options.getOption("-p", "paths", "Report class paths rather than class Id numbers", false);
        final var prefixRank = options.getOption("-r", "ranks", "When reporting taxonomy, report taxonomic rank using single letter (K for Kingdom, P for Phylum etc)", false);
        final var majorRanksOnly = options.getOption("-mro", "majorRanksOnly", "Only use major taxonomic ranks", false);
        final var bacteriaOnly = options.getOption("-bo", "bacteriaOnly", "Only report bacterial reads and counts in taxonomic report", false);
        final var viralOnly = options.getOption("-vo", "virusOnly", "Only report viral reads and counts in taxonomic report", false);
        final var ignoreUnassigned = options.getOption("-u", "ignoreUnassigned", "Don't report on reads that are unassigned", true);
        final var useSummarized = options.getOption("-s", "sum", "Use summarized rather than assigned counts when listing class to count", false);

        final var extractSummaryFile = options.getOption("-es", "extractSummaryFile", "Output a MEGAN summary file (contains all classifications, but no reads or alignments)", "");
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        // restrict taxonomic reports to a subtree: Bacteria, Viruses, or the full tree
        final int taxonomyRoot;
        if (bacteriaOnly && viralOnly)
            throw new UsageException("Please specify only one of -bo and -vo");
        else if (bacteriaOnly)
            taxonomyRoot = TaxonomyData.BACTERIA_ID;
        else if (viralOnly)
            taxonomyRoot = TaxonomyData.VIRUSES_ID;
        else
            taxonomyRoot = TaxonomyData.ROOT_ID; // means no root set

        final var doc = new Document();
        doc.getMeganFile().setFileFromExistingFile(daaFile, true);
        // only RMA2/RMA3/RMA6 files are supported by this tool
        if (!doc.getMeganFile().isRMA2File() && !doc.getMeganFile().isRMA3File() && !doc.getMeganFile().isRMA6File())
            throw new IOException("Incorrect file type: " + doc.getMeganFile().getFileType());
        doc.loadMeganFile();

        // the writer transparently gzips the output if the file name ends in .gz
        try (var outs = new BufferedWriter(new OutputStreamWriter(FileUtils.getOutputStreamPossiblyZIPorGZIP(outputFile)))) {
            if (listGeneralInfo || listMoreStuff) {
                final var connector = doc.getConnector();
                outs.write(String.format("# Number of reads: %,d\n", doc.getNumberOfReads()));
                outs.write(String.format("# Number of matches: %,d\n", connector.getNumberOfMatches()));
                outs.write(String.format("# Alignment mode: %s\n", doc.getDataTable().getBlastMode()));
                outs.write("# Classifications:");
                // only list classifications that this MEGAN build knows how to interpret
                for (var classificationName : connector.getAllClassificationNames()) {
                    if (ClassificationManager.getAllSupportedClassifications().contains(classificationName)) {
                        outs.write(" " + classificationName);
                    }
                }
                outs.write("\n");

                if (listMoreStuff) {
                    // prefix every line of the stored summary with "## "
                    outs.write("# Summary:\n");
                    outs.write(doc.getDataTable().getSummary().replaceAll("^", "## ").replaceAll("\n", "\n## ") + "\n");
                }
            }

            if (listClass2Count.size() > 0) {
                reportClass2Count(doc, listGeneralInfo, listMoreStuff, reportPaths, reportNames, prefixRank, ignoreUnassigned, majorRanksOnly, listClass2Count, taxonomyRoot,useSummarized, outs);
            }

            if (listRead2Class.size() > 0) {
                reportRead2Count(doc, listGeneralInfo, listMoreStuff, reportPaths, reportNames, prefixRank, ignoreUnassigned, majorRanksOnly, listRead2Class, taxonomyRoot, outs);
            }
        }
        // optionally write a summary-only MEGAN file (no reads or alignments)
        if (extractSummaryFile.length() > 0) {
            try (var w = new FileWriter(extractSummaryFile)) {
                doc.getDataTable().write(w);
                doc.getSampleAttributeTable().write(w, false, true);
            }
        }
    }
    /**
     * Writes class-to-count lines for each requested classification.
     * For the taxonomy classification, counts can be restricted to a subtree
     * (taxonomyRootId), collapsed onto major ranks, prefixed with a one-letter
     * rank code, and reported as either assigned or summarized counts.
     *
     * @param doc                 the loaded document (provides the connector)
     * @param listGeneralInfo     if set, write a header comment per classification
     * @param listMoreStuff       if set, also write the header comment
     * @param reportPaths         report full classification paths instead of ids
     * @param reportNames         report class names instead of ids (ignored when reportPaths is set for taxonomy)
     * @param prefixRank          prefix each taxonomy line with the rank's first letter (or '-')
     * @param ignoreUnassigned    skip classes with id <= 0
     * @param majorRanksOnly      collapse taxonomy counts onto major ranks only
     * @param classificationNames classifications to report; unknown ones are silently skipped by the outer guard
     * @param taxonomyRootId      taxonomy subtree root to report below (ROOT_ID for the whole tree)
     * @param useSummarized       report summarized (subtree-accumulated) instead of assigned counts
     * @param writer              destination for the report lines
     * @throws IOException if reading the file or writing the report fails
     */
    public static void reportClass2Count(Document doc, boolean listGeneralInfo, boolean listMoreStuff, boolean reportPaths, boolean reportNames,
                                         boolean prefixRank, boolean ignoreUnassigned, boolean majorRanksOnly, Collection<String> classificationNames,
                                         int taxonomyRootId, boolean useSummarized, Writer writer) throws IOException {
        final var connector = doc.getConnector();

        // classifications present in the file AND supported by this MEGAN build
        final var availableClassificationNames = new HashSet<String>();
        for (var classificationName : connector.getAllClassificationNames()) {
            if (ClassificationManager.getAllSupportedClassifications().contains(classificationName)) {
                availableClassificationNames.add(classificationName);
            }
        }

        ClassificationFullTree taxonomyTree = null;

        for (var classificationName : classificationNames) {
            if (availableClassificationNames.contains(classificationName)) {
                if (listGeneralInfo || listMoreStuff)
                    writer.write("# Class to count for '" + classificationName + "':\n");

                // NOTE(review): this check can never fire — the enclosing 'if' already
                // established containment; likely a leftover from an earlier control flow
                if (!availableClassificationNames.contains(classificationName))
                    throw new IOException("Classification '" + classificationName + "' not found in file, available: " + StringUtils.toString(availableClassificationNames, " "));

                final var isTaxonomy = (classificationName.equals(Classification.Taxonomy));

                // name2IdMap is only needed when reporting names (taxonomy paths use TaxonomyData instead)
                final Name2IdMap name2IdMap;
                if (isTaxonomy && reportPaths) {
                    ClassificationManager.ensureTreeIsLoaded(Classification.Taxonomy);
                    name2IdMap = null;
                } else if (reportNames) {
                    name2IdMap = new Name2IdMap();
                    name2IdMap.loadFromFile((classificationName.equals(Classification.Taxonomy) ? "ncbi" : classificationName.toLowerCase()) + ".map");
                } else {
                    name2IdMap = null;
                }
                if (isTaxonomy && prefixRank) {
                    ClassificationManager.ensureTreeIsLoaded(Classification.Taxonomy);
                }
                if (isTaxonomy) {
                    taxonomyTree = ClassificationManager.get(Classification.Taxonomy, true).getFullTree();
                }
                final IClassificationBlock classificationBlock = connector.getClassificationBlock(classificationName);

                // id2count yields either the directly assigned weight or, below, the summarized subtree weight
                Function<Integer, Float> id2count;
                var ids = new TreeSet<Integer>();

                if (!useSummarized) {
                    id2count = classificationBlock::getWeightedSum;
                    ids.addAll(classificationBlock.getKeySet());
                } else {
                    // summarized: post-order traversal accumulates each node's weight plus its children's
                    ClassificationManager.ensureTreeIsLoaded(classificationName);
                    var tree = ClassificationManager.get(classificationName, true).getFullTree();
                    var id2summarized = new HashMap<Integer, Float>();
                    var root = (isTaxonomy? taxonomyTree.getANode(taxonomyRootId) : tree.getRoot());
                    tree.postorderTraversal(root,v -> {
                        var summarized = classificationBlock.getWeightedSum((Integer) v.getInfo());
                        for (var w : v.children()) {
                            var id = (Integer) w.getInfo();
                            if (id2summarized.containsKey(id))
                                summarized += id2summarized.get(id);
                        }
                        if (summarized > 0) {
                            var id = (Integer) v.getInfo();
                            id2summarized.put(id, summarized);
                        }
                    });
                    id2count = (id) -> id2summarized.getOrDefault(id, 0f);
                    ids.addAll(id2summarized.keySet());
                }

                if (isTaxonomy) {
                    final Function<Integer, Float> taxId2count;
                    if (!majorRanksOnly) {
                        taxId2count = id2count;
                    } else { // major ranks only
                        if (!useSummarized) {
                            // fold counts of minor-rank nodes upward until a major-rank ancestor absorbs them
                            var unused = new HashMap<Integer, Float>();
                            var map = new HashMap<Integer, Float>();
                            taxId2count = map::get;
                            taxonomyTree.postorderTraversal(taxonomyTree.getANode(taxonomyRootId), v -> {
                                var vid = (Integer) v.getInfo();
                                var count = id2count.apply(vid);
                                for (var w : v.children()) {
                                    var id = (Integer) w.getInfo();
                                    count += unused.getOrDefault(id, 0f);
                                }
                                if (count > 0) {
                                    if (vid.equals(taxonomyRootId) || TaxonomicLevels.isMajorRank(TaxonomyData.getTaxonomicRank(vid))) {
                                        map.put(vid, count);
                                    } else
                                        unused.put(vid, count);
                                }
                            });
                            ids.clear();
                            ids.addAll(map.keySet());
                        } else { // use summarized: remove any ids that are not at official rank
                            taxId2count = id2count;
                            var keep = new ArrayList<Integer>();
                            for (var id : ids) {
                                if (id.equals(taxonomyRootId) || TaxonomicLevels.isMajorRank(TaxonomyData.getTaxonomicRank(id)))
                                    keep.add(id);
                            }
                            ids.clear();
                            ids.addAll(keep);
                        }
                    }
                    var totalCount=0;
                    for (Integer taxId : ids) {
                        if (taxId > 0 || !ignoreUnassigned) {
                            // label is a path, a name, or the raw id, depending on the options
                            final String classLabel;
                            if (reportPaths) {
                                classLabel = TaxonomyData.getPathOrId(taxId, majorRanksOnly);
                            } else if (name2IdMap == null || name2IdMap.get(taxId) == null)
                                classLabel = "" + taxId;
                            else
                                classLabel = name2IdMap.get(taxId);
                            if (prefixRank) {
                                // single-letter rank prefix, '-' when the rank is not a major rank
                                int rank = TaxonomyData.getTaxonomicRank(taxId);
                                String rankLabel = null;
                                if (TaxonomicLevels.isMajorRank(rank))
                                    rankLabel = TaxonomicLevels.getName(rank);
                                if (rankLabel == null || rankLabel.isEmpty())
                                    rankLabel = "-";
                                writer.write(rankLabel.charAt(0) + "\t");
                            }
                            writer.write(classLabel + "\t" + taxId2count.apply(taxId) + "\n");
                            totalCount++;
                            // Stats.count acts as an output limiter; stop when it says so
                            if(!Stats.count.apply(totalCount))
                                break;
                        }
                    }
                } else { // not taxonomy
                    if (reportPaths) {
                        final var classification = ClassificationManager.get(classificationName, true);

                        var totalCount=0;
                        for (var classId : ids) {
                            // a class id may map to several tree nodes; report one line per node
                            final var nodes = classification.getFullTree().getNodes(classId);
                            if (nodes != null) {
                                for (var v : nodes) {
                                    String label = CSVExportCViewer.getPath(classification, v);
                                    writer.write(label + "\t" + id2count.apply(classId) + "\n");
                                    totalCount++;
                                }
                            } else {
                                writer.write("Class " + classId + "\t" + id2count.apply(classId) + "\n");
                                totalCount++;
                            }
                            if(!Stats.count.apply(totalCount))
                                break;
                        }
                    } else {
                        // NOTE(review): iterates classificationBlock.getKeySet() rather than 'ids',
                        // so with -s (summarized) only directly-assigned classes are listed here — TODO confirm
                        var totalCount=0;
                        for (var classId : classificationBlock.getKeySet()) {
                            if (classId > 0 || !ignoreUnassigned) {
                                final String className;
                                if (name2IdMap == null || name2IdMap.get(classId) == null)
                                    className = "" + classId;
                                else
                                    className = name2IdMap.get(classId);
                                writer.write(className + "\t" + id2count.apply(classId) + "\n");
                                totalCount++;
                                if(!Stats.count.apply(totalCount))
                                    break;
                            }
                        }
                    }
                }
            }
        }
    }
/**
 * report read to count: writes one tab-separated line per (read, assigned class) pair,
 * for every requested classification that is actually present in the file.
 * Output shape per line: readName [TAB rankLetter] TAB className (rank letter only with prefixRank for taxonomy).
 *
 * @param doc                 document whose connector provides classifications and read iterators
 * @param listGeneralInfo     if set (or listMoreStuff), write a "# Reads to class ..." header per classification
 * @param listMoreStuff       see listGeneralInfo
 * @param reportPaths         report full classification paths instead of names/ids
 * @param reportNames         report class names (loaded from the classification's .map file) instead of ids
 * @param prefixRank          for taxonomy only: prefix each line with the one-letter rank code ('?' if unknown)
 * @param ignoreUnassigned    skip classes with non-positive ids (unassigned/no-hit bins)
 * @param majorRanksOnly      for taxonomy only: map each taxon up to its lowest major-rank ancestor
 * @param classificationNames classifications to report, in the given order
 * @param taxonomyRoot        if >0, restrict taxonomy output to descendants of this taxon; 0 means no restriction
 * @param w                   destination writer (not closed here)
 * @throws IOException if reading the file or writing the report fails
 */
public static void reportRead2Count(Document doc, boolean listGeneralInfo, boolean listMoreStuff, boolean reportPaths, boolean reportNames,
                                    boolean prefixRank, boolean ignoreUnassigned, boolean majorRanksOnly,
                                    Collection<String> classificationNames, int taxonomyRoot, Writer w) throws IOException {
    final var connector = doc.getConnector();
    // cache of lazily loaded name maps, one per classification, shared across the loop below
    final var classification2NameMap = new HashMap<String, Name2IdMap>();
    // classifications that are both stored in the file and supported by this MEGAN build
    final var availableClassificationNames = new HashSet<String>();
    for (var classificationName : connector.getAllClassificationNames()) {
        if (ClassificationManager.getAllSupportedClassifications().contains(classificationName)) {
            availableClassificationNames.add(classificationName);
        }
    }
    ClassificationFullTree taxonomyTree = null;
    var totalCount = 0; // total lines written so far, fed to Stats.count to allow early termination
    for (var classificationName : classificationNames) {
        if (availableClassificationNames.contains(classificationName)) {
            if (listGeneralInfo || listMoreStuff)
                w.write("# Reads to class for '" + classificationName + "':\n");
            // NOTE(review): this check is unreachable — we are already inside the
            // availableClassificationNames.contains(...) branch above
            if (!availableClassificationNames.contains(classificationName))
                throw new IOException("Classification '" + classificationName + "' not found in file, available: " + StringUtils.toString(availableClassificationNames, " "));
            final var isTaxonomy = (classificationName.equals(Classification.Taxonomy));
            // exactly one of classification (for paths) or name2IdMap (for names) is non-null;
            // both null means ids are reported verbatim
            final Name2IdMap name2IdMap;
            final Classification classification;
            if (reportPaths) {
                classification = ClassificationManager.get(classificationName, true);
                name2IdMap = null;
            } else if (reportNames) {
                if (classification2NameMap.containsKey(classificationName))
                    name2IdMap = classification2NameMap.get(classificationName);
                else {
                    name2IdMap = new Name2IdMap();
                    // taxonomy names live in "ncbi.map"; all other classifications use "<name>.map"
                    name2IdMap.loadFromFile((classificationName.equals(Classification.Taxonomy) ? "ncbi" : classificationName.toLowerCase()) + ".map");
                    classification2NameMap.put(classificationName, name2IdMap);
                }
                classification = null;
            } else {
                name2IdMap = null;
                classification = null;
            }
            if (isTaxonomy && prefixRank) {
                // rank lookups below require the taxonomy tree to be in memory
                ClassificationManager.ensureTreeIsLoaded(Classification.Taxonomy);
            }
            if (isTaxonomy && taxonomyRoot > 0) {
                taxonomyTree = ClassificationManager.get(Classification.Taxonomy, true).getFullTree();
            }
            // TreeSet: deterministic, ascending class-id order in the report
            final var ids = new TreeSet<>(connector.getClassificationBlock(classificationName).getKeySet());
            for (var classId : ids) {
                if (isTaxonomy && !(taxonomyRoot == 0 || isDescendant(Objects.requireNonNull(taxonomyTree), classId, taxonomyRoot)))
                    continue;
                if (classId > 0 || !ignoreUnassigned) {
                    // NOTE(review): the 0 and 10 arguments are presumably min-score and max-expected
                    // thresholds for the iterator — TODO confirm against IConnector.getReadsIterator
                    try (IReadBlockIterator it = connector.getReadsIterator(classificationName, classId, 0, 10, true, false)) {
                        while (it.hasNext()) {
                            final IReadBlock readBlock = it.next();
                            final String className;
                            if (isTaxonomy) {
                                // NOTE(review): this reassigns the loop variable; harmless only if
                                // getLowestAncestorWithMajorRank is idempotent — TODO confirm
                                if (majorRanksOnly)
                                    classId = TaxonomyData.getLowestAncestorWithMajorRank(classId);
                                if (reportPaths) {
                                    className = TaxonomyData.getPathOrId(classId, majorRanksOnly);
                                } else if (name2IdMap == null || name2IdMap.get(classId) == null)
                                    className = "" + classId; // fall back to the numeric id
                                else
                                    className = name2IdMap.get(classId);
                                if (prefixRank) {
                                    var rank = TaxonomyData.getTaxonomicRank(classId);
                                    var rankLabel = TaxonomicLevels.getName(rank);
                                    if (rankLabel == null || rankLabel.isBlank())
                                        rankLabel = "?";
                                    w.write(readBlock.getReadName() + "\t" + rankLabel.charAt(0) + "\t" + className + "\n");
                                    totalCount++;
                                } else {
                                    w.write(readBlock.getReadName() + "\t" + className + "\n");
                                    totalCount++;
                                }
                            } else {
                                if (reportPaths) {
                                    // a class id may map to several nodes in the full tree; report each path
                                    var nodes = classification.getFullTree().getNodes(classId);
                                    if (nodes != null) {
                                        for (Node v : nodes) {
                                            var label = CSVExportCViewer.getPath(classification, v);
                                            w.write(readBlock.getReadName() + "\t" + label + "\n");
                                            totalCount++;
                                        }
                                    }
                                } else {
                                    if (name2IdMap == null || name2IdMap.get(classId) == null)
                                        className = "" + classId;
                                    else
                                        className = name2IdMap.get(classId);
                                    w.write(readBlock.getReadName() + "\t" + className + "\n");
                                    totalCount++;
                                }
                            }
                            // Stats.count returning false signals that the output limit was reached
                            if (!Stats.count.apply(totalCount))
                                return;
                        }
                    }
                }
            }
        }
    }
}
/**
 * determines whether the given taxon equals, or lies below, one of the given ancestor taxa
 * in the taxonomy tree (the original javadoc had the relation backwards: this tests
 * descendant-of, not ancestor-of)
 *
 * @param taxonomy    the full taxonomy tree
 * @param taxId       the taxon to test
 * @param ancestorIds one or more candidate ancestor taxon ids
 * @return true, if taxId equals or is a descendant of any of the ancestorIds; false if
 * taxId is unknown to the tree or no candidate is found on the path to the root
 */
private static boolean isDescendant(ClassificationFullTree taxonomy, int taxId, int... ancestorIds) {
    var v = taxonomy.getANode(taxId);
    while (v != null) {
        // check ALL candidate ancestors at the current node before ascending;
        // the previous implementation ascended once per non-matching candidate
        // (the climb was nested inside the for-loop), skipping a tree level for
        // every extra ancestor id and so missing legitimate matches
        if (v.getInfo() instanceof Integer vid) {
            for (var id : ancestorIds) {
                if (id == vid)
                    return true;
            }
        }
        // move one level up; null terminates the walk at the root
        v = (v.getInDegree() > 0 ? v.getFirstInEdge().getSource() : null);
    }
    return false;
}
}
| 24,038 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
HMM2Blastx.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/HMM2Blastx.java | /*
* HMM2Blastx.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.seq.FastAFileIterator;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.main.MeganProperties;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
/**
* converts HMM output to BLASTX-like output
*/
/**
 * converts HMM (hmmsearch plain-text) output to BLASTX-like output that MEGAN can parse.
 * Reads one or more HMM report files, collects the best-scoring alignments per read
 * (optionally ordered by the original reads files) and writes a pseudo-BLASTX report.
 */
public class HMM2Blastx {
    /** parser states for the line-oriented HMM report state machine */
    public enum EXPECTING {NextRefOrQuery, NextReference, NextQuery, Score, DomainAlignment}

    /**
     * program entry point: runs the conversion and reports wall-clock time
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("HMM2BlastX");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);

            long start = System.currentTimeMillis();
            (new HMM2Blastx()).run(args);
            System.err.println("Time: " + ((System.currentTimeMillis() - start) / 1000) + "s");
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * runs the program: parses options, reads the HMM files and writes the BLASTX-style output
     *
     * @param args command-line arguments
     * @throws Exception if option parsing, reading or writing fails
     */
    private void run(String[] args) throws Exception {
        final ArgsOptions options = new ArgsOptions(args, this, "Converts HMM output to BLASTX");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        final String[] inputFiles = options.getOptionMandatory("-i", "input", "HMM files", new String[0]);
        final String[] readsFiles = options.getOption("-r", "reads", "Reads files (to determine order of output)", new String[0]);
        final String outputFileName = options.getOption("-o", "output", "Output file", "");
        final float minScore = options.getOption("-ms", "minScore", "Minimum bit score", 0);
        final int maxMatchesPerRead = options.getOption("-ma", "maxAlignmentsPerRead", "Maximum number of alignments per read", 25);
        final boolean reportNoHits = options.getOption("nh", "reportNoHits", "Report reads with no hits", false);
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();
        MeganProperties.initializeProperties(propertiesFile);

        // read names in input order (drives output order) and read name -> sequence length
        final List<String> reads = new LinkedList<>();
        final Map<String, Integer> read2length = new HashMap<>();
        if (readsFiles != null) {
            for (String readsFile : readsFiles) {
                try (IFastAIterator it = FastAFileIterator.getFastAOrFastQAsFastAIterator(readsFile)) {
                    final ProgressPercentage progress = new ProgressPercentage("Parsing file: " + readsFile, it.getMaximumProgress());
                    while (it.hasNext()) {
                        Pair<String, String> pair = it.next();
                        String name = StringUtils.getFirstWord(StringUtils.swallowLeadingGreaterSign(pair.getFirst()));
                        reads.add(name);
                        read2length.put(name, pair.getSecond().length()); // reuse name (was recomputed)
                        progress.setProgress(it.getProgress());
                    }
                    progress.close();
                }
            }
            System.err.printf("Reads: %,9d%n", reads.size());
        }

        // per query: alignments sorted by descending score (ties broken by text), capped at maxMatchesPerRead
        final Map<String, SortedSet<Pair<Float, String>>> query2alignments = new HashMap<>();

        int countReferences = 0;
        int countQueries = 0;
        int countAlignments = 0;

        for (String inputFile : inputFiles) {
            try (final FileLineIterator it = new FileLineIterator(inputFile)) {
                final ProgressPercentage progress = new ProgressPercentage("Parsing file: " + inputFile, it.getMaximumProgress());

                EXPECTING state = EXPECTING.NextRefOrQuery;
                // note: in hmmsearch output the "Query:" line names the HMM (our reference),
                // and each ">>" line names a matched sequence (our query read)
                String referenceName = null;
                String queryName = null;
                int frame = 0;
                float score = 0;
                float expected = 0;

                while (it.hasNext()) {
                    String aLine = it.next().trim();

                    if (state == EXPECTING.NextRefOrQuery) {
                        if (aLine.startsWith("Query:"))
                            state = EXPECTING.NextReference;
                        else if (aLine.startsWith(">>"))
                            state = EXPECTING.NextQuery;
                    }

                    switch (state) {
                        case NextRefOrQuery:
                            break;
                        case NextReference:
                            if (aLine.startsWith("Query:")) { // yes, queries are references...
                                referenceName = StringUtils.getWordAfter("Query:", aLine);
                                state = EXPECTING.NextQuery;
                                countReferences++;
                            }
                            break;
                        case NextQuery:
                            if (aLine.startsWith(">>")) {
                                queryName = StringUtils.getWordAfter(">>", aLine);
                                // reads were translated in six frames upstream; "_RFn" encodes the frame
                                frame = getFrameFromSuffix(Objects.requireNonNull(queryName));
                                queryName = removeFrameSuffix(queryName);
                                state = EXPECTING.Score;
                                countQueries++;
                            }
                            break;
                        case Score:
                            if (aLine.contains(" score:")) {
                                score = NumberUtils.parseFloat(StringUtils.getWordAfter(" score:", aLine));
                                if (aLine.contains(" E-value:"))
                                    expected = NumberUtils.parseFloat(StringUtils.getWordAfter(" E-value:", aLine));
                                else
                                    throw new IOException("Couldn't find E-value in: " + aLine);
                                state = EXPECTING.DomainAlignment;
                                countAlignments++;
                            }
                            break;
                        case DomainAlignment:
                            if (aLine.endsWith("RF")) // skip the optional reference-annotation line
                                aLine = it.next().trim();
                            /*
                             xxxxxxxxxxxxxxxxxx.......  RF
                             RNA_pol_Rpb2_1           134 GtFIInGtERVvvsQehrspgvffd 158
                                                          GtF+InGtERV+vsQ+hrspgvffd
                             SRR172902.5536465_RF1.0    1 GTFVINGTERVIVSQLHRSPGVFFD 25
                            */
                            int queryStart;
                            int queryEnd;
                            int refStart;
                            int refEnd;
                            String refAligned;
                            String midAligned;
                            String queryAligned;
                            {
                                final String[] refTokens = aLine.split("\\s+");
                                if (refTokens.length != 4)
                                    throw new IOException("Expected 4 tokens, got: " + refTokens.length + ": " + aLine);
                                if (!refTokens[0].equals(referenceName))
                                    throw new IOException("Ref expected, got: " + aLine);
                                refStart = NumberUtils.parseInt(refTokens[1]);
                                refAligned = refTokens[2];
                                refEnd = NumberUtils.parseInt(refTokens[3]);
                            }
                            {
                                midAligned = it.next().trim();
                            }
                            {
                                aLine = it.next().trim();
                                final String[] queryTokens = aLine.split("\\s+");
                                if (queryTokens.length != 4)
                                    throw new IOException("Expected 4 tokens, got: " + queryTokens.length);
                                if (!removeFrameSuffix(queryTokens[0]).equals(queryName))
                                    throw new IOException("Query expected, got: " + aLine);
                                queryStart = NumberUtils.parseInt(queryTokens[1]);
                                queryAligned = queryTokens[2];
                                queryEnd = NumberUtils.parseInt(queryTokens[3]);
                            }
                            if (score >= minScore) {
                                String blastString = makeBlastXAlignment(referenceName, score, expected, queryAligned, midAligned, refAligned, queryStart, queryEnd, refStart, refEnd, frame, read2length.get(queryName));
                                SortedSet<Pair<Float, String>> alignments = query2alignments.computeIfAbsent(queryName, k -> new TreeSet<>((o1, o2) -> {
                                    if (o1.getFirst() > o2.getFirst())
                                        return -1;
                                    else if (o1.getFirst() < o2.getFirst())
                                        return 1;
                                    else return o1.getSecond().compareTo(o2.getSecond());
                                }));
                                // keep at most maxMatchesPerRead best alignments.
                                // fixed: the old code unconditionally removed last() after add(),
                                // so a duplicate insertion (add() == false) silently dropped a
                                // genuinely kept alignment; it also tested size with == rather than >=
                                if (alignments.size() < maxMatchesPerRead) {
                                    alignments.add(new Pair<>(score, blastString));
                                } else if (score >= alignments.last().getFirst()) {
                                    alignments.add(new Pair<>(score, blastString));
                                    while (alignments.size() > maxMatchesPerRead)
                                        alignments.remove(alignments.last()); // evict only what was actually exceeded
                                }
                            }
                            state = EXPECTING.NextRefOrQuery;
                            break;
                        default:
                            throw new IOException("Invalid case: " + state);
                    }
                    progress.setProgress(it.getProgress());
                }
                progress.close();
            }
        }
        System.err.printf("HMMs: %,9d%n", countReferences);
        System.err.printf("Reads: %,9d%n", countQueries);
        System.err.printf("Matches:%,10d%n", countAlignments);

        // if reads files were supplied, preserve their order; otherwise use whatever was matched
        final Collection<String> queryNames;
        if (!reads.isEmpty())
            queryNames = reads;
        else
            queryNames = query2alignments.keySet();

        int countAlignmentsWritten = 0;
        ProgressPercentage progress = new ProgressPercentage("Writing: " + outputFileName, queryNames.size());
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(outputFileName))) {
            writer.write("BLASTX from HMM using " + Basic.getShortName(this.getClass()) + "\n\n");
            for (String queryName : queryNames) {
                Set<Pair<Float, String>> alignments = query2alignments.get(queryName);
                if (alignments == null || alignments.size() == 0) {
                    if (reportNoHits) {
                        writer.write("Query= " + queryName + "\n\n");
                        writer.write(" ***** No hits found ******\n\n");
                    }
                } else {
                    writer.write("Query= " + queryName + "\n\n");
                    for (Pair<Float, String> pair : alignments) {
                        writer.write(pair.getSecond());
                        countAlignmentsWritten++;
                    }
                }
                progress.incrementProgress(); // fixed: was outside the loop, so progress never advanced per query
            }
        }
        progress.close();
        System.err.printf("Written:%,10d%n", countAlignmentsWritten);
    }

    /**
     * formats one alignment as a BLASTX-style match text, converting protein coordinates
     * back to DNA coordinates using the translation frame and the original read length
     *
     * @param queryLength length of the original DNA read, or null if unknown (coordinates stay untranslated)
     * @return blast match text
     * @throws IOException if the frame is negative (must be 0 = unknown, or 1-6)
     */
    private String makeBlastXAlignment(String referenceName, float score, float expected, String queryAligned, String midAligned, String refAligned, int queryStart, int queryEnd, int refStart, int refEnd, int frame, Integer queryLength) throws IOException {
        queryAligned = queryAligned.toUpperCase();
        midAligned = midAligned.toUpperCase();
        refAligned = refAligned.toUpperCase();

        if (frame < 0)
            throw new IOException("Illegal: frame=" + frame);
        if (frame > 0) {
            // map frames 1..6 to BLAST convention +1..+3 / -1..-3
            frame = (frame <= 3 ? frame : 3 - frame);
            if (queryLength != null) {
                if (frame > 0) {
                    queryStart = 3 * (queryStart - 1) + 1 + (frame - 1);
                    queryEnd = 3 * (queryEnd) + (frame - 1);
                } else // frame <0
                {
                    queryStart = queryLength - 3 * (queryStart - 1) + (frame + 1);
                    queryEnd = queryLength - 3 * queryEnd + 1 + (frame + 1);
                }
            }
        }
        if (frame == -2 && queryEnd == 0) { // remove last letter from alignment:
            queryAligned = queryAligned.substring(0, queryAligned.length() - 1);
            midAligned = midAligned.substring(0, midAligned.length() - 1);
            refAligned = refAligned.substring(0, refAligned.length() - 1);
            queryEnd = 1;
        }
        if (frame == 2 && queryLength != null && queryEnd == queryLength + 1) { // clamp one-past-the-end
            queryAligned = queryAligned.substring(0, queryAligned.length() - 1);
            midAligned = midAligned.substring(0, midAligned.length() - 1);
            refAligned = refAligned.substring(0, refAligned.length() - 1);
            queryEnd--;
        }

        StringBuilder buf = new StringBuilder();
        buf.append(">").append(referenceName).append("\n Length = -1\n\n");
        buf.append(String.format(" Score = %.1f (0), Expect = %g\n", score, expected));
        int[] identities = computeIdentities(midAligned);
        int[] positives = computePositives(midAligned);
        int[] gaps = computeGaps(queryAligned, refAligned, midAligned);
        buf.append(String.format(" Identities = %d/%d (%d%%), Positives = %d/%d (%d%%), Gaps = %d/%d (%d%%)\n",
                identities[0], identities[1], identities[2], positives[0], positives[1], positives[2], gaps[0], gaps[1], gaps[2]));
        buf.append(String.format(" Frame = %+d\n", frame));
        buf.append("\n");
        buf.append(String.format("Query: %8d %s %d\n", queryStart, queryAligned, queryEnd));
        buf.append(String.format("                %s\n", midAligned));
        buf.append(String.format("Sbjct: %8d %s %d\n", refStart, refAligned, refEnd));
        buf.append("\n");
        return buf.toString();
    }

    /**
     * counts identities (letters in the mid line) and returns {count, total, percent}
     */
    private int[] computeIdentities(String midLine) {
        int count = 0;
        for (int i = 0; i < midLine.length(); i++) {
            if (Character.isLetter(midLine.charAt(i)))
                count++;
        }
        final int length = Math.max(1, midLine.length()); // guard against division by zero on empty rows
        return new int[]{count, midLine.length(), (int) Math.round(100.0 * count / length)};
    }

    /**
     * counts positives (non-blank positions in the mid line) and returns {count, total, percent}
     */
    private int[] computePositives(String midLine) {
        int count = 0;
        for (int i = 0; i < midLine.length(); i++) {
            if (midLine.charAt(i) != ' ')
                count++;
        }
        final int length = Math.max(1, midLine.length()); // guard against division by zero on empty rows
        return new int[]{count, midLine.length(), (int) Math.round(100.0 * count / length)};
    }

    /**
     * counts gap characters in both aligned rows and returns {count, total, percent}
     */
    private int[] computeGaps(String queryAligned, String refAligned, String midLine) {
        int count = 0;
        for (int i = 0; i < queryAligned.length(); i++) {
            if (queryAligned.charAt(i) == '-')
                count++;
        }
        for (int i = 0; i < refAligned.length(); i++) {
            if (refAligned.charAt(i) == '-')
                count++;
        }
        final int length = Math.max(1, midLine.length()); // guard against division by zero on empty rows
        return new int[]{count, midLine.length(), (int) Math.round(100.0 * count / length)};
    }

    /**
     * extracts the translation frame from a "_RFn" name suffix
     *
     * @return frame 1-6, or -1 if no suffix is present
     */
    private int getFrameFromSuffix(String query) {
        int pos = query.indexOf("_RF");
        if (pos != -1)
            return NumberUtils.parseInt(query.substring(pos + 3));
        else
            return -1;
    }

    /**
     * strips the "_RFn" frame suffix, if present
     *
     * @return query without frame suffix
     */
    private String removeFrameSuffix(String query) {
        int pos = query.indexOf("_RF");
        if (pos != -1)
            return query.substring(0, pos);
        else
            return query;
    }
}
| 17,205 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ComputeComparison.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/ComputeComparison.java | /*
* ComputeComparison.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.parse.NexusStreamParser;
import jloda.util.progress.ProgressSilent;
import megan.commands.SaveCommand;
import megan.commands.show.CompareCommand;
import megan.core.Director;
import megan.core.Document;
import megan.core.MeganFile;
import megan.dialogs.compare.Comparer;
import megan.main.MeganProperties;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
/**
* compares multiple samples
* Daniel Huson, 8.2018
* @deprecated use CompareFiles.java instead
*/
@Deprecated
public class ComputeComparison {
    /**
     * main entry point: runs the comparison and reports total time and peak memory
     * (exits with status 1 on any failure)
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("ComputeComparison");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);

            PeakMemoryUsageMonitor.start();
            (new ComputeComparison()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * parses the command line, runs the MEGAN "compare" command over the input files
     * and saves the result as a MEGAN summary file; optionally attaches metadata.
     * Note: statement order matters — properties must be initialized before the
     * Director/Document are created, and the compare command must run before saving.
     *
     * @param args command-line arguments
     * @throws Exception if option parsing, comparison or saving fails
     */
    private void run(String[] args) throws Exception {
        final ArgsOptions options = new ArgsOptions(args, this, "Computes the comparison of multiple megan, RMA or meganized DAA files");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and Output:");
        ArrayList<String> inputFiles = new ArrayList<>(Arrays.asList(options.getOptionMandatory("-i", "in", "Input RMA and/or meganized DAA files (single directory ok)", new String[0])));
        final String outputFile = options.getOption("-o", "out", "Output file", "comparison.megan");
        final String metadataFile = options.getOption("-mdf", "metaDataFile", "Metadata file", "");

        options.comment("Options:");
        final boolean normalize = options.getOption("-n", "normalize", "Normalize counts", true);
        final boolean ignoreUnassignedReads = options.getOption("-iu", "ignoreUnassignedReads", "Ignore unassigned, no-hit or contaminant reads", false);
        final Document.ReadAssignmentMode readAssignmentMode = Document.ReadAssignmentMode.valueOfIgnoreCase(options.getOption("-ram", "readAssignmentMode", "Set the desired read-assignment mode", Document.ReadAssignmentMode.readCount.toString()));
        final boolean keepOne = options.getOption("-k1", "keepOne", "In a normalized comparison, minimum non-zero count is set to 1", false);
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();
        MeganProperties.initializeProperties(propertiesFile);

        // a single directory argument is expanded to all contained .daa/.rma/.rma6 files (recursively)
        if (inputFiles.size() == 1 && FileUtils.isDirectory(inputFiles.get(0))) {
            final String directory = inputFiles.get(0);
            inputFiles.clear();
            inputFiles.addAll(FileUtils.getAllFilesInDirectory(directory, true, ".daa", ".rma", ".rma6"));
        }
        for (String fileName : inputFiles) {
            if (!FileUtils.fileExistsAndIsNonEmpty(fileName))
                throw new IOException("No such file or file empty: " + fileName);
        }
        if (inputFiles.size() == 0)
            throw new UsageException("No input file");

        // headless project: the Director/Document pair hosts the comparison result
        final Director dir = Director.newProject(false);
        final Document doc = dir.getDocument();
        doc.setProgressListener(new ProgressSilent());
        {
            // run the comparison via MEGAN's command-language interface
            CompareCommand compareCommand = new CompareCommand();
            compareCommand.setDir(dir);
            final String command = "compare mode=" + (normalize ? Comparer.COMPARISON_MODE.RELATIVE : Comparer.COMPARISON_MODE.ABSOLUTE) +
                    " readAssignmentMode=" + readAssignmentMode + " keep1=" + keepOne + " ignoreUnassigned=" + ignoreUnassignedReads +
                    " meganFile='" + StringUtils.toString(inputFiles, "', '") + "';";
            try {
                compareCommand.apply(new NexusStreamParser(new StringReader(command)));
            } catch (Exception ex) {
                // NOTE(review): failures here are only logged; saving proceeds regardless —
                // presumably intentional best-effort behavior, TODO confirm
                Basic.caught(ex);
            }
        }
        if (StringUtils.notBlank(metadataFile)) {
            // attach per-sample metadata (plain, zipped or gzipped file accepted)
            try (BufferedReader r = new BufferedReader(new InputStreamReader(FileUtils.getInputStreamPossiblyZIPorGZIP(metadataFile)))) {
                System.err.print("Processing Metadata: " + metadataFile);
                doc.getSampleAttributeTable().read(r, doc.getSampleNames(), true);
                System.err.println(", attributes: " + doc.getSampleAttributeTable().getNumberOfUnhiddenAttributes());
            }
        }
        doc.getMeganFile().setFile(outputFile, MeganFile.Type.MEGAN_SUMMARY_FILE);

        // persist the comparison document via the command-language "save" command
        final SaveCommand saveCommand = new SaveCommand();
        saveCommand.setDir(dir);
        System.err.println("Saving to file: " + outputFile);
        saveCommand.apply(new NexusStreamParser(new StringReader("save file='" + outputFile + "';")));
    }
}
| 6,136 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ApplyLCA.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/ApplyLCA.java | /*
* ApplyLCA.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ProgramProperties;
import jloda.swing.util.ResourceManager;
import jloda.util.Basic;
import jloda.util.FileUtils;
import jloda.util.NumberUtils;
import megan.algorithms.AssignmentUsingLCA;
import megan.classification.Classification;
import megan.main.MeganProperties;
import java.io.*;
/**
* applies the LCA to input lines
*/
/**
 * applies the LCA to input lines: for each input line of the form
 * "name SEP taxId SEP taxId ...", writes "name SEP lcaTaxId", where lcaTaxId is the
 * lowest common ancestor of all positive taxon ids on the line (0 if any field is
 * not an integer, -1 if no positive ids are present)
 */
public class ApplyLCA {
    /**
     * program entry point: runs the LCA computation and reports wall-clock time
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("ApplyLCA");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);

            long start = System.currentTimeMillis();
            (new ApplyLCA()).run(args);
            System.err.println("Time: " + ((System.currentTimeMillis() - start) / 1000) + "s");
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * runs the program: parses options, streams the input line by line and writes
     * the per-line LCA result
     *
     * @param args command-line arguments
     * @throws Exception if option parsing, reading or writing fails
     */
    private void run(String[] args) throws Exception {
        final ArgsOptions options = new ArgsOptions(args, this, "Applies the LCA to taxon-ids");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");
        final String inputFile = options.getOptionMandatory("-i", "input", "Input file (stdin ok)", "");
        final String outputFile = options.getOption("-o", "output", "Output file (stdout, .gz ok)", "stdout");
        String separator = options.getOption("-s", "Separator", "Separator character (or detect)", "detect");
        final boolean firstLineIsHeader = options.getOption("-H", "hasHeaderLine", "Has header line", true);
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();
        MeganProperties.initializeProperties(propertiesFile);

        final AssignmentUsingLCA assignmentUsingLCA = new AssignmentUsingLCA(Classification.Taxonomy, false, 0);

        final Writer w = new BufferedWriter(new OutputStreamWriter(FileUtils.getOutputStreamPossiblyZIPorGZIP(outputFile)));
        try (BufferedReader r = new BufferedReader(inputFile.equals("stdin") ? new InputStreamReader(System.in) : new FileReader(inputFile))) {
            String line;
            boolean first = true;
            while ((line = r.readLine()) != null) {
                if (first) {
                    first = false;
                    if (separator.equals("detect")) {
                        // infer the separator from the first line
                        if (line.contains("\t"))
                            separator = "\t";
                        else if (line.contains(","))
                            separator = ",";
                        else if (line.contains(";"))
                            separator = ";";
                        else
                            throw new IOException("Can't detect separator (didn't find tab, comma or semi-colon in first line)");
                    }
                    // fixed: this used to be nested inside the "detect" branch, so when an
                    // explicit separator was given the header line was parsed as data
                    if (firstLineIsHeader) {
                        w.write(line + "\n");
                        continue;
                    }
                }
                // quote the separator so regex metacharacters (e.g. '|') are treated literally
                final String[] tokens = line.split("\\s*" + java.util.regex.Pattern.quote(separator) + "\\s*");
                if (tokens.length > 0) {
                    // -1: no positive ids seen yet; 0: line contains a non-integer field
                    int taxonId = -1;
                    for (int i = 1; i < tokens.length; i++) {
                        final String token = tokens[i].trim();
                        if (!NumberUtils.isInteger(token)) {
                            taxonId = 0;
                            break;
                        } else {
                            final int id = NumberUtils.parseInt(token);
                            if (id > 0) {
                                // fold each id into the running LCA
                                taxonId = (taxonId == -1 ? id : assignmentUsingLCA.getLCA(taxonId, id));
                            }
                        }
                    }
                    w.write(tokens[0] + separator + taxonId + "\n");
                }
            }
            w.flush();
        } finally {
            // deliberately keep System.out open when writing to stdout
            if (!outputFile.equalsIgnoreCase("stdout"))
                w.close();
        }
    }
}
| 5,103 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
CSV2Megan.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/CSV2Megan.java | /*
 * CSV2Megan.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.core.Document;
import megan.daa.connector.DAAConnector;
import megan.daa.io.DAAHeader;
import megan.daa.io.DAAParser;
import megan.dialogs.export.CSVExporter;
import megan.main.MeganProperties;
import megan.viewer.TaxonomyData;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.HashSet;
/**
* import CSV files to MEGAN files
* Daniel Huson,1.2023
*/
/**
 * import CSV files to MEGAN files
 * Daniel Huson, 1.2023
 */
public class CSV2Megan {
    /**
     * program entry point: runs the import and reports peak memory usage
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("CSV2Megan");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);

            PeakMemoryUsageMonitor.start();
            (new CSV2Megan()).run(args);
            PeakMemoryUsageMonitor.report();
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * runs the program: parses the command line
     *
     * @param args command-line arguments
     * @throws UsageException on a command-line usage error
     * @throws IOException    if reading or writing fails
     * @throws CanceledException if the computation is canceled
     */
    private void run(String[] args) throws UsageException, IOException, CanceledException {
        final var options = new ArgsOptions(args, this, "Imports CSV files to Megan summary format");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and Output");
        final var inputFiles = options.getOptionMandatory("-i", "in", "Input file(s)", new String[0]);
        final var outputFiles = options.getOption("-o", "out", "Output file(s) (directory or .gz ok)", new String[0]);

        options.comment("Import specification");
        // fixed: the legal-values array contained a typo ("summary}" instead of "summary")
        final var importType = options.getOption("-t", "type", "Type of data contained in lines", new String[]{"reads", "summary"}, "summary");
        options.done(); // validate the command line, consistent with the other MEGAN tools (was missing)

        // TODO: implementation incomplete — the actual parsing of the CSV input
        // (importType, inputFiles) and writing of the MEGAN summary output
        // (outputFiles) has not been written yet
    }
}
| 3,025 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
CompareFiles.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/CompareFiles.java | /*
* CompareFiles.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.seq.BlastMode;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import megan.core.ClassificationType;
import megan.core.Document;
import megan.dialogs.compare.Comparer;
import megan.main.MeganProperties;
import java.io.BufferedReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.*;
import java.util.stream.Collectors;
/**
* compares multiple samples
* Daniel Huson, 8.2018
*/
/**
 * Command-line tool that merges the samples of several MEGAN/RMA/meganized-DAA
 * files into a single comparison document, optionally normalizing all samples
 * to the smallest sample size.
 */
public class CompareFiles {
    /**
     * Program entry point: sets up program properties, runs the comparison and
     * reports total time and peak memory usage on exit.
     *
     * @param args command-line arguments, passed on to {@link #run(String[])}
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("CompareFiles");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            PeakMemoryUsageMonitor.start();
            (new CompareFiles()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * Parses options, loads all input samples, optionally normalizes counts to
     * the smallest sample, merges the classification counts and writes a
     * comparison .megan file.
     * <p>
     * Fix over the previous version: the help text for "-s" read
     * "All the same sample name..." instead of "Allow the same sample name...".
     *
     * @param args command-line arguments
     * @throws Exception if parsing, reading or writing fails
     */
    private void run(String[] args) throws Exception {
        final var options = new ArgsOptions(args, this, "Computes the comparison of multiple megan, RMA or meganized DAA files");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and Output:");
        final var inputFiles = new ArrayList<>(Arrays.asList(options.getOptionMandatory("-i", "in", "Input RMA and/or meganized DAA files (single directory ok)", new String[0])));
        final var outputFile = options.getOption("-o", "out", "Output file", "comparison.megan");
        final var metadataFile = options.getOption("-mdf", "metaDataFile", "Metadata file", "");

        options.comment("Options:");
        final var allowSameNames = options.getOption("-s", "allowSameNames", "Allow the same sample name to appear multiple times (will add -1, -2 etc)", false);
        final var normalize = options.getOption("-n", "normalize", "Normalize counts", true);
        final var ignoreUnassignedReads = options.getOption("-iu", "ignoreUnassignedReads", "Ignore unassigned, no-hit or contaminant reads", false);
        final var keepOne = options.getOption("-k1", "keepOne", "In a normalized comparison, non-zero counts are mapped to 1 or more", false);
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        // a single directory argument means: use all suitable files found in it
        if (inputFiles.size() == 1 && FileUtils.isDirectory(inputFiles.get(0))) {
            final String directory = inputFiles.get(0);
            inputFiles.clear();
            inputFiles.addAll(FileUtils.getAllFilesInDirectory(directory, true, ".megan", ".megan.gz", ".daa", ".rma", ".rma6"));
        }

        for (String fileName : inputFiles) {
            if (!FileUtils.fileExistsAndIsNonEmpty(fileName))
                throw new IOException("No such file or file empty: " + fileName);
        }

        if (inputFiles.isEmpty())
            throw new UsageException("No input file");

        // load every sample of every input file
        var samples = new ArrayList<SampleData>();
        for (var fileName : inputFiles) {
            System.err.println("Processing file: " + fileName);
            final var doc = new Document();
            doc.getMeganFile().setFileFromExistingFile(fileName, true);
            doc.loadMeganFile();
            final var docSamples = doc.getSampleNamesAsArray();
            for (var s = 0; s < docSamples.length; s++) {
                final var sample = new SampleData(doc, s);
                samples.add(sample);
                System.err.println(sample);
            }
        }

        // ensure unique names (rename if -s given, otherwise fail):
        {
            var names = new HashSet<String>();
            var count = 0;
            for (var sample : samples) {
                if (names.contains(sample.getName())) {
                    if (allowSameNames) {
                        if (count == 0)
                            System.err.println("Renaming samples to make all names unique:");
                        final var name = StringUtils.getUniqueName(sample.getName(), names);
                        System.err.println(sample.getName() + " -> " + name);
                        sample.setName(name);
                    }
                    count++;
                }
                names.add(sample.getName());
            }
            if (count > 0 && !allowSameNames)
                throw new IOException("Same sample name occurs more than once, " + count + " times (use option -s to allow)");
        }

        System.err.printf("Input files:%13d%n", inputFiles.size());
        System.err.printf("Input samples:%11d%n", samples.size());
        //System.err.printf("Input files: %s%n",Basic.toString(inputFiles,", "));
        System.err.printf("Input count:%,13d%n", (long) getTotalCount(samples));
        System.err.printf("In assigned:%,13d%n", (long) getTotalAssigned(samples));
        System.err.printf("Read assignment mode: %s%n", samples.get(0).getReadAssignmentMode());

        // all samples must use the same read-assignment mode for a meaningful comparison
        final Document.ReadAssignmentMode readAssignmentMode;
        {
            final var modes = new TreeSet<>(Arrays.asList(getReadAssignmentModes(samples)));
            if (modes.size() > 1)
                throw new IOException("Can't compare normalized samples with mixed assignment modes, found: " + StringUtils.toString(modes, ", "));
            readAssignmentMode = (modes.isEmpty() ? Document.ReadAssignmentMode.readCount : modes.first());
        }

        // smallest sample size is the normalization target
        final OptionalDouble min;
        if (ignoreUnassignedReads)
            min = samples.stream().mapToDouble(SampleData::getAssigned).min();
        else
            min = samples.stream().mapToDouble(SampleData::getCount).min();

        if (min.isEmpty())
            throw new IOException("No reads found");
        else if (normalize) {
            System.err.printf("Normalizing to:%,10d per sample%n", (long) min.getAsDouble());
        }

        var numberOfSamples = samples.size();

        var doc = new Document();

        final float[] sizes;
        if (!normalize) {
            if (!ignoreUnassignedReads)
                sizes = getCounts(samples);
            else
                sizes = getAssigneds(samples);
        } else {
            sizes = new float[numberOfSamples];
            Arrays.fill(sizes, (float) min.getAsDouble());
        }
        doc.getDataTable().setSamples(getSampleNames(samples), getUids(samples), sizes, getBlastModes(samples));

        // remember which file each sample came from
        {
            var sample2source = new HashMap<String, Object>();
            for (var sample : samples) {
                sample2source.put(sample.getName(), sample.getDoc().getMeganFile().getFileName());
            }
            doc.getSampleAttributeTable().addAttribute("@Source", sample2source, false, true);
        }

        doc.setNumberReads(Math.round(CollectionUtils.getSum(sizes)));

        // merge the class counts of every classification, scaling each sample by its factor
        for (var classification : getClassifications(samples)) {
            final Map<Integer, float[]> class2counts = new HashMap<>();
            for (var sample : samples) {
                final double factor;
                if (normalize) {
                    if (ignoreUnassignedReads)
                        factor = (sample.getAssigned() > 0 ? min.getAsDouble() / sample.getAssigned() : 1);
                    else
                        factor = (sample.getCount() > 0 ? min.getAsDouble() / sample.getCount() : 1);
                } else
                    factor = 1;
                sample.setFactor(factor);
            }

            for (var c : getClassIds(classification, samples, ignoreUnassignedReads)) {
                var newValues = class2counts.computeIfAbsent(c, z -> new float[numberOfSamples]);
                for (var s = 0; s < numberOfSamples; s++) {
                    final SampleData sample = samples.get(s);
                    final int which = sample.getWhich();
                    var counts = sample.getDoc().getDataTable().getClass2Counts(classification);
                    if (counts != null) {
                        var values = counts.get(c);
                        if (values != null && which < values.length) {
                            var value = values[which];
                            newValues[s] = (float) sample.getFactor() * value;
                            // -k1: keep non-zero counts visible even if scaling rounds them to 0
                            if (keepOne && value > 0 && newValues[s] == 0)
                                newValues[s] = 1;
                        }
                    }
                }
            }
            doc.getDataTable().setClass2Counts(classification, class2counts);
        }

        doc.setReadAssignmentMode(readAssignmentMode);

        var parameters = "mode=" + (normalize ? Comparer.COMPARISON_MODE.RELATIVE : Comparer.COMPARISON_MODE.ABSOLUTE);
        if (normalize)
            parameters += " normalizedTo=" + StringUtils.removeTrailingZerosAfterDot("" + min.getAsDouble());
        parameters += " readAssignmentMode=" + readAssignmentMode.toString();
        if (ignoreUnassignedReads)
            parameters += " ignoreUnassigned=true";
        doc.getDataTable().setParameters(parameters);

        System.err.printf("Output count:%,12d%n", doc.getNumberOfReads());

        if (StringUtils.notBlank(metadataFile)) {
            try (var r = new BufferedReader(new InputStreamReader(FileUtils.getInputStreamPossiblyZIPorGZIP(metadataFile)))) {
                System.err.print("Processing Metadata: " + metadataFile);
                doc.getSampleAttributeTable().read(r, doc.getSampleNames(), true);
                System.err.println(", attributes: " + doc.getSampleAttributeTable().getNumberOfUnhiddenAttributes());
            }
        }

        System.err.println("Saving to file: " + outputFile);
        try (var writer = new FileWriter(outputFile)) {
            doc.getDataTable().write(writer);
            doc.getSampleAttributeTable().write(writer, false, true);
        }
    }

    /** @return the sum of total read counts over all samples */
    public static double getTotalCount(Collection<SampleData> samples) {
        return samples.stream().mapToDouble(SampleData::getCount).sum();
    }

    /** @return total read count per sample, in list order */
    public static float[] getCounts(List<SampleData> samples) {
        final var counts = new float[samples.size()];
        for (int s = 0; s < samples.size(); s++)
            counts[s] = samples.get(s).getCount();
        return counts;
    }

    /** @return assigned read count per sample, in list order */
    public static float[] getAssigneds(List<SampleData> samples) {
        final var assigneds = new float[samples.size()];
        for (int s = 0; s < samples.size(); s++)
            assigneds[s] = samples.get(s).getAssigned();
        return assigneds;
    }

    /** @return the sum of assigned read counts over all samples */
    public static double getTotalAssigned(Collection<SampleData> samples) {
        return samples.stream().mapToDouble(SampleData::getAssigned).sum();
    }

    /** @return all distinct classification names appearing in any sample */
    public static List<String> getClassifications(Collection<SampleData> samples) {
        return samples.stream().map(SampleData::getClassifications).flatMap(Collection::stream).distinct().collect(Collectors.toList());
    }

    /**
     * @return all class ids used by the given classification over all samples;
     * if assignedOnly is set, ids &le; 0 (unassigned/no-hit) are dropped
     */
    public static Set<Integer> getClassIds(String classification, Collection<SampleData> samples, boolean assignedOnly) {
        return samples.parallelStream().map(s -> s.getDoc().getDataTable().getClass2Counts(classification)).filter(Objects::nonNull).map(Map::keySet).flatMap(Set::stream).
                filter(id -> !assignedOnly || id > 0).collect(Collectors.toSet());
    }

    /** @return sample names, in collection order */
    public static String[] getSampleNames(Collection<SampleData> samples) {
        return samples.stream().map(SampleData::getName).toArray(String[]::new);
    }

    /** @return sample uids, in collection order */
    public static Long[] getUids(Collection<SampleData> samples) {
        return samples.stream().map(SampleData::getUid).toArray(Long[]::new);
    }

    /** @return per-sample BLAST modes, in collection order */
    public static BlastMode[] getBlastModes(Collection<SampleData> samples) {
        return samples.stream().map(SampleData::getBlastMode).toArray(BlastMode[]::new);
    }

    /** @return per-sample read-assignment modes, in collection order */
    public static Document.ReadAssignmentMode[] getReadAssignmentModes(Collection<SampleData> samples) {
        return samples.stream().map(SampleData::getReadAssignmentMode).toArray(Document.ReadAssignmentMode[]::new);
    }

    /**
     * One sample of one input document: caches its name, uid, total and
     * assigned taxonomy counts, modes and classifications, plus the scaling
     * factor used during normalization.
     */
    public static class SampleData {
        private final Document doc;
        private String name;
        private final long uid;
        private final int which;          // column index of this sample within its document
        private final float count;        // total reads (assigned + unassigned)
        private final float assigned;     // reads with taxon id > 0
        private final BlastMode blastMode;
        private final Document.ReadAssignmentMode readAssignmentMode;
        private final ArrayList<String> classifications;
        private double factor = 1;        // normalization scaling factor, set later

        public SampleData(Document doc, int which) {
            this.doc = doc;
            this.which = which;
            this.name = doc.getSampleNames().get(which);
            this.uid = doc.getDataTable().getSampleUIds()[which];

            // ids > 0 are assigned taxa; ids <= 0 are unassigned/no-hit/contaminant bins
            final Map<Integer, float[]> class2count = doc.getDataTable().getClass2Counts(ClassificationType.Taxonomy);
            float assigned = 0;
            float unassigned = 0;
            for (var id : class2count.keySet()) {
                final float[] values = class2count.get(id);
                if (which < values.length) {
                    if (id > 0)
                        assigned += values[which];
                    else
                        unassigned += values[which];
                }
            }
            this.count = assigned + unassigned;
            this.assigned = assigned;
            blastMode = doc.getBlastMode();
            readAssignmentMode = doc.getReadAssignmentMode();
            classifications = new ArrayList<>(doc.getClassificationNames());
        }

        public Document getDoc() {
            return doc;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public long getUid() {
            return uid;
        }

        public int getWhich() {
            return which;
        }

        public float getCount() {
            return count;
        }

        public float getAssigned() {
            return assigned;
        }

        public BlastMode getBlastMode() {
            return blastMode;
        }

        public Document.ReadAssignmentMode getReadAssignmentMode() {
            return readAssignmentMode;
        }

        public ArrayList<String> getClassifications() {
            return classifications;
        }

        public double getFactor() {
            return factor;
        }

        public void setFactor(double factor) {
            this.factor = factor;
        }

        @Override
        public String toString() {
            return String.format("Sample %s [%d in %s]: count=%,d assigned=%,d mode=%s classifications=%s",
                    name, which, FileUtils.getFileNameWithoutPath(doc.getMeganFile().getFileName()), (int) count, (int) assigned, readAssignmentMode.toString(), StringUtils.toString(classifications, " "));
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            final var that = (SampleData) o;
            return which == that.which &&
                   Float.compare(that.count, count) == 0 &&
                   Float.compare(that.assigned, assigned) == 0 &&
                   doc.getMeganFile().getFileName().equals(that.doc.getMeganFile().getFileName()) &&
                   name.equals(that.name) &&
                   readAssignmentMode == that.readAssignmentMode &&
                   classifications.equals(that.classifications);
        }

        @Override
        public int hashCode() {
            return Objects.hash(doc.getMeganFile().getFileName(), name, which, count, assigned, readAssignmentMode, classifications);
        }
    }
}
/*
* Taxonomy2Function.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.core.Document;
import megan.main.MeganProperties;
import megan.viewer.TaxonomicLevels;
import megan.viewer.TaxonomyData;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.*;
/**
* Reports taxonomy-by-function classification
* Daniel Huson, 10.2021
*/
/**
 * Command-line tool that reports a two-way classification table (e.g. taxonomy
 * by function): for each pair of classes (one from each classification) it
 * lists either the read counts per input file or the read names.
 * Daniel Huson, 10.2021
 */
public class Taxonomy2Function {
    /**
     * Program entry point: sets up program properties, runs the analysis and
     * reports peak memory usage on exit.
     *
     * @param args command-line arguments, passed on to {@link #run(String[])}
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("Taxonomy2Function");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            PeakMemoryUsageMonitor.start();
            (new Taxonomy2Function()).run(args);
            PeakMemoryUsageMonitor.report();
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * Parses options, joins the reads of two classifications over all input
     * files and writes the resulting two-way table.
     * <p>
     * Fix over the previous version: choosing "comma" as (path-)separator
     * produced an apostrophe {@code '} instead of {@code ,} — both switch
     * statements mapped {@code "comma"} to the wrong character.
     *
     * @param args command-line arguments
     * @throws UsageException if the arguments are invalid
     * @throws IOException    if reading or writing fails
     */
    private void run(String[] args) throws UsageException, IOException {
        final var options = new ArgsOptions(args, this, "Reports taxonomy-by-function classification");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and Output");
        final var inputFiles = options.getOptionMandatory("-i", "in", "Input file(s)", new String[0]);
        final var outputFile = options.getOption("-o", "out", "Output file (stdout or .gz ok)", "stdout");

        options.comment("Options");
        final var firstClassificationName = options.getOption("-a", "firstClassification", "First classification name", ClassificationManager.getAllSupportedClassifications(), "Taxonomy");
        final var firstClasses = options.getOption("-ac", "firstClasses", "Class IDs in first classification?", List.of("all"));
        final var secondClassificationName = options.getOption("-b", "secondClassification", "Second classification name", ClassificationManager.getAllSupportedClassifications(), "EGGNOG");
        final var secondClasses = options.getOption("-bc", "secondClasses", "Class IDs in second classifications?", List.of("all"));
        final var firstFormat = options.getOption("-af", "firstFormat", "Format to report first classification class", new String[]{"name", "id", "path"}, "name");
        final var secondFormat = options.getOption("-bf", "secondFormat", "Format to report second classification class", new String[]{"name", "id", "path"}, firstFormat);
        final var listOption = options.getOption("-l", "list", "List counts or read names?", new String[]{"counts", "reads"}, "counts");
        final var majorRanksOnly = options.getOption("-mro", "majorRanksOnly", "Only use major ranks for NCBI taxonomy", false);
        var separator = options.getOption("-s", "separator", "Separator", new String[]{"tab", "comma", "semi-colon"}, "tab");
        final var includeFirstUnassigned = options.getOption("-au", "includeFirstUnassigned", "include reads unassigned in first classification", true);
        final var includeSecondUnassigned = options.getOption("-bu", "includeSecondUnassigned", "include reads unassigned second classification", true);

        options.comment(ArgsOptions.OTHER);
        var firstRank = options.getOption("-ar", "firstRank", "If the first classification is Taxonomy, report at specified rank", TaxonomicLevels.getAllMajorRanks(), "");
        var secondRank = options.getOption("-br", "secondRank", "If the second classification is Taxonomy, report at specified rank", TaxonomicLevels.getAllMajorRanks(), "");
        var showHeaderLine = options.getOption("-sh", "showHeadline", "Show a headline in the output naming classifications and files", false);
        var pathSeparator = options.getOption("-ps", "pathSeparator", "Separator used when reporting paths", new String[]{"::", "|", "tab", "comma", "semi-colon"}, "::");
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        if (firstClassificationName.equals(secondClassificationName))
            throw new UsageException("First and second classifications must be different");

        for (var file : inputFiles) {
            if (!FileUtils.fileExistsAndIsNonEmpty(file))
                throw new IOException("Can't open input file: " + file);
        }

        if (inputFiles.length > 1 && listOption.equals("reads"))
            throw new UsageException("You must not specify multiple input files and use the option --list reads");

        // translate the symbolic separator names to the actual characters
        switch (separator) {
            case "comma" -> separator = ",";
            case "semi-colon" -> separator = ";";
            case "tab" -> separator = "\t";
        }
        switch (pathSeparator) {
            case "comma" -> pathSeparator = ",";
            case "semi-colon" -> pathSeparator = ";";
            case "tab" -> pathSeparator = "\t";
        }

        final var firstClassificationIsTaxonomy = firstClassificationName.equals(Classification.Taxonomy);
        final var secondClassificationIsTaxonomy = secondClassificationName.equals(Classification.Taxonomy);

        var firstClassificationMajorRanksOnly = (majorRanksOnly && firstClassificationIsTaxonomy);
        var secondClassificationMajorRanksOnly = (majorRanksOnly && secondClassificationIsTaxonomy);

        // rank-based reporting only makes sense for the NCBI taxonomy
        final int firstRankId;
        if (!firstClassificationIsTaxonomy && !firstRank.isEmpty())
            throw new UsageException("--firstRank: first classification must be Taxonomy");
        else
            firstRankId = TaxonomicLevels.getId(firstRank);

        final int secondRankId;
        if (!secondClassificationIsTaxonomy && !secondRank.isEmpty())
            throw new UsageException("--secondRank: second classification must be Taxonomy");
        else
            secondRankId = TaxonomicLevels.getId(secondRank);

        if (firstClassificationIsTaxonomy || secondClassificationIsTaxonomy) {
            ClassificationManager.get(Classification.Taxonomy, true);
        }

        // explicit class-id filters; null means "all classes found in the file"
        Collection<Integer> firstIds = null;
        if (!(firstClasses.size() == 1 && firstClasses.get(0).equals("all"))) {
            firstIds = new HashSet<>();
            for (var token : firstClasses) {
                if (!NumberUtils.isInteger(token))
                    throw new UsageException("--firstClasses: integer expected, got: " + token);
                else
                    firstIds.add(NumberUtils.parseInt(token));
            }
        }
        Collection<Integer> secondIds = null;
        if (!(secondClasses.size() == 1 && secondClasses.get(0).equals("all"))) {
            secondIds = new HashSet<>();
            for (var token : secondClasses) {
                if (!NumberUtils.isInteger(token))
                    throw new UsageException("--secondClasses: integer expected, got: " + token);
                else
                    secondIds.add(NumberUtils.parseInt(token));
            }
        }

        var useReadsTable = listOption.equals("reads");
        var readsTable = new Table<Integer, Integer, ArrayList<String>>();
        var countsTable = new Table<Integer, Integer, int[]>();

        for (var f = 0; f < inputFiles.length; f++) {
            var inputFile = inputFiles[f];
            var progress = new ProgressPercentage("Processing file:", inputFile);
            final var doc = new Document();
            doc.getMeganFile().setFileFromExistingFile(inputFile, true);
            doc.loadMeganFile();
            var connector = doc.getConnector();

            if (doc.getMeganFile().isMeganSummaryFile())
                throw new UsageException("Input file '" + inputFile + "': must be RMA or meganized DAA file");

            // pass 1: collect read names per (possibly rank-mapped) class of the first classification
            var first2reads = new TreeMap<Integer, ArrayList<String>>();
            var firstClassificationBlock = connector.getClassificationBlock(firstClassificationName);
            progress.setTasks("Processing:", "First classification");
            progress.setMaximum(firstClassificationBlock.getKeySet().size());
            // NOTE(review): firstIds/secondIds are filled from the FIRST file and then
            // reused for all later files — confirm this is intended for multi-file input
            if (firstIds == null || firstIds.isEmpty())
                firstIds = firstClassificationBlock.getKeySet();
            for (var classId : firstIds) {
                if (includeFirstUnassigned || classId > 0) {
                    var mappedClassId = classId;
                    if (firstClassificationIsTaxonomy) {
                        if (firstClassificationMajorRanksOnly) {
                            mappedClassId = TaxonomyData.getLowestAncestorWithMajorRank(classId);
                        }
                        if (firstRankId != 0) {
                            mappedClassId = TaxonomyData.getAncestorAtGivenRank(classId, firstRankId);
                            if (mappedClassId == 0)
                                mappedClassId = 1; // fall back to the root taxon
                        }
                    }
                    var list = first2reads.computeIfAbsent(mappedClassId, k -> new ArrayList<>());
                    var it = connector.getReadsIterator(firstClassificationName, classId, 0, 10, false, false);
                    while (it.hasNext()) {
                        var readBlock = it.next();
                        list.add(readBlock.getReadName());
                    }
                }
                progress.incrementProgress();
            }
            progress.reportTaskCompleted();

            // pass 2: map each read name to its (possibly rank-mapped) class in the second classification
            var read2second = new HashMap<String, Integer>();
            var secondClassificationBlock = connector.getClassificationBlock(secondClassificationName);
            progress.setTasks("Processing:", "Second classification");
            progress.setProgress(0);
            progress.setMaximum(secondClassificationBlock.getKeySet().size());
            if (secondIds == null || secondIds.isEmpty())
                secondIds = secondClassificationBlock.getKeySet();
            for (var classId : secondIds) {
                var mappedClassId = classId;
                if (secondClassificationIsTaxonomy) {
                    if (secondClassificationMajorRanksOnly) {
                        mappedClassId = TaxonomyData.getLowestAncestorWithMajorRank(classId);
                    }
                    if (secondRankId != 0) {
                        mappedClassId = TaxonomyData.getAncestorAtGivenRank(classId, secondRankId);
                        if (mappedClassId == 0)
                            mappedClassId = 1; // fall back to the root taxon
                    }
                }
                if (includeSecondUnassigned || classId > 0) {
                    var it = connector.getReadsIterator(secondClassificationName, classId, 0, 10, false, false);
                    while (it.hasNext()) {
                        var readBlock = it.next();
                        read2second.put(readBlock.getReadName(), mappedClassId);
                    }
                }
                progress.incrementProgress();
            }
            progress.reportTaskCompleted();

            // pass 3: join the two passes into the counts/reads table
            progress.setSubtask("Merging");
            progress.setProgress(0);
            progress.setMaximum(firstClassificationBlock.getKeySet().size());

            // NOTE(review): iteration is over the raw key set, but first2reads is keyed
            // by mapped ids — mapped ancestor ids absent from the key set would be
            // skipped; verify against rank-mapping use cases
            for (var classId : firstClassificationBlock.getKeySet()) {
                if (first2reads.containsKey(classId)) {
                    for (var readName : first2reads.get(classId)) {
                        var otherId = read2second.get(readName);
                        if (otherId != null) {
                            if (useReadsTable) {
                                var list = readsTable.get(classId, otherId);
                                if (list == null) {
                                    list = new ArrayList<>();
                                    readsTable.put(classId, otherId, list);
                                }
                                list.add(readName);
                            } else {
                                var counts = countsTable.get(classId, otherId);
                                if (counts == null) {
                                    counts = new int[inputFiles.length];
                                    countsTable.put(classId, otherId, counts);
                                }
                                counts[f]++; // one column per input file
                            }
                        }
                    }
                }
                progress.incrementProgress();
            }
            progress.reportTaskCompleted();
            doc.closeConnector();
        }

        // write the table, formatting class ids as id, name or full path as requested
        {
            var firstClassification = (firstFormat.equals("id") ? null : ClassificationManager.get(firstClassificationName, true));
            var secondClassification = (secondFormat.equals("id") ? null : ClassificationManager.get(secondClassificationName, true));

            try (var progress = new ProgressPercentage("Writing", outputFile)) {
                var rowSet = (useReadsTable ? readsTable.rowKeySet() : countsTable.rowKeySet());
                var colSet = (useReadsTable ? readsTable.columnKeySet() : countsTable.columnKeySet());
                var numberOfReads = (useReadsTable ? readsTable.getNumberOfRows() : countsTable.getNumberOfRows());

                progress.setMaximum(numberOfReads);
                progress.setProgress(0);
                try (var w = new BufferedWriter(new OutputStreamWriter(FileUtils.getOutputStreamPossiblyZIPorGZIP(outputFile)))) {
                    if (showHeaderLine)
                        w.write(firstClassificationName + separator + secondClassificationName + separator + StringUtils.toString(inputFiles, separator) + "\n");
                    for (var firstId : sorted(firstClassification, firstFormat, rowSet)) {
                        var firstName =
                                switch (firstFormat) {
                                    case "name" -> firstClassification.getName2IdMap().get(firstId);
                                    case "path" -> firstClassification.getPath(firstId, pathSeparator);
                                    default -> String.valueOf(firstId); // "id" and any other value
                                };
                        for (var secondId : sorted(secondClassification, secondFormat, colSet)) {
                            var secondName =
                                    switch (secondFormat) {
                                        case "name" -> secondClassification.getName2IdMap().get(secondId);
                                        case "path" -> secondClassification.getPath(secondId, pathSeparator);
                                        default -> String.valueOf(secondId); // "id" and any other value
                                    };
                            if (useReadsTable) {
                                if (readsTable.contains(firstId, secondId)) {
                                    var values = readsTable.get(firstId, secondId);
                                    w.write(firstName + separator + secondName + separator + StringUtils.toString(values, ", ") + "\n");
                                }
                            } else {
                                if (countsTable.contains(firstId, secondId)) {
                                    var values = countsTable.get(firstId, secondId);
                                    w.write(firstName + separator + secondName + separator + StringUtils.toString(values, separator) + "\n");
                                }
                            }
                        }
                        progress.incrementProgress();
                    }
                }
            }
        }
    }

    /**
     * Sorts class ids for output: by class name when format is "name",
     * numerically otherwise.
     *
     * @param classification used to resolve names; may be null unless format is "name"
     * @param format         "name", "id" or "path"
     * @param values         the class ids to sort
     * @return the ids in output order
     */
    private Collection<Integer> sorted(Classification classification, String format, Collection<Integer> values) {
        if (format.equals("name")) {
            var map = new TreeMap<String, Integer>();
            for (var value : values) {
                map.put(classification.getName2IdMap().get(value), value);
            }
            return map.values();
        } else {
            var list = new ArrayList<>(values);
            list.sort(Integer::compareTo);
            return list;
        }
    }
}
/*
* DAA2RMA6.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.fx.util.ProgramExecutorService;
import jloda.swing.commands.CommandManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.accessiondb.AccessAccessionMappingDatabase;
import megan.accessiondb.ConfigRequests;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.IdParser;
import megan.classification.data.ClassificationCommandHelper;
import megan.core.ContaminantManager;
import megan.core.Document;
import megan.core.SampleAttributeTable;
import megan.daa.io.DAAParser;
import megan.main.MeganProperties;
import megan.parsers.blast.BlastFileFormat;
import megan.rma6.RMA6Connector;
import megan.rma6.RMA6FromBlastCreator;
import megan.util.DAAFileFilter;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.sql.SQLException;
import java.util.*;
/**
* compute an RMA6 file from a DAA file generated by DIAMOND
* Daniel Huson, 8.2015
*/
public class DAA2RMA6 {
/**
* merge RMA files
*
*/
/**
 * Program entry point: registers resources and program metadata, starts the
 * peak-memory monitor, runs the conversion, reports timing/memory and exits
 * with status 0 on success or 1 on any failure.
 *
 * @param args command-line arguments, handed on to run()
 */
public static void main(String[] args) {
    try {
        // one-time program setup before argument parsing
        ResourceManager.insertResourceRoot(megan.resources.Resources.class);
        ProgramProperties.setProgramName("DAA2RMA");
        ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
        PeakMemoryUsageMonitor.start();

        new DAA2RMA6().run(args);

        // report resource usage and terminate successfully
        System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
        System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
        System.exit(0);
    } catch (Exception exception) {
        Basic.caught(exception);
        System.exit(1);
    }
}
/**
* run
*
*/
private void run(String[] args) throws UsageException, IOException, CanceledException, SQLException {
CommandManager.getGlobalCommands().addAll(ClassificationCommandHelper.getGlobalCommands());
final ArgsOptions options = new ArgsOptions(args, this, "Computes a MEGAN .rma6 file from a DIAMOND .daa file");
options.setVersion(ProgramProperties.getProgramVersion());
options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
options.setAuthors("Daniel H. Huson");
options.comment("Input");
final String[] daaFiles = options.getOptionMandatory("-i", "in", "Input DAA file", new String[0]);
final String[] metaDataFiles = options.getOption("-mdf", "metaDataFile", "Files containing metadata to be included in RMA6 files", new String[0]);
options.comment("Output");
String[] outputFiles = options.getOptionMandatory("-o", "out", "Output file(s), one for each input file, or a directory", new String[0]);
boolean useCompression = options.getOption("-c", "useCompression", "Compress reads and matches in RMA file (smaller files, longer to generate", true);
options.comment("Reads");
final boolean pairedReads = options.getOption("-p", "paired", "Reads are paired", false);
final int pairedReadsSuffixLength = options.getOption("-ps", "pairedSuffixLength", "Length of name suffix used to distinguish between name (i.e. first word in header) of read and its mate (use 0 if read and mate have same name)", 0);
final boolean pairsInSingleFile = options.getOption("-pof", "pairedReadsInOneFile", "Are paired reads in one file (usually they are in two)", false);
options.comment("Parameters");
boolean longReads = options.getOption("-lg", "longReads", "Parse and analyse as long reads", Document.DEFAULT_LONG_READS);
final int maxMatchesPerRead = options.getOption("-m", "maxMatchesPerRead", "Max matches per read", 100);
final boolean runClassifications = options.getOption("-class", "classify", "Run classification algorithm", true);
final float minScore = options.getOption("-ms", "minScore", "Min score", Document.DEFAULT_MINSCORE);
final float maxExpected = options.getOption("-me", "maxExpected", "Max expected", Document.DEFAULT_MAXEXPECTED);
final float minPercentIdentity = options.getOption("-mpi", "minPercentIdentity", "Min percent identity", Document.DEFAULT_MIN_PERCENT_IDENTITY);
final float topPercent = options.getOption("-top", "topPercent", "Top percent", Document.DEFAULT_TOPPERCENT);
final int minSupport;
final float minSupportPercent;
{
final float minSupportPercent0 = options.getOption("-supp", "minSupportPercent", "Min support as percent of assigned reads (0==off)", Document.DEFAULT_MINSUPPORT_PERCENT);
final int minSupport0 = options.getOption("-sup", "minSupport", "Min support (0==off)", Document.DEFAULT_MINSUPPORT);
if (minSupportPercent0 != Document.DEFAULT_MINSUPPORT_PERCENT && minSupport0 == Document.DEFAULT_MINSUPPORT) {
minSupportPercent = minSupportPercent0;
minSupport = 0;
} else if (minSupportPercent0 == Document.DEFAULT_MINSUPPORT_PERCENT && minSupport0 != Document.DEFAULT_MINSUPPORT) {
minSupportPercent = 0;
minSupport = minSupport0;
} else if (minSupportPercent0 != Document.DEFAULT_MINSUPPORT_PERCENT) {
throw new IOException("Please specify a value for either --minSupport or --minSupportPercent, but not for both");
} else {
minSupportPercent = minSupportPercent0;
minSupport = minSupport0;
}
}
final float minPercentReadToCover = options.getOption("-mrc", "minPercentReadCover", "Min percent of read length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_READ_TO_COVER);
final float minPercentReferenceToCover = options.getOption("-mrefc", "minPercentReferenceCover", "Min percent of reference length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_REFERENCE_TO_COVER);
final int minReadLength=options.getOption("-mrl","minReadLength","Minimum read length",0);
final Document.LCAAlgorithm lcaAlgorithm = Document.LCAAlgorithm.valueOfIgnoreCase(options.getOption("-alg", "lcaAlgorithm", "Set the LCA algorithm to use for taxonomic assignment",
Document.LCAAlgorithm.values(), longReads ? Document.DEFAULT_LCA_ALGORITHM_LONG_READS.toString() : Document.DEFAULT_LCA_ALGORITHM_SHORT_READS.toString()));
final float lcaCoveragePercent = options.getOption("-lcp", "lcaCoveragePercent", "Set the percent for the LCA to cover",
lcaAlgorithm == Document.LCAAlgorithm.longReads ? Document.DEFAULT_LCA_COVERAGE_PERCENT_LONG_READS : (lcaAlgorithm == Document.LCAAlgorithm.weighted ? Document.DEFAULT_LCA_COVERAGE_PERCENT_WEIGHTED_LCA : Document.DEFAULT_LCA_COVERAGE_PERCENT_SHORT_READS));
final String readAssignmentModeDefaultValue;
if (options.isDoHelp()) {
readAssignmentModeDefaultValue = (Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS + " in long read mode, " + Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS + " else");
} else if (longReads)
readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS.toString();
else
readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS.toString();
final Document.ReadAssignmentMode readAssignmentMode = Document.ReadAssignmentMode.valueOfIgnoreCase(options.getOption("-ram", "readAssignmentMode", "Set the read assignment mode", readAssignmentModeDefaultValue));
final String contaminantsFile = options.getOption("-cf", "conFile", "File of contaminant taxa (one Id or name per line)", "");
options.comment("Classification support:");
final String mapDBFile = options.getOption("-mdb", "mapDB", "MEGAN mapping db (file megan-map.db)", "");
final Set<String> selectedClassifications = new HashSet<>(Arrays.asList(options.getOption("-on", "only", "Use only named classifications (if not set: use all)", new String[0])));
options.comment("Deprecated classification support:");
final boolean parseTaxonNames = options.getOption("-tn", "parseTaxonNames", "Parse taxon names", true);
final String acc2TaxaFile = options.getOption("-a2t", "acc2taxa", "Accessopm-to-Taxonomy mapping file", "");
final String synonyms2TaxaFile = options.getOption("-s2t", "syn2taxa", "Synonyms-to-Taxonomy mapping file", "");
{
final String tags = options.getOption("-t4t", "tags4taxonomy", "Tags for taxonomy id parsing (must set to activate id parsing)", "").trim();
ProgramProperties.preset("TaxonomyTags", tags);
ProgramProperties.preset("TaxonomyParseIds", tags.length() > 0);
}
final HashMap<String, String> class2AccessionFile = new HashMap<>();
final HashMap<String, String> class2SynonymsFile = new HashMap<>();
for (String cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
class2AccessionFile.put(cName, options.getOption("-a2" + cName.toLowerCase(), "acc2" + cName.toLowerCase(), "Accession-to-" + cName + " mapping file", ""));
class2SynonymsFile.put(cName, options.getOption("-s2" + cName.toLowerCase(), "syn2" + cName.toLowerCase(), "Synonyms-to-" + cName + " mapping file", ""));
final String tags = options.getOption("-t4" + cName.toLowerCase(), "tags4" + cName.toLowerCase(), "Tags for " + cName + " id parsing (must set to activate id parsing)", "").trim();
ProgramProperties.preset(cName + "Tags", tags);
ProgramProperties.preset(cName + "ParseIds", tags.length() > 0);
}
ProgramProperties.preset(IdParser.PROPERTIES_FIRST_WORD_IS_ACCESSION, options.getOption("-fwa", "firstWordIsAccession", "First word in reference header is accession number (set to 'true' for NCBI-nr downloaded Sep 2016 or later)", true));
ProgramProperties.preset(IdParser.PROPERTIES_ACCESSION_TAGS, options.getOption("-atags", "accessionTags", "List of accession tags", ProgramProperties.get(IdParser.PROPERTIES_ACCESSION_TAGS, IdParser.ACCESSION_TAGS)));
options.comment(ArgsOptions.OTHER);
ProgramExecutorService.setNumberOfCoresToUse(options.getOption("-t", "threads", "Number of threads", 8));
ConfigRequests.setCacheSize(options.getOption("-cs","cacheSize","Cache size for SQLITE (use with care)", ConfigRequests.getCacheSize()));
final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
options.done();
MeganProperties.initializeProperties(propertiesFile);
for (String fileName : daaFiles) {
FileUtils.checkFileReadableNonEmpty(fileName);
if (!DAAFileFilter.getInstance().accept(fileName))
throw new IOException("File not in DAA format (or incorrect file suffix?): " + fileName);
}
for (String fileName : metaDataFiles) {
FileUtils.checkFileReadableNonEmpty(fileName);
}
if (StringUtils.notBlank(contaminantsFile))
FileUtils.checkFileReadableNonEmpty(contaminantsFile);
final Collection<String> mapDBClassifications = AccessAccessionMappingDatabase.getContainedClassificationsIfDBExists(mapDBFile);
if (mapDBClassifications.size() > 0 && (StringUtils.hasPositiveLengthValue(class2AccessionFile) || StringUtils.hasPositiveLengthValue(class2SynonymsFile)))
throw new UsageException("Illegal to use both --mapDB and ---acc2... or --syn2... options");
if (mapDBClassifications.size() > 0)
ClassificationManager.setMeganMapDBFile(mapDBFile);
final ArrayList<String> cNames = new ArrayList<>();
for (String cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
if ((selectedClassifications.size() == 0 || selectedClassifications.contains(cName))
&& (mapDBClassifications.contains(cName) || class2AccessionFile.get(cName).length() > 0 || class2SynonymsFile.get(cName).length() > 0))
cNames.add(cName);
}
if (cNames.size() > 0)
System.err.println("Functional classifications to use: " + StringUtils.toString(cNames, ", "));
final boolean processInPairs = (pairedReads && !pairsInSingleFile);
if (outputFiles.length == 1) {
if (daaFiles.length == 1 || (processInPairs && daaFiles.length == 2)) {
if ((new File(outputFiles[0]).isDirectory()))
outputFiles[0] = (new File(outputFiles[0], FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutPath(FileUtils.getFileNameWithoutZipOrGZipSuffix(daaFiles[0])), ".rma6"))).getPath();
} else if (daaFiles.length > 1) {
if (!(new File(outputFiles[0]).isDirectory()))
throw new IOException("Multiple files given, but given single output is not a directory");
String outputDirectory = (new File(outputFiles[0])).getParent();
if (!processInPairs) {
outputFiles = new String[daaFiles.length];
for (int i = 0; i < daaFiles.length; i++)
outputFiles[i] = new File(outputDirectory, FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutZipOrGZipSuffix(FileUtils.getFileNameWithoutPath(daaFiles[i])), ".rma6")).getPath();
} else {
outputFiles = new String[daaFiles.length / 2];
for (int i = 0; i < daaFiles.length; i += 2)
outputFiles[i / 2] = new File(outputDirectory, FileUtils.replaceFileSuffix(FileUtils.getFileNameWithoutZipOrGZipSuffix(FileUtils.getFileNameWithoutPath(daaFiles[i])), ".rma6")).getPath();
}
}
} else // output.length >1
{
if ((!processInPairs && daaFiles.length != outputFiles.length) || (processInPairs && daaFiles.length != 2 * outputFiles.length))
throw new IOException("Number of input and output files do not match");
}
if (metaDataFiles.length > 1 && metaDataFiles.length != outputFiles.length) {
throw new IOException("Number of metadata files (" + metaDataFiles.length + ") doesn't match number of output files (" + outputFiles.length + ")");
}
final IdMapper taxonIdMapper = ClassificationManager.get(Classification.Taxonomy, true).getIdMapper();
final IdMapper[] idMappers = new IdMapper[cNames.size()];
// Load all mapping files:
if (runClassifications) {
ClassificationManager.get(Classification.Taxonomy, true);
taxonIdMapper.setUseTextParsing(parseTaxonNames);
if (mapDBFile.length() > 0) {
taxonIdMapper.loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
}
if (acc2TaxaFile.length() > 0) {
taxonIdMapper.loadMappingFile(acc2TaxaFile, IdMapper.MapType.Accession, false, new ProgressPercentage());
}
if (synonyms2TaxaFile.length() > 0) {
taxonIdMapper.loadMappingFile(synonyms2TaxaFile, IdMapper.MapType.Synonyms, false, new ProgressPercentage());
}
for (int i = 0; i < cNames.size(); i++) {
final String cName = cNames.get(i);
idMappers[i] = ClassificationManager.get(cName, true).getIdMapper();
if (mapDBClassifications.contains(cName))
idMappers[i].loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
if (class2AccessionFile.get(cName).length() > 0)
idMappers[i].loadMappingFile(class2AccessionFile.get(cName), IdMapper.MapType.Accession, false, new ProgressPercentage());
if (class2SynonymsFile.get(cName).length() > 0)
idMappers[i].loadMappingFile(class2SynonymsFile.get(cName), IdMapper.MapType.Synonyms, false, new ProgressPercentage());
}
}
/*
* process each set of files:
*/
for (int i = 0; i < daaFiles.length; i++) {
final int iOutput;
if (processInPairs) {
if ((i % 2) == 1)
continue; // skip odd numbers
iOutput = i / 2;
System.err.println("In DAA files: " + daaFiles[i] + ", " + daaFiles[i + 1]);
System.err.println("Output file: " + outputFiles[iOutput]);
} else {
iOutput = i;
System.err.println("In DAA file: " + daaFiles[i]);
System.err.println("Output file: " + outputFiles[i]);
}
ProgressListener progressListener = new ProgressPercentage();
final Document doc = new Document();
doc.getActiveViewers().add(Classification.Taxonomy);
doc.getActiveViewers().addAll(cNames);
doc.setMinScore(minScore);
doc.setMaxExpected(maxExpected);
doc.setMinPercentIdentity(minPercentIdentity);
doc.setLcaAlgorithm(lcaAlgorithm);
doc.setLcaCoveragePercent(lcaCoveragePercent);
doc.setTopPercent(topPercent);
doc.setMinSupportPercent(minSupportPercent);
doc.setMinSupport(minSupport);
doc.setPairedReads(pairedReads);
doc.setPairedReadSuffixLength(pairedReadsSuffixLength);
doc.setMinReadLength(minReadLength);
doc.setBlastMode(DAAParser.getBlastMode(daaFiles[i]));
doc.setLongReads(longReads);
doc.setMinPercentReadToCover(minPercentReadToCover);
doc.setMinPercentReferenceToCover(minPercentReferenceToCover);
doc.setReadAssignmentMode(readAssignmentMode);
if (contaminantsFile.length() > 0) {
ContaminantManager contaminantManager = new ContaminantManager();
contaminantManager.read(contaminantsFile);
System.err.printf("Contaminants profile: %,d input, %,d total%n", contaminantManager.inputSize(), contaminantManager.size());
doc.getDataTable().setContaminants(contaminantManager.getTaxonIdsString());
doc.setUseContaminantFilter(contaminantManager.size() > 0);
}
if (!processInPairs)
createRMA6FileFromDAA("DAA2RMA6", daaFiles[i], outputFiles[iOutput], useCompression, doc, maxMatchesPerRead, progressListener);
else
createRMA6FileFromDAAPair("DAA2RMA6", daaFiles[i], daaFiles[i + 1], outputFiles[iOutput], useCompression, doc, maxMatchesPerRead, progressListener);
progressListener.close();
final RMA6Connector connector = new RMA6Connector(outputFiles[iOutput]);
if (metaDataFiles.length > 0) {
try {
System.err.println("Saving metadata:");
SampleAttributeTable sampleAttributeTable = new SampleAttributeTable();
sampleAttributeTable.read(new FileReader(metaDataFiles[Math.min(iOutput, metaDataFiles.length - 1)]),
Collections.singletonList(FileUtils.getFileBaseName(FileUtils.getFileNameWithoutPath(outputFiles[iOutput]))), false);
Map<String, byte[]> label2data = new HashMap<>();
label2data.put(SampleAttributeTable.SAMPLE_ATTRIBUTES, sampleAttributeTable.getBytes());
connector.putAuxiliaryData(label2data);
System.err.println("done");
} catch (Exception ex) {
Basic.caught(ex);
}
}
progressListener.incrementProgress();
}
}
/**
* create an RMA6 file from a DAA file
*
* @param progressListener @throws CanceledException
*/
private static void createRMA6FileFromDAA(String creator, String daaFile, String rma6FileName, boolean useCompression, Document doc,
int maxMatchesPerRead, ProgressListener progressListener) throws IOException, CanceledException, SQLException {
final RMA6FromBlastCreator rma6Creator = new RMA6FromBlastCreator(creator, BlastFileFormat.DAA, doc.getBlastMode(), new String[]{daaFile}, new String[]{}, rma6FileName, useCompression, doc, maxMatchesPerRead);
rma6Creator.parseFiles(progressListener);
}
/**
* create an RMA6 file from a pair of DAA files
*
* @param progressListener @throws CanceledException
*/
private static void createRMA6FileFromDAAPair(String creator, String daaFile1, String daaFile2, String rma6FileName, boolean useCompression, Document doc,
int maxMatchesPerRead, ProgressListener progressListener) throws IOException, CanceledException, SQLException {
final RMA6FromBlastCreator rma6Creator =
new RMA6FromBlastCreator(creator, BlastFileFormat.DAA, doc.getBlastMode(), new String[]{daaFile1, daaFile2}, new String[]{}, rma6FileName, useCompression, doc, maxMatchesPerRead);
rma6Creator.parseFiles(progressListener);
}
}
| 22,181 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
DAAMeganizer.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/DAAMeganizer.java | /*
* DAAMeganizer.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.fx.util.ProgramExecutorService;
import jloda.swing.commands.CommandManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.accessiondb.AccessAccessionMappingDatabase;
import megan.accessiondb.ConfigRequests;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.IdParser;
import megan.classification.data.ClassificationCommandHelper;
import megan.core.Document;
import megan.daa.Meganize;
import megan.main.MeganProperties;
import megan.util.DAAFileFilter;
import java.io.IOException;
import java.util.*;
/**
* prepares a DAA file for use with MEGAN
* Daniel Huson, 8.2015
*/
public class DAAMeganizer {
    /**
     * command-line entry point: meganizes one or more DIAMOND .daa files in place,
     * reporting total time and peak memory on success
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("Meganizer");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            PeakMemoryUsageMonitor.start();
            (new DAAMeganizer()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1); // non-zero exit so scripts can detect failure
        }
    }

    /**
     * parses the command line, loads the requested classification mapping files and
     * meganizes each input DAA file separately (classification results and metadata
     * are written back into the DAA file itself)
     *
     * @param args command-line arguments
     */
    private void run(String[] args) throws Exception {
        CommandManager.getGlobalCommands().addAll(ClassificationCommandHelper.getGlobalCommands());

        final var options = new ArgsOptions(args, this, "Prepares ('meganizes') a DIAMOND .daa file for use with MEGAN");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Files");
        final var daaFiles = options.getOptionMandatory("-i", "in", "Input DAA file(s). Each is meganized separately", new String[0]);
        final var metaDataFiles = options.getOption("-mdf", "metaDataFile", "Files containing metadata to be included in files", new String[0]);

        // paired-read support is currently disabled for meganization; the option
        // definitions are kept (commented out) for reference
        // options.comment("Reads");
        final var pairedReads = false; // options.getOption("-pr", "paired", "Reads are paired", false);
        final var pairedReadsSuffixLength = 0; // options.getOption("-ps", "pairedSuffixLength", "Length of name suffix used to distinguish between name of read and its mate", 0);

        options.comment("Mode");
        var longReads = options.getOption("-lg", "longReads", "Parse and analyse as long reads", Document.DEFAULT_LONG_READS);

        options.comment("Parameters");
        final var runClassifications = options.getOption("-class", "classify", "Run classification algorithm", true);
        final var minScore = options.getOption("-ms", "minScore", "Min score", Document.DEFAULT_MINSCORE);
        final var maxExpected = options.getOption("-me", "maxExpected", "Max expected", Document.DEFAULT_MAXEXPECTED);
        final var minPercentIdentity = options.getOption("-mpi", "minPercentIdentity", "Min percent identity", Document.DEFAULT_MIN_PERCENT_IDENTITY);
        final var topPercent = options.getOption("-top", "topPercent", "Top percent", Document.DEFAULT_TOPPERCENT);

        // --minSupport and --minSupportPercent are mutually exclusive: setting one
        // (away from its default) turns the other off; setting both is an error
        final int minSupport;
        final float minSupportPercent;
        {
            final var minSupportPercent0 = options.getOption("-supp", "minSupportPercent", "Min support as percent of assigned reads (0==off)", Document.DEFAULT_MINSUPPORT_PERCENT);
            final var minSupport0 = options.getOption("-sup", "minSupport", "Min support (0==off)", Document.DEFAULT_MINSUPPORT);
            if (minSupportPercent0 != Document.DEFAULT_MINSUPPORT_PERCENT && minSupport0 == Document.DEFAULT_MINSUPPORT) {
                minSupportPercent = minSupportPercent0;
                minSupport = 0;
            } else if (minSupportPercent0 == Document.DEFAULT_MINSUPPORT_PERCENT && minSupport0 != Document.DEFAULT_MINSUPPORT) {
                minSupportPercent = 0;
                minSupport = minSupport0;
            } else if (minSupportPercent0 != Document.DEFAULT_MINSUPPORT_PERCENT) {
                throw new IOException("Please specify a value for either --minSupport or --minSupportPercent, but not for both");
            } else {
                minSupportPercent = minSupportPercent0;
                minSupport = minSupport0;
            }
        }
        final var minPercentReadToCover = options.getOption("-mrc", "minPercentReadCover", "Min percent of read length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_READ_TO_COVER);
        final var minPercentReferenceToCover = options.getOption("-mrefc", "minPercentReferenceCover", "Min percent of reference length to be covered by alignments", Document.DEFAULT_MIN_PERCENT_REFERENCE_TO_COVER);
        final var minReadLength = options.getOption("-mrl", "minReadLength", "Minimum read length", 0);
        final var lcaAlgorithm = Document.LCAAlgorithm.valueOfIgnoreCase(options.getOption("-alg", "lcaAlgorithm", "Set the LCA algorithm to use for taxonomic assignment",
                Document.LCAAlgorithm.values(), longReads ? Document.DEFAULT_LCA_ALGORITHM_LONG_READS.toString() : Document.DEFAULT_LCA_ALGORITHM_SHORT_READS.toString()));
        final var lcaCoveragePercent = options.getOption("-lcp", "lcaCoveragePercent", "Set the percent for the LCA to cover",
                lcaAlgorithm == Document.LCAAlgorithm.longReads ? Document.DEFAULT_LCA_COVERAGE_PERCENT_LONG_READS : (lcaAlgorithm == Document.LCAAlgorithm.weighted ? Document.DEFAULT_LCA_COVERAGE_PERCENT_WEIGHTED_LCA : Document.DEFAULT_LCA_COVERAGE_PERCENT_SHORT_READS));

        // the default read-assignment mode depends on long-read mode; when only
        // printing help, show both alternatives instead of picking one
        final String readAssignmentModeDefaultValue;
        if (options.isDoHelp()) {
            readAssignmentModeDefaultValue = (Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS + " in long read mode, " + Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS + " else");
        } else if (longReads)
            readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS.toString();
        else
            readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS.toString();
        final Document.ReadAssignmentMode readAssignmentMode = Document.ReadAssignmentMode.valueOfIgnoreCase(options.getOption("-ram", "readAssignmentMode", "Set the read assignment mode", readAssignmentModeDefaultValue));
        final var contaminantsFile = options.getOption("-cf", "conFile", "File of contaminant taxa (one Id or name per line)", "");

        options.comment("Classification support:");
        final var mapDBFile = options.getOption("-mdb", "mapDB", "MEGAN mapping db (file megan-map.db)", "");
        final var dbSelectedClassifications = new HashSet<>(Arrays.asList(options.getOption("-on", "only", "Use only named classifications (if not set: use all)", new String[0])));

        options.comment("Deprecated classification support:");
        final var parseTaxonNames = options.getOption("-tn", "parseTaxonNames", "Parse taxon names", true);
        final var acc2TaxaFile = options.getOption("-a2t", "acc2taxa", "Accession-to-Taxonomy mapping file", "");
        final var synonyms2TaxaFile = options.getOption("-s2t", "syn2taxa", "Synonyms-to-Taxonomy mapping file", "");
        {
            final var tags = options.getOption("-t4t", "tags4taxonomy", "Tags for taxonomy id parsing (must set to activate id parsing)", "").trim();
            ProgramProperties.preset("TaxonomyTags", tags);
            ProgramProperties.preset("TaxonomyParseIds", tags.length() > 0);
        }
        // register accession/synonym mapping and tag options for every supported
        // functional classification (NCBI taxonomy is handled separately above)
        final var class2AccessionFile = new HashMap<String, String>();
        final var class2SynonymsFile = new HashMap<String, String>();
        for (var cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
            class2AccessionFile.put(cName, options.getOption("-a2" + cName.toLowerCase(), "acc2" + cName.toLowerCase(), "Accession-to-" + cName + " mapping file", ""));
            class2SynonymsFile.put(cName, options.getOption("-s2" + cName.toLowerCase(), "syn2" + cName.toLowerCase(), "Synonyms-to-" + cName + " mapping file", ""));
            final var tags = options.getOption("-t4" + cName.toLowerCase(), "tags4" + cName.toLowerCase(), "Tags for " + cName + " id parsing (must set to activate id parsing)", "").trim();
            ProgramProperties.preset(cName + "Tags", tags);
            ProgramProperties.preset(cName + "ParseIds", tags.length() > 0);
        }
        ProgramProperties.preset(IdParser.PROPERTIES_FIRST_WORD_IS_ACCESSION, options.getOption("-fwa", "firstWordIsAccession", "First word in reference header is accession number (set to 'true' for NCBI-nr downloaded Sep 2016 or later)", true));
        ProgramProperties.preset(IdParser.PROPERTIES_ACCESSION_TAGS, options.getOption("-atags", "accessionTags", "List of accession tags", ProgramProperties.get(IdParser.PROPERTIES_ACCESSION_TAGS, IdParser.ACCESSION_TAGS)));

        options.comment(ArgsOptions.OTHER);
        ProgramExecutorService.setNumberOfCoresToUse(options.getOption("-t", "threads", "Number of threads", 8));
        ConfigRequests.setCacheSize(options.getOption("-cs", "cacheSize", "Cache size for SQLITE (use with care)", ConfigRequests.getCacheSize()));
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        // validate all input files before doing any work
        for (var fileName : daaFiles) {
            FileUtils.checkFileReadableNonEmpty(fileName);
            if (!DAAFileFilter.getInstance().accept(fileName))
                throw new IOException("File not in DAA format (or incorrect file suffix?): " + fileName);
        }
        for (var fileName : metaDataFiles) {
            FileUtils.checkFileReadableNonEmpty(fileName);
        }
        // a single metadata file is reused for all inputs; otherwise counts must match
        if (metaDataFiles.length > 1 && metaDataFiles.length != daaFiles.length) {
            throw new IOException("Number of metadata files (" + metaDataFiles.length + ") doesn't match number of DAA files (" + daaFiles.length + ")");
        }
        if (StringUtils.notBlank(mapDBFile))
            FileUtils.checkFileReadableNonEmpty(mapDBFile);
        if (StringUtils.notBlank(contaminantsFile))
            FileUtils.checkFileReadableNonEmpty(contaminantsFile);

        // the mapping database and the individual acc2/syn2 mapping files are
        // alternative mechanisms and must not be combined
        final var mapDBClassifications = AccessAccessionMappingDatabase.getContainedClassificationsIfDBExists(mapDBFile);
        if (!mapDBClassifications.isEmpty() && (StringUtils.hasPositiveLengthValue(class2AccessionFile) || StringUtils.hasPositiveLengthValue(class2SynonymsFile)))
            throw new UsageException("Illegal to use both --mapDB and ---acc2... or --syn2... options");
        if (!mapDBClassifications.isEmpty())
            ClassificationManager.setMeganMapDBFile(mapDBFile);

        // determine which functional classifications will actually be computed
        final var cNames = new ArrayList<String>();
        for (var cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
            if ((dbSelectedClassifications.isEmpty() || dbSelectedClassifications.contains(cName))
                    && (mapDBClassifications.contains(cName) || StringUtils.notBlank(class2AccessionFile.get(cName)) || StringUtils.notBlank(class2SynonymsFile.get(cName))))
                cNames.add(cName);
        }
        if (!cNames.isEmpty())
            System.err.println("Functional classifications to use: " + StringUtils.toString(cNames, ", "));

        final var taxonIdMapper = ClassificationManager.get(Classification.Taxonomy, true).getIdMapper();
        final var idMappers = new IdMapper[cNames.size()];

        // Load all mapping files:
        if (runClassifications) {
            ClassificationManager.get(Classification.Taxonomy, true);
            taxonIdMapper.setUseTextParsing(parseTaxonNames);
            if (StringUtils.notBlank(mapDBFile)) {
                taxonIdMapper.loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
            }
            if (StringUtils.notBlank(acc2TaxaFile)) {
                taxonIdMapper.loadMappingFile(acc2TaxaFile, IdMapper.MapType.Accession, false, new ProgressPercentage());
            }
            if (StringUtils.notBlank(synonyms2TaxaFile)) {
                taxonIdMapper.loadMappingFile(synonyms2TaxaFile, IdMapper.MapType.Synonyms, false, new ProgressPercentage());
            }
            for (var i = 0; i < cNames.size(); i++) {
                final var cName = cNames.get(i);
                idMappers[i] = ClassificationManager.get(cName, true).getIdMapper();
                if (mapDBClassifications.contains(cName))
                    idMappers[i].loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
                if (StringUtils.notBlank(class2AccessionFile.get(cName)))
                    idMappers[i].loadMappingFile(class2AccessionFile.get(cName), IdMapper.MapType.Accession, false, new ProgressPercentage());
                if (StringUtils.notBlank(class2SynonymsFile.get(cName)))
                    idMappers[i].loadMappingFile(class2SynonymsFile.get(cName), IdMapper.MapType.Synonyms, false, new ProgressPercentage());
            }
        }

        /*
         * process each file
         */
        for (var i = 0; i < daaFiles.length; i++) {
            final var daaFile = daaFiles[i];
            System.err.println("Meganizing: " + daaFile);
            // if only one metadata file was given, it is applied to every input
            final var metaDataFile = (metaDataFiles.length > 0 ? metaDataFiles[Math.min(i, metaDataFiles.length - 1)] : "");
            Meganize.apply(new ProgressPercentage(), daaFile, metaDataFile, cNames, minScore, maxExpected, minPercentIdentity,
                    topPercent, minSupportPercent, minSupport, pairedReads, pairedReadsSuffixLength, minReadLength, lcaAlgorithm, readAssignmentMode, lcaCoveragePercent, longReads,
                    minPercentReadToCover, minPercentReferenceToCover, contaminantsFile);
        }
    }
}
| 14,818 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
SortLastMAFAlignmentsByQuery.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/SortLastMAFAlignmentsByQuery.java | /*
* SortLastMAFAlignmentsByQuery.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.seq.FastAFileIterator;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.main.MeganProperties;
import java.io.BufferedWriter;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
/**
 * sorts alignments in a Last MAF file so that all alignments for the same query appear
 * consecutively, each group ordered by decreasing alignment score
 */
public class SortLastMAFAlignmentsByQuery {
    /**
     * program entry point: runs the sort and reports total time and peak memory on success
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("SortLastMAFAlignments");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            PeakMemoryUsageMonitor.start();
            (new SortLastMAFAlignmentsByQuery()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * parses the command line, collects all alignments per query and writes them out grouped
     * by query. If a reads file is given, its read order determines the output order;
     * otherwise queries are emitted in order of first appearance in the MAF file.
     *
     * @param args command line arguments
     * @throws Exception if parsing, reading or writing fails
     */
    private void run(String[] args) throws Exception {
        final ArgsOptions options = new ArgsOptions(args, this, "Sorts alignments in an MAF file by query");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");
        final String lastMAFFile = options.getOptionMandatory("-i", "input", "Input file in MAF format as produced by Last (.gz ok)", "");
        String readsFile = options.getOption("-r", "readsFile", "File containing all reads, if given, determines output order (.gz ok)", "");
        final String outputFile = options.getOption("-o", "output", "Output file (stdout or .gz ok)", "stdout");
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();
        MeganProperties.initializeProperties(propertiesFile);

        // per-query alignment records; each alignment is the three raw lines of a MAF record
        final HashMap<String, ArrayList<byte[][]>> readName2Alignments = new HashMap<>(1000000);
        final ArrayList<String> readNamesOrder = new ArrayList<>(1000000);
        final boolean orderSetFromReadsFile;
        if (!readsFile.isEmpty()) {
            // read names from the reads file fix the output order
            try (IFastAIterator iterator = FastAFileIterator.getFastAOrFastQAsFastAIterator(readsFile); ProgressPercentage progress = new ProgressPercentage("Processing file: " + readsFile)) {
                progress.setMaximum(iterator.getMaximumProgress());
                while (iterator.hasNext()) {
                    readNamesOrder.add(StringUtils.getFirstWord(StringUtils.swallowLeadingGreaterSign(iterator.next().getFirst())));
                    progress.setProgress(iterator.getProgress());
                }
            }
            orderSetFromReadsFile = !readNamesOrder.isEmpty();
        } else
            orderSetFromReadsFile = false;

        boolean inInitialComments = true;
        long readsIn = 0;
        long readsOut = 0;
        long alignmentsIn = 0;
        long alignmentsOut = 0;
        try (FileLineIterator it = new FileLineIterator(lastMAFFile);
             BufferedWriter w = new BufferedWriter(new OutputStreamWriter(FileUtils.getOutputStreamPossiblyZIPorGZIP(outputFile)))) {
            try (ProgressPercentage progress = new ProgressPercentage("Processing file: " + lastMAFFile)) {
                progress.setMaximum(it.getMaximumProgress());
                while (it.hasNext()) {
                    String line = it.next();
                    if (line.startsWith("#")) {
                        // copy the initial header comments (except batch markers) to the output
                        if (inInitialComments && !line.startsWith("# batch")) {
                            w.write(line);
                            w.write('\n');
                        }
                    } else {
                        if (inInitialComments)
                            inInitialComments = false;
                        if (line.startsWith("a ")) { // an alignment record: one 'a' line followed by two 's' lines
                            final byte[][] alignment = new byte[3][];
                            alignment[0] = line.getBytes();
                            if (it.hasNext()) {
                                alignment[1] = it.next().getBytes();
                                if (it.hasNext()) {
                                    final String line2 = it.next();
                                    alignment[2] = line2.getBytes();
                                    alignmentsIn++;
                                    String readName = getSecondWord(line2); // query name is the second word of the second 's' line
                                    ArrayList<byte[][]> alignments = readName2Alignments.get(readName);
                                    if (alignments == null) {
                                        alignments = new ArrayList<>(100);
                                        readName2Alignments.put(readName, alignments);
                                        if (!orderSetFromReadsFile)
                                            readNamesOrder.add(readName);
                                        readsIn++;
                                    }
                                    alignments.add(alignment);
                                }
                            }
                        }
                    }
                    progress.setProgress(it.getProgress());
                }
            }

            try (ProgressPercentage progress = new ProgressPercentage("Writing file: " + outputFile)) {
                progress.setMaximum(readName2Alignments.keySet().size());
                // first output in the prescribed order, then output any remaining queries that were not mentioned.
                // the second pass iterates over a SNAPSHOT of the remaining keys: removing entries from the map
                // while iterating over the live keySet() view would throw a ConcurrentModificationException
                for (int i = 0; i <= 1; i++) {
                    final Collection<String> order = (i == 0 ? readNamesOrder : new ArrayList<>(readName2Alignments.keySet()));
                    if (i == 1 && order.size() > 0 && orderSetFromReadsFile) {
                        System.err.println("Warning: alignments found for queries that are not mentioned in the provided reads file");
                    }
                    for (String readName : order) {
                        ArrayList<byte[][]> alignments = readName2Alignments.get(readName);
                        if (alignments != null) {
                            // best-scoring alignments first
                            alignments.sort((a, b) -> {
                                final int scoreA = parseScoreFromA(a[0]);
                                final int scoreB = parseScoreFromA(b[0]);
                                return Integer.compare(scoreB, scoreA);
                            });
                            for (byte[][] alignment : alignments) {
                                for (byte[] line : alignment) {
                                    w.write(StringUtils.toString(line));
                                    w.write('\n');
                                }
                                w.write('\n');
                                alignmentsOut++;
                            }
                            readsOut++;
                        }
                        readName2Alignments.remove(readName);
                        progress.incrementProgress();
                    }
                }
            }
        }
        if (alignmentsIn != alignmentsOut)
            System.err.println("Alignments: in=" + alignmentsIn + ", out=" + alignmentsOut);
        if (readsIn != readsOut)
            System.err.println("Reads: in=" + readsIn + ", out=" + readsOut); // was mistakenly reporting the alignment counters here
        System.err.printf("Alignments: %,10d%n", alignmentsIn);
        System.err.printf("Reads :%,10d%n", readsIn);
    }

    /**
     * parses the alignment score from a MAF 'a' line (expected format: "a score=123 ...")
     *
     * @param s the raw 'a' line
     * @return the parsed score
     */
    private int parseScoreFromA(byte[] s) {
        String string = StringUtils.toString(s);
        int a = string.indexOf('=') + 1; // first character after '='
        int b = a;
        while (b < s.length && !Character.isWhitespace(string.charAt(b)))
            b++;
        return NumberUtils.parseInt(string.substring(a, b));
    }

    /**
     * returns the second whitespace-separated word of the given string, or "" if there is none
     *
     * @param string the line to scan
     * @return the second word
     */
    private String getSecondWord(String string) {
        int a = 0;
        while (a < string.length() && Character.isWhitespace(string.charAt(a))) // skip leading white space
            a++;
        while (a < string.length() && !Character.isWhitespace(string.charAt(a))) // skip first word
            a++;
        while (a < string.length() && Character.isWhitespace(string.charAt(a))) // skip separating white space
            a++;
        int b = a;
        while (b < string.length() && !Character.isWhitespace(string.charAt(b))) // find end of second word
            b++;
        if (b < string.length())
            return string.substring(a, b);
        else if (a < string.length())
            return string.substring(a);
        else
            return "";
    }
}
| 9,544 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
DAA2Info.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/DAA2Info.java | /*
* DAA2Info.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import megan.classification.ClassificationManager;
import megan.core.Document;
import megan.daa.connector.DAAConnector;
import megan.daa.io.DAAHeader;
import megan.daa.io.DAAParser;
import megan.main.MeganProperties;
import megan.viewer.TaxonomyData;
import java.io.*;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;
/**
 * provides info on a DAA file: general header information, class-to-count and
 * read-to-class reports, and optional extraction of a MEGAN summary file
 * Daniel Huson, 11.2016
 */
public class DAA2Info {
    /**
     * program entry point: runs the report and prints peak memory usage on success
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("DAA2Info");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            PeakMemoryUsageMonitor.start();
            (new DAA2Info()).run(args);
            PeakMemoryUsageMonitor.report();
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }
    /**
     * parses the command line and writes the requested reports to the output file
     *
     * @param args command line arguments
     * @throws UsageException    if both -bo and -vo are specified
     * @throws IOException       if the input cannot be read or the output cannot be written
     * @throws CanceledException if processing is canceled
     */
    private void run(String[] args) throws UsageException, IOException, CanceledException {
        final var options = new ArgsOptions(args, this, "Analyses a DIAMOND file");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");
        options.comment("Input and Output");
        final var daaFile = options.getOptionMandatory("-i", "in", "Input DAA file", "");
        final var outputFile = options.getOption("-o", "out", "Output file (stdout or .gz ok)", "stdout");
        options.comment("Commands");
        final var listGeneralInfo = options.getOption("-l", "list", "List general info about file", false);
        final var listMoreStuff = options.getOption("-m", "listMore", "List more info about file (if meganized)", false);
        final var listClass2Count = new HashSet<>(options.getOption("-c2c", "class2count", "List class to count for named classification(s) (Possible values: " + StringUtils.toString(ClassificationManager.getAllSupportedClassifications(), " ") + ")", new ArrayList<>()));
        final var listRead2Class = new HashSet<>(options.getOption("-r2c", "read2class", "List read to class assignments for named classification(s) (Possible values: " + StringUtils.toString(ClassificationManager.getAllSupportedClassifications(), " ") + ")", new ArrayList<>()));
        final var reportNames = options.getOption("-n", "names", "Report class names rather than class Id numbers", false);
        final var reportPaths = options.getOption("-p", "paths", "Report class paths rather than class Id numbers", false);
        final var prefixRank = options.getOption("-r", "prefixRank", "When reporting class paths for taxonomy, prefix single letter to indicate taxonomic rank", false);
        final var majorRanksOnly = options.getOption("-mro", "majorRanksOnly", "Only use major taxonomic ranks", false);
        final var bacteriaOnly = options.getOption("-bo", "bacteriaOnly", "Only report bacterial reads and counts in taxonomic report", false);
        final var viralOnly = options.getOption("-vo", "virusOnly", "Only report viral reads and counts in taxonomic report", false);
        final var ignoreUnassigned = options.getOption("-u", "ignoreUnassigned", "Don't report on reads that are unassigned", true);
        final var useSummary = options.getOption("-s", "sum", "Use summarized rather than assigned counts when listing class to count", false);
        final var extractSummaryFile = options.getOption("-es", "extractSummaryFile", "Output a MEGAN summary file (contains all classifications, but no reads or alignments)", "");
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
        options.done();
        MeganProperties.initializeProperties(propertiesFile);
        // restrict taxonomic reports to a subtree: bacteria only, viruses only, or the whole taxonomy
        final int taxonomyRoot;
        if (bacteriaOnly && viralOnly)
            throw new UsageException("Please specify only one of -bo and -vo");
        else if (bacteriaOnly)
            taxonomyRoot = TaxonomyData.BACTERIA_ID;
        else if (viralOnly)
            taxonomyRoot = TaxonomyData.VIRUSES_ID;
        else
            taxonomyRoot = TaxonomyData.ROOT_ID;
        // classification data is only available once the DAA file has been meganized
        final var isMeganized = DAAParser.isMeganizedDAAFile(daaFile, true);
        final var doc = new Document();
        if (isMeganized) {
            doc.getMeganFile().setFileFromExistingFile(daaFile, true);
            doc.loadMeganFile();
        }
        try (var w = new BufferedWriter(new OutputStreamWriter(FileUtils.getOutputStreamPossiblyZIPorGZIP(outputFile)))) {
            if (listGeneralInfo || listMoreStuff) {
                final DAAHeader daaHeader = new DAAHeader(daaFile, true);
                w.write(String.format("# DIAMOND version + build: %d %d%n",daaHeader.getVersion(),daaHeader.getDiamondBuild()));
                w.write(String.format("# Number of reads: %,d\n", daaHeader.getQueryRecords()));
                w.write(String.format("# Alignment mode: %s\n", daaHeader.getAlignMode().toString().toUpperCase()));
                w.write(String.format("# Is meganized: %s\n", isMeganized));
                if (isMeganized) {
                    // list only those classifications that this MEGAN build supports
                    w.write("# Classifications:");
                    final DAAConnector connector = new DAAConnector(daaFile);
                    for (String classificationName : connector.getAllClassificationNames()) {
                        if (ClassificationManager.getAllSupportedClassifications().contains(classificationName)) {
                            w.write(" " + classificationName);
                        }
                    }
                    w.write("\n");
                    if (listMoreStuff) {
                        // prefix every line of the summary with '## '
                        w.write("# Meganization summary:\n");
                        w.write(doc.getDataTable().getSummary().replaceAll("^", "## ").replaceAll("\n", "\n## ") + "\n");
                    }
                }
            }
            if (!listClass2Count.isEmpty()) {
                if (isMeganized)
                    RMA2Info.reportClass2Count(doc, listGeneralInfo, listMoreStuff, reportPaths, reportNames, prefixRank, ignoreUnassigned, majorRanksOnly, listClass2Count, taxonomyRoot,useSummary, w);
                else
                    System.err.println("Can't list class-to-count: file has not been meganized");
            }
            if (!listRead2Class.isEmpty()) {
                if (isMeganized)
                    RMA2Info.reportRead2Count(doc, listGeneralInfo, listMoreStuff, reportPaths, reportNames, prefixRank, ignoreUnassigned, majorRanksOnly, listRead2Class, taxonomyRoot, w);
                else
                    System.err.println("Can't list read-to-count: file has not been meganized");
            }
        }
        // optionally write a MEGAN summary file (classifications only, no reads or alignments);
        // NOTE(review): this is written even if the file is not meganized, in which case the document is empty — confirm intended
        if (!extractSummaryFile.isEmpty()) {
            try (var w = new FileWriter(extractSummaryFile)) {
                doc.getDataTable().write(w);
                doc.getSampleAttributeTable().write(w, false, true);
            }
        }
    }
}
| 7,897 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
DAA2GFF3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/DAA2GFF3.java | /*
* DAA2GFF3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.core.Document;
import megan.daa.io.DAAParser;
import megan.dialogs.export.ExportAlignedReads2GFF3Format;
import megan.main.MeganProperties;
import java.io.File;
import java.io.IOException;
/**
 * extracts a GFF3 annotation file from a meganized DAA file
 */
public class DAA2GFF3 {
    /**
     * program entry point: runs the extraction and reports peak resource usage on success
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("DAA2GFF3");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);

            PeakMemoryUsageMonitor.start();
            new DAA2GFF3().run(args);
            PeakMemoryUsageMonitor.report();
            System.exit(0);
        } catch (Exception e) {
            Basic.caught(e);
            System.exit(1);
        }
    }

    /**
     * parses the command line, loads the meganized DAA file and writes the GFF3 annotation
     *
     * @param args command line arguments
     * @throws UsageException    if the command line is invalid
     * @throws IOException       if the input is not a meganized DAA file or I/O fails
     * @throws CanceledException if processing is canceled
     */
    private void run(String[] args) throws UsageException, IOException, CanceledException {
        final var options = new ArgsOptions(args, this, "Extracts a GFF3 annotation file from a meganized DAA file");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and Output");
        final var daaFile = options.getOptionMandatory("-i", "in", "Input meganized DAA file", "");
        final var outputFile = options.getOption("-o", "out", "Output file (stdout or .gz ok)", "stdout");

        options.comment("Options");
        final var classificationToReport = options.getOption("-c", "classification", "Name of classification to report, or 'all'", "all");
        final var includeIncompatible = options.getOption("-k", "incompatible", "Include incompatible", false);
        final var includeDominated = options.getOption("-d", "dominated", "Include dominated", false);
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        // only meganized DAA files carry the classification data needed for the report
        if (!DAAParser.isMeganizedDAAFile(daaFile, true))
            throw new IOException("Input file is not meganized DAA file");

        final var document = new Document();
        document.getMeganFile().setFileFromExistingFile(daaFile, true);
        document.loadMeganFile();

        // note: the exporter expects "exclude" flags, hence the negations
        ExportAlignedReads2GFF3Format.apply(document, new File(outputFile), classificationToReport, !includeIncompatible, !includeDominated, new ProgressPercentage());
    }
}
| 3,456 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Reanalyzer.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/Reanalyzer.java | /*
* Reanalyzer.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ProgramProperties;
import jloda.swing.util.ResourceManager;
import jloda.util.Basic;
import jloda.util.StringUtils;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.core.Director;
import megan.core.Document;
import megan.main.MeganProperties;
import java.io.IOException;
/**
 * Reanalyze DAA and RMA files
 * Daniel Huson, 12.2019
 */
public class Reanalyzer {
    /**
     * program entry point: runs the reanalysis and reports the elapsed wall-clock time
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("Reanalyzer");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
            long start = System.currentTimeMillis();
            (new Reanalyzer()).run(args);
            System.err.println("Time: " + ((System.currentTimeMillis() - start) / 1000) + "s");
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }
    /**
     * parses the command line and, if classification is requested, builds a 'reanalyzeFiles'
     * command string which is then executed by a newly created Director.
     * For most numeric parameters the value -1 means "keep the setting stored in the file";
     * only explicitly set or changed options are appended to the command.
     *
     * @param args command line arguments
     * @throws Exception if parsing or reanalysis fails
     */
    private void run(String[] args) throws Exception {
        final ArgsOptions options = new ArgsOptions(args, this, "Reanalyze DAA and RMA files");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");
        final String[] inputFiles = options.getOptionMandatory("-i", "input", "Input file (stdin ok)", new String[0]);
        options.comment("Parameters");
        final boolean longReads = options.getOption("-lg", "longReads", "Parse and analyse as long reads", Document.DEFAULT_LONG_READS);
        final boolean longReadsSet = options.optionWasExplicitlySet();
        final boolean runClassifications = options.getOption("-class", "classify", "Run classification algorithm", true);
        final float minScore = options.getOption("-ms", "minScore", "Min score (-1: no change)", -1f);
        final float maxExpected = options.getOption("-me", "maxExpected", "Max expected (-1: no change)", -1f);
        final float minPercentIdentity = options.getOption("-mpi", "minPercentIdentity", "Min percent identity (-1: no change)", -1f);
        final float topPercent = options.getOption("-top", "topPercent", "Top percent (-1: no change)", -1f);
        final int minSupport;
        final float minSupportPercent;
        {
            // min-support may be given either as an absolute count or as a percentage, but not both;
            // setting one explicitly turns the other off (value 0)
            final float minSupportPercent0 = options.getOption("-supp", "minSupportPercent", "Min support as percent of assigned reads (0: off, -1: no change)", -1f);
            final int minSupport0 = options.getOption("-sup", "minSupport", "Min support (0: off, -1; no change)", -1);
            if (minSupportPercent0 != -1 && minSupport0 == -1) {
                minSupportPercent = minSupportPercent0;
                minSupport = 0;
            } else if (minSupportPercent0 == -1 && minSupport0 != -1) {
                minSupportPercent = 0;
                minSupport = minSupport0;
            } else if (minSupportPercent0 != -1) {
                // reaching here means both options were set explicitly
                throw new IOException("Please specify a value for either --minSupport or --minSupportPercent, but not for both");
            } else {
                // neither set: keep the values stored in the file (-1, -1)
                minSupportPercent = minSupportPercent0;
                minSupport = minSupport0;
            }
        }
        final float minPercentReadToCover = options.getOption("-mrc", "minPercentReadCover", "Min percent of read length to be covered by alignments (-1: no change)", -1f);
        final float minPercentReferenceToCover = options.getOption("-mrefc", "minPercentReferenceCover", "Min percent of reference length to be covered by alignments (-1: no change)", -1f);
        final Document.LCAAlgorithm lcaAlgorithm = Document.LCAAlgorithm.valueOfIgnoreCase(options.getOption("-alg", "lcaAlgorithm", "Set the LCA algorithm to use for taxonomic assignment",
                Document.LCAAlgorithm.values(), longReads ? Document.DEFAULT_LCA_ALGORITHM_LONG_READS.toString() : Document.DEFAULT_LCA_ALGORITHM_SHORT_READS.toString()));
        final boolean lcaAlgorithmWasSet = options.optionWasExplicitlySet();
        final float lcaCoveragePercent = options.getOption("-lcp", "lcaCoveragePercent", "Set the percent for the LCA to cover (-1: no change)", -1f);
        // the default read assignment mode depends on whether long-read mode is active
        final String readAssignmentModeDefaultValue;
        if (options.isDoHelp()) {
            readAssignmentModeDefaultValue = (Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS + " in long read mode, " + Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS + " else");
        } else if (longReads)
            readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_LONG_READS.toString();
        else
            readAssignmentModeDefaultValue = Document.DEFAULT_READ_ASSIGNMENT_MODE_SHORT_READS.toString();
        final Document.ReadAssignmentMode readAssignmentMode = Document.ReadAssignmentMode.valueOfIgnoreCase(options.getOption("-ram", "readAssignmentMode", "Set the read assignment mode", readAssignmentModeDefaultValue));
        final boolean readAssignmentModeSet = options.optionWasExplicitlySet();
        final String contaminantsFile = options.getOption("-cf", "conFile", "File of contaminant taxa (one Id or name per line)", "");
        final boolean useContaminantFilter = (contaminantsFile.length() > 0);
        final boolean pairedReads = options.getOption("-pr", "paired", "Reads are paired", false);
        final boolean pairedReadsSet = options.optionWasExplicitlySet();
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
        options.done();
        MeganProperties.initializeProperties(propertiesFile);
        ClassificationManager.ensureTreeIsLoaded(Classification.Taxonomy);
        if (runClassifications) {
            // assemble the 'reanalyzeFiles' command; only explicitly set or changed
            // parameters are appended, so all other stored settings remain untouched
            final StringBuilder buf = new StringBuilder();
            buf.append("reanalyzeFiles file='").append(StringUtils.toString(inputFiles, "', '")).append("'");
            if (minSupportPercent != -1f)
                buf.append(" minSupportPercent = ").append(minSupportPercent);
            if (minSupport != -1f)
                buf.append(" minSupport = ").append(minSupport);
            if (minScore != -1f)
                buf.append(" minScore = ").append(minScore);
            if (maxExpected != -1f)
                buf.append(" maxExpected = ").append(maxExpected);
            if (minPercentIdentity != -1f)
                buf.append(" minPercentIdentity = ").append(minPercentIdentity);
            if (topPercent != -1f)
                buf.append(" topPercent = ").append(topPercent);
            if (lcaAlgorithmWasSet)
                buf.append(" lcaAlgorithm = ").append(lcaAlgorithm);
            if (lcaCoveragePercent != -1f)
                buf.append(" lcaCoveragePercent = ").append(lcaCoveragePercent);
            if (minPercentReadToCover != -1f)
                buf.append(" minPercentReadToCover = ").append(minPercentReadToCover);
            if (minPercentReferenceToCover != -1f)
                buf.append(" minPercentReferenceToCover = ").append(minPercentReferenceToCover);
            //" minComplexity = ");minComplexity);
            if (longReadsSet)
                buf.append(" longReads = ").append(longReads);
            if (pairedReadsSet)
                buf.append(" pairedReads = ").append(pairedReads);
            // " useIdentityFilter =");useIdentityFilter);
            if (useContaminantFilter) {
                buf.append(" useContaminantFilter = ").append(useContaminantFilter);
                buf.append(" loadContaminantFile = '").append(contaminantsFile).append("'");
            }
            if (readAssignmentModeSet)
                buf.append(" readAssignmentMode = ").append(readAssignmentMode);
            buf.append(" fNames=*;");
            // execute the assembled command in a fresh (non-GUI) project
            final Director director = Director.newProject(false, true);
            director.executeImmediately(buf.toString(), director.getMainViewer().getCommandManager());
        }
    }
}
| 9,049 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
MergeMultipleAccessionAssignments.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/utils/MergeMultipleAccessionAssignments.java | /*
* ReferencesAnnotator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools.utils;
import jloda.swing.commands.CommandManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import megan.accessiondb.AccessAccessionMappingDatabase;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdParser;
import megan.classification.data.ClassificationCommandHelper;
import megan.main.Megan6;
import megan.main.MeganProperties;
import java.io.BufferedWriter;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.stream.Collectors;
/**
* merge accession assignments
* Daniel Huson, 9.2022, 1.2024
*/
public class MergeMultipleAccessionAssignments {
/**
* merge accession assignments
*/
public static void main(String[] args) {
try {
ResourceManager.insertResourceRoot(megan.resources.Resources.class);
ProgramProperties.setProgramName(MergeMultipleAccessionAssignments.class.getSimpleName());
ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);
PeakMemoryUsageMonitor.start();
(new MergeMultipleAccessionAssignments()).run(args);
System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
System.exit(0);
} catch (Exception ex) {
Basic.caught(ex);
System.exit(1);
}
}
/**
* run
*/
private void run(String[] args) throws UsageException, IOException, SQLException {
CommandManager.getGlobalCommands().addAll(ClassificationCommandHelper.getGlobalCommands());
final var options = new ArgsOptions(args, this, "Merge multiple accession assignments");
options.setVersion(ProgramProperties.getProgramVersion());
options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
options.setAuthors("Daniel H. Huson");
final var inputFile = options.getOptionMandatory("-i", "in", "Input file, each line containing a cluster accession followed by member accessions (stdin, .gz ok)", "");
var outputFile = options.getOption("-o", "out", "Output file, each line containing first accession and merged assignments (stdout or .gz ok)", "stdout");
var outputScript = options.getOption("-s", "script", "Script to be processed by sqlite3", "stdout");
final var mapDBFile = options.getOptionMandatory("-mdb", "mapDB", "MEGAN mapping db (file megan-map.db)", "");
var cNames = options.getOption("-c", "classifications", "Classifications to assign (ALL or list of names)", new String[]{"ALL"});
final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file", Megan6.getDefaultPropertiesFile());
options.comment("Advanced");
final var linesPerCall = options.getOption("-lpc", "linesPerCall", "Lines to process per call", 100);
final var accessionsPerQuery = options.getOption("-apc", "accessionsPerQuery", "Maximum number of accessions per SQLITE query", 10000);
options.done();
MeganProperties.initializeProperties(propertiesFile);
FileUtils.checkFileReadableNonEmpty(inputFile);
FileUtils.checkFileReadableNonEmpty(mapDBFile);
FileUtils.checkAllFilesDifferent(inputFile, outputFile, mapDBFile);
ClassificationManager.setMeganMapDBFile(mapDBFile);
var database = new AccessAccessionMappingDatabase(mapDBFile);
var supportedCNames = database.getClassificationNames().stream().filter(name -> ClassificationManager.getAllSupportedClassifications().contains(name)).collect(Collectors.toList());
if (StringUtils.containsIgnoreCase(cNames, "all")) {
if (cNames.length != 1)
throw new UsageException("--classifications: 'ALL' must be only value");
cNames = supportedCNames.toArray(new String[0]);
} else {
for (var name : cNames) {
if (!supportedCNames.contains(name))
throw new UsageException("--classifications: " + name + " not supported, must be one of: " + StringUtils.toString(supportedCNames, ", "));
}
}
if (cNames.length == 0) {
throw new UsageException("--classifications: must specify at least one, or ALL");
}
System.err.println("Classifications: " + StringUtils.toString(cNames, ", "));
final var idParsers = new IdParser[cNames.length];
for (var i = 0; i < cNames.length; i++) {
final var cName = cNames[i];
var idMapper = ClassificationManager.get(cName, true).getIdMapper();
idParsers[i] = idMapper.createIdParser();
if (cNames[i].equals(Classification.Taxonomy) || cNames[i].equals("GTDB"))
idParsers[i].setAlgorithm(IdParser.Algorithm.LCA);
else
idParsers[i].setAlgorithm(IdParser.Algorithm.Majority);
}
final var workingData = new WorkingData(accessionsPerQuery);
var sizes = new long[cNames.length];
try (var it = new FileLineIterator(inputFile, true);
var w = new BufferedWriter(FileUtils.getOutputWriterPossiblyZIPorGZIP(outputFile));) {
System.err.println("Writing file: " + outputFile);
w.write("#Accession\t" + StringUtils.toString(cNames, "\t") + "\n");
var rowCount = 0;
final var accessionRows = new String[linesPerCall][];
{
var prevAccession = "";
var prevVersion = 1;
var collectedTokens = new ArrayList<String>();
while (it.hasNext()) {
final var line = it.next().trim();
var tokens = line.split("\\s");
if (tokens.length > 0) {
var accession = getAccession(tokens[0]);
var version = getVersion(tokens[0]);
if (accession.equals(prevAccession)) { // same as previous
if (version != prevVersion) {
collectedTokens.clear(); // is other version, clear
}
for (var i = 1; i < tokens.length; i++) { // copy accessions
collectedTokens.add(tokens[i].replaceAll("\\.[0-9]*$", ""));
}
} else { // not same as previous
if (!prevAccession.isBlank()) { // if previous set, flush
var row = new String[collectedTokens.size() + 1];
row[0] = prevAccession;
var i = 1;
for (var acc : collectedTokens) {
row[i++] = acc;
}
accessionRows[rowCount++] = row;
if (rowCount >= linesPerCall) { // time to process what we have
writeOutput(w, database, idParsers, accessionRows, rowCount, cNames, workingData, sizes);
rowCount = 0;
}
}
// copy to previous
prevAccession = accession;
prevVersion = version;
collectedTokens.clear();
for (var i = 1; i < tokens.length; i++) {
collectedTokens.add(tokens[i].replaceAll("\\.[0-9]*$", ""));
}
}
}
}
if (!prevAccession.isBlank()) { // if previous set, flush
var nextRow = new String[collectedTokens.size() + 1];
nextRow[0] = prevAccession;
var i = 1;
for (var acc : collectedTokens) {
nextRow[i++] = acc;
}
accessionRows[rowCount++] = nextRow;
}
if (rowCount > 0) {
writeOutput(w, database, idParsers, accessionRows, rowCount, cNames, workingData, sizes);
}
}
/*
if (false) { // this is the old code
while (it.hasNext()) {
final var line = it.next().trim();
final var nextRow = Arrays.stream(line.split("\\s+")).map(s -> s.replaceAll("\\.[0-9]*$", "")).toArray(String[]::new);
if (nextRow.length > 0) {
accessionRows[rowCount++] = nextRow;
if (rowCount >= linesPerCall) { // time to process what we have
writeOutput(w, database, idParsers, accessionRows, rowCount, cNames, workingData,sizes);
rowCount = 0;
}
}
}
if (rowCount > 0) {
writeOutput(w, database, idParsers, accessionRows, rowCount, cNames, workingData,sizes);
}
}
*/
}
try (var w = FileUtils.getOutputWriterPossiblyZIPorGZIP(outputScript)) {
w.write(getSQLITECommands(cNames, getInfo(cNames, database), sizes, outputFile));
}
}
/**
 * Maps one batch of accession rows to class ids and writes one output line per row.
 * For each row, the first accession is the key; for every classification the ids of all
 * accessions in the row are combined via the classification's id parser (LCA or majority,
 * as configured by the caller) into a single class id.
 *
 * @param w             output writer (tab-separated: accession, then one id column per classification)
 * @param database      mapping database used to look up class ids for accessions
 * @param idParsers     one parser per classification, used to merge multiple ids into one
 * @param accessionRows each row: first element is the key accession, remainder are associated accessions
 * @param rowCount      number of valid rows in accessionRows (the array may be larger)
 * @param cNames        classification names, defines the output column order
 * @param workingData   recycled scratch buffers (avoids re-allocation across batches)
 * @param counts        per-classification assignment counters, incremented in place
 * @throws SQLException if the database lookup fails
 * @throws IOException  if writing fails
 */
private static void writeOutput(final BufferedWriter w, final AccessAccessionMappingDatabase database, final IdParser[] idParsers,
                                final String[][] accessionRows, final int rowCount, final String[] cNames, WorkingData workingData, long[] counts) throws SQLException, IOException {
    final int[][] accessionClassesMap;
    // compute mapping of accessions to their classes in different classifications
    {
        // flatten all rows into one list; order matters, it is mirrored by accessionClassesMap below
        final var accessions = workingData.accessionsCleared();
        for (var r = 0; r < rowCount; r++) {
            Collections.addAll(accessions, accessionRows[r]);
        }
        var totalAccessions = accessions.size();
        accessionClassesMap = new int[totalAccessions][];
        // query the database in chunks of at most maxQuerySize accessions
        for (var start = 0; start < totalAccessions; start += workingData.maxQuerySize()) {
            var end = Math.min(totalAccessions, start + workingData.maxQuerySize());
            var subAccessions = accessions.subList(start, end).toArray(new String[0]);
            var subAccessionClassesTable = database.getValues(subAccessions, subAccessions.length, cNames);
            System.arraycopy(subAccessionClassesTable, 0, accessionClassesMap, start, subAccessions.length);
        }
    }
    if (false) { // debug output only, disabled
        System.err.println("Accession\t" + StringUtils.toString(cNames, "\t"));
        var count = 0;
        for (var r = 0; r < rowCount; r++) {
            var row = accessionRows[r];
            for (var accession : row) {
                System.err.println(accession + ": " + StringUtils.toString(accessionClassesMap[count++]));
            }
        }
    }
    // for each row of accessions, compute the resulting class ids and write out 'first-accession to classes' table:
    {
        final var classIds = workingData.classIdsCleared();
        var accessionNumber = 0; // number of accession in flat list; offset of the current row in accessionClassesMap
        for (var r = 0; r < rowCount; r++) {
            final var row = accessionRows[r];
            final var firstAccessionInRow = row[0];
            w.write(firstAccessionInRow);
            for (var c = 0; c < cNames.length; c++) {
                classIds.clear();
                // collect all non-zero class ids seen for this row in classification c
                for (var posInRow = 0; posInRow < row.length; posInRow++) {
                    var id = accessionClassesMap[accessionNumber + posInRow][c];
                    if (id != 0) {
                        classIds.add(id);
                    }
                    // if(id<0) System.err.println(id);
                }
                // merge the collected ids into a single assignment (LCA or majority, per parser setup)
                var id = idParsers[c].processMultipleIds(classIds);
                if (id != 0) {
                    w.write("\t" + id);
                    counts[c]++;
                } else {
                    w.write("\t"); // empty column: no assignment for this classification
                }
            }
            w.write("\n");
            accessionNumber += row.length;
        }
    }
}
/**
 * Strips the version suffix from an accession string, i.e. returns everything before the
 * first '.'; if no '.' is present, the input is returned unchanged.
 *
 * @param accessionAndVersion accession, optionally followed by ".version"
 * @return the bare accession
 */
private static String getAccession(String accessionAndVersion) {
    final var dotPos = accessionAndVersion.indexOf(".");
    if (dotPos < 0)
        return accessionAndVersion;
    return accessionAndVersion.substring(0, dotPos);
}
/**
 * Extracts the numeric version suffix from an accession string (the text after the first '.').
 * Defaults to 1 when there is no '.' or nothing follows it.
 *
 * @param accessionAndVersion accession, optionally followed by ".version"
 * @return the parsed version, or 1 if absent
 */
private static int getVersion(String accessionAndVersion) {
    final var dotPos = accessionAndVersion.indexOf(".");
    if (dotPos == -1 || dotPos == accessionAndVersion.length() - 1)
        return 1;
    return NumberUtils.parseInt(accessionAndVersion.substring(dotPos + 1));
}
/**
 * Recycled scratch buffers used while processing batches, so that the same lists are
 * reused across calls instead of being re-allocated.
 *
 * @param maxQuerySize maximum number of accessions to send to the database in one query
 * @param classIds     shared buffer for collecting class ids
 * @param accessions   shared buffer for collecting accessions
 */
private record WorkingData(int maxQuerySize, ArrayList<Integer> classIds, ArrayList<String> accessions) {
    public WorkingData(int maxQuerySize) {
        this(maxQuerySize, new ArrayList<>(), new ArrayList<>());
    }

    /** Empties and returns the shared class-id buffer. */
    public ArrayList<Integer> classIdsCleared() {
        final var buffer = classIds();
        buffer.clear();
        return buffer;
    }

    /** Empties and returns the shared accession buffer. */
    public ArrayList<String> accessionsCleared() {
        final var buffer = accessions();
        buffer.clear();
        return buffer;
    }
}
/**
 * Builds the SQLite command script that creates the mapping database: an info table with one
 * row per classification, the mappings table schema, and (when the data file is zipped) the
 * commands to bulk-import the tab-separated data.
 *
 * @param cNames   classification names (become columns of the mappings table)
 * @param info     info string per classification (parallel to cNames)
 * @param size     number of assignments per classification (parallel to cNames)
 * @param dataFile the data file that the script expects to import
 * @return the SQLite command script as a single string
 */
public static String getSQLITECommands(String[] cNames, String[] info, long[] size, String dataFile) {
    var buf = new StringBuilder();
    buf.append("CREATE TABLE info(id TEXT PRIMARY KEY, info_string TEXT, size NUMERIC );\n");
    for (var i = 0; i < cNames.length; i++) {
        // fixed: the VALUES clause previously lacked its closing parenthesis ("... '%d';"),
        // producing invalid SQL; size is numeric and is no longer quoted
        buf.append("INSERT INTO info (id, info_string, size) VALUES ('%s', '%s', %d);\n".formatted(cNames[i], info[i], size[i]));
    }
    buf.append("CREATE TABLE mappings (Accession PRIMARY KEY\n");
    for (var name : cNames) {
        buf.append(", ").append(name.toUpperCase()).append(" INT");
    }
    buf.append(") WITHOUT ROWID;\n");
    if (!dataFile.equals("stdout") && FileUtils.isZIPorGZIPFile(dataFile)) {
        buf.append(".mode tabs\n");
        // sqlite's syntax is ".import FILE TABLE"; the only importable table created by this
        // script is 'mappings' (previously the data-file path was emitted in the table-name
        // position). NOTE(review): the file name 'data.tab' presumably refers to the unzipped
        // form of dataFile produced by the surrounding pipeline — confirm against the caller.
        buf.append(".import data.tab mappings\n");
    }
    return buf.toString();
}
/**
 * Looks up the info string for each named classification in the mapping database.
 * Classifications for which the lookup fails are reported as "?".
 *
 * @param cNames   classification names
 * @param database mapping database to query
 * @return one info string per classification, parallel to cNames
 */
private String[] getInfo(String[] cNames, AccessAccessionMappingDatabase database) {
    final var result = new String[cNames.length];
    var i = 0;
    for (var cName : cNames) {
        String infoString;
        try {
            infoString = database.getInfo(cName);
        } catch (SQLException ignored) {
            infoString = "?"; // no info available for this classification
        }
        result[i++] = infoString;
    }
    return result;
}
} | 16,045 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReferencesAnnotator.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/tools/utils/ReferencesAnnotator.java | /*
* ReferencesAnnotator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.tools.utils;
import jloda.seq.FastAFileIterator;
import jloda.swing.commands.CommandManager;
import jloda.swing.util.ArgsOptions;
import jloda.swing.util.ResourceManager;
import jloda.util.*;
import jloda.util.progress.ProgressPercentage;
import megan.accessiondb.AccessAccessionMappingDatabase;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.IdParser;
import megan.classification.data.ClassificationCommandHelper;
import megan.main.MeganProperties;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
/**
 * Command-line tool that annotates a file of reference sequences: for each FastA record the
 * configured id parsers extract taxonomic and functional class ids from the header, and the
 * header is rewritten with each recognized id appended as a short class tag.
 * Daniel Huson, 3.2016
 */
public class ReferencesAnnotator {
    /**
     * Program entry point: runs the annotator, then reports total wall-clock time and peak
     * memory usage; exits with 0 on success and 1 on any exception.
     */
    public static void main(String[] args) {
        try {
            ResourceManager.insertResourceRoot(megan.resources.Resources.class);
            ProgramProperties.setProgramName("ReferencesAnnotator");
            ProgramProperties.setProgramVersion(megan.main.Version.SHORT_DESCRIPTION);

            PeakMemoryUsageMonitor.start();
            (new ReferencesAnnotator()).run(args);
            System.err.println("Total time: " + PeakMemoryUsageMonitor.getSecondsSinceStartString());
            System.err.println("Peak memory: " + PeakMemoryUsageMonitor.getPeakUsageString());
            System.exit(0);
        } catch (Exception ex) {
            Basic.caught(ex);
            System.exit(1);
        }
    }

    /**
     * Parses the command line, loads all requested mapping resources (MEGAN map db,
     * accession and synonym mapping files), then streams through the input references,
     * writing each sequence back out with an annotated header.
     *
     * @param args command-line arguments
     * @throws UsageException    if an illegal combination of options is given
     * @throws IOException       if reading or writing fails
     * @throws CanceledException if a progress monitor is canceled
     */
    private void run(String[] args) throws UsageException, IOException, CanceledException {
        CommandManager.getGlobalCommands().addAll(ClassificationCommandHelper.getGlobalCommands());

        // set up and parse command-line options:
        final ArgsOptions options = new ArgsOptions(args, this, "Annotates reference sequences");
        options.setVersion(ProgramProperties.getProgramVersion());
        options.setLicense("Copyright (C) 2024. This program comes with ABSOLUTELY NO WARRANTY.");
        options.setAuthors("Daniel H. Huson");

        options.comment("Input and output");
        final var inputFile = options.getOptionMandatory("-i", "in", "Input references file (stdin, .gz ok)", "");
        var outputFile = options.getOptionMandatory("-o", "out", "Output file (stdout or .gz ok)", "");

        options.comment("Classification support:");
        final var parseTaxonNames = options.getOption("-tn", "parseTaxonNames", "Parse taxon names", true);
        final var mapDBFile = options.getOption("-mdb", "mapDB", "MEGAN mapping db (file megan-map.db)", "");
        final var acc2TaxaFile = options.getOption("-a2t", "acc2taxa", "Accession-to-Taxonomy mapping file", "");
        final var synonyms2TaxaFile = options.getOption("-s2t", "syn2taxa", "Synonyms-to-Taxonomy mapping file", "");

        // per-classification mapping files and id-parsing tags:
        final var class2AccessionFile = new HashMap<String, String>();
        final var class2SynonymsFile = new HashMap<String, String>();

        for (var cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
            class2AccessionFile.put(cName, options.getOption("-a2" + cName.toLowerCase(), "acc2" + cName.toLowerCase(), "Accession-to-" + cName + " mapping file", ""));
            class2SynonymsFile.put(cName, options.getOption("-s2" + cName.toLowerCase(), "syn2" + cName.toLowerCase(), "Synonyms-to-" + cName + " mapping file", ""));
            final var tags = options.getOption("-t4" + cName.toLowerCase(), "tags4" + cName.toLowerCase(), "Tags for " + cName + " id parsing (must set to activate id parsing)", "").trim();
            if (tags.length() > 0)
                ProgramProperties.put(cName + "Tags", tags);
            ProgramProperties.put(cName + "ParseIds", tags.length() > 0);
        }

        options.comment(ArgsOptions.OTHER);
        ProgramProperties.put(IdParser.PROPERTIES_FIRST_WORD_IS_ACCESSION, options.getOption("-fwa", "firstWordIsAccession", "First word in reference header is accession number", ProgramProperties.get(IdParser.PROPERTIES_FIRST_WORD_IS_ACCESSION, true)));
        ProgramProperties.put(IdParser.PROPERTIES_ACCESSION_TAGS, options.getOption("-atags", "accessionTags", "List of accession tags", ProgramProperties.get(IdParser.PROPERTIES_ACCESSION_TAGS, IdParser.ACCESSION_TAGS)));
        final var propertiesFile = options.getOption("-P", "propertiesFile", "Properties file",megan.main.Megan6.getDefaultPropertiesFile());
        options.done();

        MeganProperties.initializeProperties(propertiesFile);

        FileUtils.checkFileReadableNonEmpty(inputFile);

        // the map db and the individual mapping-file options are mutually exclusive:
        final var mapDBClassifications = AccessAccessionMappingDatabase.getContainedClassificationsIfDBExists(mapDBFile);
        if (mapDBClassifications.size() > 0 && (StringUtils.hasPositiveLengthValue(class2AccessionFile) || StringUtils.hasPositiveLengthValue(class2SynonymsFile)))
            throw new UsageException("Illegal to use both --mapDB and ---acc2... or --syn2... options");

        // determine which functional classifications have any mapping source configured:
        final var cNames = new ArrayList<String>();
        for (var cName : ClassificationManager.getAllSupportedClassificationsExcludingNCBITaxonomy()) {
            if (mapDBClassifications.contains(cName) || class2AccessionFile.get(cName).length() > 0 || class2SynonymsFile.get(cName).length() > 0)
                cNames.add(cName);
        }
        if (cNames.size() > 0)
            System.err.println("Functional classifications to use: " + StringUtils.toString(cNames, ", "));

        // idMappers holds one mapper per functional classification, with taxonomy in the last slot:
        final var idMappers = new IdMapper[cNames.size() + 1];

        final var taxonIdMapper = ClassificationManager.get(Classification.Taxonomy, true).getIdMapper();
        {
            // Load all mapping files:
            ClassificationManager.get(Classification.Taxonomy, true);
            taxonIdMapper.setUseTextParsing(parseTaxonNames);
            if (mapDBFile.length() > 0) {
                taxonIdMapper.loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
            }
            if (acc2TaxaFile.length() > 0) {
                taxonIdMapper.loadMappingFile(acc2TaxaFile, IdMapper.MapType.Accession, false, new ProgressPercentage());
            }
            if (synonyms2TaxaFile.length() > 0) {
                taxonIdMapper.loadMappingFile(synonyms2TaxaFile, IdMapper.MapType.Synonyms, false, new ProgressPercentage());
            }
            idMappers[idMappers.length - 1] = taxonIdMapper;
        }
        for (var i = 0; i < cNames.size(); i++) {
            final var cName = cNames.get(i);
            idMappers[i] = ClassificationManager.get(cName, true).getIdMapper();
            if (mapDBClassifications.contains(cName))
                idMappers[i].loadMappingFile(mapDBFile, IdMapper.MapType.MeganMapDB, false, new ProgressPercentage());
            if (class2AccessionFile.get(cName).length() > 0)
                idMappers[i].loadMappingFile(class2AccessionFile.get(cName), IdMapper.MapType.Accession, false, new ProgressPercentage());
            if (class2SynonymsFile.get(cName).length() > 0)
                idMappers[i].loadMappingFile(class2SynonymsFile.get(cName), IdMapper.MapType.Synonyms, false, new ProgressPercentage());
        }

        // one parser per mapper, in the same order (functional classifications, then taxonomy):
        final var idParsers = new IdParser[idMappers.length];
        for (var i = 0; i < idMappers.length; i++) {
            idParsers[i] = new IdParser(idMappers[i]);
        }

        final var counts = new int[idMappers.length]; // number of successful assignments per classification

        // stream through the input and write each record back out with an annotated header:
        try (var it = new FastAFileIterator(inputFile);
             var outs = new BufferedOutputStream(FileUtils.getOutputStreamPossiblyZIPorGZIP(outputFile));
             var progress = new ProgressPercentage("Reading file: " + inputFile, it.getMaximumProgress())) {
            System.err.println("Writing file: " + outputFile);

            while (it.hasNext()) {
                final var pair = it.next(); // first: header line, second: sequence
                final var header = new StringBuilder();
                final var firstWord = StringUtils.getFirstWord(pair.getFirst());
                header.append(firstWord);
                var first = true; // true until the first class tag has been appended
                for (var i = 0; i < idParsers.length; i++) {
                    var id = idParsers[i].getIdFromHeaderLine(pair.getFirst());
                    if (id != 0) {
                        if (first) {
                            if (!firstWord.endsWith("|"))
                                header.append("|");
                            first = false;
                        } else
                            header.append("|");
                        // append e.g. "tax|1234" using the classification's short tag
                        header.append(String.format("%s%d", Classification.createShortTag(idMappers[i].getCName()), id));
                        counts[i]++;
                    }
                    progress.setProgress(it.getProgress());
                }
                // NOTE(review): String.getBytes() uses the platform default charset here —
                // presumably the references are ASCII; confirm if non-ASCII headers can occur.
                outs.write(header.toString().getBytes());
                outs.write('\n');
                outs.write(pair.getSecond().getBytes());
                outs.write('\n');
            }
        }

        // report classification sizes:
        for (var i = 0; i < idMappers.length; i++) {
            System.err.printf("Class. %-13s%,10d%n", idMappers[i].getCName() + ":", counts[i]);
        }
    }
}
| 10,035 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
IAssignmentAlgorithmCreator.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/IAssignmentAlgorithmCreator.java | /*
* IAssignmentAlgorithmCreator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
/**
 * Factory for assignment-algorithm instances, allowing each consumer (e.g. each worker)
 * to obtain its own algorithm object.
 * Daniel Huson, 1.2016
 */
interface IAssignmentAlgorithmCreator {
    /**
     * create a new assignment algorithm object
     *
     * @return a fresh {@code IAssignmentAlgorithm} instance
     */
    IAssignmentAlgorithm createAssignmentAlgorithm();
}
| 1,075 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
DataProcessor.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/DataProcessor.java | /*
* DataProcessor.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.swing.util.ProgramProperties;
import jloda.swing.window.NotificationsInSwing;
import jloda.util.Basic;
import jloda.util.CanceledException;
import jloda.util.interval.Interval;
import jloda.util.interval.IntervalTree;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.core.ContaminantManager;
import megan.core.Document;
import megan.core.ReadAssignmentCalculator;
import megan.core.SyncArchiveAndDataTable;
import megan.data.*;
import megan.io.InputOutputReaderWriter;
import megan.main.MeganProperties;
import megan.rma6.RMA6File;
import megan.rma6.ReadBlockRMA6;
import java.io.IOException;
import java.util.*;
/**
 * Analyzes all reads in a sample: streams through the read blocks of a MEGAN file, assigns
 * each read to a class in every active classification (taxonomy via an LCA variant,
 * functional classifications via best hit), applies the min-support and disabled-taxa
 * filters, and writes the resulting classification tables back to the file.
 * Daniel Huson, 1.2009, 3.2016
 */
public class DataProcessor {
    /**
     * process a dataset: bin all reads of the given document and update its file.
     *
     * @param doc the document to process; its parameters (min score, LCA algorithm, filters,
     *            read-assignment mode, ...) control the binning
     * @return the total number of reads recorded in the data table, or 0 on failure
     */
    public static int apply(final Document doc) {
        final ProgressListener progress = doc.getProgressListener();
        try {
            progress.setTasks("Binning reads", "Initializing...");
            System.err.println("Initializing binning...");

            if (doc.isUseIdentityFilter()) {
                System.err.println("Using rank-specific min percent-identity values for taxonomic assignment of 16S reads");
            }

            // optional contaminant filter, configured via the document's data table:
            final ContaminantManager contaminantManager;
            if (doc.isUseContaminantFilter() && doc.getDataTable().hasContaminants()) {
                contaminantManager = new ContaminantManager();
                contaminantManager.parseTaxonIdsString(doc.getDataTable().getContaminants());
                System.err.printf("Using contaminants profile: %,d input, %,d total%n", contaminantManager.inputSize(), contaminantManager.size());
            } else
                contaminantManager = null;

            final int numberOfClassifications = doc.getActiveViewers().size();
            final String[] cNames = doc.getActiveViewers().toArray(new String[numberOfClassifications]);
            // taxonomic classifications use an LCA variant, all others use best-hit:
            final boolean[] useLCAForClassification = new boolean[numberOfClassifications];
            for (int c = 0; c < numberOfClassifications; c++) {
                ClassificationManager.ensureTreeIsLoaded(cNames[c]);
                if (Arrays.asList(ProgramProperties.get(MeganProperties.TAXONOMIC_CLASSIFICATIONS, new String[]{"Taxonomy", "GTDB"})).contains(cNames[c]))
                    useLCAForClassification[c] = true;
            }

            final var updateList = new UpdateItemList(numberOfClassifications);

            // paired-read support is only available for RMA6 files:
            final var doMatePairs = doc.isPairedReads() && doc.getMeganFile().isRMA6File();

            if (doc.isPairedReads() && !doc.getMeganFile().isRMA6File())
                System.err.println("WARNING: Not an RMA6 file, will ignore paired read information");

            if (doMatePairs)
                System.err.println("Using paired reads in taxonomic assignment...");

            // step 0: set up classification algorithms
            final double minPercentReadToCover = doc.getMinPercentReadToCover();
            // NOTE(review): this counter is never incremented anywhere in this method, so the
            // "Low covered" report below can never print; confirm whether ensureCovered()
            // failures were intended to increment it.
            int numberOfReadsFailedCoveredThreshold = 0;
            final IntervalTree<Object> intervals;
            if (minPercentReadToCover > 0 && doc.isLongReads() || doc.getReadAssignmentMode() == Document.ReadAssignmentMode.alignedBases)
                intervals = new IntervalTree<>();
            else
                intervals = null;

            if (minPercentReadToCover > 0)
                System.err.printf("Minimum percentage of read to be covered: %.1f%%%n", minPercentReadToCover);

            final boolean usingLongReadAlgorithm = (doc.getLcaAlgorithm() == Document.LCAAlgorithm.longReads);

            int ncbiTaxonomyId = -1; // index of the NCBI taxonomy among the classifications, -1 if absent

            final IAssignmentAlgorithmCreator[] assignmentAlgorithmCreators = new IAssignmentAlgorithmCreator[numberOfClassifications];
            for (int c = 0; c < numberOfClassifications; c++) {
                if (cNames[c].equals(Classification.Taxonomy))
                    ncbiTaxonomyId = c;
                if (useLCAForClassification[c]) {
                    switch (doc.getLcaAlgorithm()) {
                        case naive -> assignmentAlgorithmCreators[c] = new AssignmentUsingLCACreator(cNames[c], doc.isUseIdentityFilter(), doc.getLcaCoveragePercent());
                        case weighted ->
                                // we are assuming that taxonomy classification is The taxonomy classification
                                assignmentAlgorithmCreators[c] = new AssignmentUsingWeightedLCACreator(cNames[c], doc, doc.isUseIdentityFilter(), doc.getLcaCoveragePercent());
                        case longReads -> assignmentAlgorithmCreators[c] = new AssignmentUsingIntervalUnionLCACreator(cNames[c], doc);
                    }
                } else if (usingLongReadAlgorithm)
                    assignmentAlgorithmCreators[c] = new AssignmentUsingMultiGeneBestHitCreator(cNames[c], doc.getMeganFile().getFileName());
                else
                    assignmentAlgorithmCreators[c] = new AssignmentUsingBestHitCreator(cNames[c], doc.getMeganFile().getFileName());
            }

            // optional filter that discards matches to insufficiently covered reference sequences:
            final ReferenceCoverFilter referenceCoverFilter;
            if (doc.getMinPercentReferenceToCover() > 0) {
                referenceCoverFilter = new ReferenceCoverFilter(doc.getMinPercentReferenceToCover());
                referenceCoverFilter.compute(doc.getProgressListener(), doc.getConnector(), doc.getMinScore(), doc.getTopPercent(), doc.getMaxExpected(), doc.getMinPercentIdentity());
            } else
                referenceCoverFilter = null;

            // step 1: stream through reads and assign classes

            long numberOfReadsFound = 0;
            double totalWeight = 0;
            long numberOfMatches = 0;
            long numberOfReadsWithLowComplexity = 0;
            long numberOfReadsTooShort=0;
            long numberOfReadsWithHits = 0;
            long numberAssignedViaMatePair = 0;

            final int[] countUnassigned = new int[numberOfClassifications];
            final int[] countAssigned = new int[numberOfClassifications];

            final IAssignmentAlgorithm[] assignmentAlgorithm = new IAssignmentAlgorithm[numberOfClassifications];
            for (int c = 0; c < numberOfClassifications; c++)
                assignmentAlgorithm[c] = assignmentAlgorithmCreators[c].createAssignmentAlgorithm();

            // valid class ids per classification, used to map unknown ids to "unassigned":
            final Set<Integer>[] knownIds = new HashSet[numberOfClassifications];
            for (int c = 0; c < numberOfClassifications; c++) {
                knownIds[c] = new HashSet<>();
                knownIds[c].addAll(ClassificationManager.get(cNames[c], true).getName2IdMap().getIds());
            }

            final IConnector connector = doc.getConnector();
            // separate reader used to seek to a read's mate when processing paired reads:
            final InputOutputReaderWriter mateReader = doMatePairs ? new InputOutputReaderWriter(doc.getMeganFile().getFileName(), "r") : null;

            // top-percent filtering is disabled for the long-read algorithm:
            final float topPercentForActiveMatchFiltering;
            if (usingLongReadAlgorithm) {
                topPercentForActiveMatchFiltering = 0;
            } else
                topPercentForActiveMatchFiltering = doc.getTopPercent();

            final int[] classIds = new int[numberOfClassifications];
            // the long-read algorithm can produce additional per-segment assignments:
            final ArrayList<int[]>[] moreClassIds;
            final float[] multiGeneWeights;
            if (usingLongReadAlgorithm) {
                moreClassIds = new ArrayList[numberOfClassifications];
                for (int c = 0; c < numberOfClassifications; c++)
                    moreClassIds[c] = new ArrayList<>();
                multiGeneWeights = new float[numberOfClassifications];
            } else {
                moreClassIds = null;
                multiGeneWeights = null;
            }

            final ReadAssignmentCalculator readAssignmentCalculator = new ReadAssignmentCalculator(doc.getReadAssignmentMode());

            System.err.println("Binning reads...");
            progress.setTasks("Binning reads", "Analyzing alignments");

            try (final IReadBlockIterator it = connector.getAllReadsIterator(0, 10, false, true)) {
                progress.setMaximum(it.getMaximumProgress());
                progress.setProgress(0);

                final ReadBlockRMA6 mateReadBlock;
                if (doMatePairs) {
                    try (RMA6File RMA6File = new RMA6File(doc.getMeganFile().getFileName(), "r")) {
                        final String[] matchClassificationNames = RMA6File.getHeaderSectionRMA6().getMatchClassNames();
                        mateReadBlock = new ReadBlockRMA6(doc.getBlastMode(), true, matchClassificationNames);
                    }
                } else
                    mateReadBlock = null;

                while (it.hasNext()) {
                    progress.setProgress(it.getProgress());

                    // clean up previous values
                    for (int c = 0; c < numberOfClassifications; c++) {
                        classIds[c] = 0;
                        if (usingLongReadAlgorithm) {
                            moreClassIds[c].clear();
                            multiGeneWeights[c] = 0;
                        }
                    }

                    final IReadBlock readBlock = it.next();

                    if (readBlock.getNumberOfAvailableMatchBlocks() > 0)
                        numberOfReadsWithHits += readBlock.getReadWeight();

                    // read weight depends on the configured read-assignment mode (e.g. aligned bases):
                    readBlock.setReadWeight(readAssignmentCalculator.compute(readBlock, intervals));

                    numberOfReadsFound++;
                    totalWeight += readBlock.getReadWeight();
                    numberOfMatches += readBlock.getNumberOfMatches();

                    final boolean tooShort=readBlock.getReadLength()>0 && readBlock.getReadLength()<doc.getMinReadLength();

                    if(tooShort)
                        numberOfReadsTooShort+=readBlock.getReadWeight();

                    final boolean hasLowComplexity = readBlock.getComplexity() > 0 && readBlock.getComplexity() + 0.01 < doc.getMinComplexity();

                    if (hasLowComplexity)
                        numberOfReadsWithLowComplexity += readBlock.getReadWeight();

                    int taxId = 0; // taxonomic assignment of this read, used for the contaminant check below

                    if (!tooShort && !hasLowComplexity) {
                        // first pass: LCA-based (taxonomic) classifications
                        for (int c = 0; c < numberOfClassifications; c++) {
                            classIds[c] = 0;
                            if (useLCAForClassification[c]) {
                                final BitSet activeMatchesForTaxa = new BitSet(); // pre filter matches for taxon identification
                                ActiveMatches.compute(doc.getMinScore(), topPercentForActiveMatchFiltering, doc.getMaxExpected(), doc.getMinPercentIdentity(), readBlock, cNames[c], activeMatchesForTaxa);

                                if (referenceCoverFilter != null)
                                    referenceCoverFilter.applyFilter(readBlock, activeMatchesForTaxa);

                                if (minPercentReadToCover == 0 || ensureCovered(minPercentReadToCover, readBlock, activeMatchesForTaxa, intervals)) {
                                    if (doMatePairs && readBlock.getMateUId() > 0) {
                                        // combine this read's assignment with its mate's via the LCA:
                                        mateReader.seek(readBlock.getMateUId());
                                        mateReadBlock.read(mateReader, false, true, doc.getMinScore(), doc.getMaxExpected());
                                        classIds[c] = assignmentAlgorithm[c].computeId(activeMatchesForTaxa, readBlock);
                                        final BitSet activeMatchesForMateTaxa = new BitSet(); // pre filter matches for mate-based taxon identification
                                        ActiveMatches.compute(doc.getMinScore(), topPercentForActiveMatchFiltering, doc.getMaxExpected(), doc.getMinPercentIdentity(), mateReadBlock, cNames[c], activeMatchesForMateTaxa);
                                        if (referenceCoverFilter != null)
                                            referenceCoverFilter.applyFilter(readBlock, activeMatchesForMateTaxa);

                                        int mateTaxId = assignmentAlgorithm[c].computeId(activeMatchesForMateTaxa, mateReadBlock);
                                        if (mateTaxId > 0) {
                                            if (classIds[c] <= 0) {
                                                // only the mate has an assignment, adopt it:
                                                classIds[c] = mateTaxId;
                                                if (c == ncbiTaxonomyId)
                                                    numberAssignedViaMatePair++;
                                            } else {
                                                // prefer the more specific of the two compatible assignments:
                                                int bothId = assignmentAlgorithm[c].getLCA(classIds[c], mateTaxId);
                                                if (bothId == classIds[c])
                                                    classIds[c] = mateTaxId;
                                                    // else if(bothId==taxId) taxId=taxId; // i.e, no change
                                                else if (bothId != mateTaxId)
                                                    classIds[c] = bothId;
                                            }
                                        }
                                    } else {
                                        classIds[c] = assignmentAlgorithm[c].computeId(activeMatchesForTaxa, readBlock);
                                    }
                                }

                                // contaminant check applies only to the NCBI taxonomy:
                                if (c == ncbiTaxonomyId) {
                                    if (contaminantManager != null && ((doc.isLongReads() && contaminantManager.isContaminantLongRead(classIds[c]))
                                                                       || (!doc.isLongReads() && contaminantManager.isContaminantShortRead(readBlock, activeMatchesForTaxa))))
                                        classIds[c] = IdMapper.CONTAMINANTS_ID;
                                }
                            }
                            if (c == ncbiTaxonomyId) {
                                taxId = classIds[c];
                            }
                        }
                    } // end !lowComplexity

                    // second pass: finalize ids (special bins, best-hit classifications, bookkeeping)
                    for (int c = 0; c < numberOfClassifications; c++) {
                        int id;
                        if (taxId == IdMapper.CONTAMINANTS_ID) {
                            id = IdMapper.CONTAMINANTS_ID;
                        } else if (hasLowComplexity) {
                            id = IdMapper.LOW_COMPLEXITY_ID;
                        }
                        else if (tooShort) {
                            id = IdMapper.UNASSIGNED_ID;
                        } else if (useLCAForClassification[c]) {
                            id = classIds[c];
                        } else {
                            final BitSet activeMatchesForFunction = new BitSet(); // pre filter matches for taxon identification
                            ActiveMatches.compute(doc.getMinScore(), topPercentForActiveMatchFiltering, doc.getMaxExpected(), doc.getMinPercentIdentity(), readBlock, cNames[c], activeMatchesForFunction);

                            if (referenceCoverFilter != null)
                                referenceCoverFilter.applyFilter(readBlock, activeMatchesForFunction);

                            id = assignmentAlgorithm[c].computeId(activeMatchesForFunction, readBlock);

                            if (id > 0 && usingLongReadAlgorithm && assignmentAlgorithm[c] instanceof IMultiAssignmentAlgorithm) {
                                // split the read weight evenly across the additional per-segment assignments:
                                int numberOfSegments = ((IMultiAssignmentAlgorithm) assignmentAlgorithm[c]).getAdditionalClassIds(c, numberOfClassifications, moreClassIds[c]);
                                multiGeneWeights[c] = (numberOfSegments > 0 ? (float) readBlock.getReadWeight() / (float) numberOfSegments : 0);
                            }
                        }

                        if (id <= 0 && readBlock.getNumberOfAvailableMatchBlocks() == 0)
                            id = IdMapper.NOHITS_ID;
                        else if (!knownIds[c].contains(id) && (!usingLongReadAlgorithm || !nonEmptyIntersection(knownIds[c], c, moreClassIds[c])))
                            id = IdMapper.UNASSIGNED_ID;

                        classIds[c] = id;
                        if (id == IdMapper.UNASSIGNED_ID)
                            countUnassigned[c]++;
                        else if (id > 0)
                            countAssigned[c]++;
                    }
                    updateList.addItem(readBlock.getUId(), readBlock.getReadWeight(), classIds);

                    if (usingLongReadAlgorithm) {
                        for (int c = 0; c < numberOfClassifications; c++) {
                            for (int[] classId : moreClassIds[c]) {
                                updateList.addItem(readBlock.getUId(), multiGeneWeights[c], classId);
                            }
                        }
                    }
                }
            } catch (Exception ex) {
                Basic.caught(ex);
            } finally {
                if (mateReader != null)
                    mateReader.close();
            }

            if (progress.isUserCancelled())
                throw new CanceledException();

            progress.reportTaskCompleted();

            System.err.printf("Total reads:   %,15d%n", numberOfReadsFound);
            if (totalWeight > numberOfReadsFound)
                System.err.printf("Total weight:  %,15d%n", (long) totalWeight);

            if (numberOfReadsWithLowComplexity > 0)
                System.err.printf("Low complexity:%,15d%n", numberOfReadsWithLowComplexity);

            if (numberOfReadsTooShort > 0)
                System.err.printf("Reads too short:%,15d%n", numberOfReadsTooShort);

            if (numberOfReadsFailedCoveredThreshold > 0)
                System.err.printf("Low covered:   %,15d%n", numberOfReadsFailedCoveredThreshold);

            System.err.printf("With hits:     %,15d %n", numberOfReadsWithHits);
            System.err.printf("Alignments:    %,15d%n", numberOfMatches);

            for (int c = 0; c < numberOfClassifications; c++) {
                System.err.printf("%-19s%,11d%n", "Assig. " + cNames[c] + ":", countAssigned[c]);
            }

            // if used mate pairs, report here:
            if (numberAssignedViaMatePair > 0) {
                System.err.printf("Tax. ass. by mate:%,12d%n", numberAssignedViaMatePair);
            }

            progress.setCancelable(false); // can't cancel beyond here because file could be left in undefined state

            doc.setNumberReads(numberOfReadsFound);

            // If min support percentage is set, set the min support:
            if (doc.getMinSupportPercent() > 0) {
                doc.setMinSupport((int) Math.max(1, (doc.getMinSupportPercent() / 100.0) * (totalWeight)));
                System.err.println("MinSupport set to: " + doc.getMinSupport());
            }

            // 2. apply min support and disabled taxa filter

            for (int c = 0; c < numberOfClassifications; c++) {
                final String cName = cNames[c];
                // todo: need to remove assignments to disabled ids when not using the LCA algorithm
                if (useLCAForClassification[c] && countAssigned[c] > 0 && (doc.getMinSupport() > 0 || ClassificationManager.get(cName, false).getIdMapper().getDisabledIds().size() > 0)) {
                    progress.setTasks("Binning reads", "Applying min-support & disabled filter to " + cName + "...");
                    final MinSupportFilter minSupportFilter = new MinSupportFilter(cName, updateList.getClassIdToWeightMap(c), doc.getMinSupport(), progress);
                    final Map<Integer, Integer> changes = minSupportFilter.apply();

                    for (Integer srcId : changes.keySet()) {
                        updateList.appendClass(c, srcId, changes.get(srcId));
                    }
                    System.err.printf("Min-supp. changes:%,12d%n", changes.size());
                }
            }

            // 3. save classifications

            progress.setTasks("Binning reads", "Writing classification tables");

            connector.updateClassifications(cNames, updateList, progress);
            connector.setNumberOfReads((int) doc.getNumberOfReads());

            // 4. sync
            progress.setTasks("Binning reads", "Syncing");
            SyncArchiveAndDataTable.syncRecomputedArchive2Summary(doc.getReadAssignmentMode(), doc.getTitle(), "LCA", doc.getBlastMode(), doc.getParameterString(), connector, doc.getDataTable(), (int) doc.getAdditionalReads());

            if (progress instanceof ProgressPercentage)
                progress.reportTaskCompleted();

            // MeganProperties.addRecentFile(new File(doc.getMeganFile().getFileName()));
            doc.setDirty(false);

            // report classification sizes:
            for (String cName : cNames) {
                System.err.printf("Class. %-13s%,10d%n", cName + ":", connector.getClassificationSize(cName));
            }

            return (int) doc.getDataTable().getTotalReads();
        } catch (IOException ex) {
            Basic.caught(ex);
            NotificationsInSwing.showInternalError("Data Processor failed: " + ex.getMessage());
        }
        return 0;
    }

    /**
     * is one of the class ids known?
     *
     * @param knownIds     set of valid class ids for classification classId
     * @param classId      index of the classification within each int[] entry
     * @param moreClassIds additional per-segment class-id arrays produced by the long-read algorithm
     * @return true if any additional assignment contains a known id for this classification
     */
    private static boolean nonEmptyIntersection(Set<Integer> knownIds, int classId, ArrayList<int[]> moreClassIds) {
        for (int[] array : moreClassIds) {
            if (knownIds.contains(array[classId]))
                return true;
        }
        return false;
    }

    /**
     * check that enough of read is covered by alignments
     *
     * @param minCoveredPercent percent of read that must be covered
     * @param readBlock         the read and its matches
     * @param activeMatches     indices of the matches to consider
     * @param intervals         this will be non-null in long read mode, in which case we check the total cover, otherwise, we check the amount covered by any one match
     * @return true, if sufficient coverage
     */
    private static boolean ensureCovered(double minCoveredPercent, IReadBlock readBlock, BitSet activeMatches, IntervalTree<Object> intervals) {
        int lengthToCover = (int) (0.01 * minCoveredPercent * readBlock.getReadLength());
        if (lengthToCover == 0)
            return true;

        if (intervals != null)
            intervals.clear();

        for (int m = activeMatches.nextSetBit(0); m != -1; m = activeMatches.nextSetBit(m + 1)) {
            final IMatchBlock matchBlock = readBlock.getMatchBlock(m);
            // short-read mode: a single sufficiently long alignment suffices
            if (Math.abs(matchBlock.getAlignedQueryEnd() - matchBlock.getAlignedQueryStart()) >= lengthToCover)
                return true;
            if (intervals != null) {
                // long-read mode: accumulate the union of all aligned intervals
                Interval<Object> interval = new Interval<>(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd(), null);
                intervals.add(interval);
                if (intervals.getCovered() >= lengthToCover)
                    return true;
            }
        }
        return false;
    }
}
| 24,068 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
TopAssignment.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/TopAssignment.java | /*
* TopAssignment.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.util.Pair;
import megan.classification.IdMapper;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import java.util.*;
/**
 * computes the top classification assignments for a read
 * Daniel Huson, 5.2012
 */
public class TopAssignment {
    /**
     * computes the top assignments for a read
     *
     * @param classificationName name of the classification; its first letter is used to label ids in the output
     * @param activeMatches      matches to consider
     * @param readBlock          the read and its matches
     * @param ranksToReport      maximum number of classes to report
     * @return formatted top assignments, or "" if there are none
     */
    public static String compute(String classificationName, BitSet activeMatches, IReadBlock readBlock, int ranksToReport) {
        if (activeMatches.cardinality() == 0)
            return "";

        int totalClassMatches = 0;
        final Map<Integer, Integer> classId2Count = new HashMap<>();
        for (int i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
            final IMatchBlock matchBlock = readBlock.getMatchBlock(i);
            final int classId = matchBlock.getId(classificationName);
            if (classId > 0) {
                classId2Count.merge(classId, 1, Integer::sum); // count matches per class
                totalClassMatches++;
            }
        }

        if (classId2Count.isEmpty())
            return "";
        else if (classId2Count.size() == 1) { // only one class: it gets 100%
            final Integer classId = classId2Count.keySet().iterator().next();
            final String classificationLetter = classificationName.substring(0, 1);
            return String.format(" [1] %s%05d: 100 # %d", classificationLetter, classId, classId2Count.get(classId));
        } else {
            // order by decreasing count, ties broken by ascending class id:
            final SortedSet<Pair<Integer, Integer>> sorted = new TreeSet<>((idAndCount1, idAndCount2) -> {
                if (idAndCount1.getSecond() > idAndCount2.getSecond())
                    return -1;
                else if (idAndCount1.getSecond() < idAndCount2.getSecond())
                    return 1;
                else
                    return idAndCount1.getFirst().compareTo(idAndCount2.getFirst());
            });
            for (Map.Entry<Integer, Integer> entry : classId2Count.entrySet()) {
                sorted.add(new Pair<>(entry.getKey(), entry.getValue()));
            }

            final int top = Math.min(sorted.size(), ranksToReport);
            if (top == 0)
                return "";
            else {
                final String classificationLetter = classificationName.substring(0, 1);
                int countItems = 0;
                final StringBuilder buf = new StringBuilder();
                for (Pair<Integer, Integer> idAndCount : sorted) {
                    countItems++;
                    buf.append(String.format(" [%d] %s%05d: %.1f", countItems, classificationLetter, idAndCount.getFirst(), (100.0 * idAndCount.getSecond()) / totalClassMatches));
                    if (countItems >= top)
                        break;
                }
                buf.append(" # ").append(totalClassMatches);
                return buf.toString();
            }
        }
    }

    /**
     * compute the class id for a read from its matches
     *
     * @param cName              classification name
     * @param minScore           minimum bit score for a match to be considered
     * @param maxExpected        maximum expected value for a match to be considered
     * @param minPercentIdentity minimum percent identity; a value of 0 disables this filter
     * @param readBlock          the read and its matches
     * @return id of the first match passing all filters, NOHITS_ID if the read has no matches at all,
     * or UNASSIGNED_ID if no match passes
     */
    public static int computeId(String cName, float minScore, float maxExpected, float minPercentIdentity, IReadBlock readBlock) {
        if (readBlock.getNumberOfMatches() == 0)
            return IdMapper.NOHITS_ID;

        for (int i = 0; i < readBlock.getNumberOfAvailableMatchBlocks(); i++) {
            final IMatchBlock match = readBlock.getMatchBlock(i);
            if (match.getBitScore() >= minScore && match.getExpected() <= maxExpected && (minPercentIdentity == 0 || match.getPercentIdentity() >= minPercentIdentity)) {
                final int id = match.getId(cName);
                if (id != 0)
                    return id;
            }
        }
        return IdMapper.UNASSIGNED_ID;
    }
}
| 4,618 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingMultiGeneBestHitCreator.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingMultiGeneBestHitCreator.java | /*
* AssignmentUsingMultiGeneBestHitCreator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
/**
 * create a best hit assignment algorithm
 * Daniel Huson, 3.2016
 */
public class AssignmentUsingMultiGeneBestHitCreator implements IAssignmentAlgorithmCreator {
    private final String cName;

    /**
     * constructor
     *
     * @param cName    name of the classification to bin by
     * @param fileName input file name (currently unused by this creator)
     */
    public AssignmentUsingMultiGeneBestHitCreator(String cName, String fileName) {
        this.cName = cName;
        final String message = "Using Multi-Gene Best-Hit algorithm for binning: " + cName;
        System.err.println(message);
    }

    /**
     * creates an assignment algorithm
     *
     * @return a fresh multi-gene best-hit assignment algorithm for the configured classification
     */
    @Override
    public IMultiAssignmentAlgorithm createAssignmentAlgorithm() {
        return new AssignmentUsingMultiGeneBestHit(cName);
    }
}
| 1,554 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
MinSupportFilter.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/MinSupportFilter.java | /*
* MinSupportFilter.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.phylo.PhyloTree;
import jloda.util.CanceledException;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * apply the min-support filter and also moves assignments to disabled nodes up the tree
 * Daniel Huson, 4.2010, 3.2016
 */
public class MinSupportFilter {
    private final Map<Integer, Float> id2weight; // class id -> weight (e.g. number of assigned reads)
    private final int minSupport; // minimum weight on or below a node required to keep an assignment
    private final ProgressListener progress; // may be null
    private final PhyloTree tree;
    private final IdMapper idMapper;

    /**
     * constructor
     *
     * @param cName      name of the classification
     * @param id2weight  maps each class id to its weight
     * @param minSupport minimum total weight on or below a node for it to keep assignments
     * @param progress   progress listener, may be null
     */
    public MinSupportFilter(String cName, Map<Integer, Float> id2weight, int minSupport, final ProgressListener progress) {
        this.id2weight = id2weight;
        this.minSupport = minSupport;
        this.progress = progress;
        tree = ClassificationManager.get(cName, true).getFullTree();
        this.idMapper = ClassificationManager.get(cName, false).getIdMapper();
    }

    /**
     * applies the min support filter to taxon classification
     *
     * @return mapping of old taxon ids to new (ancestor) taxon ids
     * @throws CanceledException if the user cancels via the progress listener
     */
    public Map<Integer, Integer> apply() throws CanceledException {
        final Map<Integer, Integer> orphan2AncestorMapping = new HashMap<>();
        if (progress != null) {
            progress.setMaximum(tree.getNumberOfNodes());
            progress.setProgress(0);
        }
        final Set<Integer> orphans = new HashSet<>();
        if (tree.getRoot() != null)
            computeOrphan2AncestorMappingRec(tree.getRoot(), orphan2AncestorMapping, orphans);

        // Any orphans that popped out of the top of the taxonomy are mapped to unassigned
        for (Integer id : orphans) {
            orphan2AncestorMapping.put(id, IdMapper.UNASSIGNED_ID);
        }
        orphans.clear();

        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();
        return orphan2AncestorMapping;
    }

    /**
     * recursively move all reads that land on taxa with too little support or on a disabled taxon to higher level nodes
     *
     * @param v                      current node
     * @param orphan2AncestorMapping output: orphan id -> id of the strong ancestor it is mapped to
     * @param orphans                output: ids on or below v that still need a strong ancestor
     * @return total weight on or below this node
     * @throws CanceledException if the user cancels via the progress listener
     */
    private float computeOrphan2AncestorMappingRec(Node v, Map<Integer, Integer> orphan2AncestorMapping, Set<Integer> orphans) throws CanceledException {
        if (progress != null)
            progress.incrementProgress();
        final int taxId = (Integer) v.getInfo();

        if (taxId < 0)
            return 0; // ignore nohits and unassigned

        float below = 0;

        final Set<Integer> orphansBelow = new HashSet<>();
        for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
            final Node w = e.getTarget();
            below += computeOrphan2AncestorMappingRec(w, orphan2AncestorMapping, orphansBelow);
        }
        final float weight = id2weight.getOrDefault(taxId, 0f);

        if (below + weight >= minSupport && !idMapper.isDisabled(taxId)) // this is a strong node, map all orphans to here
        {
            for (Integer id : orphansBelow) {
                orphan2AncestorMapping.put(id, taxId);
            }
        } else // this node is not strong enough, pass all orphans up
        {
            if (weight > 0) // this node has reads assigned to it, pass it up as an orphan
            {
                orphansBelow.add(taxId);
            }
            orphans.addAll(orphansBelow);
        }
        return below + weight;
    }
}
| 4,587 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
IAssignmentAlgorithm.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/IAssignmentAlgorithm.java | /*
* IAssignmentAlgorithm.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import megan.data.IReadBlock;
import java.util.BitSet;
/**
 * Assignment algorithm: maps a read's active matches to a single class id.
 * Daniel Huson, 1.2016
 */
public interface IAssignmentAlgorithm {
    /**
     * compute the id for a set of active matches
     *
     * @param activeMatches the matches to consider
     * @param readBlock     the read and its matches
     * @return id
     */
    int computeId(BitSet activeMatches, IReadBlock readBlock);

    /**
     * compute the lowest common ancestor of two class ids.
     * NOTE(review): implementations in this package appear to treat an id of 0 as "absent"
     * and return the other id — confirm against the concrete implementation before relying on it.
     *
     * @param id1 first class id
     * @param id2 second class id
     * @return the LCA of id1 and id2
     */
    int getLCA(int id1, int id2);
}
| 1,188 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReferenceCoverFilter.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/ReferenceCoverFilter.java | /*
* ReferenceCoverFilter.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.fx.util.ProgramExecutorService;
import jloda.util.Basic;
import jloda.util.CanceledException;
import jloda.util.interval.IntervalChain;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.daa.connector.ReadBlockDAA;
import megan.data.IConnector;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import megan.data.IReadBlockIterator;
import megan.util.BlastParsingUtils;

import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
/**
 * Computes all references that are covered to the desired level and then can be used to filter matches
 * Daniel Huson, 3.2018
 */
public class ReferenceCoverFilter {
    private boolean isActive = false;
    private float proportionToCover = 0;

    private final Set<String> referencesToUse = new HashSet<>();

    private final int mask = 1023; // 2^10 - 1
    private final Object[] sync = new Object[mask + 1]; // striped locks, indexed by refId.hashCode() & mask

    /**
     * Constructor
     *
     * @param percentToCover percentage of a reference that must be covered for the reference to be used
     */
    public ReferenceCoverFilter(float percentToCover) {
        // one distinct lock per slot; the previous Arrays.fill(sync, new Object()) placed the SAME
        // object in every slot, which serialized all updates on a single lock and defeated the striping
        for (int i = 0; i < sync.length; i++)
            sync[i] = new Object();
        setPercentToCover(percentToCover);
    }

    /**
     * apply the filter: determine which references are covered to at least the configured percentage
     *
     * @param progress           progress listener
     * @param connector          provides access to all reads and their matches
     * @param minScore           minimum bit score for a match to be considered
     * @param topPercent         top-percent filter value
     * @param maxExpected        maximum expected value for a match to be considered
     * @param minPercentIdentity minimum percent identity for a match to be considered
     */
    public void compute(ProgressListener progress, final IConnector connector, final float minScore, final float topPercent, final float maxExpected, final float minPercentIdentity) throws CanceledException, IOException {
        isActive = false;
        referencesToUse.clear();

        if (getPercentToCover() > 0) {
            // concurrent maps: entries are created from multiple worker threads (different stripes)
            final Map<String, Integer> ref2length = new ConcurrentHashMap<>();
            final Map<String, IntervalChain> ref2intervals = new ConcurrentHashMap<>();

            progress.setSubtask("Determining reference coverage");
            System.err.printf("Running reference coverage filter with threshold=%.1f%%%n", getPercentToCover());

            final int numberOfThreads = Math.min(ProgramExecutorService.getNumberOfCoresToUse(), connector.getNumberOfReads());
            if (numberOfThreads == 0)
                return; // no reads

            // pool sized to the number of submitted worker tasks (was the raw core count)
            final ExecutorService service = Executors.newFixedThreadPool(numberOfThreads);
            try {
                final CountDownLatch countDownLatch = new CountDownLatch(numberOfThreads);
                final IReadBlock sentinel = new ReadBlockDAA();
                final LinkedBlockingQueue<IReadBlock> queue = new LinkedBlockingQueue<>(1000);

                for (int t = 0; t < numberOfThreads; t++) {
                    service.submit(() -> {
                        try {
                            while (true) {
                                final IReadBlock readBlock = queue.take();
                                if (readBlock == sentinel)
                                    break;
                                final BitSet activeMatches = new BitSet(); // pre filter matches for taxon identification
                                ActiveMatches.compute(minScore, topPercent, maxExpected, minPercentIdentity, readBlock, null, activeMatches);

                                for (int m = activeMatches.nextSetBit(0); m != -1; m = activeMatches.nextSetBit(m + 1)) {
                                    final IMatchBlock matchBlock = readBlock.getMatchBlock(m);
                                    final String refId = matchBlock.getTextFirstWord();
                                    ref2length.putIfAbsent(refId, matchBlock.getRefLength());
                                    final IntervalChain intervals = ref2intervals.computeIfAbsent(refId, k -> new IntervalChain());

                                    final String matchText = matchBlock.getText();
                                    final int start = BlastParsingUtils.getStartSubject(matchText);
                                    final int end = BlastParsingUtils.getEndSubject(matchText);
                                    // IntervalChain itself is mutated, so guard it with the per-reference stripe;
                                    // all updates for a given refId always map to the same lock
                                    synchronized (sync[refId.hashCode() & mask]) {
                                        intervals.add(start, end);
                                    }
                                }
                            }
                        } catch (Exception e) {
                            Basic.caught(e);
                        } finally {
                            countDownLatch.countDown();
                        }
                    });
                }

                // feed the queue
                try (final IReadBlockIterator it = connector.getAllReadsIterator(0, 10, false, true)) {
                    progress.setMaximum(it.getMaximumProgress());
                    progress.setProgress(0);
                    while (it.hasNext()) {
                        try {
                            queue.put(it.next());
                        } catch (InterruptedException e) {
                            Basic.caught(e);
                            Thread.currentThread().interrupt(); // restore interrupt status
                            break;
                        }
                        progress.setProgress(it.getProgress());
                    }
                    for (int i = 0; i < numberOfThreads; i++) { // one sentinel per worker
                        try {
                            queue.put(sentinel);
                        } catch (InterruptedException e) {
                            Basic.caught(e);
                            Thread.currentThread().interrupt();
                            break;
                        }
                    }
                }
                try {
                    countDownLatch.await();
                } catch (InterruptedException e) {
                    Basic.caught(e);
                    Thread.currentThread().interrupt();
                }
            } finally {
                service.shutdownNow();
            }

            // keep every reference whose covered length reaches the required proportion:
            for (Map.Entry<String, Integer> entry : ref2length.entrySet()) {
                final Integer length = entry.getValue();
                if (length != null) {
                    final IntervalChain intervalChain = ref2intervals.get(entry.getKey());
                    if (intervalChain != null && intervalChain.getLength() >= proportionToCover * length)
                        referencesToUse.add(entry.getKey());
                }
            }

            if (progress instanceof ProgressPercentage)
                progress.reportTaskCompleted();
            System.err.println("Reference cover filter: using " + referencesToUse.size() + " of " + ref2intervals.size() + " references");

            if (referencesToUse.size() == ref2intervals.size()) {
                isActive = false;
                referencesToUse.clear(); // nothing filtered, might as well clear
            } else
                isActive = true;
        }
    }

    private Set<String> getReferencesToUse() {
        return referencesToUse;
    }

    /**
     * should the given reference be used? Always true while the filter is inactive.
     */
    public boolean useReference(String refId) {
        return !isActive || referencesToUse.contains(refId);
    }

    private float getPercentToCover() {
        return 100 * proportionToCover;
    }

    private void setPercentToCover(float percent) {
        this.proportionToCover = percent / 100.0f;
    }

    public boolean isActive() {
        return isActive;
    }

    /**
     * apply the filter to the set of active matches: clears every match whose reference is not used
     */
    public void applyFilter(IReadBlock readBlock, BitSet activeMatches) {
        if (isActive) {
            for (int m = activeMatches.nextSetBit(0); m != -1; m = activeMatches.nextSetBit(m + 1)) {
                final String refId = readBlock.getMatchBlock(m).getTextFirstWord();
                if (!getReferencesToUse().contains(refId))
                    activeMatches.set(m, false);
            }
        }
    }
}
| 8,743 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingIntervalUnionLCA.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingIntervalUnionLCA.java | /*
* AssignmentUsingIntervalUnionLCA.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.data.ClassificationFullTree;
import megan.core.Document;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import megan.viewer.TaxonomyData;
import java.util.*;
/**
 * performs taxonId assignment using a union-based algorithm
 * Created by huson on 4/12/17.
 * <p>
 * Note: instances reuse internal working state (allNodes, taxa2intervals, node2covered, events)
 * across calls of computeId and are therefore not thread-safe.
 */
public class AssignmentUsingIntervalUnionLCA implements IAssignmentAlgorithm {
    private final String cName;
    private final float weightedPercentFactor; // fraction (0..1) of covered bases required at a node
    private final float topPercent;
    private final ClassificationFullTree fullTree;

    // all these are used during computation:
    private final HashSet<Node> allNodes = new HashSet<>();
    private final HashMap<Integer, IntervalList> taxa2intervals = new HashMap<>();
    private final Map<Node, Integer> node2covered = new HashMap<>();

    private StartStopEvent[] events = new StartStopEvent[10000]; // not final because may get resized...
    private final Comparator<StartStopEvent> comparator;

    /**
     * constructor
     *
     * @param cName classification name
     * @param doc   document providing the LCA coverage percent and top percent parameters
     */
    public AssignmentUsingIntervalUnionLCA(final String cName, Document doc) {
        this.cName = cName;
        this.weightedPercentFactor = Math.min(1f, doc.getLcaCoveragePercent() / 100.0f);
        this.topPercent = doc.getTopPercent();
        this.fullTree = ClassificationManager.get(cName, true).getFullTree();
        comparator = createComparator();
    }

    /**
     * compute taxonId id
     *
     * @param activeMatches matches to consider
     * @param readBlock     the read and its matches
     * @return taxonId id
     */
    public int computeId(BitSet activeMatches, IReadBlock readBlock) {
        if (readBlock.getNumberOfMatches() == 0)
            return IdMapper.NOHITS_ID;
        if (activeMatches.cardinality() == 0)
            return IdMapper.UNASSIGNED_ID;

        taxa2intervals.clear();
        computeTaxaToSegmentsMap(activeMatches, readBlock, taxa2intervals);

        if (taxa2intervals.isEmpty())
            return IdMapper.UNASSIGNED_ID;

        if (taxa2intervals.size() == 1)
            return taxa2intervals.keySet().iterator().next();

        allNodes.clear();
        final Node root = computeInducedTree(taxa2intervals, allNodes);

        node2covered.clear();
        computeCoveredBasesRec(root, allNodes, taxa2intervals, node2covered);

        // a node must cover at least this many bases to be considered:
        final double threshold = weightedPercentFactor * node2covered.get(root);
        return getLCA(root, allNodes, node2covered, threshold);
    }

    /**
     * computes the taxon to segments map. On each segment, we apply the top-percent filter
     *
     * @param activeMatches  matches to consider
     * @param readBlock      the read and its matches
     * @param taxa2intervals output: taxon id -> list of query intervals supporting that taxon
     */
    private void computeTaxaToSegmentsMap(BitSet activeMatches, IReadBlock readBlock, HashMap<Integer, IntervalList> taxa2intervals) {
        // determine all start and stop events:
        int numberOfEvents = 0;
        for (int m = activeMatches.nextSetBit(0); m != -1; m = activeMatches.nextSetBit(m + 1)) {
            final IMatchBlock matchBlock = readBlock.getMatchBlock(m);
            final int taxonId = matchBlock.getId(cName);
            if (taxonId > 0 && !TaxonomyData.isTaxonDisabled(cName, taxonId)) {
                if (numberOfEvents + 1 >= events.length) { // need enough to add two new events
                    final StartStopEvent[] tmp = new StartStopEvent[2 * events.length];
                    System.arraycopy(events, 0, tmp, 0, numberOfEvents);
                    events = tmp;
                }
                if (events[numberOfEvents] == null)
                    events[numberOfEvents] = new StartStopEvent();
                events[numberOfEvents++].set(true, Math.min(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd()), m);
                if (events[numberOfEvents] == null)
                    events[numberOfEvents] = new StartStopEvent();
                events[numberOfEvents++].set(false, Math.max(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd()), m);
            }
        }
        Arrays.sort(events, 0, numberOfEvents, comparator);

        final BitSet currentMatches = new BitSet(); // set of matches currently active
        final Map<Integer, Float> taxon2BestScore = new HashMap<>();

        // sweep over the sorted events; each pair of consecutive positions defines a segment:
        StartStopEvent previousEvent = null;
        for (int c = 0; c < numberOfEvents; c++) {
            final StartStopEvent currentEvent = events[c];
            if (previousEvent == null) {
                if (!currentEvent.isStart())
                    throw new RuntimeException("Taxon end before begin: " + currentEvent);
                currentMatches.set(currentEvent.getMatchId());
            } else {
                if (currentEvent.getPos() > previousEvent.getPos()) {
                    final int segmentLength = (currentEvent.getPos() - previousEvent.getPos() + 1); // length of segment
                    if (segmentLength > 0) {
                        taxon2BestScore.clear();
                        for (int m = currentMatches.nextSetBit(0); m != -1; m = currentMatches.nextSetBit(m + 1)) {
                            final IMatchBlock matchBlock = readBlock.getMatchBlock(m);
                            final int taxonId = matchBlock.getId(cName); // store the best score for each taxon
                            if (taxonId > 0 && !TaxonomyData.isTaxonDisabled(cName, taxonId))
                                taxon2BestScore.merge(taxonId, matchBlock.getBitScore(), Float::max);
                        }
                        // determine the top-percent threshold on the current segment:
                        float topPercentThreshold = 0;
                        for (Float value : taxon2BestScore.values()) {
                            topPercentThreshold = Math.max(topPercentThreshold, value);
                        }
                        topPercentThreshold = (100.0f - topPercent) / 100.0f * topPercentThreshold;

                        // add the segment for all taxa whose best match exceeds the threshold:
                        for (Map.Entry<Integer, Float> entry : taxon2BestScore.entrySet()) {
                            if (entry.getValue() >= topPercentThreshold) {
                                taxa2intervals.computeIfAbsent(entry.getKey(), k -> new IntervalList())
                                        .add(previousEvent.getPos(), currentEvent.getPos());
                            }
                        }
                    }
                }
                // update the set of current matches:
                if (currentEvent.isStart()) {
                    currentMatches.set(currentEvent.getMatchId());
                } else { // is end event
                    currentMatches.clear(currentEvent.getMatchId());
                }
            }
            previousEvent = currentEvent;
        }

        for (IntervalList list : taxa2intervals.values()) {
            list.setIsSorted(true); // initially, lists are sorted by construction
        }
    }

    /**
     * computes the set of all nodes that lie between the given taxa and their LCA
     *
     * @param taxa2intervals taxa of interest (keys)
     * @param allNodes       output: all nodes on paths from the taxa up to their common root
     * @return root node
     */
    private Node computeInducedTree(HashMap<Integer, IntervalList> taxa2intervals, Set<Node> allNodes) {
        // compute the local root node:
        final Node rootOfAllNodes;
        {
            final ArrayList<String> addresses = new ArrayList<>(taxa2intervals.size());
            for (Integer taxId : taxa2intervals.keySet()) {
                addresses.add(fullTree.getAddress(taxId));
            }
            final int rootId = fullTree.getAddress2Id(LCAAddressing.getCommonPrefix(addresses, false));
            rootOfAllNodes = fullTree.getANode(rootId);
        }

        allNodes.add(rootOfAllNodes);

        // add all nodes between that taxa and the root:
        for (Integer taxId : taxa2intervals.keySet()) {
            Node v = fullTree.getANode(taxId);
            if (v != null) {
                while (!allNodes.contains(v)) {
                    allNodes.add(v);
                    if (v.getInDegree() > 0)
                        v = v.getFirstInEdge().getSource();
                    else
                        break; // must be v==fullTree.getRoot()
                }
            }
        }
        return rootOfAllNodes;
    }

    /**
     * computes the number of bases that each taxon is covered by. Side effect is to change all taxa2intervals intervals.
     *
     * @param v              current node
     * @param allNodes       nodes of the induced tree
     * @param taxa2intervals taxon id -> intervals; modified: each node accumulates its children's intervals
     * @param node2covered   output: node -> number of covered bases
     * @return accumulated intervals for node v
     */
    private IntervalList computeCoveredBasesRec(final Node v, final HashSet<Node> allNodes, final HashMap<Integer, IntervalList> taxa2intervals, final Map<Node, Integer> node2covered) {
        final int taxId = (Integer) v.getInfo();
        final IntervalList intervals = taxa2intervals.computeIfAbsent(taxId, k -> new IntervalList());

        // get intervals of children:
        for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
            final Node w = e.getTarget();
            if (allNodes.contains(w)) {
                final IntervalList intervalsW = computeCoveredBasesRec(w, allNodes, taxa2intervals, node2covered);
                intervals.addAll(intervalsW.getAll()); // this will trigger recomputation of amount covered
            }
        }
        node2covered.put(v, intervals.getCovered());
        return intervals;
    }

    /**
     * computes the node that is above all nodes whose coverage meets the threshold
     *
     * @param v            root of the induced tree
     * @param allNodes     nodes of the induced tree
     * @param node2covered node -> number of covered bases
     * @param threshold    minimum number of covered bases required
     * @return LCA of all nodes that meet the threshold
     */
    private int getLCA(Node v, HashSet<Node> allNodes, Map<Node, Integer> node2covered, double threshold) {
        while (true) {
            Node bestChild = null;

            for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                final Node w = e.getTarget();
                if (allNodes.contains(w)) {
                    if (node2covered.get(w) >= threshold) {
                        if (bestChild == null)
                            bestChild = w;
                        else { // has at least two best children, return v
                            return (Integer) v.getInfo();
                        }
                    }
                }
            }
            if (bestChild != null)
                v = bestChild; // has exactly one child that beats threshold, move down to it
            else
                return (Integer) v.getInfo(); // no best child, return v
        }
    }

    @Override
    public int getLCA(int id1, int id2) {
        if (id1 == 0)
            return id2;
        else if (id2 == 0)
            return id1;
        else
            return fullTree.getAddress2Id(LCAAddressing.getCommonPrefix(new String[]{fullTree.getAddress(id1), fullTree.getAddress(id2)}, 2, false));
    }

    /**
     * orders events by position; a start event sorts before an end event at the same position
     */
    private Comparator<StartStopEvent> createComparator() {
        return (a, b) -> {
            if (a.getPos() < b.getPos())
                return -1;
            else if (a.getPos() > b.getPos())
                return 1;
            else if (a.isStart() && b.isEnd())
                return -1;
            else if (a.isEnd() && b.isStart())
                return 1;
            else
                return 0;
        };
    }

    // mutable, reusable event record for the sweep over query positions
    private static class StartStopEvent {
        private boolean start;
        private int pos;
        private int matchId;

        void set(boolean start, int pos, int matchId) {
            this.start = start;
            this.pos = pos;
            this.matchId = matchId;
        }

        boolean isStart() {
            return start;
        }

        boolean isEnd() {
            return !start;
        }

        int getPos() {
            return pos;
        }

        int getMatchId() {
            return matchId;
        }

        @Override
        public String toString() { // makes the "Taxon end before begin" exception message readable
            return (start ? "start" : "end") + "(pos=" + pos + ",matchId=" + matchId + ")";
        }
    }
}
| 13,175 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingWeightedLCA.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingWeightedLCA.java | /*
* AssignmentUsingWeightedLCA.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.swing.util.ProgramProperties;
import jloda.util.Basic;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.data.ClassificationFullTree;
import megan.classification.data.Name2IdMap;
import megan.daa.connector.MatchBlockDAA;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import java.util.*;
/**
* computes the assignment for a read, using the Weighted LCA algorithm
* This is essentially the same algorithm that is used in MetaScope
* <p>
* Daniel Huson, 3.2016
*/
public class AssignmentUsingWeightedLCA implements IAssignmentAlgorithm {
    private final String cName; // name of the classification (e.g. "Taxonomy")
    private final ClassificationFullTree fullTree;
    private final Name2IdMap name2IdMap;
    private final IdMapper idMapper;

    private final int[] refId2weight; // weight per reference id, used when ref2weight is null
    private final Map<String, Integer> ref2weight; // map reference sequence to number of reads associated with it
    private final Taxon2SpeciesMapping taxon2SpeciesMapping;

    private final boolean useIdentityFilter;
    private final float percentToCover; // percent of the total match weight that the returned LCA prefix must cover

    private final boolean allowBelowSpeciesAssignment = ProgramProperties.get("allowWeightedLCABelowSpecies", false);

    // scratch map reused across calls of computeWeightedLCA (this class is not thread-safe)
    private final Map<Character, Integer> ch2weight = new HashMap<>(Character.MAX_VALUE, 1f);

    // reusable scratch array of pre-allocated entries; grown on demand by resizeArray()
    private WeightedAddress[] addressingArray = new WeightedAddress[0];

    private boolean ignoreAncestors = true; // alignments to ancestors are considered ok

    /**
     * constructor
     *
     * @param cName                classification name
     * @param refId2Weight         weight per reference id, consulted when ref2weight is null
     * @param ref2weight           maps a reference-sequence name to its weight (number of associated reads); may be null
     * @param taxon2SpeciesMapping maps taxa below the species rank up to their species
     * @param percentToCover       percent of the total weight the LCA prefix must cover; values >= 99.9999 are treated as 100
     * @param useIdentityFilter    if true, assignments are pushed to higher ranks when the percent identity is too low
     */
    public AssignmentUsingWeightedLCA(final String cName, final int[] refId2Weight, final Map<String, Integer> ref2weight, final Taxon2SpeciesMapping taxon2SpeciesMapping, final float percentToCover, final boolean useIdentityFilter) {
        this.cName = cName;
        this.useIdentityFilter = useIdentityFilter;
        fullTree = ClassificationManager.get(cName, true).getFullTree();
        idMapper = ClassificationManager.get(cName, true).getIdMapper();
        name2IdMap = ClassificationManager.get(cName, true).getName2IdMap();
        this.refId2weight = refId2Weight;
        this.ref2weight = ref2weight;
        this.taxon2SpeciesMapping = taxon2SpeciesMapping;
        this.percentToCover = (percentToCover >= 99.9999 ? 100 : percentToCover);
        addressingArray = resizeArray(addressingArray, 1000); // need to call this method so that each element is set
    }

    /**
     * determine the taxon id of a read from its matches
     *
     * @param activeMatches indices of the matches to consider
     * @param readBlock     the read and its matches
     * @return taxon id, or NOHITS_ID/UNASSIGNED_ID
     */
    public int computeId(final BitSet activeMatches, final IReadBlock readBlock) {
        if (readBlock.getNumberOfMatches() == 0)
            return IdMapper.NOHITS_ID;
        if (activeMatches.cardinality() == 0)
            return IdMapper.UNASSIGNED_ID;

        // collect the weighted addresses of all matches to non-disabled taxa:
        int arrayLength = collectAddresses(activeMatches, readBlock, false);

        // if there are only matches to disabled taxa, then use them.
        // Bug fix: this fallback previously re-applied the isDisabled() filter and therefore never
        // collected anything, leaving such reads unassigned (compare AssignmentUsingLCA, which
        // correctly drops the filter in its fallback).
        if (arrayLength == 0)
            arrayLength = collectAddresses(activeMatches, readBlock, true);

        // compute LCA using addresses:
        if (arrayLength > 0) {
            final String address = computeWeightedLCA(percentToCover, addressingArray, arrayLength);
            final int id = fullTree.getAddress2Id(address);
            if (id > 0) {
                if (useIdentityFilter)
                    return AssignmentUsingLCA.adjustByPercentIdentity(id, activeMatches, readBlock, fullTree, name2IdMap);
                return allowBelowSpeciesAssignment ? id : taxon2SpeciesMapping.getSpeciesOrReturnTaxonId(id);
            }
        }
        // although we had some hits, couldn't make an assignment
        return IdMapper.UNASSIGNED_ID;
    }

    /**
     * collect the weighted addresses of all active matches into addressingArray
     *
     * @param includeDisabled if true, matches to disabled taxa are collected as well
     * @return number of addresses collected
     */
    private int collectAddresses(final BitSet activeMatches, final IReadBlock readBlock, final boolean includeDisabled) {
        int arrayLength = 0;
        for (int i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
            final IMatchBlock matchBlock = readBlock.getMatchBlock(i);
            int taxId = matchBlock.getId(cName);
            if (taxId > 0) {
                if (!allowBelowSpeciesAssignment)
                    taxId = taxon2SpeciesMapping.getSpeciesOrReturnTaxonId(taxId); // map strains etc. up to species
                if (includeDisabled || !idMapper.isDisabled(taxId)) {
                    final String address = fullTree.getAddress(taxId);
                    if (address != null) {
                        if (arrayLength >= addressingArray.length)
                            addressingArray = resizeArray(addressingArray, 2 * addressingArray.length);
                        final int weight;
                        if (ref2weight != null) {
                            final String ref = matchBlock.getTextFirstWord();
                            final Integer value = (ref != null ? ref2weight.get(ref) : null);
                            weight = (value != null ? value : 1);
                        } else { // no name-based map: use the per-id weights (DAA files)
                            final int refId = ((MatchBlockDAA) matchBlock).getSubjectId();
                            weight = Math.max(1, refId2weight[refId]);
                        }
                        addressingArray[arrayLength++].set(address, weight);
                    }
                }
            }
        }
        return arrayLength;
    }

    /**
     * get the LCA of two ids
     *
     * @return LCA of id1 and id2, not ignoring the case that one may be the lca of the other
     */
    @Override
    public int getLCA(int id1, int id2) {
        if (id1 == 0)
            return id2;
        else if (id2 == 0)
            return id1;
        else
            return fullTree.getAddress2Id(LCAAddressing.getCommonPrefix(new String[]{fullTree.getAddress(id1), fullTree.getAddress(id2)}, 2, false));
    }

    /**
     * compute the weighted LCA for a set of taxa and weights
     *
     * @param percentToCover percent of total weight the LCA prefix must cover
     * @param taxon2weight   taxon id to weight
     * @return LCA address, or "" if no taxon has an address
     */
    public String computeWeightedLCA(final float percentToCover, final Map<Integer, Integer> taxon2weight) {
        int arrayLength = 0;
        for (Integer taxonId : taxon2weight.keySet()) {
            final String address = fullTree.getAddress(taxonId);
            if (address != null) {
                if (arrayLength >= addressingArray.length)
                    addressingArray = resizeArray(addressingArray, 2 * addressingArray.length);
                addressingArray[arrayLength++].set(address, taxon2weight.get(taxonId));
            }
            // taxa without an address in the tree are silently skipped
        }
        return computeWeightedLCA(percentToCover, addressingArray, arrayLength);
    }

    /**
     * compute the address of the weighted LCA: the longest address prefix whose weight
     * is at least percentToCover percent of the total weight
     *
     * @param array      scratch array of weighted addresses; entries [0, origLength) are used and relinked
     * @param origLength number of valid entries in array
     * @return address or ""
     */
    private String computeWeightedLCA(final float percentToCover, final WeightedAddress[] array, final int origLength) {
        if (origLength == 0)
            return "";
        // sort so that identical addresses are adjacent:
        Arrays.sort(array, 0, origLength, Comparator.comparing(a -> a.address));
        // setup the singly-linked list over the sorted entries:
        for (int i = 0; i < origLength - 1; i++) {
            array[i].next = array[i + 1];
        }
        array[origLength - 1].next = null;

        final WeightedAddress head = new WeightedAddress(null, 0); // head.next points to first element of list, but head is NOT the first element
        head.next = array[0];

        int length = mergeIdentical(head, origLength);

        int totalWeight = getTotalWeight(head);
        int weightToCover = (int) Math.min(totalWeight, Math.ceil((totalWeight / 100.0) * percentToCover));

        for (int pos = 0; ; pos++) { // look at next letter after current prefix
            ch2weight.clear();
            // determine weights for each letter at pos, remove any addresses that equal the prefix:
            {
                WeightedAddress prev = head; // we are using a single-linked list, so need to update prev.next to delete current
                for (WeightedAddress current = head.next; current != null; current = current.next) {
                    final String address = current.address;
                    if (pos == address.length()) { // current has run out of symbols
                        if (--length == 0) // run out of addresses, return prefix
                            return address.substring(0, pos);
                        prev.next = current.next;
                        if (ignoreAncestors) {
                            // this node lies on route to best node, so it is covered and its weight can be removed from totalWeight
                            totalWeight -= current.weight;
                            weightToCover = ((int) Math.min(totalWeight, Math.ceil((totalWeight / 100.0) * percentToCover)));
                            // Note: prev does not change
                        }
                    } else {
                        final char ch = address.charAt(pos);
                        final Integer count = ch2weight.get(ch);
                        ch2weight.put(ch, count == null ? current.weight : count + current.weight);
                        prev = current;
                    }
                }
            }
            // determine the heaviest character:
            char bestCh = 0;
            int bestCount = 0;
            for (char ch : ch2weight.keySet()) {
                final int weight = ch2weight.get(ch);
                if (weight > bestCount) {
                    bestCh = ch;
                    bestCount = weight;
                }
            }

            if (bestCount < weightToCover) // best count no longer good enough, return current prefix
                return head.next.getAddress().substring(0, pos);

            // remove all that do not match the heaviest character:
            {
                WeightedAddress prev = head;
                for (WeightedAddress current = head.next; current != null; current = current.next) {
                    final String address = current.address;
                    if (address.charAt(pos) != bestCh) { // remove the current from the list
                        if (--length == 0)
                            return address.substring(0, pos);
                        prev.next = current.next;
                        // Note: prev does not change
                    } else
                        prev = current;
                }
            }
        }
    }

    /**
     * merge identical entries, using max weight for identical taxa. After running this, still have start=0
     *
     * @return new length
     */
    private static int mergeIdentical(final WeightedAddress headPtr, int length) {
        for (WeightedAddress a = headPtr.next; a != null; a = a.next) {
            for (WeightedAddress b = a.next; b != null; b = b.next) {
                if (a.getAddress().equals(b.getAddress())) {
                    if (b.weight > a.weight) // keep the maximum weight, NOT the sum
                        a.weight = b.weight;
                    a.next = b.next; // unlink b; list is sorted, so duplicates are adjacent
                    length--;
                } else
                    break;
            }
        }
        return length;
    }

    /**
     * compute total weight of all entries in the list
     *
     * @return sum of weights
     */
    private static int getTotalWeight(final WeightedAddress head) {
        int totalWeight = 0;
        for (WeightedAddress a = head.next; a != null; a = a.next) {
            totalWeight += a.weight;
        }
        return totalWeight;
    }

    /**
     * converts an address to numbers for easier display
     *
     * @return address characters as dot-separated numbers
     */
    private static String toNumbers(String address) {
        final StringBuilder buf = new StringBuilder();
        for (int i = 0; i < address.length(); i++)
            buf.append(String.format("%d.", (int) address.charAt(i)));
        return buf.toString();
    }

    /**
     * utility for resizing an array of weighted addresses; newly added slots are pre-filled
     * with fresh WeightedAddress objects so that set() can be called on them directly
     *
     * @return new array
     */
    private static WeightedAddress[] resizeArray(WeightedAddress[] array, int size) {
        final WeightedAddress[] result = new WeightedAddress[size];
        System.arraycopy(array, 0, result, 0, array.length);
        for (int i = array.length; i < result.length; i++)
            result[i] = new WeightedAddress();
        return result;
    }

    public float getPercentToCover() {
        return percentToCover;
    }

    public ClassificationFullTree getFullTree() {
        return fullTree;
    }

    public boolean isIgnoreAncestors() {
        return ignoreAncestors;
    }

    public void setIgnoreAncestors(boolean ignoreAncestors) {
        this.ignoreAncestors = ignoreAncestors;
    }

    /**
     * address and weight, linkable into a singly-linked list
     */
    public static class WeightedAddress {
        private String address;
        private int weight;
        private WeightedAddress next;

        /**
         * default constructor
         */
        public WeightedAddress() {
        }

        /**
         * constructor
         */
        public WeightedAddress(String address, int weight) {
            this.address = address;
            this.weight = weight;
        }

        void set(String address, int weight) {
            this.address = address;
            this.weight = weight;
        }

        String getAddress() {
            return address;
        }

        public void setAddress(String address) {
            this.address = address;
        }

        public int getWeight() {
            return weight;
        }

        public void setWeight(int weight) {
            this.weight = weight;
        }

        public String toString() {
            return "[" + toNumbers(address) + "," + weight + "]";
        }
    }
}
| 16,852 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
LCAAddressing.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/LCAAddressing.java | /*
* LCAAddressing.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.phylo.PhyloTree;
import java.util.Collection;
import java.util.Map;
/**
* Computes addresses used in LCA algorithm
* Daniel Huson, 4.2015
*/
/**
 * Computes the node addresses that the LCA algorithms operate on: each node is labelled
 * by the path of child indices (encoded as characters) leading from the root to it, so the
 * LCA of a set of nodes is simply the node whose address is the common prefix of theirs.
 * Daniel Huson, 4.2015
 */
public class LCAAddressing {
    /**
     * compute node addresses used to compute LCA
     *
     * @param tree       the classification tree
     * @param id2address filled with the address of each node id
     * @param address2id filled with the node id of each address
     */
    public static void computeAddresses(PhyloTree tree, Map<Integer, String> id2address, Map<String, Integer> address2id) {
        final var root = tree.getRoot();
        if (root != null)
            buildId2AddressRec(root, "", id2address, address2id);
    }

    /**
     * recursively assigns addresses: a child's address is the parent's address plus a per-child code
     */
    private static void buildId2AddressRec(Node v, String path, Map<Integer, String> id2address, Map<String, Integer> address2id) {
        final var id = (Integer) v.getInfo();
        id2address.put(id, path);
        address2id.put(path, id);
        if (v.getOutDegree() < Character.MAX_VALUE) {
            // common case: one character suffices to number all children
            var childCode = (char) 1;
            for (var e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                buildId2AddressRec(e.getOpposite(v), path + childCode, id2address, address2id);
                childCode++;
            }
        } else {
            // out-degree too large for a single character: number children with two characters
            var low = (char) 1;
            var high = (char) 1;
            for (var e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                if (low == Character.MAX_VALUE) {
                    high++;
                    low = 1;
                }
                buildId2AddressRec(e.getOpposite(v), (path + low) + high, id2address, address2id);
                low++;
            }
        }
    }

    /**
     * given a set of addresses, returns the common prefix.
     *
     * @param ignoreAncestors ignore ancestors, i.e. ignore prefixes of longer addresses
     * @return prefix
     */
    public static String getCommonPrefix(final Collection<String> addresses, boolean ignoreAncestors) {
        if (addresses.isEmpty())
            return "";
        if (addresses.size() == 1)
            return addresses.iterator().next();

        // pick the reference: longest address when ancestors are ignored, shortest otherwise
        String reference = null;
        for (var candidate : addresses) {
            if (candidate == null || candidate.isEmpty())
                continue;
            if (reference == null
                    || (ignoreAncestors ? candidate.length() > reference.length() : candidate.length() < reference.length()))
                reference = candidate;
        }
        if (reference == null)
            return "";

        // the answer is the longest prefix of the reference that no other address contradicts;
        // addresses shorter than the current position impose no constraint
        for (var pos = 0; pos < reference.length(); pos++) {
            final var expected = reference.charAt(pos);
            for (var other : addresses) {
                if (other != null && pos < other.length() && other.charAt(pos) != expected)
                    return reference.substring(0, pos);
            }
        }
        return reference;
    }

    /**
     * given an array of addresses, returns the common prefix
     *
     * @param ignorePrefixes ignore prefixes of longer addresses
     * @return prefix
     */
    public static String getCommonPrefix(final String[] addresses, final int numberOfAddresses, boolean ignorePrefixes) {
        if (numberOfAddresses == 0)
            return "";
        if (numberOfAddresses == 1)
            return addresses[0];

        // pick the reference: longest address when prefixes are ignored, shortest otherwise
        String reference = null;
        for (var i = 0; i < numberOfAddresses; i++) {
            final var candidate = addresses[i];
            if (candidate == null || candidate.isEmpty())
                continue;
            if (reference == null
                    || (ignorePrefixes ? candidate.length() > reference.length() : candidate.length() < reference.length()))
                reference = candidate;
        }
        if (reference == null)
            return "";

        // longest prefix of the reference on which all (long-enough) addresses agree
        for (var pos = 0; pos < reference.length(); pos++) {
            final var expected = reference.charAt(pos);
            for (var i = 0; i < numberOfAddresses; i++) {
                final var other = addresses[i];
                if (other != null && pos < other.length() && other.charAt(pos) != expected)
                    return reference.substring(0, pos);
            }
        }
        return reference;
    }
}
| 5,451 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ChimericCheck.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/ChimericCheck.java | /*
* ChimericCheck.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import megan.viewer.TaxonomicLevels;
import megan.viewer.TaxonomyData;
import java.util.HashMap;
/**
* checks for possible chimeric read
* Daniel Huson, 9.2017
*/
/**
 * checks for possible chimeric reads: a long read whose alignments split into two
 * disjoint regions that are assigned to two different, unrelated taxa.
 * Daniel Huson, 9.2017
 */
class ChimericCheck {
    /**
     * report (to stderr) any long reads that look like they may be chimeric.
     * A read is flagged when exactly one pair of child taxa of v covers two
     * non-overlapping stretches of the read, each sufficiently long; reads whose
     * LCA node v already sits at genus/species level (or that involve
     * "unclassified" taxa) are skipped.
     *
     * @param readName       name of the read (for the report only)
     * @param v              node in the taxonomy tree at which the read was placed (info = taxon id)
     * @param taxa2intervals read intervals covered per taxon id
     * @param totalCovered   total number of read bases covered by alignments
     * @param readLength     length of the read; 0 disables the length-proportion check
     */
    public static void apply(String readName, Node v, HashMap<Integer, IntervalList> taxa2intervals, int totalCovered, int readLength) {
        final int ancestorRank = TaxonomyData.getTaxonomicRank(TaxonomyData.getLowestAncestorWithMajorRank((int) v.getInfo()));
        // only consider root, top-level nodes, and nodes above genus level:
        if (v.getInDegree() > 0 && v.getFirstInEdge().getSource().getInDegree() > 0 // keep root and top level nodes
                && (/*ancestorRank == 1 || */ ancestorRank == TaxonomicLevels.getSpeciesId() || ancestorRank == TaxonomicLevels.getGenusId())) // keep nothing genus or below
            return;
        final String taxonName = TaxonomyData.getName2IdMap().get((int) v.getInfo());
        if (taxonName.contains("unclassified"))
            return;

        String message = null; // set when the first candidate pair is found
        // examine every pair (taxon1, taxon2) of child taxa of v:
        for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
            final int taxon1 = (Integer) e.getTarget().getInfo();
            final String taxonName1 = TaxonomyData.getName2IdMap().get(taxon1);
            if (!taxonName1.contains("unclassified")) {
                final IntervalList intervals1 = taxa2intervals.get(taxon1);
                if (intervals1 != null) {
                    final int covered1 = intervals1.getCovered();
                    // each side must cover >= 1000 bases and >= 20% of all aligned bases:
                    double minProportionOfAlignedBasedCoveredPerSide = 0.2;
                    int minNumberOfBasesCoveredPerSide = 1000;
                    if (covered1 >= minNumberOfBasesCoveredPerSide && covered1 >= minProportionOfAlignedBasedCoveredPerSide * totalCovered) {
                        final int min1 = intervals1.computeMin();
                        final int max1 = intervals1.computeMax();
                        for (Edge f = v.getNextOutEdge(e); f != null; f = v.getNextOutEdge(f)) {
                            final int taxon2 = (Integer) f.getTarget().getInfo();
                            final String taxonName2 = TaxonomyData.getName2IdMap().get(taxon2);
                            if (!taxonName2.contains("unclassified")) {
                                final IntervalList intervals2 = taxa2intervals.get(taxon2);
                                if (intervals2 != null) {
                                    final int covered2 = intervals2.getCovered();
                                    // both sides together must cover >= 60% of the read (skipped when readLength unknown):
                                    double minProportionOfBasesCoveredBothSides = 0.6;
                                    if (readLength == 0 || covered1 + covered2 >= minProportionOfBasesCoveredBothSides * readLength) {
                                        // 0.8;
                                        double minProportionOfAlignedBasesCoveredBothSides = 0;
                                        if (covered2 >= minNumberOfBasesCoveredPerSide && covered2 >= minProportionOfAlignedBasedCoveredPerSide * totalCovered
                                                && covered1 + covered2 >= minProportionOfAlignedBasesCoveredBothSides * totalCovered) {
                                            final int min2 = intervals2.computeMin();
                                            final int max2 = intervals2.computeMax();
                                            // the two taxa must occupy disjoint stretches of the read:
                                            if (max1 <= min2 || max2 <= min1) {
                                                if (message == null) {
                                                    final int rank = TaxonomyData.getTaxonomicRank((int) v.getInfo());
                                                    String rankName = TaxonomicLevels.getName(rank);
                                                    if (rankName == null)
                                                        rankName = "below " + TaxonomicLevels.getName(ancestorRank);
                                                    message = String.format("Possible chimeric read: '%s' (%,d bp): [%s] %s: %s (%,d bp) vs %s (%,d bp)",
                                                            readName, readLength, rankName, taxonName, taxonName1, covered1, taxonName2, covered2);
                                                } else
                                                    return; // more than one pair, problably not a simple chimeric read
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        if (message != null)
            System.err.println(message);
    }
}
| 5,617 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingBestHit.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingBestHit.java | /*
* AssignmentUsingBestHit.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.util.FileLineIterator;
import jloda.util.FileUtils;
import jloda.util.NumberUtils;
import jloda.util.StringUtils;
import megan.classification.IdMapper;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import java.io.File;
import java.io.IOException;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;
/**
* assignment using best hit
* Created by huson on 1/22/16.
*/
/**
 * assignment using best hit: a read is assigned to the class of its first positive match;
 * an external per-read assignment file, if present next to the input file, takes precedence.
 * Created by huson on 1/22/16.
 */
public class AssignmentUsingBestHit implements IAssignmentAlgorithm {
    private final String cName; // name of the classification (e.g. "Taxonomy")
    // read name -> externally assigned class id; never null, empty when no assignment file is present
    private final Map<String, Integer> externalName2IdMap;

    /**
     * constructor
     *
     * @param cName    classification name
     * @param fileName input file name; an external assignment file is looked for under the
     *                 same name with suffix "." + cName.toLowerCase()
     */
    public AssignmentUsingBestHit(String cName, String fileName) {
        this.cName = cName;
        externalName2IdMap = loadAssignmentFiles(cName, fileName);
        // System.err.println("Using 'best hit' assignment on " + cName);
    }

    /**
     * computes the id for a read from its matches
     *
     * @param activeMatches indices of the matches to consider
     * @param readBlock     the read and its matches
     * @return id of first positive match, or 0
     */
    public int computeId(BitSet activeMatches, IReadBlock readBlock) {
        // an explicit external assignment, if present, overrides the matches:
        if (!externalName2IdMap.isEmpty()) {
            final Integer id = externalName2IdMap.get(readBlock.getReadName());
            if (id != null && id > 0)
                return id;
        }
        if (readBlock.getNumberOfMatches() == 0)
            return IdMapper.NOHITS_ID;
        if (activeMatches.cardinality() == 0)
            return IdMapper.UNASSIGNED_ID;
        // matches are ordered by score, so the first active match with a positive id is the best hit:
        for (int i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
            final IMatchBlock match = readBlock.getMatchBlock(i);
            final int id = match.getId(cName);
            if (id > 0)
                return id;
        }
        return IdMapper.UNASSIGNED_ID;
    }

    /**
     * get the LCA of two ids; not meaningful for best-hit assignment
     *
     * @throws UnsupportedOperationException always (subclass of RuntimeException, so existing callers are unaffected)
     */
    @Override
    public int getLCA(int id1, int id2) {
        throw new UnsupportedOperationException("getLCA() called for assignment using best hit");
    }

    /**
     * load the external assignment file, if present.
     * Each line is expected to contain a read name and an integer class id, tab-separated.
     *
     * @return read-name to id assignments; empty (never null) when no readable file exists
     */
    private Map<String, Integer> loadAssignmentFiles(String cName, String fileName) {
        final Map<String, Integer> map = new HashMap<>();
        final File file = new File(FileUtils.replaceFileSuffix(fileName, "." + cName.toLowerCase()));
        if (file.exists() && file.canRead()) {
            System.err.println("External assignment file for " + cName + " detected: " + file);
            try (final FileLineIterator it = new FileLineIterator(file, true)) {
                while (it.hasNext()) {
                    final String[] tokens = StringUtils.split(it.next(), '\t');
                    if (tokens.length == 2 && NumberUtils.isInteger(tokens[1])) {
                        final int id = NumberUtils.parseInt(tokens[1]);
                        map.put(tokens[0], id);
                    }
                }
            } catch (IOException ex) {
                // best-effort: a broken assignment file must not abort the analysis
                System.err.println(ex.getMessage());
            }
            System.err.println("Count: " + map.size());
        }
        return map;
    }
}
| 4,034 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingLCA.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingLCA.java | /*
* AssignmentUsingLCA.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Node;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.classification.data.ClassificationFullTree;
import megan.classification.data.Name2IdMap;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import java.util.BitSet;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
* computes the taxon assignment for a read, using the LCA algorithm
* Daniel Huson, 7.2014
* todo: merge with AssignmentUsingLCA
*/
/**
 * computes the taxon assignment for a read, using the LCA algorithm: the read is
 * assigned to the node whose address is the common prefix of the addresses of all
 * (active) matches; with percentToCover < 100, to the deepest prefix covering
 * that percentage of matches.
 * Daniel Huson, 7.2014
 * todo: merge with AssignmentUsingLCA
 */
public class AssignmentUsingLCA implements IAssignmentAlgorithm {
    private String[] addresses; // scratch array of match addresses, grown on demand
    private final BitSet activeSet; // scratch set used by getPrefixCoveringWeight
    private final Map<Character, Integer> ch2weight; // scratch: per-character weight at current position
    private final String cName; // name of the classification (e.g. "Taxonomy")
    private final boolean useIdentityFilter;
    private final float proportionToCover; // percentToCover / 100
    private final ClassificationFullTree fullTree;
    private final IdMapper idMapper;
    private final Name2IdMap name2IdMap;
    private final boolean ignoreAncestralTaxa; // if true, an address that is a prefix of another is ignored

    /**
     * constructor; ancestral taxa are ignored
     *
     * @param cName             classification name
     * @param useIdentityFilter if true, assignments are pushed up to higher ranks when percent identity is too low
     * @param percentToCover    percent of matches the LCA prefix must cover (100 = plain LCA)
     */
    public AssignmentUsingLCA(String cName, boolean useIdentityFilter, float percentToCover) {
        this(cName, useIdentityFilter, percentToCover, true);
    }

    /**
     * constructor
     *
     * @param ignoreAncestralTaxa if true, addresses that are prefixes of longer ones are ignored when forming the LCA
     */
    public AssignmentUsingLCA(String cName, boolean useIdentityFilter, float percentToCover, boolean ignoreAncestralTaxa) {
        this.cName = cName;
        fullTree = ClassificationManager.get(cName, false).getFullTree();
        idMapper = ClassificationManager.get(cName, true).getIdMapper();
        name2IdMap = ClassificationManager.get(cName, false).getIdMapper().getName2IdMap();
        addresses = new String[1000];
        activeSet = new BitSet();
        ch2weight = new HashMap<>(Character.MAX_VALUE, 1f);
        this.useIdentityFilter = useIdentityFilter;
        this.proportionToCover = percentToCover / 100f;
        this.ignoreAncestralTaxa = ignoreAncestralTaxa;
    }

    /**
     * determine the taxon id of a read from its matches
     *
     * @param activeMatches indices of the matches to consider
     * @param readBlock     the read and its matches
     * @return taxon id, or NOHITS_ID/UNASSIGNED_ID
     */
    public int computeId(BitSet activeMatches, IReadBlock readBlock) {
        if (readBlock.getNumberOfMatches() == 0)
            return IdMapper.NOHITS_ID;
        if (activeMatches.cardinality() == 0)
            return IdMapper.UNASSIGNED_ID;

        // compute addresses of all hit taxa:
        if (activeMatches.cardinality() > 0) {
            boolean hasDisabledMatches = false;

            // collect the addresses of all non-disabled taxa:
            int numberOfAddresses = 0;
            for (int i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
                final IMatchBlock matchBlock = readBlock.getMatchBlock(i);
                int id = matchBlock.getId(cName);
                if (id > 0) {
                    if (!idMapper.isDisabled(id)) {
                        final String address = fullTree.getAddress(id);
                        if (address != null) {
                            if (numberOfAddresses >= addresses.length) {
                                // grow scratch array by doubling
                                String[] tmp = new String[2 * addresses.length];
                                System.arraycopy(addresses, 0, tmp, 0, addresses.length);
                                addresses = tmp;
                            }
                            addresses[numberOfAddresses++] = address;
                        }
                    } else
                        hasDisabledMatches = true;
                }
            }

            // if there only matches to disabled taxa, then use them:
            if (numberOfAddresses == 0 && hasDisabledMatches) {
                for (int i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
                    final IMatchBlock matchBlock = readBlock.getMatchBlock(i);
                    int id = matchBlock.getId(cName);
                    if (id > 0) {
                        final String address = fullTree.getAddress(id);
                        if (address != null) {
                            if (numberOfAddresses >= addresses.length) {
                                String[] tmp = new String[2 * addresses.length];
                                System.arraycopy(addresses, 0, tmp, 0, addresses.length);
                                addresses = tmp;
                            }
                            addresses[numberOfAddresses++] = address;
                        }
                    }
                }
            }

            // compute LCA using addresses:
            if (numberOfAddresses > 0) {
                final int id;
                if (proportionToCover == 1) {
                    // plain LCA: longest common prefix of all addresses
                    final String address = LCAAddressing.getCommonPrefix(addresses, numberOfAddresses, ignoreAncestralTaxa);
                    id = fullTree.getAddress2Id(address);
                } else {
                    // covering LCA: deepest prefix covering the required number of addresses
                    final int weightToCover = (int) Math.min(numberOfAddresses, Math.ceil(proportionToCover * numberOfAddresses));
                    final String address = getPrefixCoveringWeight(weightToCover, addresses, numberOfAddresses);
                    id = fullTree.getAddress2Id(address);
                }
                if (id > 0) {
                    if (useIdentityFilter) {
                        return AssignmentUsingLCA.adjustByPercentIdentity(id, activeMatches, readBlock, fullTree, name2IdMap);
                    }
                    return id;
                }
            }
        }

        // although we had some hits, couldn't make an assignment
        return IdMapper.UNASSIGNED_ID;
    }

    /**
     * returns the LCA of a set of taxon ids; disabled taxa are skipped
     *
     * @return id
     */
    public int computeNaiveLCA(Collection<Integer> taxonIds) {
        if (taxonIds.size() == 0)
            return IdMapper.NOHITS_ID;
        else if (taxonIds.size() == 1)
            return taxonIds.iterator().next();

        if (taxonIds.size() > addresses.length) { // grow, if necessary (old content need not be preserved)
            addresses = new String[taxonIds.size()];
        }

        int numberOfAddresses = 0;

        // compute addresses of all hit taxa:
        for (Integer id : taxonIds) {
            if (!idMapper.isDisabled(id)) {
                final String address = fullTree.getAddress(id);
                if (address != null) {
                    addresses[numberOfAddresses++] = address;
                }
            }
        }

        // compute LCA using addresses:
        if (numberOfAddresses > 0) {
            final String address = LCAAddressing.getCommonPrefix(addresses, numberOfAddresses, ignoreAncestralTaxa);
            return fullTree.getAddress2Id(address);
        }
        return IdMapper.UNASSIGNED_ID;
    }

    /**
     * get the LCA of two ids, not ignoring the case that one may be the lca of the other
     *
     * @return LCA of id1 and id2
     */
    @Override
    public int getLCA(int id1, int id2) {
        if (id1 == 0)
            return id2;
        else if (id2 == 0)
            return id1;
        else
            return fullTree.getAddress2Id(LCAAddressing.getCommonPrefix(new String[]{fullTree.getAddress(id1), fullTree.getAddress(id2)}, 2, ignoreAncestralTaxa));
    }

    /**
     * moves reads to higher taxa if the percent identity that they have is not high enough for the given taxonomic rank.
     * Rank thresholds (best identity over all active matches): species/subspecies 99%, species group/genus 97%,
     * family 95%, order 90%, class 85%, phylum 80%; "no rank" always moves up.
     *
     * @return original or modified taxId
     */
    public static int adjustByPercentIdentity(int taxId, BitSet activeMatches, IReadBlock readBlock, ClassificationFullTree tree, Name2IdMap name2IdMap) {
        float bestPercentIdentity = 0;
        for (int i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
            final IMatchBlock matchBlock = readBlock.getMatchBlock(i);
            if (matchBlock.getPercentIdentity() > bestPercentIdentity)
                bestPercentIdentity = matchBlock.getPercentIdentity();
        }
        // bestPercentIdentity == 0 means no identity values are available, so no adjustment possible
        if (bestPercentIdentity >= 99 || bestPercentIdentity == 0)
            return taxId;

        boolean changed;
        do {
            changed = false;
            boolean ok = true;
            int rank = name2IdMap.getRank(taxId);
            // rank codes are the NCBI-style numeric ranks used throughout MEGAN:
            switch (rank) {
                case 100: // species
                case 101: // subspecies
                    if (bestPercentIdentity < 99)
                        ok = false;
                    break;
                case 99: // species group
                case 98: // genus
                    if (bestPercentIdentity < 97)
                        ok = false;
                    break;
                case 5: // family
                    if (bestPercentIdentity < 95)
                        ok = false;
                    break;
                case 4: // order
                    if (bestPercentIdentity < 90)
                        ok = false;
                    break;
                case 3: // class
                    if (bestPercentIdentity < 85)
                        ok = false;
                    break;
                case 2: // phylum
                    if (bestPercentIdentity < 80)
                        ok = false;
                    break;
                default:
                case 0: // no rank
                    ok = false;
            }
            if (!ok) // must go up tree:
            {
                Node v = tree.getANode(taxId);
                if (v != null && v.getInDegree() > 0) {
                    Node w = v.getFirstInEdge().getSource();
                    taxId = (Integer) w.getInfo();
                    changed = true;
                }
            }
        } while (changed);
        return taxId;
    }

    /**
     * given a set of addresses, returns the longest prefix that equals or exceeds the given weight threshold.
     * Greedy: at each position, keeps only the addresses carrying the heaviest character, as long as
     * their count still meets weightToCover.
     *
     * @return prefix
     */
    private String getPrefixCoveringWeight(int weightToCover, String[] addresses, int length) {
        activeSet.clear();
        ch2weight.clear();

        for (int i = 0; i < length; i++) {
            activeSet.set(i);
        }

        final StringBuilder buf = new StringBuilder();

        for (int pos = 0; ; pos++) {
            // tally the character at pos over all still-active addresses:
            for (int i = activeSet.nextSetBit(0); i != -1; i = activeSet.nextSetBit(i + 1)) {
                if (pos == addresses[i].length()) {
                    activeSet.set(i, false); // run out of symbols
                    // weightToCover -= 1; // this node lies on route to best node, so it is covered and its weight can be removed from weightToCover
                } else {
                    char ch = addresses[i].charAt(pos);
                    ch2weight.merge(ch, 1, Integer::sum);
                }
            }
            if (activeSet.cardinality() == 0)
                break;

            // determine the heaviest character:
            Character bestCh = null;
            int bestCount = 0;
            for (Character ch : ch2weight.keySet()) {
                Integer weight = ch2weight.get(ch);
                if (weight != null && weight > bestCount) {
                    bestCh = ch;
                    bestCount = weight;
                }
            }

            if (bestCount >= weightToCover && bestCh != null)
                buf.append(bestCh); // extend the prefix
            else
                break; // threshold no longer met: current prefix is the answer

            // drop all addresses that do not follow the chosen character:
            for (int i = activeSet.nextSetBit(0); i != -1; i = activeSet.nextSetBit(i + 1)) {
                if (addresses[i].charAt(pos) != bestCh) // no length problem here, if address too short then it will not be active
                    activeSet.set(i, false); // not on best path, remove from active nodes
            }
            if (activeSet.cardinality() == 0)
                break;
            ch2weight.clear();
        }

        String result = buf.toString();
        if (result.length() > 0) {
            return result;
        } else
            return "";
    }
}
| 12,698 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingLCACreator.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingLCACreator.java | /*
* AssignmentUsingLCACreator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
/**
 * create an LCA assignment algorithm for taxonomy
 * Daniel Huson, 3.2016
 */
public class AssignmentUsingLCACreator implements IAssignmentAlgorithmCreator {
    private final String cName;
    private final boolean usePercentIdentityFilter;
    private final float percentToCover;

    /**
     * constructor; reports to stderr which binning algorithm will be used
     *
     * @param cName                    name of the classification to bin
     * @param usePercentIdentityFilter whether matches are filtered by percent identity
     * @param percentToCover           percent of total weight that the LCA must cover
     */
    public AssignmentUsingLCACreator(String cName, boolean usePercentIdentityFilter, float percentToCover) {
        this.cName = cName;
        this.usePercentIdentityFilter = usePercentIdentityFilter;
        this.percentToCover = percentToCover;

        if (percentToCover != 100)
            System.err.printf("Using 'Naive LCA' algorithm (%.1f %%) for binning: %s%n", percentToCover, cName);
        else
            System.err.printf("Using 'Naive LCA' algorithm for binning: %s%n", cName);
    }

    /**
     * creates a fresh assignment algorithm instance
     *
     * @return assignment algorithm
     */
    @Override
    public IAssignmentAlgorithm createAssignmentAlgorithm() {
        return new AssignmentUsingLCA(cName, usePercentIdentityFilter, percentToCover);
    }
}
| 1,950 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
IntervalList.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/IntervalList.java | /*
* IntervalList.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
/**
 * A list of integer intervals that can report the total number of positions covered.
 * Intervals are normalized (a &lt;= b) on construction; the list is lazily sorted and
 * merged into disjoint intervals when the cover is queried.
 * Daniel Huson, 2017
 */
class IntervalList {
    private final ArrayList<IntPair> list = new ArrayList<>();
    private int covered = 0; // cached cover; -1 indicates it must be recomputed
    private boolean isSorted = false; // true when list is sorted and pairwise disjoint

    public IntervalList() {
    }

    /**
     * sorts the intervals by start (then end) and merges overlapping ones,
     * leaving the list sorted and pairwise disjoint; invalidates the cached cover
     */
    private void updateSort() {
        covered = -1;
        // sort all the intervals:
        list.sort((p, q) -> {
            if (p.getA() < q.getA())
                return -1;
            else if (p.getA() > q.getA())
                return 1;
            else return Integer.compare(p.getB(), q.getB());
        });
        // make the intervals disjoint:
        final ArrayList<IntPair> orig = new ArrayList<>(list);
        list.clear();
        IntPair prev = null;
        for (IntPair pair : orig) {
            if (prev == null)
                prev = pair;
            else if (pair.getA() > prev.getB()) {
                list.add(prev);
                prev = new IntPair(pair.getA(), pair.getB());
            } else {
                prev.setB(Math.max(prev.getB(), pair.getB()));
            }
        }
        if (prev != null)
            list.add(prev);
        isSorted = true;
    }

    /**
     * recomputes the total number of integer positions covered by the intervals;
     * assumes the list is sorted by start position (see updateSort())
     */
    private void updateCover() {
        // recompute the amount covered:
        covered = 0;
        int lastStart = -1;
        int lastFinish = -1;
        for (IntPair pair : list) {
            if (lastStart == -1) {
                lastStart = pair.getA();
                lastFinish = pair.getB();
            } else {
                if (pair.getA() < lastFinish)
                    lastFinish = Math.max(lastFinish, pair.getB()); // fix: an interval contained in the previous one must not shrink the running cover
                else {
                    covered += (lastFinish - lastStart + 1);
                    lastStart = pair.getA();
                    lastFinish = pair.getB();
                }
            }
        }
        if (lastStart <= lastFinish)
            covered += (lastFinish - lastStart + 1);
    }

    /**
     * gets the total number of positions covered, resorting/merging first if necessary
     *
     * @return number of covered positions
     */
    public int getCovered() {
        if (covered == -1) {
            if (!isSorted) {
                updateSort();
            }
            updateCover();
        }
        return covered;
    }

    /**
     * adds the interval [min(a,b), max(a,b)]
     */
    public void add(int a, int b) {
        add(new IntPair(a, b));
    }

    private void add(IntPair pair) {
        list.add(pair);
        isSorted = false;
        covered = -1;
    }

    /**
     * adds all given intervals and invalidates the cached state
     */
    public void addAll(Collection<IntPair> pairs) {
        list.addAll(pairs);
        isSorted = false;
        covered = -1;
    }

    /**
     * declares the list sorted (or not); setting false invalidates the cached cover.
     * Caller is responsible for the list actually being sorted when passing true.
     */
    public void setIsSorted(boolean value) {
        isSorted = value;
        if (!isSorted)
            covered = -1;
    }

    public Iterator<IntPair> iterator() {
        return list.iterator();
    }

    public Collection<IntPair> getAll() {
        return list;
    }

    public int size() {
        return list.size();
    }

    /**
     * @return the smallest start position over all intervals (Integer.MAX_VALUE if empty)
     */
    public int computeMin() {
        int min = Integer.MAX_VALUE;
        for (IntPair pair : list) {
            min = Math.min(min, pair.a);
        }
        return min;
    }

    /**
     * @return the largest end position over all intervals (Integer.MIN_VALUE if empty)
     */
    public int computeMax() {
        int max = Integer.MIN_VALUE;
        for (IntPair pair : list) {
            max = Math.max(max, pair.b);
        }
        return max;
    }

    /**
     * an ordered pair of ints with a <= b
     */
    private static class IntPair {
        private final int a;
        private int b;

        IntPair(int a, int b) {
            this.a = Math.min(a, b);
            this.b = Math.max(a, b);
        }

        void setB(int b) {
            this.b = b;
        }

        final int getA() {
            return a;
        }

        final int getB() {
            return b;
        }
    }
}
IMultiAssignmentAlgorithm.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/IMultiAssignmentAlgorithm.java | /*
* IMultiAssignmentAlgorithm.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import java.util.ArrayList;
/**
 * Assignment algorithm that can produce multiple class assignments for a single read,
 * e.g. when a long read spans several genes
 * Daniel Huson, 1.2016
 */
public interface IMultiAssignmentAlgorithm extends IAssignmentAlgorithm {
    /**
     * get all additional assignments (beyond the primary assignment produced by the base algorithm)
     *
     * @param i                       the classification number to use in a class ids entry
     * @param numberOfClassifications the total length of a class ids entry
     * @param classIds                all additional assignments are returned here; presumably each entry is an int[] of length numberOfClassifications with position i holding the class id — confirm in implementations
     * @return the total number of gene segments detected
     */
    int getAdditionalClassIds(int i, int numberOfClassifications, ArrayList<int[]> classIds);
}
| 1,468 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
NaiveProjectionProfile.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/NaiveProjectionProfile.java | /*
* NaiveProjectionProfile.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.graph.NodeData;
import jloda.phylo.PhyloTree;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import megan.viewer.ClassificationViewer;
import megan.viewer.TaxonomicLevels;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * Compute a taxonomic profile by naive projection
 */
public class NaiveProjectionProfile {
    /**
     * compute a taxonomic profile at a given taxonomic rank using naive projection:
     * counts assigned above the rank are pushed down proportionally to the summarized
     * counts of the children; counts below the rank are pulled up onto it
     *
     * @param viewer     classification viewer supplying the tree and per-sample counts
     * @param rankName   name of the target taxonomic rank
     * @param minPercent minimum percent support; currently not applied (the filtering code
     *                   was disabled) — parameter kept for interface compatibility
     * @return mapping of each taxon id (including ids <= 0 for unassigned etc.) to per-sample counts
     * todo: needs fixing
     */
    public static Map<Integer, float[]> compute(final ClassificationViewer viewer, final String rankName, final float minPercent) {
        final int rank = TaxonomicLevels.getId(rankName);
        final Set<Integer> nodeIdsAtGivenRank = ClassificationManager.get(viewer.getClassName(), true).getFullTree().getNodeIdsAtGivenRank(rank, false);
        final int numberOfSamples = viewer.getDocument().getNumberOfSamples();
        final Map<Integer, float[]> profile = new HashMap<>();

        final PhyloTree tree = viewer.getTree();
        final Node root = tree.getRoot();
        final float[] rootAssigned = new float[numberOfSamples];
        final float[] totalInitiallyAssigned = new float[numberOfSamples];
        for (int i = 0; i < numberOfSamples; i++) {
            rootAssigned[i] = ((NodeData) root.getData()).getAssigned(i);
            totalInitiallyAssigned[i] = ((NodeData) root.getData()).getSummarized(i);
        }

        // recursively process the tree:
        computeRec(root, rootAssigned, profile, nodeIdsAtGivenRank, numberOfSamples);

        // copy counts of the special nodes (unassigned, no hits, ...), which have ids <= 0:
        for (Edge e = root.getFirstOutEdge(); e != null; e = root.getNextOutEdge(e)) {
            final Node w = e.getTarget();
            if (((Integer) w.getInfo()) <= 0) {
                final float[] assigned = new float[numberOfSamples];
                for (int i = 0; i < numberOfSamples; i++)
                    assigned[i] = ((NodeData) w.getData()).getAssigned(i);
                profile.put(((Integer) w.getInfo()), assigned);
            }
        }

        // make sure there is an entry for unassigned reads (array elements default to 0):
        float[] unassigned = profile.get(IdMapper.UNASSIGNED_ID);
        if (unassigned == null) {
            unassigned = new float[numberOfSamples];
            profile.put(IdMapper.UNASSIGNED_ID, unassigned);
        }

        // note: a disabled (if(false)) min-support filtering block was removed here; minPercent is currently unused

        // report per-sample statistics and determine how many reads were lost in the projection:
        final int[] totalProjected = new int[numberOfSamples];
        final int[] lostCount = new int[numberOfSamples];
        {
            for (Integer taxId : profile.keySet()) {
                if (taxId > 0) {
                    final float[] counts = profile.get(taxId);
                    for (int i = 0; i < counts.length; i++) {
                        totalProjected[i] += counts[i];
                    }
                }
            }
            for (int i = 0; i < numberOfSamples; i++) {
                lostCount[i] = (int) (totalInitiallyAssigned[i] - totalProjected[i]);
                System.err.println("Sample " + (i + 1) + ":");
                System.err.printf("Reads: %,10.0f%n", viewer.getDocument().getDataTable().getSampleSizes()[i]);
                System.err.printf("Assigned: %,10d%n", (int) totalInitiallyAssigned[i]);
                System.err.printf("Projected:%,10d%n", totalProjected[i]);
                System.err.printf("Lost: %,10d%n", lostCount[i]);
            }
        }

        // reads lost during projection are counted as unassigned:
        for (int i = 0; i < numberOfSamples; i++)
            unassigned[i] += lostCount[i];

        System.err.print("Total projected:");
        for (var value : totalProjected)
            System.err.printf(" %,d", value);
        System.err.println();
        System.err.println("Total lost: ");
        for (var value : lostCount)
            System.err.printf(" %,d", value);
        System.err.println();

        return profile;
    }

    /**
     * recursively project counts onto the nodes at the chosen rank
     *
     * @param v               current node
     * @param countFromAbove  per-sample counts pushed down from the ancestors of v
     * @param profile         the profile being built
     * @param targetRankIds   ids of all nodes at the chosen rank
     * @param numberOfSamples number of samples
     */
    private static void computeRec(Node v, float[] countFromAbove, Map<Integer, float[]> profile, Set<Integer> targetRankIds, int numberOfSamples) {
        final int taxId = (Integer) v.getInfo();
        final NodeData vData = (NodeData) v.getData();

        if (targetRankIds.contains(taxId)) { // is a node at the chosen rank, save profile
            final float[] counts = new float[numberOfSamples];
            for (int i = 0; i < numberOfSamples; i++) {
                counts[i] = countFromAbove[i] + (vData.getSummarized(i) - vData.getAssigned(i)); // below=summarized-assigned
            }
            profile.put(taxId, counts);
        } else {
            // determine how many below:
            boolean hasChild = false;
            final int[] belowV = new int[numberOfSamples];
            for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                final Node w = e.getTarget();
                if (((Integer) w.getInfo()) > 0) {
                    final NodeData wData = (NodeData) w.getData();
                    for (int i = 0; i < numberOfSamples; i++) {
                        belowV[i] += wData.getSummarized(i);
                    }
                    hasChild = true;
                }
            }
            if (hasChild) { // there are some children, push down counts proportionally to each child's summarized count:
                for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                    final Node w = e.getTarget();
                    if (((Integer) w.getInfo()) > 0) {
                        final NodeData wData = (NodeData) w.getData();
                        final float[] count = new float[numberOfSamples];
                        for (int i = 0; i < numberOfSamples; i++) {
                            if (belowV[i] > 0) {
                                final double fraction = (double) wData.getSummarized(i) / (double) belowV[i];
                                count[i] = wData.getAssigned(i) + (int) (countFromAbove[i] * fraction);
                            }
                        }
                        computeRec(w, count, profile, targetRankIds, numberOfSamples);
                    }
                }
            }
            // else: node has no child with positive id, these reads are lost (counted later as unassigned)
        }
    }
}
| 8,249 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingIntervalUnionLCACreator.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingIntervalUnionLCACreator.java | /*
* AssignmentUsingIntervalUnionLCACreator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.swing.util.ProgramProperties;
import megan.core.Document;
/**
 * create a coverage-base LCA assignment algorithm
 * Daniel Huson, 4.2017
 */
public class AssignmentUsingIntervalUnionLCACreator implements IAssignmentAlgorithmCreator {
    private final String cName;
    private final Document document;

    /**
     * constructor; reports to stderr which binning algorithm will be used
     *
     * @param cName    name of the classification to bin
     * @param document document supplying the LCA coverage parameters
     */
    public AssignmentUsingIntervalUnionLCACreator(String cName, Document document) {
        this.cName = cName;
        this.document = document;

        if (!ProgramProperties.get("use-segment-lca", false)) {
            System.err.printf("Using 'Interval-Union-LCA' algorithm (%.1f %%) for binning: %s%n", document.getLcaCoveragePercent(), cName);
            //System.err.println("(setprop CheckForChimeras=true; to turn on experimental chimeric read identification)");
        } else
            System.err.println("Using 'segment-LCA' algorithm for binning: " + cName);
    }

    /**
     * creates a fresh assignment algorithm instance
     *
     * @return assignment algorithm
     */
    @Override
    public IAssignmentAlgorithm createAssignmentAlgorithm() {
        return new AssignmentUsingIntervalUnionLCA(cName, document);
    }
}
| 2,060 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
IntervalTree4Matches.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/IntervalTree4Matches.java | /*
* IntervalTree4Matches.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import javafx.concurrent.Task;
import jloda.swing.util.ProgramProperties;
import jloda.util.CanceledException;
import jloda.util.Pair;
import jloda.util.interval.Interval;
import jloda.util.interval.IntervalTree;
import jloda.util.progress.ProgressListener;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;
import java.util.function.BiPredicate;
/**
 * computes interval tree of all matches to keep for a read block
 * Created by huson on 3/29/17.
 */
public class IntervalTree4Matches {
    private final static float defaultMinPercentCoverToDominate = 50f;

    /**
     * selects the matches to keep for a given read and puts them into an interval tree
     *
     * @param readBlock read whose matches are collected
     * @param task      can be null; if cancelled, collection stops early
     * @param progress  can be null; reports progress over the match blocks
     * @return interval tree of matches keyed by aligned query coordinates
     */
    public static IntervalTree<IMatchBlock> computeIntervalTree(IReadBlock readBlock, Task task, ProgressListener progress) throws CanceledException {
        final IntervalTree<IMatchBlock> intervalTree = new IntervalTree<>();

        if (progress != null) {
            progress.setMaximum(readBlock.getNumberOfAvailableMatchBlocks());
            progress.setProgress(0);
        }

        for (int m = 0; m < readBlock.getNumberOfAvailableMatchBlocks(); m++) {
            final IMatchBlock matchBlock = readBlock.getMatchBlock(m);
            intervalTree.add(new Interval<>(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd(), matchBlock));
            if (task != null && task.isCancelled())
                break;
            if (progress != null)
                progress.incrementProgress();
        }
        return intervalTree;
    }

    /**
     * extracts the set of dominating matches. A match is considered dominated, if more than 50% (default value) is covered by a match
     * that has a better bit score, or the same bit score but shorter length (final tie broken by smaller UId)
     *
     * @param intervals              input
     * @param cNames                 dominator must have a value for each of these classifications for which the dominated match does
     * @param classificationToReport if this names a single classification, only that one is checked for domination
     * @return dominating intervals
     */
    public static IntervalTree<IMatchBlock> extractDominatingIntervals(IntervalTree<IMatchBlock> intervals, String[] cNames, String classificationToReport) {
        final double dominationProportion = ProgramProperties.get("MinPercentCoverToDominate", defaultMinPercentCoverToDominate) / 100;

        // other dominates match if it covers enough of it and has a better score (ties: shorter length, then smaller UId):
        final BiPredicate<Interval<IMatchBlock>, Interval<IMatchBlock>> dominates = (otherInterval, interval) -> {
            final IMatchBlock match = interval.getData();
            final IMatchBlock other = otherInterval.getData();
            return otherInterval.overlap(interval) > dominationProportion * interval.length() &&
                    (other.getBitScore() > match.getBitScore() || other.getBitScore() == match.getBitScore() &&
                            (other.getLength() < match.getLength() || (other.getLength() == match.getLength() && other.getUId() < match.getUId())));
        };
        return removeDominatedIntervals(intervals, narrowClassifications(cNames, classificationToReport), dominates);
    }

    /**
     * extracts the set of strongly dominating matches. A match is considered strongly dominated, if 90% (default value) is covered
     * by a match whose bit score is at least 10% (default value) better
     *
     * @param intervals              input
     * @param cNames                 dominator must have a value for each of these classifications for which the dominated match does
     * @param classificationToReport if this names a single classification, only that one is checked for domination
     * @return dominating intervals
     */
    public static IntervalTree<IMatchBlock> extractStronglyDominatingIntervals(IntervalTree<IMatchBlock> intervals, String[] cNames, String classificationToReport) {
        final float minPercentCoverToDominate = (float) ProgramProperties.get("MinPercentCoverToStronglyDominate", 90f);
        final float minProportionCoverToDominate = minPercentCoverToDominate / 100.0f;
        final float topPercentScoreToDominate = (float) ProgramProperties.get("TopPercentScoreToStronglyDominate", 10f);
        final float scoreFactor = 1f - (topPercentScoreToDominate / 100.0f);

        // other strongly dominates match if it covers enough of it and its discounted score is still better:
        final BiPredicate<Interval<IMatchBlock>, Interval<IMatchBlock>> dominates = (otherInterval, interval) ->
                otherInterval.overlap(interval) > minProportionCoverToDominate * interval.length()
                        && scoreFactor * otherInterval.getData().getBitScore() > interval.getData().getBitScore();
        return removeDominatedIntervals(intervals, narrowClassifications(cNames, classificationToReport), dominates);
    }

    /**
     * if a single classification is to be reported, narrow the classifications to check down to just that one
     *
     * @return possibly narrowed array of classification names
     */
    private static String[] narrowClassifications(String[] cNames, String classificationToReport) {
        if (!classificationToReport.equalsIgnoreCase("all")) {
            for (String cName : cNames) {
                if (cName.equalsIgnoreCase(classificationToReport))
                    return new String[]{cName}; // only need to dominate on this classification
            }
        }
        return cNames;
    }

    /**
     * shared removal loop for both domination variants: repeatedly removes every interval that is dominated
     * by an interval that is itself undominated, handling forward and reverse matches separately
     *
     * @param intervals input intervals
     * @param cNames    classifications the dominator must also carry
     * @param dominates criterion: does the first interval dominate the second (score/overlap only; annotations checked here)
     * @return surviving intervals
     */
    private static IntervalTree<IMatchBlock> removeDominatedIntervals(IntervalTree<IMatchBlock> intervals, String[] cNames,
                                                                      BiPredicate<Interval<IMatchBlock>, Interval<IMatchBlock>> dominates) {
        final IntervalTree<IMatchBlock> allMatches = new IntervalTree<>(); // initially all forward matches, at the end, all resulting matches
        final IntervalTree<IMatchBlock> reverseMatches = new IntervalTree<>(); // all reverse matches
        for (IMatchBlock matchBlock : intervals.values()) {
            if (matchBlock.getAlignedQueryStart() <= matchBlock.getAlignedQueryEnd())
                allMatches.add(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd(), matchBlock);
            else
                reverseMatches.add(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd(), matchBlock);
        }

        // these will be reused in the loop:
        final ArrayList<Pair<Interval<IMatchBlock>, Interval<IMatchBlock>>> pairs = new ArrayList<>(); // (dominator, dominated) pairs
        final Set<Interval<IMatchBlock>> dominated = new HashSet<>();

        // remove all dominated matches:
        for (int i = 0; i < 2; i++) {
            final IntervalTree<IMatchBlock> matches = (i == 0 ? allMatches : reverseMatches);

            while (matches.size() > 1) {
                // determine list of pairs of (dominator, dominated)
                pairs.clear();
                dominated.clear();
                for (final Interval<IMatchBlock> interval : matches) {
                    final IMatchBlock match = interval.getData();
                    for (final Interval<IMatchBlock> otherInterval : matches.getIntervals(interval)) {
                        final IMatchBlock other = otherInterval.getData();
                        if (dominates.test(otherInterval, interval) && hasAllRequiredAnnotations(other, match, cNames)) {
                            pairs.add(new Pair<>(otherInterval, interval));
                            dominated.add(interval);
                            break; // found an other that dominates match...
                        }
                    }
                }
                // remove any match that is dominated by an undominated match:
                final Set<Interval<IMatchBlock>> toRemove = new HashSet<>();
                for (Pair<Interval<IMatchBlock>, Interval<IMatchBlock>> pair : pairs) {
                    if (!dominated.contains(pair.getFirst()))
                        toRemove.add(pair.getSecond()); // first is not dominated and it dominates the second, so remove second
                }

                if (toRemove.size() > 0) {
                    final ArrayList<Interval<IMatchBlock>> toKeep = new ArrayList<>(matches.size());
                    for (Interval<IMatchBlock> interval : matches.getAllIntervals(false)) { // get unsorted intervals
                        if (!toRemove.contains(interval))
                            toKeep.add(interval);
                    }
                    matches.setAll(toKeep);
                } else
                    break; // no change
            }
        }
        allMatches.addAll(reverseMatches.intervals());
        return allMatches;
    }

    /**
     * check that 'other' carries every classification annotation that 'match' carries —
     * otherwise it does not really dominate
     */
    private static boolean hasAllRequiredAnnotations(IMatchBlock other, IMatchBlock match, String[] cNames) {
        for (String cName : cNames) {
            if (match.getId(cName) > 0 && other.getId(cName) <= 0)
                return false;
        }
        return true;
    }
}
| 12,587 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Taxon2SpeciesMapping.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/Taxon2SpeciesMapping.java | /*
* Taxon2SpeciesMapping.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.util.CanceledException;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.classification.ClassificationManager;
import megan.classification.data.ClassificationFullTree;
import megan.classification.data.IntIntMap;
import megan.classification.data.Name2IdMap;
import megan.viewer.TaxonomicLevels;
/**
 * computes and maintains a taxon to species mapping
 * Daniel Huson, May 2017
 */
public class Taxon2SpeciesMapping {
    private final IntIntMap taxId2SpeciesId;

    /**
     * constructor; walks the full classification tree and records, for every taxon at or
     * below species rank, the id of its species ancestor
     *
     * @param cName    name of the classification
     * @param progress progress listener
     */
    public Taxon2SpeciesMapping(final String cName, final ProgressListener progress) throws CanceledException {
        final ClassificationFullTree fullTree = ClassificationManager.get(cName, true).getFullTree();
        final Name2IdMap name2IdMap = ClassificationManager.get(cName, true).getName2IdMap();
        taxId2SpeciesId = new IntIntMap(fullTree.getNumberOfNodes(), 0.999f);
        progress.setSubtask("Computing taxon-to-species map for '" + cName + "'");
        progress.setMaximum(fullTree.getNumberOfNodes());
        progress.setProgress(0);
        computeTax2SpeciesMapRec(fullTree.getRoot(), 0, taxId2SpeciesId, name2IdMap, progress);
        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();
    }

    /**
     * recursively compute the taxon-id to species-id map: once a species node is reached,
     * its id is propagated to the whole subtree
     *
     * @param v         current node
     * @param speciesId species id inherited from above, or 0 if none seen yet
     * @param map       map being filled
     * @param names     provides the rank of each taxon id
     * @param progress  progress listener
     */
    private void computeTax2SpeciesMapRec(final Node v, int speciesId, final IntIntMap map, Name2IdMap names, final ProgressListener progress) throws CanceledException {
        final int taxId = (Integer) v.getInfo();
        if (speciesId != 0) {
            map.put(taxId, speciesId);
        } else if (names.getRank(taxId) == TaxonomicLevels.getSpeciesId()) {
            // todo: prepare for taxonomies that use other ids
            speciesId = taxId;
            map.put(taxId, speciesId);
        }
        for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e))
            computeTax2SpeciesMapRec(e.getTarget(), speciesId, map, names, progress);
        progress.incrementProgress();
    }

    /**
     * gets the species, if defined, or 0
     *
     * @return species id or 0
     */
    public int getSpecies(int taxonId) {
        return taxId2SpeciesId.get(taxonId);
    }

    /**
     * gets the species id, if defined, or returns taxonId
     *
     * @return species id or taxonId
     */
    public int getSpeciesOrReturnTaxonId(int taxonId) {
        final int speciesId = taxId2SpeciesId.get(taxonId);
        if (speciesId > 0)
            return speciesId;
        else
            return taxonId;
    }
}
| 3,524 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
MinSupportAlgorithm.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/MinSupportAlgorithm.java | /*
* MinSupportAlgorithm.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.phylo.PhyloTree;
import jloda.util.Basic;
import jloda.util.CanceledException;
import jloda.util.progress.ProgressListener;
import megan.classification.IdMapper;
import megan.viewer.TaxonomyData;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * apply the minsupport algorithm
 * Daniel Huson, 7.2014
 */
public class MinSupportAlgorithm {
    private final Map<Integer, Integer> taxId2count;
    private final int minSupport;
    private final ProgressListener progressListener;
    private final PhyloTree tree;

    /**
     * applies the min-support algorithm to the given taxonomic analysis:
     * counts on taxa with too little support are moved to the closest ancestor with
     * sufficient support (or to 'unassigned'), and those taxa are removed from the map
     *
     * @param tax2count        taxon-id to count map; modified in place
     * @param minSupport       minimum number of reads required to keep a taxon
     * @param progressListener progress listener
     */
    public static void apply(Map<Integer, Integer> tax2count, int minSupport, final ProgressListener progressListener) {
        final MinSupportAlgorithm algorithm = new MinSupportAlgorithm(tax2count, minSupport, progressListener);
        try {
            final Map<Integer, Integer> low2high = algorithm.apply();
            for (Map.Entry<Integer, Integer> entry : low2high.entrySet()) {
                // move the low-support taxon's count onto its high-support ancestor:
                tax2count.merge(entry.getValue(), tax2count.get(entry.getKey()), Integer::sum);
            }
            tax2count.keySet().removeAll(low2high.keySet());
        } catch (CanceledException e) {
            Basic.caught(e);
        }
    }

    /**
     * constructor
     *
     * @param taxId2count      taxon-id to count map
     * @param minSupport       minimum number of reads required to keep a taxon
     * @param progressListener progress listener
     */
    public MinSupportAlgorithm(Map<Integer, Integer> taxId2count, int minSupport, final ProgressListener progressListener) {
        this.taxId2count = taxId2count;
        this.minSupport = minSupport;
        this.progressListener = progressListener;
        tree = TaxonomyData.getTree();
    }

    /**
     * applies the min support filter to taxon classification
     *
     * @return mapping of low-support taxon ids to the high-support taxon ids that absorb them
     */
    private Map<Integer, Integer> apply() throws CanceledException {
        final Map<Integer, Integer> orphan2AncestorMapping = new HashMap<>();
        progressListener.setMaximum(tree.getNumberOfNodes());
        progressListener.setProgress(0);

        final Set<Integer> orphans = new HashSet<>();
        computeOrphan2AncestorMappingRec(tree.getRoot(), orphan2AncestorMapping, orphans);

        // any orphans that popped out of the top of the taxonomy are mapped to unassigned:
        for (Integer taxId : orphans)
            orphan2AncestorMapping.put(taxId, IdMapper.UNASSIGNED_ID);
        orphans.clear();
        return orphan2AncestorMapping;
    }

    /**
     * recursively move all reads that land on taxa with too little support to higher level nodes
     *
     * @param v                       current node
     * @param orphan2AncestorMapping  mapping being built
     * @param orphans                 low-support taxa still looking for an ancestor; filled by this call
     * @return number of reads on or below this node
     */
    private int computeOrphan2AncestorMappingRec(Node v, Map<Integer, Integer> orphan2AncestorMapping, Set<Integer> orphans) throws CanceledException {
        progressListener.incrementProgress();
        final int taxId = (Integer) v.getInfo();
        if (taxId < 0)
            return 0; // ignore nohits and unassigned

        final Set<Integer> orphansBelow = new HashSet<>();
        int below = 0;
        for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e))
            below += computeOrphan2AncestorMappingRec(e.getTarget(), orphan2AncestorMapping, orphansBelow);

        final int count = taxId2count.getOrDefault(taxId, 0);
        if (below + count >= minSupport) {
            // this is a strong node, map all orphans to here:
            for (Integer orphan : orphansBelow)
                orphan2AncestorMapping.put(orphan, taxId);
        } else {
            // this node is not strong enough, pass all orphans up;
            // if it has reads of its own, it becomes an orphan too:
            if (count > 0)
                orphansBelow.add(taxId);
            orphans.addAll(orphansBelow);
        }
        return below + count;
    }
}
| 5,141 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingMultiGeneBestHit.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingMultiGeneBestHit.java | /*
* AssignmentUsingMultiGeneBestHit.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.util.interval.Interval;
import jloda.util.interval.IntervalTree;
import megan.classification.IdMapper;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashSet;
import java.util.Set;
/**
 * Assignment using multi-gene best hit: a single (long) read may cover several genes, so in
 * addition to the primary class id, further non-overlapping hits contribute additional class ids.
 * Daniel Huson, 2017
 */
public class AssignmentUsingMultiGeneBestHit implements IMultiAssignmentAlgorithm {
    private final IntervalTree<IMatchBlock> allMatches;       // matches aligned in forward orientation (start <= end)
    private final IntervalTree<IMatchBlock> reverseMatches;   // matches aligned in reverse orientation (start > end)
    private final Set<Integer> additionalClassIds = new HashSet<>();
    private final String cName;
    // NOTE(review): minOverlap is settable but never consulted by computeAcceptedMatches — confirm intended
    private int minOverlap = 18;

    /**
     * constructor
     *
     * @param cName name of the classification to assign for
     */
    public AssignmentUsingMultiGeneBestHit(String cName) {
        this.cName = cName;
        allMatches = new IntervalTree<>();
        reverseMatches = new IntervalTree<>();
    }

    /**
     * computes the id for a read from its matches
     *
     * @param activeMatches set of match indices to consider (null means: all available matches)
     * @param readBlock     the read and its matches
     * @return primary class id, or NOHITS_ID/UNASSIGNED_ID; additional ids are collected
     * in {@code additionalClassIds} for retrieval via {@link #getAdditionalClassIds}
     */
    public int computeId(BitSet activeMatches, IReadBlock readBlock) {
        additionalClassIds.clear();
        if (readBlock.getNumberOfMatches() == 0)
            return IdMapper.NOHITS_ID;
        if (activeMatches.cardinality() == 0)
            return IdMapper.UNASSIGNED_ID;
        int result = IdMapper.UNASSIGNED_ID;
        final IntervalTree<IMatchBlock> acceptedMatches = computeAcceptedMatches(activeMatches, readBlock);
        for (Interval<IMatchBlock> interval : acceptedMatches) {
            final int id = interval.getData().getId(cName);
            if (result == IdMapper.UNASSIGNED_ID && id > 0)
                result = id; // first positive id becomes the primary assignment
            else
                additionalClassIds.add(id); // further genes on the same read
        }
        return result;
    }

    /**
     * get additional classes found for this read
     *
     * @param index                   position in each assignment array to fill with a class id
     * @param numberOfClassifications used to set length of arrays returned in list
     * @param list                    of assignment arrays for use in DataProcessor
     * @return total number of additional classes
     */
    @Override
    public int getAdditionalClassIds(int index, int numberOfClassifications, ArrayList<int[]> list) {
        for (int classId : additionalClassIds) {
            final int[] array = new int[numberOfClassifications];
            array[index] = classId;
            list.add(array);
        }
        return additionalClassIds.size();
    }

    /**
     * get the LCA of two ids — not supported for best-hit assignment
     *
     * @throws RuntimeException always
     */
    @Override
    public int getLCA(int id1, int id2) {
        throw new RuntimeException("getLCA() called for assignment using best hit");
    }

    /**
     * computes the set of matches accepted for determining the class ids for this read:
     * forward and reverse matches are binned separately, and any match that is more than 50%
     * covered by a stronger match (higher bit score, ties broken by lower UId) is discarded.
     *
     * @param activeMatches match indices to consider, or null for all available matches
     * @param readBlock     the read and its matches
     * @return interval tree of accepted matches (forward and reverse combined)
     */
    private IntervalTree<IMatchBlock> computeAcceptedMatches(BitSet activeMatches, IReadBlock readBlock) {
        if (activeMatches == null) {
            activeMatches = new BitSet();
            for (var i = 0; i < readBlock.getNumberOfAvailableMatchBlocks(); i++) {
                activeMatches.set(i);
            }
        }
        allMatches.clear();
        reverseMatches.clear();
        for (var i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
            var matchBlock = readBlock.getMatchBlock(i);
            if (matchBlock.getId(cName) > 0) {
                if (matchBlock.getAlignedQueryStart() <= matchBlock.getAlignedQueryEnd()) {
                    allMatches.add(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd(), matchBlock);
                } else
                    reverseMatches.add(matchBlock.getAlignedQueryStart(), matchBlock.getAlignedQueryEnd(), matchBlock);
            }
        }
        // remove all matches covered by stronger ones.
        // Fix: this previously pre-allocated ArrayLists with an initial capacity of 10,000,000
        // elements on every call (tens of MB per read) and chunked deletions across multiple
        // lists; a single default-capacity list yields the same removals without the waste.
        for (var i = 0; i < 2; i++) {
            final IntervalTree<IMatchBlock> matches = (i == 0 ? allMatches : reverseMatches);
            final var toDelete = new ArrayList<Interval<IMatchBlock>>();
            for (var interval : matches) {
                var match = interval.getData();
                for (var otherInterval : matches.getIntervals(interval)) {
                    var other = otherInterval.getData();
                    if (otherInterval.overlap(interval) > 0.5 * interval.length() &&
                        (other.getBitScore() > match.getBitScore() || other.getBitScore() == match.getBitScore() && other.getUId() < match.getUId())) {
                        toDelete.add(interval);
                        break; // one dominating match suffices; also avoids duplicate entries
                    }
                }
            }
            if (!toDelete.isEmpty()) {
                matches.removeAll(toDelete);
            }
        }
        allMatches.addAll(reverseMatches.intervals());
        return allMatches;
    }

    public int getMinOverlap() {
        return minOverlap;
    }

    public void setMinOverlap(int minOverlap) {
        this.minOverlap = minOverlap;
    }
}
| 6,224 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
NaiveMatchBasedProfile.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/NaiveMatchBasedProfile.java | /*
* NaiveMatchBasedProfile.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Node;
import jloda.util.CanceledException;
import jloda.util.StringUtils;
import jloda.util.progress.ProgressListener;
import megan.classification.Classification;
import megan.classification.IdMapper;
import megan.core.Document;
import megan.data.IReadBlock;
import megan.data.IReadBlockIterator;
import megan.viewer.ClassificationViewer;
import megan.viewer.TaxonomyData;
import java.io.IOException;
import java.util.*;
/**
 * Compute a taxonomic profile by naive match-based analysis: each read distributes one unit of
 * weight equally over all taxa (projected to the requested rank) hit by its active matches.
 * Daniel Huson, 2015
 */
class NaiveMatchBasedProfile {
/**
 * compute a taxonomic profile at a given taxonomic rank using naive projection
 *
 * @param viewer     supplies the document whose reads and matches are profiled
 * @param level      target taxonomic rank (see TaxonomicLevels); taxa are projected up to this rank
 * @param minPercent taxa receiving less than this percentage of assigned reads are dropped
 * @return mapping of each taxon id to a single-element count array
 * todo: needs fixing
 */
public static Map<Integer, float[]> compute(final ClassificationViewer viewer, final int level, final float minPercent) throws IOException, CanceledException {
// fractional read counts per taxon id (a read split over k taxa contributes 1/k to each)
final Map<Integer, Float> rawProfile = new HashMap<>();
// process all reads:
final Document doc = viewer.getDocument();
final BitSet activeMatchesForTaxa = new BitSet();
// NOTE(review): taxon ids are used directly as bit indices here — assumes ids stay small
// enough for a BitSet; verify against the taxonomy used
final BitSet activeTaxa = new BitSet();
int totalAssigned = 0;
try (IReadBlockIterator it = doc.getConnector().getAllReadsIterator(0, 10, true, true)) {
final ProgressListener progressListener = doc.getProgressListener();
progressListener.setTasks("Computing profile", "Processing all reads and matches");
progressListener.setMaximum(it.getMaximumProgress());
progressListener.setProgress(0);
while (it.hasNext()) {
final IReadBlock readBlock = it.next();
if (readBlock.getComplexity() < doc.getMinComplexity()) {
// low-complexity reads are counted separately
Float rawValue = rawProfile.get(IdMapper.LOW_COMPLEXITY_ID);
rawProfile.put(IdMapper.LOW_COMPLEXITY_ID, rawValue == null ? 1f : rawValue + 1f);
} else if (readBlock.getNumberOfMatches() == 0) {
Float rawValue = rawProfile.get(IdMapper.NOHITS_ID);
rawProfile.put(IdMapper.NOHITS_ID, rawValue == null ? 1f : rawValue + 1f);
} else {
// determine which matches pass the document's score/expect/identity filters
ActiveMatches.compute(doc.getMinScore(), doc.getTopPercent(), doc.getMaxExpected(), doc.getMinPercentIdentity(), readBlock, Classification.Taxonomy, activeMatchesForTaxa);
activeTaxa.clear();
for (int i = activeMatchesForTaxa.nextSetBit(0); i != -1; i = activeMatchesForTaxa.nextSetBit(i + 1)) {
Integer taxonId = readBlock.getMatchBlock(i).getTaxonId();
taxonId = getAncestorAtRank(level, taxonId); // project to the requested rank (0 if none)
if (taxonId > 0) {
activeTaxa.set(taxonId);
}
}
if (activeTaxa.cardinality() == 0) { // none active
Float rawValue = rawProfile.get(IdMapper.UNASSIGNED_ID);
rawProfile.put(IdMapper.UNASSIGNED_ID, rawValue == null ? 1f : rawValue + 1f);
} else { // have some active matches:
// split the read's single unit of weight equally over all hit taxa
for (int taxonId = activeTaxa.nextSetBit(0); taxonId != -1; taxonId = activeTaxa.nextSetBit(taxonId + 1)) {
rawProfile.merge(taxonId, 1f / activeTaxa.cardinality(), Float::sum);
}
totalAssigned++;
}
}
progressListener.setProgress(it.getProgress());
}
}
// keep only taxa that received at least minPercent of the assigned reads
int minSupport = (int) (totalAssigned / 100.0 * minPercent);
int totalReads = 0;
final Map<Integer, float[]> profile = new HashMap<>();
for (Integer id : rawProfile.keySet()) {
Float rawValue = rawProfile.get(id);
if (rawValue != null) {
if (rawValue >= minSupport) {
int count = Math.round(rawValue);
profile.put(id, new float[]{count});
totalReads += count;
}
}
}
// NOTE(review): this adjustment writes the missing reads into rawProfile AFTER profile has
// been built, so it never reaches the returned profile — looks like a bug (see the class-level
// "todo: needs fixing"); presumably profile's UNASSIGNED_ID entry should be updated instead
if (totalReads < doc.getNumberOfReads()) {
float missing = doc.getNumberOfReads() - totalReads;
Float rawValue = rawProfile.get(IdMapper.UNASSIGNED_ID);
rawProfile.put(IdMapper.UNASSIGNED_ID, rawValue == null ? missing : rawValue + missing);
}
// report the resulting profile, sorted by taxon name, to stderr
float[] total = new float[1];
SortedMap<String, float[]> name2counts = new TreeMap<>();
for (int id : profile.keySet()) {
String name = TaxonomyData.getName2IdMap().get(id);
name2counts.put(name, profile.get(id));
}
for (String name : name2counts.keySet()) {
final float[] counts = name2counts.get(name);
System.err.println(name + "\t" + StringUtils.toString(counts, 0, counts.length, ", ", true));
for (int i = 0; i < 1; i++)
total[i] += counts[i];
}
System.err.println("Total assigned: " + StringUtils.toString(total, ", "));
return profile;
}
/**
 * gets the ancestor taxon id at the given rank, walking up the taxonomy from taxonId
 *
 * @param targetLevel taxonomic rank to project to
 * @param taxonId     starting taxon id
 * @return ancestor taxon id at the target rank, or 0 if the root is reached without finding one
 */
private static Integer getAncestorAtRank(int targetLevel, Integer taxonId) {
if (taxonId == 0)
return 0;
Node v = TaxonomyData.getTree().getANode(taxonId);
while (v != null) {
int vLevel = TaxonomyData.getTaxonomicRank(taxonId);
if (vLevel == targetLevel)
return taxonId;
// step up to the parent, if any
if (v.getInDegree() > 0) {
v = v.getFirstInEdge().getSource();
taxonId = (Integer) v.getInfo();
} else
break;
}
return 0;
}
}
| 6,617 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ActiveMatches.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/ActiveMatches.java | /*
* ActiveMatches.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import megan.viewer.TaxonomyData;
import java.util.BitSet;
/**
 * Determines the set of matches that are "active" for a given read, i.e. those that pass
 * all filter criteria (score, expect value, percent identity, disabled taxa, top-percent).
 * Daniel Huson, 1.2009
 */
public class ActiveMatches {
    /**
     * Computes the set of active matches for the given read.
     *
     * @param minScore                        minimum bit score a match must reach
     * @param topPercent                      only matches within this percentage of the best score are kept
     * @param maxExpected                     maximum expect value a match may have
     * @param minPercentIdentity              minimum percent identity (matches reporting 0 are not filtered)
     * @param readBlock                       the read and its matches
     * @param cName                           classification name; if non-null, a match must have a positive id for it
     * @param activeMatchesForClassification  output bit set; bit i set means match i is active
     */
    public static void compute(double minScore, double topPercent, double maxExpected, float minPercentIdentity, IReadBlock readBlock, String cName, BitSet activeMatchesForClassification) {
        activeMatchesForClassification.clear();
        // first pass: keep matches passing the per-match filters
        final int numberOfMatches = readBlock.getNumberOfAvailableMatchBlocks();
        for (int m = 0; m < numberOfMatches; m++) {
            final IMatchBlock match = readBlock.getMatchBlock(m);
            if (match.isIgnore() || TaxonomyData.isTaxonDisabled(cName, match.getTaxonId()))
                continue;
            if (match.getBitScore() < minScore || match.getExpected() > maxExpected)
                continue;
            if (match.getPercentIdentity() != 0 && match.getPercentIdentity() < minPercentIdentity)
                continue;
            if (cName == null || match.getId(cName) > 0)
                activeMatchesForClassification.set(m);
        }
        // second pass: determine the best score among the surviving matches
        float bestScore = 0;
        for (int m = activeMatchesForClassification.nextSetBit(0); m != -1; m = activeMatchesForClassification.nextSetBit(m + 1)) {
            bestScore = Math.max(bestScore, readBlock.getMatchBlock(m).getBitScore());
        }
        applyTopPercentFilter(topPercent, bestScore, minPercentIdentity, readBlock, activeMatchesForClassification);
    }

    /**
     * Applies the top-percent filter to a set of active matches, clearing matches whose
     * bit score falls below (1 - topPercent/100) of the best score.
     *
     * @param topPercent         percentage window below the best score; ignored unless in (0, 100)
     * @param bestScore          best bit score; if 0, it is recomputed from the active matches
     * @param minPercentIdentity matches reporting a nonzero identity below this value are exempt from clearing
     * @param readBlock          current read block
     * @param activeMatches      current set of active matches, modified in place
     */
    private static void applyTopPercentFilter(double topPercent, double bestScore, float minPercentIdentity, IReadBlock readBlock, BitSet activeMatches) {
        if (topPercent <= 0 || topPercent >= 100)
            return;
        if (bestScore == 0) {
            for (int m = activeMatches.nextSetBit(0); m != -1; m = activeMatches.nextSetBit(m + 1)) {
                final IMatchBlock match = readBlock.getMatchBlock(m);
                if (minPercentIdentity == 0 || match.getPercentIdentity() >= minPercentIdentity)
                    bestScore = Math.max(bestScore, match.getBitScore());
            }
        }
        // keep only hits within the given percentage of the top one
        final double threshold = (1 - topPercent / 100.0) * bestScore;
        for (int m = activeMatches.nextSetBit(0); m != -1; m = activeMatches.nextSetBit(m + 1)) {
            final IMatchBlock match = readBlock.getMatchBlock(m);
            if (match.getBitScore() < threshold && (minPercentIdentity == 0 || match.getPercentIdentity() >= minPercentIdentity))
                activeMatches.set(m, false);
        }
    }
}
| 4,089 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
TaxonPathAssignment.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/TaxonPathAssignment.java | /*
* TaxonPathAssignment.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.util.Pair;
import megan.classification.IdMapper;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import megan.viewer.TaxonomicLevels;
import megan.viewer.TaxonomyData;
import java.util.*;
/**
 * Computes the taxon assignment path for a read, with percent support per node.
 * Daniel Huson, 1.2009
 */
public class TaxonPathAssignment {
    /**
     * determine the taxon assignment path of a read from its matches
     *
     * @param readBlock     the read and its matches
     * @param activeMatches indices of matches to consider
     * @return list of (taxon id, percent support) pairs from the root down the
     * heaviest path; a single NOHITS or UNASSIGNED pair if nothing can be counted
     */
    private static List<Pair<Integer, Float>> computeTaxPath(IReadBlock readBlock, BitSet activeMatches) {
        final List<Pair<Integer, Float>> result = new LinkedList<>();
        if (readBlock.getNumberOfMatches() == 0) {
            result.add(new Pair<>(IdMapper.NOHITS_ID, 100f));
            return result;
        }
        final Map<Node, Integer> node2count = new HashMap<>();
        int totalCount = 0;
        if (activeMatches.cardinality() > 0) {
            // first pass counts only non-disabled taxa; if that yields nothing,
            // retry allowing disabled taxa (node2count is still empty at that point)
            totalCount = accumulateCounts(readBlock, activeMatches, node2count, false);
            if (totalCount == 0)
                totalCount = accumulateCounts(readBlock, activeMatches, node2count, true);
        }
        if (totalCount == 0) {
            result.add(new Pair<>(IdMapper.UNASSIGNED_ID, 100f));
            return result;
        }
        // walk from the root down the child with the highest count, reporting percent support
        Node v = TaxonomyData.getTree().getRoot();
        while (v != null) {
            Integer count = node2count.get(v);
            if (count == null)
                count = 0;
            final float percent = Math.min(100f, Math.round(100f * count / (float) totalCount));
            result.add(new Pair<>((Integer) v.getInfo(), percent));
            int bestCount = 0;
            Node bestChild = null;
            for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                final Integer childCount = node2count.get(e.getTarget());
                if (childCount != null && childCount > bestCount) {
                    bestChild = e.getTarget();
                    bestCount = childCount;
                }
            }
            v = bestChild; // null when no counted child remains -> loop ends
        }
        return result;
    }

    /**
     * adds one count to every node on the path from each matched taxon up to the root
     * (extracted from computeTaxPath, where this loop was duplicated verbatim for the
     * allow-disabled retry)
     *
     * @param readBlock     the read and its matches
     * @param activeMatches indices of matches to consider
     * @param node2count    output: per-node counts, updated in place
     * @param allowDisabled if true, disabled taxa are counted as well
     * @return number of matches counted
     */
    private static int accumulateCounts(IReadBlock readBlock, BitSet activeMatches, Map<Node, Integer> node2count, boolean allowDisabled) {
        int counted = 0;
        for (int i = activeMatches.nextSetBit(0); i != -1; i = activeMatches.nextSetBit(i + 1)) {
            final int taxonId = readBlock.getMatchBlock(i).getTaxonId();
            if (taxonId > 0 && (allowDisabled || !TaxonomyData.isTaxonDisabled(taxonId))) {
                counted++;
                Node v = TaxonomyData.getTree().getANode(taxonId);
                while (v != null) {
                    node2count.merge(v, 1, Integer::sum);
                    v = (v.getInDegree() > 0 ? v.getFirstInEdge().getSource() : null);
                }
            }
        }
        return counted;
    }

    /**
     * report the taxonomic path and percent support as a single string
     *
     * @param readBlock            the read and its matches
     * @param activeMatchesForTaxa indices of matches to consider
     * @param showTaxonIds         report taxon ids instead of names
     * @param showRank             prefix each taxon with its rank letter (e.g. "g__")
     * @param useOfficialRanksOnly restrict output to the d/p/c/o/f/g/s ranks, filling gaps with "unknown"
     * @param showPercent          append the percent support after each taxon
     * @return formatted path string
     */
    public static String getPathAndPercent(IReadBlock readBlock, BitSet activeMatchesForTaxa, boolean showTaxonIds, boolean showRank, boolean useOfficialRanksOnly, boolean showPercent) {
        final StringBuilder buf = new StringBuilder();
        final List<Pair<Integer, Float>> path = TaxonPathAssignment.computeTaxPath(readBlock, activeMatchesForTaxa);
        final String expectedPath = "dpcofgs";
        int expectedIndex = 0;
        for (Pair<Integer, Float> pair : path) {
            final Integer taxId = pair.getFirst();
            // NOTE(review): the replacement string "\\;" denotes just ";" in Java regex
            // replacement syntax, so this replaceAll is effectively a no-op; "\\\\;" was
            // presumably intended to escape semicolons — confirm before changing output
            final String taxonName = (showTaxonIds ? "" + taxId : TaxonomyData.getName2IdMap().get(taxId)).replaceAll(";", "\\;");
            if (taxonName.equals("root"))
                continue;
            final int rank = TaxonomyData.getTaxonomicRank(taxId);
            String rankName;
            if (rank != 0) {
                rankName = TaxonomicLevels.getName(rank);
                if (rankName == null)
                    rankName = "?";
            } else {
                rankName = "?";
            }
            if (useOfficialRanksOnly && rankName.equals("?"))
                continue;
            if (showRank && !rankName.equals("?")) {
                char letter = Character.toLowerCase(rankName.charAt(0));
                if (rank == 127) // domain
                    letter = 'd';
                if (useOfficialRanksOnly) {
                    // emit "x__unknown" placeholders for any official ranks skipped on the path
                    while (expectedIndex < expectedPath.length() && letter != expectedPath.charAt(expectedIndex)) {
                        buf.append(String.format("%c__unknown", expectedPath.charAt(expectedIndex)));
                        if (showPercent)
                            buf.append(String.format("; %d;", (int) (float) pair.getSecond()));
                        expectedIndex++;
                    }
                    expectedIndex++;
                }
                buf.append(String.format("%c__%s", letter, taxonName));
                if (showPercent)
                    buf.append(String.format("; %d;", (int) (float) pair.getSecond()));
            } else {
                buf.append(" ").append(taxonName).append("; ");
                if (showPercent)
                    buf.append((int) (float) pair.getSecond()).append(";");
            }
        }
        return buf.toString();
    }
}
| 7,093 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingWeightedLCACreator.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingWeightedLCACreator.java | /*
* AssignmentUsingWeightedLCACreator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
import jloda.fx.util.ProgramExecutorService;
import jloda.util.Basic;
import jloda.util.CanceledException;
import jloda.util.CollectionUtils;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.core.Document;
import megan.daa.connector.DAAConnector;
import megan.daa.connector.MatchBlockDAA;
import megan.daa.connector.ReadBlockDAA;
import megan.data.IConnector;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import megan.data.IReadBlockIterator;
import java.io.IOException;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * Sets up the weighted-LCA algorithm: performs a parallel pre-pass over all reads to compute,
 * for every reference sequence, the number of reads unambiguously assigned to a single species
 * that align to it. These weights are later used by AssignmentUsingWeightedLCA.
 * Daniel Huson, 2.2016
 */
public class AssignmentUsingWeightedLCACreator implements IAssignmentAlgorithmCreator {
// weights by numeric subject id, used when reading from a DAA file
private int[] refId2weight;
private Map<String, Integer> ref2weight; // map reference sequence to number of reads associated with it
// guards writes to ref2weight / refId2weight from the worker threads
private final Object syncRef = new Object();
private final boolean useIdentityFilter;
private final float percentToCover;
private final String cName;
private final Taxon2SpeciesMapping taxon2SpeciesMapping;
/**
 * constructor; immediately runs the (potentially long) weight computation over all reads
 *
 * @param cName               name of the classification to assign for
 * @param doc                 document supplying the reads, matches and filter parameters
 * @param usingIdentityFilter whether to apply the percent-identity filter during assignment
 * @param percentToCover      percentage of total weight an assignment must cover (values >= 99.9999 are treated as 100)
 * @throws IOException       if reading the document fails
 * @throws CanceledException if the user cancels
 */
public AssignmentUsingWeightedLCACreator(final String cName, final Document doc, final boolean usingIdentityFilter, final float percentToCover) throws IOException, CanceledException {
this.cName = cName;
this.useIdentityFilter = usingIdentityFilter;
this.taxon2SpeciesMapping = new Taxon2SpeciesMapping(cName, doc.getProgressListener());
this.percentToCover = (percentToCover >= 99.9999 ? 100 : percentToCover);
System.err.printf("Using 'Weighted LCA' assignment (%.1f %%) on %s%n", this.percentToCover, cName);
computeWeights(doc);
}
/**
 * compute all the reference weights: producer/consumer over all reads; each worker determines
 * whether a read maps unambiguously to one species and, if so, adds the read's weight to every
 * reference of that species that the read matches
 *
 * @param doc document supplying the reads and filter parameters
 * @throws IOException       if reading fails
 * @throws CanceledException if the user cancels
 */
private void computeWeights(final Document doc) throws IOException, CanceledException {
final IConnector connector = doc.getConnector();
// DAA files have compact integer subject ids, so an array can be used instead of a hash map
if (connector instanceof DAAConnector) {
DAAConnector daaConnector = (DAAConnector) connector;
refId2weight = new int[(int) daaConnector.getDAAHeader().getDbSeqsUsed()];
} else
ref2weight = new HashMap<>(10000000);
final int numberOfThreads = ProgramExecutorService.getNumberOfCoresToUse();
final ExecutorService executorService = Executors.newFixedThreadPool(ProgramExecutorService.getNumberOfCoresToUse());
final CountDownLatch countDownLatch = new CountDownLatch(numberOfThreads);
// per-thread tallies, summed after the join to avoid contention
final long[] totalMatches = new long[numberOfThreads];
final long[] totalWeight = new long[numberOfThreads];
final ArrayBlockingQueue<IReadBlock> queue = new ArrayBlockingQueue<>(1000);
// sentinel object marking end-of-input; one is queued per worker thread
final IReadBlock sentinel = new ReadBlockDAA();
final ProgressListener progress = doc.getProgressListener();
progress.setSubtask("Computing weights");
for (int i = 0; i < numberOfThreads; i++) {
final int threadNumber = i;
executorService.submit(() -> {
try {
final BitSet activeMatches = new BitSet(); // pre filter matches for taxon identification
while (true) {
final IReadBlock readBlock = queue.take();
if (readBlock == sentinel)
break;
if (progress.isUserCancelled())
break;
ActiveMatches.compute(doc.getMinScore(), doc.getTopPercent(), doc.getMaxExpected(), doc.getMinPercentIdentity(), readBlock, cName, activeMatches);
totalMatches[threadNumber] += activeMatches.cardinality();
int speciesId = 0; // assigns weights at the species level
// first pass: check whether all matches agree on a single species
for (int i1 = activeMatches.nextSetBit(0); i1 != -1; i1 = activeMatches.nextSetBit(i1 + 1)) {
final IMatchBlock matchBlock = readBlock.getMatchBlock(i1);
int id = matchBlock.getId(cName);
if (id > 0) {
id = taxon2SpeciesMapping.getSpecies(id); // todo: there is a potential problem here: what if the match is to a higher rank and that is incompatible with the majority species?
if (id > 0) {
if (speciesId == 0)
speciesId = id;
else if (speciesId != id) {
speciesId = -1; // means mismatch
break;
}
}
}
}
// second pass: if unambiguous, credit the read's weight to every reference of that species
if (speciesId > 0) {
for (int i1 = activeMatches.nextSetBit(0); i1 != -1; i1 = activeMatches.nextSetBit(i1 + 1)) {
final IMatchBlock matchBlock = readBlock.getMatchBlock(i1);
int id = matchBlock.getId(cName);
if (id > 0) {
id = taxon2SpeciesMapping.getSpecies(id);
if (id == speciesId) {
if (ref2weight != null) {
final String ref = matchBlock.getTextFirstWord();
synchronized (syncRef) {
final Integer count = Basic.replaceNull(ref2weight.get(ref), 0);
ref2weight.put(ref, count + Math.max(1, readBlock.getReadWeight()));
}
} else {
final int refId = ((MatchBlockDAA) matchBlock).getSubjectId();
synchronized (syncRef) {
refId2weight[refId] += Math.max(1, readBlock.getReadWeight());
}
}
totalWeight[threadNumber] += Math.max(1, readBlock.getReadWeight());
}
}
}
}
}
} catch (Exception ex) {
Basic.caught(ex);
} finally {
countDownLatch.countDown();
}
});
}
/*
 * feed the queue:
 */
try (final IReadBlockIterator it = connector.getAllReadsIterator(doc.getMinScore(), doc.getMaxExpected(), false, true)) {
progress.setMaximum(it.getMaximumProgress());
progress.setProgress(0);
while (it.hasNext()) {
queue.put(it.next());
progress.setProgress(it.getProgress());
}
for (int i = 0; i < numberOfThreads; i++) { // add one sentinel for each thread
queue.put(sentinel);
}
} catch (Exception e) {
Basic.caught(e);
}
// await worker threads:
try {
countDownLatch.await();
} catch (InterruptedException e) {
Basic.caught(e);
} finally {
executorService.shutdownNow();
}
if (progress.isUserCancelled())
throw new CanceledException();
if (progress instanceof ProgressPercentage)
progress.reportTaskCompleted();
System.err.printf("Total matches: %,12d %n", CollectionUtils.getSum(totalMatches));
System.err.printf("Total references: %,12d %n", (ref2weight != null ? ref2weight.size() : refId2weight.length));
System.err.printf("Total weights: %,12d %n", CollectionUtils.getSum(totalWeight));
System.err.println();
}
/**
 * creates a new assignment algorithm
 * use this repeatedly to create multiple assignment algorithms that can be run in parallel
 *
 * @return assignment algorithm sharing the precomputed weights
 */
public AssignmentUsingWeightedLCA createAssignmentAlgorithm() {
return new AssignmentUsingWeightedLCA(cName, refId2weight, ref2weight, taxon2SpeciesMapping, percentToCover, useIdentityFilter);
}
}
| 9,546 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AssignmentUsingBestHitCreator.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/algorithms/AssignmentUsingBestHitCreator.java | /*
* AssignmentUsingBestHitCreator.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.algorithms;
/**
 * Factory for best-hit assignment algorithms.
 * Daniel Huson, 3.2016
 */
public class AssignmentUsingBestHitCreator implements IAssignmentAlgorithmCreator {
    private final String cName;
    private final String fileName;

    /**
     * Sets up the factory for a given classification and input file.
     *
     * @param cName    name of the classification to assign for
     * @param fileName file the reads come from
     */
    public AssignmentUsingBestHitCreator(String cName, String fileName) {
        this.fileName = fileName;
        this.cName = cName;
        System.err.println("Using Best-Hit algorithm for binning: " + cName);
    }

    /**
     * Creates a fresh assignment algorithm instance; call once per worker thread.
     *
     * @return a new best-hit assignment algorithm
     */
    @Override
    public IAssignmentAlgorithm createAssignmentAlgorithm() {
        return new AssignmentUsingBestHit(cName, fileName);
    }
}
| 1,581 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReadData.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/ReadData.java | /*
* ReadData.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
/**
 * Read data object: a read's id, name, assembled segment and its matches.
 * Daniel Huson, 5.2015
 */
public class ReadData {
    private final int id;
    private final String name;
    private String segment;       // sequence segment; null until set
    private MatchData[] matches;  // matches for this read; null until set

    /**
     * constructor
     *
     * @param id   numeric read id
     * @param name read name
     */
    public ReadData(int id, String name) {
        this.id = id;
        this.name = name;
    }

    /**
     * Multi-line debug representation: header line plus one line per match.
     * Fix: previously threw a NullPointerException when called before setMatches().
     */
    public String toString() {
        StringBuilder buf = new StringBuilder();
        buf.append("ReadData: name='").append(name).append("' seg='").append(segment).append("'\n");
        if (matches != null) {
            for (MatchData match : matches) {
                buf.append("\t").append(match.toString()).append("\n");
            }
        }
        return buf.toString();
    }

    public int getId() {
        return id;
    }

    public String getName() {
        return name;
    }

    public String getSegment() {
        return segment;
    }

    public void setSegment(String segment) {
        this.segment = segment;
    }

    public MatchData[] getMatches() {
        return matches;
    }

    public void setMatches(MatchData[] matches) {
        this.matches = matches;
    }
}
| 1,877 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReadAssembler.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/ReadAssembler.java | /*
* ReadAssembler.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.graph.*;
import jloda.graph.io.GraphGML;
import jloda.util.*;
import jloda.util.progress.ProgressListener;
import megan.assembly.align.SimpleAligner4DNA;
import megan.core.Director;
import java.io.IOException;
import java.io.Writer;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
/**
* assembler for all reads assigned to a particular class
* <p>
* Daniel Huson, 5.2015
*/
public class ReadAssembler {
private Graph overlapGraph;
private NodeArray<String> node2ReadNameMap;
private ReadData[] readId2ReadData;
private Node[][] paths;
private String label;
private ArrayList<Pair<String, String>> contigs;
private List<Integer>[] readId2ContainedReads;
private final boolean verbose;
/**
* constructor
*/
public ReadAssembler(boolean verbose) {
this.verbose = verbose;
}
/**
* build the overlap graph
*
*/
public void computeOverlapGraph(String label, int minOverlap, List<ReadData> readData, ProgressListener progress) throws IOException, CanceledException {
this.label = label;
final OverlapGraphBuilder overlapGraphBuilder = new OverlapGraphBuilder(minOverlap, verbose);
overlapGraphBuilder.apply(readData, progress);
overlapGraph = overlapGraphBuilder.getOverlapGraph();
{
if (verbose)
System.err.print("Checking for cycles: ");
final var edgesRemoved = DirectedCycleBreaker.apply(overlapGraph);
if (verbose) {
System.err.println(edgesRemoved + (edgesRemoved > 0 ? " removed" : ""));
}
}
readId2ReadData = overlapGraphBuilder.getReadId2ReadData();
node2ReadNameMap = overlapGraphBuilder.getNode2ReadNameMap();
readId2ContainedReads = overlapGraphBuilder.getReadId2ContainedReads();
final PathExtractor pathExtractor = new PathExtractor(overlapGraph, readId2ContainedReads);
pathExtractor.apply(progress);
}
/**
* show the overlap graph
*
*/
public void showOverlapGraph(Director dir, ProgressListener progress) throws CanceledException {
final var overlapGraphViewer = new OverlapGraphViewer(dir, overlapGraph, node2ReadNameMap, paths);
overlapGraphViewer.apply(progress);
}
/**
* write the overlap graph
*
*/
public Pair<Integer, Integer> writeOverlapGraph(Writer w) throws IOException, CanceledException {
final NodeArray<String> names = new NodeArray<>(overlapGraph);
final NodeArray<String> sequences = new NodeArray<>(overlapGraph);
for (var v:overlapGraph.nodes()){
var readData = readId2ReadData[(Integer) v.getInfo()];
sequences.put(v, readData.getSegment());
names.put(v, readData.getName());
}
final Map<String, NodeArray<String>> label2nodes = new TreeMap<>();
label2nodes.put("label", names);
label2nodes.put("sequence", sequences);
final EdgeArray<String> overlap = new EdgeArray<>(overlapGraph);
for (Edge e = overlapGraph.getFirstEdge(); e != null; e = e.getNext()) {
if(e.getInfo()!=null)
overlap.put(e, e.getInfo().toString());
}
final Map<String, EdgeArray<String>> label2edges = new TreeMap<>();
label2edges.put("label", null);
label2edges.put("overlap", overlap);
GraphGML.writeGML(overlapGraph,"Overlap graph generated by MEGAN6", label, true,1,w, label2nodes, label2edges);
return new Pair<>(this.overlapGraph.getNumberOfNodes(), this.overlapGraph.getNumberOfEdges());
}
/**
* assemble all reads provided by the iterator using perfect overlaps of the given minimum length
*
* @return number of contigs and singletons
*/
public int computeContigs(int minReads, double minCoverage, int minLength, ProgressListener progress) throws IOException, CanceledException {
final var pathExtractor = new PathExtractor(overlapGraph, readId2ContainedReads);
pathExtractor.apply(progress);
paths = pathExtractor.getPaths();
final var contigBuilder = new ContigBuilder(pathExtractor.getPaths(), readId2ContainedReads);
contigBuilder.apply(readId2ReadData, minReads, minCoverage, minLength, progress);
contigs = contigBuilder.getContigs();
return contigBuilder.getCountContigs();
}
/**
* write contigs
*
*/
public void writeContigs(Writer w, ProgressListener progress) throws CanceledException, IOException {
progress.setSubtask("Writing contigs");
progress.setMaximum(contigs.size());
progress.setProgress(0);
for (var pair : contigs) {
w.write(pair.getFirst().trim());
w.write("\n");
w.write(pair.getSecond().trim());
w.write("\n");
progress.incrementProgress();
}
w.flush();
progress.reportTaskCompleted();
}
/**
* report contigs stats
*/
public void reportContigStats() {
if (contigs.size() == 0) {
System.err.printf("Contigs:%,9d%n", 0);
} else {
final var sizes = new int[contigs.size()];
var pos = 0;
for (var pair : contigs) {
sizes[pos++] = pair.getSecond().length();
}
Arrays.sort(sizes);
System.err.printf("Contigs:%,9d%n", sizes.length);
System.err.printf("Min len:%,9d%n", sizes[0]);
System.err.printf("Med len:%,9d%n", sizes[sizes.length / 2]);
System.err.printf("Max len:%,9d%n", sizes[sizes.length - 1]);
}
}
public ArrayList<Pair<String, String>> getContigs() {
return contigs;
}
/**
* computes all pairwise overlaps between contigs and then merges contigs
*
* @param contigs input list of contigs and output list of merged contigs
* @return number of resulting
*/
public static int mergeOverlappingContigs(int maxNumberOfThreads, final ProgressListener progress, final float minPercentIdentityToMergeContigs, final int minOverlap, final ArrayList<Pair<String, String>> contigs, final boolean verbose) throws CanceledException {
progress.setSubtask("Overlapping contigs");
final ArrayList<Pair<String, String>> sortedContigs = new ArrayList<>(contigs.size());
sortedContigs.addAll(contigs);
sortedContigs.sort(StringUtils.getComparatorDecreasingLengthOfSecond());
contigs.clear();
final var overlapGraph = new Graph();
final List<Integer>[] contigId2ContainedContigs = new List[sortedContigs.size()];
final var containedContigs = new BitSet();
// main parallel computation:
if (sortedContigs.size() > 0) {
final var numberOfThreads = (Math.min(sortedContigs.size(), Math.min(Runtime.getRuntime().availableProcessors() - 1, maxNumberOfThreads)));
final var notCanceled = new Single<>(true);
progress.setMaximum(sortedContigs.size() / numberOfThreads);
progress.setProgress(0);
final var contig2Node = new HashMap<Integer, Node>();
for (var i = 0; i < sortedContigs.size(); i++) {
final Node v = overlapGraph.newNode(i);
contig2Node.put(i, v);
}
final var service = Executors.newFixedThreadPool(numberOfThreads);
final var countDownLatch = new CountDownLatch(numberOfThreads);
try {
for (int t = 0; t < numberOfThreads; t++) {
final int threadNumber = t;
service.submit(() -> {
try {
final var simpleAlignerDNA = new SimpleAligner4DNA();
simpleAlignerDNA.setMinPercentIdentity(minPercentIdentityToMergeContigs);
final var overlap = new Single<>(0);
for (var i = threadNumber; i < sortedContigs.size(); i += numberOfThreads) {
final var iContig = sortedContigs.get(i).getSecond();
final var iBytes = iContig.getBytes();
for (var j = 0; j < i; j++) {
final var jBytes = sortedContigs.get(j).getSecond().getBytes();
if (iBytes.length > jBytes.length)
throw new RuntimeException("Internal error: contig i is longer than contig j");
final var overlapType = simpleAlignerDNA.getOverlap(iBytes, jBytes, overlap);
// if contained or nearly contained, remove
if (overlapType == SimpleAligner4DNA.OverlapType.QueryContainedInRef) {
synchronized (contigId2ContainedContigs) {
var contained = contigId2ContainedContigs[j];
if (contained == null) {
contained = new ArrayList<>();
contigId2ContainedContigs[j] = contained;
}
contained.add(i);
containedContigs.set(i);
}
} else if (overlapType == SimpleAligner4DNA.OverlapType.QuerySuffix2RefPrefix && overlap.get() >= minOverlap) {
final var v = contig2Node.get(i);
final var w = contig2Node.get(j);
synchronized (overlapGraph) {
overlapGraph.newEdge(v, w, overlap.get());
}
} else if (overlapType == SimpleAligner4DNA.OverlapType.QueryPrefix2RefSuffix && overlap.get() >= minOverlap) {
final var v = contig2Node.get(i);
final var w = contig2Node.get(j);
synchronized (overlapGraph) {
overlapGraph.newEdge(w, v, overlap.get());
}
}
}
}
if (threadNumber == 0)
progress.incrementProgress();
} catch (CanceledException e) {
notCanceled.set(false);
while (countDownLatch.getCount() > 0)
countDownLatch.countDown();
} catch (Exception e) {
Basic.caught(e);
} finally {
countDownLatch.countDown();
}
}
);
}
try {
countDownLatch.await();
} catch (InterruptedException e) {
Basic.caught(e);
}
} finally {
service.shutdownNow();
}
}
if (verbose)
System.err.printf("Contained contigs:%6d%n", containedContigs.cardinality());
if (containedContigs.cardinality() > 0) // delete all contained contigs from graph
{
for (var v : overlapGraph.nodes()) {
if (containedContigs.get((Integer) v.getInfo())) {
overlapGraph.deleteNode(v);
}
}
}
{
if (verbose)
System.err.print("Checking for cycles: ");
final int edgesRemoved = DirectedCycleBreaker.apply(overlapGraph);
if (verbose) {
System.err.println(edgesRemoved + (edgesRemoved > 0 ? " removed" : ""));
}
}
if (verbose) {
System.err.printf("Contig graph nodes:%5d%n", overlapGraph.getNumberOfNodes());
System.err.printf("Contig graph edges:%5d%n", overlapGraph.getNumberOfEdges());
}
final var pathExtractor = new PathExtractor(overlapGraph, contigId2ContainedContigs);
pathExtractor.apply(progress);
final var paths = pathExtractor.getPaths();
final var used = new BitSet();
int countMergedContigs = 0;
for (var path : paths) {
if (path.length == 1) {
final var current = path[0];
if (!containedContigs.get((Integer) current.getInfo())) {
final var contigId = (Integer) current.getInfo(); // info is contig-Id (times -1, if reverse strand)
contigs.add(sortedContigs.get(contigId));
}
} else if (path.length > 1) {
var verboseMerging = false;
if (verboseMerging)
System.err.println("Merging " + path.length + " contigs...");
final var headerBuffer = new StringBuilder();
final var sequenceBuffer = new StringBuilder();
var prev = path[0];
var contigId = Math.abs((Integer) prev.getInfo());
if (used.get(contigId))
continue;
else
used.set(contigId);
var prevContig = sortedContigs.get(contigId);
headerBuffer.append("[").append(StringUtils.skipFirstWord(prevContig.getFirst()));
sequenceBuffer.append(prevContig.getSecond());
var length = prevContig.getSecond().length();
for (var i = 1; i < path.length; i++) { // start at 1
final var current = path[i];
contigId = (Integer) current.getInfo();
used.set(contigId);
final var overlap = (Integer) overlapGraph.getCommonEdge(prev, current).getInfo();
final var currentContig = sortedContigs.get(contigId);
headerBuffer.append(" + (-").append(overlap).append(") + ").append(StringUtils.skipFirstWord(currentContig.getFirst()));
sequenceBuffer.append(currentContig.getSecond().substring(overlap));
length += currentContig.getSecond().length() - overlap;
prev = current;
}
headerBuffer.append("]");
if (verboseMerging) {
System.err.println("Input contigs:");
for (var i = 0; i < path.length; i++) {
var p = path[i];
System.err.println(sortedContigs.get((Integer) p.getInfo()));
if (i < path.length - 1) {
System.err.println("Overlap to next: " + overlapGraph.getCommonEdge(path[i], path[i + 1]).getInfo());
}
}
}
final var pair = new Pair<>("length=" + length + " " + headerBuffer, sequenceBuffer.toString());
contigs.add(pair);
if (verboseMerging) {
System.err.println("Output contig:");
System.err.println(pair);
}
countMergedContigs++;
}
}
for (var current : pathExtractor.getSingletons()) {
if (!containedContigs.get((Integer) current.getInfo())) {
final var contigId = (Integer) current.getInfo(); // info is contig-Id (times -1, if reverse strand)
contigs.add(sortedContigs.get(contigId));
}
}
if (verbose)
System.err.printf("Merged contigs: %6d%n", countMergedContigs);
// sort and renumber contigs:
contigs.sort(StringUtils.getComparatorDecreasingLengthOfSecond());
var contigNumber = 1;
for (var contig : contigs) {
contig.setFirst(String.format(">Contig-%06d %s", contigNumber++, (contig.getFirst().startsWith(">") ? StringUtils.skipFirstWord(contig.getFirst()) : contig.getFirst())));
}
return contigs.size();
}
}
| 17,748 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReadDataCollector.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/ReadDataCollector.java | /*
* ReadDataCollector.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.seq.SequenceUtils;
import jloda.util.CanceledException;
import jloda.util.StringUtils;
import jloda.util.progress.ProgressListener;
import megan.data.IMatchBlock;
import megan.data.IReadBlock;
import megan.data.IReadBlockIterator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
/**
 * collects all reads for gene-centric (gc) assembly: drains a read-block iterator
 * and converts each read plus its best-scoring matches into a ReadData object.
 * Created by huson on 8/22/16.
 */
public class ReadDataCollector {
    /**
     * collect all read data associated with the given iterator
     *
     * @param iterator source of read blocks; consumed completely
     * @param progress progress listener
     * @return list of read data, one per read, in iteration order
     */
    public static List<ReadData> apply(final IReadBlockIterator iterator, final ProgressListener progress) throws IOException, CanceledException {
        progress.setSubtask("Collecting reads:");
        final var list = new LinkedList<ReadData>();
        var countReads = 0; // also serves as the sequential read id
        {
            progress.setMaximum(iterator.getMaximumProgress());
            progress.setProgress(0);
            while (iterator.hasNext()) {
                final var readBlock = iterator.next();
                //System.err.println(readBlock.getReadName()+" -> "+countReads);
                list.add(createReadData(countReads++, readBlock));
                progress.setProgress(iterator.getProgress());
            }
        }
        progress.reportTaskCompleted();
        return list;
    }

    /**
     * creates the data object associated with a given read and its matches.
     * Only matches that share the best bit score AND the best match's query
     * coordinates (same segment, same orientation) are kept; if the read has
     * no matches at all, the segment and matches of the result remain null.
     *
     * @param id        sequential read id
     * @param readBlock the read and its matches
     * @return read data
     */
    private static ReadData createReadData(int id, IReadBlock readBlock) throws IOException {
        var readData = new ReadData(id, readBlock.getReadName());
        // find the match with the highest bit score:
        var best = -1;
        var bestScore = 0f;
        for (var m = 0; m < readBlock.getNumberOfAvailableMatchBlocks(); m++) {
            if (readBlock.getMatchBlock(m).getBitScore() > bestScore) {
                best = m;
                bestScore = readBlock.getMatchBlock(m).getBitScore();
            }
        }
        if (best >= 0) {
            // query coordinates are 1-based; start > end indicates the reverse strand
            var bestCoordinates = getQueryCoordinates(readBlock.getMatchBlock(best));
            if (bestCoordinates[0] < bestCoordinates[1])
                readData.setSegment(readBlock.getReadSequence().substring(bestCoordinates[0] - 1, bestCoordinates[1]));
            else
                readData.setSegment(SequenceUtils.getReverseComplement(readBlock.getReadSequence().substring(bestCoordinates[1] - 1, bestCoordinates[0])));
            final var matches = new ArrayList<MatchData>(readBlock.getNumberOfAvailableMatchBlocks());
            for (var m = 0; m < readBlock.getNumberOfAvailableMatchBlocks(); m++) {
                if (readBlock.getMatchBlock(m).getBitScore() == bestScore) {
                    final IMatchBlock matchBlock = readBlock.getMatchBlock(m);
                    final int[] queryCoordinates = getQueryCoordinates(matchBlock);
                    if (queryCoordinates[0] == bestCoordinates[0] && queryCoordinates[1] == bestCoordinates[1]) { // must all reference same segment in same orientation
                        int[] refCoordinates = getReferenceCoordinates(matchBlock);
                        matches.add(new MatchData(readData, StringUtils.getFirstWord(matchBlock.getText()), refCoordinates[0], refCoordinates[1], matchBlock.getText(), matchBlock.getBitScore()));
                    }
                }
            }
            readData.setMatches(matches.toArray(new MatchData[0]));
        }
        return readData;
    }

    /**
     * get start and end query coordinates of a match
     *
     * @return query coordinates, 1-based
     */
    private static int[] getQueryCoordinates(IMatchBlock matchBlock) {
        var start = matchBlock.getAlignedQueryStart();
        var end = matchBlock.getAlignedQueryEnd();
        return new int[]{start, end};
    }

    /**
     * get start and end reference coordinates of a match by parsing the
     * first and last "Sbjct" lines of the BLAST-style alignment text.
     * Each such line is expected to have exactly 4 tokens: label, start, sequence, end.
     *
     * @return reference coordinates, 1-based
     * @throws IOException if no parsable Sbjct lines are found
     */
    private static int[] getReferenceCoordinates(IMatchBlock matchBlock) throws IOException {
        // try "Sbjct:" first, then fall back to the bare "Sbjct" label
        var tokensFirst = getLineTokens("Sbjct:", matchBlock.getText(), false);
        var tokensLast = getLineTokens("Sbjct:", matchBlock.getText(), true);
        if (tokensFirst == null) {
            tokensFirst = getLineTokens("Sbjct", matchBlock.getText(), false);
            tokensLast = getLineTokens("Sbjct", matchBlock.getText(), true);
        }
        if (tokensFirst == null || tokensFirst.length != 4 || tokensLast == null || tokensLast.length != 4) {
            throw new IOException("Failed to parse sbjct line for match:\n" + matchBlock.getText());
        }
        var a = Integer.parseInt(tokensFirst[1]); // start coordinate on first Sbjct line
        var b = Integer.parseInt(tokensLast[3]);  // end coordinate on last Sbjct line
        return new int[]{a, b};
    }

    /**
     * get all tokens on the first line that begins with start
     *
     * @param start prefix that identifies the line (matched right after a newline)
     * @param text  text to search
     * @param last  if true, returns last such line rather than first
     * @return whitespace-split tokens, or null if no such line exists
     */
    private static String[] getLineTokens(String start, String text, boolean last) {
        // note: only matches lines preceded by '\n', so a match on the very first line is not found
        var a = (last ? text.lastIndexOf("\n" + start) : text.indexOf("\n" + start));
        if (a != -1) {
            var b = text.indexOf('\n', a + 1);
            if (b == -1)
                return text.substring(a + 1).split("\\s+");
            else
                return text.substring(a + 1, b).split("\\s+");
        }
        return null;
    }
}
| 6,201 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
GUIConfiguration.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/GUIConfiguration.java | /*
* GUIConfiguration.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.swing.window.MenuConfiguration;
/**
 * menu configuration for the assembly viewer window
 * Daniel Huson, 5.2015
 */
class GUIConfiguration {
    /**
     * builds the menu configuration for this window
     *
     * @return the menu configuration
     */
    public static MenuConfiguration getMenuConfiguration() {
        final var config = new MenuConfiguration();
        // top-level menu bar; note that no item string is defined here for the Window menu
        config.defineMenuBar("File;Edit;Window;Help;");
        config.defineMenu("File", "Close;|;Quit;");
        config.defineMenu("Edit", "Cut;Copy;Paste;|;From Previous Alignment;");
        config.defineMenu("Help", "About...;How to Cite...;|;Community Website...;Reference Manual...;|;Check For Updates...;");
        return config;
    }
}
| 1,555 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
MatchData.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/MatchData.java | /*
* MatchData.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import java.util.Comparator;
/**
 * match data: one alignment of a read against a reference sequence, with
 * 1-based reference coordinates, bit score and the full alignment text.
 * Also serves as a comparator that orders matches by ascending start position
 * and descending end position.
 * Daniel Huson, 5.2015
 */
public class MatchData implements Comparator<MatchData> {
    private ReadData read;     // the read this match belongs to
    private String refName;    // name of the reference sequence
    private int firstPosInRef; // first position of the match in the reference
    private int lastPosInRef;  // last position of the match in the reference
    private float bitScore;    // alignment bit score
    private String text;       // full alignment text

    public MatchData() {
    }

    public MatchData(ReadData read, String refName, int firstPosInRef, int lastPosInRef, String text, float bitScore) {
        this.read = read;
        this.refName = refName;
        this.firstPosInRef = firstPosInRef;
        this.lastPosInRef = lastPosInRef;
        this.text = text;
        this.bitScore = bitScore;
    }

    @Override
    public String toString() {
        // fixed: the separator before "bitScore" was missing, producing output like "...10bitScore=..."
        return "MatchData: refName=" + refName + " refCoordinates=" + firstPosInRef + ".." + lastPosInRef + " bitScore=" + bitScore + "\n" + text;
    }

    /**
     * sort by ascending start position and descending end position;
     * ties are broken by ascending read id
     */
    @Override
    public int compare(MatchData o1, MatchData o2) {
        if (o1.firstPosInRef < o2.firstPosInRef)
            return -1;
        if (o1.firstPosInRef > o2.firstPosInRef)
            return 1;
        if (o1.lastPosInRef < o2.lastPosInRef)
            return 1;
        if (o1.lastPosInRef > o2.lastPosInRef)
            return -1;
        return Integer.compare(o1.read.getId(), o2.read.getId());
    }

    public ReadData getRead() {
        return read;
    }

    public String getRefName() {
        return refName;
    }

    public int getFirstPosInRef() {
        return firstPosInRef;
    }

    public int getLastPosInRef() {
        return lastPosInRef;
    }

    public float getBitScore() {
        return bitScore;
    }

    public String getText() {
        return text;
    }
}
| 2,569 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
PathExtractor.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/PathExtractor.java | /*
* PathExtractor.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.graph.*;
import jloda.util.CanceledException;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import java.util.ArrayList;
import java.util.List;
/**
 * Extracts a set of paths through the overlap graph. Each edge is weighted by the
 * maximum total overlap along any directed path starting with it; the heaviest path
 * is repeatedly peeled off (its nodes removed) until no edges remain.
 * Daniel Huson, 5.2015
 */
public class PathExtractor {
    private final Graph overlapGraph;               // input graph; never modified, work is done on a copy
    private Node[][] paths;                         // extracted paths (nodes of the ORIGINAL graph)
    private Node[] singletons;                      // nodes not on any path and without contained reads
    private final List<Integer>[] readId2ContainedReads; // read id -> ids of reads contained in it (may be null)

    /**
     * constructor
     *
     * @param overlapGraph         the overlap graph to extract paths from
     * @param readId2ContainedReads per-read lists of contained reads, used to decide
     *                              whether an isolated node still counts as a path
     */
    public PathExtractor(Graph overlapGraph, List<Integer>[] readId2ContainedReads) {
        this.overlapGraph = overlapGraph;
        this.readId2ContainedReads = readId2ContainedReads;
    }

    /**
     * determines the paths through the graph
     * The algorithm determines the longest path between any start read and any end read, where the length is given by the total
     * number of pairwise overlapped bases in the path
     */
    public void apply(ProgressListener progress) throws CanceledException {
        // make a working copy of the graph. Necessary because we remove stuff from the graph
        final Graph overlapGraphWorkingCopy = new Graph();
        final NodeArray<Node> new2oldNode = new NodeArray<>(overlapGraphWorkingCopy);
        final EdgeArray<Edge> new2oldEdge = new EdgeArray<>(overlapGraphWorkingCopy);
        {
            progress.setSubtask("Copying graph");
            progress.setMaximum(overlapGraph.getNumberOfNodes() + overlapGraph.getNumberOfEdges());
            progress.setProgress(0);
            NodeArray<Node> old2newNode = new NodeArray<>(this.overlapGraph);
            for (Node v = this.overlapGraph.getFirstNode(); v != null; v = this.overlapGraph.getNextNode(v)) {
                final Node w = overlapGraphWorkingCopy.newNode(v.getInfo());
                w.setData(v.getData());
                new2oldNode.put(w, v);
                old2newNode.put(v, w);
                progress.incrementProgress();
            }
            for (Edge e = this.overlapGraph.getFirstEdge(); e != null; e = this.overlapGraph.getNextEdge(e)) {
                final Edge f = overlapGraphWorkingCopy.newEdge(old2newNode.get(e.getSource()), old2newNode.get(e.getTarget()), e.getInfo());
                new2oldEdge.put(f, e);
                progress.incrementProgress();
            }
        }
        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();

        // extract contigs from graph, deleting their nodes
        progress.setSubtask("Extracting paths");
        progress.setMaximum(overlapGraphWorkingCopy.getNumberOfNodes());
        progress.setProgress(0);
        final List<Node> toDelete = new ArrayList<>(overlapGraphWorkingCopy.getNumberOfNodes());
        final EdgeArray<Integer> edgeWeights = new EdgeArray<>(overlapGraphWorkingCopy);
        // seed the edge weights by a depth-first pass from every source node
        // (assumes the graph is acyclic; cycles are broken before this class is used)
        for (Node v = overlapGraphWorkingCopy.getFirstNode(); v != null; v = v.getNext()) {
            if (v.getInDegree() == 0) {
                visitNodesRec(v, edgeWeights);
            }
            progress.incrementProgress();
        }
        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();
        final List<Node[]> pathsList = new ArrayList<>();

        // generate all paths in this loop:
        progress.setSubtask("Extracting paths");
        final int initialNumberOfEdges = overlapGraphWorkingCopy.getNumberOfEdges();
        progress.setMaximum(initialNumberOfEdges);
        progress.setProgress(0);
        while (overlapGraphWorkingCopy.getNumberOfEdges() > 0) {
            // the globally heaviest edge starts the heaviest remaining path
            Edge bestEdge = overlapGraphWorkingCopy.getFirstEdge();
            for (Edge e = overlapGraphWorkingCopy.getFirstEdge(); e != null; e = overlapGraphWorkingCopy.getNextEdge(e)) {
                if (edgeWeights.get(e) > edgeWeights.get(bestEdge))
                    bestEdge = e;
            }
            Node v = bestEdge.getSource();
            final List<Node> path = new ArrayList<>(); // new path
            path.add(new2oldNode.get(bestEdge.getSource()));
            int weight = edgeWeights.get(bestEdge);
            // follow the out-edge whose weight equals the remaining path weight;
            // subtracting each traversed overlap keeps the invariant along the path
            while (v.getOutDegree() > 0) {
                // find predecessor node:
                Node w = null;
                for (Edge f = v.getFirstOutEdge(); f != null; f = v.getNextOutEdge(f)) {
                    int eWeight = edgeWeights.get(f);
                    if (eWeight == weight) {
                        w = f.getTarget();
                        weight -= (Integer) f.getInfo(); // subtracting the overlap length of f
                        break;
                    }
                }
                if (w == null)
                    throw new RuntimeException("w==null");
                path.add(new2oldNode.get(w));
                toDelete.add(v);
                v = w;
            }
            toDelete.add(v);
            // remove all nodes used in contig
            for (Node z : toDelete) {
                overlapGraphWorkingCopy.deleteNode(z);
            }
            toDelete.clear();
            // clear edge weights:
            for (Edge z = overlapGraphWorkingCopy.getFirstEdge(); z != null; z = z.getNext()) {
                edgeWeights.put(z, null);
            }
            // set weights to reflect longest path
            for (Node z = overlapGraphWorkingCopy.getFirstNode(); z != null; z = z.getNext()) {
                if (z.getInDegree() == 0) {
                    visitNodesRec(z, edgeWeights);
                }
            }
            pathsList.add(path.toArray(new Node[0]));
            progress.setProgress(initialNumberOfEdges - overlapGraphWorkingCopy.getNumberOfEdges());
        }
        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();

        // singleton reads: an isolated node that has contained reads still forms a
        // (length-1) path; all other isolated nodes are reported as singletons
        final List<Node> singletonList = new ArrayList<>();
        for (Node v = overlapGraphWorkingCopy.getFirstNode(); v != null; v = overlapGraphWorkingCopy.getNextNode(v)) {
            int readId = (Integer) v.getInfo();
            if (readId2ContainedReads != null && readId < readId2ContainedReads.length && readId2ContainedReads[readId] != null && readId2ContainedReads[readId].size() > 0)
                pathsList.add(new Node[]{v});
            else
                singletonList.add(new2oldNode.get(v));
        }
        paths = pathsList.toArray(new Node[pathsList.size()][]);
        singletons = singletonList.toArray(new Node[0]);
    }

    /**
     * recursively visit all nodes and set edge weights
     * The weight of an edge e is the maximum sum of overlaps on any outgoing path from e
     *
     * @param v           node whose out-edges are to be weighted
     * @param edgeWeights per-edge weight map; already-computed entries are reused (memoization)
     * @return path length
     */
    private int visitNodesRec(Node v, EdgeArray<Integer> edgeWeights) {
        int maxValue = 0;
        for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
            if (edgeWeights.get(e) == null) {
                edgeWeights.put(e, visitNodesRec(e.getTarget(), edgeWeights) + (Integer) e.getInfo());
                // note that (Integer)e.getInfo() is the overlap length of e
            }
            maxValue = Math.max(maxValue, edgeWeights.get(e));
        }
        return maxValue;
    }

    /**
     * get all selected paths through graph
     *
     * @return paths (arrays of nodes of the original graph)
     */
    public Node[][] getPaths() {
        return paths;
    }

    /**
     * get all singleton nodes
     *
     * @return singletons
     */
    public Node[] getSingletons() {
        return singletons;
    }
}
| 8,322 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
DirectedCycleBreaker.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/DirectedCycleBreaker.java | /*
* DirectedCycleBreaker.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.graph.Edge;
import jloda.graph.Graph;
import jloda.graph.algorithms.DirectedCycleDetector;
import java.util.Collection;
/**
 * Breaks all directed cycles in a graph by deleting edges until none remain.
 * In each detected cycle, the edge with the smallest overlap weight is deleted;
 * ties are broken in favor of the edge whose endpoints are least connected.
 * Created by huson on 8/22/16.
 */
class DirectedCycleBreaker {
    /**
     * delete edges until the graph is acyclic
     *
     * @param G the graph, modified in place
     * @return the number of edges deleted
     */
    public static int apply(Graph G) {
        final DirectedCycleDetector detector = new DirectedCycleDetector(G);
        int removed = 0;
        while (detector.apply()) {
            final Collection<Edge> cycle = detector.cycle();
            Edge victim = null;
            for (Edge e : cycle) {
                if (victim == null) {
                    victim = e; // first candidate
                    continue;
                }
                final int cmp = Integer.compare((int) e.getInfo(), (int) victim.getInfo());
                // prefer the lighter edge; on equal weight, the one with less-connected endpoints
                if (cmp < 0 || (cmp == 0 && connectivity(e) < connectivity(victim)))
                    victim = e;
            }
            if (victim == null)
                throw new RuntimeException("Internal error: empty cycle???");
            G.deleteEdge(victim);
            removed++;
        }
        return removed;
    }

    /**
     * combined degree of the edge's endpoints, used as a tie-breaker
     */
    private static int connectivity(Edge e) {
        return e.getSource().getOutDegree() + e.getTarget().getInDegree();
    }
}
| 1,977 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
OverlapGraphViewer.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/OverlapGraphViewer.java | /*
* OverlapGraphViewer.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.graph.*;
import jloda.graph.algorithms.FruchtermanReingoldLayout;
import jloda.swing.commands.CommandManager;
import jloda.swing.director.ProjectManager;
import jloda.swing.graphview.EdgeActionAdapter;
import jloda.swing.graphview.EdgeView;
import jloda.swing.graphview.GraphView;
import jloda.swing.graphview.NodeActionAdapter;
import jloda.swing.window.MenuBar;
import jloda.swing.window.MenuConfiguration;
import jloda.util.APoint2D;
import jloda.util.CanceledException;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.assembly.commands.SelectFromPreviousWindowCommand;
import megan.core.Director;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ComponentAdapter;
import java.awt.event.ComponentEvent;
import java.awt.geom.Rectangle2D;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.Stack;
/**
 * Simple Swing-based viewer for the read-overlap graph: nodes represent reads,
 * edges represent suffix-prefix overlaps (the edge info is the overlap length).
 * Edges that lie on a computed contig path are highlighted with line width 2.
 * Daniel Huson, 5.2015
 */
public class OverlapGraphViewer {
    // project director; provides the command-execution framework for the menu
    final private Director dir;
    // the overlap graph to display
    final private Graph overlapGraph;
    // contig paths through the graph; edges along these paths are highlighted (may be null)
    final private Node[][] paths;
    // the Swing component that renders the graph
    final private GraphView graphView;
    // maps each node to the name of the read it represents
    final private NodeArray<String> node2ReadNameMap;

    /**
     * Constructor: sets up the graph view, scroll pane behavior and the
     * node/edge selection listeners. The window itself is built in {@link #apply}.
     *
     */
    public OverlapGraphViewer(Director dir, final Graph overlapGraph, final NodeArray<String> node2ReadNameMap, Node[][] paths) {
        this.dir = dir;
        this.overlapGraph = overlapGraph;
        this.node2ReadNameMap = node2ReadNameMap;
        this.paths = paths;

        graphView = new GraphView(overlapGraph);
        graphView.getScrollPane().getViewport().setScrollMode(JViewport.SIMPLE_SCROLL_MODE);
        graphView.getScrollPane().setHorizontalScrollBarPolicy(JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
        graphView.getScrollPane().setVerticalScrollBarPolicy(JScrollPane.VERTICAL_SCROLLBAR_ALWAYS);
        graphView.getScrollPane().addKeyListener(graphView.getGraphViewListener());
        graphView.setSize(800, 800);
        graphView.setAllowMoveNodes(true);
        graphView.setAllowRubberbandNodes(true);
        graphView.setAutoLayoutLabels(true);
        graphView.setFixedNodeSize(true);
        graphView.setMaintainEdgeLengths(false);
        graphView.setAllowEdit(false);
        graphView.setCanvasColor(Color.WHITE);

        // keep the canvas at least as large as the visible viewport (minus a small margin)
        graphView.getScrollPane().addComponentListener(new ComponentAdapter() {
            public void componentResized(ComponentEvent event) {
                final Dimension ps = graphView.trans.getPreferredSize();
                int x = Math.max(ps.width, graphView.getScrollPane().getWidth() - 20);
                int y = Math.max(ps.height, graphView.getScrollPane().getHeight() - 20);
                ps.setSize(x, y);
                graphView.setPreferredSize(ps);
                graphView.getScrollPane().getViewport().setViewSize(new Dimension(x, y));
                graphView.repaint();
            }
        });
        graphView.removeAllNodeActionListeners();
        graphView.removeAllEdgeActionListeners();

        graphView.addNodeActionListener(new NodeActionAdapter() {
            // Click: remember the clicked reads' names so other windows can select them.
            // Double-click: grow the selection along highlighted (contig-path) edges only.
            // Triple-click: grow the selection along all adjacent edges.
            public void doClick(NodeSet nodes, int clicks) {
                ProjectManager.getPreviouslySelectedNodeLabels().clear();
                for (Node v : nodes) {
                    ProjectManager.getPreviouslySelectedNodeLabels().add(node2ReadNameMap.get(v));
                }

                if (clicks >= 2) {
                    graphView.selectedNodes.clear();
                    graphView.selectedEdges.clear();

                    final EdgeSet edgesToSelect = new EdgeSet(overlapGraph);
                    final NodeSet nodesToSelect = new NodeSet(overlapGraph);
                    // breadth of the spread is controlled by the edge filter below
                    final Stack<Node> stack = new Stack<>();
                    stack.addAll(nodes);
                    while (stack.size() > 0) {
                        Node v = stack.pop();
                        for (Edge e = v.getFirstAdjacentEdge(); e != null; e = v.getNextAdjacentEdge(e)) {
                            // line width 2 marks contig-path edges; triple-click ignores the filter
                            if (clicks == 3 || graphView.getLineWidth(e) == 2) {
                                edgesToSelect.add(e);
                                Node w = e.getOpposite(v);
                                if (!nodesToSelect.contains(w)) {
                                    stack.push(w);
                                    nodesToSelect.add(w);
                                }
                            }
                        }
                    }
                    graphView.selectedNodes.addAll(nodesToSelect);
                    graphView.selectedEdges.addAll(edgesToSelect);
                    graphView.repaint();
                }
            }

            // selecting nodes shows their read names and also selects all
            // contig-path edges reachable from them
            public void doSelect(NodeSet nodes) {
                for (Node v : nodes) {
                    graphView.setLabel(v, node2ReadNameMap.get(v));
                }

                graphView.selectedEdges.clear();
                final EdgeSet edgesToSelect = new EdgeSet(overlapGraph);
                final NodeSet nodesToSelect = new NodeSet(overlapGraph);
                final Stack<Node> stack = new Stack<>();
                stack.addAll(nodes);
                while (stack.size() > 0) {
                    Node v = stack.pop();
                    for (Edge e = v.getFirstAdjacentEdge(); e != null; e = v.getNextAdjacentEdge(e)) {
                        if (graphView.getLineWidth(e) == 2) {
                            edgesToSelect.add(e);
                            Node w = e.getOpposite(v);
                            if (!nodesToSelect.contains(w)) {
                                stack.push(w);
                                nodesToSelect.add(w);
                            }
                        }
                    }
                }
                graphView.selectedEdges.addAll(edgesToSelect);
            }

            // deselecting nodes removes their labels again
            public void doDeselect(NodeSet nodes) {
                for (Node v : nodes) {
                    graphView.setLabel(v, null);
                }
            }
        });

        graphView.addEdgeActionListener(new EdgeActionAdapter() {
            // selecting edges shows their overlap length and selects both endpoints
            public void doSelect(EdgeSet edges) {
                for (Edge e : edges) {
                    graphView.setLabel(e, "" + e.getInfo());
                }
                NodeSet nodes = new NodeSet(overlapGraph);
                for (Edge e : edges) {
                    nodes.add(e.getSource());
                    nodes.add(e.getTarget());
                }
                graphView.fireDoSelect(nodes);
            }

            // deselecting edges removes their labels again
            public void doDeselect(EdgeSet edges) {
                for (Edge e : edges) {
                    graphView.setLabel(e, null);
                }
            }
        });
    }

    /**
     * Lays out the graph (Fruchterman-Reingold), highlights contig-path edges
     * and opens the viewer frame.
     *
     */
    public void apply(ProgressListener progress) throws CanceledException {
        progress.setSubtask("Computing graph layout");
        progress.setMaximum(-1);
        progress.setProgress(0);

        // collect all edges that lie on a contig path so they can be highlighted
        Set<Edge> pathEdges = new HashSet<>();
        if (paths != null) {
            for (Node[] path : paths) {
                for (int i = 0; i < path.length - 1; i++) {
                    Node v = path[i];
                    for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                        if (e.getTarget() == path[i + 1])
                            pathEdges.add(e);
                    }
                }
            }
        }

        // compute simple layout:
        final FruchtermanReingoldLayout fruchtermanReingoldLayout = new FruchtermanReingoldLayout(overlapGraph, null);
        NodeArray<APoint2D<?>> coordinates = new NodeArray<>(overlapGraph);
        fruchtermanReingoldLayout.apply(1000, coordinates);
        for (Node v = overlapGraph.getFirstNode(); v != null; v = v.getNext()) {
            graphView.setLocation(v, coordinates.get(v).getX(), coordinates.get(v).getY());
            graphView.setHeight(v, 5);
            graphView.setWidth(v, 5);
        }
        // draw all edges directed; contig-path edges and their endpoints get width 2
        for (Edge e = overlapGraph.getFirstEdge(); e != null; e = e.getNext()) {
            graphView.setDirection(e, EdgeView.DIRECTED);
            if (pathEdges.contains(e)) {
                graphView.setLineWidth(e, 2);
                graphView.setLineWidth(e.getSource(), 2);
                graphView.setLineWidth(e.getTarget(), 2);
            }
        }

        // build the window, menu and command manager:
        JFrame frame = new JFrame("Assembly Graph");
        graphView.setFrame(frame);
        frame.setSize(graphView.getSize());
        frame.setLocation(100, 100);
        frame.addKeyListener(graphView.getGraphViewListener());
        frame.getContentPane().setLayout(new BorderLayout());
        frame.getContentPane().add(graphView.getScrollPane(), BorderLayout.CENTER);
        frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);

        CommandManager commandManager = new CommandManager(dir, graphView, "megan.commands");
        commandManager.addCommands(this, Collections.singletonList(new SelectFromPreviousWindowCommand()), true);

        MenuConfiguration menuConfig = GUIConfiguration.getMenuConfiguration();
        MenuBar menuBar = new MenuBar(this, menuConfig, commandManager);
        frame.setJMenuBar(menuBar);

        Rectangle2D bbox = graphView.getBBox();
        graphView.trans.setCoordinateRect(bbox);

        // show the frame:
        frame.setVisible(true);
        graphView.getScrollPane().revalidate();
        graphView.centerGraph();
        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();
    }

    /**
     * get the graphview
     *
     * @return graph view
     */
    public GraphView getGraphView() {
        return graphView;
    }

    /**
     * get the node-to-read-name map used by this viewer
     *
     * @return map from graph node to read name
     */
    public NodeArray<String> getNode2ReadNameMap() {
        return node2ReadNameMap;
    }
}
| 10,680 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
OverlapGraphBuilder.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/OverlapGraphBuilder.java | /*
* OverlapGraphBuilder.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.graph.Edge;
import jloda.graph.Graph;
import jloda.graph.Node;
import jloda.graph.NodeArray;
import jloda.util.CanceledException;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import java.util.*;
/**
 * Builds the read-overlap graph for a set of reads that align to a common set
 * of reference sequences: nodes are reads, edges are perfect suffix-prefix
 * overlaps, with the overlap length (in letters) stored as the edge info.
 * Reads that are properly contained in another read are recorded in
 * {@code readId2ContainedReads} (indexed by read id) instead of becoming nodes.
 * <p>
 * Daniel Huson, 5.2015
 */
public class OverlapGraphBuilder {
    private final Graph overlapGraph = new Graph();
    private final NodeArray<String> node2readName = new NodeArray<>(overlapGraph);
    // read id -> ids of reads properly contained in that read (null if none); indexed by read id
    private List<Integer>[] readId2ContainedReads;
    private ReadData[] readDatas;
    private int minOverlap;
    private final boolean verbose;

    /**
     * constructor
     *
     * @param minOverlap minimum number of matching letters required for an overlap edge
     * @param verbose    report statistics to stderr?
     */
    public OverlapGraphBuilder(int minOverlap, boolean verbose) {
        this.minOverlap = minOverlap;
        this.verbose = verbose;
    }

    /**
     * Builds the overlap graph from the given reads.
     *
     * @param readData reads together with their matches to reference sequences
     * @param progress progress listener
     * @throws CanceledException if the user cancels
     */
    public void apply(final List<ReadData> readData, final ProgressListener progress) throws CanceledException {
        readDatas = readData.toArray(new ReadData[0]);
        // collect all matches for each reference:
        progress.setSubtask("Sorting reads and matches by reference");
        progress.setMaximum(readDatas.length);
        progress.setProgress(0);

        readId2ContainedReads = new List[readDatas.length];

        long countPairs = 0;
        final Map<String, SortedSet<MatchData>> ref2matches = new HashMap<>();
        for (int r = 0; r < readDatas.length; r++) {
            final ReadData read = readDatas[r];
            if (read.getMatches() != null) {
                for (int m = 0; m < read.getMatches().length; m++) {
                    final MatchData match = read.getMatches()[m];
                    // matches per reference are kept sorted by position (MatchData is its own comparator)
                    ref2matches.computeIfAbsent(match.getRefName(), k -> new TreeSet<>(new MatchData())).add(match);
                    countPairs++;
                }
            }
            progress.setProgress(r);
        }
        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();
        if (verbose)
            System.err.printf("Overlaps: %,10d%n", countPairs);

        buildOverlapGraph(readDatas, ref2matches, minOverlap);
    }

    /**
     * Builds the overlap graph from the per-reference sorted match sets.
     * For each pair of matches on the same reference, either records a
     * containment (second read fully inside the first) or adds an overlap edge.
     *
     * @param reads       all reads, indexed by read id
     * @param ref2matches reference name -> matches, sorted by start position
     * @param minOverlap  minimum number of matching letters for an edge
     */
    private void buildOverlapGraph(ReadData[] reads, Map<String, SortedSet<MatchData>> ref2matches, int minOverlap) {
        final Node[] nodes = new Node[reads.length];
        final BitSet containedReadIds = new BitSet();

        for (String refName : ref2matches.keySet()) {
            final MatchData[] matches = ref2matches.get(refName).toArray(new MatchData[0]);
            for (int i = 0; i < matches.length; i++) {
                final MatchData iMatch = matches[i];
                final int iReadId = iMatch.getRead().getId();
                if (!containedReadIds.get(iReadId)) {
                    Node v = nodes[iReadId];
                    if (v == null) {
                        v = nodes[iReadId] = overlapGraph.newNode(iReadId);
                        node2readName.put(v, iMatch.getRead().getName());
                    }

                    for (int j = i + 1; j < matches.length; j++) {
                        final MatchData jMatch = matches[j];
                        // matches are sorted by start position, so once the remaining
                        // reference interval is too short, no later match can overlap
                        if (3 * (iMatch.getLastPosInRef() - jMatch.getFirstPosInRef()) <= minOverlap)
                            break; // no chance of an overlap

                        int overlapLength = computePerfectOverlapLength(iMatch, jMatch);
                        final int jReadId = jMatch.getRead().getId();
                        if (overlapLength > 0 && jMatch.getLastPosInRef() <= iMatch.getLastPosInRef()) { // contained
                            containedReadIds.set(jReadId);
                            // bug fix: readId2ContainedReads must be indexed by read id, not by the
                            // position in the per-reference matches array - all consumers
                            // (e.g. ContigBuilder) look this array up by read id
                            List<Integer> contained = readId2ContainedReads[iReadId];
                            if (contained == null) {
                                contained = readId2ContainedReads[iReadId] = new ArrayList<>();
                            }
                            contained.add(jReadId);
                        } else if (overlapLength >= minOverlap) {
                            Node w = nodes[jReadId];
                            if (w == null) {
                                w = nodes[jReadId] = overlapGraph.newNode(jReadId);
                                node2readName.put(w, jMatch.getRead().getName());
                            }
                            final Edge e = overlapGraph.getCommonEdge(v, w);
                            if (e == null) {
                                overlapGraph.newEdge(v, w, overlapLength);
                            } else if ((Integer) e.getInfo() < overlapLength) {
                                // keep the longest overlap seen for this pair of reads
                                e.setInfo(overlapLength);
                            }
                        }
                    }
                }
            }
        }
        if (verbose) {
            System.err.printf("Graph nodes:%,10d%n", overlapGraph.getNumberOfNodes());
            System.err.printf("Graph edges:%,10d%n", overlapGraph.getNumberOfEdges());
            System.err.printf("Cont. reads:%,10d%n", containedReadIds.cardinality());
        }
    }

    /**
     * Computes the number of matching letters in the overlap of two matches on
     * the same reference, or 0 if any aligned position mismatches ('n' is a wildcard).
     *
     * @return number of matching letters, or 0 if the overlap is not perfect
     */
    private int computePerfectOverlapLength(MatchData iMatch, MatchData jMatch) {
        try {
            int first = Math.max(iMatch.getFirstPosInRef(), jMatch.getFirstPosInRef());
            int last = Math.min(iMatch.getLastPosInRef(), jMatch.getLastPosInRef());

            int count = 0;
            // each reference position covers three read positions (codon-based coordinates)
            for (int refPos = first; refPos <= last; refPos++) {
                for (int k = 0; k < 3; k++) {
                    int iPos = 3 * (refPos - iMatch.getFirstPosInRef()) + k;
                    int jPos = 3 * (refPos - jMatch.getFirstPosInRef()) + k;
                    char iChar = Character.toLowerCase(iMatch.getRead().getSegment().charAt(iPos));
                    char jChar = Character.toLowerCase(jMatch.getRead().getSegment().charAt(jPos));
                    if (iChar != jChar && iChar != 'n' && jChar != 'n')
                        return 0;
                    else if (Character.isLetter(iMatch.getRead().getSegment().charAt(iPos)))
                        count++;
                }
            }
            return count;
        } catch (Exception ex) {
            // an index past the end of a segment means the overlap is not valid;
            // treated as "no overlap" by design
            return 0;
        }
    }

    /**
     * get the overlap graph
     *
     * @return overlap graph
     */
    public Graph getOverlapGraph() {
        return overlapGraph;
    }

    /**
     * get the readDatas associated with the overlap graph
     *
     * @return readDatas, indexed by read id
     */
    public ReadData[] getReadId2ReadData() {
        return readDatas;
    }

    /**
     * gets the map from graph node to the name of the read it represents
     *
     * @return node-to-read-name map
     */
    public NodeArray<String> getNode2ReadNameMap() {
        return node2readName;
    }

    public int getMinOverlap() {
        return minOverlap;
    }

    public void setMinOverlap(int minOverlap) {
        this.minOverlap = minOverlap;
    }

    /**
     * gets the containment map: read id -> ids of reads contained in that read
     *
     * @return containment lists, indexed by read id (entries may be null)
     */
    public List<Integer>[] getReadId2ContainedReads() {
        return readId2ContainedReads;
    }
}
| 8,255 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ContigBuilder.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/ContigBuilder.java | /*
* ContigBuilder.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly;
import jloda.fx.util.ProgramExecutorService;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.util.Basic;
import jloda.util.CanceledException;
import jloda.util.Pair;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * Builds contig sequences from paths through the overlap graph; one worker
 * task per path, contigs are filtered by read count, length and coverage.
 * Daniel Huson, 5.2015
 */
public class ContigBuilder {
    // the accepted contigs as (FastA header, sequence) pairs; guarded by synchronized (result)
    private final ArrayList<Pair<String, String>> result;
    // contig paths: each entry is the ordered list of read nodes forming one contig
    private final Node[][] paths;
    // read id -> ids of reads contained in that read; used to count total reads per contig
    private final List<Integer>[] readId2ContainedReads;

    /**
     * constructor
     *
     */
    public ContigBuilder(Node[][] paths, List<Integer>[] readId2ContainedReads) {
        this.paths = paths;
        this.readId2ContainedReads = readId2ContainedReads;
        result = new ArrayList<>();
    }

    /**
     * Applies the algorithm: builds one contig per path in parallel and keeps
     * those that pass the minReads/minAvCoverage/minLength filters.
     *
     * @param reads         all reads, indexed by read id (node info is the read id)
     * @param minReads      minimum number of reads (incl. contained) per contig
     * @param minAvCoverage minimum average coverage per contig
     * @param minLength     minimum contig length (after gap removal)
     * @param progress      progress listener; cancelation aborts remaining work
     */
    public void apply(final ReadData[] reads, final int minReads, final double minAvCoverage, final int minLength, final ProgressListener progress) throws CanceledException {
        progress.setSubtask("Building contigs");
        progress.setMaximum(paths.length);
        progress.setProgress(0);

        if (paths.length == 0) {
            if (progress instanceof ProgressPercentage)
                progress.reportTaskCompleted();
            return;
        }

        // one task per path; the latch lets the caller wait for all of them
        final ExecutorService service = Executors.newFixedThreadPool(ProgramExecutorService.getNumberOfCoresToUse());
        final CountDownLatch countDownLatch = new CountDownLatch(paths.length);

        for (final Node[] path : paths) {
            service.submit(() -> {
                try {
                    int contigSize = path.length;
                    if (contigSize > 0) {
                        final StringBuilder sequenceBuffer = new StringBuilder();

                        int totalBases = 0;
                        int totalReads = 0;

                        // process the first read:
                        {
                            Node currentNode = path[0];
                            ReadData currentRead;
                            int currentReadId = (Integer) currentNode.getInfo();
                            currentRead = reads[currentReadId];
                            totalReads++;
                            int readId = (Integer) currentNode.getInfo();
                            // contained reads count toward the contig's read total
                            if (readId2ContainedReads[readId] != null) {
                                totalReads += readId2ContainedReads[readId].size();
                            }
                            sequenceBuffer.append(currentRead.getSegment());
                            totalBases += currentRead.getSegment().length();
                        }

                        // process all other reads:
                        for (int i = 1; i < path.length; i++) {
                            Node prevNode = path[i - 1];
                            Node currentNode = path[i];
                            int nextReadId = (Integer) currentNode.getInfo();
                            totalReads++;
                            if (readId2ContainedReads[nextReadId] != null) {
                                totalReads += readId2ContainedReads[nextReadId].size();
                            }
                            final ReadData nextRead = reads[nextReadId];
                            // the edge info is the overlap length: append only the non-overlapping suffix
                            Edge e = prevNode.getCommonEdge(currentNode);
                            int overlap = (Integer) e.getInfo();
                            sequenceBuffer.append(nextRead.getSegment().substring(overlap));
                            totalBases += nextRead.getSegment().length();
                        }

                        if (totalReads < minReads) {
                            return;
                        }

                        // remove all gaps from contig. These are induced by other reads in other contigs, so not need to keep them
                        // also, this won't change the frame when processing BlastText alignments
                        final String contigSequence = sequenceBuffer.toString().replaceAll("-", ""); // remove all gaps...

                        if (contigSequence.length() < minLength) {
                            return;
                        }

                        float coverage = (float) totalBases / Math.max(1.0f, contigSequence.length());
                        if (coverage < minAvCoverage) {
                            return;
                        }

                        // result is shared between worker threads; contig numbering
                        // follows insertion order
                        synchronized (result) {
                            final Pair<String, String> aContig = new Pair<>();
                            final String contigName = String.format("Contig-%06d", result.size() + 1);
                            aContig.setFirst(String.format(">%s length=%d reads=%d avCoverage=%.1f", contigName, contigSequence.length(), totalReads, coverage));
                            aContig.setSecond(contigSequence);
                            result.add(aContig);
                        }
                    }
                } finally {
                    countDownLatch.countDown();
                    try {
                        progress.incrementProgress();
                    } catch (CanceledException e) {
                        // user canceled: stop remaining tasks and drain the latch
                        // so the awaiting caller thread is released
                        service.shutdownNow();
                        while (countDownLatch.getCount() > 0)
                            countDownLatch.countDown();
                    }
                }
            });
        }
        try {
            countDownLatch.await();
        } catch (InterruptedException e) {
            Basic.caught(e);
        }
        service.shutdownNow();

        if (progress instanceof ProgressPercentage)
            progress.reportTaskCompleted();
    }

    /**
     * get the computed contigs
     *
     * @return contigs as (FastA header, sequence) pairs
     */
    public ArrayList<Pair<String, String>> getContigs() {
        return result;
    }

    /**
     * get the number of contigs that passed the filters
     *
     * @return contig count
     */
    public int getCountContigs() {
        return result.size();
    }
}
| 6,948 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AlignmentAssembler.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/alignment/AlignmentAssembler.java | /*
* AlignmentAssembler.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.alignment;
import jloda.graph.EdgeArray;
import jloda.graph.Graph;
import jloda.graph.Node;
import jloda.graph.NodeArray;
import jloda.graph.io.GraphGML;
import jloda.util.CanceledException;
import jloda.util.Pair;
import jloda.util.StringUtils;
import jloda.util.progress.ProgressListener;
import megan.alignment.gui.Alignment;
import megan.assembly.OverlapGraphViewer;
import megan.assembly.PathExtractor;
import megan.core.Director;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
/**
 * Assembles contigs from a multiple alignment of reads: builds the overlap
 * graph, extracts contig paths and writes or returns the resulting contigs.
 * Daniel Huson, 5.2015
 */
public class AlignmentAssembler {
    private Graph overlapGraph;
    private Alignment alignment;
    private Node[][] paths;
    private NodeArray<String> node2readName;
    private ArrayList<Pair<String, String>> contigs;
    private List<Integer>[] readId2ContainedReads;

    /**
     * default constructor
     */
    public AlignmentAssembler() {
    }

    /**
     * builds the overlap graph for the given alignment
     *
     * @param minOverlap minimum overlap length for an edge
     * @param alignment  the alignment to assemble
     * @param progress   progress listener
     */
    public void computeOverlapGraph(int minOverlap, final Alignment alignment, ProgressListener progress) throws IOException {
        this.alignment = alignment;
        final var builder = new OverlapGraphBuilder(minOverlap);
        builder.apply(alignment, progress);
        this.overlapGraph = builder.getOverlapGraph();
        this.node2readName = builder.getNode2ReadNameMap();
        this.readId2ContainedReads = builder.getReadId2ContainedReads();
    }

    /**
     * opens a viewer window showing the overlap graph
     *
     * @param dir      the project director
     * @param progress progress listener
     */
    public void showOverlapGraph(Director dir, ProgressListener progress) throws CanceledException {
        new OverlapGraphViewer(dir, overlapGraph, node2readName, paths).apply(progress);
    }

    /**
     * writes the overlap graph in GML format
     *
     * @param writer target writer
     * @return (number of nodes, number of edges) of the graph written
     */
    public Pair<Integer, Integer> writeOverlapGraph(Writer writer) throws IOException {
        try (NodeArray<String> names = new NodeArray<>(overlapGraph);
             NodeArray<String> sequences = new NodeArray<>(overlapGraph);
             EdgeArray<String> overlaps = new EdgeArray<>(overlapGraph)) {
            // each node carries its read name (first word) and aligned sequence block
            for (var v : overlapGraph.nodes()) {
                final var laneId = (Integer) v.getInfo();
                names.put(v, StringUtils.getFirstWord(alignment.getLane(laneId).getName()));
                sequences.put(v, alignment.getLane(laneId).getBlock());
            }
            final var nodeAttributes = new TreeMap<String, NodeArray<String>>();
            nodeAttributes.put("label", names);
            nodeAttributes.put("sequence", sequences);

            // each edge carries its overlap length as a string attribute
            for (var e : overlapGraph.edges()) {
                overlaps.put(e, e.getInfo().toString());
            }
            final var edgeAttributes = new TreeMap<String, EdgeArray<String>>();
            edgeAttributes.put("label", null);
            edgeAttributes.put("overlap", overlaps);

            GraphGML.writeGML(overlapGraph, "Overlap graph generated by MEGAN6", alignment.getName(), true, 1, writer, nodeAttributes, edgeAttributes);
            return new Pair<>(overlapGraph.getNumberOfNodes(), overlapGraph.getNumberOfEdges());
        }
    }

    /**
     * computes the contigs; also sorts the alignment by contigs if requested
     *
     * @param alignmentNumber        number of this alignment (used in contig names)
     * @param minReads               minimum reads per contig
     * @param minCoverage            minimum average coverage per contig
     * @param minLength              minimum contig length
     * @param sortAlignmentByContigs sort the alignment by the computed contigs?
     * @param progress               progress listener
     * @return number of contigs computed
     */
    public int computeContigs(int alignmentNumber, int minReads, double minCoverage, int minLength, boolean sortAlignmentByContigs, ProgressListener progress) throws CanceledException {
        final var extractor = new PathExtractor(overlapGraph, readId2ContainedReads);
        extractor.apply(progress);
        this.paths = extractor.getPaths();

        final var builder = new ContigBuilder(extractor.getPaths(), extractor.getSingletons(), readId2ContainedReads);
        builder.apply(alignmentNumber, alignment, minReads, minCoverage, minLength, sortAlignmentByContigs, progress);
        this.contigs = builder.getContigs();
        return builder.getCountContigs();
    }

    /**
     * gets the computed contigs as (FastA header, sequence) pairs
     *
     * @return contigs
     */
    public ArrayList<Pair<String, String>> getContigs() {
        return contigs;
    }

    /**
     * writes the contigs in FastA format
     *
     * @param w        target writer
     * @param progress progress listener
     */
    public void writeContigs(Writer w, ProgressListener progress) throws IOException {
        progress.setSubtask("Writing contigs");
        progress.setMaximum(contigs.size());
        progress.setProgress(0);
        for (final var contig : contigs) {
            w.write(contig.getFirst().trim());
            w.write("\n");
            w.write(contig.getSecond().trim());
            w.write("\n");
            progress.incrementProgress();
        }
        w.flush();
        progress.reportTaskCompleted();
    }
}
| 5,490 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
OverlapGraphBuilder.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/alignment/OverlapGraphBuilder.java | /*
* OverlapGraphBuilder.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.alignment;
import jloda.graph.*;
import jloda.util.CanceledException;
import jloda.util.Pair;
import jloda.util.StringUtils;
import jloda.util.progress.ProgressListener;
import megan.alignment.gui.Alignment;
import megan.alignment.gui.Lane;
import java.util.*;
/**
 * Builds the overlap graph for the reads of a multiple alignment: nodes are
 * reads (lanes), edges are perfect suffix-prefix overlaps with the number of
 * overlapping letters stored as the edge info. Reads fully contained in
 * another read are dropped from the graph and recorded in
 * {@code readId2ContainedReads}.
 * Daniel Huson, 5.2015
 */
public class OverlapGraphBuilder {
    private final Graph overlapGraph = new Graph();
    private final NodeArray<String> node2readName = new NodeArray<>(overlapGraph);
    // read index -> indices of reads properly contained in that read (entries may be null)
    private List<Integer>[] readId2ContainedReads;
    private final int minOverlap;

    /**
     * constructor
     *
     * @param minOverlap minimum number of matching letters required for an overlap edge
     */
    public OverlapGraphBuilder(int minOverlap) {
        this.minOverlap = minOverlap;
    }

    /**
     * Builds the overlap graph from the given alignment.
     *
     * @param alignment the alignment whose lanes (reads) are to be overlapped
     * @param progress  progress listener; may be null (then also suppresses the summary output)
     * @throws CanceledException if the user cancels
     */
    public void apply(final Alignment alignment, ProgressListener progress) throws CanceledException {
        if (progress != null) {
            progress.setSubtask("Building overlap graph");
            progress.setMaximum(alignment.getNumberOfSequences());
            progress.setProgress(0);
        }

        // pair (first non-gap position, read index), sorted by start position:
        final Pair<Integer, Integer>[] list = new Pair[alignment.getNumberOfSequences()];
        final int[] numberOfLetters = new int[alignment.getNumberOfSequences()];

        for (int i = 0; i < alignment.getNumberOfSequences(); i++) {
            list[i] = new Pair<>(alignment.getLane(i).getFirstNonGapPosition(), i);
            numberOfLetters[i] = countLetters(alignment.getLane(i));
        }
        Arrays.sort(list, new Pair<>()); // sort by start position

        // overlap graph. Each node is a read, each edge is a suffix-prefix overlap
        readId2ContainedReads = new List[alignment.getNumberOfSequences()];
        EdgeArray<Integer> edgeWeights = new EdgeArray<>(overlapGraph);
        {
            final Set<Integer> toDelete = new HashSet<>();

            // compute mapping to nodes:
            final Node[] i2node = new Node[alignment.getNumberOfSequences()];
            for (int i = 0; i < alignment.getNumberOfSequences(); i++) {
                i2node[i] = overlapGraph.newNode(i);
                node2readName.put(i2node[i], StringUtils.getFirstWord(alignment.getLane(i).getName()));
            }

            // compute edges and mark contained reads for removal
            for (int il = 0; il < list.length; il++) {
                final int i = list[il].getSecond();
                if (!toDelete.contains(i)) {
                    final Lane iLane = alignment.getLane(i);
                    final int iStart = iLane.getFirstNonGapPosition();
                    final int iEnd = iLane.getLastNonGapPosition();
                    for (int jl = il + 1; jl < list.length; jl++) {
                        final int j = list[jl].getSecond();
                        final Lane jLane = alignment.getLane(j);
                        final int jStart = jLane.getFirstNonGapPosition();
                        // reads are sorted by start position: once a read starts after
                        // iLane ends, no later read can overlap iLane
                        if (jStart > iEnd)
                            break;
                        if (!toDelete.contains(j)) {
                            final int jEnd = jLane.getLastNonGapPosition();
                            // the tie-break (i < j) ensures each unordered pair is considered once
                            if ((iStart < jStart || (iStart == jStart && i < j))) {
                                int numberOfLettersInOverlap = computeNumberOfLettersInPerfectOverlap(iLane, jLane);
                                if (iEnd >= jEnd && numberOfLettersInOverlap == numberOfLetters[j]) { // contained
                                    toDelete.add(j);
                                    List<Integer> contained = readId2ContainedReads[i];
                                    if (contained == null) {
                                        contained = readId2ContainedReads[i] = new ArrayList<>();
                                    }
                                    contained.add(j);
                                } else if (numberOfLettersInOverlap >= minOverlap) {
                                    overlapGraph.newEdge(i2node[i], i2node[j], numberOfLettersInOverlap);
                                }
                            }
                        }
                    }
                }
                if (progress != null)
                    progress.incrementProgress();
            }
            // remove all reads that are properly contained in some other read
            for (int i : toDelete) {
                overlapGraph.deleteNode(i2node[i]);
            }
        }

        if (progress != null)
            System.err.println("Overlap graph has " + overlapGraph.getNumberOfNodes() + " nodes and " + overlapGraph.getNumberOfEdges() + " edges");

        // assign weights to edges, weight is max path length that follows given edge
        for (Node v = overlapGraph.getFirstNode(); v != null; v = v.getNext()) {
            if (v.getInDegree() == 0) {
                visitNodesRec(v, edgeWeights);
            }
        }
    }

    /**
     * gets the produced overlap graph
     *
     * @return overlap graph
     */
    public Graph getOverlapGraph() {
        return overlapGraph;
    }

    /**
     * gets the read-id to contained reads mapping
     *
     * @return mapping of read ids to contained reads (entries may be null)
     */
    public List<Integer>[] getReadId2ContainedReads() {
        return readId2ContainedReads;
    }

    /**
     * counts the letters (non-gap characters) in the lane's non-gap range
     *
     * @return number of letters
     */
    private int countLetters(Lane lane) {
        int count = 0;
        for (int i = lane.getFirstNonGapPosition(); i <= lane.getLastNonGapPosition(); i++) {
            if (Character.isLetter(lane.charAt(i)))
                count++;
        }
        return count;
    }

    /**
     * Recursively visits all nodes reachable from v and sets each out-edge's
     * weight to the length of the longest path that follows it (memoized in edgeWeight).
     *
     * @return length of the longest path starting at v
     */
    private int visitNodesRec(Node v, EdgeArray<Integer> edgeWeight) {
        int maxValue = 0;
        for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
            if (edgeWeight.get(e) == null) {
                edgeWeight.put(e, visitNodesRec(e.getTarget(), edgeWeight) + 1);
            }
            maxValue = Math.max(maxValue, edgeWeight.get(e));
        }
        return maxValue;
    }

    /**
     * Counts the number of letters in the overlap between two lanes;
     * any mismatch (other than the wildcard 'n') makes the overlap imperfect.
     *
     * @return number of letters in the perfect overlap, or 0 if a mismatch is encountered
     */
    private int computeNumberOfLettersInPerfectOverlap(Lane iLane, Lane jLane) {
        final int firstCoordinate = Math.max(iLane.getFirstNonGapPosition(), jLane.getFirstNonGapPosition());
        final int lastCoordinate = Math.min(iLane.getLastNonGapPosition(), jLane.getLastNonGapPosition());

        int count = 0;
        for (int i = firstCoordinate; i < lastCoordinate; i++) {
            char iChar = Character.toLowerCase(iLane.charAt(i));
            char jChar = Character.toLowerCase(jLane.charAt(i));
            if (iChar != jChar && iChar != 'n' && jChar != 'n')
                return 0;
            else if (Character.isLetter(iChar))
                count++;
        }
        return count;
    }

    /**
     * gets the map from graph node to the name of the read it represents
     *
     * @return node-to-read-name map
     */
    public NodeArray<String> getNode2ReadNameMap() {
        return node2readName;
    }
}
| 8,180 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ContigBuilder.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/alignment/ContigBuilder.java | /*
* ContigBuilder.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.alignment;
import jloda.graph.Node;
import jloda.util.CanceledException;
import jloda.util.CollectionUtils;
import jloda.util.Pair;
import jloda.util.StringUtils;
import jloda.util.progress.ProgressListener;
import jloda.util.progress.ProgressPercentage;
import megan.alignment.gui.Alignment;
import megan.alignment.gui.Lane;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* builds contigs from paths and data
* Daniel Huson, 5.2015
*/
public class ContigBuilder {
private final ArrayList<Pair<String, String>> result;
private final Node[][] paths;
private final Node[] singles;
private final List<Integer>[] readId2ContainedReads;
private int countContigs;
private int countSingletons;
    /**
     * constructor
     *
     * @param paths                 contig paths through the overlap graph (each an ordered array of read nodes)
     * @param singles               singleton nodes that are not part of any path
     * @param readId2ContainedReads read id -> ids of reads contained in that read (entries may be null)
     */
    public ContigBuilder(Node[][] paths, Node[] singles, List<Integer>[] readId2ContainedReads) {
        this.paths = paths;
        this.singles = singles;
        this.readId2ContainedReads = readId2ContainedReads;
        result = new ArrayList<>();
    }
/**
* apply the algorith
*
*/
public void apply(int alignmentNumber, Alignment alignment, int minReads, double minCoverage, int minLength, boolean sortAlignmentByContigs, ProgressListener progress) throws CanceledException {
progress.setSubtask("Building contigs");
progress.setMaximum(paths.length);
progress.setProgress(0);
countContigs = 0;
countSingletons = singles.length;
for (Node[] contig : paths) {
if (contig.length > 0) {
countContigs++;
final String contigName = (alignmentNumber == 0 ? String.format("Contig-%06d", countContigs) : String.format("Contig-%06d.%d", alignmentNumber, countContigs));
final StringBuilder sequenceBuffer = new StringBuilder();
int minCoordinate = Integer.MAX_VALUE;
int maxCoordinate = Integer.MIN_VALUE;
int totalBases = 0;
int totalReads = 0;
for (int i = 0; i < contig.length; i++) {
totalReads++;
int readId = (Integer) contig[i].getInfo();
if (readId2ContainedReads[readId] != null) {
totalReads += readId2ContainedReads[readId].size();
// System.err.println("Contained: " + readId2ContainedReads[readId].size());
}
final Lane iLane = alignment.getLane(readId);
minCoordinate = Math.min(minCoordinate, iLane.getFirstNonGapPosition());
maxCoordinate = Math.max(maxCoordinate, iLane.getLastNonGapPosition());
totalBases += iLane.getLastNonGapPosition() - iLane.getFirstNonGapPosition() + 1;
if (i + 1 < contig.length) {
int nextReadId = (Integer) contig[i + 1].getInfo();
int length = alignment.getLane(nextReadId).getFirstNonGapPosition() - iLane.getFirstNonGapPosition();
sequenceBuffer.append(iLane.getBlock(), 0, length);
} else {
sequenceBuffer.append(iLane.getBlock());
}
}
if (totalReads < minReads) {
continue;
}
// remove all gaps from contig. These are induced by other reads in other contigs, so not need to keep them
// also, this won't change the frame when processing BlastText alignments
final String contigSequence = sequenceBuffer.toString().replaceAll("-", ""); // remove all gaps...
if (contigSequence.length() < minLength) {
continue;
}
float coverage = (float) totalBases / Math.max(1.0f, contigSequence.length());
if (coverage < minCoverage) {
continue;
}
final String referenceName = StringUtils.replaceSpaces(alignment.getReferenceName(), '_');
final Pair<String, String> aContig = new Pair<>();
aContig.setFirst(String.format(">%s\tlength=%d\treads=%d\tcoverage=%.1f\tref=%s\tcoords=%d..%d\n", contigName, contigSequence.length(), totalReads, coverage, StringUtils.swallowLeadingGreaterSign(referenceName), (minCoordinate + 1), (maxCoordinate + 1)));
aContig.setSecond(contigSequence);
System.err.print(aContig.getFirst());
result.add(aContig);
} else
countSingletons++;
progress.incrementProgress();
}
// sort contigs in alignment:
if (sortAlignmentByContigs)
sortAlignmentByContigs(alignment);
if (progress instanceof ProgressPercentage)
progress.reportTaskCompleted();
}
/**
* get the computed contigs
*
* @return contigs
*/
public ArrayList<Pair<String, String>> getContigs() {
return result;
}
public int getCountContigs() {
return countContigs;
}
public int getCountSingletons() {
return countSingletons;
}
/**
* sorts the alignment by contigs
*
*/
private void sortAlignmentByContigs(final Alignment alignment) {
Arrays.sort(paths, (a, b) -> {
Integer posA = alignment.getLane((Integer) a[0].getInfo()).getFirstNonGapPosition();
Integer posB = alignment.getLane((Integer) b[0].getInfo()).getFirstNonGapPosition();
return posA.compareTo(posB);
});
Arrays.sort(paths, (a, b) -> -Integer.compare(a.length, b.length));
// sort reads by contigs:
List<Integer> order = new ArrayList<>(alignment.getNumberOfSequences());
for (Node[] contig : paths) {
for (Node v : contig) {
{
Integer id = (Integer) v.getInfo();
order.add(id);
}
final List<Integer> contained = readId2ContainedReads[(Integer) v.getInfo()];
if (contained != null) {
order.addAll(contained);
}
}
}
CollectionUtils.randomize(singles, 666);
for (Node v : singles) {
Integer id = (Integer) v.getInfo();
order.add(id);
}
alignment.setOrder(order);
}
}
| 7,267 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
SelectFromPreviousWindowCommand.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/commands/SelectFromPreviousWindowCommand.java | /*
* SelectFromPreviousWindowCommand.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.commands;
import jloda.graph.Graph;
import jloda.graph.Node;
import jloda.graph.NodeArray;
import jloda.graph.NodeSet;
import jloda.swing.commands.CommandBase;
import jloda.swing.commands.ICommand;
import jloda.swing.director.ProjectManager;
import jloda.swing.graphview.GraphView;
import jloda.swing.util.ResourceManager;
import jloda.util.parse.NexusStreamParser;
import megan.assembly.OverlapGraphViewer;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.KeyEvent;
import java.util.Set;
/**
 * Command that transfers the node selection from the previously active window:
 * every node in this overlap graph whose read name occurred among the previously
 * selected node labels becomes selected.
 * Daniel Huson, 5.2015
 */
public class SelectFromPreviousWindowCommand extends CommandBase implements ICommand {
    /**
     * Parses the given command and executes it.
     *
     * @param np parser positioned at this command's syntax
     */
    @Override
    public void apply(NexusStreamParser np) throws Exception {
        np.matchIgnoreCase(getSyntax());

        if (!(getParent() instanceof OverlapGraphViewer))
            return; // only meaningful when attached to an overlap-graph viewer

        final OverlapGraphViewer viewer = (OverlapGraphViewer) getParent();
        final Set<String> previouslySelected = ProjectManager.getPreviouslySelectedNodeLabels();
        if (previouslySelected.size() == 0)
            return; // nothing was selected before, so there is nothing to transfer

        final GraphView graphView = viewer.getGraphView();
        final NodeArray<String> readNames = viewer.getNode2ReadNameMap();
        final Graph graph = graphView.getGraph();

        // collect all nodes whose read name matches a previously selected label
        final NodeSet toSelect = new NodeSet(graph);
        for (Node v = graph.getFirstNode(); v != null; v = graph.getNextNode(v)) {
            final String name = readNames.get(v);
            if (name != null && previouslySelected.contains(name))
                toSelect.add(v);
        }

        if (toSelect.size() > 0) {
            graphView.setSelected(toSelect, true);
            graphView.repaint();
        }
    }

    /**
     * get command-line usage description
     *
     * @return usage
     */
    @Override
    public String getSyntax() {
        return "select what=previous;";
    }

    /**
     * action to be performed: run this command through the command executor
     *
     * @param ev the triggering event (unused)
     */
    @Override
    public void actionPerformed(ActionEvent ev) {
        execute("select what=previous;");
    }

    // always applicable
    public boolean isApplicable() {
        return true;
    }

    // alternative menu name
    public String getAltName() {
        return "From Previous Alignment";
    }

    // display name
    public String getName() {
        return "Select From Previous Window";
    }

    // tooltip / description text
    public String getDescription() {
        return "Select from previous window";
    }

    // menu icon (empty placeholder)
    public ImageIcon getIcon() {
        return ResourceManager.getIcon("Empty16.gif");
    }

    // critical commands are executed synchronously
    public boolean isCritical() {
        return true;
    }

    /**
     * gets the accelerator key to be used in menu
     *
     * @return accelerator key (platform menu shortcut + P)
     */
    public KeyStroke getAcceleratorKey() {
        final int menuShortcutMask = Toolkit.getDefaultToolkit().getMenuShortcutKeyMaskEx();
        return KeyStroke.getKeyStroke(KeyEvent.VK_P, menuShortcutMask);
    }
}
| 3,866 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
SimpleAligner4DNA.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/align/SimpleAligner4DNA.java | /*
* SimpleAligner4DNA.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.align;
import jloda.seq.BlastMode;
import jloda.util.BoyerMoore;
import jloda.util.Single;
import java.util.Iterator;
/**
 * Convenience class for aligning a DNA query against a DNA reference using a
 * banded semi-global aligner; used to classify how a query overlaps a reference.
 * NOTE: the accessor methods (getPercentIdentity, and the positions read in
 * getOverlap) report the state of the LAST alignment computed by the internal
 * BandedAligner, so call order matters.
 * Created by huson on 2/9/16.
 */
public class SimpleAligner4DNA {
    // how the query overlaps the reference (from the query's point of view)
    public enum OverlapType {QuerySuffix2RefPrefix, QueryContainedInRef, QueryPrefix2RefSuffix, None} // what is query?

    private final AlignerOptions alignerOptions;
    private final BandedAligner bandedAligner;
    private int minRawScore = 1;          // minimum raw alignment score to accept a hit
    private float minPercentIdentity = 0; // minimum percent identity to accept a hit (0 = no constraint)

    public SimpleAligner4DNA() {
        alignerOptions = new AlignerOptions();
        alignerOptions.setAlignmentType(AlignerOptions.AlignmentMode.SemiGlobal);
        alignerOptions.setScoringMatrix(new DNAScoringMatrix(alignerOptions.getMatchScore(), alignerOptions.getMismatchScore()));
        bandedAligner = new BandedAligner(alignerOptions, BlastMode.BlastN);
    }

    /**
     * compute a semi-global alignment between the query and the reference,
     * anchored at the given seed; leaves the result in bandedAligner's state
     *
     * @param queryPos   seed start in the query
     * @param refPos     seed start in the reference
     * @param seedLength length of the exact seed match
     * @return true, if alignment found (passes the raw-score and identity thresholds)
     */
    private boolean computeAlignment(byte[] query, byte[] reference, int queryPos, int refPos, int seedLength) {
        bandedAligner.computeAlignment(query, query.length, reference, reference.length, queryPos, refPos, seedLength);
        return bandedAligner.getRawScore() >= minRawScore && (minPercentIdentity == 0 || bandedAligner.getPercentIdentity() >= minPercentIdentity);
    }

    /**
     * set the scoring parameters (match/mismatch scores and gap penalties)
     *
     */
    public void setAlignmentParameters(int matchScore, int mismatchScore, int gapOpenPenality, int gapExtensionPenality) {
        alignerOptions.setScoringMatrix(new DNAScoringMatrix(matchScore, mismatchScore));
        alignerOptions.setGapOpenPenalty(gapOpenPenality);
        alignerOptions.setGapExtensionPenalty(gapExtensionPenality);
    }

    /**
     * get the minimum raw score required to accept an alignment
     *
     */
    public int getMinRawScore() {
        return minRawScore;
    }

    /**
     * set the minimum raw score required to accept an alignment
     *
     */
    public void setMinRawScore(int minRawScore) {
        this.minRawScore = minRawScore;
    }

    /**
     * get the minimum percent identity (0 disables the identity filter)
     *
     */
    private float getMinPercentIdentity() {
        return minPercentIdentity;
    }

    /**
     * set the minimum percent identity (0 disables the identity filter)
     *
     */
    public void setMinPercentIdentity(float minPercentIdentity) {
        this.minPercentIdentity = minPercentIdentity;
    }

    /**
     * gets a position of the query in the reference, or reference.length if not contained.
     * Side effect: on success, re-runs the alignment at the best seed so that
     * bandedAligner's accessors describe the best hit afterwards.
     *
     * @param queryMustBeContained if true, only accept alignments that cover the whole query
     * @return pos or reference.length
     */
    private int getPositionInReference(byte[] query, byte[] reference, boolean queryMustBeContained) {
        if (queryMustBeContained && getMinPercentIdentity() >= 100) {
            // exact containment requested: plain substring search suffices
            // NOTE(review): this path does not update bandedAligner's state — confirm callers don't rely on it here
            return (new BoyerMoore(query, 0, query.length, 127)).search(reference);
        }

        int bestQueryPos = 0;
        int bestRefPos = 0;
        int bestScore = 0;

        // determine smallest exact match (seed) that must be present given the identity threshold
        final int k = Math.max(10, (int) (100.0 / (100.0 - minPercentIdentity + 1)));

        // slide non-overlapping k-length seeds along the query and try each occurrence in the reference
        for (int queryPos = 0; queryPos < query.length - k + 1; queryPos += k) {
            BoyerMoore boyerMoore = new BoyerMoore(query, queryPos, k, 127);
            for (Iterator<Integer> it = boyerMoore.iterator(reference); it.hasNext(); ) {
                int refPos = it.next();
                if ((!queryMustBeContained && computeAlignment(query, reference, queryPos, refPos, k))
                        || (queryMustBeContained && refPos <= reference.length - query.length && computeAlignment(query, reference, queryPos, refPos, k) && bandedAligner.getAlignmentLength() >= query.length)) {
                    {
                        if (bandedAligner.getRawScore() > bestScore) {
                            bestScore = bandedAligner.getRawScore();
                            bestQueryPos = queryPos;
                            bestRefPos = refPos;
                        }
                    }
                }
            }
        }
        if (bestScore > 0) {
            // recompute at the best seed so the aligner's state matches the returned position
            computeAlignment(query, reference, bestQueryPos, bestRefPos, k);
            return bestRefPos;
        }
        return reference.length; // sentinel: no acceptable alignment found
    }

    /**
     * gets the overlap type of the query in the reference, classified from the
     * start/end coordinates of the best alignment
     *
     * @param overlap receives the overlap length (0 when type is None)
     * @return type
     */
    public OverlapType getOverlap(byte[] query, byte[] reference, Single<Integer> overlap) {
        if (getPositionInReference(query, reference, false) != reference.length) {
            if (bandedAligner.getStartQuery() > 0 && bandedAligner.getStartReference() == 0 && bandedAligner.getAlignmentLength() < reference.length) {
                // alignment starts inside the query and at the reference start: query suffix overlaps reference prefix
                overlap.set(query.length - bandedAligner.getStartQuery());
                return OverlapType.QuerySuffix2RefPrefix;
            } else if (bandedAligner.getStartQuery() == 0 && bandedAligner.getStartReference() > 0 && bandedAligner.getAlignmentLength() < query.length) {
                // alignment starts at the query start and inside the reference: query prefix overlaps reference suffix
                overlap.set(bandedAligner.getEndQuery());
                return OverlapType.QueryPrefix2RefSuffix;
            } else if (bandedAligner.getStartQuery() == 0 && bandedAligner.getEndQuery() == query.length) {
                // alignment spans the whole query: query is contained in the reference
                overlap.set(query.length);
                return OverlapType.QueryContainedInRef;
            }
        }
        overlap.set(0);
        return OverlapType.None;
    }

    /**
     * get the percent identity of the last alignment
     *
     * @return percent identity
     */
    public float getPercentIdentity() {
        return bandedAligner.getPercentIdentity();
    }
}
| 6,437 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
BandedAligner.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/align/BandedAligner.java | /*
* BandedAligner.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.align;
import jloda.seq.BlastMode;
import jloda.util.ReusableByteBuffer;
/**
* banded DNA aligner. Does both local and semiGlobal alignment
* Daniel Huson, 8.2014
*/
class BandedAligner {
private final double lambda;
private final double lnK;
private final static double LN_2 = 0.69314718055994530941723212145818;
private final static int MINUS_INFINITY = -100000000;
public static int ALIGNMENT_SEGMENT_LENGTH = 60; // length of alignment segment in text format output
private final static byte[] MID_TRACK_LEADING_SPACES = " ".getBytes(); // spaces used in text format output
private long referenceDatabaseLength;
private byte[] query;
private int queryLength;
private byte[] reference;
private int referenceLength;
private final int[][] scoringMatrix;
private final int gapOpenPenalty;
private final int gapExtensionPenalty;
private final int band;
private int rawScore;
private float bitScore = 0;
private double expected = 0;
private final boolean isDNAAlignment;
private int identities;
private int mismatches;
private int gaps;
private int gapOpens;
private int alignmentLength;
private final BlastMode mode;
private final boolean doSemiGlobal;
private int refOffset; // needed convert from row to position in reference
private int startQuery; // first alignment position of query
private int endQuery = -1; // last alignment position of query +1
private int startReference;
private int endReference;
private int[][] matrixM;
private int[][] matrixIRef;
private int[][] matrixIQuery;
private byte[][] traceBackM;
private byte[][] traceBackIRef;
private byte[][] traceBackIQuery;
private static final byte DONE = 9;
private static final byte M_FROM_M = 1;
private static final byte M_FROM_IRef = 2;
private static final byte M_FROM_IQuery = 3;
private static final byte IRef_FROM_M = 4;
private static final byte IRef_FROM_IRef = 5;
private static final byte IQuery_FROM_M = 6;
private static final byte IQuery_FROM_IQuery = 7;
// buffers:
private byte[] queryTrack = new byte[1000];
private byte[] midTrack = new byte[1000];
private byte[] referenceTrack = new byte[1000];
private final ReusableByteBuffer alignmentBuffer = new ReusableByteBuffer(10000);
private int queryPos;
private int refPos;
// new stuff:
private byte[][] alignment; // last computed alignment
private int seedLength;
// number of rows depends only on band width
private final int rows;
private final int lastRowToFill;
private final int middleRow;
/**
* constructor
*
*/
public BandedAligner(final AlignerOptions alignerOptions, final BlastMode mode) {
this.scoringMatrix = alignerOptions.getScoringMatrix().getMatrix();
this.isDNAAlignment = (mode == BlastMode.BlastN);
this.doSemiGlobal = alignerOptions.getAlignmentType() == AlignerOptions.AlignmentMode.SemiGlobal;
this.lambda = alignerOptions.getLambda();
this.lnK = alignerOptions.getLnK();
this.mode = mode;
band = alignerOptions.getBand();
gapOpenPenalty = alignerOptions.getGapOpenPenalty();
gapExtensionPenalty = alignerOptions.getGapExtensionPenalty();
referenceDatabaseLength = alignerOptions.getReferenceDatabaseLength();
rows = 2 * band + 3;
lastRowToFill = rows - 2;
middleRow = rows / 2; // half
matrixM = new int[0][0]; // don't init here, need to initialize properly
matrixIRef = new int[0][0];
matrixIQuery = new int[0][0];
traceBackM = new byte[0][0];
traceBackIRef = new byte[0][0];
traceBackIQuery = new byte[0][0];
// todo: only use one traceback matrix
boolean samSoftClipping = alignerOptions.isSamSoftClipping();
}
/**
* Computes a banded local or semiGlobal alignment.
* The raw score is computed.
*
*/
public void computeAlignment(byte[] query, int queryLength, byte[] reference, int referenceLength, int queryPos, int refPos, int seedLength) {
this.query = query;
this.queryLength = queryLength;
this.reference = reference;
this.referenceLength = referenceLength;
this.queryPos = queryPos;
this.refPos = refPos;
this.seedLength = seedLength;
startQuery = startReference = endQuery = endReference = -1;
if (doSemiGlobal)
computeSemiGlobalAlignment();
else
computeLocalAlignment();
}
/**
* Performs a banded local alignment and return the raw score.
*/
private void computeLocalAlignment() {
alignment = null; // will need to call alignmentByTraceBack to compute this
refOffset = refPos - queryPos - band - 2; // need this to compute index in reference sequence
final int cols = queryLength + 2; // query plus one col before and one after
final int firstSeedCol = queryPos + 1; // +1 because col=pos+1
final int lastSeedCol = queryPos + seedLength; // +1 because col=pos+1, but then -1 because want to be last in seed (not first after seed)
//if (lastSeedCol > queryLength)
// return; // too long
// ------- compute score that comes from seed (without first and last member)
rawScore = 0;
{
for (int col = firstSeedCol + 1; col < lastSeedCol; col++) {
final int refIndex = middleRow + col + refOffset;
rawScore += scoringMatrix[query[col - 1]][reference[refIndex]];
}
if (rawScore <= 0) {
rawScore = 0;
return;
}
}
// ------- resize matrices if necessary:
if (cols >= matrixM.length) { // all values will be 0
// resize:
matrixM = new int[cols][rows];
matrixIRef = new int[cols][rows];
matrixIQuery = new int[cols][rows];
traceBackM = new byte[cols][rows];
traceBackIRef = new byte[cols][rows];
traceBackIQuery = new byte[cols][rows];
// initialize first column:
for (int r = 1; r < rows; r++) {
// matrixM[0][r] = matrixIRef[0][r] = matrixIQuery[0][r] = 0;
traceBackM[0][r] = traceBackIRef[0][r] = traceBackIQuery[0][r] = DONE;
}
// initialize the first and last row:
for (int c = 0; c < cols; c++) {
// matrixM[c][0] = matrixIRef[c][0] = matrixIQuery[c][0] = matrixM[c][rows - 1] = matrixIRef[c][rows - 1] = matrixIQuery[c][rows - 1] = 0;
traceBackM[c][0] = traceBackIRef[c][0] = traceBackIQuery[c][0] = traceBackM[c][rows - 1] = traceBackIRef[0][rows - 1] = traceBackIQuery[0][rows - 1] = DONE;
}
}
// ------- fill dynamic programming matrix from 0 to first column of seed:
{
final int firstCol = Math.max(1, -refOffset - 2 * band - 1); // the column for which refIndex(firstCol,bottom-to-last row)==0
if (firstCol > 1) {
final int prevCol = firstCol - 1;
final int secondToLastRow = rows - 2;
traceBackM[prevCol][secondToLastRow] = traceBackIRef[prevCol][secondToLastRow] = traceBackIQuery[prevCol][secondToLastRow] = DONE; // set previous column to done
matrixM[prevCol][secondToLastRow] = matrixIRef[prevCol][secondToLastRow] = matrixIQuery[prevCol][secondToLastRow] = 0;
}
// note that query pos is c-1, because c==0 is before start of query
for (int col = firstCol; col <= firstSeedCol; col++) { // we never modify the first column or the first or last row
for (int row = 1; row <= lastRowToFill; row++) {
final int refIndex = row + col + refOffset;
if (refIndex == -1) { // in column before reference starts, init
traceBackM[col][row] = traceBackIRef[col][row] = traceBackIQuery[col][row] = DONE;
matrixM[col][row] = matrixIRef[col][row] = matrixIQuery[col][row] = 0;
} else if (refIndex >= 0) //do the actual alignment:
{
int bestMScore = 0;
// match or mismatch
{
final int s = scoringMatrix[query[col - 1]][reference[refIndex]];
int score = matrixM[col - 1][row] + s;
if (score > 0) {
traceBackM[col][row] = M_FROM_M;
bestMScore = score;
}
score = matrixIRef[col - 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IRef;
bestMScore = score;
}
score = matrixIQuery[col - 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IQuery;
bestMScore = score;
}
if (bestMScore == 0) {
traceBackM[col][row] = DONE;
}
matrixM[col][row] = bestMScore;
}
// insertion in reference:
int bestIRefScore = 0;
{
int score = matrixM[col][row - 1] - gapOpenPenalty;
if (score > bestIRefScore) {
traceBackIRef[col][row] = IRef_FROM_M;
bestIRefScore = score;
}
score = matrixIRef[col][row - 1] - gapExtensionPenalty;
if (score > bestIRefScore) {
bestIRefScore = score;
traceBackIRef[col][row] = IRef_FROM_IRef;
}
if (bestIRefScore == 0) {
traceBackIRef[col][row] = DONE;
}
matrixIRef[col][row] = bestIRefScore;
}
// insertion in query:
int bestIQueryScore = 0;
{
int score = matrixM[col - 1][row + 1] - gapOpenPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_M;
}
score = matrixIQuery[col - 1][row + 1] - gapExtensionPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_IQuery;
}
if (bestIQueryScore == 0) {
traceBackIQuery[col][row] = DONE;
}
matrixIQuery[col][row] = bestIQueryScore;
}
}
// else refIndex < -1
}
}
}
// ------- fill dynamic programming matrix from end of query to last column of seed:
{
final int lastCol = Math.min(queryLength + 1, queryPos + referenceLength - refPos + 1); // last column, fill upto lastCol-1
// initial last column:
for (int row = 1; row < rows; row++) {
matrixM[lastCol][row] = matrixIRef[lastCol][row] = matrixIQuery[lastCol][row] = 0;
traceBackM[lastCol][row] = traceBackIRef[lastCol][row] = traceBackIQuery[lastCol][row] = DONE;
}
// note that col=pos-1, or pos=col+1, because c==0 is before start of query
/*
System.err.println("lastSeedCol: " + lastSeedCol);
System.err.println("lastCol: " + lastCol);
System.err.println("lastRowToFill: " + lastRowToFill);
*/
for (int col = lastCol - 1; col >= lastSeedCol; col--) { // we never modify the first column or the first or last row
for (int row = lastRowToFill; row >= 1; row--) {
final int refIndex = row + col + refOffset;
if (refIndex >= referenceLength) { // out of range of the alignment
traceBackM[col][row] = traceBackIRef[col][row] = traceBackIQuery[col][row] = DONE;
matrixM[col][row] = matrixIRef[col][row] = matrixIQuery[col][row] = 0;
} else if (refIndex >= 0) { // do the actual alignment:
int bestMScore = 0;
// match or mismatch
{
final int s = scoringMatrix[query[col - 1]][reference[refIndex]]; // pos in query=col-1
int score = matrixM[col + 1][row] + s;
if (score > 0) {
traceBackM[col][row] = M_FROM_M;
bestMScore = score;
}
score = matrixIRef[col + 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IRef;
bestMScore = score;
}
score = matrixIQuery[col + 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IQuery;
bestMScore = score;
}
if (bestMScore == 0) {
traceBackM[col][row] = DONE;
}
matrixM[col][row] = bestMScore;
}
// insertion in ref
int bestIRefScore = 0;
{
int score = matrixM[col][row + 1] - gapOpenPenalty;
if (score > bestIRefScore) {
traceBackIRef[col][row] = IRef_FROM_M;
bestIRefScore = score;
}
score = matrixIRef[col][row + 1] - gapExtensionPenalty;
if (score > bestIRefScore) {
bestIRefScore = score;
traceBackIRef[col][row] = IRef_FROM_IRef;
}
if (bestIRefScore == 0) {
traceBackIRef[col][row] = DONE;
}
matrixIRef[col][row] = bestIRefScore;
}
// insertion in query:
int bestIQueryScore = 0;
{
int score = matrixM[col + 1][row - 1] - gapOpenPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_M;
}
score = matrixIQuery[col + 1][row - 1] - gapExtensionPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_IQuery;
}
if (bestIQueryScore == 0) {
traceBackIQuery[col][row] = DONE;
}
matrixIQuery[col][row] = bestIQueryScore;
}
}
// else refIndex >referenceLength
}
}
}
rawScore += Math.max(Math.max(matrixIQuery[firstSeedCol][middleRow], matrixIRef[firstSeedCol][middleRow]), matrixM[firstSeedCol][middleRow]);
rawScore += Math.max(Math.max(matrixIQuery[lastSeedCol][middleRow], matrixIRef[lastSeedCol][middleRow]), matrixM[lastSeedCol][middleRow]);
}
/**
* Performs a banded semi-global alignment.
*/
private void computeSemiGlobalAlignment() {
alignment = null; // will need to call alignmentByTraceBack to compute this
refOffset = refPos - queryPos - band - 2; // need this to compute index in reference sequence
final int cols = queryLength + 2; // query plus one col before and one after
final int firstSeedCol = queryPos + 1; // +1 because col=pos+1
final int lastSeedCol = queryPos + seedLength; // +1 because col=pos+1, but then -1 because want to be last in seed (not first after seed)
//if (lastSeedCol > queryLength)
// return; // too long
// ------- compute score that comes from seed (without first and last member)
rawScore = 0;
{
for (int col = firstSeedCol + 1; col < lastSeedCol; col++) {
final int refIndex = middleRow + col + refOffset;
rawScore += scoringMatrix[query[col - 1]][reference[refIndex]];
}
if (rawScore <= 0) {
rawScore = 0;
return;
}
}
// ------- resize matrices if necessary:
if (cols >= matrixM.length) { // all values will be 0
// resize:
matrixM = new int[cols][rows];
matrixIRef = new int[cols][rows];
matrixIQuery = new int[cols][rows];
traceBackM = new byte[cols][rows];
traceBackIRef = new byte[cols][rows];
traceBackIQuery = new byte[cols][rows];
// initialize first column:
for (int r = 1; r < rows; r++) {
traceBackM[0][r] = traceBackIRef[0][r] = traceBackIQuery[0][r] = DONE;
matrixIQuery[0][r] = -gapOpenPenalty;
}
// initialize the first and last row:
for (int c = 0; c < cols; c++) {
matrixM[c][0] = matrixIRef[c][0] = matrixIQuery[c][0]
= matrixM[c][rows - 1] = matrixIRef[c][rows - 1] = matrixIQuery[c][rows - 1]
= MINUS_INFINITY; // must never go outside band
}
}
// ------- fill dynamic programming matrix from 0 to first column of seed:
{
final int firstCol = Math.max(1, -refOffset - 2 * band - 1); // the column for which refIndex(firstCol,bottom-to-last row)==0
if (firstCol > 1) {
final int prevCol = firstCol - 1;
final int secondToLastRow = rows - 2;
traceBackM[prevCol][secondToLastRow] = traceBackIRef[prevCol][secondToLastRow] = traceBackIQuery[prevCol][secondToLastRow] = DONE; // set previous column to done
matrixM[prevCol][secondToLastRow] = matrixIRef[prevCol][secondToLastRow] = matrixIQuery[prevCol][secondToLastRow] = 0;
}
// note that query pos is c-1, because c==0 is before start of query
for (int col = firstCol; col <= firstSeedCol; col++) { // we never modify the first column or the first or last row
for (int row = 1; row <= lastRowToFill; row++) {
final int refIndex = row + col + refOffset;
if (refIndex >= reference.length)
continue; // todo: debug this, sometimes happens, but shouldn't
if (refIndex == -1) { // in column before reference starts, init
traceBackM[col][row] = traceBackIRef[col][row] = traceBackIQuery[col][row] = DONE;
matrixM[col][row] = 0;
matrixIRef[col][row] = matrixIQuery[col][row] = -gapOpenPenalty;
} else if (refIndex >= 0) //do the actual alignment:
{
int bestMScore = Integer.MIN_VALUE;
// match or mismatch
{
final int s = scoringMatrix[query[col - 1]][reference[refIndex]];
int score = matrixM[col - 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_M;
bestMScore = score;
}
score = matrixIRef[col - 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IRef;
bestMScore = score;
}
score = matrixIQuery[col - 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IQuery;
bestMScore = score;
}
matrixM[col][row] = bestMScore;
}
// insertion in reference:
int bestIRefScore = Integer.MIN_VALUE;
{
int score = matrixM[col][row - 1] - gapOpenPenalty;
if (score > bestIRefScore) {
traceBackIRef[col][row] = IRef_FROM_M;
bestIRefScore = score;
}
score = matrixIRef[col][row - 1] - gapExtensionPenalty;
if (score > bestIRefScore) {
bestIRefScore = score;
traceBackIRef[col][row] = IRef_FROM_IRef;
}
matrixIRef[col][row] = bestIRefScore;
}
// insertion in query:
int bestIQueryScore = Integer.MIN_VALUE;
{
int score = matrixM[col - 1][row + 1] - gapOpenPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_M;
}
score = matrixIQuery[col - 1][row + 1] - gapExtensionPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_IQuery;
}
matrixIQuery[col][row] = bestIQueryScore;
}
}
// else refIndex < -1
}
}
}
// ------- fill dynamic programming matrix from end of query to last column of seed:
{
final int lastCol = Math.min(queryLength + 1, queryPos + referenceLength - refPos + 1); // last column, fill upto lastCol-1
// initial last column:
for (int row = 1; row < rows - 1; row++) { // no need to init first or last row...
matrixM[lastCol][row] = 0;
matrixIRef[lastCol][row] = matrixIQuery[lastCol][row] = -gapOpenPenalty;
traceBackM[lastCol][row] = traceBackIRef[lastCol][row] = traceBackIQuery[lastCol][row] = DONE;
}
// note that col=pos-1, or pos=col+1, because c==0 is before start of query
/*
System.err.println("lastSeedCol: " + lastSeedCol);
System.err.println("lastCol: " + lastCol);
System.err.println("lastRowToFill: " + lastRowToFill);
*/
for (int col = lastCol - 1; col >= lastSeedCol; col--) { // we never modify the first column or the first or last row
for (int row = lastRowToFill; row >= 1; row--) {
final int refIndex = row + col + refOffset;
if (refIndex >= referenceLength) { // out of range of the alignment
traceBackM[col][row] = traceBackIRef[col][row] = traceBackIQuery[col][row] = DONE;
matrixM[col][row] = matrixIRef[col][row] = matrixIQuery[col][row] = -gapOpenPenalty;
} else if (refIndex >= 0) { // do the actual alignment:
int bestMScore = Integer.MIN_VALUE;
// match or mismatch
{
final int s = scoringMatrix[query[col - 1]][reference[refIndex]]; // pos in query=col-1
int score = matrixM[col + 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_M;
bestMScore = score;
}
score = matrixIRef[col + 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IRef;
bestMScore = score;
}
score = matrixIQuery[col + 1][row] + s;
if (score > bestMScore) {
traceBackM[col][row] = M_FROM_IQuery;
bestMScore = score;
}
matrixM[col][row] = bestMScore;
}
// insertion in ref
int bestIRefScore = Integer.MIN_VALUE;
{
int score = matrixM[col][row + 1] - gapOpenPenalty;
if (score > bestIRefScore) {
traceBackIRef[col][row] = IRef_FROM_M;
bestIRefScore = score;
}
score = matrixIRef[col][row + 1] - gapExtensionPenalty;
if (score > bestIRefScore) {
bestIRefScore = score;
traceBackIRef[col][row] = IRef_FROM_IRef;
}
matrixIRef[col][row] = bestIRefScore;
}
// insertion in query:
int bestIQueryScore = Integer.MIN_VALUE;
{
int score = matrixM[col + 1][row - 1] - gapOpenPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_M;
}
score = matrixIQuery[col + 1][row - 1] - gapExtensionPenalty;
if (score > bestIQueryScore) {
bestIQueryScore = score;
traceBackIQuery[col][row] = IQuery_FROM_IQuery;
}
matrixIQuery[col][row] = bestIQueryScore;
}
}
// else refIndex >referenceLength
}
}
}
rawScore += Math.max(Math.max(matrixIQuery[firstSeedCol][middleRow], matrixIRef[firstSeedCol][middleRow]), matrixM[firstSeedCol][middleRow]);
rawScore += Math.max(Math.max(matrixIQuery[lastSeedCol][middleRow], matrixIRef[lastSeedCol][middleRow]), matrixM[lastSeedCol][middleRow]);
}
/**
 * computes the bit score and the expected value (E-value) from the raw alignment score,
 * using the Karlin-Altschul parameters lambda and ln(K) held by this aligner
 */
public void computeBitScoreAndExpected() {
    if (rawScore <= 0) {
        // no positive raw score: report zero bits and a maximal E-value
        bitScore = 0;
        expected = Double.MAX_VALUE;
        return;
    }
    bitScore = (float) ((lambda * rawScore - lnK) / LN_2);
    expected = referenceDatabaseLength * queryLength * Math.pow(2, -bitScore);
}
/**
 * computes the alignment tracks (query, mid-line, reference) by tracing back through the
 * three dynamic-programming matrices (matrixM, matrixIRef, matrixIQuery).
 * Also sets the number of identities, mismatches, gaps and gap opens, and the start/end
 * coordinates on query and reference.
 * <p>
 * The alignment is assembled in three parts: from the first seed column back to the
 * alignment start, then the seed interior copied verbatim, then from the last seed
 * column forward to the alignment end. Sets {@code alignment} to null if rawScore <= 0.
 */
private void computeAlignmentByTraceBack() {
    if (rawScore <= 0) {
        alignment = null;
        return;
    }

    gaps = 0;
    gapOpens = 0;
    identities = 0;
    mismatches = 0;

    // get first part of alignment:
    int length = 0;
    {
        int r = middleRow;
        int c = queryPos + 1; // col = query position + 1, because column 0 lies before the query start

        byte[][] traceBack;
        // start the trace-back in whichever matrix holds the best score at (c, r):
        traceBack = traceBackM;
        if (matrixIRef[c][r] > matrixM[c][r]) {
            traceBack = traceBackIRef;
            if (matrixIQuery[c][r] > matrixIRef[c][r])
                traceBack = traceBackIQuery;
        } else if (matrixIQuery[c][r] > matrixM[c][r])
            traceBack = traceBackIQuery;

        loop:
        while (true) {
            // reference index encoded by (row, col) pair — see the DP fill code
            int refIndex = r + c + refOffset;

            switch (traceBack[c][r]) {
                case DONE: // reached the start of the alignment
                    startQuery = c;
                    startReference = r + c + refOffset + 1;
                    break loop;
                case M_FROM_M: // match/mismatch column, predecessor also match/mismatch
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = reference[refIndex];
                    if (queryTrack[length] == referenceTrack[length]) {
                        if (isDNAAlignment)
                            midTrack[length] = '|';
                        else
                            midTrack[length] = queryTrack[length];
                        identities++;
                    } else {
                        // for proteins, '+' marks a positive-scoring substitution
                        if (isDNAAlignment || scoringMatrix[queryTrack[length]][referenceTrack[length]] <= 0)
                            midTrack[length] = ' ';
                        else
                            midTrack[length] = '+';
                        mismatches++;
                    }
                    c--;
                    traceBack = traceBackM;
                    break;
                case M_FROM_IRef: // match/mismatch column, came from insertion-in-reference state
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = reference[refIndex];
                    if (queryTrack[length] == referenceTrack[length]) {
                        if (isDNAAlignment)
                            midTrack[length] = '|';
                        else
                            midTrack[length] = queryTrack[length];
                        identities++;
                    } else {
                        if (isDNAAlignment || scoringMatrix[queryTrack[length]][referenceTrack[length]] <= 0)
                            midTrack[length] = ' ';
                        else
                            midTrack[length] = '+';
                    }
                    c--;
                    traceBack = traceBackIRef;
                    break;
                case M_FROM_IQuery: // match/mismatch column, came from insertion-in-query state
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = reference[refIndex];
                    if (queryTrack[length] == referenceTrack[length]) {
                        if (isDNAAlignment)
                            midTrack[length] = '|';
                        else
                            midTrack[length] = queryTrack[length];
                        identities++;
                    } else {
                        if (isDNAAlignment || scoringMatrix[queryTrack[length]][referenceTrack[length]] <= 0)
                            midTrack[length] = ' ';
                        else
                            midTrack[length] = '+';
                    }
                    c--;
                    traceBack = traceBackIQuery;
                    break;
                case IRef_FROM_M: // gap opened in query (insertion in reference)
                    queryTrack[length] = '-';
                    referenceTrack[length] = reference[refIndex];
                    midTrack[length] = ' ';
                    r--;
                    traceBack = traceBackM;
                    gaps++;
                    gapOpens++;
                    break;
                case IRef_FROM_IRef: // gap extended in query
                    queryTrack[length] = '-';
                    referenceTrack[length] = reference[refIndex];
                    midTrack[length] = ' ';
                    r--;
                    traceBack = traceBackIRef;
                    gaps++;
                    break;
                case IQuery_FROM_M: // gap opened in reference (insertion in query)
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = '-';
                    midTrack[length] = ' ';
                    c--;
                    r++;
                    traceBack = traceBackM;
                    gaps++;
                    gapOpens++;
                    break;
                case IQuery_FROM_IQuery: // gap extended in reference
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = '-';
                    midTrack[length] = ' ';
                    c--;
                    r++;
                    traceBack = traceBackIQuery;
                    gaps++;
                    break;
                default:
                    throw new RuntimeException("Undefined trace-back state: " + traceBack[c][r]);
            }
            if (queryTrack[length] == '-' && referenceTrack[length] == '-')
                System.err.println("gap-gap at: " + length); // should never happen — diagnostic only
            if (++length >= queryTrack.length) { // grow all three tracks together
                queryTrack = grow(queryTrack);
                midTrack = grow(midTrack);
                referenceTrack = grow(referenceTrack);
            }
        } // end of loop

        // first part was built back-to-front, so reverse it:
        reverseInPlace(queryTrack, length);
        reverseInPlace(midTrack, length);
        reverseInPlace(referenceTrack, length);
    }

    // get second part of alignment: the seed interior, copied without trace-back
    {
        for (int i = 1; i < seedLength - 1; i++) {
            queryTrack[length] = query[queryPos + i];
            referenceTrack[length] = reference[refPos + i];
            if (queryTrack[length] == referenceTrack[length]) {
                if (isDNAAlignment)
                    midTrack[length] = '|';
                else
                    midTrack[length] = queryTrack[length];
                identities++;
            } else {
                if (isDNAAlignment || scoringMatrix[queryTrack[length]][referenceTrack[length]] <= 0)
                    midTrack[length] = ' ';
                else
                    midTrack[length] = '+';
                mismatches++;
            }
            if (++length >= queryTrack.length) {
                queryTrack = grow(queryTrack);
                midTrack = grow(midTrack);
                referenceTrack = grow(referenceTrack);
            }
        }
    }

    // get third part of alignment: forward trace from the last seed column
    {
        int r = middleRow;
        int c = queryPos + seedLength; // +1 because col=pos+1, but -1 because want to be in last position of seed

        byte[][] traceBack;
        traceBack = traceBackM;
        if (matrixIRef[c][r] > matrixM[c][r]) {
            traceBack = traceBackIRef;
            if (matrixIQuery[c][r] > matrixIRef[c][r])
                traceBack = traceBackIQuery;
        } else if (matrixIQuery[c][r] > matrixM[c][r])
            traceBack = traceBackIQuery;

        loop:
        while (true) {
            int refIndex = r + c + refOffset;

            switch (traceBack[c][r]) {
                case DONE: // reached the end of the alignment
                    endQuery = c - 1;
                    endReference = r + c + refOffset + 1;
                    break loop;
                case M_FROM_M:
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = reference[refIndex];
                    if (queryTrack[length] == referenceTrack[length]) {
                        if (isDNAAlignment)
                            midTrack[length] = '|';
                        else
                            midTrack[length] = queryTrack[length];
                        identities++;
                    } else {
                        if (isDNAAlignment || scoringMatrix[queryTrack[length]][referenceTrack[length]] <= 0)
                            midTrack[length] = ' ';
                        else
                            midTrack[length] = '+';
                        mismatches++;
                    }
                    c++;
                    traceBack = traceBackM;
                    break;
                case M_FROM_IRef:
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = reference[refIndex];
                    if (queryTrack[length] == referenceTrack[length]) {
                        if (isDNAAlignment)
                            midTrack[length] = '|';
                        else
                            midTrack[length] = queryTrack[length];
                        identities++;
                    } else {
                        if (isDNAAlignment || scoringMatrix[queryTrack[length]][referenceTrack[length]] <= 0)
                            midTrack[length] = ' ';
                        else
                            midTrack[length] = '+';
                    }
                    c++;
                    traceBack = traceBackIRef;
                    break;
                case M_FROM_IQuery:
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = reference[refIndex];
                    if (queryTrack[length] == referenceTrack[length]) {
                        if (isDNAAlignment)
                            midTrack[length] = '|';
                        else
                            midTrack[length] = queryTrack[length];
                        identities++;
                    } else {
                        if (isDNAAlignment || scoringMatrix[queryTrack[length]][referenceTrack[length]] <= 0)
                            midTrack[length] = ' ';
                        else
                            midTrack[length] = '+';
                    }
                    c++;
                    traceBack = traceBackIQuery;
                    break;
                case IRef_FROM_M:
                    queryTrack[length] = '-';
                    referenceTrack[length] = reference[refIndex];
                    midTrack[length] = ' ';
                    r++;
                    traceBack = traceBackM;
                    gaps++;
                    gapOpens++;
                    break;
                case IRef_FROM_IRef:
                    queryTrack[length] = '-';
                    referenceTrack[length] = reference[refIndex];
                    midTrack[length] = ' ';
                    r++;
                    traceBack = traceBackIRef;
                    gaps++;
                    break;
                case IQuery_FROM_M:
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = '-';
                    midTrack[length] = ' ';
                    c++;
                    r--;
                    traceBack = traceBackM;
                    gaps++;
                    gapOpens++;
                    break;
                case IQuery_FROM_IQuery:
                    queryTrack[length] = query[c - 1];
                    referenceTrack[length] = '-';
                    midTrack[length] = ' ';
                    c++;
                    r--;
                    traceBack = traceBackIQuery;
                    gaps++;
                    break;
                default: {
                    throw new RuntimeException("Undefined trace-back state: " + traceBack[c][r]);
                }
            }
            if (queryTrack[length] == '-' && referenceTrack[length] == '-')
                System.err.println("gap-gap at: " + length); // should never happen — diagnostic only
            if (++length >= queryTrack.length) {
                queryTrack = grow(queryTrack);
                midTrack = grow(midTrack);
                referenceTrack = grow(referenceTrack);
            }
        } // end of loop
    }

    alignmentLength = length;
    // publish trimmed copies of the three tracks:
    alignment = new byte[][]{copy(queryTrack, length), copy(midTrack, length), copy(referenceTrack, length)};
}
// ------- accessors for the values computed by the alignment and trace-back -------

public int getStartQuery() {
    return startQuery;
}

public int getEndQuery() {
    return endQuery;
}

public int getStartReference() {
    return startReference;
}

public int getEndReference() {
    return endReference;
}

public int getGaps() {
    return gaps;
}

public int getGapOpens() {
    return gapOpens;
}

private int getIdentities() {
    return identities;
}

/**
 * percent identity of the alignment; lazily triggers the trace-back if the
 * alignment has not been computed yet
 */
public float getPercentIdentity() {
    if (alignment == null)
        computeAlignmentByTraceBack();
    return getAlignmentLength() == 0 ? 0 : (float) (100 * getIdentities()) / (float) getAlignmentLength();
}

public int getMismatches() {
    return mismatches;
}

public int getRawScore() {
    return rawScore;
}

public float getBitScore() {
    return bitScore;
}

public double getExpected() {
    return expected;
}

public int getAlignmentLength() {
    return alignmentLength;
}

public long getReferenceDatabaseLength() {
    return referenceDatabaseLength;
}

public void setReferenceDatabaseLength(long referenceDatabaseLength) {
    this.referenceDatabaseLength = referenceDatabaseLength;
}
/**
 * reverses the first {@code length} entries of the given array in place
 */
private void reverseInPlace(byte[] array, int length) {
    int lo = 0;
    int hi = length - 1;
    while (lo < hi) {
        final byte swap = array[lo];
        array[lo++] = array[hi];
        array[hi--] = swap;
    }
}
/**
 * grows an array to (at least) double its size, preserving its contents
 *
 * @return larger array containing the original values
 */
private byte[] grow(byte[] a) {
    final int newLength = Math.max(2, 2 * a.length);
    final byte[] larger = new byte[newLength];
    System.arraycopy(a, 0, larger, 0, a.length);
    return larger;
}
/**
 * returns a copy of the first {@code length} entries of the given array
 *
 * @return copy
 */
private byte[] copy(byte[] array, int length) {
    final byte[] prefix = new byte[length];
    System.arraycopy(array, 0, prefix, 0, length);
    return prefix;
}
/**
 * returns a reversed copy of the first {@code length} entries of the given array
 *
 * @return reversed copy
 */
public byte[] copyReverse(byte[] array, int length) {
    final byte[] reversed = new byte[length];
    int src = length - 1;
    for (int dst = 0; dst < length; dst++)
        reversed[dst] = array[src--];
    return reversed;
}
/**
 * renders a column-major DP matrix for debugging: a header row of column indices,
 * a row of query letters, a separator, then one output line per matrix row.
 * Columns may have different heights (ragged matrix); a cell is printed only if the
 * column is tall enough, and values at or below MINUS_INFINITY print as " -oo".
 */
private String toString(int[][] colRowMatrix, int firstCol, int cols, byte[] query) {
    StringBuilder buf = new StringBuilder();
    // header line of column indices:
    buf.append(" |");
    for (int i = firstCol; i < cols; i++) {
        buf.append(String.format(" %3d", i));
    }
    buf.append("\n");
    // line of query letters (col i shows query position i-1):
    buf.append(" | ");
    for (int i = firstCol + 1; i < cols; i++) {
        buf.append(" ").append((char) query[i - 1]);
    }
    buf.append("\n");
    buf.append("---+");
    buf.append("----".repeat(Math.max(0, cols - firstCol)));
    buf.append("\n");
    int r = 0;
    boolean hasRow = true;
    // emit row r across all columns; stop once no column has a row r
    while (hasRow) {
        hasRow = false;
        for (int i = firstCol; i < cols; i++) {
            int[] aColRowMatrix = colRowMatrix[i];
            if (aColRowMatrix.length > r) {
                if (!hasRow) {
                    // first populated cell in this row: print the row label
                    hasRow = true;
                    buf.append(String.format("%2d |", r));
                }
                int value = aColRowMatrix[r];
                if (value <= MINUS_INFINITY)
                    buf.append(" -oo");
                else
                    buf.append(String.format(" %3d", value));
            }
        }
        buf.append("\n");
        r++;
    }
    return buf.toString();
}
/**
 * maps a bit score back to a raw score by inverting the Karlin-Altschul formula
 *
 * @return raw score (rounded down)
 */
public int getRawScoreForBitScore(double bitScore) {
    final double rawScore = (LN_2 * bitScore + lnK) / lambda;
    return (int) Math.floor(rawScore);
}
// require this many identical letters ...
private static final int minNumberOfExactMatches = 10;
// ... within a window of this many positions after the seed
private static final int windowForMinNumberOfExactMatches = 30;

/**
 * heuristically check whether there is going to be a good alignment: looks for at least
 * {@code minNumberOfExactMatches} identical letters in a short window starting at the seed.
 * Always succeeds for BlastN (seeds are exact there).
 *
 * @return true, if a good alignment is likely
 */
public boolean quickCheck(final byte[] query, final int queryLength, final byte[] reference, final int referenceLength, final int queryPos, final int refPos) {
    if (mode == BlastMode.BlastN)
        return true;
    // too close to either end for the required number of matches:
    if (queryPos + minNumberOfExactMatches >= queryLength || refPos + minNumberOfExactMatches >= referenceLength)
        return false;
    final int steps = Math.min(windowForMinNumberOfExactMatches, Math.min(queryLength - queryPos, referenceLength - refPos));
    int identical = 0;
    for (int offset = 0; offset < steps; offset++) {
        if (query[queryPos + offset] == reference[refPos + offset] && ++identical == minNumberOfExactMatches)
            return true;
    }
    return false;
}
}
| 47,829 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AlignerOptions.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/align/AlignerOptions.java | /*
* AlignerOptions.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.align;
import jloda.seq.BlastMode;
import jloda.util.Pair;
/**
 * all options required by an aligner
 * Daniel Huson, 8.2014
 */
public class AlignerOptions {
    /** alignment strategies supported by the aligner */
    public enum AlignmentMode {Local, SemiGlobal}

    private AlignmentMode alignmentType = AlignmentMode.Local;

    // seed/ungapped-extension parameters; 0 means "use the mode-specific default":
    private int minSeedIdentities = 0;
    private int ungappedXDrop = 0;
    private int ungappedMinRawScore = 0;

    // gap and substitution scores:
    private int gapOpenPenalty = 7;
    private int gapExtensionPenalty = 3;
    private int matchScore = 2;
    private int mismatchScore = -3;
    private int band = 4;

    private boolean referenceIsDNA = true;

    // two values for computing blast statistics:
    private double lambda = 0.625;
    private double lnK = -0.89159811928378356416921953633132;

    private IScoringMatrix scoringMatrix;

    private long referenceDatabaseLength = 100000;

    private boolean samSoftClipping = false;

    public AlignmentMode getAlignmentType() {
        return alignmentType;
    }

    public void setAlignmentType(AlignmentMode value) {
        alignmentType = value;
    }

    /** sets the alignment type from its string name */
    public void setAlignmentType(String value) {
        setAlignmentType(AlignmentMode.valueOf(value));
    }

    public int getGapOpenPenalty() {
        return gapOpenPenalty;
    }

    public void setGapOpenPenalty(int value) {
        gapOpenPenalty = value;
    }

    public int getGapExtensionPenalty() {
        return gapExtensionPenalty;
    }

    public void setGapExtensionPenalty(int value) {
        gapExtensionPenalty = value;
    }

    public int getMatchScore() {
        return matchScore;
    }

    public void setMatchScore(int value) {
        matchScore = value;
    }

    public int getMismatchScore() {
        return mismatchScore;
    }

    public void setMismatchScore(int value) {
        mismatchScore = value;
    }

    public int getBand() {
        return band;
    }

    public void setBand(int value) {
        band = value;
    }

    public long getReferenceDatabaseLength() {
        return referenceDatabaseLength;
    }

    public void setReferenceDatabaseLength(long value) {
        referenceDatabaseLength = value;
    }

    public IScoringMatrix getScoringMatrix() {
        return scoringMatrix;
    }

    public void setScoringMatrix(IScoringMatrix value) {
        scoringMatrix = value;
    }

    /** sets lambda and K (stored as ln K) for BLAST statistics, reporting them to stderr */
    public void setLambdaAndK(Pair<Double, Double> lambdaAndK) {
        System.err.println("BLAST statistics parameters: lambda=" + lambdaAndK.getFirst() + " k=" + lambdaAndK.getSecond());
        lambda = lambdaAndK.getFirst();
        lnK = Math.log(lambdaAndK.getSecond());
    }

    public void setK(double K) {
        lnK = Math.log(K);
    }

    public double getK() {
        return Math.exp(lnK);
    }

    public void setLambda(double value) {
        lambda = value;
    }

    public double getLambda() {
        return lambda;
    }

    public double getLnK() {
        return lnK;
    }

    public boolean isReferenceIsDNA() {
        return referenceIsDNA;
    }

    public void setReferenceIsDNA(boolean value) {
        referenceIsDNA = value;
    }

    /**
     * minimum number of identities required in a seed; if unset (0), a mode-specific
     * default is returned
     */
    public int getMinSeedIdentities(final BlastMode mode) {
        if (minSeedIdentities != 0)
            return minSeedIdentities;
        if (mode == BlastMode.BlastP || mode == BlastMode.BlastX)
            return 10;
        if (mode == BlastMode.BlastN)
            return 0; // no need to set this, because BlastN seeds are always completely identical
        return minSeedIdentities;
    }

    public void setMinSeedIdentities(int value) {
        minSeedIdentities = value;
    }

    /**
     * x-drop threshold for ungapped extension; if unset (0), a mode-specific default is returned
     */
    public int getUngappedXDrop(final BlastMode mode) {
        if (ungappedXDrop != 0)
            return ungappedXDrop;
        if (mode == BlastMode.BlastP || mode == BlastMode.BlastX)
            return 20;
        if (mode == BlastMode.BlastN)
            return 8; // todo: need to figure out best default
        return ungappedXDrop;
    }

    public void setUngappedXDrop(int value) {
        ungappedXDrop = value;
    }

    /**
     * minimum raw score for ungapped extension; if unset (0), a mode-specific default is returned
     */
    public int getUngappedMinRawScore(final BlastMode mode) {
        if (ungappedMinRawScore != 0)
            return ungappedMinRawScore;
        if (mode == BlastMode.BlastP || mode == BlastMode.BlastX || mode == BlastMode.BlastN)
            return 60; // todo: need to figure out best default for BlastN
        return ungappedMinRawScore;
    }

    public void setUngappedMinRawScore(int value) {
        ungappedMinRawScore = value;
    }

    public boolean isSamSoftClipping() {
        return samSoftClipping;
    }

    public void setSamSoftClipping(boolean value) {
        samSoftClipping = value;
    }
}
| 5,893 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
IScoringMatrix.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/align/IScoringMatrix.java | /*
* IScoringMatrix.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.align;
/**
 * interface for a substitution scoring matrix used by the aligners
 * Daniel Huson, 8.2014
 */
public interface IScoringMatrix {
    /**
     * gets the score for aligning letters a and b
     *
     * @param a first letter (byte code)
     * @param b second letter (byte code)
     * @return score
     */
    int getScore(byte a, byte b);

    /**
     * get the underlying scoring matrix, indexed by the byte codes of the two letters
     *
     * @return matrix
     */
    int[][] getMatrix();
}
| 1,172 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
DNAScoringMatrix.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/assembly/align/DNAScoringMatrix.java | /*
* DNAScoringMatrix.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.assembly.align;
/**
 * Basic DNA scoring matrix: one score for identical letters and another for all other pairs.
 * Indices are the 7-bit codes of the letters (0..127).
 * Daniel Huson, 8.2014
 */
public class DNAScoringMatrix implements IScoringMatrix {
    private final int[][] matrix = new int[128][128];

    public DNAScoringMatrix(int matchScore, int mismatchScore) {
        for (int a = 0; a < 128; a++) {
            for (int b = 0; b < 128; b++)
                matrix[a][b] = (a == b ? matchScore : mismatchScore);
        }
    }

    /**
     * get score for letters a and b
     *
     * @return score
     */
    public int getScore(byte a, byte b) {
        return matrix[a][b];
    }

    @Override
    public int[][] getMatrix() {
        return matrix;
    }
}
| 1,522 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ImportBiom2Taxonomy.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom2/ImportBiom2Taxonomy.java | /*
* ImportBiom2Taxonomy.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom2;
import ch.systemsx.cisd.base.mdarray.MDArray;
import ch.systemsx.cisd.hdf5.IHDF5Reader;
import jloda.util.StringUtils;
import megan.biom.biom1.QIIMETaxonParser;
import megan.classification.IdMapper;
import java.util.HashMap;
import java.util.Map;
class ImportBiom2Taxonomy {
    // metadata member names (matched case-insensitively) that may hold a taxonomy path:
    private final static String[] keys = {"taxonomy", "organism", "organisms"};

    /**
     * gets the taxonomy class to samples to counts map from a BIOM2 (HDF5) file
     *
     * @param reader          open HDF5 reader on a BIOM2 file
     * @param numberOfSamples number of samples (length of /sample/ids)
     * @param ignorePathAbove passed through to the QIIME taxon parser
     * @return map from taxon id to per-sample counts, or null if no usable taxonomy metadata is found
     */
    public static Map<Integer, float[]> getClass2Samples2Counts(IHDF5Reader reader, int numberOfSamples, boolean ignorePathAbove) {
        int countLinesImported = 0;
        int countLinesSkipped = 0;

        // locate the first taxonomy-like metadata member with a non-empty shape:
        MDArray<String> pathArray = null;
        int[] dimensions = null;
        for (final String metaKey : reader.getGroupMembers("/observation/metadata")) {
            if (StringUtils.getIndexIgnoreCase(metaKey, keys) != -1) {
                pathArray = reader.string().readMDArray("/observation/metadata/" + metaKey);
                dimensions = pathArray.dimensions();
                if (dimensions != null && dimensions.length > 0)
                    break;
            }
        }
        if (dimensions == null)
            return null;

        // compressed sparse-column representation of the counts matrix:
        final int[] indptr = reader.readIntArray("/sample/matrix/indptr"); // dataset containing the compressed column offsets
        final int[] indices = reader.readIntArray("/sample/matrix/indices"); // dataset containing the row indices (e.g., maps into observation/ids)
        final float[] data = reader.readFloatArray("/sample/matrix/data"); // dataset containing the actual matrix data

        final Map<Integer, float[]> class2counts = new HashMap<>();

        // Loop over Samples
        for (int i = 0; i < numberOfSamples; i++) {
            // Add counts to this sample
            for (int j = indptr[i]; j < indptr[i + 1]; j++) {
                final int taxonId;
                if (dimensions.length == 1) { // taxonomy stored as one string per observation
                    final String[] path = new String[]{pathArray.get(indices[j])};
                    taxonId = QIIMETaxonParser.parseTaxon(path, ignorePathAbove);
                    countLinesImported++; // fix: 1-dimensional entries were previously not counted as imported
                } else if (dimensions.length == 2) { // taxonomy stored as one rank per column
                    final String[] path = getPath(pathArray, indices[j], dimensions[1]);
                    taxonId = QIIMETaxonParser.parseTaxon(path, ignorePathAbove);
                    countLinesImported++;
                } else { // unexpected rank: count as unassigned
                    taxonId = IdMapper.UNASSIGNED_ID;
                    countLinesSkipped++;
                }
                float[] array = class2counts.computeIfAbsent(taxonId, k -> new float[numberOfSamples]);
                array[i] += data[j];
            }
        }
        System.err.printf("Lines imported:%,10d%n", countLinesImported);
        if (countLinesSkipped > 0)
            System.err.printf("Lines skipped: %,10d%n", countLinesSkipped);
        return class2counts;
    }

    /**
     * get the taxon path for the given row (one entry per column)
     */
    private static String[] getPath(MDArray<String> array, int row, int cols) {
        final String[] path = new String[cols];
        for (int c = 0; c < cols; c++)
            path[c] = array.get(row, c);
        return path;
    }

    /**
     * determines whether taxonomy metadata is present
     *
     * @return true, if present
     */
    public static boolean hasTaxonomyMetadata(IHDF5Reader reader) {
        for (final String metaKey : reader.getGroupMembers("/observation/metadata")) {
            if (StringUtils.getIndexIgnoreCase(metaKey, keys) != -1) {
                final MDArray<String> pathArray = reader.string().readMDArray("/observation/metadata/" + metaKey);
                final int[] dimensions = pathArray.dimensions();
                if (dimensions != null && dimensions.length > 0)
                    return true;
            }
        }
        return false;
    }
}
| 4,588 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom2Importer.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom2/Biom2Importer.java | /*
* Biom2Importer.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom2;
import ch.systemsx.cisd.hdf5.HDF5Factory;
import ch.systemsx.cisd.hdf5.IHDF5Reader;
import jloda.seq.BlastMode;
import jloda.swing.util.ProgramProperties;
import jloda.swing.window.NotificationsInSwing;
import jloda.util.CollectionUtils;
import jloda.util.StringUtils;
import megan.classification.Classification;
import megan.classification.IdMapper;
import megan.core.DataTable;
import megan.core.Document;
import megan.core.MeganFile;
import megan.util.BiomFileFilter;
import megan.viewer.MainViewer;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
/**
 * parses a file in biom format
 * Daniel Huson, 9.2012
 */
public class Biom2Importer {
    /**
     * apply the biom2 importer to the given file, replacing the content of the given document
     *
     * @param fileName        BIOM2 (HDF5) file to import
     * @param doc             document to fill
     * @param type            currently unused; kept for interface compatibility
     * @param ignorePathAbove passed through to the taxonomy parser
     * @throws IOException if the file is not in BIOM2 format or cannot be parsed
     */
    static public void apply(String fileName, Document doc, String type, boolean ignorePathAbove) throws IOException {
        if (!BiomFileFilter.isBiom2File(fileName)) {
            throw new IOException("File not in BIOM2 format (or incorrect file suffix?)");
        }
        System.err.println("Importing data from BIOM2 file");

        try (IHDF5Reader reader = HDF5Factory.openForReading(fileName)) {
            final TopLevelAttributes topLevelAttributes = new TopLevelAttributes(reader);
            System.err.println(topLevelAttributes);

            // guard against files too large to load into memory:
            if (topLevelAttributes.getShape().length > 0 && topLevelAttributes.getShape()[0] > 200000)
                throw new IOException("Too many rows,shape=" + StringUtils.toString(topLevelAttributes.getShape(), ", "));

            final String[] sampleIds = reader.readStringArray("/sample/ids"); // dataset of the sample IDs
            final int numberOfSamples = sampleIds.length;

            final Map<String, Map<Integer, float[]>> classification2class2sample2count = new HashMap<>();
            if (ImportBiom2Taxonomy.hasTaxonomyMetadata(reader)) {
                final Map<Integer, float[]> class2sample2count = ImportBiom2Taxonomy.getClass2Samples2Counts(reader, numberOfSamples, ignorePathAbove);
                classification2class2sample2count.put(Classification.Taxonomy, class2sample2count);
            }
            // todo: add parsing of other classifications here

            final DataTable dataTable = doc.getDataTable();
            dataTable.clear();
            dataTable.setCreator(ProgramProperties.getProgramName());
            dataTable.setCreationDate((new Date()).toString());

            // per-sample totals, preferably computed from the taxonomy classification:
            final float[] sizes;
            if (classification2class2sample2count.containsKey(Classification.Taxonomy))
                sizes = computeSizes(numberOfSamples, classification2class2sample2count.get(Classification.Taxonomy));
            else if (!classification2class2sample2count.isEmpty())
                sizes = computeSizes(numberOfSamples, classification2class2sample2count.values().iterator().next());
            else {
                throw new IOException("Unsupported data, please report on megan.informatik.uni-tuebingen.de");
            }
            final float totalReads = CollectionUtils.getSum(sizes);

            doc.getActiveViewers().addAll(classification2class2sample2count.keySet());
            doc.getMeganFile().setFileType(MeganFile.Type.MEGAN_SUMMARY_FILE);
            dataTable.getClassification2Class2Counts().putAll(classification2class2sample2count);
            if (!classification2class2sample2count.containsKey(Classification.Taxonomy)) {
                // ensure a taxonomy classification is always present: assign everything to 'unassigned'
                final Map<Integer, float[]> class2counts = new HashMap<>();
                class2counts.put(IdMapper.UNASSIGNED_ID, sizes);
                dataTable.getClassification2Class2Counts().put(Classification.Taxonomy, class2counts);
            }
            dataTable.setSamples(sampleIds, null, sizes, new BlastMode[]{BlastMode.Classifier});
            dataTable.setTotalReads(Math.round(totalReads));
            doc.setNumberReads(Math.round(totalReads));

            // read the meta data, if available:
            final int metaDataCount = Biom2MetaData.read(reader, sampleIds, doc.getSampleAttributeTable());

            System.err.println("done (" + totalReads + " reads)");

            final String message = "Imported " + totalReads + " reads, " + classification2class2sample2count.size() + " classifications, "
                    + metaDataCount + " attributes" + "\nGenerated by " + topLevelAttributes.getGeneratedBy()
                    + ", date: " + topLevelAttributes.getCreationDate();
            NotificationsInSwing.showInformation(MainViewer.getLastActiveFrame(), message);
        }
    }

    /**
     * determines the total sample sizes by summing all class counts per sample
     *
     * @return sample sizes
     */
    private static float[] computeSizes(int numberOfSamples, Map<Integer, float[]> class2sample2count) {
        final float[] sizes = new float[numberOfSamples];
        for (float[] array : class2sample2count.values()) {
            for (int i = 0; i < array.length; i++) {
                sizes[i] += array[i];
            }
        }
        return sizes;
    }

    // note: the unused private helpers getOrCreate, addToArray and newZeroedIntegerArray
    // (dead code, never called) have been removed
}
| 6,687 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
TopLevelAttributes.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom2/TopLevelAttributes.java | /*
* TopLevelAttributes.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom2;
import ch.systemsx.cisd.hdf5.IHDF5Reader;
import ch.systemsx.cisd.hdf5.IHDF5Writer;
import jloda.util.StringUtils;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;
/**
* top level attributes of file in biom2.1 format
* Daniel Huson, 9.2017
*/
public class TopLevelAttributes {
/** the table types permitted by the BIOM2 specification; each carries its spelled-out display name */
public enum Type {
    OTU_table("OTU table"), Pathway_table("Pathway table"), Function_table("Function table"), Ortholog_table("Ortholog table"), Gene_table("Gene table"), Metabolite_table("Metabolite table"), Taxon_table("Taxon table");
    // the display name as it appears in the file's "type" attribute
    private final String name;

    Type(String s) {
        name = s;
    }

    /** true if the given string equals this type's display name */
    boolean equalsName(String otherName) {
        return name.equals(otherName);
    }

    public String toString() {
        return this.name;
    }

    /** looks up a type by its display name (not the enum constant name); returns null if unknown */
    static Type valueOfName(String name) {
        for (Type value : Type.values())
            if (value.equalsName(name))
                return value;
        return null;
    }
}
private String id;
private String type;
private String formatURL;
private int[] formatVersion;
private String generatedBy;
private String creationDate;
private int[] shape;
private int nnz;
/**
* constructor
*/
public TopLevelAttributes() {
}
/**
* construct from reader
*
*/
public TopLevelAttributes(IHDF5Reader reader) throws IOException {
read(reader);
}
/**
* read top-level properties from biom2.1 file
*
*/
private void read(IHDF5Reader reader) throws IOException {
try {
id = reader.string().getAttr("/", "id");
type = reader.string().getAttr("/", "type");
formatURL = reader.string().getAttr("/", "format-url");
formatVersion = reader.int32().getArrayAttr("/", "format-version");
generatedBy = reader.string().getAttr("/", "generated-by");
creationDate = reader.string().getAttr("/", "creation-date");
shape = reader.int32().getArrayAttr("/", "shape");
nnz = reader.int32().getAttr("/", "nnz");
} catch (Exception ex) {
System.err.println("BIOM2 parser: Some required top-level attribute(s) missing.");
throw new IOException(ex);
}
if (!isValidType(type))
throw new IOException("Invalid type: " + type);
}
public void write(IHDF5Writer writer) throws IOException {
throw new IOException("Not implemented");
}
public String toString() {
return "id: " + id + "\n" +
"type: " + type + "\n" +
"formatURL: " + formatURL + "\n" +
"formatVersion: " + StringUtils.toString(formatVersion, ".") + "\n" +
"generatedBy: " + generatedBy + "\n" +
"creationDate: " + creationDate + "\n" +
"shape: " + StringUtils.toString(shape, ",") + "\n" +
"nnz: " + nnz + "\n";
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getFormatURL() {
return formatURL;
}
public void setFormatURL(String formatURL) {
this.formatURL = formatURL;
}
public int[] getFormatVersion() {
return formatVersion;
}
public void setFormatVersion(int major, int minor) {
this.formatVersion = new int[]{major, minor};
}
public String getGeneratedBy() {
return generatedBy;
}
public void setGeneratedBy(String generatedBy) {
this.generatedBy = generatedBy;
}
public String getCreationDate() {
return creationDate;
}
public void setCreationDate(String creationDate) {
this.creationDate = creationDate;
}
public void setCreationDate(Date date) {
this.creationDate = getISO8601StringForDate(date);
}
public int[] getShape() {
return shape;
}
public void setShape(int[] shape) {
this.shape = shape;
}
public int getNnz() {
return nnz;
}
public void setNnz(int nnz) {
this.nnz = nnz;
}
/**
* Return an ISO 8601 combined date and time string for specified date/time
*
* @param date Date
* @return String with format "yyyy-MM-dd'T'HH:mm:ss'Z'"
*/
private static String getISO8601StringForDate(Date date) {
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.US);
dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
return dateFormat.format(date);
}
private static boolean isValidType(String type) {
return Type.valueOfName(type) != null;
}
}
| 5,801 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom2ParserTest.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom2/Biom2ParserTest.java | /*
* Biom2ParserTest.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom2;
import ch.systemsx.cisd.base.mdarray.MDArray;
import ch.systemsx.cisd.hdf5.HDF5Factory;
import ch.systemsx.cisd.hdf5.IHDF5Reader;
import jloda.seq.BlastMode;
import jloda.util.CollectionUtils;
import jloda.util.FileUtils;
import jloda.util.StringUtils;
import megan.biom.biom1.QIIMETaxonParser;
import megan.classification.Classification;
import megan.classification.IdMapper;
import megan.core.Document;
import megan.viewer.TaxonomyData;
import java.io.*;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
public class Biom2ParserTest {
    /**
     * parse a file in biom2 format.
     * <p>
     * Reads the HDF5 groups of a BIOM 2.x file: sample ids, observation metadata
     * (taxonomy paths), and the sparse count matrix stored in CSC form
     * (indptr/indices/data), and accumulates per-sample taxon counts into a new
     * MEGAN Document. Also dumps each parsed taxonomy path to a side file for
     * inspection.
     *
     * @param inputFile     path to the BIOM2 (HDF5) file
     * @param preservePaths passed through to QIIMETaxonParser: keep the full path
     *                      when resolving a taxon id
     * @return Document
     */
    private static Document apply(String inputFile, boolean preservePaths) throws IOException {
        Writer dumpWriter = new BufferedWriter(new FileWriter(FileUtils.replaceFileSuffix(inputFile, (preservePaths ? "+p" : "-p") + "-dmp.txt")));
        final Document doc = new Document();
        doc.getDataTable().setCreator("MEGAN6 Biom2 import");
        doc.getDataTable().setCreationDate((new Date()).toString());
        System.err.println("Reading file: " + inputFile);
        // NOTE(review): countLinesImported is never incremented below, so the
        // summary printed at the end always reports 0 lines imported.
        int countLinesImported = 0;
        int countLinesSkipped = 0;
        try (IHDF5Reader reader = HDF5Factory.openForReading(inputFile)) {
            final TopLevelAttributes topLevelAttributes = new TopLevelAttributes(reader);
            System.err.println(topLevelAttributes);
            final String[] sampleIds = reader.readStringArray("/sample/ids"); // dataset of the sample IDs
            final ArrayList<String> classifications = new ArrayList<>();
            final Map<String, MDArray<String>> classification2MDArray = new HashMap<>();
            String taxonomyNameMetadata;
            // collect taxonomy-like observation metadata ("taxonomy" or "organism")
            for (final String metaKey : reader.getGroupMembers("/observation/metadata")) {
                if (metaKey.equalsIgnoreCase("taxonomy") || metaKey.equalsIgnoreCase("organism")) {
                    taxonomyNameMetadata = metaKey;
                    classifications.add(taxonomyNameMetadata);
                    // System.err.println("Elements: " + reader.getDataSetInformation("/observation/metadata/"+taxonomyNameMetadata).getNumberOfElements());
                    final MDArray<String> array = reader.string().readMDArray("/observation/metadata/" + taxonomyNameMetadata);
                    /*
                    int[] dimensions = array.dimensions();
                    if (dimensions.length == 2) {
                        int rows = dimensions[0];
                        int cols = dimensions[1];
                        for (int i = 0; i < rows; i++) {
                            if(false) {
                                System.err.print("row=" + i + ":");
                                for (int j = 0; j < cols; j++) {
                                    System.err.print(array.get(i, j) + ";");
                                }
                                System.err.println();
                                if (i > 100) {
                                    System.err.println("...");
                                    break;
                                }
                            }
                        }
                    }
                    */
                    classification2MDArray.put(taxonomyNameMetadata, array);
                }
            }
            // CSC (compressed sparse column) representation of the counts matrix:
            final int[] indptr = reader.readIntArray("/sample/matrix/indptr"); // dataset containing the compressed column offsets
            final int[] indices = reader.readIntArray("/sample/matrix/indices"); // dataset containing the row indices (e.g., maps into observation/ids)
            final float[] data = reader.readFloatArray("/sample/matrix/data"); // dataset containing the actual matrix data
            final Map<String, Map<Integer, float[]>> classication2class2counts = new HashMap<>();
            for (String classificationName : classifications) {
                classication2class2counts.put(classificationName, new HashMap<>());
            }
            // NOTE(review): the map is keyed by the actual metadata key found above,
            // which may be "organism" — in that case this lookup with "taxonomy"
            // returns null and the accumulation below throws an NPE. Verify.
            final Map<Integer, float[]> class2counts = classication2class2counts.get("taxonomy");
            final float[] sizes = new float[sampleIds.length];
            // Loop over Samples
            for (int i = 0; i < sampleIds.length; i++) {
                long size = 0;
                // Add counts to this sample: column i spans indptr[i]..indptr[i+1)
                for (int j = indptr[i]; j < indptr[i + 1]; j++) {
                    size += data[j];
                    for (String classificationName : classifications) {
                        final MDArray<String> pathArray = classification2MDArray.get(classificationName);
                        final int[] dimensions = pathArray.dimensions();
                        final int taxonId;
                        if (dimensions.length == 1) {
                            // 1D metadata: a single string per observation
                            final String[] path = new String[]{pathArray.get(indices[j])};
                            taxonId = QIIMETaxonParser.parseTaxon(path, preservePaths);
                            //System.err.println(Basic.toString(path, ";") + " -> " +taxonId+" -> "+TaxonomyData.getName2IdMap().get(taxonId)+" ->"+data[j]);
                            dumpWriter.append(StringUtils.toString(path, ";")).append(" -> ").append(String.valueOf(taxonId)).append(" -> ").
                                    append(TaxonomyData.getName2IdMap().get(taxonId)).append(" ->").append(String.valueOf(data[j])).append("\n");
                        } else if (dimensions.length == 2) {
                            // 2D metadata: one rank per column of the observation row
                            final String[] path = getPath(pathArray, indices[j], dimensions[1]);
                            taxonId = QIIMETaxonParser.parseTaxon(path, preservePaths);
                            //System.err.println(Basic.toString(path, ";") + " -> " + data[j]);
                            dumpWriter.append(StringUtils.toString(path, ";")).append(" -> ").append(String.valueOf(taxonId)).append(" -> ").
                                    append(TaxonomyData.getName2IdMap().get(taxonId)).append(" ->").append(String.valueOf(data[j])).append("\n");
                        } else {
                            // unexpected rank: count the entry as unassigned and skip
                            taxonId = IdMapper.UNASSIGNED_ID;
                            countLinesSkipped++;
                        }
                        //System.err.println(taxonId+" -> "+TaxonomyData.getName2IdMap().get(taxonId)+"- > "+data[j]);
                        float[] array = class2counts.computeIfAbsent(taxonId, k -> new float[sampleIds.length]);
                        array[i] += data[j];
                    }
                }
                sizes[i] = size;
            }
            doc.getDataTable().setSamples(sampleIds, null, sizes, new BlastMode[]{BlastMode.Classifier});
            doc.getDataTable().setTotalReads(Math.round(CollectionUtils.getSum(sizes)));
            for (Integer classId : class2counts.keySet()) {
                float[] array = class2counts.get(classId);
                for (int i = 0; i < array.length; i++)
                    doc.getDataTable().setClassification2Class2Count(Classification.Taxonomy, classId, i, array[i]);
            }
            // Loop over Metadata-Entries
            doc.getSampleAttributeTable().setSampleOrder(doc.getSampleNames());
            final Map<String, Object> sample2value = new HashMap<>();
            for (final String metaKey : reader.getGroupMembers("/sample/metadata")) {
                for (int i = 0; i < sampleIds.length; i++) {
                    String metaValue = reader.readStringArray("/sample/metadata/" + metaKey)[i];
                    sample2value.put(sampleIds[i], metaValue);
                }
                doc.getSampleAttributeTable().addAttribute(metaKey, sample2value, true, true);
                sample2value.clear();
            }
        }
        System.err.printf("Lines imported:%,10d%n", countLinesImported);
        System.err.printf("Lines skipped: %,10d%n", countLinesSkipped);
        dumpWriter.close();
        return doc;
    }

    /**
     * Extracts one row of a 2D string array as a taxonomy path.
     *
     * @param array 2D observation-metadata array
     * @param row   row index (observation)
     * @param cols  number of columns (path length)
     * @return the row's entries, in column order
     */
    private static String[] getPath(MDArray<String> array, int row, int cols) {
        final String[] path = new String[cols];
        for (int c = 0; c < cols; c++)
            path[c] = array.get(row, c);
        return path;
    }

    // ad-hoc test driver: parses a hard-coded file and writes a .megan summary next to it
    public static void main(String[] args) throws IOException {
        TaxonomyData.load();
        final String inputFile = "/Users/huson/data/biom2/suparna/otu_table_qiime1.9.1.biom";
        //final String inputFile="/Users/huson/data/biom2/rich_sparse_otu_table_hdf5.biom";
        boolean preservePaths = false;
        // NOTE(review): the local preservePaths is ignored here — 'false' is passed literally
        Document doc = apply(inputFile, false);
        OutputStreamWriter w = new OutputStreamWriter(System.err);
        doc.getDataTable().write(w);
        doc.getSampleAttributeTable().write(w, false, true);
        final String outputFile = FileUtils.replaceFileSuffix(inputFile, "-p" + ".megan");
        System.err.println("Writing file: " + outputFile);
        try (Writer writer = new FileWriter(outputFile)) {
            doc.getDataTable().write(writer);
            doc.getSampleAttributeTable().write(writer, false, true);
        }
    }
}
| 9,823 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom2MetaData.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom2/Biom2MetaData.java | /*
* Biom2MetaData.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom2;
import ch.systemsx.cisd.hdf5.IHDF5Reader;
import ch.systemsx.cisd.hdf5.IHDF5Writer;
import megan.core.SampleAttributeTable;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
class Biom2MetaData {
    /**
     * Reads per-sample metadata from a BIOM2 file into the given sample-attribute table.
     * Each member of the HDF5 group {@code /sample/metadata} becomes one attribute,
     * with one value per sample (values are aligned with {@code sampleIds} by index).
     *
     * @param reader    open HDF5 reader positioned on a BIOM2 file
     * @param sampleIds sample ids, in the file's column order
     * @param table     target table; its sample order is set to {@code sampleIds}
     * @return number of metadata attributes read
     */
    public static int read(IHDF5Reader reader, String[] sampleIds, SampleAttributeTable table) {
        table.setSampleOrder(Arrays.asList(sampleIds));
        final Map<String, Object> sample2value = new HashMap<>();
        for (final String metaKey : reader.getGroupMembers("/sample/metadata")) {
            // read the whole attribute column once, instead of re-reading the
            // entire HDF5 array for every sample (was accidentally quadratic)
            final String[] metaValues = reader.readStringArray("/sample/metadata/" + metaKey);
            for (int i = 0; i < sampleIds.length; i++) {
                sample2value.put(sampleIds[i], metaValues[i]);
            }
            table.addAttribute(metaKey, sample2value, true, true);
            sample2value.clear(); // map is reused for the next attribute
        }
        return reader.getGroupMembers("/sample/metadata").size();
    }

    /**
     * write metadata to a BIOM2 file
     *
     * @throws IOException always — writing is not implemented yet
     */
    public static void write(IHDF5Writer writer, String[] sampleIds, SampleAttributeTable table) throws IOException {
        throw new IOException("Not implemented");
    }
}
| 2,087 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom1ImportSEED.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/Biom1ImportSEED.java | /*
* Biom1ImportSEED.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import jloda.util.CollectionUtils;
import jloda.util.NumberUtils;
import jloda.util.StringUtils;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* extracts classification from a BIOME file containing a seed classification
* Daniel Huson, 9.2012
*/
class Biom1ImportSEED {
    /**
     * gets a series 2 classes to value map from the data
     * <p>
     * For each row (observation) of the BIOM1 table, determines a SEED class id:
     * either directly from a numeric row "id", or by walking the row's
     * "taxonomy"/"ontology" metadata path from most-specific to least-specific
     * and taking the first name known to the SEED classification. Counts are then
     * accumulated per sample (column) and per SEED class, handling both dense and
     * sparse matrix encodings.
     *
     * @return map: sample name -> (SEED class id -> summed count)
     */
    public static Map<String, Map<Integer, Integer>> getSeries2Classes2Value(Biom1Data biom1Data) {
        final Classification classification = ClassificationManager.get("SEED", true);
        final Map<String, Map<Integer, Integer>> series2Classes2count = new HashMap<>();
        int numberOfRows = biom1Data.getRows().length;
        // row index -> resolved SEED class id (UNASSIGNED_ID if unresolvable)
        Integer[] row2class = new Integer[numberOfRows];
        int rowCount = 0;
        for (Map row : biom1Data.getRows()) {
            //System.err.println("Obj: "+obj);
            Integer bestId = null;
            String idStr = (String) row.get("id");
            // a purely numeric row id is taken to be the SEED id itself
            if (idStr != null && NumberUtils.isInteger(idStr))
                bestId = NumberUtils.parseInt(idStr);
            else {
                final Map metaData = (Map) row.get("metadata");
                if (metaData != null) {
                    // prefer "taxonomy", fall back to "ontology"
                    Object obj = metaData.get("taxonomy");
                    if (obj == null)
                        obj = metaData.get("ontology");
                    if (obj instanceof ArrayList) {
                        // reversed: search the path leaf-first for a known SEED name
                        List<String> names = CollectionUtils.reverseList((ArrayList) obj);
                        for (String name : names) {
                            int keggId = classification.getName2IdMap().get(name);
                            if (keggId > 0) {
                                bestId = keggId;
                                break;
                            }
                        }
                    }
                }
            }
            if (bestId != null)
                row2class[rowCount++] = bestId;
            else {
                row2class[rowCount++] = IdMapper.UNASSIGNED_ID;
                System.err.println("Failed to determine SEED for: " + StringUtils.toString(row.values(), ","));
            }
        }
        int numberOfClasses = biom1Data.getColumns().length;
        // column index -> sample name (BIOM1 column "id")
        String[] col2series = new String[numberOfClasses];
        int colCount = 0;
        for (Object obj : biom1Data.getColumns()) {
            //System.err.println("Obj: "+obj);
            String label = (String) ((Map<?, ?>) obj).get("id");
            //System.err.println("Series: " + label);
            col2series[colCount++] = label;
        }
        if (biom1Data.getMatrix_type().equalsIgnoreCase(Biom1Data.AcceptableMatrixTypes.dense.toString())) {
            // dense: each data entry is one full row of counts, one value per column
            int row = 0;
            for (Object obj : biom1Data.getData()) {
                final int[] array = Biom1ImportTaxonomy.createIntArray(obj);
                if (array == null)
                    continue;
                for (int col = 0; col < array.length; col++) {
                    int value = array[col];
                    Map<Integer, Integer> class2count = series2Classes2count.computeIfAbsent(col2series[col], k -> new HashMap<>());
                    Integer previous = class2count.get(row2class[row]);
                    if (previous != null)
                        value += previous;
                    // if (class2count.get(row2class[row]) == null) // need this to avoid reading the number for the same node more than once
                    class2count.put(row2class[row], value);
                    // System.err.println(col2series[col] + " -> " + row2class[row] + " -> " + value);
                }
                row++;
            }
        } else if (biom1Data.getMatrix_type().equalsIgnoreCase(Biom1Data.AcceptableMatrixTypes.sparse.toString())) {
            // sparse: each data entry is a [row, column, value] triple
            for (Object obj : biom1Data.getData()) {
                final int[] array3 = Biom1ImportTaxonomy.createIntArray(obj);
                if (array3 == null)
                    continue;
                int row = array3[0];
                int col = array3[1];
                int value = array3[2];
                // System.err.println("Class: " + obj.getClass());
                // System.err.println("Row: " + Basic.toString(array3));
                Map<Integer, Integer> class2count = series2Classes2count.computeIfAbsent(col2series[col], k -> new HashMap<>());
                Integer previous = class2count.get(row2class[row]);
                if (previous != null)
                    value += previous;
                // if (class2count.get(row2class[row]) == null) // need this to avoid reading the number for the same node more than once
                class2count.put(row2class[row], value);
                // System.err.println(col2series[col] + " -> " + row2class[row] + " -> " + value);
            }
        }
        return series2Classes2count;
    }
}
| 5,725 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom1Data.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/Biom1Data.java | /*
* Biom1Data.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import com.google.gson.Gson;
import jloda.swing.util.ProgramProperties;
import jloda.util.StringUtils;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.TimeZone;
/**
* biom data
* Daniel Huson, 7.2012
*/
public class Biom1Data {
    // NOTE: this class is (de)serialized by Gson using the exact field names
    // below (id, format, format_url, type, ...); do not rename fields without
    // considering the BIOM1 JSON wire format.

    /** Controlled vocabulary for the BIOM1 "type" attribute. */
    public enum AcceptableTypes {
        OTU_table("OTU table"),
        Pathway_table("Pathway table"),
        Function_table("Function table"),
        Ortholog_table("Ortholog table"),
        Gene_table("Gene table"),
        Metabolite_table("Metabolite table"),
        Taxon_table("Taxon table"),
        MEGAN_table("MEGAN table");
        private final String value;

        AcceptableTypes(String value) {
            this.value = value;
        }

        public String toString() {
            return value;
        }
    }

    /** Matrix encodings allowed by BIOM1: sparse (row,col,value triples) or dense. */
    public enum AcceptableMatrixTypes {sparse, dense}

    /** Element types allowed by BIOM1 for matrix values. */
    public enum AcceptableMatrixElementTypes {
        Int("int"), Float("float"), Unicode("unicode");
        private final String value;

        AcceptableMatrixElementTypes(String value) {
            this.value = value;
        }

        public String toString() {
            return value;
        }
    }

    /*
    Required files (Biom-format 1.0)
        id                  : <string or null> a field that can be used to id a table (or null)
        format              : <string> The name and version of the current biom format
        format_url          : <url> A string with a static URL providing format details
        type                : <string> Table type (a controlled vocabulary)
                              Acceptable values:
                               "OTU table"
                               "Pathway table"
                               "Function table"
                               "Ortholog table"
                               "Gene table"
                               "Metabolite table"
                               "Taxon table"
        generated_by        : <string> Package and revision that built the table
        date                : <datetime> Date the table was built (ISO 8601 format)
        rows                : <list of objects> An ORDERED list of obj describing the rows
                              (explained in detail below)
        columns             : <list of objects> An ORDERED list of obj  describing the columns
                              (explained in detail below)
        matrix_type         : <string> Type of matrix data representation (a controlled vocabulary)
                              Acceptable values:
                               "sparse" : only non-zero values are specified
                               "dense" : every element must be specified
        matrix_element_type : Value type in matrix (a controlled vocabulary)
                              Acceptable values:
                               "int" : integer
                               "float" : floating point
                               "unicode" : unicode string
        shape               : <list of ints>, the number of rows and number of columns in data
        data                : <list of lists>, counts of observations by sample
                               if matrix_type is "sparse", [[row, column, value],
                                                            [row, column, value],
                                                            ...]
                               if matrix_type is "dense", [[value, value, value, ...],
                                                           [value, value, value, ...],
                                                           ...]
     */

    // MEGAN-specific extras (not part of the BIOM1 spec):
    private String comment;        // free text; used to detect KEGG/SEED content
    private String classification; // name of the classification, if known
    // BIOM1 required attributes (names fixed by the JSON format):
    private String id;
    private String format;
    private String format_url;
    private String type;
    public String generated_by;
    private String date;
    // public Date date; // todo: should be date
    private Map[] rows;    // ordered row (observation) descriptors
    private Map[] columns; // ordered column (sample) descriptors
    private String matrix_type;
    private String matrix_element_type;
    private int[] shape;   // {numberOfRows, numberOfColumns}
    private float[][] data;

    /**
     * default constructor
     */
    public Biom1Data() {
    }

    /**
     * constructor to be used when generating new biome files
     *
     */
    public Biom1Data(String id) {
        this.id = id;
        format = "Biological Observation Matrix 1.0.0";
        format_url = "http://biom-format.org";
        generated_by = ProgramProperties.getProgramName();
        TimeZone tz = TimeZone.getTimeZone("UTC");
        DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
        df.setTimeZone(tz);
        date = df.format(new Date());
    }

    /**
     * check whether data is acceptable
     * <p>
     * Validates type, matrix_type and matrix_element_type against their
     * controlled vocabularies (case-insensitively); throws on the first
     * attribute that is missing or outside its vocabulary.
     *
     */
    private void check() throws IOException {
        boolean ok = false;
        if (type != null) {
            for (AcceptableTypes acceptable : AcceptableTypes.values()) {
                if (acceptable.toString().equalsIgnoreCase(type)) {
                    ok = true;
                    break;
                }
            }
        }
        if (!ok)
            throw new IOException("type=" + type + ", must be one of: " + StringUtils.toString(AcceptableTypes.values(), ", ").replaceAll("_", " "));
        ok = false;
        if (matrix_type != null) {
            for (AcceptableMatrixTypes acceptable : AcceptableMatrixTypes.values()) {
                if (acceptable.toString().equalsIgnoreCase(matrix_type)) {
                    ok = true;
                    break;
                }
            }
        }
        if (!ok)
            throw new IOException("matrix_type=" + matrix_type + ", must be one of: " + StringUtils.toString(AcceptableMatrixTypes.values(), ", "));
        ok = false;
        if (matrix_element_type != null) {
            for (AcceptableMatrixElementTypes acceptable : AcceptableMatrixElementTypes.values()) {
                if (acceptable.toString().equalsIgnoreCase(matrix_element_type)) {
                    ok = true;
                    break;
                }
            }
        }
        if (!ok)
            throw new IOException("matrix_element_type=" + matrix_element_type + ", must be one of: " + StringUtils.toString(AcceptableMatrixElementTypes.values(), ", "));
    }

    /**
     * read
     * <p>
     * Deserializes a BIOM1 JSON document via Gson and validates it.
     *
     */
    public static Biom1Data fromReader(Reader reader) throws IOException {
        Gson gson = new Gson();
        Biom1Data biom1Data = gson.fromJson(reader, Biom1Data.class);
        biom1Data.check();
        return biom1Data;
    }

    /**
     * write
     * <p>
     * Serializes this object as BIOM1 JSON via Gson.
     *
     */
    public void write(Writer writer) {
        final Gson gson = new Gson();
        gson.toJson(this, writer);
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getFormat() {
        return format;
    }

    public void setFormat(String format) {
        this.format = format;
    }

    public String getFormatUrl() {
        return format_url;
    }

    public void setFormatUrl(String url) {
        this.format_url = url;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getGenerated_by() {
        return generated_by;
    }

    public void setGenerated_by(String generated_by) {
        this.generated_by = generated_by;
    }

    public String getDate() {
        return date;
    }

    public void setDate(String date) {
        this.date = date;
    }

    public Map[] getRows() {
        return rows;
    }

    public void setRows(Map[] rows) {
        this.rows = rows;
    }

    public Map[] getColumns() {
        return columns;
    }

    public void setColumns(Map[] columns) {
        this.columns = columns;
    }

    /** @return the "id" entry of each column descriptor (i.e. the sample names), in column order */
    public String[] getColumnIds() {
        String[] ids = new String[getColumns().length];
        for (int i = 0; i < getColumns().length; i++) {
            ids[i] = (String) columns[i].get("id");
        }
        return ids;
    }

    public String getMatrix_type() {
        return matrix_type;
    }

    public void setMatrix_type(String matrix_type) {
        this.matrix_type = matrix_type;
    }

    public String getMatrix_element_type() {
        return matrix_element_type;
    }

    public void setMatrix_element_type(String matrix_element_type) {
        this.matrix_element_type = matrix_element_type;
    }

    public int[] getShape() {
        return shape;
    }

    public float[][] getData() {
        return data;
    }

    public void setShape(int[] shape) {
        this.shape = shape;
    }

    public void setData(float[][] data) {
        this.data = data;
    }

    // true iff this table's type string is "Taxon table" (case-insensitive)
    public boolean isTaxonomyData() {
        return AcceptableTypes.Taxon_table.toString().equalsIgnoreCase(type);
    }

    // true iff this table's type string is "OTU table" (case-insensitive)
    public boolean isOTUData() {
        return AcceptableTypes.OTU_table.toString().equalsIgnoreCase(type);
    }

    // heuristic: a "Function table" whose comment mentions KEGG
    public boolean isKEGGData() {
        if (!AcceptableTypes.Function_table.toString().equalsIgnoreCase(type))
            return false;
        return comment != null && comment.contains("KEGG");
    }

    // heuristic: a non-empty "Function table" whose comment mentions SEED
    public boolean isSEEDData() {
        if (!AcceptableTypes.Function_table.toString().equalsIgnoreCase(type))
            return false;
        if (rows.length == 0)
            return false;
        return comment != null && comment.contains("SEED");
    }

    public String getComment() {
        return comment;
    }

    public void setComment(String comment) {
        this.comment = comment;
    }

    public String getClassification() {
        return classification;
    }

    public void setClassification(String classification) {
        this.classification = classification;
    }
}
| 10,381 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
BIOM1Importer.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/BIOM1Importer.java | /*
* BIOM1Importer.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import jloda.seq.BlastMode;
import jloda.swing.util.ProgramProperties;
import jloda.swing.window.NotificationsInSwing;
import megan.classification.Classification;
import megan.classification.IdMapper;
import megan.core.ClassificationType;
import megan.core.DataTable;
import megan.core.Document;
import megan.core.MeganFile;
import megan.util.BiomFileFilter;
import megan.viewer.MainViewer;
import java.io.FileReader;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
/**
* import a file in biom1 format
* Daniel Huson, 9.2012
*/
public class BIOM1Importer {
    /**
     * apply the importer parser to the named file.
     * <p>
     * Reads a BIOM1 (JSON) file, dispatches to the taxonomy/SEED/KEGG extractor
     * based on the requested type or the file's own type/comment, and fills the
     * given Document's data table with per-sample class counts. Shows a Swing
     * notification when done.
     *
     * @param fileName           BIOM1 file to import
     * @param doc                target document (marked as a MEGAN summary file)
     * @param type               requested classification ("taxonomy", "seed", "kegg"),
     *                           or anything else to auto-detect from the file
     * @param taxonomyIgnorePath passed to the taxonomy extractor: ignore the path
     *                           prefix when resolving taxa
     * @throws IOException if the file is not BIOM1 or its type is unsupported
     */
    static public void apply(String fileName, Document doc, String type, boolean taxonomyIgnorePath) throws IOException {
        doc.getMeganFile().setFileType(MeganFile.Type.MEGAN_SUMMARY_FILE);
        if (!BiomFileFilter.isBiom1File(fileName)) {
            throw new IOException("File not in BIOM1 format (or incorrect file suffix?)");
        }
        System.err.println("Importing data from BIOM1 file");
        Biom1Data biom1Data = Biom1Data.fromReader(new FileReader(fileName));
        String[] names = biom1Data.getColumnIds();
        Map<String, Map<Integer, Integer>> series2Classes2count;
        String classificationName;
        // dispatch on explicit request first, then on what the file claims to be
        if (type.equalsIgnoreCase("taxonomy") || biom1Data.isTaxonomyData() || biom1Data.isOTUData()) {
            series2Classes2count = Biom1ImportTaxonomy.getSample2Class2Value(biom1Data, taxonomyIgnorePath);
            classificationName = Classification.Taxonomy;
        } else if (type.equalsIgnoreCase("seed") || biom1Data.isSEEDData()) {
            series2Classes2count = Biom1ImportSEED.getSeries2Classes2Value(biom1Data);
            classificationName = "SEED";
        } else if (type.equalsIgnoreCase("kegg") || biom1Data.isKEGGData()) {
            series2Classes2count = Biom1ImportKEGG.getSeries2Classes2Value(biom1Data);
            classificationName = "KEGG";
        } else
            throw new IOException("Unable to import this datatype: " + biom1Data.getType());
        System.err.println("Classification type is: " + classificationName);
        DataTable table = doc.getDataTable();
        table.clear();
        table.setCreator(ProgramProperties.getProgramName());
        table.setCreationDate((new Date()).toString());
        doc.getActiveViewers().add(classificationName);
        // class id -> per-sample counts (one slot per series/sample)
        final Map<Integer, float[]> targetClass2counts = new HashMap<>();
        // NOTE(review): totalReads is an int; could overflow for very large tables
        int totalReads = 0;
        int numberOfSeries = series2Classes2count.keySet().size();
        final float[] sizes = new float[numberOfSeries];
        // sample name -> column index, taken from the BIOM1 column order
        final Map<String, Integer> series2pid = new HashMap<>();
        final String[] columnIds = biom1Data.getColumnIds();
        for (int c = 0; c < columnIds.length; c++)
            series2pid.put(columnIds[c], c);
        for (String series : series2Classes2count.keySet()) {
            int seriesId = series2pid.get(series);
            final Map<Integer, Integer> class2count = series2Classes2count.get(series);
            for (Integer classId : class2count.keySet()) {
                Integer count = class2count.get(classId);
                if (count == null)
                    count = 0;
                float[] counts = targetClass2counts.computeIfAbsent(classId, k -> new float[numberOfSeries]);
                counts[seriesId] = count;
                totalReads += count;
                sizes[seriesId] += count;
            }
        }
        table.getClassification2Class2Counts().put(classificationName, targetClass2counts);
        // for functional classifications, add a taxonomy table with everything unassigned
        if (!classificationName.equals(ClassificationType.Taxonomy.toString())) {
            final Map<Integer, float[]> class2counts = new HashMap<>();
            class2counts.put(IdMapper.UNASSIGNED_ID, sizes);
            table.getClassification2Class2Counts().put(ClassificationType.Taxonomy.toString(), class2counts);
        }
        table.setSamples(names, null, sizes, new BlastMode[]{BlastMode.Classifier});
        table.setTotalReads(totalReads);
        doc.setNumberReads(totalReads);
        System.err.println("done (" + totalReads + " reads)");
        NotificationsInSwing.showInformation(MainViewer.getLastActiveFrame(), "Imported " + totalReads + " reads, as " + classificationName + " classification"
                + "\nGenerated by " + biom1Data.generated_by
                + "\nDate: " + biom1Data.getDate()
                + (biom1Data.getComment() != null ? "\nComment: " + biom1Data.getComment() : ""));
    }

    // NOTE(review): the three private helpers below are not referenced anywhere
    // in this class — candidates for removal.

    /**
     * get the entry, if it exists, otherwise create it and initialize to zeros
     *
     * @return entry
     */
    private static Integer[] getOrCreate(Map<Integer, Integer[]> map, Integer id, int size) {
        return map.computeIfAbsent(id, k -> newZeroedIntegerArray(size));
    }

    /**
     * add all values to sum
     *
     */
    private static void addToArray(Integer[] sum, int[] add) {
        for (int i = 0; i < add.length; i++) {
            sum[i] += add[i];
        }
    }

    /**
     * create new array with zero entries
     *
     * @return new array
     */
    private static Integer[] newZeroedIntegerArray(int size) {
        Integer[] result = new Integer[size];
        for (int i = 0; i < size; i++)
            result[i] = 0;
        return result;
    }
}
| 6,215 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom1ImportKEGG.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/Biom1ImportKEGG.java | /*
* Biom1ImportKEGG.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import jloda.util.CollectionUtils;
import jloda.util.NumberUtils;
import jloda.util.StringUtils;
import megan.classification.Classification;
import megan.classification.ClassificationManager;
import megan.classification.IdMapper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Extracts a KEGG classification from a BIOM 1.0 file.
 * Daniel Huson, 9.2012
 */
class Biom1ImportKEGG {
    /**
     * Gets a series-to-(class-to-count) map from the data: one entry per
     * sample ("series" = BIOM column id), mapping KEGG class ids to
     * accumulated counts.
     *
     * @param biom1Data parsed BIOM 1.0 data (dense or sparse matrix)
     * @return map of sample name -> (KEGG id -> count)
     */
    public static Map<String, Map<Integer, Integer>> getSeries2Classes2Value(Biom1Data biom1Data) {
        final Classification classification = ClassificationManager.get("KEGG", true);

        final Map<String, Map<Integer, Integer>> series2Classes2count = new HashMap<>();

        // First pass over the rows: determine one KEGG id per row.
        int numberOfRows = biom1Data.getRows().length;
        final Integer[] row2class = new Integer[numberOfRows];
        int rowCount = 0;
        for (Map row : biom1Data.getRows()) {
            Integer bestId = null;
            // Preferred source: the row "id" field, either a plain integer or a
            // KEGG accession of the form K12345 (the leading 'K' is stripped).
            final String idStr = (String) row.get("id");
            if (idStr != null && NumberUtils.isInteger(idStr))
                bestId = NumberUtils.parseInt(idStr);
            else if (idStr != null && idStr.startsWith("K"))
                bestId = NumberUtils.parseInt(idStr.substring(1));
            else {
                // Fall back to the row metadata: walk the "taxonomy" (or
                // "ontology") path from most to least specific and take the
                // first name that maps to a known KEGG id.
                Map metaData = (Map) row.get("metadata");
                if (metaData != null) {
                    Object obj = metaData.get("taxonomy");
                    if (obj == null)
                        obj = metaData.get("ontology");
                    if (obj instanceof ArrayList) {
                        List<String> names = CollectionUtils.reverseList((ArrayList) obj);
                        for (String name : names) {
                            int keggId = classification.getName2IdMap().get(name);
                            if (keggId > 0) {
                                bestId = keggId;
                                break;
                            }
                        }
                    }
                }
            }
            if (bestId != null)
                row2class[rowCount++] = bestId;
            else {
                row2class[rowCount++] = IdMapper.UNASSIGNED_ID;
                System.err.println("Failed to determine KEGG for: " + StringUtils.toString(row.values(), ","));
            }
        }

        // Columns are samples: remember each column's sample id.
        int numberOfClasses = biom1Data.getColumns().length;
        final String[] col2series = new String[numberOfClasses];
        int colCount = 0;
        for (Object obj : biom1Data.getColumns()) {
            String label = (String) ((Map<?, ?>) obj).get("id");
            col2series[colCount++] = label;
        }
        if (biom1Data.getMatrix_type().equalsIgnoreCase(Biom1Data.AcceptableMatrixTypes.dense.toString())) {
            // Dense matrix: one data entry per row, one value per column.
            int row = 0;
            for (Object obj : biom1Data.getData()) {
                final int[] array = Biom1ImportTaxonomy.createIntArray(obj);
                if (array != null) {
                    for (int col = 0; col < array.length; col++) {
                        int value = array[col];
                        Map<Integer, Integer> class2count = series2Classes2count.computeIfAbsent(col2series[col], k -> new HashMap<>());
                        Integer previous = class2count.get(row2class[row]);
                        if (previous != null)
                            value += previous; // several rows may map to the same KEGG id: accumulate
                        class2count.put(row2class[row], value);
                    }
                }
                // Bug fix: always advance the row index, even when a data row
                // could not be converted to an int array. Previously "continue"
                // skipped this increment, mis-aligning every subsequent data row
                // with its row2class entry.
                row++;
            }
        } else if (biom1Data.getMatrix_type().equalsIgnoreCase(Biom1Data.AcceptableMatrixTypes.sparse.toString())) {
            // Sparse matrix: each data entry is a (row, col, value) triple.
            for (Object obj : biom1Data.getData()) {
                final int[] array3 = Biom1ImportTaxonomy.createIntArray(obj);
                if (array3 == null)
                    continue;
                int row = array3[0];
                int col = array3[1];
                int value = array3[2];
                Map<Integer, Integer> class2count = series2Classes2count.computeIfAbsent(col2series[col], k -> new HashMap<>());
                Integer previous = class2count.get(row2class[row]);
                if (previous != null)
                    value += previous; // accumulate repeated (sample, class) pairs
                class2count.put(row2class[row], value);
            }
        }
        return series2Classes2count;
    }
}
| 5,915 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
StringMap.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/StringMap.java | /*
* StringMap.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import java.util.HashMap;
/**
 * Convenience alias for a {@link HashMap} keyed by String, used throughout the
 * BIOM1 import/export code to model JSON-style objects ("id", "metadata", ...).
 *
 * @param <T> value type stored in the map
 */
class StringMap<T> extends HashMap<String, T> {
}
| 932 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
QIIMETaxonParser.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/QIIMETaxonParser.java | /*
* QIIMETaxonParser.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import megan.classification.IdMapper;
import megan.viewer.TaxonomyData;
public class QIIMETaxonParser {
    /**
     * Determines the NCBI taxon id for a QIIME-generated taxon path such as
     * k__Bacteria;p__Proteobacteria;c__Gammaproteobacteria;o__Enterobacteriales;f__Enterobacteriaceae;g__Escherichia;s__
     *
     * Rank prefixes ("k__", "g__", ...) are stripped; a species entry is joined
     * with the most recent genus seen so that lookups use the full binomial.
     *
     * @param taxonPath       path elements, most general first
     * @param ignorePathAbove use the last assignable name, ignoring whether the path above matches
     * @return NCBI taxon id, or IdMapper.UNASSIGNED_ID if nothing matched
     */
    public static int parseTaxon(String[] taxonPath, boolean ignorePathAbove) {
        int bestId = IdMapper.UNASSIGNED_ID;
        String lastGenus = null;
        for (String label : taxonPath) {
            if (label == null)
                continue;
            String name = label;
            // strip a QIIME rank prefix of the form "x__"
            if (name.indexOf("__") == 1) {
                final String bare = name.substring(3);
                if (name.startsWith("g")) {
                    lastGenus = bare;       // remember genus for the species entry
                    name = bare;
                } else if (name.startsWith("s") && lastGenus != null)
                    name = lastGenus + " " + bare; // build "Genus species"
                else
                    name = bare;
            }
            // unwrap names written as [Name] or (Name)
            final boolean bracketed = (name.startsWith("[") && name.endsWith("]"))
                    || (name.startsWith("(") && name.endsWith(")"));
            if (bracketed)
                name = name.substring(1, name.length() - 1);
            name = name.replaceAll("_", " ");
            if (name.equals("Root"))
                name = "root";
            if (name.isEmpty())
                continue;
            final int taxonId = TaxonomyData.getName2IdMap().get(name);
            // accept a deeper match only if it refines the current best id
            // (or the caller asked us to ignore path consistency)
            if (taxonId > 0 && (bestId == IdMapper.UNASSIGNED_ID || ignorePathAbove || TaxonomyData.getTree().isDescendant(bestId, taxonId)))
                bestId = taxonId;
        }
        return bestId;
    }
}
| 2,666 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom1ExportFViewer.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/Biom1ExportFViewer.java | /*
* Biom1ExportFViewer.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.graph.NodeData;
import jloda.graph.NodeSet;
import jloda.util.CanceledException;
import jloda.util.FileUtils;
import jloda.util.progress.ProgressListener;
import megan.core.Director;
import megan.viewer.ClassificationViewer;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
/**
 * Exports a functional-viewer (fviewer) analysis in BIOM 1.0 format.
 * Daniel Huson, 7.2012
 */
public class Biom1ExportFViewer {
    /**
     * Exports the class-name to counts mapping of the selected nodes of the
     * named classification viewer as a dense BIOM 1.0 "Function table".
     *
     * @param dir              director holding the open document and viewers
     * @param cName            classification name (e.g. a functional classification)
     * @param file             output file; also used to name the BIOM data set
     * @param progressListener reports progress, one tick per selected node
     * @return number of rows written
     * @throws IOException if the viewer is not open, no nodes are selected, or writing fails
     */
    public static int apply(Director dir, String cName, File file, ProgressListener progressListener) throws IOException {
        Biom1Data biom1Data = new Biom1Data(file.getPath());
        biom1Data.setType(Biom1Data.AcceptableTypes.Function_table.toString());
        biom1Data.setMatrix_type(Biom1Data.AcceptableMatrixTypes.dense.toString());
        biom1Data.setMatrix_element_type(Biom1Data.AcceptableMatrixElementTypes.Int.toString());
        biom1Data.setComment(cName + " classification computed by MEGAN");
        ClassificationViewer viewer = (ClassificationViewer) dir.getViewerByClassName(cName);
        if (viewer == null)
            throw new IOException(cName + " Viewer not open");
        // one BIOM column per sample; column id is the bare sample file name
        java.util.List<String> names = dir.getDocument().getSampleNames();
        int numberOfCols = names.size();
        LinkedList<Map> colList = new LinkedList<>();
        for (String name : names) {
            Map colItem = new StringMap();
            colItem.put("id", FileUtils.getFileNameWithoutPath(FileUtils.getFileBaseName(name)));
            colItem.put("metadata", new StringMap());
            colList.add(colItem);
        }
        biom1Data.setColumns(colList.toArray(new Map[0]));
        final NodeSet selectedNodes = viewer.getSelectedNodes();
        if (selectedNodes.size() == 0) {
            throw new IOException("No nodes selected");
        }
        progressListener.setSubtask("Processing " + cName + " nodes");
        progressListener.setMaximum(selectedNodes.size());
        progressListener.setProgress(0);
        // collect one row (class id + path metadata) and one counts array per selected class
        final LinkedList<Map> rowList = new LinkedList<>();
        final LinkedList<float[]> dataList = new LinkedList<>();
        visitSelectedLeavesRec(viewer, viewer.getTree().getRoot(), selectedNodes, new Vector<>(), rowList, dataList, new HashSet<>(), progressListener);
        int numberOfRows = rowList.size();
        biom1Data.setRows(rowList.toArray(new Map[numberOfRows]));
        biom1Data.setShape(new int[]{numberOfRows, numberOfCols});
        float[][] data = new float[numberOfRows][];
        int j = 0;
        for (float[] dataRow : dataList) {
            data[j++] = dataRow;
        }
        biom1Data.setData(data);
        System.err.println("Writing file: " + file);
        try (BufferedWriter w = new BufferedWriter(new FileWriter(file))) {
            biom1Data.write(w);
        }
        return numberOfRows;
    }

    /**
     * Recursively visits internal nodes and all selected nodes, collecting a
     * BIOM row and counts array for each selected node.
     *
     * @param path     mutable stack of class names from the root to the current node;
     *                 pushed on entry, popped on exit
     * @param seen     class ids already visited; a repeated class id (and its whole
     *                 subtree) is skipped — presumably to handle classes occurring at
     *                 multiple positions in a DAG-shaped classification; TODO confirm
     */
    private static void visitSelectedLeavesRec(ClassificationViewer viewer, Node v, NodeSet selected, Vector<String> path,
                                               LinkedList<Map> rowList, LinkedList<float[]> dataList, Set<Integer> seen, ProgressListener progressListener) throws CanceledException {
        // descend through internal nodes; a selected leaf is also processed
        if (v.getOutDegree() > 0 || selected.contains(v)) {
            Integer classId = (Integer) v.getInfo();
            if (!seen.contains(classId)) {
                seen.add(classId);
                String className = v == viewer.getTree().getRoot() ? "Root" : viewer.getClassification().getName2IdMap().get(classId);
                path.addElement(className);
                if (selected.contains(v)) {
                    NodeData data = viewer.getNodeData(v);
                    if (data != null) {
                        final float[] values;
                        // leaves report summarized counts, internal nodes only what
                        // was assigned directly to them (avoids double counting)
                        if (v.getOutDegree() == 0)
                            values = data.getSummarized();
                        else
                            values = data.getAssigned();
                        final StringMap<Object> rowItem = new StringMap<>();
                        rowItem.put("id", "" + classId);
                        final StringMap<Object> metadata = new StringMap<>();
                        // snapshot the current path; "taxonomy" is the key BIOM tools expect
                        ArrayList<String> classification = new ArrayList<>(path.size());
                        classification.addAll(path);
                        metadata.put("taxonomy", classification);
                        rowItem.put("metadata", metadata);
                        rowList.add(rowItem);
                        dataList.add(values);
                    }
                    progressListener.incrementProgress();
                }
                for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                    visitSelectedLeavesRec(viewer, e.getTarget(), selected, path, rowList, dataList, seen, progressListener);
                }
                // pop this node's name off the path stack
                path.setSize(path.size() - 1);
            }
        }
    }
}
| 5,976 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom1ImportTaxonomy.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/Biom1ImportTaxonomy.java | /*
* Biom1ImportTaxonomy.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import jloda.util.NumberUtils;
import jloda.util.StringUtils;
import megan.classification.IdMapper;
import megan.viewer.TaxonomyData;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
 * Imports a BIOM 1.0 file of type Taxonomy table or OTU table.
 * Daniel Huson, 9.2012
 */
class Biom1ImportTaxonomy {
    /**
     * Gets a sample-to-(class-to-count) map from the data: one entry per
     * sample (BIOM column id), mapping NCBI taxon ids to accumulated counts.
     *
     * @param biom1Data          parsed BIOM 1.0 data (dense or sparse matrix)
     * @param taxonomyIgnorePath if true, use the deepest assignable name of a QIIME
     *                           path, ignoring whether the path above matches
     * @return map of sample name -> (taxon id -> count)
     */
    public static Map<String, Map<Integer, Integer>> getSample2Class2Value(Biom1Data biom1Data, boolean taxonomyIgnorePath) {
        final Map<String, Map<Integer, Integer>> sample2class2value = new HashMap<>();

        // First pass over the rows: determine one NCBI taxon id per row.
        int numberOfRows = biom1Data.getRows().length;
        final Integer[] row2class = new Integer[numberOfRows];
        int rowCount = 0;
        for (Map row : biom1Data.getRows()) {
            Integer taxonId = null;
            // Preferred source: a taxonomy path in the row metadata
            // (several key spellings occur in files found in the wild).
            Map metaData = (Map) row.get("metadata");
            if (metaData != null) {
                Object obj = metaData.get("taxonomy");
                if (obj == null)
                    obj = metaData.get("Taxonomy");
                if (obj == null)
                    obj = metaData.get("organism");
                if (obj == null)
                    obj = metaData.get("Organism");
                if (obj == null)
                    obj = metaData.get("ontology");
                if (obj instanceof ArrayList) {
                    final ArrayList<String> orig = ((ArrayList<String>) obj);
                    taxonId = QIIMETaxonParser.parseTaxon(orig.toArray(new String[0]), taxonomyIgnorePath);
                }
            }
            // Fallback: the row "id", either a numeric taxon id or a taxon name.
            if (taxonId == null) {
                final String idStr = (String) row.get("id");
                if (idStr != null) {
                    if (NumberUtils.isInteger(idStr))
                        taxonId = NumberUtils.parseInt(idStr);
                    else {
                        int newTaxId = TaxonomyData.getName2IdMap().get(idStr);
                        if (newTaxId != 0) {
                            taxonId = newTaxId;
                        }
                    }
                }
            }
            if (taxonId != null)
                row2class[rowCount++] = taxonId;
            else {
                row2class[rowCount++] = IdMapper.UNASSIGNED_ID;
                System.err.println("Failed to determine taxon for: " + StringUtils.toString(row.values(), ","));
            }
        }

        // Columns are samples: remember each column's sample id.
        int numberOfClasses = biom1Data.getColumns().length;
        String[] col2series = new String[numberOfClasses];
        int colCount = 0;
        for (Object obj : biom1Data.getColumns()) {
            String label = (String) ((Map<?, ?>) obj).get("id");
            col2series[colCount++] = label;
        }
        if (biom1Data.getMatrix_type().equalsIgnoreCase(Biom1Data.AcceptableMatrixTypes.dense.toString())) {
            // Dense matrix: one data entry per row, one value per column.
            int row = 0;
            for (Object obj : biom1Data.getData()) {
                final int[] array = createIntArray(obj);
                if (array != null) {
                    for (int col = 0; col < array.length; col++) {
                        int value = array[col];
                        Map<Integer, Integer> class2count = sample2class2value.computeIfAbsent(col2series[col], k -> new HashMap<>());
                        Integer previous = class2count.get(row2class[row]);
                        if (previous != null)
                            value += previous; // several rows may map to the same taxon: accumulate
                        class2count.put(row2class[row], value);
                    }
                }
                // Bug fix: always advance the row index, even when a data row
                // could not be converted to an int array. Previously "continue"
                // skipped this increment, mis-aligning every subsequent data row
                // with its row2class entry.
                row++;
            }
        } else if (biom1Data.getMatrix_type().equalsIgnoreCase(Biom1Data.AcceptableMatrixTypes.sparse.toString())) {
            // Sparse matrix: each data entry is a (row, col, value) triple.
            for (Object obj : biom1Data.getData()) {
                final int[] array3 = createIntArray(obj);
                if (array3 == null)
                    continue;
                int row = array3[0];
                int col = array3[1];
                int value = array3[2];
                Map<Integer, Integer> class2count = sample2class2value.computeIfAbsent(col2series[col], k -> new HashMap<>());
                Integer previous = class2count.get(row2class[row]);
                if (previous != null)
                    value += previous; // accumulate repeated (sample, taxon) pairs
                class2count.put(row2class[row], value);
            }
        }
        return sample2class2value;
    }

    /**
     * Creates an int array from an object, if possible: accepts primitive or
     * boxed int/float arrays (float values are rounded).
     *
     * @param obj candidate array object
     * @return int array, or null if the object is not one of the supported array types
     */
    public static int[] createIntArray(Object obj) {
        if (obj instanceof int[])
            return (int[]) obj;
        else if (obj instanceof float[]) {
            float[] that = (float[]) obj;
            int[] array = new int[that.length];
            for (int i = 0; i < that.length; i++)
                array[i] = Math.round(that[i]);
            return array;
        } else if (obj instanceof Integer[]) {
            final Integer[] that = (Integer[]) obj;
            int[] array = new int[that.length];
            for (int i = 0; i < that.length; i++)
                array[i] = Math.round(that[i]);
            return array;
        } else if (obj instanceof Float[]) {
            final Float[] that = (Float[]) obj;
            int[] array = new int[that.length];
            for (int i = 0; i < that.length; i++)
                array[i] = Math.round(that[i]);
            return array;
        } else
            return null;
    }
}
| 6,907 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Biom1ExportTaxonomy.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/biom/biom1/Biom1ExportTaxonomy.java | /*
* Biom1ExportTaxonomy.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.biom.biom1;
import jloda.graph.Edge;
import jloda.graph.Node;
import jloda.graph.NodeData;
import jloda.graph.NodeSet;
import jloda.util.CanceledException;
import jloda.util.FileUtils;
import jloda.util.progress.ProgressListener;
import megan.core.Director;
import megan.viewer.MainViewer;
import megan.viewer.TaxonomicLevels;
import megan.viewer.TaxonomyData;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.Map;
import java.util.Vector;
/**
 * Exports a taxonomic analysis in BIOM 1.0 format.
 * Daniel Huson, 7.2012
 */
public class Biom1ExportTaxonomy {
    /**
     * Exports the taxon-name to counts mapping of the selected nodes of the
     * main viewer as a dense BIOM 1.0 "Taxon table".
     *
     * @param dir               director holding the open document and main viewer
     * @param file              output file; also used to name the BIOM data set
     * @param officialRanksOnly if true, only major official ranks are written and
     *                          path entries get QIIME-style prefixes ("k__", "g__", ...)
     * @param progressListener  reports progress, one tick per visited node
     * @return number of rows written
     * @throws IOException if no nodes are selected or writing fails
     */
    public static int apply(Director dir, File file, boolean officialRanksOnly, ProgressListener progressListener) throws IOException {
        final Biom1Data biom1Data = new Biom1Data(file.getPath());
        biom1Data.setType(Biom1Data.AcceptableTypes.Taxon_table.toString());
        biom1Data.setMatrix_type(Biom1Data.AcceptableMatrixTypes.dense.toString());
        biom1Data.setMatrix_element_type(Biom1Data.AcceptableMatrixElementTypes.Int.toString());
        biom1Data.setComment("Taxonomy classification computed by MEGAN");
        final MainViewer viewer = dir.getMainViewer();
        // one BIOM column per sample; column id is the bare sample file name
        final java.util.List<String> names = dir.getDocument().getSampleNames();
        int numberOfCols = names.size();
        final LinkedList<Map> colList = new LinkedList<>();
        for (String name : names) {
            final Map<String, Object> colItem = new StringMap<>();
            colItem.put("id", FileUtils.getFileNameWithoutPath(FileUtils.getFileBaseName(name)));
            colItem.put("metadata", new StringMap<>());
            colList.add(colItem);
        }
        biom1Data.setColumns(colList.toArray(new Map[0]));
        final NodeSet selectedNodes = viewer.getSelectedNodes();
        if (selectedNodes.size() == 0) {
            throw new IOException("No nodes selected");
        }
        progressListener.setSubtask("Processing taxa");
        // NOTE(review): maximum is the number of selected nodes, but the recursion
        // below increments once per *visited* node — progress may overshoot; confirm.
        progressListener.setMaximum(selectedNodes.size());
        progressListener.setProgress(0);
        // collect one row (taxon id + path metadata) and one counts array per selected taxon
        final LinkedList<Map> rowList = new LinkedList<>();
        final LinkedList<float[]> dataList = new LinkedList<>();
        visitSelectedLeavesRec(viewer, viewer.getTree().getRoot(), selectedNodes, new Vector<>(), rowList, dataList, officialRanksOnly, progressListener);
        int numberOfRows = rowList.size();
        biom1Data.setRows(rowList.toArray(new Map[numberOfRows]));
        biom1Data.setShape(new int[]{numberOfRows, numberOfCols});
        final float[][] data = new float[numberOfRows][];
        int j = 0;
        for (float[] dataRow : dataList) {
            data[j++] = dataRow;
        }
        biom1Data.setData(data);
        System.err.println("Writing file: " + file);
        try (BufferedWriter w = new BufferedWriter(new FileWriter(file))) {
            biom1Data.write(w);
        }
        return numberOfRows;
    }

    /**
     * Recursively visits internal nodes and all selected nodes, collecting a
     * BIOM row and counts array for each selected node.
     *
     * @param path              mutable stack of taxon names from root to the current node;
     *                          pushed on entry (when the rank qualifies), popped on exit
     * @param officialRanksOnly if true, only nodes with a major official rank contribute a
     *                          path element and a row; children are still descended into
     */
    private static void visitSelectedLeavesRec(MainViewer viewer, Node v, NodeSet selected, Vector<String> path,
                                               LinkedList<Map> rowList, LinkedList<float[]> dataList, boolean officialRanksOnly, ProgressListener progressListener) throws CanceledException {
        // descend through internal nodes; a selected leaf is also processed
        if (v.getOutDegree() > 0 || selected.contains(v)) {
            final Integer taxId = (Integer) v.getInfo();
            String taxName = v == viewer.getTree().getRoot() ? "Root" : TaxonomyData.getName2IdMap().get(taxId);
            {
                int a = taxName.indexOf("<");
                int b = taxName.lastIndexOf(">");
                if (0 < a && a < b && b == taxName.length() - 1)
                    taxName = taxName.substring(0, a).trim(); // remove trailing anything in < > brackets
            }
            final int rank = TaxonomyData.getTaxonomicRank(taxId);
            // track whether this node contributed a path element, so that only
            // then is an element popped on the way out
            boolean addedPathElement = false;
            if (!officialRanksOnly || TaxonomicLevels.isMajorRank(rank)) {
                if (officialRanksOnly) {
                    // QIIME-style prefix: first letter of the rank name, e.g. "g__Escherichia"
                    char letter = Character.toLowerCase(TaxonomicLevels.getName(rank).charAt(0));
                    path.addElement(String.format("%c__%s", letter, taxName));
                } else
                    path.addElement(taxName);
                addedPathElement = true;
                if (selected.contains(v)) {
                    NodeData nodeData = viewer.getNodeData(v);
                    if (nodeData != null) {
                        float[] values;
                        // leaves report summarized counts, internal nodes only what
                        // was assigned directly to them (avoids double counting)
                        if (v.getOutDegree() == 0)
                            values = nodeData.getSummarized();
                        else
                            values = nodeData.getAssigned();
                        final Map<String, Object> rowItem = new StringMap<>();
                        rowItem.put("id", "" + taxId);
                        final Map<String, Object> metadata = new StringMap<>();
                        // snapshot the current path; "taxonomy" is the key BIOM tools expect
                        final ArrayList<String> classification = new ArrayList<>(path.size());
                        classification.addAll(path);
                        metadata.put("taxonomy", classification);
                        rowItem.put("metadata", metadata);
                        rowList.add(rowItem);
                        dataList.add(values);
                    }
                }
            }
            progressListener.incrementProgress();
            for (Edge e = v.getFirstOutEdge(); e != null; e = v.getNextOutEdge(e)) {
                visitSelectedLeavesRec(viewer, e.getTarget(), selected, path, rowList, dataList, officialRanksOnly, progressListener);
            }
            if (addedPathElement)
                path.setSize(path.size() - 1);
        }
    }
}
| 6,756 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
FileManagerRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/FileManagerRMA3.java | /*
* FileManagerRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.swing.util.ChooseFileDialog;
import jloda.swing.util.ProgramProperties;
import jloda.swing.util.TextFileFilter;
import jloda.util.FileUtils;
import jloda.util.GZipUtils;
import megan.io.InputOutputReaderWriter;
import javax.swing.*;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
/**
 * File manager for RMA3 files: locates (and, with user help, repairs the
 * recorded paths of) the external SAM/FASTA files an RMA3 file refers to.
 * Daniel Huson, 2015
 */
public class FileManagerRMA3 {
    // when true, never prompt the user; missing files are simply reported as errors
    private boolean dontAskAny = !ProgramProperties.isUseGUI();
    // files the user chose not to be asked about again
    private final Set<String> tabooSet = new HashSet<>();

    // NOTE(review): lazy singleton without synchronization — not thread-safe;
    // presumably only ever called from the Swing EDT, confirm before relying on it.
    private static FileManagerRMA3 instance;

    private FileManagerRMA3() {
    }

    /**
     * Gets the singleton instance of the RMA3 file manager.
     *
     * @return instance
     */
    public static FileManagerRMA3 getInstance() {
        if (instance == null)
            instance = new FileManagerRMA3();
        return instance;
    }

    /**
     * Gets the SAM file associated with an RMA3 file, asking the user to locate it if missing.
     *
     * @param rma3File path of the RMA3 file
     * @return file, or null if the RMA3 footer records no alignment file
     * @throws IOException if the file is missing and cannot be recovered
     */
    public File getSAMFile(String rma3File) throws IOException {
        final String fileName = (new RMA3File(rma3File, RMA3FileModifier.READ_ONLY)).getFileFooter().getAlignmentFile();
        if (fileName == null || fileName.length() == 0)
            return null;
        final String suffix = FileUtils.getFileSuffix(fileName);
        final String type = (new RMA3File(rma3File, RMA3FileModifier.READ_ONLY)).getFileFooter().getAlignmentFileFormat();
        return getFile(rma3File, fileName, suffix, type, true);
    }

    /**
     * Gets the FASTA file associated with an RMA3 file, asking the user to locate it if missing.
     *
     * @param rma3File path of the RMA3 file
     * @return file, or null if the RMA3 footer records no reads file
     * @throws IOException if the file is missing and cannot be recovered
     */
    public File getFASTAFile(String rma3File) throws IOException {
        final String fileName = (new RMA3File(rma3File, RMA3FileModifier.READ_ONLY)).getFileFooter().getReadsFile();
        if (fileName == null || fileName.length() == 0)
            return null;
        final String suffix = FileUtils.getFileSuffix(fileName);
        final String type = (new RMA3File(rma3File, RMA3FileModifier.READ_ONLY)).getFileFooter().getReadsFileFormat();
        return getFile(rma3File, fileName, suffix, type, false);
    }

    /**
     * Gets a file, offering to decompress it if only a gzipped copy exists and
     * asking the user to locate it if missing. If the user locates a replacement,
     * its path is written back into the RMA3 file's footer.
     *
     * @param rma3File      path of the owning RMA3 file (footer may be rewritten)
     * @param fileName      recorded path of the wanted file
     * @param suffix        file suffix used for the file-chooser filter
     * @param type          human-readable file type for dialogs
     * @param alignmentFile true if this is the alignment file, false for the reads file
     * @return the located file
     * @throws IOException if the file cannot be found or the user cancels
     */
    private File getFile(String rma3File, String fileName, String suffix, String type, boolean alignmentFile) throws IOException {
        // non-interactive mode or taboo'ed file: succeed only if it is already there
        if (dontAskAny || tabooSet.contains(fileName)) {
            File file = new File(fileName);
            if (file.exists())
                return file;
            else
                throw new IOException("File not found: " + fileName);
        }

        if (fileName != null && fileName.length() > 0) {
            File file = new File(fileName);
            if (file.exists())
                return file;
            else {
                System.err.println("No such file: " + file);
                // a gzipped copy exists: offer to inflate it in place
                if ((new File(fileName + ".gz")).exists()) {
                    System.err.println("Run gunzip on: " + fileName + ".gz");
                    int response = JOptionPane.showConfirmDialog(null, "Required " + type + " file '" + file.getName() + "' is compressed, decompress?",
                            type + " file is compressed", JOptionPane.YES_NO_CANCEL_OPTION,
                            JOptionPane.QUESTION_MESSAGE, ProgramProperties.getProgramIcon());
                    switch (response) {
                        case JOptionPane.YES_OPTION:
                            GZipUtils.inflate(fileName + ".gz", fileName);
                            return new File(fileName);
                        case JOptionPane.NO_OPTION:
                            break; // fall through to the locate-file dialog below
                        default:
                        case JOptionPane.CANCEL_OPTION:
                            throw new IOException("User canceled");
                    }
                }
                if ((new File(fileName + ".zip")).exists())
                    System.err.println("Run unzip on: " + fileName + ".zip");
            }
            if (ProgramProperties.isUseGUI()) {
                // default the chooser to the RMA3 file's directory if the recorded one is gone
                if (!file.getParentFile().exists() || !file.getParentFile().isDirectory()) {
                    file = new File((new File(rma3File)).getParent(), FileUtils.getFileNameWithoutPath(fileName));
                }
                String[] choices = new String[]{"Locate missing " + type + " file", "Don't ask again for this missing file", "Don't ask again for any missing files"};
                String choice = (String) JOptionPane.showInputDialog(null, "Need " + type + " file to access reads", "MEGAN requires " + type + " file",
                        JOptionPane.QUESTION_MESSAGE, ProgramProperties.getProgramIcon(), choices, choices[0]);
                if (choice == null)
                    throw new IOException("File not found: " + fileName);
                else if (choice.equals(choices[0])) {
                    File altFile = ChooseFileDialog.chooseFileToOpen(null, file, (new TextFileFilter(suffix)), (new TextFileFilter(suffix)), null, "Locate " + type + " file '" + file.getName() + "'");
                    if (altFile == null)
                        throw new IOException("User canceled");
                    if (!altFile.exists())
                        throw new IOException("No such file: " + altFile);
                    // sanity-check the replacement against the size recorded in the footer,
                    // then persist its path back into the RMA3 file
                    try (RMA3FileModifier modifier = new RMA3FileModifier(rma3File)) {
                        final FileFooterRMA3 footer = modifier.getFileFooter();
                        if (alignmentFile) {
                            if (altFile.exists() && altFile.length() != footer.getAlignmentFileSize()) {
                                // bug fix: report the size of the file actually chosen (altFile), not the stale path
                                throw new IOException("Specified file has wrong size " + altFile.length() + ", expected: " + footer.getAlignmentFileSize());
                            }
                            footer.setAlignmentFile(altFile.getPath());
                        } else {
                            if (altFile.exists() && altFile.length() != footer.getReadsFileSize()) {
                                // bug fix: report the size of the file actually chosen (altFile), not the stale path
                                throw new IOException("Specified " + type + " file has wrong size " + altFile.length() + ", expected: " + footer.getReadsFileSize());
                            }
                            footer.setReadsFile(altFile.getPath());
                        }
                        try (InputOutputReaderWriter io = new InputOutputReaderWriter(new File(rma3File), RMA3FileModifier.READ_WRITE)) {
                            io.setLength(footer.getFileFooter());
                            io.seek(footer.getFileFooter());
                            footer.write(io);
                        }
                    }
                    return altFile;
                } else if (choice.equals(choices[1])) {
                    tabooSet.add(fileName);
                    throw new IOException("File not found: " + fileName);
                } else if (choice.equals(choices[2])) {
                    dontAskAny = true;
                    throw new IOException("File not found: " + fileName);
                }
            }
        }
        throw new IOException("File not found: " + fileName);
    }
}
| 8,053 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
AuxBlocksHeaderRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/AuxBlocksHeaderRMA3.java | /*
* AuxBlocksHeaderRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import java.io.IOException;
/**
* Format description for an aux-block entry
* Created by huson on 5/16/14.
*/
public class AuxBlocksHeaderRMA3 extends BaseRMA3 {
    /**
     * constructor
     * <p>
     * Fixes the format definition to "Name:String Data:Bytes": each aux block is a
     * string label followed by a raw byte payload.
     *
     * @param DEAD unused; presumably only exists to give this constructor a distinct
     *             signature — TODO confirm why it is here
     */
    public AuxBlocksHeaderRMA3(boolean DEAD) {
        super("Name:String Data:Bytes");
    }
    /**
     * Reads the stored format-definition string from the current reader position.
     * NOTE(review): unlike other RMA3 sections, {@code startPos} is ignored here —
     * the reader appears to be expected to be positioned already; confirm with callers.
     */
    @Override
    public void read(IInputReader reader, long startPos) throws IOException {
        setFormatDef(reader.readString());
    }
    /**
     * Writes the format-definition string at the current writer position.
     */
    @Override
    public void write(IOutputWriter writer) throws IOException {
        writer.writeString(getFormatDef());
    }
}
| 1,468 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
FileHeaderRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/FileHeaderRMA3.java | /*
* FileHeaderRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.Pair;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import megan.rma2.RMA2File;
import java.io.IOException;
/**
* The header section of an RMA3 File
* Created by huson on 5/14/14.
*/
public class FileHeaderRMA3 extends BaseRMA3 {
    private String creator;      // program that wrote the file
    private long creationDate;   // creation time stamp (millis)

    /**
     * Creates a header using the standard RMA3 header format definition.
     */
    public FileHeaderRMA3() {
        super("Creator:String CreationDate:Long");
    }

    /**
     * Writes the header: magic number, version 3, the format-definition string and
     * then each field listed in the format definition, in order.
     *
     * @param writer destination
     * @throws IOException on write failure
     */
    public void write(IOutputWriter writer) throws IOException {
        writer.writeInt(RMA2File.MAGIC_NUMBER); // RMA3 reuses the RMA2 magic number
        writer.writeInt(3);                     // file-format version
        writer.writeString(getFormatDef());

        final FormatDefinition format = FormatDefinition.fromString(getFormatDef());
        format.startWrite();
        for (Pair<String, FormatDefinition.Type> field : format.getList()) {
            switch (field.getFirst()) {
                case "Creator" -> format.write(writer, "Creator", getCreator());
                case "CreationDate" -> format.write(writer, "CreationDate", getCreationDate());
            }
        }
        format.finishWrite();
    }

    /**
     * Reads the header from the given position, verifying magic number and version,
     * then reads each field named in the stored format definition.
     *
     * @param reader   source
     * @param startPos position of the header in the file
     * @throws IOException if the file is not an RMA3 file or reading fails
     */
    public void read(IInputReader reader, long startPos) throws IOException {
        reader.seek(startPos);
        if (reader.readInt() != RMA2File.MAGIC_NUMBER)
            throw new IOException("Not an RMA file");
        if (reader.readInt() != 3)
            throw new IOException("Not an RMA 3 file");

        setFormatDef(reader.readString());
        final FormatDefinition format = FormatDefinition.fromString(getFormatDef());
        for (Pair<String, FormatDefinition.Type> field : format.getList()) {
            switch (field.getFirst()) {
                case "Creator" -> setCreator(reader.readString());
                case "CreationDate" -> setCreationDate(reader.readLong());
            }
        }
    }

    private String getCreator() {
        return creator;
    }

    private void setCreator(String creator) {
        this.creator = creator;
    }

    public long getCreationDate() {
        return creationDate;
    }

    private void setCreationDate(long creationDate) {
        this.creationDate = creationDate;
    }
}
| 3,315 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
MatchFooterRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/MatchFooterRMA3.java | /*
* MatchFooterRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.Pair;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import java.io.IOException;
/**
* Footer for matches
* Created by huson on 5/16/14.
*/
public class MatchFooterRMA3 extends BaseRMA3 {
    // totals accumulated while writing:
    private long numberOfReads;
    private long numberOfMatches;
    private int maxMatchesPerRead = 100; // default cap on matches kept per read
    // which functional classifications are present:
    private boolean useKegg = false;
    private boolean useSeed = false;
    private boolean useCog = false;
    private boolean usePfam = false;
    // nested format definitions for the read and match records:
    private String readFormatDef = "";
    private String matchFormatDef = "";

    /**
     * Creates a match footer with the given format definition.
     *
     * @param formatDef format-definition string
     */
    public MatchFooterRMA3(String formatDef) {
        super(formatDef);
    }

    /**
     * Creates a match footer with the default RMA3 format definition.
     */
    public MatchFooterRMA3() {
        this("TotalReads:Long TotalMatches:Long MaxMatchesPerRead:Integer UseKegg:Integer UseSeed:Integer UseCog:Integer UsePfam:Integer ReadFormat:String MatchFormat:String ");
    }

    /**
     * Reads the footer from the given position: first the format-definition string,
     * then each field it names, in order. Booleans are stored as 0/1 integers.
     */
    @Override
    public void read(IInputReader reader, long startPos) throws IOException {
        reader.seek(startPos);
        setFormatDef(reader.readString());
        final FormatDefinition format = FormatDefinition.fromString(getFormatDef());
        for (Pair<String, FormatDefinition.Type> field : format.getList()) {
            final String key = field.getFirst();
            if (key.equals("TotalReads"))
                setNumberOfReads(reader.readLong());
            else if (key.equals("TotalMatches"))
                setNumberOfMatches(reader.readLong());
            else if (key.equals("MaxMatchesPerRead"))
                setMaxMatchesPerRead(reader.readInt());
            else if (key.equals("UseKegg"))
                setUseKegg(reader.readInt() != 0);
            else if (key.equals("UseSeed"))
                setUseSeed(reader.readInt() != 0);
            else if (key.equals("UseCog"))
                setUseCog(reader.readInt() != 0);
            else if (key.equals("UsePfam"))
                setUsePfam(reader.readInt() != 0);
            else if (key.equals("ReadFormat"))
                setReadFormatDef(reader.readString());
            else if (key.equals("MatchFormat"))
                setMatchFormatDef(reader.readString());
        }
    }

    /**
     * Writes the footer: the format-definition string followed by each field it
     * names, in order. Booleans are written as 0/1 integers.
     */
    @Override
    public void write(IOutputWriter writer) throws IOException {
        writer.writeString(getFormatDef());
        final FormatDefinition format = FormatDefinition.fromString(getFormatDef());
        format.startWrite();
        for (Pair<String, FormatDefinition.Type> field : format.getList()) {
            final String key = field.getFirst();
            if (key.equals("TotalReads"))
                format.write(writer, "TotalReads", getNumberOfReads());
            else if (key.equals("TotalMatches"))
                format.write(writer, "TotalMatches", getNumberOfMatches());
            else if (key.equals("MaxMatchesPerRead"))
                format.write(writer, "MaxMatchesPerRead", getMaxMatchesPerRead());
            else if (key.equals("UseKegg"))
                format.write(writer, "UseKegg", isUseKegg() ? 1 : 0);
            else if (key.equals("UseSeed"))
                format.write(writer, "UseSeed", isUseSeed() ? 1 : 0);
            else if (key.equals("UseCog"))
                format.write(writer, "UseCog", isUseCog() ? 1 : 0);
            else if (key.equals("UsePfam"))
                format.write(writer, "UsePfam", isUsePfam() ? 1 : 0);
            else if (key.equals("ReadFormat"))
                format.write(writer, "ReadFormat", getReadFormatDef());
            else if (key.equals("MatchFormat"))
                format.write(writer, "MatchFormat", getMatchFormatDef());
        }
        format.finishWrite();
    }

    public long getNumberOfReads() {
        return numberOfReads;
    }

    public void incrementNumberOfReads() {
        numberOfReads++;
    }

    private void setNumberOfReads(long value) {
        this.numberOfReads = value;
    }

    public long getNumberOfMatches() {
        return numberOfMatches;
    }

    public void incrementNumberOfMatches() {
        numberOfMatches++;
    }

    private void setNumberOfMatches(long value) {
        this.numberOfMatches = value;
    }

    private int getMaxMatchesPerRead() {
        return maxMatchesPerRead;
    }

    private void setMaxMatchesPerRead(int value) {
        this.maxMatchesPerRead = value;
    }

    private boolean isUseKegg() {
        return useKegg;
    }

    private void setUseKegg(boolean value) {
        this.useKegg = value;
    }

    private boolean isUseSeed() {
        return useSeed;
    }

    private void setUseSeed(boolean value) {
        this.useSeed = value;
    }

    private boolean isUseCog() {
        return useCog;
    }

    private void setUseCog(boolean value) {
        this.useCog = value;
    }

    private boolean isUsePfam() {
        return usePfam;
    }

    private void setUsePfam(boolean value) {
        this.usePfam = value;
    }

    public String getReadFormatDef() {
        return readFormatDef;
    }

    private void setReadFormatDef(String value) {
        this.readFormatDef = value;
    }

    public String getMatchFormatDef() {
        return matchFormatDef;
    }

    private void setMatchFormatDef(String value) {
        this.matchFormatDef = value;
    }
}
| 5,933 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
SAMCompress.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/SAMCompress.java | /*
* SAMCompress.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
/**
* deflate and inflate SAM line based on previous one
* Created by huson on 5/24/14.
*/
class SAMCompress {
    /**
     * Deflates the current SAM line against the previous one: every tab-separated
     * field that is identical to the corresponding field of the previous line is
     * replaced by a single ampersand; differing fields are copied verbatim.
     * <p>
     * NOTE: a field whose literal content is "&" is indistinguishable from a
     * back-reference after deflation — an inherent limitation of this encoding.
     *
     * @param previous       previous line bytes, or null if there is none
     * @param previousLength number of valid bytes in previous
     * @param current        current line bytes
     * @param currentLength  number of valid bytes in current
     * @param result         output buffer (must be large enough)
     * @return number of bytes written to result
     */
    public static int deflate(byte[] previous, int previousLength, byte[] current, int currentLength, byte[] result) {
        if (previous == null) { // nothing to diff against: emit the line unchanged
            System.arraycopy(current, 0, result, 0, currentLength);
            return currentLength;
        }
        int outLength = 0;
        int fieldStartPrev = 0;
        int fieldStartCur = 0;
        while (fieldStartPrev < previousLength && fieldStartCur < currentLength) {
            int posPrev = fieldStartPrev;
            int posCur = fieldStartCur;
            // walk both fields in lock-step; positions at/after the end read as '\t'
            while (posPrev <= previousLength && posCur <= currentLength) {
                final byte chPrev = (posPrev < previousLength ? previous[posPrev] : (byte) '\t');
                final byte chCur = (posCur < currentLength ? current[posCur] : (byte) '\t');
                if (chPrev != chCur) {
                    // fields differ: skip both to their field ends and copy the current field verbatim
                    while (posPrev < previousLength && previous[posPrev] != '\t')
                        posPrev++;
                    while (posCur < currentLength && current[posCur] != '\t')
                        posCur++;
                    for (int i = fieldStartCur; i < posCur; i++)
                        result[outLength++] = current[i];
                    break;
                }
                if (chPrev == '\t') { // both fields ended together: identical field
                    result[outLength++] = '&';
                    break;
                }
                posPrev++;
                posCur++;
            }
            fieldStartPrev = posPrev + 1;
            fieldStartCur = posCur + 1;
            if (fieldStartCur < currentLength) // re-insert the separator between fields
                result[outLength++] = '\t';
        }
        return outLength;
    }

    /**
     * Inflates a deflated SAM line: every field consisting of a single ampersand is
     * replaced by the corresponding field of the previous line; other fields are
     * copied verbatim.
     *
     * @param previous       previous (uncompressed) line bytes, or null if there is none
     * @param previousLength number of valid bytes in previous
     * @param current        deflated line bytes
     * @param currentLength  number of valid bytes in current
     * @param result         output buffer (must be large enough)
     * @return number of bytes written to result
     */
    public static int inflate(byte[] previous, int previousLength, byte[] current, int currentLength, byte[] result) {
        if (previous == null) { // no reference line: the input is already uncompressed
            System.arraycopy(current, 0, result, 0, currentLength);
            return currentLength;
        }
        int outLength = 0;
        int fieldStartPrev = 0;
        int fieldStartCur = 0;
        while (fieldStartPrev < previousLength && fieldStartCur < currentLength) {
            int fieldEndPrev = fieldStartPrev;
            while (fieldEndPrev < previousLength && previous[fieldEndPrev] != '\t')
                fieldEndPrev++;
            int fieldEndCur = fieldStartCur;
            while (fieldEndCur < currentLength && current[fieldEndCur] != '\t')
                fieldEndCur++;
            final boolean backReference = (current[fieldStartCur] == '&' && fieldEndCur == fieldStartCur + 1);
            if (backReference) { // expand "&" to the previous line's field
                for (int i = fieldStartPrev; i < fieldEndPrev; i++)
                    result[outLength++] = previous[i];
            } else {
                for (int i = fieldStartCur; i < fieldEndCur; i++)
                    result[outLength++] = current[i];
            }
            if (fieldEndCur < currentLength)
                result[outLength++] = '\t';
            fieldStartPrev = fieldEndPrev + 1;
            fieldStartCur = fieldEndCur + 1;
        }
        return outLength;
    }

    /**
     * String variant of {@link #deflate(byte[], int, byte[], int, byte[])}: replaces
     * fields equal to the previous line's field by "&".
     *
     * @param previous previous line, or null if there is none
     * @param current  current line
     * @return deflated line
     */
    public static String deflate(String previous, String current) {
        if (previous == null)
            return current;
        final StringBuilder result = new StringBuilder();
        int fieldStartPrev = 0;
        int fieldStartCur = 0;
        while (fieldStartPrev < previous.length() && fieldStartCur < current.length()) {
            int posPrev = fieldStartPrev;
            int posCur = fieldStartCur;
            // walk both fields in lock-step; positions at/after the end read as '\t'
            while (posPrev <= previous.length() && posCur <= current.length()) {
                final int chPrev = (posPrev < previous.length() ? previous.charAt(posPrev) : '\t');
                final int chCur = (posCur < current.length() ? current.charAt(posCur) : '\t');
                if (chPrev != chCur) {
                    // fields differ: copy the whole current field verbatim
                    while (posPrev < previous.length() && previous.charAt(posPrev) != '\t')
                        posPrev++;
                    while (posCur < current.length() && current.charAt(posCur) != '\t')
                        posCur++;
                    result.append(current, fieldStartCur, posCur);
                    break;
                }
                if (chPrev == '\t') { // identical field
                    result.append('&');
                    break;
                }
                posPrev++;
                posCur++;
            }
            fieldStartPrev = posPrev + 1;
            fieldStartCur = posCur + 1;
            if (fieldStartCur < current.length())
                result.append('\t');
        }
        return result.toString();
    }

    /**
     * String variant of {@link #inflate(byte[], int, byte[], int, byte[])}: expands
     * each "&" field to the corresponding field of the previous line.
     *
     * @param previous previous (uncompressed) line, or null if there is none
     * @param current  deflated line
     * @return inflated line
     */
    public static String inflate(String previous, String current) {
        if (previous == null)
            return current;
        final StringBuilder result = new StringBuilder();
        int fieldStartPrev = 0;
        int fieldStartCur = 0;
        while (fieldStartPrev < previous.length() && fieldStartCur < current.length()) {
            int fieldEndPrev = fieldStartPrev;
            while (fieldEndPrev < previous.length() && previous.charAt(fieldEndPrev) != '\t')
                fieldEndPrev++;
            int fieldEndCur = fieldStartCur;
            while (fieldEndCur < current.length() && current.charAt(fieldEndCur) != '\t')
                fieldEndCur++;
            if (current.charAt(fieldStartCur) == '&' && fieldEndCur == fieldStartCur + 1)
                result.append(previous, fieldStartPrev, fieldEndPrev); // back-reference
            else
                result.append(current, fieldStartCur, fieldEndCur);
            if (fieldEndCur < current.length())
                result.append('\t');
            fieldStartPrev = fieldEndPrev + 1;
            fieldStartCur = fieldEndCur + 1;
        }
        return result.toString();
    }
}
| 7,151 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
RMA3File.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/RMA3File.java | /*
* RMA3File.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.seq.BlastMode;
import megan.io.IInputReader;
import megan.io.InputOutputReaderWriter;
import java.io.Closeable;
import java.io.IOException;
/**
* access an RMA3 file
* Created by huson on 5/13/14.
*/
public class RMA3File implements Closeable {
    final public static String READ_ONLY = "r";
    final public static String READ_WRITE = "rw";

    private final String fileName;
    private final InputOutputReaderWriter reader;

    private final FileHeaderRMA3 fileHeader;
    private final FileFooterRMA3 fileFooter;
    private final MatchFooterRMA3 matchFooter;
    private final ClassificationsFooterRMA3 classificationsFooter;
    private final AuxBlocksFooterRMA3 auxBlocksFooter;

    /**
     * Opens the named RMA3 file and parses its header and all footer sections.
     *
     * @param fileName file to open
     * @param mode     {@link #READ_ONLY} or {@link #READ_WRITE}
     * @throws IOException if the file cannot be opened or is not a valid RMA3 file
     */
    public RMA3File(String fileName, String mode) throws IOException {
        this.fileName = fileName;
        this.reader = new InputOutputReaderWriter(fileName, mode);
        try {
            fileHeader = new FileHeaderRMA3();
            try {
                fileHeader.read(reader, 0L);
            } catch (IOException ex) {
                System.err.println("File name: " + fileName);
                throw ex;
            }
            fileFooter = new FileFooterRMA3();
            reader.seek(reader.length() - 8L); // last long in file is position of fileFooter
            long footerPosition = reader.readLong();
            fileFooter.read(reader, footerPosition);

            matchFooter = new MatchFooterRMA3();
            matchFooter.read(reader, fileFooter.getMatchesFooter());

            classificationsFooter = new ClassificationsFooterRMA3();
            classificationsFooter.read(reader, fileFooter.getClassificationsFooter());

            auxBlocksFooter = new AuxBlocksFooterRMA3();
            auxBlocksFooter.read(reader, fileFooter.getAuxFooter());
        } catch (IOException ex) {
            // fix: don't leak the open file handle when header/footer parsing fails
            try {
                reader.close();
            } catch (IOException closeEx) {
                ex.addSuppressed(closeEx);
            }
            throw ex;
        }
    }

    /**
     * Closes the underlying file.
     */
    public void close() throws IOException {
        reader.close();
    }

    public String getFileName() {
        return fileName;
    }

    public IInputReader getReader() {
        return reader;
    }

    public FileHeaderRMA3 getFileHeader() {
        return fileHeader;
    }

    public FileFooterRMA3 getFileFooter() {
        return fileFooter;
    }

    public MatchFooterRMA3 getMatchFooter() {
        return matchFooter;
    }

    public ClassificationsFooterRMA3 getClassificationsFooter() {
        return classificationsFooter;
    }

    public AuxBlocksFooterRMA3 getAuxBlocksFooter() {
        return auxBlocksFooter;
    }

    public long getStartMatches() {
        return fileFooter.getMatchesStart();
    }

    public long getEndMatches() {
        return fileFooter.getEndMatches();
    }

    public BlastMode getBlastMode() {
        return BlastMode.valueOf(fileFooter.getBlastMode());
    }

    /**
     * Returns the associated SAM file path, or null if none is recorded or the
     * recorded alignment file is not in SAM format.
     */
    public String getSamFile() {
        String samFile = fileFooter.getAlignmentFile();
        if (samFile != null && samFile.length() > 0 && fileFooter.getAlignmentFileFormat().equals("SAM"))
            return samFile;
        else
            return null;
    }
}
| 3,880 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
RMA3Connector.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/RMA3Connector.java | /*
* RMA3Connector.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.CanceledException;
import jloda.util.ListOfLongs;
import jloda.util.Single;
import jloda.util.StringUtils;
import jloda.util.progress.ProgressListener;
import megan.core.ClassificationType;
import megan.data.*;
import java.io.File;
import java.io.IOException;
import java.util.*;
/**
* RMA3 connector
* Created by huson on 5/16/14.
*/
public class RMA3Connector implements IConnector {
    private String fileName;

    /**
     * Creates a connector for the given RMA3 file.
     *
     * @param fileName RMA3 file
     * @throws IOException declared for interface compatibility
     */
    public RMA3Connector(String fileName) throws IOException {
        setFile(fileName);
    }

    @Override
    public String getFilename() {
        return fileName;
    }

    @Override
    public void setFile(String file) {
        this.fileName = file;
    }

    /**
     * Whether the file cannot be modified.
     * <p>
     * Fix: this previously returned true when the file COULD be written
     * (the canWrite() result was not negated), i.e. the exact opposite of
     * "read-only". A null file name is reported as not read-only, as before.
     */
    @Override
    public boolean isReadOnly() {
        return fileName != null && !((new File(fileName)).canWrite());
    }

    @Override
    public long getUId() throws IOException {
        // the creation date serves as the file's unique id
        try (RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY)) {
            return rma3File.getFileHeader().getCreationDate();
        }
    }

    @Override
    public IReadBlockIterator getAllReadsIterator(float minScore, float maxExpected, boolean wantReadSequence, boolean wantMatches) throws IOException {
        return new AllReadsIterator(getReadBlockGetter(minScore, maxExpected, wantReadSequence, wantMatches));
    }

    @Override
    public IReadBlockIterator getReadsIterator(String classification, int classId, float minScore, float maxExpected, boolean wantReadSequence, boolean wantMatches) throws IOException {
        return getReadsIteratorForListOfClassIds(classification, Collections.singletonList(classId), minScore, maxExpected, wantReadSequence, wantMatches);
    }

    @Override
    public IReadBlockIterator getReadsIteratorForListOfClassIds(String classification, Collection<Integer> classIds, float minScore, float maxExpected, boolean wantReadSequence, boolean wantMatches) throws IOException {
        try (RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY)) {
            final ClassificationBlockRMA3 block = new ClassificationBlockRMA3(ClassificationType.valueOf(classification));
            block.read(rma3File.getClassificationsFooter(), rma3File.getReader());
            // collect the file positions of all reads assigned to the requested classes
            ListOfLongs list = new ListOfLongs();
            for (Integer classId : classIds) {
                if (block.getSum(classId) > 0) {
                    block.readLocations(rma3File.getClassificationsFooter(), rma3File.getReader(), classId, list);
                }
            }
            return new ReadBlockIterator(list.iterator(), list.size(), getReadBlockGetter(minScore, maxExpected, wantReadSequence, wantMatches));
        }
    }

    @Override
    public IReadBlockGetter getReadBlockGetter(float minScore, float maxExpected, boolean wantReadSequence, boolean wantMatches) throws IOException {
        // note: the returned getter owns the RMA3File and is responsible for closing it
        final RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY);
        return new ReadBlockGetterRMA3(rma3File, minScore, maxExpected, wantReadSequence, wantMatches);
    }

    @Override
    public String[] getAllClassificationNames() throws IOException {
        try (RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY)) {
            List<String> names = rma3File.getClassificationsFooter().getAllNames();
            return names.toArray(new String[0]);
        }
    }

    @Override
    public int getClassificationSize(String classificationName) throws IOException {
        IClassificationBlock classificationBlock = getClassificationBlock(classificationName);
        return classificationBlock.getKeySet().size();
    }

    @Override
    public int getClassSize(String classificationName, int classId) throws IOException {
        IClassificationBlock classificationBlock = getClassificationBlock(classificationName);
        return classificationBlock.getSum(classId);
    }

    @Override
    public IClassificationBlock getClassificationBlock(String classificationName) throws IOException {
        try (RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY)) {
            ClassificationBlockRMA3 classificationBlock = new ClassificationBlockRMA3(ClassificationType.valueOf(classificationName));
            classificationBlock.read(rma3File.getClassificationsFooter(), rma3File.getReader());
            return classificationBlock;
        }
    }

    /**
     * Rescans classifications after running the data processor: rewrites the
     * class-to-read-location mapping for every supported classification.
     *
     * @throws IOException       on I/O failure
     * @throws CanceledException if the user cancels via the progress listener
     */
    @Override
    public void updateClassifications(String[] names, List<UpdateItem> updateItemList, ProgressListener progressListener) throws IOException, CanceledException {
        final UpdateItemList updateItems = (UpdateItemList) updateItemList;
        final int numClassifications = names.length;

        long maxProgress = 0;
        for (int i = 0; i < numClassifications; i++) {
            maxProgress += updateItems.getClassIds(i).size();
        }
        progressListener.setMaximum(maxProgress);

        RMA3FileModifier rma3FileModifier = new RMA3FileModifier(fileName);
        rma3FileModifier.startModification();

        for (int i = 0; i < numClassifications; i++) {
            if (StringUtils.toString(ClassificationType.values(), " ").contains(names[i])) {
                ClassificationType classificationType = ClassificationType.valueOf(names[i]);
                final Map<Integer, ListOfLongs> classId2Locations = new HashMap<>();
                for (Integer classId : updateItems.getClassIds(i)) {
                    // fix: the weight was looked up twice (once into an unused local);
                    // look it up once and reuse it
                    final float weight = updateItems.getWeight(i, classId);
                    final ListOfLongs positions = new ListOfLongs();
                    classId2Locations.put(classId, positions);
                    if (weight > 0) {
                        for (UpdateItem item = updateItems.getFirst(i, classId); item != null; item = item.getNextInClassification(i)) {
                            positions.add(item.getReadUId());
                        }
                    }
                    progressListener.incrementProgress();
                }
                rma3FileModifier.updateClassification(classificationType, classId2Locations);
            } else
                System.err.println("Unsupported classification type: " + names[i]);
        }
        rma3FileModifier.finishModification();
    }

    @Override
    public IReadBlockIterator getFindAllReadsIterator(String regEx, FindSelection findSelection, Single<Boolean> canceled) throws IOException {
        return new FindAllReadsIterator(regEx, findSelection, getAllReadsIterator(0, 10, true, true), canceled);
    }

    @Override
    public int getNumberOfReads() throws IOException {
        try (RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY)) {
            // clamp to int range as required by the interface
            return (int) Math.min(Integer.MAX_VALUE, rma3File.getMatchFooter().getNumberOfReads());
        }
    }

    @Override
    public int getNumberOfMatches() throws IOException {
        try (RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY)) {
            return (int) Math.min(Integer.MAX_VALUE, rma3File.getMatchFooter().getNumberOfMatches());
        }
    }

    @Override
    public void setNumberOfReads(int numberOfReads) {
        // not supported for RMA3 files; the count is stored in the match footer
    }

    @Override
    public void putAuxiliaryData(Map<String, byte[]> label2data) throws IOException {
        try (RMA3FileModifier rma3FileModifier = new RMA3FileModifier(fileName)) {
            rma3FileModifier.saveAuxData(label2data);
        }
    }

    @Override
    public Map<String, byte[]> getAuxiliaryData() throws IOException {
        final Map<String, byte[]> label2data = new HashMap<>();
        // try-with-resources replaces the previous explicit try/finally close
        try (RMA3File rma3File = new RMA3File(fileName, RMA3File.READ_ONLY)) {
            rma3File.getAuxBlocksFooter().readAuxBlocks(rma3File.getFileFooter(), rma3File.getReader(), label2data);
        }
        return label2data;
    }
}
| 8,624 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
MatchLineRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/MatchLineRMA3.java | /*
* MatchLineRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import megan.data.TextStoragePolicy;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import java.io.IOException;
import java.util.Objects;
/**
* A match
* Created by huson on 5/16/14.
*/
public class MatchLineRMA3 extends BaseRMA3 {
    // configuration (derived from the format definition string):
    private boolean embedText; // true: the SAM text is embedded; false: only a file offset is stored
    private boolean doKegg;
    private boolean doSeed;
    private boolean doCog;
    private boolean doPfam;

    // data stored in a match line:
    private float expected;
    private int bitScore;
    private int percentId;
    private int taxId;
    private int keggId;
    private int seedId;
    private int cogId;
    private int pfamId;

    // alternatives, depending on textStoragePolicy:
    private long fileOffset; // location of match in SAM file
    private String text; // SAM line

    /**
     * constructor from an existing format-definition string (e.g. when reading a file)
     *
     * @param format format definition; the doXXX/embedText flags are derived from it
     */
    public MatchLineRMA3(String format) {
        super("");
        setFormatDef(format); // call this to ensure that variables are set
    }

    /**
     * constructor for writing, building the format definition from the given options
     *
     * @param textStoragePolicy where match text is kept (embedded vs. referenced by offset)
     * @param doKegg            store a KEGG id per match
     * @param doSeed            store a SEED id per match
     * @param doCog             store an EGGNOG id per match
     * @param doPfam            store a PFAM id per match
     */
    public MatchLineRMA3(TextStoragePolicy textStoragePolicy, boolean doKegg, boolean doSeed, boolean doCog, boolean doPfam) {
        super("Expected:Float BitScore:Character PercentId:Byte Tax:Integer");
        this.doKegg = doKegg;
        this.doSeed = doSeed;
        this.doCog = doCog;
        this.doPfam = doPfam;
        this.embedText = (textStoragePolicy == TextStoragePolicy.Embed || textStoragePolicy == TextStoragePolicy.InRMAZ);
        // note: setFormatDef is overridden and re-derives the flags from the string;
        // the values match what was just assigned above
        setFormatDef(getFormatDef() + (doKegg ? " Kegg:Integer" : "")
                + (doSeed ? " Seed:Integer" : "") + (doCog ? " EGGNOG:Integer" : "")
                + (doPfam ? " Pfam:Integer" : "") + (embedText ? " BlastText:String" : " FileOffset:Long"));
    }

    /**
     * Seeks to the given position and reads one match record.
     *
     * @param reader   source
     * @param position file position of the match record
     * @throws IOException on read failure
     */
    public void read(IInputReader reader, long position) throws IOException {
        reader.seek(position);
        read(reader);
    }

    /**
     * Reads one match record at the current position.
     * For efficiency the field order is assumed fixed (as written by {@link #write}).
     *
     * @throws IOException on read failure
     */
    public void read(IInputReader reader) throws IOException {
        expected = reader.readFloat();
        bitScore = reader.readChar();
        percentId = reader.read();
        taxId = reader.readInt();
        if (doKegg)
            keggId = reader.readInt();
        if (doSeed)
            seedId = reader.readInt();
        if (doCog)
            cogId = reader.readInt();
        if (doPfam)
            pfamId = reader.readInt();

        if (embedText) {
            text = reader.readString();
        } else {
            fileOffset = reader.readLong();
        }
    }

    /**
     * Writes one match record; field order must match {@link #read(IInputReader)}.
     *
     * @throws IOException on write failure
     */
    public void write(IOutputWriter writer) throws IOException {
        writer.writeFloat(expected);
        writer.writeChar((char) bitScore);
        writer.write((byte) percentId);
        writer.writeInt(taxId);
        if (doKegg)
            writer.writeInt(keggId);
        if (doSeed)
            writer.writeInt(seedId);
        if (doCog)
            writer.writeInt(cogId);
        if (doPfam)
            writer.writeInt(pfamId);

        if (embedText)
            writer.writeString(text);
        else
            writer.writeLong(fileOffset);
    }

    public float getExpected() {
        return expected;
    }

    public void setExpected(float expected) {
        this.expected = expected;
    }

    public int getBitScore() {
        return bitScore;
    }

    public void setBitScore(int bitScore) {
        this.bitScore = bitScore;
    }

    public int getPercentId() {
        return percentId;
    }

    public void setPercentId(int percentId) {
        this.percentId = percentId;
    }

    public int getTaxId() {
        return taxId;
    }

    public void setTaxId(Integer id) {
        this.taxId = Objects.requireNonNullElse(id, 0);
    }

    public int getKeggId() {
        return keggId;
    }

    public void setKeggId(Integer id) {
        this.keggId = Objects.requireNonNullElse(id, 0);
    }

    public int getSeedId() {
        return seedId;
    }

    public void setSeedId(Integer id) {
        this.seedId = Objects.requireNonNullElse(id, 0);
    }

    public int getCogId() {
        return cogId;
    }

    public void setCogId(Integer id) {
        this.cogId = Objects.requireNonNullElse(id, 0);
    }

    public int getPfamId() {
        return pfamId;
    }

    public void setPfamId(Integer id) {
        this.pfamId = Objects.requireNonNullElse(id, 0);
    }

    public long getFileOffset() {
        return fileOffset;
    }

    public void setFileOffset(long fileOffset) {
        this.fileOffset = fileOffset;
    }

    public String getText() {
        return text;
    }

    public void setText(String text) {
        this.text = text;
    }

    public boolean isDoKegg() {
        return doKegg;
    }

    public boolean isDoSeed() {
        return doSeed;
    }

    public boolean isDoCog() {
        return doCog;
    }

    /**
     * Added for consistency with the other isDoXXX getters.
     */
    public boolean isDoPfam() {
        return doPfam;
    }

    public boolean isEmbedText() {
        return embedText;
    }

    /**
     * Sets the format definition and derives the doXXX/embedText flags from it.
     * <p>
     * Fix: doPfam was never derived here, so a MatchLineRMA3 constructed from a
     * format string containing "Pfam:Integer" would skip the pfam field on
     * read/write and misparse every subsequent field of the record.
     */
    public void setFormatDef(String formatDef) {
        doKegg = formatDef.contains("Kegg:Integer");
        doSeed = formatDef.contains("Seed:Integer");
        doCog = formatDef.contains("EGGNOG:Integer");
        doPfam = formatDef.contains("Pfam:Integer");
        embedText = formatDef.contains("BlastText:String");
        super.setFormatDef(formatDef);
    }
}
| 6,268 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
FileFooterRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/FileFooterRMA3.java | /*
* FileFooterRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.Pair;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import java.io.IOException;
/**
* fileFooter of RMA3 file
* Created by huson on 5/16/14.
*/
public class FileFooterRMA3 extends BaseRMA3 {
    // provenance:
    private String creator;
    private long creationDate;
    // the external alignments file this RMA3 was computed from:
    private String alignmentFile;
    private String alignmentFileFormat;
    private long alignmentFileSize;
    // the external reads file, if any:
    private String readsFile;
    private String readsFileFormat;
    private long readsFileSize;
    private String blastMode;
    // byte offsets of the major sections within the RMA3 file:
    private long matchesStart;
    private long matchesFooter;
    private long classificationsStart;
    private long classificationsFooter;
    private long auxStart;
    private long auxFooter;
    private long fileFooter;

    /**
     * constructor; establishes the default format definition listing every footer
     * field in its serialized order
     */
    public FileFooterRMA3() {
        super("Creator:String CreationDate:Long" +
                " AlignmentsFile:String AlignmentFileFormat:String AlignmentFileSize:Long" +
                " ReadsFile:String ReadsFileFormat:String ReadsFileSize:Long" +
                " BlastMode:String" +
                " MatchesStart:Long MatchesFooter:Long" +
                " ClassificationsStart:Long ClassificationsFooter:Long" +
                " AuxStart:Long AuxFooter:Long FileFooter:Long");
    }

    /**
     * read the footer from an RMA3 file, starting at the given position.
     * The format definition string stored in the file drives which fields are
     * read and in what order. NOTE(review): a field name not matched by the
     * switch is silently skipped without consuming any bytes, which would
     * desynchronize the stream — assumes formats only ever contain known names.
     */
    public void read(IInputReader reader, long startPos) throws IOException {
        reader.seek(startPos);
        setFormatDef(reader.readString());
        FormatDefinition formatDefinition = FormatDefinition.fromString(getFormatDef());
        for (Pair<String, FormatDefinition.Type> pair : formatDefinition.getList()) {
            switch (pair.getFirst()) {
                case "Creator" -> setCreator(reader.readString());
                case "CreationDate" -> setCreationDate(reader.readLong());
                case "AlignmentsFile" -> setAlignmentFile(reader.readString());
                case "AlignmentFileFormat" -> setAlignmentFileFormat(reader.readString());
                case "AlignmentFileSize" -> setAlignmentFileSize(reader.readLong());
                case "ReadsFile" -> setReadsFile(reader.readString());
                case "ReadsFileFormat" -> setReadsFileFormat(reader.readString());
                case "ReadsFileSize" -> setReadsFileSize(reader.readLong());
                case "BlastMode" -> setBlastMode(reader.readString());
                case "MatchesStart" -> setMatchesStart(reader.readLong());
                case "MatchesFooter" -> setMatchesFooter(reader.readLong());
                case "ClassificationsStart" -> setClassificationsStart(reader.readLong());
                case "ClassificationsFooter" -> setClassificationsFooter(reader.readLong());
                case "AuxStart" -> setAuxStart(reader.readLong());
                case "AuxFooter" -> setAuxFooter(reader.readLong());
                case "FileFooter" -> setFileFooter(reader.readLong());
            }
        }
    }

    /**
     * write the footer to an RMA3 file: first the format definition string,
     * then each field in the order the definition lists them
     */
    public void write(IOutputWriter writer) throws IOException {
        writer.writeString(getFormatDef());
        final FormatDefinition formatDefinition = FormatDefinition.fromString(getFormatDef());
        formatDefinition.startWrite();
        for (Pair<String, FormatDefinition.Type> pair : formatDefinition.getList()) {
            {
                switch (pair.getFirst()) {
                    case "Creator" -> formatDefinition.write(writer, "Creator", getCreator());
                    case "CreationDate" -> formatDefinition.write(writer, "CreationDate", getCreationDate());
                    case "AlignmentsFile" -> formatDefinition.write(writer, "AlignmentsFile", getAlignmentFile());
                    case "AlignmentFileFormat" -> formatDefinition.write(writer, "AlignmentFileFormat", getAlignmentFileFormat());
                    case "AlignmentFileSize" -> formatDefinition.write(writer, "AlignmentFileSize", getAlignmentFileSize());
                    case "ReadsFile" -> formatDefinition.write(writer, "ReadsFile", getReadsFile());
                    case "ReadsFileFormat" -> formatDefinition.write(writer, "ReadsFileFormat", getReadsFileFormat());
                    case "ReadsFileSize" -> formatDefinition.write(writer, "ReadsFileSize", getReadsFileSize());
                    case "BlastMode" -> formatDefinition.write(writer, "BlastMode", getBlastMode());
                    case "MatchesStart" -> formatDefinition.write(writer, "MatchesStart", getMatchesStart());
                    case "MatchesFooter" -> formatDefinition.write(writer, "MatchesFooter", getMatchesFooter());
                    case "ClassificationsStart" -> formatDefinition.write(writer, "ClassificationsStart", getClassificationsStart());
                    case "ClassificationsFooter" -> formatDefinition.write(writer, "ClassificationsFooter", getClassificationsFooter());
                    case "AuxStart" -> formatDefinition.write(writer, "AuxStart", getAuxStart());
                    case "AuxFooter" -> formatDefinition.write(writer, "AuxFooter", getAuxFooter());
                    case "FileFooter" -> formatDefinition.write(writer, "FileFooter", getFileFooter());
                }
            }
        }
        formatDefinition.finishWrite();
    }

    private String getCreator() {
        return creator;
    }

    private void setCreator(String creator) {
        this.creator = creator;
    }

    private long getCreationDate() {
        return creationDate;
    }

    private void setCreationDate(long creationDate) {
        this.creationDate = creationDate;
    }

    public String getAlignmentFile() {
        return alignmentFile;
    }

    public void setAlignmentFile(String alignmentFile) {
        this.alignmentFile = alignmentFile;
    }

    public String getAlignmentFileFormat() {
        return alignmentFileFormat;
    }

    private void setAlignmentFileFormat(String alignmentFileFormat) {
        this.alignmentFileFormat = alignmentFileFormat;
    }

    public String getBlastMode() {
        return blastMode;
    }

    private void setBlastMode(String blastMode) {
        this.blastMode = blastMode;
    }

    public long getAlignmentFileSize() {
        return alignmentFileSize;
    }

    private void setAlignmentFileSize(long alignmentFileSize) {
        this.alignmentFileSize = alignmentFileSize;
    }

    public String getReadsFile() {
        return readsFile;
    }

    public void setReadsFile(String readsFile) {
        this.readsFile = readsFile;
    }

    public String getReadsFileFormat() {
        return readsFileFormat;
    }

    private void setReadsFileFormat(String readsFileFormat) {
        this.readsFileFormat = readsFileFormat;
    }

    public long getReadsFileSize() {
        return readsFileSize;
    }

    private void setReadsFileSize(long readsFileSize) {
        this.readsFileSize = readsFileSize;
    }

    /** Returns the end of the matches section; an alias for {@link #getMatchesFooter()}. */
    public long getEndMatches() {
        return getMatchesFooter();
    }

    public long getFileFooter() {
        return fileFooter;
    }

    public void setFileFooter(long fileFooter) {
        this.fileFooter = fileFooter;
    }

    public long getMatchesFooter() {
        return matchesFooter;
    }

    private void setMatchesFooter(long matchesFooter) {
        this.matchesFooter = matchesFooter;
    }

    public long getClassificationsFooter() {
        return classificationsFooter;
    }

    public void setClassificationsFooter(long classificationsFooter) {
        this.classificationsFooter = classificationsFooter;
    }

    public long getClassificationsStart() {
        return classificationsStart;
    }

    private void setClassificationsStart(long classificationsStart) {
        this.classificationsStart = classificationsStart;
    }

    public long getAuxStart() {
        return auxStart;
    }

    public void setAuxStart(long auxStart) {
        this.auxStart = auxStart;
    }

    public long getAuxFooter() {
        return auxFooter;
    }

    public void setAuxFooter(long auxFooter) {
        this.auxFooter = auxFooter;
    }

    public long getMatchesStart() {
        return matchesStart;
    }

    private void setMatchesStart(long matchesStart) {
        this.matchesStart = matchesStart;
    }
}
| 9,135 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Utilities.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/Utilities.java | /*
* Utilities.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.*;
import megan.io.InputReader;
import java.io.File;
import java.io.IOException;
/**
* Some utilities for creating RMA3 files
* Created by huson on 5/23/14.
*/
class Utilities {
    /**
     * Finds the query in the reads file. When found, the FileLineBytesIterator is
     * positioned at the header line of the record.
     *
     * @param queryName name to look for (without the leading '>' or '@')
     * @param it        iterator over the reads file
     * @param isFastA   true for fastA, false for fastQ (four-line records)
     * @return true, if found
     */
    public static boolean findQuery(String queryName, FileLineBytesIterator it, boolean isFastA) {
        try {
            if (isFastA) {
                while (it.hasNext()) {
                    byte[] line = it.next();
                    if (line[0] == '>' && matchName(queryName, line, it.getLineLength()))
                        return true;
                }
            } else { // assume that this is fastQ
                if (it.getLinePosition() == 0) // at beginning of file
                {
                    byte[] line = it.next();
                    if (line[0] != '@')
                        throw new IOException("Expected FastQ header line (starting with '@'), got: " + StringUtils.toString(line, it.getLineLength()));
                    if (matchName(queryName, line, it.getLineLength()))
                        return true;
                    // skip sequence, '+' and quality lines:
                    it.next();
                    it.next();
                    it.next();
                }
                while (it.hasNext()) {
                    byte[] line = it.next();
                    if (line[0] != '@')
                        throw new IOException("Expected FastQ header line (starting with '@'), got: " + StringUtils.toString(line, it.getLineLength()));
                    if (matchName(queryName, line, it.getLineLength()))
                        return true;
                    // skip sequence, '+' and quality lines:
                    it.next();
                    it.next();
                    it.next();
                }
            }
        } catch (Exception ex) {
            Basic.caught(ex);
        }
        return false;
    }

    /**
     * Assuming that the FileLineBytesIterator has just returned the header line of a
     * fastA or fastQ record, returns the full record text in fastA format.
     *
     * @return the record as a fastA string
     */
    public static String getFastAText(FileLineBytesIterator it, boolean isFastA) {
        final StringBuilder buf = new StringBuilder();
        if (isFastA) {
            byte[] bytes = it.getLine();
            while (true) { // header plus all sequence lines up to the next '>'
                for (int i = 0; i < it.getLineLength(); i++)
                    buf.append((char) bytes[i]);
                if (!it.hasNext() || it.peekNextByte() == '>')
                    break;
                bytes = it.next();
            }
        } else // fastq, copy in fastA format...
        {
            byte[] bytes = it.getLine();
            buf.append(">");
            for (int i = 1; i < it.getLineLength(); i++) // first line has header, skip the leading '@'
                buf.append((char) bytes[i]);
            if (it.hasNext()) { // second line has sequence
                bytes = it.next();
                for (int i = 0; i < it.getLineLength(); i++)
                    buf.append((char) bytes[i]);
            }
            // skip the '+' and quality lines:
            if (it.hasNext())
                it.next();
            if (it.hasNext())
                it.next();
        }
        return buf.toString();
    }

    /**
     * Assuming that the FileLineBytesIterator has just returned the header line of a
     * fastA or fastQ record, skips the remainder of the record.
     */
    public static void skipFastAText(FileLineBytesIterator it, boolean isFastA) {
        if (isFastA) {
            while (it.hasNext() && it.peekNextByte() != '>') {
                it.next();
            }
        } else // fastq: skip the three remaining lines of the four-line record
        {
            if (it.hasNext()) { // second
                it.next();
            }
            if (it.hasNext()) { // third
                it.next();
            }
            if (it.hasNext()) { // fourth line
                it.next();
            }
        }
    }

    /**
     * Assuming that the FileLineBytesIterator has just returned the header line of a
     * fastA or fastQ record, copies the record (in fastA format) into the result
     * buffer, growing it as needed.
     *
     * @return number of bytes written into the result buffer
     */
    public static int getFastAText(FileLineBytesIterator it, boolean isFastA, Single<byte[]> result) {
        byte[] buffer = result.get();
        if (isFastA) {
            byte[] bytes = it.getLine();
            int length = 0;
            while (true) {
                while (length + it.getLineLength() >= buffer.length) { // grow result buffer
                    byte[] tmp = new byte[2 * buffer.length];
                    System.arraycopy(buffer, 0, tmp, 0, length);
                    buffer = tmp;
                    result.set(buffer);
                }
                System.arraycopy(bytes, 0, buffer, length, it.getLineLength());
                length += it.getLineLength();
                if (!it.hasNext() || it.peekNextByte() == '>')
                    break;
                bytes = it.next();
            }
            return length;
        } else // fastq, copy in fastA format
        {
            byte[] bytes = it.getLine();
            int length = 0;
            while (length + it.getLineLength() >= buffer.length) { // grow result buffer
                byte[] tmp = new byte[2 * buffer.length];
                System.arraycopy(buffer, 0, tmp, 0, length);
                buffer = tmp;
                result.set(buffer);
            }
            buffer[length++] = '>'; // first character is '>' (not '@')
            System.arraycopy(bytes, 1, buffer, length, it.getLineLength() - 1);
            length += it.getLineLength() - 1; // fix: previously length was not advanced, so the sequence overwrote the header
            if (it.hasNext()) { // second line has sequence
                bytes = it.next();
                while (length + it.getLineLength() >= buffer.length) { // grow result buffer
                    byte[] tmp = new byte[2 * buffer.length];
                    System.arraycopy(buffer, 0, tmp, 0, length);
                    buffer = tmp;
                    result.set(buffer);
                }
                System.arraycopy(bytes, 0, buffer, length, it.getLineLength());
                length += it.getLineLength(); // fix: account for the copied sequence bytes
            }
            if (it.hasNext()) { // third
                it.next();
            }
            if (it.hasNext()) { // fourth line
                it.next();
            }
            return length;
        }
    }

    /**
     * Matches a header line against a query name: the first whitespace-delimited
     * word after an optional leading '>' or '@' must equal the name exactly.
     *
     * @return true, if name matches name in line
     */
    private static boolean matchName(String queryName, byte[] line, int lineLength) {
        int start = 0;
        if (lineLength > 0 && (line[start] == '>' || line[start] == '@'))
            start++;
        // fix: bounds check must come before the array access to avoid ArrayIndexOutOfBoundsException
        while (start < lineLength && Character.isWhitespace(line[start]))
            start++;
        int end = start;
        while (end < lineLength && !Character.isWhitespace(line[end])) {
            end++;
        }
        if (end - start != queryName.length())
            return false; // have different lengths
        for (int i = 0; i < queryName.length(); i++) {
            if (queryName.charAt(i) != (char) line[start + i])
                return false;
        }
        return true; //
    }

    /**
     * Returns the fastA or fastQ record at the given position as fastA text.
     *
     * @return fastA record at the given position
     * @throws IOException if the position does not start with '>' or '@'
     */
    public static String getFastAText(InputReader reader, long position) throws IOException {
        StringBuilder buf = new StringBuilder();
        reader.seek(position);
        int value = reader.read();
        boolean isFastA = (value == '>');
        if (!isFastA && (value != '@'))
            throw new IOException("Expected '>' or '@' at position: " + position + ", got: " + (char) value);
        buf.append('>');
        if (isFastA) {
            value = reader.read();
            // fix: also stop at end of stream (-1); previously this looped forever on the last record
            while (value != -1 && value != '>') {
                if (value != '\r')
                    buf.append((char) value);
                value = reader.read();
            }
        } else // fastq, copy in fastA format...
        {
            boolean seenFirstEndOfLine = false;
            value = reader.read();
            // fix: also stop at end of stream (-1)
            while (value != -1) {
                if (value != '\r')
                    buf.append((char) value);
                if (value == '\n') {
                    if (!seenFirstEndOfLine)
                        seenFirstEndOfLine = true;
                    else
                        break; // header and sequence line copied
                }
                value = reader.read();
            }
        }
        return buf.toString();
    }

    /**
     * Gets the header from a fastA record.
     *
     * @return header (the whole text, if it contains no newline)
     */
    public static String getFastAHeader(String fastAText) {
        int end = fastAText.indexOf('\n');
        if (end == -1)
            return fastAText;
        else
            return fastAText.substring(0, end);
    }

    /**
     * Gets the sequence from a fastA record.
     *
     * @return sequence, or "Unavailable" if the record has no newline
     */
    public static String getFastASequence(String fastAText) {
        int start = fastAText.indexOf('\n');
        if (start == -1)
            return "Unavailable";
        else
            return fastAText.substring(start + 1);
    }

    /**
     * Is the given file a MALT- or DIAMOND-generated SAM file? Decided by looking
     * for "PN:MALT" or "PN:DIAMOND" in the SAM header ('@'-prefixed) lines.
     *
     * @return true, if file is a MALT- or DIAMOND-generated SAM file
     */
    public static boolean IsMaltOrDiamondSAMFile(File file) {
        String suffix = FileUtils.getFileSuffix(FileUtils.getFileNameWithoutZipOrGZipSuffix(file.getName()));
        if (suffix == null)
            return false;
        if (!suffix.equalsIgnoreCase(".sam"))
            return false;
        try {
            try (FileLineIterator it = new FileLineIterator(file.getPath())) {
                while (it.hasNext()) {
                    String aLine = it.next();
                    if (aLine.startsWith("@")) {
                        if (aLine.contains("PN:MALT") || (aLine.contains("PN:DIAMOND")))
                            return true;
                    } else {
                        return false; // past the header without a match
                    }
                }
            }
        } catch (IOException ignored) { // best effort: unreadable file is simply "not a MALT/DIAMOND SAM file"
        }
        return false;
    }
}
| 11,060 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReadBlockGetterRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/ReadBlockGetterRMA3.java | /*
* ReadBlockGetterRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.Basic;
import jloda.util.StringUtils;
import megan.data.IReadBlock;
import megan.data.IReadBlockGetter;
import megan.data.MatchBlockFromBlast;
import megan.data.ReadBlockFromBlast;
import megan.io.IInputReader;
import megan.io.InputReader;
import megan.parsers.sam.SAMMatch;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
/**
* accesses a read block
* <p/>
* Created by huson on 5/21/14.
*/
public class ReadBlockGetterRMA3 implements IReadBlockGetter {
    private final RMA3File rma3File;
    private final ReadLineRMA3 readLine;   // reusable buffer for one read record
    private final MatchLineRMA3 matchLine; // reusable buffer for one match record
    private final float minScore;          // matches below this bit score are skipped
    private final float maxExpected;       // matches above this expected value are skipped
    private final boolean wantReadText;
    private final boolean wantMatches;
    private final IInputReader reader;     // reader on the RMA3 file itself
    private final InputReader samReader;   // reader on the external SAM file, if match text is not embedded
    private final InputReader fastaReader; // reader on the external fastA file, if read text is not embedded
    private final SAMMatch samMatch;       // reusable SAM parser
    private final long startMatches;       // first byte of the matches section
    private final long endMatches;         // end of the matches section
    private boolean inStreaming = false;   // once streaming has begun, random access by uid is refused

    /**
     * constructor: opens the external fastA and SAM companion files only when the
     * requested text is not embedded in the RMA3 file itself, and positions the
     * RMA3 reader at the start of the matches section
     */
    public ReadBlockGetterRMA3(RMA3File rma3File, float minScore, float maxExpected, boolean wantReadText, boolean wantMatches) throws IOException {
        this.rma3File = rma3File;
        this.wantReadText = wantReadText;
        this.wantMatches = wantMatches;
        readLine = new ReadLineRMA3(rma3File.getMatchFooter().getReadFormatDef());
        matchLine = new MatchLineRMA3(rma3File.getMatchFooter().getMatchFormatDef());
        samMatch = new SAMMatch(rma3File.getBlastMode());
        startMatches = rma3File.getStartMatches();
        endMatches = rma3File.getEndMatches();
        this.minScore = minScore;
        this.maxExpected = maxExpected;
        final FileManagerRMA3 rma3FileManager = FileManagerRMA3.getInstance();
        if (wantReadText && !readLine.isEmbedText()) {
            // read text lives in an external fastA file
            final File fastAFile = rma3FileManager.getFASTAFile(rma3File.getFileName());
            if (fastAFile != null)
                fastaReader = new InputReader(fastAFile, null, null, true);
            else
                fastaReader = null;
        } else
            fastaReader = null;
        if (wantMatches && !matchLine.isEmbedText()) {
            // match text lives in an external SAM file
            final File samFile = rma3FileManager.getSAMFile(rma3File.getFileName());
            if (samFile != null)
                samReader = new InputReader(samFile, null, null, true);
            else
                samReader = null;
        } else
            samReader = null;
        reader = rma3File.getReader();
        reader.seek(startMatches);
    }

    /**
     * grabs the read block with the given UID. Passing uid == -1 reads the next
     * block at the current position and switches this getter into streaming mode;
     * once streaming, random access by uid is no longer allowed.
     *
     * @return read block
     */
    @Override
    public IReadBlock getReadBlock(long uid) throws IOException {
        if (uid == -1 && !inStreaming) {
            inStreaming = true;
        }
        if (uid >= 0) {
            if (inStreaming)
                throw new IOException("getReadBlock(uid=" + uid + ") failed: streamOnly");
            reader.seek(uid);
        } else
            uid = reader.getPosition();
        readLine.read(reader);
        // sanity check: the stored readUid must equal the record's own file position
        if (readLine.getReadUid() != uid)
            throw new IOException("getReadUid(): doesn't match expected: " + uid);
        final ReadBlockFromBlast readBlock = new ReadBlockFromBlast();
        readBlock.setUId(uid);
        readBlock.setReadWeight(readLine.getReadWeight());
        if (readLine.isEmbedText()) {
            // read text is stored inline in the RMA3 file
            String readText = readLine.getText();
            readBlock.setReadHeader(Utilities.getFastAHeader(readText));
            readBlock.setReadSequence(Utilities.getFastASequence(readText));
            readBlock.setNumberOfMatches(readLine.getNumberOfMatches());
        } else if (fastaReader != null) {
            // read text is fetched from the external fastA file by offset
            try {
                String readText = Utilities.getFastAText(fastaReader, readLine.getFileOffset());
                readBlock.setReadHeader(Utilities.getFastAHeader(readText));
                readBlock.setReadSequence(Utilities.getFastASequence(readText));
            } catch (Exception ex) {
                Basic.caught(ex);
            }
        }
        final ArrayList<MatchBlockFromBlast> matches = new ArrayList<>(readLine.getNumberOfMatches());
        // embedded match texts are delta-compressed against the read's first SAM line,
        // so that line must be remembered for SAMCompress.inflate()
        String firstSAMLineForCurrentRead = null;
        for (int i = 0; i < readLine.getNumberOfMatches(); i++) {
            if (reader.getPosition() >= endMatches)
                throw new IOException("Overrun matches section");
            matchLine.read(reader);
            if (wantMatches && matchLine.getBitScore() >= minScore && matchLine.getExpected() <= maxExpected) {
                MatchBlockFromBlast matchBlock = new MatchBlockFromBlast();
                matchBlock.setUId(matchLine.getFileOffset());
                matchBlock.setExpected(matchLine.getExpected());
                matchBlock.setBitScore(matchLine.getBitScore());
                matchBlock.setPercentIdentity(matchLine.getPercentId());
                matchBlock.setTaxonId(matchLine.getTaxId());
                if (matchLine.isDoKegg())
                    matchBlock.setId("KEGG", matchLine.getKeggId());
                if (matchLine.isDoSeed())
                    matchBlock.setId("SEED", matchLine.getSeedId());
                if (matchLine.isDoCog())
                    matchBlock.setId("EGGNOG", matchLine.getCogId());
                if (matchLine.isEmbedText()) {
                    // inflate the compressed SAM line and convert it to BLAST text
                    samMatch.parse(SAMCompress.inflate(firstSAMLineForCurrentRead, matchLine.getText()));
                    matchBlock.setText(samMatch.getBlastAlignmentText());
                    if (readBlock.getReadHeader() == null)
                        readBlock.setReadHeader(samMatch.getQueryName());
                } else if (samReader != null) {
                    // fetch the SAM line from the external file by offset
                    try {
                        samReader.seek(matchLine.getFileOffset());
                        samMatch.parse(samReader.readLine());
                        matchBlock.setText(samMatch.getBlastAlignmentText());
                        if (readBlock.getReadHeader() == null)
                            readBlock.setReadHeader(samMatch.getQueryName());
                    } catch (Exception ex) {
                        Basic.caught(ex);
                    }
                }
                matches.add(matchBlock);
            }
            if (firstSAMLineForCurrentRead == null) { // need to grab first line
                if (matchLine.isEmbedText()) {
                    firstSAMLineForCurrentRead = matchLine.getText();
                } else if (wantReadText && (readBlock.getReadHeader() == null || readBlock.getReadHeader().length() == 0) && samReader != null) { // if we don't yet have the read header then we need it now
                    try {
                        samReader.seek(matchLine.getFileOffset());
                        firstSAMLineForCurrentRead = samReader.readLine();
                    } catch (Exception ex) {
                        Basic.caught(ex);
                    }
                }
            }
        }
        // last resort for the read header: first word of the first SAM line
        if (wantReadText && (readBlock.getReadHeader() == null || readBlock.getReadHeader().length() == 0) && firstSAMLineForCurrentRead != null)
            readBlock.setReadHeader(StringUtils.getFirstWord(firstSAMLineForCurrentRead));
        readBlock.setMatchBlocks(matches.toArray(new MatchBlockFromBlast[0]));
        return readBlock;
    }

    /** Closes the RMA3 file and any open companion readers. */
    @Override
    public void close() {
        try {
            rma3File.close();
            if (samReader != null)
                samReader.close();
            if (fastaReader != null)
                fastaReader.close();
        } catch (Exception e) {
            Basic.caught(e);
        }
    }

    /** Returns the current position in the RMA3 file, or -1 on error. */
    public long getPosition() {
        try {
            return reader.getPosition();
        } catch (IOException e) {
            return -1;
        }
    }

    /** Returns the first byte of the matches section. */
    public long getStartMatches() {
        return startMatches;
    }

    /** Returns the end of the matches section. */
    public long getEndMatches() {
        return endMatches;
    }

    /**
     * get total number of reads
     *
     * @return total number of reads
     */
    @Override
    public long getCount() {
        return rma3File.getMatchFooter().getNumberOfReads();
    }
}
| 9,058 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ReadLineRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/ReadLineRMA3.java | /*
* ReadLineRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import megan.data.TextStoragePolicy;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import java.io.IOException;
/**
 * A single read entry of an RMA3 file: its uid, length, weight, match count
 * and either the embedded read text or an offset into the external reads file.
 * Created by huson on May 2014
 */
public class ReadLineRMA3 extends BaseRMA3 {
    // configuration: is the read text stored inline in the RMA3 file?
    private boolean embedText;

    // data stored in a read line:
    private long readUid; // unique identifier, equal to the record's location in the RMA3 file
    private int length;
    private int numberOfMatches;
    private int readWeight;
    private final boolean hasMagnitude; // does the on-disk layout include a Weight field?

    // alternatives, depending on the text storage policy:
    private long fileOffset; // location of the text in the external reads file
    private String text;     // the read text itself (header and sequence)

    /**
     * Constructs a read line whose layout is taken from an existing format string.
     */
    public ReadLineRMA3(String format) {
        super("");
        setFormatDef(format); // also derives the embedText flag from the format
        hasMagnitude = format.contains("Weight:Integer");
    }

    /**
     * Constructs a read line for writing, deriving the layout from the storage policy.
     */
    public ReadLineRMA3(TextStoragePolicy textStoragePolicy, boolean hasReadWeight) {
        super("ReadUid:Long ReadLength:Integer " + (hasReadWeight ? "Weight:Integer " : "") + "NumMatches:Integer");
        this.hasMagnitude = hasReadWeight;
        this.embedText = (textStoragePolicy == TextStoragePolicy.Embed || textStoragePolicy == TextStoragePolicy.InRMAZ);
        setFormatDef(getFormatDef() + (embedText ? " BlastText:String" : " FileOffset:Long"));
    }

    /**
     * Seeks to the given position and reads one read line.
     */
    public void read(IInputReader reader, long position) throws IOException {
        reader.seek(position);
        read(reader);
    }

    /**
     * Reads one read line at the reader's current position. For efficiency the
     * field order is assumed fixed: uid, length, [weight,] numMatches, text/offset.
     */
    public void read(IInputReader reader) throws IOException {
        readUid = reader.readLong();
        length = reader.readInt();
        readWeight = hasMagnitude ? reader.readInt() : 1;
        numberOfMatches = reader.readInt();
        if (embedText)
            text = reader.readString();
        else
            fileOffset = reader.readLong();
    }

    /**
     * Writes this read line; the field order mirrors read().
     */
    public void write(IOutputWriter writer) throws IOException {
        writer.writeLong(readUid);
        writer.writeInt(length);
        if (hasMagnitude)
            writer.writeInt(readWeight);
        writer.writeInt(numberOfMatches);
        if (embedText)
            writer.writeString(text);
        else
            writer.writeLong(fileOffset);
    }

    /** Returns the read's uid (its location in the RMA3 file). */
    public long getReadUid() {
        return this.readUid;
    }

    public void setReadUid(long readUid) {
        this.readUid = readUid;
    }

    /** Returns the read length. */
    public int getLength() {
        return this.length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    /** Returns the read weight (1 when the layout carries no Weight field). */
    public int getReadWeight() {
        return this.readWeight;
    }

    public void setReadWeight(int readWeight) {
        this.readWeight = readWeight;
    }

    /** Returns the number of matches that follow this read in the file. */
    public int getNumberOfMatches() {
        return this.numberOfMatches;
    }

    public void setNumberOfMatches(int numberOfMatches) {
        this.numberOfMatches = numberOfMatches;
    }

    /** Returns the offset of the read text in the external reads file. */
    public long getFileOffset() {
        return this.fileOffset;
    }

    public void setFileOffset(long fileOffset) {
        this.fileOffset = fileOffset;
    }

    /** Returns the embedded read text (header and sequence), if embedded. */
    public String getText() {
        return this.text;
    }

    public void setText(String text) {
        this.text = text;
    }

    /** Returns true if the read text is stored inline in the RMA3 file. */
    public boolean isEmbedText() {
        return this.embedText;
    }

    /** Updates the format definition and re-derives whether the read text is embedded. */
    public void setFormatDef(String formatDef) {
        this.embedText = formatDef.contains("BlastText:String");
        super.setFormatDef(formatDef);
    }
}
| 4,631 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ClassificationsFooterRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/ClassificationsFooterRMA3.java | /*
* ClassificationsFooterRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.Pair;
import megan.core.ClassificationType;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
/**
* Format of a match in an RMA3 file
* Created by huson on 5/16/14.
*/
public class ClassificationsFooterRMA3 extends BaseRMA3 {
private final String defaultFormat;
private boolean doKegg = false;
private boolean doSeed = false;
private boolean doCog = false;
private boolean doPfam = false;
private String classificationBlockFormat = ClassificationBlockRMA3.FORMAT;
private final EnumMap<ClassificationType, Long> startLocation = new EnumMap<>(ClassificationType.class);
private final EnumMap<ClassificationType, Long> endLocation = new EnumMap<>(ClassificationType.class);
/**
* constructor
*/
public ClassificationsFooterRMA3() {
super("ClassificationBlockFormat:String TaxStart:Long TaxEnd:Long");
defaultFormat = getFormatDef();
}
/**
* constructor
*/
public ClassificationsFooterRMA3(boolean doKegg, boolean doSeed, boolean doCog, boolean doPfam) {
super("TaxStart:Long TaxEnd:Long");
defaultFormat = getFormatDef();
if (doKegg)
setDo(ClassificationType.KEGG);
if (doSeed)
setDo(ClassificationType.SEED);
if (doCog)
setDo(ClassificationType.COG);
if (doCog)
setDo(ClassificationType.PFAM);
}
@Override
public void read(IInputReader reader, long startPos) throws IOException {
reader.seek(startPos);
setFormatDef(reader.readString());
doKegg = doSeed = doCog = doPfam = false;
FormatDefinition formatDefinition = FormatDefinition.fromString(getFormatDef());
for (Pair<String, FormatDefinition.Type> pair : formatDefinition.getList()) {
{
switch (pair.getFirst()) {
case "ClassificationBlockFormat" -> setClassificationBlockFormat(reader.readString());
case "TaxStart" -> setStart(ClassificationType.Taxonomy, reader.readLong());
case "TaxEnd" -> setEnd(ClassificationType.Taxonomy, reader.readLong());
case "KeggStart" -> {
setStart(ClassificationType.KEGG, reader.readLong());
doKegg = true;
}
case "KeggEnd" -> setEnd(ClassificationType.KEGG, reader.readLong());
case "SeedStart" -> {
setStart(ClassificationType.SEED, reader.readLong());
doSeed = true;
}
case "SeedEnd" -> setEnd(ClassificationType.SEED, reader.readLong());
case "CogStart" -> {
setStart(ClassificationType.COG, reader.readLong());
doCog = true;
}
case "CogEnd" -> setEnd(ClassificationType.COG, reader.readLong());
case "PfamStart" -> {
setStart(ClassificationType.PFAM, reader.readLong());
doPfam = true;
}
case "PfamEnd" -> setEnd(ClassificationType.PFAM, reader.readLong());
}
}
}
}
@Override
public void write(IOutputWriter writer) throws IOException {
writer.writeString(getFormatDef());
FormatDefinition formatDefinition = FormatDefinition.fromString(getFormatDef());
formatDefinition.startWrite();
for (Pair<String, FormatDefinition.Type> pair : formatDefinition.getList()) {
{
switch (pair.getFirst()) {
case "ClassificationBlockFormat" -> formatDefinition.write(writer, "ClassificationBlockFormat", getClassificationBlockFormat());
case "TaxStart" -> formatDefinition.write(writer, "TaxStart", getStart(ClassificationType.Taxonomy));
case "TaxEnd" -> formatDefinition.write(writer, "TaxEnd", getEnd(ClassificationType.Taxonomy));
case "KeggStart" -> formatDefinition.write(writer, "KeggStart", getStart(ClassificationType.KEGG));
case "KeggEnd" -> formatDefinition.write(writer, "KeggEnd", getEnd(ClassificationType.KEGG));
case "SeedStart" -> formatDefinition.write(writer, "SeedStart", getStart(ClassificationType.SEED));
case "SeedEnd" -> formatDefinition.write(writer, "SeedEnd", getEnd(ClassificationType.SEED));
case "CogStart" -> formatDefinition.write(writer, "CogStart", getStart(ClassificationType.COG));
case "CogEnd" -> formatDefinition.write(writer, "CogEnd", getEnd(ClassificationType.COG));
case "PfamStart" -> formatDefinition.write(writer, "PfamStart", getStart(ClassificationType.PFAM));
case "PfamEnd" -> formatDefinition.write(writer, "PfamEnd", getEnd(ClassificationType.PFAM));
}
}
}
formatDefinition.finishWrite();
}
private void updateFormat() {
setFormatDef(defaultFormat);
if (doKegg)
setFormatDef(getFormatDef() + " KeggStart:Long KeggEnd:Long");
if (doSeed)
setFormatDef(getFormatDef() + " SeedStart:Long SeedEnd:Long");
if (doCog)
setFormatDef(getFormatDef() + " CogStart:Long CogEnd:Long");
if (doPfam)
setFormatDef(getFormatDef() + " PfamStart:Long PfamEnd:Long");
}
public long getStart(ClassificationType type) {
Long value = startLocation.get(type);
return value != null ? value : 0L;
}
public void setStart(ClassificationType type, long location) {
startLocation.put(type, location);
}
private long getEnd(ClassificationType type) {
Long value = endLocation.get(type);
return value != null ? value : 0L;
}
public void setEnd(ClassificationType type, long location) {
endLocation.put(type, location);
}
public List<String> getAllNames() {
ArrayList<String> names = new ArrayList<>(4);
names.add(ClassificationType.Taxonomy.toString());
if (doKegg)
names.add("KEGG");
if (doSeed)
names.add("SEED");
if (doCog)
names.add("EGGNOG");
if (doPfam)
names.add("PFAM");
return names;
}
    /**
     * Clears all recorded start and end positions; the active-classification flags are kept.
     */
    public void clear() {
        startLocation.clear();
        endLocation.clear();
    }
public void setDo(ClassificationType classificationType) {
switch (classificationType) {
case KEGG:
doKegg = true;
break;
case SEED:
doSeed = true;
break;
case COG:
doCog = true;
break;
case PFAM:
doPfam = true;
break;
default:
return;
}
updateFormat();
}
public boolean isDo(ClassificationType classificationType) {
return switch (classificationType) {
case KEGG -> doKegg;
case SEED -> doSeed;
case COG -> doCog;
case PFAM -> doPfam;
case Taxonomy -> true;
default -> false;
};
}
    /**
     * Gets the format-definition string used for classification blocks.
     */
    private String getClassificationBlockFormat() {
        return classificationBlockFormat;
    }
    /**
     * Sets the format-definition string used for classification blocks.
     */
    private void setClassificationBlockFormat(String classificationBlockFormat) {
        this.classificationBlockFormat = classificationBlockFormat;
    }
}
| 8,578 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
BaseRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/BaseRMA3.java | /*
* BaseRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import megan.io.OutputWriterHumanReadable;
import java.io.IOException;
import java.io.StringWriter;
/**
 * Base class for the data blocks that make up the RMA3 file format.
 * Each block carries a format-definition string and knows how to read and write itself.
 * Created by huson on 5/16/14.
 */
public abstract class BaseRMA3 {
    private String formatDef;

    /**
     * constructor
     *
     * @param formatDef the initial format-definition string for this block
     */
    public BaseRMA3(String formatDef) {
        setFormatDef(formatDef);
    }

    /**
     * Renders this block in human-readable form by writing it to a string-backed writer.
     */
    public String toString() {
        final StringWriter stringWriter = new StringWriter();
        final IOutputWriter writer = new OutputWriterHumanReadable(stringWriter);
        try {
            write(writer);
        } catch (IOException ignored) {
            // best effort: rendering for display, ignore write problems
        }
        return writer.toString();
    }

    abstract public void read(IInputReader reader, long startPos) throws IOException;

    protected abstract void write(IOutputWriter writer) throws IOException;

    String getFormatDef() {
        return formatDef;
    }

    void setFormatDef(String formatDef) {
        this.formatDef = formatDef;
    }
}
| 1,808 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
RMA3FileFilter.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/RMA3FileFilter.java | /*
* RMA3FileFilter.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import megan.io.InputReader;
import megan.rma2.RMA2File;
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
/**
 * File filter that accepts RMA3 files, identified by the RMA magic number
 * followed by format version 3 at the start of the file.
 * Created by huson on 10/3/14.
 */
public class RMA3FileFilter implements FileFilter {
    // eagerly-created singleton: safe under concurrent access, unlike the previous
    // unsynchronized lazy initialization
    private static final RMA3FileFilter instance = new RMA3FileFilter();

    /**
     * gets the singleton instance
     *
     * @return instance
     */
    public static RMA3FileFilter getInstance() {
        return instance;
    }

    /**
     * Tests whether or not the specified abstract pathname should be
     * included in a pathname list.
     *
     * @param pathname The abstract pathname to be tested
     * @return <code>true</code> if and only if the file starts with the RMA
     * magic number followed by version number 3
     */
    @Override
    public boolean accept(File pathname) {
        // open read-only and check the two leading header ints; any I/O problem means "not an RMA3 file"
        try (InputReader r = new InputReader(pathname, null, null, true)) {
            return r.readInt() == RMA2File.MAGIC_NUMBER && r.readInt() == 3;
        } catch (IOException e) {
            return false;
        }
    }
}
| 1,959 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
RMA3FileModifier.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/RMA3FileModifier.java | /*
* RMA3FileModifier.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.ListOfLongs;
import megan.core.ClassificationType;
import megan.io.InputOutputReaderWriter;
import java.io.File;
import java.io.IOException;
import java.util.Map;
/**
 * class used to rescan the classifications in an RMA3 file in place
 */
public class RMA3FileModifier extends RMA3File {
    // readerWriter used for in-place modification; non-null only between
    // startModification() and finishModification()/close()
    private InputOutputReaderWriter io;
    /**
     * construct an RMA3 modifier and read in RMA3 data
     *
     * @param fileName name of the RMA3 file to be modified
     * @throws IOException if the file cannot be opened or parsed
     */
    public RMA3FileModifier(String fileName) throws IOException {
        super(fileName, READ_WRITE);
        close(); // have read the file, now close the readerWriter
    }
    /**
     * start the modification process: reopens the file and truncates it at the
     * start of the classifications section so updated blocks can be appended
     *
     * @throws IOException if seeking or truncating fails
     */
    public void startModification() throws IOException {
        io = new InputOutputReaderWriter(new File(getFileName()), READ_WRITE);
        getClassificationsFooter().clear();
        // position at the old classifications section and drop everything after it
        io.seek(getFileFooter().getClassificationsStart());
        io.setLength(io.getPosition());
    }
    /**
     * rescan a specific classification: writes its classification block at the
     * current position and records the start/end in the classifications footer
     *
     * @param classificationType the classification to update
     * @param classId2locations  maps each class id to the match locations assigned to it
     * @throws IOException if writing fails
     */
    public void updateClassification(ClassificationType classificationType, Map<Integer, ListOfLongs> classId2locations) throws IOException {
        getClassificationsFooter().setStart(classificationType, io.getPosition());
        getClassificationsFooter().setDo(classificationType);
        final ClassificationBlockRMA3 classificationBlock = new ClassificationBlockRMA3(classificationType);
        for (Integer classId : classId2locations.keySet()) {
            // counts are taken from the number of locations per class
            classificationBlock.setSum(classId, classId2locations.get(classId).size());
        }
        classificationBlock.write(io, classId2locations);
        getClassificationsFooter().setEnd(classificationType, io.getPosition());
    }
    /**
     * finish the rescan process: writes classifications footer, aux-blocks footer
     * and file footer in order, recording each section's start position in the
     * file footer just before the section is written
     *
     * @throws IOException if writing fails
     */
    public void finishModification() throws IOException {
        getFileFooter().setClassificationsFooter(io.getPosition());
        getClassificationsFooter().write(io);
        // aux-data section is empty here, so its start and footer positions coincide
        getFileFooter().setAuxStart(io.getPosition());
        getFileFooter().setAuxFooter(io.getPosition());
        getAuxBlocksFooter().write(io);
        getFileFooter().setFileFooter(io.getPosition());
        getFileFooter().write(io);
        close();
    }
    /**
     * close the readerWriter/writer, if it is open
     *
     * @throws IOException if closing the underlying file fails
     */
    public void close() throws IOException {
        if (io != null) {
            try {
                io.close();
            } finally {
                io = null; // always drop the reference, even if close() throws
            }
        }
    }
    /**
     * save the aux data to the rma3 file: truncates the file at the aux-data
     * start, then rewrites aux blocks, aux-blocks footer and file footer
     *
     * @param label2data maps aux-block labels to their binary payloads
     * @throws IOException if writing fails
     */
    public void saveAuxData(Map<String, byte[]> label2data) throws IOException {
        final FileFooterRMA3 fileFooter = getFileFooter();
        close();
        io = new InputOutputReaderWriter(new File(getFileName()), READ_WRITE);
        // cut the file back to the old aux-data start and append fresh data there
        io.setLength(fileFooter.getAuxStart());
        io.seek(fileFooter.getAuxStart());
        fileFooter.setAuxStart(io.getPosition());
        getAuxBlocksFooter().writeAuxBlocks(io, label2data);
        fileFooter.setAuxFooter(io.getPosition());
        getAuxBlocksFooter().write(io);
        fileFooter.setFileFooter(io.getPosition());
        fileFooter.write(io);
        close();
    }
}
| 4,006 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
RMAFileFilter.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/RMAFileFilter.java | /*
* RMAFileFilter.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.swing.util.FileFilterBase;
import jloda.util.FileUtils;
import megan.io.InputReader;
import megan.rma2.RMA2File;
import java.io.File;
import java.io.FilenameFilter;
/**
 * A file filter that accepts MEGAN RMA files (format versions 2 and 3).
 * Created by huson on 10/3/14.
 */
public class RMAFileFilter extends FileFilterBase implements FilenameFilter {
    private static RMAFileFilter instance;

    /**
     * gets an instance
     *
     * @return instance
     */
    public static RMAFileFilter getInstance() {
        if (instance == null)
            instance = new RMAFileFilter();
        return instance;
    }

    /**
     * Tests whether or not the specified abstract pathname should be
     * included in a pathname list: the name must carry a suffix beginning
     * with ".rma" and the header must hold the RMA magic number followed
     * by version 2 or 3.
     *
     * @param pathname The abstract pathname to be tested
     * @return <code>true</code> if and only if <code>pathname</code>
     * should be included
     */
    @Override
    public boolean accept(File pathname) {
        final String suffix = FileUtils.getFileSuffix(pathname.getName()).toLowerCase();
        if (!suffix.startsWith(".rma"))
            return false;
        try (InputReader reader = new InputReader(pathname, null, null, true)) {
            if (reader.readInt() != RMA2File.MAGIC_NUMBER)
                return false;
            final int version = reader.readInt();
            return version == 2 || version == 3;
        } catch (Exception ex) {
            // silently ignore: unreadable files are simply not accepted
            return false;
        }
    }

    /**
     * @return description of file matching the filter
     */
    public String getBriefDescription() {
        return "MEGAN RMA Files";
    }
}
| 2,421 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
ClassificationBlockRMA3.java | /FileExtraction/Java_unseen/husonlab_megan-ce/src/megan/rma3/ClassificationBlockRMA3.java | /*
* ClassificationBlockRMA3.java Copyright (C) 2024 Daniel H. Huson
*
* (Some files contain contributions from other authors, who are then mentioned separately.)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package megan.rma3;
import jloda.util.ListOfLongs;
import megan.core.ClassificationType;
import megan.data.IClassificationBlock;
import megan.io.IInputReader;
import megan.io.IOutputWriter;
import megan.io.InputReader;
import megan.io.OutputWriterHumanReadable;
import java.io.IOException;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * implements a classification block
 * Created by huson on 5/16/14.
 */
public class ClassificationBlockRMA3 implements IClassificationBlock {
    public final static String FORMAT = "NumClasses:Integer [ClassId:Integer Count:Integer [Location:Long]*]*";
    // number of classes, then for each class: class-id, count and then all locations in matches section
    private final Map<Integer, Integer> map = new HashMap<>();
    private ClassificationType classificationType;

    public ClassificationBlockRMA3(ClassificationType classificationType) {
        this.classificationType = classificationType;
    }

    /**
     * Gets the number of reads assigned to the given class, or 0 if the class is unknown.
     */
    @Override
    public int getSum(Integer key) {
        final Integer sum = map.get(key);
        return sum != null ? sum : 0;
    }

    @Override
    public void setWeightedSum(Integer key, float num) {
        throw new RuntimeException("Not implemented");
    }

    /**
     * Gets the weighted sum for the given class; identical to getSum, since RMA3 stores
     * no weights. Returns 0 for an unknown class (previously this threw a
     * NullPointerException through unboxing a null map entry).
     */
    @Override
    public float getWeightedSum(Integer key) {
        return getSum(key);
    }

    @Override
    public void setSum(Integer key, int num) {
        map.put(key, num);
    }

    @Override
    public String getName() {
        return classificationType.toString();
    }

    @Override
    public void setName(String name) {
        classificationType = ClassificationType.valueOf(name);
    }

    @Override
    public Set<Integer> getKeySet() {
        return map.keySet();
    }

    /**
     * write to file
     *
     * @param writer            target writer
     * @param classId2locations maps class ids to match locations; may be null, in which
     *                          case only ids and counts are written (used by toString())
     * @throws IOException if a location list's size disagrees with the recorded count
     */
    public void write(IOutputWriter writer, Map<Integer, ListOfLongs> classId2locations) throws IOException {
        writer.writeInt(map.size());
        for (Integer key : map.keySet()) {
            writer.writeInt(key); // class id
            final Integer sum = map.get(key);
            writer.writeInt(sum); // count
            if (classId2locations != null) {
                final ListOfLongs list = classId2locations.get(key);
                if (list.size() != sum)
                    throw new IOException("Wrong number of locations: " + list.size() + ", should be: " + sum);
                for (int i = 0; i < list.size(); i++)
                    writer.writeLong(list.get(i));
            }
        }
    }

    /**
     * reads the named classification block, recording each class's count and
     * skipping over the per-class location lists
     */
    public void read(ClassificationsFooterRMA3 classificationsFooter, IInputReader reader) throws IOException {
        map.clear();
        final long start = classificationsFooter.getStart(classificationType);
        if (start != 0) {
            reader.seek(start);
            final int numberOfClasses = reader.readInt();
            for (int i = 0; i < numberOfClasses; i++) {
                final int classId = reader.readInt();
                final int sum = reader.readInt();
                reader.skipBytes(8 * sum); // skip all locations, 8 bytes each
                map.put(classId, sum);
            }
        }
    }

    /**
     * reads the count for a single class from the classification block.
     * NOTE(review): unlike read() above, this overload does not seek to the block
     * start — it appears to assume the reader is already positioned there; confirm
     * against the callers before relying on it.
     *
     * @return size of the map after reading (0 or 1)
     */
    public int read(ClassificationsFooterRMA3 classificationsFooter, InputReader reader, int classId) throws IOException {
        map.clear();
        final long start = classificationsFooter.getStart(classificationType);
        if (start != 0) {
            final int numberOfClasses = reader.readInt();
            for (int i = 0; i < numberOfClasses; i++) {
                final int currentId = reader.readInt();
                final int sum = reader.readInt();
                reader.skipBytes(8 * sum); // skip all locations
                if (currentId == classId) {
                    map.put(currentId, sum);
                    break;
                }
            }
        }
        return map.size();
    }

    /**
     * read all locations for a given class and add them to the list
     */
    public void readLocations(ClassificationsFooterRMA3 classificationsFooter, IInputReader reader, int classId, ListOfLongs list) throws IOException {
        final long start = classificationsFooter.getStart(classificationType);
        if (start != 0) {
            reader.seek(start);
            final int numberOfClasses = reader.readInt();
            for (int i = 0; i < numberOfClasses; i++) {
                final int currentId = reader.readInt();
                final int sum = reader.readInt();
                if (currentId == classId) {
                    for (int z = 0; z < sum; z++) {
                        list.add(reader.readLong());
                    }
                } else
                    reader.skipBytes(8 * sum); // skip all locations
            }
        }
    }

    /**
     * human readable representation
     *
     * @return string
     */
    public String toString() {
        final IOutputWriter w = new OutputWriterHumanReadable(new StringWriter());
        try {
            write(w, null);
        } catch (IOException ignored) {
            // best effort: rendering for display only
        }
        return w.toString();
    }
}
| 6,187 | Java | .java | husonlab/megan-ce | 62 | 21 | 18 | 2016-05-09T10:55:38Z | 2024-02-22T23:23:42Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.