conflict_resolution (string, lengths 27 to 16k)
<<<<<<< public IntegerVertex pluck(int... index) { return new IntegerPluckVertex(this, index); } ======= public IntegerVertex slice(int dimension, int index) { return new IntegerSliceVertex(this, dimension, index); } >>>>>>> public IntegerVertex slice(int dimension, int index) { return new IntegerSliceVertex(this, dimension, index); } public IntegerVertex pluck(int... index) { return new IntegerPluckVertex(this, index); }
<<<<<<< import java.util.HashMap; import java.util.Map; import io.improbable.keanu.annotation.ExportVertexToPythonBindings; ======= >>>>>>> import io.improbable.keanu.annotation.ExportVertexToPythonBindings;
<<<<<<< public class PoissonVertex extends IntegerVertex implements ProbabilisticInteger, SamplableWithManyScalars<IntegerTensor> { ======= import java.util.Collections; import java.util.Map; import java.util.Set; import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar; public class PoissonVertex extends IntegerVertex implements ProbabilisticInteger { >>>>>>> import java.util.Collections; import java.util.Map; import java.util.Set; import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar; public class PoissonVertex extends IntegerVertex implements ProbabilisticInteger, SamplableWithManyScalars<IntegerTensor> {
<<<<<<< @Override public BooleanTensor calculate() { return op(a.getValue(), b.getValue()); } protected abstract BooleanTensor op(A a, B b); ======= protected abstract BooleanTensor op(A l, B r); >>>>>>> @Override public BooleanTensor calculate() { return op(a.getValue(), b.getValue()); } protected abstract BooleanTensor op(A l, B r);
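The resolution above keeps the base class's calculate() alongside the renamed abstract op(A l, B r): the superclass fetches the parent values and delegates the element-wise rule to the subclass. A minimal sketch of that template-method shape (the names BinaryOp and GreaterThan are illustrative, not Keanu's):

    // Template-method sketch: the base class owns the "fetch operands, delegate
    // to op" flow; subclasses supply only the element-wise rule.
    abstract class BinaryOp<A, B, R> {
        private final A left;
        private final B right;

        BinaryOp(A left, B right) {
            this.left = left;
            this.right = right;
        }

        // Mirrors calculate(): the single place operand values are read.
        R calculate() {
            return op(left, right);
        }

        protected abstract R op(A l, B r);
    }

    class GreaterThan extends BinaryOp<Double, Double, Boolean> {
        GreaterThan(double l, double r) {
            super(l, r);
        }

        @Override
        protected Boolean op(Double l, Double r) {
            return l > r;
        }
    }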
<<<<<<< import io.improbable.keanu.vertices.bool.BooleanVertex; import io.improbable.keanu.vertices.dbl.KeanuRandom; ======= >>>>>>> import io.improbable.keanu.vertices.bool.BooleanVertex;
<<<<<<< public DoubleVertex toGreaterThanMask(DoubleVertex rhs) { return new DoubleGreaterThanMaskVertex(this, rhs); } public DoubleVertex toGreaterThanMask(double rhs) { return new DoubleGreaterThanMaskVertex(this, new ConstantDoubleVertex(rhs)); } public <T extends NumberTensor> BoolVertex greaterThanOrEqualTo(Vertex<T> rhs) { ======= public <T extends NumberTensor> BooleanVertex greaterThanOrEqualTo(Vertex<T> rhs) { >>>>>>> public DoubleVertex toGreaterThanMask(DoubleVertex rhs) { return new DoubleGreaterThanMaskVertex(this, rhs); } public DoubleVertex toGreaterThanMask(double rhs) { return new DoubleGreaterThanMaskVertex(this, new ConstantDoubleVertex(rhs)); } public <T extends NumberTensor> BooleanVertex greaterThanOrEqualTo(Vertex<T> rhs) { <<<<<<< public DoubleVertex toGreaterThanOrEqualToMask(DoubleVertex rhs) { return new DoubleGreaterThanOrEqualToMaskVertex(this, rhs); } public DoubleVertex toGreaterThanOrEqualToMask(double rhs) { return new DoubleGreaterThanOrEqualToMaskVertex(this, new ConstantDoubleVertex(rhs)); } public <T extends NumberTensor> BoolVertex lessThan(Vertex<T> rhs) { ======= public <T extends NumberTensor> BooleanVertex lessThan(Vertex<T> rhs) { >>>>>>> public DoubleVertex toGreaterThanOrEqualToMask(DoubleVertex rhs) { return new DoubleGreaterThanOrEqualToMaskVertex(this, rhs); } public DoubleVertex toGreaterThanOrEqualToMask(double rhs) { return new DoubleGreaterThanOrEqualToMaskVertex(this, new ConstantDoubleVertex(rhs)); } public <T extends NumberTensor> BooleanVertex lessThan(Vertex<T> rhs) { <<<<<<< public DoubleVertex toLessThanMask(DoubleVertex rhs) { return new DoubleLessThanMaskVertex(this, rhs); } public DoubleVertex toLessThanMask(double rhs) { return new DoubleLessThanMaskVertex(this, new ConstantDoubleVertex(rhs)); } public <T extends NumberTensor> BoolVertex lessThanOrEqualTo(Vertex<T> rhs) { ======= public <T extends NumberTensor> BooleanVertex lessThanOrEqualTo(Vertex<T> rhs) { >>>>>>> public DoubleVertex toLessThanMask(DoubleVertex rhs) { return new DoubleLessThanMaskVertex(this, rhs); } public DoubleVertex toLessThanMask(double rhs) { return new DoubleLessThanMaskVertex(this, new ConstantDoubleVertex(rhs)); } public <T extends NumberTensor> BooleanVertex lessThanOrEqualTo(Vertex<T> rhs) {
<<<<<<< public static BayesNetDoubleAsContinuous createSimpleGaussian(double mu, double sigma, Random random) { GaussianVertex A = new GaussianVertex(mu, sigma, random); ======= public static BayesNet createSimpleGaussian(double mu, double sigma, KeanuRandom random) { GaussianVertex A = new GaussianVertex(mu, sigma); >>>>>>> public static BayesNetDoubleAsContinuous createSimpleGaussian(double mu, double sigma, KeanuRandom random) { GaussianVertex A = new GaussianVertex(mu, sigma); <<<<<<< public static BayesNetDoubleAsContinuous createSumOfGaussianDistribution(double mu, double sigma, double observedSum, Random random) { ======= public static BayesNet createSumOfGaussianDistribution(double mu, double sigma, double observedSum, KeanuRandom random) { >>>>>>> public static BayesNetDoubleAsContinuous createSumOfGaussianDistribution(double mu, double sigma, double observedSum, KeanuRandom random) { <<<<<<< public static BayesNetDoubleAsContinuous create2DDonutDistribution(Random random) { DoubleVertex A = new GaussianVertex(0, 1, random); DoubleVertex B = new GaussianVertex(0, 1, random); ======= public static BayesNet create2DDonutDistribution(KeanuRandom random) { DoubleVertex A = new GaussianVertex(0, 1); DoubleVertex B = new GaussianVertex(0, 1); >>>>>>> public static BayesNetDoubleAsContinuous create2DDonutDistribution(KeanuRandom random) { DoubleVertex A = new GaussianVertex(0, 1); DoubleVertex B = new GaussianVertex(0, 1);
<<<<<<< @Override ======= @SaveParentVertex(LEFT_NAME) >>>>>>> @Override @SaveParentVertex(LEFT_NAME) <<<<<<< @Override ======= @SaveParentVertex(RIGHT_NAME) >>>>>>> @Override @SaveParentVertex(RIGHT_NAME)
<<<<<<< import static io.improbable.keanu.distributions.hyperparam.Diffs.MU; import static io.improbable.keanu.distributions.hyperparam.Diffs.SIGMA; import static io.improbable.keanu.distributions.hyperparam.Diffs.X; import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar; import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar; import java.util.HashMap; import java.util.Map; import java.util.Set; import io.improbable.keanu.annotation.ExportVertexToPythonBindings; ======= >>>>>>> import io.improbable.keanu.annotation.ExportVertexToPythonBindings;
<<<<<<< public TriangularVertex(@LoadVertexParam(X_MIN_NAME) DoubleVertex xMin, @LoadVertexParam(X_MAX_NAME) DoubleVertex xMax, @LoadVertexParam(C_NAME) DoubleVertex c) { this(checkHasSingleNonScalarShapeOrAllScalar(xMin.getShape(), xMax.getShape(), c.getShape()), xMin, xMax, c); ======= public TriangularVertex(@LoadParentVertex(X_MIN_NAME) DoubleVertex xMin, @LoadParentVertex(X_MAX_NAME) DoubleVertex xMax, @LoadParentVertex(C_NAME) DoubleVertex c) { this(checkHasOneNonLengthOneShapeOrAllLengthOne(xMin.getShape(), xMax.getShape(), c.getShape()), xMin, xMax, c); >>>>>>> public TriangularVertex(@LoadVertexParam(X_MIN_NAME) DoubleVertex xMin, @LoadVertexParam(X_MAX_NAME) DoubleVertex xMax, @LoadVertexParam(C_NAME) DoubleVertex c) { this(checkHasOneNonLengthOneShapeOrAllLengthOne(xMin.getShape(), xMax.getShape(), c.getShape()), xMin, xMax, c);
<<<<<<< protected DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) { ======= public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) { >>>>>>> public DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
<<<<<<< public final Map<String, DoubleTensor> dLogProbAtValue() { ======= public Map<Long, DoubleTensor> dLogProbAtValue() { >>>>>>> public final Map<Long, DoubleTensor> dLogProbAtValue() {
<<<<<<< protected DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) { ======= public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) { >>>>>>> public DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
<<<<<<< public MinVertex(@LoadVertexParam(LEFT_NAME) DoubleVertex left, @LoadVertexParam(RIGHT_NAME) DoubleVertex right) { super(left.getShape(), left.lessThanOrEqualTo(right), left, right); ======= public MinVertex(@LoadParentVertex(LEFT_NAME) DoubleVertex left, @LoadParentVertex(RIGHT_NAME) DoubleVertex right) { super(left.lessThanOrEqualTo(right), left, right); >>>>>>> public MinVertex(@LoadVertexParam(LEFT_NAME) DoubleVertex left, @LoadVertexParam(RIGHT_NAME) DoubleVertex right) { super(left.lessThanOrEqualTo(right), left, right);
<<<<<<< import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasOneNonLengthOneShapeOrAllLengthOne; ======= @DisplayInformationForOutput(displayName = "/") >>>>>>> import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasOneNonLengthOneShapeOrAllLengthOne; @DisplayInformationForOutput(displayName = "/")
<<<<<<< private Beta() { } /** * Computer Generation of Statistical Distributions * by Richard Saucier * ARL-TR-2168 March 2000 * 5.1.2 page 14 */ ======= >>>>>>> private Beta() { } /** * Computer Generation of Statistical Distributions * by Richard Saucier * ARL-TR-2168 March 2000 * 5.1.2 page 14 */
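The comment kept in this resolution cites Saucier's report for beta generation. Independent of which recipe that report's section 5.1.2 uses, a standard way to draw Beta(a, b) is the two-gamma ratio X / (X + Y); the sketch below assumes that construction and uses the Marsaglia-Tsang gamma sampler, so it is an illustration, not the cited algorithm:

    import java.util.Random;

    // Sketch: Beta(a, b) = X / (X + Y) with X ~ Gamma(a, 1) and Y ~ Gamma(b, 1).
    final class BetaSamplerSketch {
        private static final Random RNG = new Random();

        static double nextBeta(double a, double b) {
            double x = nextGamma(a);
            double y = nextGamma(b);
            return x / (x + y);
        }

        // Marsaglia-Tsang (2000) gamma sampler for shape >= 1, with the usual
        // boosting trick Gamma(a) = Gamma(a + 1) * U^(1/a) for shape < 1.
        static double nextGamma(double shape) {
            if (shape < 1.0) {
                return nextGamma(shape + 1.0) * Math.pow(RNG.nextDouble(), 1.0 / shape);
            }
            double d = shape - 1.0 / 3.0;
            double c = 1.0 / Math.sqrt(9.0 * d);
            while (true) {
                double z = RNG.nextGaussian();
                double v = Math.pow(1.0 + c * z, 3);
                if (v <= 0) {
                    continue;
                }
                double u = RNG.nextDouble();
                if (Math.log(u) < 0.5 * z * z + d - d * v + d * Math.log(v)) {
                    return d * v;
                }
            }
        }
    }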
<<<<<<< import io.improbable.keanu.util.ProgressBar; ======= import io.improbable.keanu.util.status.StatusBar; import io.improbable.keanu.vertices.ProbabilityCalculator; import io.improbable.keanu.vertices.Vertex; import io.improbable.keanu.vertices.VertexId; >>>>>>> import io.improbable.keanu.util.status.StatusBar; <<<<<<< @Override public NetworkSamplesGenerator generatePosteriorSamples(final ProbabilisticModel model, final List<? extends Variable> fromVariables) { Preconditions.checkArgument(model instanceof ProbabilisticModelWithGradient, "NUTS requires a model on which gradients can be calculated."); return new NetworkSamplesGenerator(setupSampler((ProbabilisticModelWithGradient) model, fromVariables), ProgressBar::new); ======= return new NetworkSamplesGenerator(setupSampler(bayesNet, fromVertices), StatusBar::new); >>>>>>> @Override public NetworkSamplesGenerator generatePosteriorSamples(final ProbabilisticModel model, final List<? extends Variable> fromVariables) { Preconditions.checkArgument(model instanceof ProbabilisticModelWithGradient, "NUTS requires a model on which gradients can be calculated."); return new NetworkSamplesGenerator(setupSampler((ProbabilisticModelWithGradient) model, fromVariables), StatusBar::new);
<<<<<<< public LogNormalVertex(DoubleVertex mu, DoubleVertex sigma) { this(checkHasOneNonLengthOneShapeOrAllLengthOne(mu.getShape(), sigma.getShape()), mu, sigma); ======= @ExportVertexToPythonBindings public LogNormalVertex(@LoadParentVertex(MU_NAME) DoubleVertex mu, @LoadParentVertex(SIGMA_NAME) DoubleVertex sigma) { this(checkHasSingleNonScalarShapeOrAllScalar(mu.getShape(), sigma.getShape()), mu, sigma); >>>>>>> @ExportVertexToPythonBindings public LogNormalVertex(@LoadParentVertex(MU_NAME) DoubleVertex mu, @LoadParentVertex(SIGMA_NAME) DoubleVertex sigma) { this(checkHasOneNonLengthOneShapeOrAllLengthOne(mu.getShape(), sigma.getShape()), mu, sigma);
<<<<<<< public void fit(INPUT input, OUTPUT output) { modelGraph.observeValues(input, output); KeanuOptimizer.Gradient.of(modelGraph.getBayesianNetwork()).maxLikelihood(); ======= public void fit(ModelGraph modelGraph) { GradientOptimizer.of(modelGraph.getBayesianNetwork()).maxLikelihood(); >>>>>>> public void fit(ModelGraph modelGraph) { KeanuOptimizer.Gradient.of(modelGraph.getBayesianNetwork()).maxLikelihood();
<<<<<<< import io.improbable.keanu.distributions.dual.Diffs; ======= import io.improbable.keanu.tensor.TensorShape; >>>>>>> import io.improbable.keanu.distributions.dual.Diffs; import io.improbable.keanu.tensor.TensorShape; <<<<<<< ======= import java.util.Map; import static io.improbable.keanu.tensor.TensorShape.shapeToDesiredRankByPrependingOnes; import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar; import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar; >>>>>>> import static io.improbable.keanu.tensor.TensorShape.shapeToDesiredRankByPrependingOnes; import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar; import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar; <<<<<<< Diffs dlnP = Logistic.withParameters(mu.getValue(), s.getValue()).dLogProb(value); return convertDualNumbersToDiff(dlnP.get(MU).getValue(), dlnP.get(S).getValue(), dlnP.get(X).getValue()); ======= Logistic.DiffLogP dlnP = Logistic.dlnPdf(mu.getValue(), s.getValue(), value); return convertDualNumbersToDiff(dlnP.dLogPdmu, dlnP.dLogPds, dlnP.dLogPdx); >>>>>>> Diffs dlnP = Logistic.withParameters(mu.getValue(), s.getValue()).dLogProb(value); return convertDualNumbersToDiff(dlnP.get(MU).getValue(), dlnP.get(S).getValue(), dlnP.get(X).getValue());
<<<<<<< ProbabilisticWithGradientGraph gradientGraph; if (USE_TENSORFLOW) { gradientGraph = TensorflowGraphConverter.convertWithGradient(bayesNet); } else { bayesNet.cascadeObservations(); gradientGraph = KeanuGraphConverter.convertWithGradient(bayesNet); } final List<String> latentVariables = gradientGraph.getLatentVariables(); final List<String> sampleFrom = sampleFromVertices.stream() .map(Vertex::getUniqueStringReference) .collect(Collectors.toList()); ======= Preconditions.checkArgument(!sampleFromVertices.isEmpty(), "List of vertices to sample from is empty"); bayesNet.cascadeObservations(); >>>>>>> Preconditions.checkArgument(!sampleFromVertices.isEmpty(), "List of vertices to sample from is empty"); ProbabilisticWithGradientGraph gradientGraph; if (USE_TENSORFLOW) { gradientGraph = TensorflowGraphConverter.convertWithGradient(bayesNet); } else { bayesNet.cascadeObservations(); gradientGraph = KeanuGraphConverter.convertWithGradient(bayesNet); } final List<String> latentVariables = gradientGraph.getLatentVariables(); final List<String> sampleFrom = sampleFromVertices.stream() .map(Vertex::getUniqueStringReference) .collect(Collectors.toList());
<<<<<<< import io.improbable.keanu.KeanuRandom; import io.improbable.keanu.tensor.bool.BooleanTensor; ======= >>>>>>> import io.improbable.keanu.tensor.bool.BooleanTensor;
<<<<<<< public InverseGammaVertex(DoubleVertex alpha, DoubleVertex beta) { this(checkHasOneNonLengthOneShapeOrAllLengthOne(alpha.getShape(), beta.getShape()), alpha, beta); ======= @ExportVertexToPythonBindings public InverseGammaVertex(@LoadParentVertex(ALPHA_NAME) DoubleVertex alpha, @LoadParentVertex(BETA_NAME) DoubleVertex beta) { this(checkHasSingleNonScalarShapeOrAllScalar(alpha.getShape(), beta.getShape()), alpha, beta); >>>>>>> @ExportVertexToPythonBindings public InverseGammaVertex(@LoadParentVertex(ALPHA_NAME) DoubleVertex alpha, @LoadParentVertex(BETA_NAME) DoubleVertex beta) { this(checkHasOneNonLengthOneShapeOrAllLengthOne(alpha.getShape(), beta.getShape()), alpha, beta);
<<<<<<< import io.improbable.keanu.algorithms.variational.optimizer.*; ======= import io.improbable.keanu.algorithms.ProbabilisticModelWithGradient; >>>>>>> import io.improbable.keanu.algorithms.ProbabilisticModelWithGradient; import io.improbable.keanu.algorithms.VariableReference; import io.improbable.keanu.algorithms.variational.optimizer.FitnessFunction; import io.improbable.keanu.algorithms.variational.optimizer.FitnessFunctionGradient; import io.improbable.keanu.algorithms.variational.optimizer.OptimizedResult; <<<<<<< import io.improbable.keanu.algorithms.variational.optimizer.ProbabilisticWithGradientGraph; import io.improbable.keanu.algorithms.variational.optimizer.nongradient.LogLikelihoodFitnessFunction; import io.improbable.keanu.algorithms.variational.optimizer.nongradient.LogProbFitnessFunction; import io.improbable.keanu.tensor.dbl.DoubleTensor; ======= >>>>>>> import io.improbable.keanu.algorithms.variational.optimizer.nongradient.LogLikelihoodFitnessFunction; import io.improbable.keanu.algorithms.variational.optimizer.nongradient.LogProbFitnessFunction; import io.improbable.keanu.tensor.dbl.DoubleTensor; <<<<<<< public OptimizedResult maxAPosteriori() { return optimize(probabilisticWithGradientGraph, false); ======= public double maxAPosteriori() { assertHasLatents(); FitnessFunctionWithGradient fitnessFunction = new FitnessFunctionWithGradient( probabilisticModel, false, this::handleGradientCalculation, this::handleFitnessCalculation ); return optimize(fitnessFunction); >>>>>>> public OptimizedResult maxAPosteriori() { return optimize(probabilisticModelWithGradient, false); <<<<<<< FitnessFunction fitnessFunction; FitnessFunctionGradient fitnessFunctionGradient; ======= FitnessFunctionWithGradient fitnessFunction = new FitnessFunctionWithGradient( probabilisticModel, true, this::handleGradientCalculation, this::handleFitnessCalculation ); >>>>>>> FitnessFunction fitnessFunction; FitnessFunctionGradient fitnessFunctionGradient; <<<<<<< private OptimizedResult optimize(FitnessFunction fitnessFunction, FitnessFunctionGradient fitnessFunctionGradient) { ======= double[] startingPoint = Optimizer.convertToPoint(getAsDoubleTensors(probabilisticModel.getLatentVariables())); >>>>>>> private OptimizedResult optimize(FitnessFunction fitnessFunction, FitnessFunctionGradient fitnessFunctionGradient) { <<<<<<< private ProbabilisticWithGradientGraph probabilisticWithGradientGraph; private GradientOptimizationAlgorithm gradientOptimizationAlgorithm = ConjugateGradient.builder().build(); private boolean checkInitialFitnessConditions = true; ======= private ProbabilisticModelWithGradient probabilisticModel; private int maxEvaluations = Integer.MAX_VALUE; private double relativeThreshold = 1e-8; private double absoluteThreshold = 1e-8; private UpdateFormula updateFormula = UpdateFormula.POLAK_RIBIERE; >>>>>>> private ProbabilisticModelWithGradient probabilisticModelWithGradient; private GradientOptimizationAlgorithm gradientOptimizationAlgorithm = ConjugateGradient.builder().build(); private boolean checkInitialFitnessConditions = true;
<<<<<<< import static org.junit.Assert.assertEquals; import java.util.List; import java.util.function.Function; import io.improbable.keanu.algorithms.variational.GradientOptimizer; ======= import io.improbable.keanu.algorithms.variational.optimizer.gradient.GradientOptimizer; >>>>>>> import static org.junit.Assert.assertEquals; import java.util.List; import java.util.function.Function; import io.improbable.keanu.algorithms.variational.optimizer.gradient.GradientOptimizer;
<<<<<<< import io.improbable.keanu.distributions.continuous.SmoothUniformDistribution; import io.improbable.keanu.tensor.dbl.DoubleTensor; ======= import io.improbable.keanu.distributions.continuous.SmoothUniform; >>>>>>> import io.improbable.keanu.distributions.continuous.SmoothUniform; import io.improbable.keanu.tensor.dbl.DoubleTensor;
<<<<<<< @Test public void youCanCheckForZeros() { IntegerTensor containsZero = IntegerTensor.create(new int[]{ 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, -0, 1}, 3, 2); BooleanTensor expectedMask = BooleanTensor.create(new boolean[]{ false, true, true, true, false, true}, 3, 2); TensorValidator<Integer, IntegerTensor> validator = TensorValidator.thatExpectsNotToFind(0); assertThat(validator.check(containsZero), equalTo(expectedMask)); } @Test public void youCanFixAValidationIssueByReplacingTheValue() { IntegerTensor containsMinusOne = IntegerTensor.create(1, 0, -1); IntegerTensor expectedResult = IntegerTensor.create(1, 0, 0); TensorValidator<Integer, IntegerTensor> validator = TensorValidator.thatReplaces(-1, 0); containsMinusOne = validator.validate(containsMinusOne); assertThat(containsMinusOne, equalTo(expectedResult)); } @Test public void youCanFixACustomValidationIssueByReplacingTheValue() { IntegerTensor containsMinusOne = IntegerTensor.create(1, 0, -1); IntegerTensor expectedResult = IntegerTensor.create(1, 0, 0); TensorValidator<Integer, IntegerTensor> validator = TensorValidator.thatFixesElementwise(x -> x >= 0, TensorValidationPolicy.changeValueTo(0)); containsMinusOne = validator.validate(containsMinusOne); assertThat(containsMinusOne, equalTo(expectedResult)); } ======= @Test public void canFindScalarMinAndMax() { IntegerTensor a = IntegerTensor.create(5, 4, 3, 2).reshape(2, 2); int min = a.min(); int max = a.max(); assertEquals(2, min); assertEquals(5, max); } @Test public void canFindMinAndMaxFromScalarToTensor() { IntegerTensor a = IntegerTensor.create(5, 4, 3, 2).reshape(1, 4); IntegerTensor b = IntegerTensor.create(3); IntegerTensor min = IntegerTensor.min(a, b); IntegerTensor max = IntegerTensor.max(a, b); assertArrayEquals(new int[]{3, 3, 3, 2}, min.asFlatIntegerArray()); assertArrayEquals(new int[]{5, 4, 3, 3}, max.asFlatIntegerArray()); } @Test public void canFindElementWiseMinAndMax() { IntegerTensor a = IntegerTensor.create(1, 2, 3, 4).reshape(1, 4); IntegerTensor b = IntegerTensor.create(2, 3, 1, 4).reshape(1, 4); IntegerTensor min = IntegerTensor.min(a, b); IntegerTensor max = IntegerTensor.max(a, b); assertArrayEquals(new int[]{1, 2, 1, 4}, min.asFlatIntegerArray()); assertArrayEquals(new int[]{2, 3, 3, 4}, max.asFlatIntegerArray()); } >>>>>>> @Test public void canFindScalarMinAndMax() { IntegerTensor a = IntegerTensor.create(5, 4, 3, 2).reshape(2, 2); int min = a.min(); int max = a.max(); assertEquals(2, min); assertEquals(5, max); } @Test public void canFindMinAndMaxFromScalarToTensor() { IntegerTensor a = IntegerTensor.create(5, 4, 3, 2).reshape(1, 4); IntegerTensor b = IntegerTensor.create(3); IntegerTensor min = IntegerTensor.min(a, b); IntegerTensor max = IntegerTensor.max(a, b); assertArrayEquals(new int[]{3, 3, 3, 2}, min.asFlatIntegerArray()); assertArrayEquals(new int[]{5, 4, 3, 3}, max.asFlatIntegerArray()); } @Test public void canFindElementWiseMinAndMax() { IntegerTensor a = IntegerTensor.create(1, 2, 3, 4).reshape(1, 4); IntegerTensor b = IntegerTensor.create(2, 3, 1, 4).reshape(1, 4); IntegerTensor min = IntegerTensor.min(a, b); IntegerTensor max = IntegerTensor.max(a, b); assertArrayEquals(new int[]{1, 2, 1, 4}, min.asFlatIntegerArray()); assertArrayEquals(new int[]{2, 3, 3, 4}, max.asFlatIntegerArray()); } @Test public void youCanCheckForZeros() { IntegerTensor containsZero = IntegerTensor.create(new int[]{ 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, -0, 1}, 3, 2); BooleanTensor expectedMask = BooleanTensor.create(new boolean[]{ false, true, true, true, false, true}, 3, 2); TensorValidator<Integer, IntegerTensor> validator = TensorValidator.thatExpectsNotToFind(0); assertThat(validator.check(containsZero), equalTo(expectedMask)); } @Test public void youCanFixAValidationIssueByReplacingTheValue() { IntegerTensor containsMinusOne = IntegerTensor.create(1, 0, -1); IntegerTensor expectedResult = IntegerTensor.create(1, 0, 0); TensorValidator<Integer, IntegerTensor> validator = TensorValidator.thatReplaces(-1, 0); containsMinusOne = validator.validate(containsMinusOne); assertThat(containsMinusOne, equalTo(expectedResult)); } @Test public void youCanFixACustomValidationIssueByReplacingTheValue() { IntegerTensor containsMinusOne = IntegerTensor.create(1, 0, -1); IntegerTensor expectedResult = IntegerTensor.create(1, 0, 0); TensorValidator<Integer, IntegerTensor> validator = TensorValidator.thatFixesElementwise(x -> x >= 0, TensorValidationPolicy.changeValueTo(0)); containsMinusOne = validator.validate(containsMinusOne); assertThat(containsMinusOne, equalTo(expectedResult)); }
<<<<<<< import java.util.HashMap; import java.util.Map; ======= >>>>>>> import java.util.HashMap; import java.util.Map;
<<<<<<< import io.improbable.keanu.backend.tensorflow.TensorflowProbabilisticModel; import io.improbable.keanu.backend.tensorflow.TensorflowProbabilisticModelWithGradient; import io.improbable.keanu.network.BayesianNetwork; import io.improbable.keanu.vertices.dbl.DoubleVertex; ======= import io.improbable.keanu.tensor.dbl.DoubleTensor; >>>>>>> import io.improbable.keanu.backend.tensorflow.TensorflowProbabilisticModel; import io.improbable.keanu.backend.tensorflow.TensorflowProbabilisticModelWithGradient; import io.improbable.keanu.network.BayesianNetwork; import io.improbable.keanu.tensor.dbl.DoubleTensor; import io.improbable.keanu.vertices.dbl.DoubleVertex; <<<<<<< ======= import java.util.Map; >>>>>>> import java.util.Map; <<<<<<< public void keanuGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getKeanuGradientOptimizer()); } @Test public void tensorflowGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getTensorflowGradientOptimizer()); } @Test public void keanuNonGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getKeanuNonGradientOptimizer()); } @Test public void tensorflowNonGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getTensorflowNonGradientOptimizer()); } @Test public void keanuGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getKeanuGradientOptimizer()); } @Test public void tensorflowGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getTensorflowGradientOptimizer()); } @Test public void keanuNonGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getKeanuNonGradientOptimizer()); } @Test public void tensorflowNonGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getTensorflowNonGradientOptimizer()); } private Function<BayesianNetwork, Optimizer> getKeanuGradientOptimizer() { return (bayesNet) -> KeanuOptimizer.Gradient.of(bayesNet); } private Function<BayesianNetwork, Optimizer> getKeanuNonGradientOptimizer() { return (bayesNet) -> KeanuOptimizer.NonGradient.of(bayesNet); } private Function<BayesianNetwork, Optimizer> getTensorflowGradientOptimizer() { return (bayesNet) -> GradientOptimizer.builder() .probabilisticModel(TensorflowProbabilisticModelWithGradient.convert(bayesNet)) .build(); } private Function<BayesianNetwork, Optimizer> getTensorflowNonGradientOptimizer() { return (bayesNet) -> NonGradientOptimizer.builder() .probabilisticModel(TensorflowProbabilisticModel.convert(bayesNet)) .build(); } private void assertCanCalculateMaxLikelihood(Function<BayesianNetwork, Optimizer> optimizerMapper) { DoubleVertex A = new GaussianVertex(20.0, 1.0); DoubleVertex B = new GaussianVertex(20.0, 1.0); A.setValue(20.0); B.setAndCascade(20.0); DoubleVertex Cobserved = new GaussianVertex(A.plus(B), 1.0); Cobserved.observe(44.0); BayesianNetwork bayesNet = new BayesianNetwork(A.getConnectedGraph()); Optimizer optimizer = optimizerMapper.apply(bayesNet); OptimizedResult optimizedResult = optimizer.maxLikelihood(); double maxA = optimizedResult.get(A.getReference()).scalar(); double maxB = optimizedResult.get(B.getReference()).scalar(); assertEquals(44, maxA + maxB, 0.1); } public void assertCanCalculateMaxAPosteriori(Function<BayesianNetwork, Optimizer> optimizerMapper) { DoubleVertex A = new GaussianVertex(20.0, 1.0); DoubleVertex B = new GaussianVertex(20.0, 1.0); A.setValue(21.5); B.setAndCascade(21.5); DoubleVertex Cobserved = new GaussianVertex(A.plus(B), 1.0); Cobserved.observe(46.0); BayesianNetwork bayesNet = new BayesianNetwork(A.getConnectedGraph()); Optimizer optimizer = optimizerMapper.apply(bayesNet); OptimizedResult optimizedResult = optimizer.maxAPosteriori(); double maxA = optimizedResult.get(A.getReference()).scalar(); double maxB = optimizedResult.get(B.getReference()).scalar(); assertEquals(22, maxA, 0.1); assertEquals(22, maxB, 0.1); } @Test ======= >>>>>>> public void keanuGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getKeanuGradientOptimizer()); } @Test public void tensorflowGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getTensorflowGradientOptimizer()); } @Test public void keanuNonGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getKeanuNonGradientOptimizer()); } @Test public void tensorflowNonGradientOptimizerCanMLE() { assertCanCalculateMaxLikelihood(getTensorflowNonGradientOptimizer()); } @Test public void keanuGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getKeanuGradientOptimizer()); } @Test public void tensorflowGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getTensorflowGradientOptimizer()); } @Test public void keanuNonGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getKeanuNonGradientOptimizer()); } @Test public void tensorflowNonGradientOptimizerCanMAP() { assertCanCalculateMaxAPosteriori(getTensorflowNonGradientOptimizer()); } private Function<BayesianNetwork, Optimizer> getKeanuGradientOptimizer() { return (bayesNet) -> Keanu.Optimizer.Gradient.of(bayesNet); } private Function<BayesianNetwork, Optimizer> getKeanuNonGradientOptimizer() { return (bayesNet) -> Keanu.Optimizer.NonGradient.of(bayesNet); } private Function<BayesianNetwork, Optimizer> getTensorflowGradientOptimizer() { return (bayesNet) -> GradientOptimizer.builder() .probabilisticModel(TensorflowProbabilisticModelWithGradient.convert(bayesNet)) .build(); } private Function<BayesianNetwork, Optimizer> getTensorflowNonGradientOptimizer() { return (bayesNet) -> NonGradientOptimizer.builder() .probabilisticModel(TensorflowProbabilisticModel.convert(bayesNet)) .build(); } private void assertCanCalculateMaxLikelihood(Function<BayesianNetwork, Optimizer> optimizerMapper) { DoubleVertex A = new GaussianVertex(20.0, 1.0); DoubleVertex B = new GaussianVertex(20.0, 1.0); A.setValue(20.0); B.setAndCascade(20.0); DoubleVertex Cobserved = new GaussianVertex(A.plus(B), 1.0); Cobserved.observe(44.0); BayesianNetwork bayesNet = new BayesianNetwork(A.getConnectedGraph()); Optimizer optimizer = optimizerMapper.apply(bayesNet); OptimizedResult optimizedResult = optimizer.maxLikelihood(); double maxA = optimizedResult.getValueFor(A.getReference()).scalar(); double maxB = optimizedResult.getValueFor(B.getReference()).scalar(); assertEquals(44, maxA + maxB, 0.1); } public void assertCanCalculateMaxAPosteriori(Function<BayesianNetwork, Optimizer> optimizerMapper) { DoubleVertex A = new GaussianVertex(20.0, 1.0); DoubleVertex B = new GaussianVertex(20.0, 1.0); A.setValue(21.5); B.setAndCascade(21.5); DoubleVertex Cobserved = new GaussianVertex(A.plus(B), 1.0); Cobserved.observe(46.0); BayesianNetwork bayesNet = new BayesianNetwork(A.getConnectedGraph()); Optimizer optimizer = optimizerMapper.apply(bayesNet); OptimizedResult optimizedResult = optimizer.maxAPosteriori(); double maxA = optimizedResult.getValueFor(A.getReference()).scalar(); double maxB = optimizedResult.getValueFor(B.getReference()).scalar(); assertEquals(22, maxA, 0.1); assertEquals(22, maxB, 0.1); } @Test
<<<<<<< public MaxVertex(DoubleVertex left, DoubleVertex right) { super(left.greaterThanOrEqualTo(right), left, right); ======= @ExportVertexToPythonBindings public MaxVertex(@LoadParentVertex(LEFT_NAME) DoubleVertex left, @LoadParentVertex(RIGHT_NAME) DoubleVertex right) { super(left.getShape(), left.greaterThanOrEqualTo(right), left, right); >>>>>>> @ExportVertexToPythonBindings public MaxVertex(@LoadParentVertex(LEFT_NAME) DoubleVertex left, @LoadParentVertex(RIGHT_NAME) DoubleVertex right) { super(left.greaterThanOrEqualTo(right), left, right);
<<<<<<< public static Map<String, DoubleTensor> getJointLogProbGradientWrtLatents(List<Vertex> probabilisticVertices) { final Map<String, DoubleTensor> diffOfLogWrt = new HashMap<>(); ======= public static Map<String, Double> getJointLogProbGradientWrtLatents(List<? extends Vertex> probabilisticVertices) { final Map<String, Double> diffOfLogWrt = new HashMap<>(); >>>>>>> public static Map<String, DoubleTensor> getJointLogProbGradientWrtLatents(List<? extends Vertex> probabilisticVertices) { final Map<String, DoubleTensor> diffOfLogWrt = new HashMap<>();
<<<<<<< @Override DoubleTensor tad(int dimension, int index); ======= DoubleTensor concat(int dimension, DoubleTensor... those); >>>>>>> @Override DoubleTensor tad(int dimension, int index); DoubleTensor concat(int dimension, DoubleTensor... those);
<<<<<<< DoubleTensor slice(int dimension, int[] index); DoubleTensor tensorAlongDimension(int index, int... dimension); ======= default List<DoubleTensor> sliceAlongDimension(int dimension, int indexStart, int indexEnd) { List<DoubleTensor> slicedTensors = new ArrayList<>(); for (int i = indexStart; i < indexEnd; i++) { slicedTensors.add(slice(dimension, i)); } return slicedTensors; } >>>>>>> default List<DoubleTensor> sliceAlongDimension(int dimension, int indexStart, int indexEnd) { List<DoubleTensor> slicedTensors = new ArrayList<>(); for (int i = indexStart; i < indexEnd; i++) { slicedTensors.add(slice(dimension, i)); } return slicedTensors; } DoubleTensor slice(int dimension, int[] index); DoubleTensor tensorAlongDimension(int index, int... dimension);
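The default sliceAlongDimension kept here is just a loop over the abstract slice(dimension, i) primitive. The same interface pattern in isolation (Sliceable is a hypothetical name, not Keanu's DoubleTensor):

    import java.util.ArrayList;
    import java.util.List;

    // One abstract primitive plus a default method composed from it.
    interface Sliceable<T> {
        T slice(int dimension, int index);

        default List<T> sliceAlongDimension(int dimension, int indexStart, int indexEnd) {
            List<T> slices = new ArrayList<>();
            for (int i = indexStart; i < indexEnd; i++) {
                slices.add(slice(dimension, i));
            }
            return slices;
        }
    }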
<<<<<<< import io.improbable.keanu.util.ProgressBar; ======= import io.improbable.keanu.vertices.VertexId; >>>>>>> import io.improbable.keanu.util.ProgressBar; import io.improbable.keanu.vertices.VertexId;
<<<<<<< import io.improbable.keanu.network.BayesianNetwork; ======= import io.improbable.keanu.DeterministicRule; import io.improbable.keanu.network.BayesNet; >>>>>>> import io.improbable.keanu.DeterministicRule; import io.improbable.keanu.network.BayesianNetwork;
<<<<<<< public <T extends Vertex<?>> void addAll(Collection<T> vertices) { for (T v : vertices) { add(v); } } public <T extends Vertex<?>> T add(T v) throws PlateException { if (v.getLabel() == null) { throw new PlateException("Vertex " + v + " has no label"); ======= public <T extends Vertex<?>> T add(T v) { VertexLabel label = v.getLabel(); if (label == null) { throw new PlateException("Vertex " + v + " must contain a label in order to be added to a plate"); } String outerNamespace = label.getOuterNamespace().orElse(""); if (NAME_REGEX.matcher(outerNamespace).matches()) { throw new PlateException("Vertex " + v + " has already been added to " + outerNamespace); >>>>>>> public <T extends Vertex<?>> void addAll(Collection<T> vertices) { for (T v : vertices) { add(v); } } public <T extends Vertex<?>> T add(T v) { VertexLabel label = v.getLabel(); if (label == null) { throw new PlateException("Vertex " + v + " must contain a label in order to be added to a plate"); } String outerNamespace = label.getOuterNamespace().orElse(""); if (NAME_REGEX.matcher(outerNamespace).matches()) { throw new PlateException("Vertex " + v + " has already been added to " + outerNamespace);
<<<<<<< import io.improbable.keanu.vertices.NonProbabilistic; ======= import io.improbable.keanu.vertices.Vertex; >>>>>>> import io.improbable.keanu.vertices.NonProbabilistic; import io.improbable.keanu.vertices.Vertex; <<<<<<< ======= import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.DualNumber; import io.improbable.keanu.vertices.update.NonProbabilisticValueUpdater; >>>>>>> import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.DualNumber; <<<<<<< public DoubleUnaryOpVertex(int[] shape, DoubleVertex inputVertex) { ======= public DoubleUnaryOpVertex( DoubleVertex inputVertex) { this(inputVertex.getShape(), inputVertex); } /** * A vertex that performs a user defined operation on a single input vertex * * @param shape the shape of the tensor * @param inputVertex the input vertex */ public DoubleUnaryOpVertex( int[] shape, DoubleVertex inputVertex) { super(new NonProbabilisticValueUpdater<>(v -> ((DoubleUnaryOpVertex) v).op(inputVertex.getValue()))); >>>>>>> public DoubleUnaryOpVertex( DoubleVertex inputVertex) { this(inputVertex.getShape(), inputVertex); } /** * A vertex that performs a user defined operation on a single input vertex * * @param shape the shape of the tensor * @param inputVertex the input vertex */ public DoubleUnaryOpVertex(int[] shape, DoubleVertex inputVertex) { <<<<<<< @Override public DoubleTensor calculate() { return op(inputVertex.getValue()); } protected abstract DoubleTensor op(DoubleTensor a); ======= @Override public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) { try { return dualOp(dualNumbers.get(inputVertex)); } catch (UnsupportedOperationException e) { return super.calculateDualNumber(dualNumbers); } } protected abstract DoubleTensor op(DoubleTensor value); >>>>>>> @Override public DoubleTensor calculate() { return op(inputVertex.getValue()); } @Override public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) { try { return dualOp(dualNumbers.get(inputVertex)); } catch (UnsupportedOperationException e) { return super.calculateDualNumber(dualNumbers); } } protected abstract DoubleTensor op(DoubleTensor value);
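This resolution keeps both the eager calculate() and the forward-mode calculateDualNumber hook. The dual-number idea those hooks rely on, reduced to scalars (a toy class, not Keanu's DualNumber): carry a value and its derivative together and push both through every operation.

    // Toy forward-mode dual number: value plus derivative, propagated together.
    final class Dual {
        final double value;
        final double derivative;

        Dual(double value, double derivative) {
            this.value = value;
            this.derivative = derivative;
        }

        // Product rule: (uv)' = u'v + uv'
        Dual multiplyBy(Dual other) {
            return new Dual(value * other.value,
                derivative * other.value + value * other.derivative);
        }

        // Chain rule for a unary op such as exp: (e^u)' = e^u * u'
        Dual exp() {
            double e = Math.exp(value);
            return new Dual(e, e * derivative);
        }
    }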
<<<<<<< import io.improbable.keanu.tensor.dbl.DoubleTensor; ======= import io.improbable.keanu.testcategory.Slow; >>>>>>> import io.improbable.keanu.tensor.dbl.DoubleTensor; import io.improbable.keanu.testcategory.Slow;
<<<<<<< public Vertex<Tensor<T>> orElse(Vertex<? extends Tensor<T>> els) { long[] shape = TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new IfVertex<>(shape, predicate, thn, els); ======= public IfVertex<T> orElse(Vertex<? extends Tensor<T>> els) { assertShapesMatchOrAreScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new IfVertex<>(els.getShape(), predicate, thn, els); >>>>>>> public IfVertex<T> orElse(Vertex<? extends Tensor<T>> els) { long[] shape = TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new IfVertex<>(shape, predicate, thn, els); <<<<<<< public BoolVertex orElse(Vertex<? extends BooleanTensor> els) { long[] shape = TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new BooleanIfVertex(shape, predicate, thn, els); ======= public BooleanIfVertex orElse(Vertex<? extends BooleanTensor> els) { assertShapesMatchOrAreScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new BooleanIfVertex(els.getShape(), predicate, thn, els); >>>>>>> public BooleanIfVertex orElse(Vertex<? extends BooleanTensor> els) { long[] shape = TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new BooleanIfVertex(shape, predicate, thn, els); <<<<<<< public DoubleVertex orElse(Vertex<? extends DoubleTensor> els) { long[] shape = TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new DoubleIfVertex(shape, predicate, thn, els); ======= public DoubleIfVertex orElse(Vertex<? extends DoubleTensor> els) { assertShapesMatchOrAreScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new DoubleIfVertex(els.getShape(), predicate, thn, els); >>>>>>> public DoubleIfVertex orElse(Vertex<? extends DoubleTensor> els) { long[] shape = TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar(thn.getShape(), els.getShape(), predicate.getShape()); return new DoubleIfVertex(shape, predicate, thn, els);
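All three orElse variants resolve to validating that the predicate, then, and else shapes either agree or are scalar before the if-vertex is built. That select-with-scalar-broadcast rule over plain arrays, for illustration only (ifThenElse is a hypothetical helper):

    // Element-wise select: each operand must match the predicate's length or be a
    // length-1 "scalar", loosely mirroring checkHasSingleNonScalarShapeOrAllScalar.
    final class IfSketch {
        static double[] ifThenElse(boolean[] predicate, double[] thn, double[] els) {
            int n = predicate.length;
            if ((thn.length != n && thn.length != 1) || (els.length != n && els.length != 1)) {
                throw new IllegalArgumentException("Shapes must match or be scalar");
            }
            double[] out = new double[n];
            for (int i = 0; i < n; i++) {
                out[i] = predicate[i]
                    ? thn[thn.length == 1 ? 0 : i]
                    : els[els.length == 1 ? 0 : i];
            }
            return out;
        }
    }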
<<<<<<< import java.util.Map; import java.util.Set; import com.google.common.collect.ImmutableList; ======= >>>>>>> import com.google.common.collect.ImmutableList; <<<<<<< public class Categorical<T> implements Distribution<Tensor<T>> { ======= import java.util.Map; public class Categorical<T> implements Distribution<T> { >>>>>>> import java.util.Map; import java.util.Set; public class Categorical<T> implements Distribution<Tensor<T>> {
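Stripped of the Tensor<T> plumbing this row resolves, categorical sampling itself is a cumulative-probability walk; a minimal sketch (sampleCategorical is a hypothetical helper, not Keanu's Categorical):

    import java.util.Map;
    import java.util.Random;

    final class CategoricalSketch {
        // Draw one outcome from (possibly unnormalized) weights by walking the
        // cumulative sum until it passes a uniform draw.
        static <T> T sampleCategorical(Map<T, Double> weights, Random rng) {
            double total = weights.values().stream().mapToDouble(Double::doubleValue).sum();
            double u = rng.nextDouble() * total;
            double cumulative = 0.0;
            T last = null;
            for (Map.Entry<T, Double> entry : weights.entrySet()) {
                cumulative += entry.getValue();
                last = entry.getKey();
                if (u < cumulative) {
                    return entry.getKey();
                }
            }
            return last; // guard against floating-point shortfall
        }
    }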
<<<<<<< import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.Infinitesimal; import io.improbable.keanu.vertices.dbltensor.DoubleTensor; ======= import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives; >>>>>>> import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives; import io.improbable.keanu.vertices.dbltensor.DoubleTensor;
<<<<<<< import io.improbable.keanu.testcategory.Slow; ======= import io.improbable.keanu.vertices.ConstantVertex; import io.improbable.keanu.vertices.dbl.DoubleVertex; >>>>>>> import io.improbable.keanu.testcategory.Slow; import io.improbable.keanu.vertices.ConstantVertex; import io.improbable.keanu.vertices.dbl.DoubleVertex;
<<<<<<< import io.improbable.keanu.vertices.Vertex; import io.improbable.keanu.vertices.dbl.Differentiator; ======= >>>>>>> import io.improbable.keanu.vertices.Vertex; import io.improbable.keanu.vertices.dbl.Differentiator; <<<<<<< import java.util.HashMap; import java.util.Map; import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar; ======= >>>>>>> import java.util.HashMap; import java.util.Map; <<<<<<< public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) { Map<Vertex, PartialDerivatives> partials = new HashMap<>(); PartialDerivatives rightPartial = derivativeOfOutputsWithRespectToSelf.multiplyBy(right.getValue()); PartialDerivatives leftPartial = derivativeOfOutputsWithRespectToSelf.multiplyBy(left.getValue()); partials.put(left, Differentiator.reshapeReverseAutoDiff(rightPartial, left.getValue(), right.getValue())); partials.put(right, Differentiator.reshapeReverseAutoDiff(leftPartial, right.getValue(), left.getValue())); return partials; } @Override protected DoubleTensor op(DoubleTensor left, DoubleTensor right) { return left.times(right); ======= protected DualNumber dualOp(DualNumber l, DualNumber r) { return l.multiplyBy(r); >>>>>>> public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) { Map<Vertex, PartialDerivatives> partials = new HashMap<>(); PartialDerivatives rightPartial = derivativeOfOutputsWithRespectToSelf.multiplyBy(right.getValue()); PartialDerivatives leftPartial = derivativeOfOutputsWithRespectToSelf.multiplyBy(left.getValue()); partials.put(left, Differentiator.reshapeReverseAutoDiff(rightPartial, left.getValue(), right.getValue())); partials.put(right, Differentiator.reshapeReverseAutoDiff(leftPartial, right.getValue(), left.getValue())); return partials; } @Override protected DualNumber dualOp(DualNumber l, DualNumber r) { return l.multiplyBy(r);
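The kept reverse-mode pass gives each parent of a multiplication the upstream partial times the other operand's value. On scalars that is the whole rule (a sketch, not Keanu's PartialDerivatives machinery):

    // Reverse-mode product rule: for y = l * r with upstream adjoint g = dL/dy,
    // the parents receive dL/dl = g * r and dL/dr = g * l.
    final class ProductRuleSketch {
        static double[] multiplyReverse(double l, double r, double g) {
            return new double[]{g * r, g * l};
        }
        // e.g. multiplyReverse(3, 5, 1) returns {5, 3}.
    }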
<<<<<<< private Map<Long, DoubleTensor> convertDualNumbersToDiff(DoubleTensor dLogPdLocation, DoubleTensor dLogPdScale, DoubleTensor dLogPdX) { ======= private Map<VertexId, DoubleTensor> convertDualNumbersToDiff(DoubleTensor dLogPdLoc, DoubleTensor dLogPdScale, DoubleTensor dLogPdX) { >>>>>>> private Map<VertexId, DoubleTensor> convertDualNumbersToDiff(DoubleTensor dLogPdLocation, DoubleTensor dLogPdScale, DoubleTensor dLogPdX) {
<<<<<<< private static final Logger log = Logger.getLogger(Module.class); ======= >>>>>>> private static final Logger log = Logger.getLogger(Module.class); <<<<<<< public void visit(State state, Environment env, Properties props) throws Exception { ======= public void visit(final State state, Properties props) throws Exception { >>>>>>> public void visit(final State state, final Environment env, Properties props) throws Exception { <<<<<<< Node initNode = getNode(initNodeId); boolean test = false; if (initNode instanceof Test) { startTimer(initNode); test = true; } initNode.visit(state, env, getProps(initNodeId)); if (test) stopTimer(initNode); // update aliases Set<String> aliases; if ((aliases = aliasMap.get(initNodeId)) != null) for (String alias : aliases) { ((Alias) nodes.get(alias)).update(initNodeId); } String curNodeId = initNodeId; int numHops = 0; long startTime = System.currentTimeMillis() / 1000; while (true) { // check if END state was reached if (curNodeId.equalsIgnoreCase("END")) { log.debug("reached END state"); break; } // check if maxSec was reached long curTime = System.currentTimeMillis() / 1000; if ((curTime - startTime) > maxSec) { log.debug("reached maxSec(" + maxSec + ")"); break; } // check if maxHops was reached if (numHops > maxHops) { log.debug("reached maxHops(" + maxHops + ")"); break; } numHops++; if (!adjMap.containsKey(curNodeId) && !curNodeId.startsWith("alias.")) { throw new Exception("Reached node(" + curNodeId + ") without outgoing edges in module(" + this + ")"); } AdjList adj = adjMap.get(curNodeId); String nextNodeId = adj.randomNeighbor(); Node nextNode = getNode(nextNodeId); if (nextNode instanceof Alias) { nextNodeId = ((Alias) nextNode).getTargetId(); nextNode = ((Alias) nextNode).get(); ======= ExecutorService service = new SimpleThreadPool(1, "RandomWalk Runner"); try { Node initNode = getNode(initNodeId); boolean test = false; if (initNode instanceof Test) { startTimer(initNode); test = true; >>>>>>> ExecutorService service = new SimpleThreadPool(1, "RandomWalk Runner"); try { Node initNode = getNode(initNodeId); boolean test = false; if (initNode instanceof Test) { startTimer(initNode); test = true;
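The loop kept in this resolution bounds a random walk three ways: an END node, a wall-clock budget (maxSec), and a hop budget (maxHops). Its skeleton with the framework types stripped out (walk and the adjacency map are hypothetical stand-ins for the Module's adjMap and randomNeighbor()):

    import java.util.List;
    import java.util.Map;
    import java.util.Random;

    final class RandomWalkSketch {
        // Walk the graph until END, the time budget, or the hop budget is hit.
        static void walk(Map<String, List<String>> adj, String start, long maxSec, int maxHops) {
            Random rng = new Random();
            String current = start;
            long startTime = System.currentTimeMillis() / 1000;
            int hops = 0;
            while (!current.equalsIgnoreCase("END")
                    && (System.currentTimeMillis() / 1000 - startTime) <= maxSec
                    && hops <= maxHops) {
                hops++;
                List<String> neighbors = adj.get(current);
                if (neighbors == null || neighbors.isEmpty()) {
                    throw new IllegalStateException("Node " + current + " has no outgoing edges");
                }
                current = neighbors.get(rng.nextInt(neighbors.size())); // randomNeighbor()
            }
        }
    }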
<<<<<<< DoubleTensor mask = Nd4jDoubleTensor.create(new double[]{1., 1., 1., 1.}, new int[]{2, 2}); ======= DoubleTensor mask = Nd4jDoubleTensor.ones(2, 2); >>>>>>> DoubleTensor mask = Nd4jDoubleTensor.ones(2, 2); <<<<<<< ======= public void canBroadcastMultiplyRank4ContainingVectorAndMatrix() { /* # test to make sure numpy returns same result import numpy as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8]).reshape(2,2,2,1) b = np.array([1, 2, 3, 4]).reshape(2,2,1,1) ab = a * b print(ab) print(np.shape(ab)) */ DoubleTensor rank4 = Nd4jDoubleTensor.create(new double[]{1, 2, 3, 4, 5, 6, 7, 8}, new int[]{2, 2, 2, 1}); DoubleTensor matrix = matrixA.reshape(2, 2, 1, 1); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{1, 2, 6, 8, 15, 18, 28, 32}, new int[]{2, 2, 2, 1}); assertTimesOperationEquals(rank4, matrix, expected); assertTimesInPlaceOperationEquals(rank4, matrix, expected); } @Test public void canBroadcastMultiplyRank4ContainingMatrixAndMatrix() { /* # test to make sure numpy returns same result import numpy as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 4, 3, 2, 1, 7, 5, 8, 6]).reshape(2,2,2,2) b = np.array([1, 2, 3, 4]).reshape(2,2,1,1) ab = a * b print(ab) print(np.shape(ab)) */ DoubleTensor rank4 = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 5, 6, 7, 8, 4, 3, 2, 1, 7, 5, 8, 6 }, new int[]{2, 2, 2, 2}); DoubleTensor matrix = matrixA.reshape(2, 2, 1, 1); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 10, 12, 14, 16, 12, 9, 6, 3, 28, 20, 32, 24 }, new int[]{2, 2, 2, 2}); assertTimesOperationEquals(rank4, matrix, expected); assertTimesInPlaceOperationEquals(rank4, matrix, expected); } @Test public void canBroadcastMultiplyRank5ContainingMatrixAndMatrix() { /* # test to make sure numpy returns same result import numpy as np a = np.array([ 1, 2, 3, 4, 5, 6, 7, 8, 4, 3, 2, 1, 7, 5, 8, 6, 6, 3, 2, 9, 3, 4, 7, 6, 6, 2, 5, 4, 0, 2, 1, 3 ]).reshape(2,2,2,2,2) b = np.array([1, 2, 3, 4]).reshape(2,2,1,1,1) ab = a * b print(ab) print(np.shape(ab)) */ DoubleTensor rank5 = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 5, 6, 7, 8, 4, 3, 2, 1, 7, 5, 8, 6, 6, 3, 2, 9, 3, 4, 7, 6, 6, 2, 5, 4, 0, 2, 1, 3 }, new int[]{2, 2, 2, 2, 2}); DoubleTensor matrix = matrixA.reshape(2, 2, 1, 1, 1); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 5, 6, 7, 8, 8, 6, 4, 2, 14, 10, 16, 12, 18, 9, 6, 27, 9, 12, 21, 18, 24, 8, 20, 16, 0, 8, 4, 12 }, new int[]{2, 2, 2, 2, 2}); assertTimesOperationEquals(rank5, matrix, expected); assertTimesInPlaceOperationEquals(rank5, matrix, expected); } @Test public void canSuperBroadcast() { /* # test to make sure numpy returns same result import numpy as np a = np.zeros([2,2,2,2]) b = np.array([1,0,1,0]).reshape(2,2) ab = a + b print(ab) print(np.shape(ab)) */ DoubleTensor x = Nd4jDoubleTensor.zeros(new int[]{2, 2, 2, 2}); DoubleTensor y = Nd4jDoubleTensor.create(new double[]{1, 0, 1, 0}, new int[]{2, 2}); DoubleTensor diff = x.plus(y); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }, new int[]{2, 2, 2, 2}); assertEquals(expected, diff); } @Test public void canSuperBroadcastInPlace() { DoubleTensor x = Nd4jDoubleTensor.zeros(new int[]{2, 2, 2, 2}); DoubleTensor y = Nd4jDoubleTensor.create(new double[]{1, 0, 1, 0}, new int[]{2, 2}); DoubleTensor diff = x.plusInPlace(y); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }, new int[]{2, 2, 2, 2}); assertEquals(expected, diff); } @Test >>>>>>> <<<<<<< ======= public void canBroadcastAdd() { /* # test to make sure numpy returns same result import numpy as np x = np.array([1,2,3]).reshape(3,1) s = np.array([-5, -2, -3, -7, -8, -5, -2, -3, -7, -8, -5, -2, -3, -7, -8]).reshape(3,5) sx = s + x print(sx) print(np.shape(sx)) */ DoubleTensor x = Nd4jDoubleTensor.create(new double[]{1, 2, 3}, new int[]{3, 1}); DoubleTensor s = Nd4jDoubleTensor.create(new double[]{ -5, -2, -3, -7, -8, -5, -2, -3, -7, -8, -5, -2, -3, -7, -8 }, new int[]{3, 5}); DoubleTensor diff = s.plus(x); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ -4, -1, -2, -6, -7, -3, 0, -1, -5, -6, -2, 1, 0, -4, -5 }, new int[]{3, 5}); assertEquals(expected, diff); } @Test public void canBroadcastSubtract() { /* # test to make sure numpy returns same result import numpy as np x = np.array([-1,-2,-3]).reshape(3,1) s = np.array([-5, -2, -3, -7, -8, -5, -2, -3, -7, -8, -5, -2, -3, -7, -8]).reshape(3,5) sx = s - x print(sx) print(np.shape(sx)) */ DoubleTensor x = Nd4jDoubleTensor.create(new double[]{-1, -2, -3}, new int[]{3, 1}); DoubleTensor s = Nd4jDoubleTensor.create(new double[]{ -5, -2, -3, -7, -8, -5, -2, -3, -7, -8, -5, -2, -3, -7, -8 }, new int[]{3, 5}); DoubleTensor diff = s.minus(x); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ -4, -1, -2, -6, -7, -3, 0, -1, -5, -6, -2, 1, 0, -4, -5 }, new int[]{3, 5}); assertEquals(expected, diff); } @Test public void canBroadcastDivide() { /* # test to make sure numpy returns same result import numpy as np x = np.array([1,2,3]).reshape(3,1) s = np.array([5, 2, 3, 7, 8, 5, 2, 3, 7, 8, 5, 2, 3, 7, 8]).reshape(3,5) sx = s / x print(sx) print(np.shape(sx)) */ DoubleTensor x = Nd4jDoubleTensor.create(new double[]{1, 2, 3}, new int[]{3, 1}); DoubleTensor s = Nd4jDoubleTensor.create(new double[]{ 5, 2, 3, 7, 8, 5, 2, 3, 7, 8, 5, 2, 3, 7, 8 }, new int[]{3, 5}); DoubleTensor diff = s.div(x); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 5 / 1.0, 2 / 1.0, 3 / 1.0, 7 / 1.0, 8 / 1.0, 5 / 2.0, 2 / 2.0, 3 / 2.0, 7 / 2.0, 8 / 2.0, 5 / 3.0, 2 / 3.0, 3 / 3.0, 7 / 3.0, 8 / 3.0 }, new int[]{3, 5}); assertEquals(expected, diff); } @Test >>>>>>> <<<<<<< ======= private void assertTimesOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { DoubleTensor actual = left.times(right); assertEquals(expected, actual); } private void assertTimesInPlaceOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { left = left.timesInPlace(right); assertEquals(expected, left); } private void assertPlusOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { DoubleTensor actual = left.plus(right); assertEquals(expected, actual); } private void assertPlusInPlaceOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { left = left.plusInPlace(right); assertEquals(expected, left); } private void assertDivideOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { DoubleTensor actual = left.div(right); assertEquals(expected, actual); } private void assertDivideInPlaceOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { left = left.divInPlace(right); assertEquals(expected, left); } private void assertMinusOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { DoubleTensor actual = left.minus(right); assertEquals(expected, actual); } private void assertMinusInPlaceOperationEquals(DoubleTensor left, DoubleTensor right, DoubleTensor expected) { left = left.minusInPlace(right); assertEquals(expected, left); } >>>>>>> <<<<<<< ======= @Test public void canBroadcastMultiplyDifferentRankedTensorsBigToSmall() { DoubleTensor rank4 = DoubleTensor.ones(4, 2, 2, 2); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, }, new int[]{4, 2, 2, 2}); assertTimesOperationEquals(rank4, matrix, expected); assertTimesInPlaceOperationEquals(rank4, matrix, expected); } @Test public void canBroadcastMultiplyDifferentRankedTensorsSmallToBig() { DoubleTensor rank4 = DoubleTensor.ones(4, 2, 2, 2); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, }, new int[]{4, 2, 2, 2}); assertTimesOperationEquals(matrix, rank4, expected); assertTimesInPlaceOperationEquals(matrix, rank4, expected); } @Test public void canBroadcastPlusDifferentRankedTensorsBigToSmall() { DoubleTensor rank4 = DoubleTensor.zeros(new int[]{4, 2, 2, 2}); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, }, new int[]{4, 2, 2, 2}); assertPlusOperationEquals(rank4, matrix, expected); assertPlusInPlaceOperationEquals(rank4, matrix, expected); } @Test public void canBroadcastPlusDifferentRankedTensorsSmallToBig() { DoubleTensor rank4 = DoubleTensor.zeros(new int[]{4, 2, 2, 2}); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, }, new int[]{4, 2, 2, 2}); assertPlusOperationEquals(matrix, rank4, expected); assertPlusInPlaceOperationEquals(matrix, rank4, expected); } @Test public void canBroadcastDivideDifferentRankedTensorsBigToSmall() { DoubleTensor rank4 = DoubleTensor.ones(new int[]{4, 2, 2, 2}).times(10.); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 5, 10}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 10, 5, 2, 1, 10, 5, 2, 1, 10, 5, 2, 1, 10, 5, 2, 1, 10, 5, 2, 1, 10, 5, 2, 1, 10, 5, 2, 1, 10, 5, 2, 1, }, new int[]{4, 2, 2, 2}); assertDivideOperationEquals(rank4, matrix, expected); assertDivideInPlaceOperationEquals(rank4, matrix, expected); } @Test public void canBroadcastDivideDifferentRankedTensorsSmallToBig() { DoubleTensor rank4 = DoubleTensor.ones(new int[]{4, 2, 2, 2}).times(10.); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 5, 10}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ .1, .2, .5, 1, .1, .2, .5, 1, .1, .2, .5, 1, .1, .2, .5, 1, .1, .2, .5, 1, .1, .2, .5, 1, .1, .2, .5, 1, .1, .2, .5, 1, }, new int[]{4, 2, 2, 2}); assertDivideOperationEquals(matrix, rank4, expected); assertDivideInPlaceOperationEquals(matrix, rank4, expected); } @Test public void canBroadcastMinusDifferentRankedTensorsBigToSmall() { DoubleTensor rank4 = DoubleTensor.ones(new int[]{4, 2, 2, 2}).times(5.); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 }, new int[]{4, 2, 2, 2}); assertMinusOperationEquals(rank4, matrix, expected); assertMinusInPlaceOperationEquals(rank4, matrix, expected); } @Test public void canBroadcastMinusDifferentRankedTensorsSmallToBig() { DoubleTensor rank4 = DoubleTensor.ones(new int[]{4, 2, 2, 2}).times(5.); DoubleTensor matrix = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2}); DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{ -4, -3, -2, -1, -4, -3, -2, -1, -4, -3, -2, -1, -4, -3, -2, -1, -4, -3, -2, -1, -4, -3, -2, -1, -4, -3, -2, -1, -4, -3, -2, -1 }, new int[]{4, 2, 2, 2}); assertMinusOperationEquals(matrix, rank4, expected); assertMinusInPlaceOperationEquals(matrix, rank4, expected); } >>>>>>>
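These tests pin down numpy-style broadcasting: a (rows, 1) column combines with a (rows, cols) matrix by reusing its single entry per row across every column. The index arithmetic in miniature, on plain arrays rather than the Nd4j-backed tensors:

    // Broadcast add of a (rows,1) column onto a (rows,cols) matrix.
    final class BroadcastSketch {
        static double[][] broadcastAdd(double[][] s, double[] column) {
            int rows = s.length;
            int cols = s[0].length;
            double[][] out = new double[rows][cols];
            for (int i = 0; i < rows; i++) {
                for (int j = 0; j < cols; j++) {
                    out[i][j] = s[i][j] + column[i]; // column entry reused across j
                }
            }
            return out;
        }
    }

On the canBroadcastAdd data above, row 0 of s gains 1, row 1 gains 2, and row 2 gains 3, reproducing the expected {-4, -1, -2, -6, -7, ...} values.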
<<<<<<< @Override public String get(Locale language) { // No language support return get(); } ======= @Override public String get(String identification) { return get(); } >>>>>>> @Override public String get(Locale language) { // No language support return get(); } @Override public String get(String identification) { return get(); }
<<<<<<< import java.util.HashMap; import java.util.Map; ======= >>>>>>> import java.util.HashMap; import java.util.Map;
<<<<<<< protected DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) { ======= public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) { >>>>>>> public DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
<<<<<<<
        Map<String, DoubleTensor> diffs = LogProbGradient.getJointLogProbGradientWrtLatents(probabilisticVertices);
=======
        Map<Long, Double> diffs = LogProbGradient.getJointLogProbGradientWrtLatents(probabilisticVertices);
>>>>>>>
        Map<Long, DoubleTensor> diffs = LogProbGradient.getJointLogProbGradientWrtLatents(probabilisticVertices);
<<<<<<<
import io.improbable.keanu.algorithms.variational.optimizer.VariableReference;
import io.improbable.keanu.util.ProgressBar;
=======
import io.improbable.keanu.util.status.PercentageComponent;
import io.improbable.keanu.util.status.RemainingTimeComponent;
import io.improbable.keanu.util.status.StatusBar;
import io.improbable.keanu.vertices.VertexId;
>>>>>>>
import io.improbable.keanu.algorithms.variational.optimizer.VariableReference;
import io.improbable.keanu.util.status.PercentageComponent;
import io.improbable.keanu.util.status.RemainingTimeComponent;
import io.improbable.keanu.util.status.StatusBar;
<<<<<<<
        progressBar.finish();
        return new NetworkSamples(samplesByVariable, logOfMasterPForEachSample, sampleCount);
=======
        statusBar.finish();
        return new NetworkSamples(samplesByVertex, logOfMasterPForEachSample, sampleCount);
>>>>>>>
        statusBar.finish();
        return new NetworkSamples(samplesByVariable, logOfMasterPForEachSample, sampleCount);
<<<<<<<
        Map<String, Double> gradient = DoubleTensor.toScalars(LogProbGradient.getJointLogProbGradientWrtLatents(
=======
        Map<Long, Double> gradient = LogProbGradient.getJointLogProbGradientWrtLatents(
>>>>>>>
        Map<Long, Double> gradient = DoubleTensor.toScalars(LogProbGradient.getJointLogProbGradientWrtLatents(
<<<<<<<
        ));
        Map<String, Double> gradientBeforeLeapfrog = new HashMap<>();
=======
        );
        Map<Long, Double> gradientBeforeLeapfrog = new HashMap<>();
>>>>>>>
        ));
        Map<Long, Double> gradientBeforeLeapfrog = new HashMap<>();
<<<<<<<
        Map<String, Double> newGradient = DoubleTensor.toScalars(LogProbGradient.getJointLogProbGradientWrtLatents(
=======
        Map<Long, Double> newGradient = LogProbGradient.getJointLogProbGradientWrtLatents(
>>>>>>>
        Map<Long, Double> newGradient = DoubleTensor.toScalars(LogProbGradient.getJointLogProbGradientWrtLatents(
<<<<<<<
import io.improbable.keanu.vertices.VertexBinaryOp;
=======
import io.improbable.keanu.vertices.dbl.Differentiable;
>>>>>>>
import io.improbable.keanu.vertices.VertexBinaryOp;
import io.improbable.keanu.vertices.dbl.Differentiable;
<<<<<<<
public abstract class DoubleBinaryOpVertex extends DoubleVertex implements NonProbabilistic<DoubleTensor>, VertexBinaryOp<DoubleVertex, DoubleVertex> {
=======
public abstract class DoubleBinaryOpVertex extends DoubleVertex implements Differentiable, NonProbabilistic<DoubleTensor> {
>>>>>>>
public abstract class DoubleBinaryOpVertex extends DoubleVertex implements Differentiable, NonProbabilistic<DoubleTensor>, VertexBinaryOp<DoubleVertex, DoubleVertex> {
<<<<<<<
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
=======
import java.util.concurrent.atomic.AtomicReference;
>>>>>>>
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
<<<<<<<
    public DoubleTensor nextLaplace(int[] shape, DoubleTensor mu, DoubleTensor beta) {
        List<Double> laplaceValues = new ArrayList<>();
        List<List<Integer>> possibleIndexes = exploreIndexes(shape);
        for (List<Integer> index : possibleIndexes) {
            int[] currentDimension = index.stream().mapToInt(i -> i).toArray();
            double sample = laplaceSample(
                mu.getValue(currentDimension),
                beta.getValue(currentDimension),
                nd4jRandom
            );
            laplaceValues.add(sample);
        }
        return createTensorFromList(laplaceValues, shape);
    }

    private List<List<Integer>> exploreIndexes(int[] shape) {
        List<List<Integer>> possibleIndexes = new ArrayList<>();
        int[] results = new int[shape.length];
        iterateThroughShape(0, shape.length, shape, results, possibleIndexes);
        return possibleIndexes;
    }

    private void iterateThroughShape(int count, int length, int[] size, int[] result, List<List<Integer>> dimensions) {
        if (count >= length) {
            Integer[] res = ArrayUtils.toObject(result);
            dimensions.add(Arrays.asList(res));
            return;
        }
        for (int i = 0; i < size[count]; i++) {
            result[count] = i;
            iterateThroughShape(count + 1, length, size, result, dimensions);
        }
    }

    private DoubleTensor createTensorFromList(List<Double> list, int[] shape) {
        double[] values = list.stream().mapToDouble(d -> d).toArray();
        return Nd4jDoubleTensor.create(values, shape);
    }

    private static double laplaceSample(double mu, double beta, Random random) {
        if (beta <= 0.0) {
            throw new IllegalArgumentException("Invalid value for beta: " + beta);
        }
        if (random.nextDouble() > 0.5) {
            return mu + beta * Math.log(random.nextDouble());
        } else {
            return mu - beta * Math.log(random.nextDouble());
        }
    }
=======
    public double nextGaussian() {
        return nd4jRandom.nextGaussian();
    }

    public boolean nextBoolean() {
        return nd4jRandom.nextBoolean();
    }

    public int nextInt(int maxExclusive) {
        return nd4jRandom.nextInt(maxExclusive);
    }
>>>>>>>
    public DoubleTensor nextLaplace(int[] shape, DoubleTensor mu, DoubleTensor beta) {
        List<Double> laplaceValues = new ArrayList<>();
        List<List<Integer>> possibleIndexes = exploreIndexes(shape);
        for (List<Integer> index : possibleIndexes) {
            int[] currentDimension = index.stream().mapToInt(i -> i).toArray();
            double sample = laplaceSample(
                mu.getValue(currentDimension),
                beta.getValue(currentDimension),
                nd4jRandom
            );
            laplaceValues.add(sample);
        }
        return createTensorFromList(laplaceValues, shape);
    }

    private List<List<Integer>> exploreIndexes(int[] shape) {
        List<List<Integer>> possibleIndexes = new ArrayList<>();
        int[] results = new int[shape.length];
        iterateThroughShape(0, shape.length, shape, results, possibleIndexes);
        return possibleIndexes;
    }

    private void iterateThroughShape(int count, int length, int[] size, int[] result, List<List<Integer>> dimensions) {
        if (count >= length) {
            Integer[] res = ArrayUtils.toObject(result);
            dimensions.add(Arrays.asList(res));
            return;
        }
        for (int i = 0; i < size[count]; i++) {
            result[count] = i;
            iterateThroughShape(count + 1, length, size, result, dimensions);
        }
    }

    private DoubleTensor createTensorFromList(List<Double> list, int[] shape) {
        double[] values = list.stream().mapToDouble(d -> d).toArray();
        return Nd4jDoubleTensor.create(values, shape);
    }

    private static double laplaceSample(double mu, double beta, Random random) {
        if (beta <= 0.0) {
            throw new IllegalArgumentException("Invalid value for beta: " + beta);
        }
        if (random.nextDouble() > 0.5) {
            return mu + beta * Math.log(random.nextDouble());
        } else {
            return mu - beta * Math.log(random.nextDouble());
        }
    }

    public double nextGaussian() {
        return nd4jRandom.nextGaussian();
    }

    public boolean nextBoolean() {
        return nd4jRandom.nextBoolean();
    }

    public int nextInt(int maxExclusive) {
        return nd4jRandom.nextInt(maxExclusive);
    }
<<<<<<<
    @Override
    public BooleanTensor elementwiseEquals(Double value) {
        return BooleanTensor.create(this.scalar().equals(value));
    }

    private static class SimpleDoubleFlattenedView implements FlattenedView<Double> {

        private double value;

        public SimpleDoubleFlattenedView(double value) {
            this.value = value;
        }

        @Override
        public long size() {
            return 1;
        }

        @Override
        public Double get(long index) {
            if (index != 0) {
                throw new IndexOutOfBoundsException();
            }
            return value;
        }

        @Override
        public Double getOrScalar(long index) {
            return value;
        }

        @Override
        public void set(long index, Double value) {
            if (index != 0) {
                throw new IndexOutOfBoundsException();
            }
            this.value = value;
        }
    }
=======
>>>>>>>
    @Override
    public BooleanTensor elementwiseEquals(Double value) {
        return BooleanTensor.create(this.scalar().equals(value));
    }
<<<<<<<
import static java.util.stream.Collectors.counting;
import static java.util.stream.Collectors.groupingBy;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.number.IsCloseTo.closeTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;

import org.apache.commons.math3.stat.descriptive.SummaryStatistics;

import com.google.common.collect.ImmutableList;
=======
import static java.util.stream.Collectors.counting;
import static java.util.stream.Collectors.groupingBy;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.number.IsCloseTo.closeTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;

import org.apache.commons.math3.stat.descriptive.SummaryStatistics;
>>>>>>>
import static java.util.stream.Collectors.counting;
import static java.util.stream.Collectors.groupingBy;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.number.IsCloseTo.closeTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;

import org.apache.commons.math3.stat.descriptive.SummaryStatistics;

import com.google.common.collect.ImmutableList;
<<<<<<<
import static io.improbable.keanu.vertices.dbltensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.vertices.dbltensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;
=======
import static io.improbable.keanu.vertices.dbltensor.probabilistic.ProbabilisticVertexShaping.checkParentShapes;
>>>>>>>
import static io.improbable.keanu.vertices.dbltensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.vertices.dbltensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;
<<<<<<<
    public TensorUniformVertex(DoubleTensorVertex xMin, DoubleTensorVertex xMax, KeanuRandom random) {
        this(xMin.getValue().getShape(), xMin, xMax, random);
    }

    public TensorUniformVertex(DoubleTensorVertex xMin, double xMax, KeanuRandom random) {
        this(xMin.getValue().getShape(), xMin, new ConstantTensorVertex(xMax), random);
    }

    public TensorUniformVertex(double xMin, DoubleTensorVertex xMax, KeanuRandom random) {
        this(xMax.getValue().getShape(), new ConstantTensorVertex(xMin), xMax, random);
    }

    public TensorUniformVertex(double xMin, double xMax, KeanuRandom random) {
        this(Tensor.SCALAR_SHAPE, new ConstantTensorVertex(xMin), new ConstantTensorVertex(xMax), random);
    }
=======
>>>>>>>
<<<<<<<
        this(checkHasSingleNonScalarShapeOrAllScalar(xMin.getValue(), xMax.getValue()), xMin, xMax, new KeanuRandom());
=======
        this(xMin.getValue().getShape(), xMin, xMax);
>>>>>>>
        this(checkHasSingleNonScalarShapeOrAllScalar(xMin.getValue(), xMax.getValue()), xMin, xMax);
<<<<<<<
        this(Tensor.SCALAR_SHAPE, new ConstantTensorVertex(xMin), new ConstantTensorVertex(xMax), new KeanuRandom());
=======
        this(new int[]{1, 1}, new ConstantTensorVertex(xMin), new ConstantTensorVertex(xMax));
>>>>>>>
        this(Tensor.SCALAR_SHAPE, new ConstantTensorVertex(xMin), new ConstantTensorVertex(xMax));
<<<<<<<
    public DoubleTensor sample() {
        return TensorUniform.sample(getShape(), xMin.getValue(), xMax.getValue(), random);
=======
    public DoubleTensor sample(KeanuRandom random) {
        return TensorUniform.sample(xMin.getValue(), xMax.getValue(), random);
>>>>>>>
    public DoubleTensor sample(KeanuRandom random) {
        return TensorUniform.sample(getShape(), xMin.getValue(), xMax.getValue(), random);
<<<<<<<
    public GammaVertex(@LoadVertexParam(THETA_NAME) DoubleVertex theta, @LoadVertexParam(K_NAME) DoubleVertex k) {
        this(checkHasSingleNonScalarShapeOrAllScalar(theta.getShape(), k.getShape()), theta, k);
=======
    public GammaVertex(@LoadParentVertex(THETA_NAME) DoubleVertex theta, @LoadParentVertex(K_NAME) DoubleVertex k) {
        this(checkHasOneNonLengthOneShapeOrAllLengthOne(theta.getShape(), k.getShape()), theta, k);
>>>>>>>
    public GammaVertex(@LoadVertexParam(THETA_NAME) DoubleVertex theta, @LoadVertexParam(K_NAME) DoubleVertex k) {
        this(checkHasOneNonLengthOneShapeOrAllLengthOne(theta.getShape(), k.getShape()), theta, k);
<<<<<<<
import io.improbable.keanu.vertices.Vertex;
import io.improbable.keanu.vertices.dbl.Differentiator;
=======
>>>>>>>
import io.improbable.keanu.vertices.Vertex;
import io.improbable.keanu.vertices.dbl.Differentiator;
<<<<<<<
import java.util.HashMap;
import java.util.Map;

import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
=======
>>>>>>>
import java.util.HashMap;
import java.util.Map;
<<<<<<<
    public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) {
        DualNumber leftDual = dualNumbers.get(left);
        DualNumber rightDual = dualNumbers.get(right);
        return leftDual.subtract(rightDual);
    }

    @Override
    public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) {
        Map<Vertex, PartialDerivatives> partials = new HashMap<>();
        partials.put(left, Differentiator.reshapeReverseAutoDiff(derivativeOfOutputsWithRespectToSelf, left.getValue(), right.getValue()));
        partials.put(right, Differentiator.reshapeReverseAutoDiff(derivativeOfOutputsWithRespectToSelf.multiplyBy(-1.0), right.getValue(), left.getValue()));
        return partials;
=======
    protected DoubleTensor op(DoubleTensor l, DoubleTensor r) {
        return l.minus(r);
>>>>>>>
    protected DoubleTensor op(DoubleTensor l, DoubleTensor r) {
        return l.minus(r);
    }

    @Override
    public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) {
        Map<Vertex, PartialDerivatives> partials = new HashMap<>();
        partials.put(left, Differentiator.reshapeReverseAutoDiff(derivativeOfOutputsWithRespectToSelf, left.getValue(), right.getValue()));
        partials.put(right, Differentiator.reshapeReverseAutoDiff(derivativeOfOutputsWithRespectToSelf.multiplyBy(-1.0), right.getValue(), left.getValue()));
        return partials;
<<<<<<<
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
import io.improbable.keanu.vertices.update.NonProbabilisticValueUpdater;
=======
>>>>>>>
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
<<<<<<<
    @Override
    public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) {
        if (reverseModeAutoDiffLambda != null) {
            return reverseModeAutoDiffLambda.apply(derivativeOfOutputsWithRespectToSelf);
        }
        throw new UnsupportedOperationException();
    }
}
=======
}
>>>>>>>
    @Override
    public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) {
        if (reverseModeAutoDiffLambda != null) {
            return reverseModeAutoDiffLambda.apply(derivativeOfOutputsWithRespectToSelf);
        }
        throw new UnsupportedOperationException();
    }
}
<<<<<<<
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.AbsVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.ArcCosVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.ArcSinVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.ArcTanVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.CeilVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.CosVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.DoubleUnaryOpLambda;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.ExpVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.FloorVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.LogVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.PluckVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.RoundVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.SigmoidVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.SinVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.SliceVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.SumVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.TanVertex;
import io.improbable.keanu.vertices.update.ValueUpdater;
=======
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.*;
>>>>>>>
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.*;
import io.improbable.keanu.vertices.update.ValueUpdater;
<<<<<<<
import io.improbable.keanu.algorithms.variational.optimizer.*;
import io.improbable.keanu.tensor.dbl.DoubleTensor;
import io.improbable.keanu.util.ProgressBar;
=======
import io.improbable.keanu.algorithms.variational.optimizer.Optimizer;
import io.improbable.keanu.algorithms.variational.optimizer.ProbabilisticGraph;
import io.improbable.keanu.algorithms.variational.optimizer.Variable;
import io.improbable.keanu.util.status.StatusBar;
>>>>>>>
import io.improbable.keanu.algorithms.variational.optimizer.*;
import io.improbable.keanu.algorithms.variational.optimizer.Optimizer;
import io.improbable.keanu.algorithms.variational.optimizer.ProbabilisticGraph;
import io.improbable.keanu.algorithms.variational.optimizer.Variable;
import io.improbable.keanu.tensor.dbl.DoubleTensor;
import io.improbable.keanu.util.status.StatusBar;
<<<<<<<
        progressBar.finish();
=======
        statusBar.finish();
        return pointValuePair.getValue();
    }
>>>>>>>
        statusBar.finish();
<<<<<<<
            pythonConstructors.add(
                new PythonConstructor(
                    javaClass,
                    toPythonClass(javaClass),
                    toPythonParams(pythonParameters, parameterTypes),
                    String.join(", ", pythonParameters),
                    docString.getAsString()
                )
            );
=======
            PythonConstructor pythonConstructor = new PythonConstructor(javaClass, toPythonClass(javaClass), String.join(", ", pythonParameters), docString.getAsString());
            pythonConstructors.add(pythonConstructor);
            exportedMethodsJoiner.add(pythonConstructor.pythonClass);
>>>>>>>
            PythonConstructor pythonConstructor = new PythonConstructor(
                javaClass,
                toPythonClass(javaClass),
                toPythonParams(pythonParameters, parameterTypes),
                String.join(", ", pythonParameters),
                docString.getAsString()
            );
            pythonConstructors.add(pythonConstructor);
            exportedMethodsJoiner.add(pythonConstructor.pythonClass);
<<<<<<<
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static junit.framework.TestCase.assertTrue;

import org.junit.Before;
import org.junit.Test;
import org.junit.Before;
import org.junit.Test;
=======
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
>>>>>>>
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static junit.framework.TestCase.assertTrue;

import org.junit.Before;
import org.junit.Test;
<<<<<<<
    @Test
    public void canTensorMultiplyWithVectorAndRank4() {
        DoubleTensor a = Nd4jDoubleTensor.create(new double[]{1, 2, 3}, new int[]{1, 1, 3, 1});
        DoubleTensor b = Nd4jDoubleTensor.create(new double[]{
            5, 2, 3, 7, 8,
            5, 2, 3, 7, 8,
            5, 2, 3, 7, 8
        }, new int[]{1, 3, 1, 5});

        DoubleTensor c = a.tensorMultiply(b, new int[]{2, 3}, new int[]{1, 0});

        DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{
            30, 12, 18, 42, 48
        }, new int[]{1, 1, 1, 5});

        assertEquals(expected, c);
    }

    @Test
    public void canTensorMultiplyWithNumpyExample() {
        DoubleTensor a = DoubleTensor.arange(0, 60).reshape(3, 4, 5);
        DoubleTensor b = DoubleTensor.arange(0, 24.).reshape(4, 3, 2);
        DoubleTensor c = a.tensorMultiply(b, new int[]{1, 0}, new int[]{0, 1});

        DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{
            4400., 4730.,
            4532., 4874.,
            4664., 5018.,
            4796., 5162.,
            4928., 5306.
        }, new int[]{5, 2});

        assertEquals(expected, c);
    }

//    @Test
//    public void canTensorMultiplyWithMatrixAndRank4() {
//        DoubleTensor a = Nd4jDoubleTensor.create(new double[]{
//            5, 5, 2, 7,
//            3, 2, 8, 4,
//            9, 7, 6, 9,
//            2, 3, 5, 6
//        }, new int[]{2, 2, 2, 2});
//
//        DoubleTensor b = Nd4jDoubleTensor.create(new double[]{3, 2, 6, 4, 7, 5}, new int[]{2, 3});
//
//        DoubleTensor c = a.tensorMultiply(b, new int[]{3}, new int[]{0});
//
//        DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{
//            35, 45, 55, 34, 53, 47,
//            0, 0, 0, 0, 0, 0,
//            0, 0, 0, 0, 0, 0,
//            0, 0, 0, 0, 0, 0
//        }, new int[]{2, 2, 2, 3});
//
//        assertEquals(expected, c);
//    }
=======
    @Test
    public void canPermuteForTranspose() {
        DoubleTensor a = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2});
        DoubleTensor permuted = a.permute(1, 0);
        DoubleTensor transposed = a.transpose();

        assertEquals(transposed, permuted);
    }

    @Test
    public void canPermuteUpperDimensions() {
        DoubleTensor a = DoubleTensor.create(new double[]{
            1, 2,
            3, 4,
            5, 6,
            7, 8
        }, new int[]{1, 2, 2, 2});
        DoubleTensor permuted = a.permute(0, 1, 3, 2);

        DoubleTensor expected = DoubleTensor.create(new double[]{
            1, 3,
            2, 4,
            5, 7,
            6, 8
        }, new int[]{1, 2, 2, 2});

        assertEquals(expected, permuted);
    }
>>>>>>>
    @Test
    public void canTensorMultiplyWithVectorAndRank4() {
        DoubleTensor a = Nd4jDoubleTensor.create(new double[]{1, 2, 3}, new int[]{1, 1, 3, 1});
        DoubleTensor b = Nd4jDoubleTensor.create(new double[]{
            5, 2, 3, 7, 8,
            5, 2, 3, 7, 8,
            5, 2, 3, 7, 8
        }, new int[]{1, 3, 1, 5});

        DoubleTensor c = a.tensorMultiply(b, new int[]{2, 3}, new int[]{1, 0});

        DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{
            30, 12, 18, 42, 48
        }, new int[]{1, 1, 1, 5});

        assertEquals(expected, c);
    }

    @Test
    public void canTensorMultiplyWithNumpyExample() {
        DoubleTensor a = DoubleTensor.arange(0, 60).reshape(3, 4, 5);
        DoubleTensor b = DoubleTensor.arange(0, 24.).reshape(4, 3, 2);
        DoubleTensor c = a.tensorMultiply(b, new int[]{1, 0}, new int[]{0, 1});

        DoubleTensor expected = Nd4jDoubleTensor.create(new double[]{
            4400., 4730.,
            4532., 4874.,
            4664., 5018.,
            4796., 5162.,
            4928., 5306.
        }, new int[]{5, 2});

        assertEquals(expected, c);
    }

    @Test
    public void canPermuteForTranspose() {
        DoubleTensor a = DoubleTensor.create(new double[]{1, 2, 3, 4}, new int[]{2, 2});
        DoubleTensor permuted = a.permute(1, 0);
        DoubleTensor transposed = a.transpose();

        assertEquals(transposed, permuted);
    }

    @Test
    public void canPermuteUpperDimensions() {
        DoubleTensor a = DoubleTensor.create(new double[]{
            1, 2,
            3, 4,
            5, 6,
            7, 8
        }, new int[]{1, 2, 2, 2});
        DoubleTensor permuted = a.permute(0, 1, 3, 2);

        DoubleTensor expected = DoubleTensor.create(new double[]{
            1, 3,
            2, 4,
            5, 7,
            6, 8
        }, new int[]{1, 2, 2, 2});

        assertEquals(expected, permuted);
    }
<<<<<<<
            pythonConstructors.add(
                new PythonConstructor(
                    javaClass,
                    toPythonClass(javaClass),
                    toPythonParams(pythonParameters, parameterTypes),
                    String.join(", ", pythonParameters)));
=======
            pythonConstructors.add(new PythonConstructor(javaClass, toPythonClass(javaClass), String.join(", ", pythonParameters), docString.getAsString()));
>>>>>>>
            pythonConstructors.add(
                new PythonConstructor(
                    javaClass,
                    toPythonClass(javaClass),
                    toPythonParams(pythonParameters, parameterTypes),
                    String.join(", ", pythonParameters),
                    docString.getAsString()
                )
            );
<<<<<<<
        PythonConstructor(String javaClass, String pythonClass, String pythonTypedParameters, String pythonParameters) {
=======
        PythonConstructor(String javaClass, String pythonClass, String pythonParameters, String docString) {
>>>>>>>
        PythonConstructor(String javaClass, String pythonClass, String pythonTypedParameters, String pythonParameters, String docString) {
<<<<<<<
import io.improbable.keanu.vertices.Vertex;
import io.improbable.keanu.vertices.dbl.Differentiator;
=======
>>>>>>>
import io.improbable.keanu.vertices.Vertex;
import io.improbable.keanu.vertices.dbl.Differentiator;
<<<<<<<
import java.util.HashMap;
import java.util.Map;

import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
=======
>>>>>>>
import java.util.HashMap;
import java.util.Map;

import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
<<<<<<<
    protected DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
=======
    public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) {
>>>>>>>
    public DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
<<<<<<<
    public KeanuSavedBayesNet.VertexID toProtoBuf() {
        return KeanuSavedBayesNet.VertexID.newBuilder()
            .addAllIdValues(Longs.asList(idValues))
            .build();
    }
=======
    public long[] getValue() {
        return Arrays.copyOf(idValues, idValues.length);
    }
>>>>>>>
    public KeanuSavedBayesNet.VertexID toProtoBuf() {
        return KeanuSavedBayesNet.VertexID.newBuilder()
            .addAllIdValues(Longs.asList(idValues))
            .build();
    }

    public long[] getValue() {
        return Arrays.copyOf(idValues, idValues.length);
    }
<<<<<<<
    static DoubleTensor placeHolder(int[] shape) {
        return new Nd4jDoubleTensor(shape);
    }

    static Map<String, DoubleTensor> fromScalars(Map<String, Double> scalars) {
        Map<String, DoubleTensor> asTensors = new HashMap<>();
=======
    static Map<Long, DoubleTensor> fromScalars(Map<Long, Double> scalars) {
        Map<Long, DoubleTensor> asTensors = new HashMap<>();
>>>>>>>
    static DoubleTensor placeHolder(int[] shape) {
        return new Nd4jDoubleTensor(shape);
    }

    static Map<Long, DoubleTensor> fromScalars(Map<Long, Double> scalars) {
        Map<Long, DoubleTensor> asTensors = new HashMap<>();
<<<<<<<
public class LogisticVertex extends DoubleVertex implements ProbabilisticDouble, SamplableWithManyScalars<DoubleTensor> {
=======
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import static io.improbable.keanu.distributions.hyperparam.Diffs.MU;
import static io.improbable.keanu.distributions.hyperparam.Diffs.S;
import static io.improbable.keanu.distributions.hyperparam.Diffs.X;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;

public class LogisticVertex extends DoubleVertex implements ProbabilisticDouble {
>>>>>>>
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import static io.improbable.keanu.distributions.hyperparam.Diffs.MU;
import static io.improbable.keanu.distributions.hyperparam.Diffs.S;
import static io.improbable.keanu.distributions.hyperparam.Diffs.X;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;

public class LogisticVertex extends DoubleVertex implements ProbabilisticDouble, SamplableWithManyScalars<DoubleTensor> {
<<<<<<<
    private Laplace() {
    }

    /**
     * Computer Generation of Statistical Distributions
     * by Richard Saucier
     * ARL-TR-2168 March 2000
     * 5.1.8 page 25
     */
=======
>>>>>>>
    private Laplace() {
    }

    /**
     * Computer Generation of Statistical Distributions
     * by Richard Saucier
     * ARL-TR-2168 March 2000
     * 5.1.8 page 25
     */
<<<<<<<
    @Override
    @SaveParentVertex(LEFT_NAME)
=======
    @SaveVertexParam(LEFT_NAME)
>>>>>>>
    @Override
    @SaveVertexParam(LEFT_NAME)
<<<<<<<
    @Override
    @SaveParentVertex(RIGHT_NAME)
=======
    @SaveVertexParam(RIGHT_NAME)
>>>>>>>
    @Override
    @SaveVertexParam(RIGHT_NAME)
<<<<<<<
public class InverseGammaVertex extends DoubleVertex implements ProbabilisticDouble, SamplableWithManyScalars<DoubleTensor> {
=======
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import static io.improbable.keanu.distributions.hyperparam.Diffs.A;
import static io.improbable.keanu.distributions.hyperparam.Diffs.B;
import static io.improbable.keanu.distributions.hyperparam.Diffs.X;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;

public class InverseGammaVertex extends DoubleVertex implements ProbabilisticDouble {
>>>>>>>
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import static io.improbable.keanu.distributions.hyperparam.Diffs.A;
import static io.improbable.keanu.distributions.hyperparam.Diffs.B;
import static io.improbable.keanu.distributions.hyperparam.Diffs.X;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;

public class InverseGammaVertex extends DoubleVertex implements ProbabilisticDouble, SamplableWithManyScalars<DoubleTensor> {
<<<<<<<
    public void fit(INPUT input, OUTPUT output) {
        modelGraph.observeValues(input, output);
        KeanuOptimizer.Gradient.of(modelGraph.getBayesianNetwork()).maxAPosteriori();
=======
    public void fit(ModelGraph modelGraph) {
        GradientOptimizer.of(modelGraph.getBayesianNetwork()).maxAPosteriori();
>>>>>>>
    public void fit(ModelGraph modelGraph) {
        KeanuOptimizer.Gradient.of(modelGraph.getBayesianNetwork()).maxAPosteriori();
<<<<<<<
import io.improbable.keanu.vertices.Vertex;
import io.improbable.keanu.vertices.dbl.Differentiator;
=======
>>>>>>>
import io.improbable.keanu.vertices.Vertex;
import io.improbable.keanu.vertices.dbl.Differentiator;
<<<<<<<
import java.util.HashMap;
import java.util.Map;

import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
=======
>>>>>>>
import java.util.HashMap;
import java.util.Map;
<<<<<<<
    public DoubleVertex getWeightsVertex(long featureCount, DoubleVertex priorOnWeightsMeans, DoubleVertex priorOnWeightsScaleParameter) {
        return new GaussianVertex(new long[]{1, featureCount}, priorOnWeightsMeans, priorOnWeightsScaleParameter).setLabel("weights");
=======
    public DoubleVertex getWeightsVertex(long featureCount, double[] priorOnWeightsMeans, double[] priorOnInterceptScaleParameter) {
        return new GaussianVertex(new long[]{featureCount, 1},
            ConstantVertex.of(priorOnWeightsMeans, featureCount, 1),
            ConstantVertex.of(priorOnInterceptScaleParameter, featureCount, 1)).setLabel("weights");
>>>>>>>
    public DoubleVertex getWeightsVertex(long featureCount, DoubleVertex priorOnWeightsMeans, DoubleVertex priorOnWeightsScaleParameter) {
        return new GaussianVertex(new long[]{featureCount, 1}, priorOnWeightsMeans, priorOnWeightsScaleParameter).setLabel("weights");
<<<<<<<
    public DoubleVertex getWeightsVertex(long featureCount, DoubleVertex priorOnWeightsMeans, DoubleVertex priorOnWeightsScaleParameter) {
        return new LaplaceVertex(new long[]{1, featureCount}, priorOnWeightsMeans, priorOnWeightsScaleParameter);
=======
    public DoubleVertex getWeightsVertex(long featureCount, double[] priorOnWeightsMeans, double[] priorOnInterceptScaleParameter) {
        return new LaplaceVertex(new long[]{featureCount, 1},
            new ConstantDoubleVertex(priorOnWeightsMeans, new long[]{priorOnWeightsMeans.length, 1}),
            new ConstantDoubleVertex(priorOnInterceptScaleParameter, new long[]{priorOnInterceptScaleParameter.length, 1})
        );
>>>>>>>
    public DoubleVertex getWeightsVertex(long featureCount, DoubleVertex priorOnWeightsMeans, DoubleVertex priorOnWeightsScaleParameter) {
        return new LaplaceVertex(new long[]{featureCount, 1}, priorOnWeightsMeans, priorOnWeightsScaleParameter);
<<<<<<<
    public DoubleVertex getWeightsVertex(long featureCount, DoubleVertex priorOnWeightsMeans, DoubleVertex priorOnWeightsScaleParameter) {
        return new GaussianVertex(new long[]{1, featureCount}, priorOnWeightsMeans, priorOnWeightsScaleParameter);
=======
    public DoubleVertex getWeightsVertex(long featureCount, double[] priorOnWeightsMeans, double[] priorOnInterceptScaleParameter) {
        return new GaussianVertex(new long[]{featureCount, 1},
            new ConstantDoubleVertex(priorOnWeightsMeans, new long[]{priorOnWeightsMeans.length, 1}),
            new ConstantDoubleVertex(priorOnInterceptScaleParameter, new long[]{priorOnInterceptScaleParameter.length, 1})
        );
>>>>>>>
    public DoubleVertex getWeightsVertex(long featureCount, DoubleVertex priorOnWeightsMeans, DoubleVertex priorOnWeightsScaleParameter) {
        return new GaussianVertex(new long[]{featureCount, 1}, priorOnWeightsMeans, priorOnWeightsScaleParameter);
<<<<<<<
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
=======
import io.improbable.keanu.vertices.update.NonProbabilisticValueUpdater;
>>>>>>>
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
import io.improbable.keanu.vertices.update.NonProbabilisticValueUpdater;
<<<<<<<
import io.improbable.keanu.annotation.DisplayInformationForOutput;
=======
import io.improbable.keanu.annotation.ExportVertexToPythonBindings;
>>>>>>>
import io.improbable.keanu.annotation.DisplayInformationForOutput;
import io.improbable.keanu.annotation.ExportVertexToPythonBindings;
<<<<<<<
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import io.improbable.keanu.algorithms.variational.GradientOptimizer;
=======
import io.improbable.keanu.algorithms.variational.optimizer.gradient.GradientOptimizer;
>>>>>>>
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import io.improbable.keanu.algorithms.variational.optimizer.gradient.GradientOptimizer;
<<<<<<<
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
=======
import java.util.Arrays;

import io.improbable.keanu.kotlin.DoubleOperators;
>>>>>>>
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import io.improbable.keanu.kotlin.DoubleOperators;
<<<<<<<
public interface DoubleTensor extends NumberTensor<Double> {
=======
public interface DoubleTensor extends NumberTensor<Double, DoubleTensor>, DoubleOperators<DoubleTensor> {

    DoubleTensor MINUS_ONE_SCALAR = scalar(-1.0);
>>>>>>>
public interface DoubleTensor extends NumberTensor<Double, DoubleTensor>, DoubleOperators<DoubleTensor> {

    DoubleTensor MINUS_ONE_SCALAR = scalar(-1.0);
<<<<<<<
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
=======
import io.improbable.keanu.vertices.update.NonProbabilisticValueUpdater;
>>>>>>>
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
import io.improbable.keanu.vertices.update.NonProbabilisticValueUpdater;
<<<<<<<
    public DoubleUnaryOpLambda(int[] shape, Vertex<IN> inputVertex, Function<IN, DoubleTensor> op, Function<Map<Vertex, DualNumber>, DualNumber> dualNumberCalculation, Function<PartialDerivatives, Map<Vertex, PartialDerivatives>> reverseModeAutoDiffLambda) {
=======
    public DoubleUnaryOpLambda(int[] shape, Vertex<IN> inputVertex, Function<IN, DoubleTensor> op, Function<Map<Vertex, DualNumber>, DualNumber> dualNumberCalculation) {
        super(new NonProbabilisticValueUpdater<>(v -> ((DoubleUnaryOpLambda<IN>) v).op.apply(inputVertex.getValue())));
>>>>>>>
    public DoubleUnaryOpLambda(int[] shape, Vertex<IN> inputVertex, Function<IN, DoubleTensor> op, Function<Map<Vertex, DualNumber>, DualNumber> dualNumberCalculation, Function<PartialDerivatives, Map<Vertex, PartialDerivatives>> reverseModeAutoDiffLambda) {
        super(new NonProbabilisticValueUpdater<>(v -> ((DoubleUnaryOpLambda<IN>) v).op.apply(inputVertex.getValue())));
<<<<<<<
import io.improbable.keanu.tensor.dbl.DoubleTensor;
=======
import io.improbable.keanu.testcategory.Slow;
>>>>>>>
import io.improbable.keanu.tensor.dbl.DoubleTensor;
import io.improbable.keanu.testcategory.Slow;
<<<<<<<
import io.improbable.keanu.distributions.dual.Diffs;
=======
import io.improbable.keanu.tensor.TensorShape;
>>>>>>>
import io.improbable.keanu.distributions.dual.Diffs;
import io.improbable.keanu.tensor.TensorShape;
<<<<<<<
=======
import java.util.Map;

import static io.improbable.keanu.tensor.TensorShape.shapeToDesiredRankByPrependingOnes;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;
>>>>>>>
import java.util.Map;

import static io.improbable.keanu.tensor.TensorShape.shapeToDesiredRankByPrependingOnes;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkHasSingleNonScalarShapeOrAllScalar;
import static io.improbable.keanu.tensor.TensorShapeValidation.checkTensorsMatchNonScalarShapeOrAreScalar;
<<<<<<<
        Diffs dlnP = LogNormal.withParameters(mu.getValue(), sigma.getValue()).dLogProb(value);
        return convertDualNumbersToDiff(dlnP.get(MU).getValue(), dlnP.get(SIGMA).getValue(), dlnP.get(X).getValue());
=======
        LogNormal.DiffLogP dlnP = LogNormal.dlnPdf(mu.getValue(), sigma.getValue(), value);
        return convertDualNumbersToDiff(dlnP.dLogPdmu, dlnP.dLogPdsigma, dlnP.dLogPdx);
>>>>>>>
        Diffs dlnP = LogNormal.withParameters(mu.getValue(), sigma.getValue()).dLogProb(value);
        return convertDualNumbersToDiff(dlnP.get(MU).getValue(), dlnP.get(SIGMA).getValue(), dlnP.get(X).getValue());
<<<<<<<
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.AdditionVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.DifferenceVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.DivisionVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.MultiplicationVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.*;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.PowerVertex;
import io.improbable.keanu.vertices.dbl.probabilistic.ExponentialVertex;
=======
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.*;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.AbsVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.DoubleUnaryOpLambda;
>>>>>>>
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.AdditionVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.DifferenceVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.DivisionVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.MultiplicationVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.*;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.binary.PowerVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.AbsVertex;
import io.improbable.keanu.vertices.dbl.nonprobabilistic.operators.unary.DoubleUnaryOpLambda;
<<<<<<<
    protected DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
=======
    public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) {
>>>>>>>
    public DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
<<<<<<<
import org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException;
import org.apache.accumulo.core.client.impl.ServerClient;
=======
>>>>>>>
import org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException;
<<<<<<<
import org.apache.accumulo.core.master.thrift.BulkImportState;
=======
import org.apache.accumulo.core.rpc.ThriftUtil;
>>>>>>>
import org.apache.accumulo.core.master.thrift.BulkImportState;
import org.apache.accumulo.core.rpc.ThriftUtil;
<<<<<<<
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
import io.improbable.keanu.vertices.update.NonProbabilisticValueUpdater;
=======
>>>>>>>
import io.improbable.keanu.vertices.dbl.nonprobabilistic.diff.PartialDerivatives;
<<<<<<<
        return DualNumber.concat(dualNumbers, duals, input, dimension, inputValues);
    }

    @Override
    public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) {
        DoubleTensor value = derivativeOfOutputsWithRespectToSelf.asMap().get(this.getId());

        int[] partialShape = value.getShape();
        int[] rearrange = TensorShape.dimensionRange(0, partialShape.length);
        rearrange[dimension] = 0;
        rearrange[0] = dimension;

        DoubleTensor permuted = value.permute(rearrange);
        double[] permutedBuffer = permuted.asFlatDoubleArray();

        Map<Vertex, PartialDerivatives> concattedPartial = new HashMap<>();
        int bufferOffset = 0;

        for (DoubleVertex vertex : input) {
            int[] ofWrtShape = TensorShape.concat(Arrays.copyOfRange(value.getShape(), 0, vertex.getValue().getRank()), vertex.getShape());
            int inputSize = (int) (value.getLength() / (value.getShape()[value.getShape().length / 2 + dimension])) * vertex.getShape()[dimension];
            double[] inputsDualNumbers = Arrays.copyOfRange(permutedBuffer, bufferOffset, bufferOffset + inputSize);
            DoubleTensor unpermuted = DoubleTensor.create(inputsDualNumbers, ofWrtShape).permute(rearrange);
            PartialDerivatives partial = new PartialDerivatives(getId(), unpermuted);
            concattedPartial.put(vertex, partial);
            bufferOffset += inputSize;
        }

        return concattedPartial;
=======
        return DualNumber.concat(dualNumbers, dualNumbersOfInputs, input, dimension, inputValues);
>>>>>>>
        return DualNumber.concat(dualNumbers, dualNumbersOfInputs, input, dimension, inputValues);
    }

    @Override
    public Map<Vertex, PartialDerivatives> reverseModeAutoDifferentiation(PartialDerivatives derivativeOfOutputsWithRespectToSelf) {
        DoubleTensor value = derivativeOfOutputsWithRespectToSelf.asMap().get(this.getId());

        int[] partialShape = value.getShape();
        int[] rearrange = TensorShape.dimensionRange(0, partialShape.length);
        rearrange[dimension] = 0;
        rearrange[0] = dimension;

        DoubleTensor permuted = value.permute(rearrange);
        double[] permutedBuffer = permuted.asFlatDoubleArray();

        Map<Vertex, PartialDerivatives> concattedPartial = new HashMap<>();
        int bufferOffset = 0;

        for (DoubleVertex vertex : input) {
            int[] ofWrtShape = TensorShape.concat(Arrays.copyOfRange(value.getShape(), 0, vertex.getValue().getRank()), vertex.getShape());
            int inputSize = (int) (value.getLength() / (value.getShape()[value.getShape().length / 2 + dimension])) * vertex.getShape()[dimension];
            double[] inputsDualNumbers = Arrays.copyOfRange(permutedBuffer, bufferOffset, bufferOffset + inputSize);
            DoubleTensor unpermuted = DoubleTensor.create(inputsDualNumbers, ofWrtShape).permute(rearrange);
            PartialDerivatives partial = new PartialDerivatives(getId(), unpermuted);
            concattedPartial.put(vertex, partial);
            bufferOffset += inputSize;
        }

        return concattedPartial;
<<<<<<<
        BayesianNetwork network = new BayesianNetwork(A.getConnectedGraph());
        network.probeForNonZeroMasterP(100);
=======
        BayesNet network = new BayesNet(A.getConnectedGraph());
        network.probeForNonZeroMasterP(100, random);
>>>>>>>
        BayesianNetwork network = new BayesianNetwork(A.getConnectedGraph());
        network.probeForNonZeroMasterP(100, random);
<<<<<<<
        BayesNetDoubleAsContinuous network = new BayesNetDoubleAsContinuous(A.getConnectedGraph());
        network.probeForNonZeroMasterP(100);
=======
        BayesNet network = new BayesNet(A.getConnectedGraph());
        network.probeForNonZeroMasterP(100, random);
>>>>>>>
        BayesNetDoubleAsContinuous network = new BayesNetDoubleAsContinuous(A.getConnectedGraph());
        network.probeForNonZeroMasterP(100, random);
<<<<<<<
    protected DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
=======
    public DoubleTensor getDerivedValue() {
        return op(extractFromInputs(DoubleTensor.class, Vertex::getValue));
    }

    @Override
    public DualNumber calculateDualNumber(Map<Vertex, DualNumber> dualNumbers) {
>>>>>>>
    public DualNumber calculateDualNumber(Map<Vertex<?>, DualNumber> dualNumbers) {
<<<<<<<
    public BetaVertex(DoubleVertex alpha, DoubleVertex beta) {
        this(checkHasOneNonLengthOneShapeOrAllLengthOne(alpha.getShape(), beta.getShape()), alpha, beta);
=======
    @ExportVertexToPythonBindings
    public BetaVertex(@LoadParentVertex(ALPHA_NAME) DoubleVertex alpha, @LoadParentVertex(BETA_NAME) DoubleVertex beta) {
        this(checkHasSingleNonScalarShapeOrAllScalar(alpha.getShape(), beta.getShape()), alpha, beta);
>>>>>>>
    @ExportVertexToPythonBindings
    public BetaVertex(@LoadParentVertex(ALPHA_NAME) DoubleVertex alpha, @LoadParentVertex(BETA_NAME) DoubleVertex beta) {
        this(checkHasOneNonLengthOneShapeOrAllLengthOne(alpha.getShape(), beta.getShape()), alpha, beta);
<<<<<<<
=======
import org.apache.lucene.index.Fields;
>>>>>>>
<<<<<<<
//import org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat;
//import org.apache.lucene.codecs.bloom.DefaultBloomFilterFactory;
import org.apache.lucene.codecs.lucene41.Lucene41Codec;
=======
import org.apache.lucene.codecs.lucene40.Lucene40Codec;
>>>>>>>
import org.apache.lucene.codecs.lucene41.Lucene41Codec;
<<<<<<<
    /*
    if (field.equals("id")) {
      if (idFieldPostingsFormat.equals("BloomLucene41")) {
        return new BloomFilteringPostingsFormat(
            PostingsFormat.forName("Lucene41"),
            new DefaultBloomFilterFactory());
      } else if (idFieldPostingsFormat.equals("BloomMemory")) {
        return new BloomFilteringPostingsFormat(
            PostingsFormat.forName("Memory"),
            new DefaultBloomFilterFactory());
      }
    }
    */
=======
>>>>>>>
<<<<<<<
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene40.Lucene40Codec;
import org.apache.lucene.search.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.*;
=======
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene40.Lucene40Codec;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.BalancedSegmentMergePolicy;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.NoDeletionPolicy;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.PrintStreamInfoStream;
import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
>>>>>>>
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene40.Lucene40Codec;
import org.apache.lucene.document.*;
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.*;
<<<<<<<
import thredds.client.catalog.Catalog;
import thredds.client.catalog.builder.CatalogBuilder;
import thredds.client.catalog.tools.CatalogXmlWriter;
=======
import thredds.catalog.InvCatalogFactory;
import thredds.catalog.InvCatalogImpl;
>>>>>>>
import thredds.client.catalog.Catalog;
import thredds.client.catalog.builder.CatalogBuilder;
import thredds.client.catalog.tools.CatalogXmlWriter;
<<<<<<<
=======
import ucar.unidata.test.util.ThreddsServer;
>>>>>>>
import ucar.unidata.test.util.NotTravis;
import ucar.unidata.test.util.ThreddsServer;
<<<<<<<
    Assert.assertEquals(expected, actual);
  }
=======
    assertEquals(expected, actual);
>>>>>>>
    assertEquals(expected, actual);
  }
<<<<<<<
=======
    assertTrue("Expected to find metadata entries", itemsInspected > 0);
>>>>>>>
<<<<<<<
 * Copyright 1998-2015 John Caron and University Corporation for Atmospheric Research/Unidata
=======
 * Copyright 1998-2017 University Corporation for Atmospheric Research/Unidata
>>>>>>>
 * Copyright 1998-2017 University Corporation for Atmospheric Research/Unidata
<<<<<<<
=======
import ucar.nc2.dataset.TransformType;
import ucar.unidata.geoloc.Earth;
import ucar.unidata.geoloc.ProjectionImpl;
import ucar.unidata.geoloc.projection.proj4.AlbersEqualAreaEllipse;
>>>>>>>
import ucar.nc2.dataset.TransformType;
import ucar.unidata.geoloc.Earth;
import ucar.unidata.geoloc.ProjectionImpl;
<<<<<<<
  public ProjectionCT makeCoordinateTransform(AttributeContainer ctv, String geoCoordinateUnits) {
    readStandardParams(ctv, geoCoordinateUnits);
    ucar.unidata.geoloc.ProjectionImpl proj = new EquidistantAzimuthalProjection(lat0, lon0, false_easting, false_northing, earth);
    return new ProjectionCT(ctv.getName(), "FGDC", proj);
=======
  public TransformType getTransformType() {
    return TransformType.Projection;
  }

  public CoordinateTransform makeCoordinateTransform(NetcdfDataset ds, Variable ctv) {
    readStandardParams(ds, ctv);

    // create spherical Earth obj if not created by readStandardParams w radii, flattening
    if (earth == null) {
      if (earth_radius > 0.) {
        // Earth radius obtained in readStandardParams is in km, but Earth object wants m
        earth = new Earth(earth_radius * 1000.);
      } else {
        earth = new Earth();
      }
    }

    ProjectionImpl proj = new EquidistantAzimuthalProjection(lat0, lon0, false_easting, false_northing, earth);
    return new ProjectionCT(ctv.getShortName(), "FGDC", proj);
>>>>>>>
  public TransformType getTransformType() {
    return TransformType.Projection;
  }

  public ProjectionCT makeCoordinateTransform(AttributeContainer ctv, String geoCoordinateUnits) {
    readStandardParams(ctv, geoCoordinateUnits);

    // create spherical Earth obj if not created by readStandardParams w radii, flattening
    if (earth == null) {
      if (earth_radius > 0.) {
        // Earth radius obtained in readStandardParams is in km, but Earth object wants m
        earth = new Earth(earth_radius * 1000.);
      } else {
        earth = new Earth();
      }
    }

    ProjectionImpl proj = new EquidistantAzimuthalProjection(lat0, lon0, false_easting, false_northing, earth);
    return new ProjectionCT(ctv.getName(), "FGDC", proj);
<<<<<<<
@ContextConfiguration(locations={"/WEB-INF/applicationContext.xml"},loader=MockTdsContextLoader.class)
=======
@ContextConfiguration(locations={"/WEB-INF/applicationContext-tdsConfig.xml"},loader=MockTdsContextLoader.class)
@Category(NeedsContentRoot.class)
>>>>>>>
@ContextConfiguration(locations={"/WEB-INF/applicationContext.xml"},loader=MockTdsContextLoader.class)
@Category(NeedsContentRoot.class)
<<<<<<<
 * Copyright 1998-2015 John Caron and University Corporation for Atmospheric Research/Unidata
 *
 * Portions of this software were developed by the Unidata Program at the
 * University Corporation for Atmospheric Research.
 *
 * Access and use of this software shall impose the following obligations
 * and understandings on the user. The user is granted the right, without
 * any fee or cost, to use, copy, modify, alter, enhance and distribute
 * this software, and any derivative works thereof, and its supporting
 * documentation for any purpose whatsoever, provided that this entire
 * notice appears in all copies of the software, derivative works and
 * supporting documentation. Further, UCAR requests that the user credit
 * UCAR/Unidata in any publications that result from the use of this
 * software or in any product that includes this software. The names UCAR
 * and/or Unidata, however, may not be used in any advertising or publicity
 * to endorse or promote any products or commercial entity unless specific
 * written permission is obtained from UCAR/Unidata. The user also
 * understands that UCAR/Unidata is not obligated to provide the user with
 * any support, consulting, training or assistance of any kind with regard
 * to the use, operation and performance of this software nor to provide
 * the user with any updates, revisions, new versions or "bug fixes."
 *
 * THIS SOFTWARE IS PROVIDED BY UCAR/UNIDATA "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL UCAR/UNIDATA BE LIABLE FOR ANY SPECIAL,
 * INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
 * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE ACCESS, USE OR PERFORMANCE OF THIS SOFTWARE.
=======
 * Copyright (c) 1998-2017 University Corporation for Atmospheric Research/Unidata
>>>>>>>
 * Copyright (c) 1998-2017 John Caron and University Corporation for Atmospheric Research/Unidata
<<<<<<<
  // class not interface, per Bloch edition 2 item 19
  private CDM() {} // disable instantiation
=======
  // Special Attribute Names
  public static final String NCPROPERTIES = "_NCProperties";
  public static final String ISNETCDF4 = "_IsNetcdf4";
  public static final String SUPERBLOCKVERSION = "_SuperblockVersion";
  public static final String DAP4_LITTLE_ENDIAN = "_DAP4_Little_Endian";
  public static final String EDU_UCAR_PREFIX = "_edu.ucar";
>>>>>>>
  // Special Attribute Names
  public static final String NCPROPERTIES = "_NCProperties";
  public static final String ISNETCDF4 = "_IsNetcdf4";
  public static final String SUPERBLOCKVERSION = "_SuperblockVersion";
  public static final String DAP4_LITTLE_ENDIAN = "_DAP4_Little_Endian";
  public static final String EDU_UCAR_PREFIX = "_edu.ucar";

  // class not interface, per Bloch edition 2 item 19
  private CDM() {} // disable instantiation
<<<<<<<
=======
import ucar.unidata.test.util.NeedsExternalResource;
import ucar.unidata.test.util.TestDir;
>>>>>>>
import ucar.unidata.test.util.TestDir;