conflict_resolution
string · lengths 27 to 16k
<<<<<<< ======= import java.util.HashMap; import java.util.HashSet; >>>>>>> import java.util.HashSet; <<<<<<< ======= import java.util.ListIterator; import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; >>>>>>> import java.util.Set;
<<<<<<< /* * Copyright (C) 2012-2019 52°North Initiative for Geospatial Open Source ======= /** * Copyright (C) 2012-2020 52°North Initiative for Geospatial Open Source >>>>>>> /* * Copyright (C) 2012-2020 52°North Initiative for Geospatial Open Source
<<<<<<< import org.yamcs.protobuf.Commanding.CommandOptions; import org.yamcs.protobuf.Commanding.CommandQueueEntry; import org.yamcs.protobuf.Commanding.CommandVerifierOption; ======= import org.yamcs.protobuf.CreateProcessorRequest; import org.yamcs.protobuf.DeleteProcessorRequest; import org.yamcs.protobuf.EditProcessorRequest; >>>>>>> import org.yamcs.protobuf.Commanding.CommandOptions; import org.yamcs.protobuf.Commanding.CommandQueueEntry; import org.yamcs.protobuf.Commanding.CommandVerifierOption; import org.yamcs.protobuf.CreateProcessorRequest; import org.yamcs.protobuf.DeleteProcessorRequest; import org.yamcs.protobuf.EditProcessorRequest;
<<<<<<< import java.util.Random; ======= import java.util.Map; >>>>>>> import java.util.Map; import java.util.Random; <<<<<<< CCSDSPacket packet = readPacket(serverConnection.getTcInputStream()); if (packet != null) ======= CCSDSPacket packet = readPacket( new DataInputStream(serverConnection.getTcSocket().getInputStream())); if (packet != null) { >>>>>>> CCSDSPacket packet = readPacket(serverConnection.getTcInputStream()); if (packet != null) <<<<<<< protected CCSDSPacket readPacket(DataInputStream dIn) throws IOException { byte hdr[] = new byte[6]; dIn.readFully(hdr); int remaining = ((hdr[4] & 0xFF) << 8) + (hdr[5] & 0xFF) + 1; if (remaining > maxTcPacketLength - 6) throw new IOException( "Remaining packet length too big: " + remaining + " maximum allowed is " + (maxTcPacketLength - 6)); byte[] b = new byte[6 + remaining]; System.arraycopy(hdr, 0, b, 0, 6); dIn.readFully(b, 6, remaining); CCSDSPacket packet = new CCSDSPacket(ByteBuffer.wrap(b)); tmLink.tmTransmit(ackPacket(packet, 0, 0)); return packet; ======= /** * this runs in a separate thread but pushes commands to the main TM thread */ protected CCSDSPacket readPacket(DataInputStream dIn) { try { byte hdr[] = new byte[6]; dIn.readFully(hdr); int remaining = ((hdr[4] & 0xFF) << 8) + (hdr[5] & 0xFF) + 1; if (remaining > maxLength - 6) { throw new IOException( "Remaining packet length too big: " + remaining + " maximum allowed is " + (maxLength - 6)); } byte[] b = new byte[6 + remaining]; System.arraycopy(hdr, 0, b, 0, 6); dIn.readFully(b, 6, remaining); CCSDSPacket packet = new CCSDSPacket(ByteBuffer.wrap(b)); tmLink.ackPacketSend(ackPacket(packet, 0, 0)); return packet; } catch (IOException e) { log.error("Connection lost:" + e.getMessage(), e); } catch (Exception e) { log.error("Error reading command " + e.getMessage(), e); } return null; >>>>>>> protected CCSDSPacket readPacket(DataInputStream dIn) throws IOException { byte hdr[] = new byte[6]; dIn.readFully(hdr); int remaining = ((hdr[4] & 0xFF) << 8) + (hdr[5] & 0xFF) + 1; if (remaining > maxTcPacketLength - 6) throw new IOException( "Remaining packet length too big: " + remaining + " maximum allowed is " + (maxTcPacketLength - 6)); byte[] b = new byte[6 + remaining]; System.arraycopy(hdr, 0, b, 0, 6); dIn.readFully(b, 6, remaining); CCSDSPacket packet = new CCSDSPacket(ByteBuffer.wrap(b)); tmLink.tmTransmit(ackPacket(packet, 0, 0)); return packet;
<<<<<<< import java.io.IOException; import java.util.HashMap; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yamcs.ConfigurationException; import org.yamcs.YamcsServer; import org.yamcs.api.ws.WSConstants; import org.yamcs.protobuf.Web.WebSocketServerMessage.WebSocketReplyData; import org.yamcs.protobuf.Yamcs.ProtoDataType; import org.yamcs.security.AuthenticationToken; import org.yamcs.web.HttpRequestHandler; import org.yamcs.web.HttpRequestInfo; import org.yamcs.web.HttpServer; import org.yamcs.web.WebConfig; import com.google.protobuf.Message; ======= >>>>>>> import java.io.IOException; import java.util.HashMap; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yamcs.ConfigurationException; import org.yamcs.YamcsServer; import org.yamcs.api.ws.WSConstants; import org.yamcs.protobuf.Web.WebSocketServerMessage.WebSocketReplyData; import org.yamcs.protobuf.Yamcs.ProtoDataType; import org.yamcs.security.AuthenticationToken; import org.yamcs.web.HttpRequestHandler; import org.yamcs.web.HttpRequestInfo; import org.yamcs.web.HttpServer; import org.yamcs.web.WebConfig; import com.google.protobuf.Message; <<<<<<< ======= import io.protostuff.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yamcs.ConfigurationException; import org.yamcs.YamcsServer; import org.yamcs.api.ws.WSConstants; import org.yamcs.protobuf.Web.WebSocketServerMessage.WebSocketReplyData; import org.yamcs.protobuf.Yamcs.ProtoDataType; import org.yamcs.security.AuthenticationToken; import org.yamcs.web.HttpRequestHandler; import org.yamcs.web.HttpRequestInfo; import org.yamcs.web.HttpServer; import org.yamcs.web.WebConfig; import java.io.IOException; import java.io.InputStream; import java.util.HashMap; import java.util.Map; >>>>>>> <<<<<<< public <T extends Message> void sendData(ProtoDataType dataType, T data) throws IOException { ======= public <S> void sendData(ProtoDataType dataType, S data, Schema<S> schema) throws IOException { >>>>>>> public <T extends Message> void sendData(ProtoDataType dataType, T data) throws IOException { <<<<<<< WebSocketFrame frame = encoder.encodeData(dataSeqCount, dataType, data); ======= WebSocketFrame frame = getEncoder().encodeData(dataSeqCount, dataType, data, schema); >>>>>>> WebSocketFrame frame = getEncoder().encodeData(dataSeqCount, dataType, data);
<<<<<<< import com.RNFetchBlob.RNFetchBlobPackage; ======= import org.reactnative.camera.RNCameraPackage; >>>>>>> import com.RNFetchBlob.RNFetchBlobPackage; import org.reactnative.camera.RNCameraPackage; <<<<<<< new RNFetchBlobPackage(), ======= new RNCameraPackage(), >>>>>>> new RNFetchBlobPackage(), new RNCameraPackage(),
<<<<<<< return createTempDb(customFile, getDefaultFormat(), isDefaultInMemory(), getDefaultTempDirectory()); ======= // if linker is read-only, open linkee read-only boolean readOnly = ((linkerDb instanceof DatabaseImpl) ? ((DatabaseImpl)linkerDb).isReadOnly() : false); return createTempDb(customFile, getDefaultFormat(), isDefaultInMemory(), getDefaultTempDirectory(), readOnly); >>>>>>> // if linker is read-only, open linkee read-only boolean readOnly = ((linkerDb instanceof DatabaseImpl) ? ((DatabaseImpl)linkerDb).isReadOnly() : false); return createTempDb(customFile, getDefaultFormat(), isDefaultInMemory(), getDefaultTempDirectory(), readOnly); <<<<<<< protected Database createTempDb(Object customFile, FileFormat format, boolean inMemory, Path tempDir) ======= protected Database createTempDb(Object customFile, FileFormat format, boolean inMemory, File tempDir, boolean readOnly) >>>>>>> protected Database createTempDb(Object customFile, FileFormat format, boolean inMemory, Path tempDir, boolean readOnly) <<<<<<< TempDatabaseImpl db = new TempDatabaseImpl(this, customFile, dbFile, channel, format); ======= TempDatabaseImpl db = new TempDatabaseImpl(this, customFile, dbFile, channel, format, readOnly); >>>>>>> TempDatabaseImpl db = new TempDatabaseImpl(this, customFile, dbFile, channel, format, readOnly); <<<<<<< Path file, FileChannel channel, FileFormat fileFormat) ======= File file, FileChannel channel, FileFormat fileFormat, boolean readOnly) >>>>>>> Path file, FileChannel channel, FileFormat fileFormat, boolean readOnly)
<<<<<<< public class TableImpl implements Table, PropertyMaps.Owner { ======= public class TableImpl implements Table { >>>>>>> public class TableImpl implements Table, PropertyMaps.Owner { <<<<<<< public void propertiesUpdated() throws IOException { // propagate update to columns for(ColumnImpl col : _columns) { col.propertiesUpdated(); } reloadRowValidator(); // calculated columns will need to be re-sorted (their expressions may // have changed when their properties were updated) _calcColEval.reSort(); } ======= >>>>>>> public void propertiesUpdated() throws IOException { // propagate update to columns for(ColumnImpl col : _columns) { col.propertiesUpdated(); } reloadRowValidator(); // calculated columns will need to be re-sorted (their expressions may // have changed when their properties were updated) _calcColEval.reSort(); } <<<<<<< rowStart -= umapRowLength; ======= // zero remaining row data ByteUtil.clearRange(umapBuf, dataOffset, (rowStart + umapRowLength)); rowStart -= umapRowLength; >>>>>>> // zero remaining row data ByteUtil.clearRange(umapBuf, dataOffset, (rowStart + umapRowLength)); rowStart -= umapRowLength; <<<<<<< if(!column.isAutoNumber()) { Object val = column.getRowValue(row); if(val == null) { val = column.generateDefaultValue(); } ======= if(!column.isAutoNumber()) { >>>>>>> if(!column.isAutoNumber()) { Object val = column.getRowValue(row); if(val == null) { val = column.generateDefaultValue(); } <<<<<<< // need to assign calculated values after all the other fields are // filled in but before final validation _calcColEval.calculate(row); // run row validation if enabled if(_rowValidator != null) { _rowValidator.validate(row); } ======= >>>>>>> // need to assign calculated values after all the other fields are // filled in but before final validation _calcColEval.calculate(row); // run row validation if enabled if(_rowValidator != null) { _rowValidator.validate(row); } <<<<<<< // exposed for unit tests ======= >>>>>>> // exposed for unit tests <<<<<<< /** * Utility for managing calculated columns. Calculated columns need to be * evaluated in dependency order. */ private class CalcColEvaluator { /** List of calculated columns in this table, ordered by calculation dependency */ private final List<ColumnImpl> _calcColumns = new ArrayList<ColumnImpl>(1); private boolean _sorted; public void add(ColumnImpl col) { if(!getDatabase().isEvaluateExpressions()) { return; } _calcColumns.add(col); // whenever we add new columns, we need to re-sort _sorted = false; } public void reSort() { // mark columns for re-sort on next use _sorted = false; } public void calculate(Object[] row) throws IOException { if(!_sorted) { sortColumnsByDeps(); _sorted = true; } for(ColumnImpl col : _calcColumns) { Object rowValue = col.getCalculationContext().eval(row); col.setRowValue(row, rowValue); } } private void sortColumnsByDeps() { // a topological sort sorts nodes where A -> B such that A ends up in // the list before B (assuming that we are working with a DAG). In our // case, we return "descendent" info as Field1 -> Field2 (where Field1 // uses Field2 in its calculation). This means that in order to // correctly calculate Field1, we need to calculate Field2 first, and // hence essentially need the reverse topo sort (a list where Field2 // comes before Field1). 
(new TopoSorter<ColumnImpl>(_calcColumns, TopoSorter.REVERSE) { @Override protected void getDescendents(ColumnImpl from, List<ColumnImpl> descendents) { Set<Identifier> identifiers = new LinkedHashSet<Identifier>(); from.getCalculationContext().collectIdentifiers(identifiers); for(Identifier identifier : identifiers) { if(isThisTable(identifier)) { String colName = identifier.getObjectName(); for(ColumnImpl calcCol : _calcColumns) { // we only care if the identifier is another calc field if(calcCol.getName().equalsIgnoreCase(colName)) { descendents.add(calcCol); } } } } } }).sort(); } } ======= >>>>>>> /** * Utility for managing calculated columns. Calculated columns need to be * evaluated in dependency order. */ private class CalcColEvaluator { /** List of calculated columns in this table, ordered by calculation dependency */ private final List<ColumnImpl> _calcColumns = new ArrayList<ColumnImpl>(1); private boolean _sorted; public void add(ColumnImpl col) { if(!getDatabase().isEvaluateExpressions()) { return; } _calcColumns.add(col); // whenever we add new columns, we need to re-sort _sorted = false; } public void reSort() { // mark columns for re-sort on next use _sorted = false; } public void calculate(Object[] row) throws IOException { if(!_sorted) { sortColumnsByDeps(); _sorted = true; } for(ColumnImpl col : _calcColumns) { Object rowValue = col.getCalculationContext().eval(row); col.setRowValue(row, rowValue); } } private void sortColumnsByDeps() { // a topological sort sorts nodes where A -> B such that A ends up in // the list before B (assuming that we are working with a DAG). In our // case, we return "descendent" info as Field1 -> Field2 (where Field1 // uses Field2 in its calculation). This means that in order to // correctly calculate Field1, we need to calculate Field2 first, and // hence essentially need the reverse topo sort (a list where Field2 // comes before Field1). (new TopoSorter<ColumnImpl>(_calcColumns, TopoSorter.REVERSE) { @Override protected void getDescendents(ColumnImpl from, List<ColumnImpl> descendents) { Set<Identifier> identifiers = new LinkedHashSet<Identifier>(); from.getCalculationContext().collectIdentifiers(identifiers); for(Identifier identifier : identifiers) { if(isThisTable(identifier)) { String colName = identifier.getObjectName(); for(ColumnImpl calcCol : _calcColumns) { // we only care if the identifier is another calc field if(calcCol.getName().equalsIgnoreCase(colName)) { descendents.add(calcCol); } } } } } }).sort(); } }
<<<<<<< return FirstOf( FunctionCall(), StringLiteral(), NamedValue(), ListInstantiation(), MapInstantiation(), Sequence("( ", Expression(), ") ") ); ======= return FirstOf( FunctionCallChain(), StringLiteral(), FunctionInstantiation(), NamedValue(), ListInstantiation(), Sequence("( ", Expression(), ") ") ); >>>>>>> return FirstOf( FunctionCallChain(), StringLiteral(), FunctionInstantiation(), NamedValue(), ListInstantiation(), MapInstantiation(), Sequence("( ", Expression(), ") ") );
<<<<<<< ======= import android.support.annotation.NonNull; import android.support.annotation.RestrictTo; import android.support.annotation.StyleRes; >>>>>>>
<<<<<<< import io.eventuate.javaclient.spring.jdbc.EventuateJdbcAccess; import io.eventuate.javaclient.spring.jdbc.EventuateSchema; ======= >>>>>>> import io.eventuate.javaclient.spring.jdbc.EventuateSchema; <<<<<<< import io.eventuate.local.common.SourceTableNameSupplier; import io.eventuate.local.java.jdbckafkastore.EventuateLocalAggregateCrud; ======= >>>>>>> import io.eventuate.local.common.SourceTableNameSupplier; <<<<<<< protected EventuateLocalAggregateCrud localAggregateCrud; protected CdcDataPublisher<PublishedEvent> cdcDataPublisher; @Before public void init() { localAggregateCrud = new EventuateLocalAggregateCrud(eventuateJdbcAccess); cdcDataPublisher = createCdcKafkaPublisher(); cdcDataPublisher.start(); } ======= >>>>>>> @Before public void init() { super.init(); cdcDataPublisher = createCdcKafkaPublisher(); cdcDataPublisher.start(); } <<<<<<< public void shouldSendPublishedEventsToKafka() throws InterruptedException { ======= public void shouldSendPublishedEventsToKafka() { CdcDataPublisher<PublishedEvent> cdcDataPublisher = createCdcKafkaPublisher(); cdcDataPublisher.start(); cdcProcessor.start(cdcDataPublisher::handleEvent); >>>>>>> public void shouldSendPublishedEventsToKafka() {
<<<<<<< private String oldDbHistoryTopicName = "eventuate.local.cdc.my-sql-connector.offset.storage"; ======= private String mySqlBinLogClientName = "MySqlBinLog"; >>>>>>> private String oldDbHistoryTopicName = "eventuate.local.cdc.my-sql-connector.offset.storage"; private String mySqlBinLogClientName = "MySqlBinLog"; <<<<<<< public String getOldDbHistoryTopicName() { return oldDbHistoryTopicName; } public void setOldDbHistoryTopicName(String oldDbHistoryTopicName) { this.oldDbHistoryTopicName = oldDbHistoryTopicName; } ======= public String getMySqlBinLogClientName() { return mySqlBinLogClientName; } public void setMySqlBinLogClientName(String mySqlBinLogClientName) { this.mySqlBinLogClientName = mySqlBinLogClientName; } >>>>>>> public String getOldDbHistoryTopicName() { return oldDbHistoryTopicName; } public void setOldDbHistoryTopicName(String oldDbHistoryTopicName) { this.oldDbHistoryTopicName = oldDbHistoryTopicName; } public String getMySqlBinLogClientName() { return mySqlBinLogClientName; } public void setMySqlBinLogClientName(String mySqlBinLogClientName) { this.mySqlBinLogClientName = mySqlBinLogClientName; }
<<<<<<< public MySqlBinaryLogClient mySqlBinaryLogClient(@Value("${spring.datasource.url}") String dataSourceURL, DataSource dataSource, EventuateConfigurationProperties eventuateConfigurationProperties, CuratorFramework curatorFramework, OffsetStore offsetStore, DebeziumBinlogOffsetKafkaStore debeziumBinlogOffsetKafkaStore) { ======= public MySqlBinaryLogClient<PublishedEvent> mySqlBinaryLogClient(@Value("${spring.datasource.url}") String dataSourceURL, EventuateConfigurationProperties eventuateConfigurationProperties, SourceTableNameSupplier sourceTableNameSupplier, IWriteRowsEventDataParser<PublishedEvent> eventDataParser, EventuateSchema eventuateSchema) { >>>>>>> public MySqlBinaryLogClient mySqlBinaryLogClient(@Value("${spring.datasource.url}") String dataSourceURL, DataSource dataSource, EventuateConfigurationProperties eventuateConfigurationProperties, CuratorFramework curatorFramework, OffsetStore offsetStore, DebeziumBinlogOffsetKafkaStore debeziumBinlogOffsetKafkaStore) { <<<<<<< ======= ResolvedEventuateSchema.make(eventuateSchema, jdbcUrl), sourceTableNameSupplier.getSourceTableName(), >>>>>>>
<<<<<<< ======= import com.azure.core.management.exception.ManagementException; import com.azure.core.util.logging.ClientLogger; >>>>>>> import com.azure.core.util.logging.ClientLogger; <<<<<<< public class WebApp extends AbstractAppService<com.azure.resourcemanager.appservice.models.WebApp> implements IWebApp { ======= public class WebApp implements IWebApp { private static final ClientLogger LOGGER = new ClientLogger(WebApp.class); >>>>>>> public class WebApp extends AbstractAppService<com.azure.resourcemanager.appservice.models.WebApp> implements IWebApp { private static final ClientLogger LOGGER = new ClientLogger(WebApp.class); <<<<<<< getRemoteResource().deploy(com.azure.resourcemanager.appservice.models.DeployType.fromString(deployType.getValue()), targetFile, options); ======= LOGGER.info(String.format("Deploying (%s)[%s] %s ...", TextUtils.cyan(targetFile.toString()), TextUtils.cyan(deployType.toString()), StringUtils.isBlank(targetPath) ? "" : (" to " + TextUtils.green(targetPath)))); getWebAppInner().deploy(com.azure.resourcemanager.appservice.models.DeployType.fromString(deployType.getValue()), targetFile, options); } @Override public boolean exists() { refreshWebAppInner(); return webAppInner != null; } @Override public String hostName() { return getWebAppInner().defaultHostname(); } @Override public String state() { return getWebAppInner().state(); } @Override public PublishingProfile getPublishingProfile() { final com.azure.resourcemanager.appservice.models.PublishingProfile publishingProfile = getWebAppInner().getPublishingProfile(); return PublishingProfile.createFromServiceModel(publishingProfile); } @Override public DiagnosticConfig getDiagnosticConfig() { return AppServiceUtils.fromWebAppDiagnosticLogs(getWebAppInner().diagnosticLogsConfig()); } @Override public Flux<String> streamAllLogsAsync() { return getWebAppInner().streamAllLogsAsync(); } @Override public Runtime getRuntime() { return AppServiceUtils.getRuntimeFromWebApp(getWebAppInner()); >>>>>>> LOGGER.info(String.format("Deploying (%s)[%s] %s ...", TextUtils.cyan(targetFile.toString()), TextUtils.cyan(deployType.toString()), StringUtils.isBlank(targetPath) ? "" : (" to " + TextUtils.green(targetPath)))); getRemoteResource().deploy(com.azure.resourcemanager.appservice.models.DeployType.fromString(deployType.getValue()), targetFile, options);
<<<<<<< import com.microsoft.azure.common.exceptions.AzureExecutionException; import com.microsoft.azure.common.logging.Log; import com.microsoft.azure.maven.model.DeploymentResource; ======= import com.microsoft.azure.toolkit.lib.common.exception.AzureExecutionException; import com.microsoft.azure.toolkit.lib.common.logging.Log; >>>>>>> import com.microsoft.azure.toolkit.lib.common.exception.AzureExecutionException; import com.microsoft.azure.toolkit.lib.common.logging.Log; import com.microsoft.azure.maven.model.DeploymentResource;
<<<<<<< if (!mergeErrorStream && err.size() > 0) { log.warn(StringUtils.trim(err.toString())); ======= if (err.size() > 0) { log.warn(err.toString()); >>>>>>> if (err.size() > 0) { log.warn(StringUtils.trim(err.toString()));
<<<<<<< public static String oracleFile = ""; public static boolean parse = false; // perform synchronous parsing ======= public static String oracleFile = null; >>>>>>> public static String oracleFile = null; public static boolean parse = false; // perform synchronous parsing
<<<<<<< import hudson.model.AbstractBuild; import hudson.model.AbstractProject; import hudson.model.BuildListener; import hudson.model.Node; import hudson.model.Result; import hudson.model.Run; ======= import hudson.model.Action; import hudson.model.BuildListener; >>>>>>> import hudson.model.Action; import hudson.model.BuildListener; import hudson.model.AbstractBuild; import hudson.model.AbstractProject; import hudson.model.Cause.UserCause; <<<<<<< import jenkins.model.Jenkins; ======= >>>>>>> import jenkins.model.Jenkins; import org.kohsuke.stapler.export.Exported; <<<<<<< PromotionTargetAction targetAction = getAction(PromotionTargetAction.class); AbstractBuild<?, ?> build = targetAction.resolve(); // TODO why would it ever be true that build != target? List<ManualApproval> approvals = build.getActions(ManualApproval.class); for(ManualApproval approval : approvals) { List<ParameterValue> params = approval.badge.getParameterValues(); for(ParameterValue value : params) { BuildWrapper wrapper = value.createBuildWrapper(Promotion.this); if(wrapper != null) { Environment e = wrapper.setUp(Promotion.this, launcher, listener); if(e==null) return Result.FAILURE; buildEnvironments.add(e); } } } ======= //PromotionTargetAction targetAction = getAction(PromotionTargetAction.class); List<ParameterValue> params=getParameterValues(); if (params!=null){ for(ParameterValue value : params) { BuildWrapper wrapper=value.createBuildWrapper(Promotion.this); if (wrapper!=null){ Environment e = wrapper.setUp(Promotion.this, launcher, listener); if(e==null) return Result.FAILURE; buildEnvironments.add(e); } } } >>>>>>> List<ParameterValue> params=getParameterValues(); if (params!=null){ for(ParameterValue value : params) { BuildWrapper wrapper=value.createBuildWrapper(Promotion.this); if (wrapper!=null){ Environment e = wrapper.setUp(Promotion.this, launcher, listener); if(e==null) return Result.FAILURE; buildEnvironments.add(e); } } }
<<<<<<< import org.activiti.engine.ActivitiException; import org.activiti.engine.history.HistoricProcessInstance; ======= import javax.xml.namespace.QName; import org.activiti.engine.impl.test.PluggableActivitiTestCase; >>>>>>> import javax.xml.namespace.QName; import org.activiti.engine.ActivitiException; import org.activiti.engine.history.HistoricProcessInstance;
<<<<<<< @Deployment(resources = { "org/activiti/engine/test/bpmn/callactivity/CallActivity.testStartUserIdSetWhenLooping.bpmn20.xml", "org/activiti/engine/test/bpmn/callactivity/simpleSubProcess.bpmn20.xml" }) public void testStartUserIdSetWhenLooping() { identityService.setAuthenticatedUserId("kermit"); ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("loopingCallActivity", CollectionUtil.singletonMap("input", 0)); for (int i=1; i<4; i++) { Task task = taskService.createTaskQuery().singleResult(); assertEquals("Task in subprocess", task.getName()); identityService.setAuthenticatedUserId("kermit"); taskService.complete(task.getId(), CollectionUtil.singletonMap("input", i)); } identityService.setAuthenticatedUserId(null); Task task = taskService.createTaskQuery().singleResult(); assertEquals("Final task", task.getName()); if (processEngineConfiguration.getHistoryLevel().isAtLeast(HistoryLevel.ACTIVITY)) { List<HistoricProcessInstance> historicProcessInstances = historyService.createHistoricProcessInstanceQuery() .superProcessInstanceId(processInstance.getId()).list(); assertEquals(3, historicProcessInstances.size()); for (HistoricProcessInstance historicProcessInstance : historicProcessInstances) { assertNotNull(historicProcessInstance.getStartUserId()); assertNotNull(historicProcessInstance.getStartTime()); assertNotNull(historicProcessInstance.getEndTime()); } } } ======= /** * Test case for sub process with DataObject */ @Deployment(resources = { "org/activiti/engine/test/bpmn/callactivity/DataObject.fatherProcess.bpmn20.xml", "org/activiti/engine/test/bpmn/callactivity/DataObject.subProcess.bpmn20.xml" }) public void testDataObject() { ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("DataObject_fatherProcess"); assertNotNull(processInstance); } >>>>>>> @Deployment(resources = { "org/activiti/engine/test/bpmn/callactivity/CallActivity.testStartUserIdSetWhenLooping.bpmn20.xml", "org/activiti/engine/test/bpmn/callactivity/simpleSubProcess.bpmn20.xml" }) public void testStartUserIdSetWhenLooping() { identityService.setAuthenticatedUserId("kermit"); ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("loopingCallActivity", CollectionUtil.singletonMap("input", 0)); for (int i=1; i<4; i++) { Task task = taskService.createTaskQuery().singleResult(); assertEquals("Task in subprocess", task.getName()); identityService.setAuthenticatedUserId("kermit"); taskService.complete(task.getId(), CollectionUtil.singletonMap("input", i)); } identityService.setAuthenticatedUserId(null); Task task = taskService.createTaskQuery().singleResult(); assertEquals("Final task", task.getName()); if (processEngineConfiguration.getHistoryLevel().isAtLeast(HistoryLevel.ACTIVITY)) { List<HistoricProcessInstance> historicProcessInstances = historyService.createHistoricProcessInstanceQuery() .superProcessInstanceId(processInstance.getId()).list(); assertEquals(3, historicProcessInstances.size()); for (HistoricProcessInstance historicProcessInstance : historicProcessInstances) { assertNotNull(historicProcessInstance.getStartUserId()); assertNotNull(historicProcessInstance.getStartTime()); assertNotNull(historicProcessInstance.getEndTime()); } } } /** * Test case for sub process with DataObject */ @Deployment(resources = { "org/activiti/engine/test/bpmn/callactivity/DataObject.fatherProcess.bpmn20.xml", "org/activiti/engine/test/bpmn/callactivity/DataObject.subProcess.bpmn20.xml" }) public void testDataObject() { ProcessInstance processInstance = 
runtimeService.startProcessInstanceByKey("DataObject_fatherProcess"); assertNotNull(processInstance); }
<<<<<<< package org.activiti.engine.test.bpmn.event.timer; import java.util.Calendar; import java.util.Date; import java.util.List; ======= >>>>>>>
<<<<<<< taskService.complete(tasks.get(1).getId()); Task task = taskService.createTaskQuery().taskAssignee("c").singleResult(); assertNotNull(task); taskService.complete(task.getId()); processInstance = runtimeService.createProcessInstanceQuery().processInstanceId(processInstance.getId()).singleResult(); assertNull(processInstance); variableMap = new HashMap<String, Object>(); variableMap.put("a", 1); variableMap.put("b", 2); processInstance = runtimeService.startProcessInstanceByKey("InclusiveGateway", variableMap); assertNotNull(processInstance.getId()); tasks = taskService.createTaskQuery().processInstanceId(processInstance.getId()).list(); assertEquals(1, taskService.createTaskQuery().count()); task = tasks.get(0); assertEquals("a", task.getAssignee()); taskService.complete(task.getId()); task = taskService.createTaskQuery().taskAssignee("c").singleResult(); assertNotNull(task); taskService.complete(task.getId()); processInstance = runtimeService.createProcessInstanceQuery().processInstanceId(processInstance.getId()).singleResult(); assertNull(processInstance); variableMap = new HashMap<String, Object>(); variableMap.put("a", 2); variableMap.put("b", 2); try { processInstance = runtimeService.startProcessInstanceByKey("InclusiveGateway", variableMap); fail(); } catch(ActivitiException e) { assertTrue(e.getMessage().contains("No outgoing sequence flow")); } } ======= } @Deployment(resources={"org/activiti/engine/test/bpmn/gateway/InclusiveGatewayTest.testJoinAfterCall.bpmn20.xml", "org/activiti/engine/test/bpmn/gateway/InclusiveGatewayTest.testJoinAfterCallSubProcess.bpmn20.xml"}) public void testJoinAfterCall() { // Test case to test act-1026 ProcessInstance processInstance = runtimeService .startProcessInstanceByKey("InclusiveGateway"); assertNotNull(processInstance.getId()); System.out.println("id " + processInstance.getId() + " " + processInstance.getProcessDefinitionId()); List<Task> tasks = taskService.createTaskQuery().list(); for (Task task : tasks) { System.out.println("task " + task.getName()); } assertEquals(3, taskService.createTaskQuery().count()); // now complate task A and check number of remaining tasks Task taskA = taskService.createTaskQuery().taskName("Task A").singleResult(); assertNotNull(taskA); taskService.complete(taskA.getId()); assertEquals(2, taskService.createTaskQuery().count()); // now complate task B and check number of remaining tasks Task taskB = taskService.createTaskQuery().taskName("Task B").singleResult(); assertNotNull(taskB); taskService.complete(taskB.getId()); assertEquals(1, taskService.createTaskQuery().count()); // now complate task C and check number of remaining tasks } >>>>>>> taskService.complete(tasks.get(1).getId()); Task task = taskService.createTaskQuery().taskAssignee("c").singleResult(); assertNotNull(task); taskService.complete(task.getId()); processInstance = runtimeService.createProcessInstanceQuery().processInstanceId(processInstance.getId()).singleResult(); assertNull(processInstance); variableMap = new HashMap<String, Object>(); variableMap.put("a", 1); variableMap.put("b", 2); processInstance = runtimeService.startProcessInstanceByKey("InclusiveGateway", variableMap); assertNotNull(processInstance.getId()); tasks = taskService.createTaskQuery().processInstanceId(processInstance.getId()).list(); assertEquals(1, taskService.createTaskQuery().count()); task = tasks.get(0); assertEquals("a", task.getAssignee()); taskService.complete(task.getId()); task = taskService.createTaskQuery().taskAssignee("c").singleResult(); 
assertNotNull(task); taskService.complete(task.getId()); processInstance = runtimeService.createProcessInstanceQuery().processInstanceId(processInstance.getId()).singleResult(); assertNull(processInstance); variableMap = new HashMap<String, Object>(); variableMap.put("a", 2); variableMap.put("b", 2); try { processInstance = runtimeService.startProcessInstanceByKey("InclusiveGateway", variableMap); fail(); } catch(ActivitiException e) { assertTrue(e.getMessage().contains("No outgoing sequence flow")); } } @Deployment(resources={"org/activiti/engine/test/bpmn/gateway/InclusiveGatewayTest.testJoinAfterCall.bpmn20.xml", "org/activiti/engine/test/bpmn/gateway/InclusiveGatewayTest.testJoinAfterCallSubProcess.bpmn20.xml"}) public void testJoinAfterCall() { // Test case to test act-1026 ProcessInstance processInstance = runtimeService .startProcessInstanceByKey("InclusiveGateway"); assertNotNull(processInstance.getId()); System.out.println("id " + processInstance.getId() + " " + processInstance.getProcessDefinitionId()); List<Task> tasks = taskService.createTaskQuery().list(); for (Task task : tasks) { System.out.println("task " + task.getName()); } assertEquals(3, taskService.createTaskQuery().count()); // now complate task A and check number of remaining tasks Task taskA = taskService.createTaskQuery().taskName("Task A").singleResult(); assertNotNull(taskA); taskService.complete(taskA.getId()); assertEquals(2, taskService.createTaskQuery().count()); // now complate task B and check number of remaining tasks Task taskB = taskService.createTaskQuery().taskName("Task B").singleResult(); assertNotNull(taskB); taskService.complete(taskB.getId()); assertEquals(1, taskService.createTaskQuery().count()); // now complate task C and check number of remaining tasks }
<<<<<<< public String getPrimaryStorageUuidForRootVolume() { return primaryStorageUuidForRootVolume; } public void setPrimaryStorageUuidForRootVolume(String primaryStorageUuidForRootVolume) { this.primaryStorageUuidForRootVolume = primaryStorageUuidForRootVolume; } ======= public String getRootPassword() { return rootPassword; } public void setRootPassword(String rootPassword) { this.rootPassword = rootPassword; } >>>>>>> public String getPrimaryStorageUuidForRootVolume() { return primaryStorageUuidForRootVolume; } public void setPrimaryStorageUuidForRootVolume(String primaryStorageUuidForRootVolume) { this.primaryStorageUuidForRootVolume = primaryStorageUuidForRootVolume; } public String getRootPassword() { return rootPassword; } public void setRootPassword(String rootPassword) { this.rootPassword = rootPassword; }
<<<<<<< private String primaryStorageUuidForRootVolume; ======= private String rootPassword; >>>>>>> private String primaryStorageUuidForRootVolume; private String rootPassword; <<<<<<< struct.setPrimaryStorageUuidForRootVolume(msg.getPrimaryStorageUuidForRootVolume()); ======= struct.setRootPassword(msg.getRootPassword()); >>>>>>> struct.setPrimaryStorageUuidForRootVolume(msg.getPrimaryStorageUuidForRootVolume()); struct.setRootPassword(msg.getRootPassword()); <<<<<<< struct.setPrimaryStorageUuidForRootVolume(msg.getPrimaryStorageUuidForRootVolume()); ======= struct.setRootPassword(msg.getRootPassword()); >>>>>>> struct.setPrimaryStorageUuidForRootVolume(msg.getPrimaryStorageUuidForRootVolume()); struct.setRootPassword(msg.getRootPassword());
<<<<<<< privateAliyun, ======= daho, >>>>>>> privateAliyun, daho,
<<<<<<< taskEntityManager.insert(task, (ExecutionEntity) execution); // Handling assignments need to be done after the task is inserted, to have an id handleAssignments(taskEntityManager, activeTaskAssignee, activeTaskOwner, activeTaskCandidateUsers, activeTaskCandidateGroups, task, execution); ======= handleAssignments(activeTaskAssignee, activeTaskOwner, activeTaskCandidateUsers, activeTaskCandidateGroups, task, expressionManager, execution); >>>>>>> taskEntityManager.insert(task, (ExecutionEntity) execution); // Handling assignments need to be done after the task is inserted, to have an id handleAssignments(taskEntityManager, activeTaskAssignee, activeTaskOwner, activeTaskCandidateUsers, activeTaskCandidateGroups, task, expressionManager, execution); <<<<<<< protected void handleAssignments(TaskEntityManager taskEntityManager, String assignee, String owner, List<String> candidateUsers, List<String> candidateGroups, TaskEntity task, DelegateExecution execution) { ======= protected void handleAssignments(String assignee, String owner, List<String> candidateUsers, List<String> candidateGroups, TaskEntity task, ExpressionManager expressionManager, DelegateExecution execution) { >>>>>>> protected void handleAssignments(TaskEntityManager taskEntityManager, String assignee, String owner, List<String> candidateUsers, List<String> candidateGroups, TaskEntity task, ExpressionManager expressionManager, DelegateExecution execution) {
<<<<<<< put("org.zstack.sdk.ResourceBindableConfigStruct", "org.zstack.resourceconfig.APIGetResourceBindableConfigReply$ResourceBindableConfigStruct"); put("org.zstack.sdk.ResourceConfigInventory", "org.zstack.resourceconfig.ResourceConfigInventory"); ======= put("org.zstack.sdk.ReplicationDiskStatus", "org.zstack.storage.primary.ministorage.ReplicationDiskStatus"); put("org.zstack.sdk.ReplicationGroupState", "org.zstack.imagereplicator.ReplicationGroupState"); put("org.zstack.sdk.ReplicationNetworkStatus", "org.zstack.storage.primary.ministorage.ReplicationNetworkStatus"); put("org.zstack.sdk.ReplicationRole", "org.zstack.storage.primary.ministorage.ReplicationRole"); put("org.zstack.sdk.ReplicationState", "org.zstack.storage.primary.ministorage.ReplicationState"); put("org.zstack.sdk.ResourceBindableConfigStruct", "org.zstack.core.config.resourceconfig.APIGetResourceBindableConfigReply$ResourceBindableConfigStruct"); put("org.zstack.sdk.ResourceConfigInventory", "org.zstack.core.config.resourceconfig.ResourceConfigInventory"); >>>>>>> put("org.zstack.sdk.ResourceBindableConfigStruct", "org.zstack.resourceconfig.APIGetResourceBindableConfigReply$ResourceBindableConfigStruct"); put("org.zstack.sdk.ResourceConfigInventory", "org.zstack.resourceconfig.ResourceConfigInventory"); put("org.zstack.sdk.ReplicationDiskStatus", "org.zstack.storage.primary.ministorage.ReplicationDiskStatus"); put("org.zstack.sdk.ReplicationGroupState", "org.zstack.imagereplicator.ReplicationGroupState"); put("org.zstack.sdk.ReplicationNetworkStatus", "org.zstack.storage.primary.ministorage.ReplicationNetworkStatus"); put("org.zstack.sdk.ReplicationRole", "org.zstack.storage.primary.ministorage.ReplicationRole"); put("org.zstack.sdk.ReplicationState", "org.zstack.storage.primary.ministorage.ReplicationState"); put("org.zstack.sdk.ResourceBindableConfigStruct", "org.zstack.core.config.resourceconfig.APIGetResourceBindableConfigReply$ResourceBindableConfigStruct"); put("org.zstack.sdk.ResourceConfigInventory", "org.zstack.core.config.resourceconfig.ResourceConfigInventory");
<<<<<<< CharSequence label = tracker.getPreviewText(key, mKeyboard.isShifted()); if (TextUtils.isEmpty(label)) { Drawable iconToDraw = getIconToDrawForKey(key, true); // Here's an annoying bug for you (explanation at the end of the // hack) ======= Drawable iconToDraw = getIconToDrawForKey(key, true); if (iconToDraw != null) { //Here's an annoying bug for you (explaination at the end of the hack) >>>>>>> Drawable iconToDraw = getIconToDrawForKey(key, true); if (iconToDraw != null) { // Here's an annoying bug for you (explaination at the end of the // hack)
<<<<<<< public abstract class PartitionReceiveHandler ======= import java.util.*; import org.apache.qpid.proton.message.Message; import com.microsoft.azure.servicebus.ReceiveHandler; /** * A handler class for the receive operation. Use any implementation of this abstract class to specify * user action when using PartitionReceiver's setReceiveHandler(). * @see {@link PartitionReceiver#setReceiveHandler} */ public abstract class PartitionReceiveHandler extends ReceiveHandler >>>>>>> /** * A handler class for the receive operation. Use any implementation of this abstract class to specify * user action when using PartitionReceiver's setReceiveHandler(). * @see {@link PartitionReceiver#setReceiveHandler} */ public abstract class PartitionReceiveHandler <<<<<<< public abstract void onError(Throwable error); public abstract void onClose(Throwable error); ======= /** * Generic version of onReceive. This method internally call the type specific version onReceive() instead. * @param messages the list of fetched messages from the underlying protocol layer. * @see {@link PartitionReceiveHandler#onReceive} */ @Override public void onReceiveMessages(LinkedList<Message> messages) { this.onReceive(EventDataUtil.toEventDataCollection(messages)); } >>>>>>> public abstract void onError(Throwable error); public abstract void onClose(Throwable error);
<<<<<<< bundle.putString(Project.NAME, mProject.name); bundle.putString(Project.FOLDER, mProject.folder); bundle.putInt(Project.COLOR, intent.getIntExtra("color", 0)); bundle.putString(Project.PREFIX, mProject.prefix); bundle.putString(Project.CODE, mProject.code); bundle.putString(Project.POSTFIX, mProject.postfix); ======= bundle.putString(Project.NAME, projectName); bundle.putString(Project.FOLDER, projectFolder); //bundle.putInt(Project.COLOR, intent.getIntExtra("color", 0)); bundle.putString(Project.PREFIX, prefixScript); bundle.putString(Project.CODE, code); bundle.putString(Project.POSTFIX, postfixScript); >>>>>>> bundle.putString(Project.NAME, mProject.name); bundle.putString(Project.FOLDER, mProject.folder); //bundle.putInt(Project.COLOR, intent.getIntExtra("color", 0)); bundle.putString(Project.PREFIX, mProject.prefix); bundle.putString(Project.CODE, mProject.code); bundle.putString(Project.POSTFIX, mProject.postfix);
<<<<<<< /* * Ivory: A Hadoop toolkit for web-scale information retrieval * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. */ package ivory.smrf.model.importance; import ivory.exception.ConfigurationException; import ivory.exception.RetrievalException; import ivory.smrf.model.Clique; import ivory.util.XMLTools; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import edu.umd.cloud9.util.map.HMapKF; /** * @author Don Metzler */ public class LinearImportanceModel extends ConceptImportanceModel { // MetaFeatures. private final List<MetaFeature> metaFeatures = Lists.newArrayList(); // MetaFeature values. private final Map<MetaFeature, HMapKF<String>> metaFeatureValues = Maps.newHashMap(); // Default feature values for each MetaFeature. private final HMapKF<String> defaultValues = new HMapKF<String>(); public void configure(Node model) throws ConfigurationException { // Clear meta-feature data. metaFeatures.clear(); metaFeatureValues.clear(); defaultValues.clear(); // Construct MRF feature by feature. NodeList children = model.getChildNodes(); float totalMetaFeatureWeight = 0.0f; for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if ("feature".equals(child.getNodeName())) { // collection_freq, document_freq, clue_cf, or enwiki_cf String metaFeatureName = XMLTools.getAttributeValue(child, "id", ""); float metaFeatureWeight = XMLTools.getAttributeValue(child, "weight", -1.0f); if ("".equals(metaFeatureName) || metaFeatureWeight == -1) { throw new ConfigurationException("Must specify metafeature name and weight."); } MetaFeature mf = new MetaFeature(metaFeatureName, metaFeatureWeight); metaFeatures.add(mf); totalMetaFeatureWeight += metaFeatureWeight; String file = XMLTools.getAttributeValue(child, "file", null); if (file == null) { throw new ConfigurationException("Must specify the location of the metafeature stats file."); } try { metaFeatureValues.put(mf, readDataStats(file)); } catch (IOException e) { throw new RetrievalException("Error: " + e); } float defaultValue = XMLTools.getAttributeValue(child, "default", 0.0f); defaultValues.put(mf.getName(), defaultValue); } } // Normalize meta feature weights. for (int i = 0; i < metaFeatures.size(); i++) { MetaFeature mf = (MetaFeature) metaFeatures.get(i); float w = mf.getWeight() / totalMetaFeatureWeight; mf.setWeight(w); } } @Override public float getConceptWeight(String concept) { // Compute query-dependent clique weight. 
float weight = 0.0f; for (MetaFeature mf : metaFeatures) { float metaWeight = mf.getWeight(); float cliqueFeatureVal = computeFeatureVal(concept, mf); weight += metaWeight * cliqueFeatureVal; } return weight; } @Override public float getCliqueWeight(Clique c) { return getConceptWeight(c.getConcept()); } private float computeFeatureVal(String cliqueTerms, MetaFeature f) { float count; // Get meta-feature values for f. HMapKF<String> mfValues = metaFeatureValues.get(f); // Look up value for clique terms. if (mfValues != null && mfValues.containsKey(cliqueTerms)) { count = mfValues.get(cliqueTerms); } else { count = defaultValues.get(f.getName()); } return count; } // Reads MetaFeature statistics from a file, public static HMapKF<String> readDataStats(String file) throws IOException { // TODO: This is a bit dangerous, as we should be passing in a handle to the FS from outside. Configuration conf = new Configuration(); HMapKF<String> values = new HMapKF<String>(); FileSystem fs = FileSystem.get(conf); BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(file)))); String line; while ((line = in.readLine()) != null) { String[] tokens = line.split("\t"); String concept = tokens[0]; float value = Float.parseFloat(tokens[1]); values.put(concept, value); } return values; } } ======= /* * Ivory: A Hadoop toolkit for web-scale information retrieval * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. */ package ivory.smrf.model.importance; import ivory.exception.ConfigurationException; import ivory.exception.RetrievalException; import ivory.smrf.model.Clique; import ivory.util.XMLTools; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import edu.umd.cloud9.util.HMapKF; /** * @author Don Metzler */ public class LinearImportanceModel extends ConceptImportanceModel { // MetaFeatures. private final List<MetaFeature> mMetaFeatures = Lists.newArrayList(); // MetaFeature values. private final Map<MetaFeature, HMapKF<String>> mMetaFeatureValues = Maps.newHashMap(); // Default feature values for each meta feature. private final HMapKF<String> mDefaultValues = new HMapKF<String>(); public void configure(Node model) throws ConfigurationException { // Clear meta-feature data. mMetaFeatures.clear(); mMetaFeatureValues.clear(); mDefaultValues.clear(); // Construct MRF feature by feature. 
NodeList children = model.getChildNodes(); float totalMetaFeatureWeight = 0.0f; for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if ("feature".equals(child.getNodeName())) { // collection_freq, document_freq, clue_cf, or enwiki_cf String metaFeatureName = XMLTools.getAttributeValue(child, "id", ""); float metaFeatureWeight = XMLTools.getAttributeValue(child, "weight", -1.0f); if (metaFeatureName == "" || metaFeatureWeight == -1) { throw new ConfigurationException("Must specify metafeature name and weight."); } MetaFeature mf = new MetaFeature(metaFeatureName, metaFeatureWeight); mMetaFeatures.add(mf); totalMetaFeatureWeight += metaFeatureWeight; String file = XMLTools.getAttributeValue(child, "file", null); if (file == null) { throw new ConfigurationException("Must specify the location of the metafeature stats file."); } try { mMetaFeatureValues.put(mf, readDataStats(file)); } catch (IOException e) { throw new RetrievalException("Error: " + e); } float defaultValue = XMLTools.getAttributeValue(child, "default", 0.0f); mDefaultValues.put(mf.getName(), defaultValue); } } // Normalize meta feature weights. for (int i = 0; i < mMetaFeatures.size(); i++) { MetaFeature mf = (MetaFeature) mMetaFeatures.get(i); float w = mf.getWeight() / totalMetaFeatureWeight; mf.setWeight(w); } } @Override public float getConceptWeight(String concept) { // Compute query-dependent clique weight. float weight = 0.0f; for (MetaFeature mf : mMetaFeatures) { float metaWeight = mf.getWeight(); float cliqueFeatureVal = computeFeatureVal(concept, mf); weight += metaWeight * cliqueFeatureVal; } return weight; } @Override public float getCliqueWeight(Clique c) { return getConceptWeight(c.getConcept()); } public float computeFeatureVal(String cliqueTerms, MetaFeature f) { float count; // Get meta-feature values for f. HMapKF<String> mfValues = mMetaFeatureValues.get(f); // Look up value for clique terms. if (mfValues != null && mfValues.containsKey(cliqueTerms)) { count = mfValues.get(cliqueTerms); } else { count = mDefaultValues.get(f.getName()); } return count; } public List<MetaFeature> getMetaFeatures() { return mMetaFeatures; } // Reads MetaFeature statistics from a file, public static HMapKF<String> readDataStats(String file) throws IOException { Configuration conf = new Configuration(); HMapKF<String> values = new HMapKF<String>(); FileSystem fs = FileSystem.get(conf); BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(file)))); String line; while ((line = in.readLine()) != null) { String[] tokens = line.split("\t"); String concept = tokens[0]; float value = Float.parseFloat(tokens[1]); values.put(concept, value); } return values; } } >>>>>>> /* * Ivory: A Hadoop toolkit for web-scale information retrieval * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ package ivory.smrf.model.importance; import ivory.exception.ConfigurationException; import ivory.exception.RetrievalException; import ivory.smrf.model.Clique; import ivory.util.XMLTools; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import edu.umd.cloud9.util.map.HMapKF; /** * @author Don Metzler */ public class LinearImportanceModel extends ConceptImportanceModel { // MetaFeatures. private final List<MetaFeature> mMetaFeatures = Lists.newArrayList(); // MetaFeature values. private final Map<MetaFeature, HMapKF<String>> mMetaFeatureValues = Maps.newHashMap(); // Default feature values for each meta feature. private final HMapKF<String> mDefaultValues = new HMapKF<String>(); public void configure(Node model) throws ConfigurationException { // Clear meta-feature data. mMetaFeatures.clear(); mMetaFeatureValues.clear(); mDefaultValues.clear(); // Construct MRF feature by feature. NodeList children = model.getChildNodes(); float totalMetaFeatureWeight = 0.0f; for (int i = 0; i < children.getLength(); i++) { Node child = children.item(i); if ("feature".equals(child.getNodeName())) { // collection_freq, document_freq, clue_cf, or enwiki_cf String metaFeatureName = XMLTools.getAttributeValue(child, "id", ""); float metaFeatureWeight = XMLTools.getAttributeValue(child, "weight", -1.0f); if (metaFeatureName == "" || metaFeatureWeight == -1) { throw new ConfigurationException("Must specify metafeature name and weight."); } MetaFeature mf = new MetaFeature(metaFeatureName, metaFeatureWeight); mMetaFeatures.add(mf); totalMetaFeatureWeight += metaFeatureWeight; String file = XMLTools.getAttributeValue(child, "file", null); if (file == null) { throw new ConfigurationException("Must specify the location of the metafeature stats file."); } try { mMetaFeatureValues.put(mf, readDataStats(file)); } catch (IOException e) { throw new RetrievalException("Error: " + e); } float defaultValue = XMLTools.getAttributeValue(child, "default", 0.0f); mDefaultValues.put(mf.getName(), defaultValue); } } // Normalize meta feature weights. for (int i = 0; i < mMetaFeatures.size(); i++) { MetaFeature mf = (MetaFeature) mMetaFeatures.get(i); float w = mf.getWeight() / totalMetaFeatureWeight; mf.setWeight(w); } } @Override public float getConceptWeight(String concept) { // Compute query-dependent clique weight. float weight = 0.0f; for (MetaFeature mf : mMetaFeatures) { float metaWeight = mf.getWeight(); float cliqueFeatureVal = computeFeatureVal(concept, mf); weight += metaWeight * cliqueFeatureVal; } return weight; } @Override public float getCliqueWeight(Clique c) { return getConceptWeight(c.getConcept()); } public float computeFeatureVal(String cliqueTerms, MetaFeature f) { float count; // Get meta-feature values for f. HMapKF<String> mfValues = mMetaFeatureValues.get(f); // Look up value for clique terms. 
if (mfValues != null && mfValues.containsKey(cliqueTerms)) { count = mfValues.get(cliqueTerms); } else { count = mDefaultValues.get(f.getName()); } return count; } public List<MetaFeature> getMetaFeatures() { return mMetaFeatures; } // Reads MetaFeature statistics from a file, public static HMapKF<String> readDataStats(String file) throws IOException { Configuration conf = new Configuration(); HMapKF<String> values = new HMapKF<String>(); FileSystem fs = FileSystem.get(conf); BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(file)))); String line; while ((line = in.readLine()) != null) { String[] tokens = line.split("\t"); String concept = tokens[0]; float value = Float.parseFloat(tokens[1]); values.put(concept, value); } return values; } }
<<<<<<< private Path collectionPath = new Path("/shared/collections/clir/trec/arabic.trec01-03_cleaned+nodups.docs"); private String index = this.getClass().getCanonicalName() + "-index"; ======= private Path collectionPath = new Path("/shared/collections/clir/trec/trec2001-02.ar-cleaned.xml"); private String index = "/tmp/" + this.getClass().getCanonicalName() + "-index"; >>>>>>> private Path collectionPath = new Path("/shared/collections/clir/trec/trec2001-02.ar-cleaned.xml"); private String index = this.getClass().getCanonicalName() + "-index";
<<<<<<< SampleWindow window = new ImmutableAverageSampleWindow(); window = window.addSample(bigRtt, 1); window = window.addSample(moderateRtt, 1); window = window.addSample(lowRtt, 1); ======= ImmutableAverageSampleWindow window = new ImmutableAverageSampleWindow(); window = window.addSample(bigRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(lowRtt, 1, false); >>>>>>> SampleWindow window = new ImmutableAverageSampleWindow(); window = window.addSample(bigRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(lowRtt, 1, false); <<<<<<< SampleWindow window = new ImmutableAverageSampleWindow(); window = window.addSample(bigRtt, 1); window = window.addSample(moderateRtt, 1); window = window.addSample(lowRtt, 1); window = window.addDroppedSample(1); Assert.assertEquals((bigRtt + moderateRtt + lowRtt) / 3, window.getTrackedRttNanos()); ======= ImmutableAverageSampleWindow window = new ImmutableAverageSampleWindow(); window = window.addSample(bigRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(lowRtt, 1, false); window = window.addSample(bigRtt, 1, true); Assert.assertEquals((bigRtt + moderateRtt + lowRtt + bigRtt) / 4, window.getTrackedRttNanos()); >>>>>>> SampleWindow window = new ImmutableAverageSampleWindow(); window = window.addSample(bigRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(lowRtt, 1, false); window = window.addSample(bigRtt, 1, true); Assert.assertEquals((bigRtt + moderateRtt + lowRtt + bigRtt) / 4, window.getTrackedRttNanos());
<<<<<<< SampleWindow window = new ImmutablePercentileSampleWindow(0.5, 10); window = window.addSample(bigRtt, 0); window = window.addSample(moderateRtt, 0); window = window.addSample(lowRtt, 0); ======= ImmutablePercentileSampleWindow window = new ImmutablePercentileSampleWindow(0.5); window = window.addSample(slowestRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(fastestRtt, 1, false); >>>>>>> SampleWindow window = new ImmutablePercentileSampleWindow(0.5, 10); window = window.addSample(bigRtt, 0, false); window = window.addSample(moderateRtt, 0, false); window = window.addSample(lowRtt, 0, false); <<<<<<< SampleWindow window = new ImmutablePercentileSampleWindow(0.5, 10); window = window.addSample(bigRtt, 0); window = window.addSample(moderateRtt, 0); window = window.addSample(lowRtt, 0); window = window.addDroppedSample(1); ======= ImmutablePercentileSampleWindow window = new ImmutablePercentileSampleWindow(0.5); window = window.addSample(slowestRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(fastestRtt, 1, false); window = window.addSample(slowestRtt, 1, true); >>>>>>> ImmutablePercentileSampleWindow window = new ImmutablePercentileSampleWindow(0.5); window = window.addSample(bigRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(lowRtt, 1, false); window = window.addSample(bigRtt, 1, true); <<<<<<< SampleWindow window = new ImmutablePercentileSampleWindow(0.999, 10); window = window.addSample(bigRtt, 0); window = window.addSample(moderateRtt, 0); window = window.addSample(lowRtt, 0); Assert.assertEquals(bigRtt, window.getTrackedRttNanos()); } @Test public void rttObservationOrderDoesntAffectResultValue() { SampleWindow window = new ImmutablePercentileSampleWindow(0.999, 10); window = window.addSample(moderateRtt, 0); window = window.addSample(lowRtt, 0); window = window.addSample(bigRtt, 0); Assert.assertEquals(bigRtt, window.getTrackedRttNanos()); ======= ImmutablePercentileSampleWindow window = new ImmutablePercentileSampleWindow(0.999); window = window.addSample(slowestRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(fastestRtt, 1, false); window = window.addSample(slowestRtt, 1, true); Assert.assertEquals(slowestRtt, window.getTrackedRttNanos()); >>>>>>> SampleWindow window = new ImmutablePercentileSampleWindow(0.999, 10); window = window.addSample(bigRtt, 1, false); window = window.addSample(moderateRtt, 1, false); window = window.addSample(lowRtt, 1, false); window = window.addSample(bigRtt, 1, true); Assert.assertEquals(bigRtt, window.getTrackedRttNanos()); } @Test public void rttObservationOrderDoesntAffectResultValue() { SampleWindow window = new ImmutablePercentileSampleWindow(0.999, 10); window = window.addSample(moderateRtt, 1, false); window = window.addSample(lowRtt, 1, false); window = window.addSample(bigRtt, 1, false); Assert.assertEquals(bigRtt, window.getTrackedRttNanos());
<<<<<<<
import com.github.alexthe666.iceandfire.core.ModWorld;
import com.github.alexthe666.iceandfire.event.EventLiving;
=======
import com.github.alexthe666.iceandfire.event.EventServer;
>>>>>>>
import com.github.alexthe666.iceandfire.core.ModWorld;
import com.github.alexthe666.iceandfire.event.EventLiving;
import com.github.alexthe666.iceandfire.event.EventServer;
<<<<<<<
@VisibleForTesting
final ConcurrentHashMap<ContainerId, AMContainer> containerMap;
=======
private final ConcurrentHashMap<ContainerId, AMContainer> containerMap;
private String auxiliaryService;
>>>>>>>
@VisibleForTesting
final ConcurrentHashMap<ContainerId, AMContainer> containerMap;
private String auxiliaryService;
<<<<<<<
AMContainer amc = createAmContainer(container, chh, tal, containerSignatureMatcher, context, schedulerId, launcherId, taskCommId);
=======
AMContainer amc = new AMContainerImpl(container, chh, tal, containerSignatureMatcher, context, schedulerId, launcherId, taskCommId, auxiliaryService);
>>>>>>>
AMContainer amc = createAmContainer(container, chh, tal, containerSignatureMatcher, context, schedulerId, launcherId, taskCommId, auxiliaryService);
<<<<<<<
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.tez.dag.api.TezConfiguration;
=======
>>>>>>>
import org.apache.tez.dag.api.TezConfiguration;
<<<<<<<
private boolean inputShouldBeConsumed(InputAttemptIdentifier id) {
  boolean isInputFinished = false;
  if (id instanceof CompositeInputAttemptIdentifier) {
    CompositeInputAttemptIdentifier cid = (CompositeInputAttemptIdentifier)id;
    isInputFinished = isInputFinished(cid.getInputIdentifier(), cid.getInputIdentifier() + cid.getInputIdentifierCount());
  } else {
    isInputFinished = isInputFinished(id.getInputIdentifier());
  }
  return !obsoleteInputs.contains(id) && !isInputFinished;
=======
private synchronized boolean inputShouldBeConsumed(InputAttemptIdentifier id) {
  return (!obsoleteInputs.contains(id) && !isInputFinished(id.getInputIdentifier()));
>>>>>>>
private synchronized boolean inputShouldBeConsumed(InputAttemptIdentifier id) {
  boolean isInputFinished = false;
  if (id instanceof CompositeInputAttemptIdentifier) {
    CompositeInputAttemptIdentifier cid = (CompositeInputAttemptIdentifier)id;
    isInputFinished = isInputFinished(cid.getInputIdentifier(), cid.getInputIdentifier() + cid.getInputIdentifierCount());
  } else {
    isInputFinished = isInputFinished(id.getInputIdentifier());
  }
  return !obsoleteInputs.contains(id) && !isInputFinished;
<<<<<<< // Inform the shuffle scheduler long endTime = System.currentTimeMillis(); // Reset retryStartTime as map task make progress if retried before. retryStartTime = 0; scheduler.copySucceeded(srcAttemptId, host, compressedLength, decompressedLength, endTime - startTime, mapOutput, false); // Note successful shuffle metrics.successFetch(); } remaining.remove(inputAttemptIdentifier.toString()); } catch(IOException ioe) { ======= scheduler.copySucceeded(srcAttemptId, host, compressedLength, decompressedLength, endTime - startTime, mapOutput, false); // Note successful shuffle remaining.remove(srcAttemptId.toString()); return null; } catch (IOException ioe) { >>>>>>> // Inform the shuffle scheduler long endTime = System.currentTimeMillis(); // Reset retryStartTime as map task make progress if retried before. retryStartTime = 0; scheduler.copySucceeded(srcAttemptId, host, compressedLength, decompressedLength, endTime - startTime, mapOutput, false); } remaining.remove(inputAttemptIdentifier.toString()); } catch(IOException ioe) { <<<<<<< metrics.failedFetch(); return new InputAttemptIdentifier[]{srcAttemptId}; ======= return new InputAttemptIdentifier[] {srcAttemptId}; >>>>>>> return new InputAttemptIdentifier[] {srcAttemptId}; <<<<<<< boolean hasFailures = false; // Fetch partition count number of map outputs (handles auto-reduce case) for (int curPartition = minPartition; curPartition <= maxPartition; curPartition++) { try { long startTime = System.currentTimeMillis(); // Partition id is the base partition id plus the relative offset int reduceId = host.getPartitionId() + curPartition - minPartition; srcAttemptId = scheduler.getIdentifierForFetchedOutput(srcAttemptId.getPathComponent(), reduceId); Path filename = getShuffleInputFileName(srcAttemptId.getPathComponent(), null); TezIndexRecord indexRecord = getIndexRecord(srcAttemptId.getPathComponent(), reduceId); mapOutput = getMapOutputForDirectDiskFetch(srcAttemptId, filename, indexRecord); long endTime = System.currentTimeMillis(); scheduler.copySucceeded(srcAttemptId, host, indexRecord.getPartLength(), indexRecord.getRawLength(), (endTime - startTime), mapOutput, true); metrics.successFetch(); } catch (IOException e) { if (mapOutput != null) { mapOutput.abort(); ======= try { long startTime = System.currentTimeMillis(); Path filename = getShuffleInputFileName(srcAttemptId.getPathComponent(), null); TezIndexRecord indexRecord = getIndexRecord(srcAttemptId.getPathComponent(), currentPartition); mapOutput = getMapOutputForDirectDiskFetch(srcAttemptId, filename, indexRecord); long endTime = System.currentTimeMillis(); scheduler.copySucceeded(srcAttemptId, host, indexRecord.getPartLength(), indexRecord.getRawLength(), (endTime - startTime), mapOutput, true); iter.remove(); } catch (IOException e) { if (mapOutput != null) { mapOutput.abort(); } if (!stopped) { ioErrs.increment(1); scheduler.copyFailed(srcAttemptId, host, true, false, true); LOG.warn("Failed to read local disk output of " + srcAttemptId + " from " + host.getHostIdentifier(), e); } else { if (LOG.isDebugEnabled()) { LOG.debug( "Ignoring fetch error during local disk copy since fetcher has already been stopped"); >>>>>>> boolean hasFailures = false; // Fetch partition count number of map outputs (handles auto-reduce case) for (int curPartition = minPartition; curPartition <= maxPartition; curPartition++) { try { long startTime = System.currentTimeMillis(); // Partition id is the base partition id plus the relative offset int reduceId = host.getPartitionId() + curPartition - 
minPartition; srcAttemptId = scheduler.getIdentifierForFetchedOutput(srcAttemptId.getPathComponent(), reduceId); Path filename = getShuffleInputFileName(srcAttemptId.getPathComponent(), null); TezIndexRecord indexRecord = getIndexRecord(srcAttemptId.getPathComponent(), reduceId); mapOutput = getMapOutputForDirectDiskFetch(srcAttemptId, filename, indexRecord); long endTime = System.currentTimeMillis(); scheduler.copySucceeded(srcAttemptId, host, indexRecord.getPartLength(), indexRecord.getRawLength(), (endTime - startTime), mapOutput, true); } catch (IOException e) { if (mapOutput != null) { mapOutput.abort();
<<<<<<<
mapHost = new MapHost(HOST, PORT, 0, 1);
fetcher = new FetcherOrderedGrouped(null, scheduler, merger, metrics, shuffle, null, false, 0,
=======
mapHost = new MapHost(HOST, PORT, 0);
fetcher = new FetcherOrderedGrouped(null, scheduler, merger, shuffle, null, false, 0,
>>>>>>>
mapHost = new MapHost(HOST, PORT, 0, 1);
fetcher = new FetcherOrderedGrouped(null, scheduler, merger, shuffle, null, false, 0,
<<<<<<<
MapHost host = new MapHost(HOST, PORT, 1, 1);
FetcherOrderedGrouped fetcher = new FetcherOrderedGrouped(null, scheduler, merger, metrics, shuffle, null, false, 0,
=======
MapHost host = new MapHost(HOST, PORT, 1);
FetcherOrderedGrouped fetcher = new FetcherOrderedGrouped(null, scheduler, merger, shuffle, null, false, 0,
>>>>>>>
MapHost host = new MapHost(HOST, PORT, 1, 1);
FetcherOrderedGrouped fetcher = new FetcherOrderedGrouped(null, scheduler, merger, shuffle, null, false, 0,
<<<<<<<
final MapHost host = new MapHost(HOST, PORT, 1, 1);
FetcherOrderedGrouped mockFetcher = new FetcherOrderedGrouped(null, scheduler, merger, metrics, shuffle, null, false, 0,
=======
final MapHost host = new MapHost(HOST, PORT, 1);
FetcherOrderedGrouped mockFetcher = new FetcherOrderedGrouped(null, scheduler, merger, shuffle, null, false, 0,
>>>>>>>
final MapHost host = new MapHost(HOST, PORT, 1, 1);
FetcherOrderedGrouped mockFetcher = new FetcherOrderedGrouped(null, scheduler, merger, shuffle, null, false, 0,
<<<<<<<
if (hve.getRepositoryVersion().getVersion().equals(request.getRepositoryVersion()) && allowed.contains(hve.getState())) {
=======
if (hve.getRepositoryVersion().getVersion().equals(request.getTargetVersion()) && (hve.getState() == RepositoryVersionState.INSTALLED || hve.getState() == RepositoryVersionState.NOT_REQUIRED)) {
>>>>>>>
if (hve.getRepositoryVersion().getVersion().equals(request.getTargetVersion()) && allowed.contains(hve.getState())) {
<<<<<<<
=======
import org.apache.ambari.server.state.RefreshCommandConfiguration;
import org.apache.ambari.server.state.RepositoryInfo;
>>>>>>>
import org.apache.ambari.server.state.RefreshCommandConfiguration;
<<<<<<<
import org.apache.ambari.server.topology.TopologyDeleteFormer;
=======
import org.apache.commons.collections.CollectionUtils;
>>>>>>>
import org.apache.ambari.server.topology.TopologyDeleteFormer;
import org.apache.commons.collections.CollectionUtils;
<<<<<<<
new HashSet<>(Arrays.asList(new String[]{ SERVICE_CLUSTER_NAME_PROPERTY_ID, SERVICE_SERVICE_NAME_PROPERTY_ID}));
=======
new HashSet<>(Arrays.asList(new String[]{ SERVICE_CLUSTER_NAME_PROPERTY_ID, SERVICE_SERVICE_NAME_PROPERTY_ID}));
/**
 * The property ids for an service resource.
 */
private static final Set<String> PROPERTY_IDS = new HashSet<>();
>>>>>>>
new HashSet<>(Arrays.asList(new String[]{ SERVICE_CLUSTER_NAME_PROPERTY_ID, SERVICE_SERVICE_NAME_PROPERTY_ID}));
/**
 * The property ids for an service resource.
 */
private static final Set<String> PROPERTY_IDS = new HashSet<>();
<<<<<<<
@Inject
private TopologyDeleteFormer topologyDeleteFormer;
=======
/**
 * Used to lookup the repository when creating services.
 */
private final RepositoryVersionDAO repositoryVersionDAO;
>>>>>>>
@Inject
private TopologyDeleteFormer topologyDeleteFormer;
/**
 * Used to lookup the repository when creating services.
 */
private final RepositoryVersionDAO repositoryVersionDAO;
<<<<<<<
=======
import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.dao.HostVersionDAO;
>>>>>>>
import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.dao.HostVersionDAO;
<<<<<<<
=======
clusterDAO = injector.getInstance(ClusterDAO.class);
hostDAO = injector.getInstance(HostDAO.class);
>>>>>>>
clusterDAO = injector.getInstance(ClusterDAO.class);
hostDAO = injector.getInstance(HostDAO.class);
<<<<<<< return resource; } /** * Validates a singular API request. * * @param upgradeContext the map of properties * @return the validated upgrade pack * @throws AmbariException */ private UpgradePack validateRequest(UpgradeContext upgradeContext) throws AmbariException { Cluster cluster = upgradeContext.getCluster(); Direction direction = upgradeContext.getDirection(); Map<String, Object> requestMap = upgradeContext.getUpgradeRequest(); UpgradeType upgradeType = upgradeContext.getType(); ======= // set the assocaited to/from version (to/from is dictated by direction) RepositoryVersionEntity repositoryVersion = entity.getRepositoryVersion(); setResourceProperty(resource, UPGRADE_ASSOCIATED_VERSION, repositoryVersion.getVersion(), requestedIds); >>>>>>> // set the assocaited to/from version (to/from is dictated by direction) RepositoryVersionEntity repositoryVersion = entity.getRepositoryVersion(); setResourceProperty(resource, UPGRADE_ASSOCIATED_VERSION, repositoryVersion.getVersion(), requestedIds); <<<<<<< ======= upgradeBeingReverted.setRevertAllowed(false); upgradeBeingReverted = s_upgradeDAO.merge(upgradeBeingReverted); } >>>>>>> upgradeBeingReverted.setRevertAllowed(false); upgradeBeingReverted = s_upgradeDAO.merge(upgradeBeingReverted); } <<<<<<< if (wrapper.getTasks() != null && wrapper.getTasks().size() > 0 && wrapper.getTasks().get(0).getService() != null) { String serviceName = wrapper.getTasks().get(0).getService(); ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(), stackId.getStackVersion(), serviceName); params.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder()); params.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder()); } ======= // add each host to this stage RequestResourceFilter filter = new RequestResourceFilter(serviceName, componentName, new ArrayList<>(wrapper.getHosts())); >>>>>>> // add each host to this stage RequestResourceFilter filter = new RequestResourceFilter(serviceName, componentName, new ArrayList<>(wrapper.getHosts())); <<<<<<< s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams); ======= s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams, jsons); >>>>>>> s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams, jsons);
<<<<<<<
import static org.easymock.EasyMock.anyLong;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
=======
import static com.google.common.collect.Sets.newLinkedHashSet;
import static java.util.Collections.emptySet;
import static java.util.Collections.singletonList;
>>>>>>>
import static com.google.common.collect.Sets.newLinkedHashSet;
import static java.util.Collections.emptySet;
import static java.util.Collections.singletonList;
import static org.easymock.EasyMock.anyLong;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
<<<<<<<
import org.apache.ambari.server.state.alert.Scope;
import org.apache.ambari.server.view.configuration.InstanceConfig;
=======
>>>>>>>
import org.apache.ambari.server.state.alert.Scope;
<<<<<<<
private Configuration configuration;
=======
Configuration configuration;
@Inject
AmbariLdapConfiguration ldapConfiguration;
>>>>>>>
private Configuration configuration;
@Inject
private AmbariLdapConfiguration ldapConfiguration;
<<<<<<<
.withConstructor(users, authoritiesPopulator, configuration).createMock();
=======
.withConstructor(configuration, ldapConfiguration, authoritiesPopulator, userDAO).createMock();
>>>>>>>
.withConstructor(users, configuration, ldapConfiguration, authoritiesPopulator).createMock();
<<<<<<<
.withConstructor(users, authoritiesPopulator, configuration).createMock();
=======
.withConstructor(configuration, ldapConfiguration, authoritiesPopulator, userDAO).createMock();
>>>>>>>
.withConstructor(users, configuration, ldapConfiguration, authoritiesPopulator).createMock();
<<<<<<< StackId stackId; Cluster cluster = clusterFsm.getCluster(clusterId); stackId = cluster.getDesiredStackVersion(); ======= Cluster cluster = clusterFsm.getCluster(clusterName); >>>>>>> Cluster cluster = clusterFsm.getCluster(clusterId); <<<<<<< LOG.debug("Received command report: " + report); Host host = clusterFsm.getHost(hostName); // HostEntity hostEntity = hostDAO.findByName(hostname); //don't touch database ======= LOG.debug("Received command report: {}", report); // get this locally; don't touch the database Host host = clusterFsm.getHost(hostname); >>>>>>> LOG.debug("Received command report: {}", report); // get this locally; don't touch the database Host host = clusterFsm.getHost(hostName); <<<<<<< ======= if ((report.getRoleCommand().equals(RoleCommand.START.toString()) || (report.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND.toString()) && ("START".equals(report.getCustomCommand()) || "RESTART".equals(report.getCustomCommand())))) && null != report.getConfigurationTags() && !report.getConfigurationTags().isEmpty()) { LOG.info("Updating applied config on service " + scHost.getServiceName() + ", component " + scHost.getServiceComponentName() + ", host " + scHost.getHostName()); scHost.updateActualConfigs(report.getConfigurationTags()); scHost.setRestartRequired(false); } // Necessary for resetting clients stale configs after starting service if ((RoleCommand.INSTALL.toString().equals(report.getRoleCommand()) || (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) && "INSTALL".equals(report.getCustomCommand()))) && svcComp.isClientComponent()){ scHost.updateActualConfigs(report.getConfigurationTags()); scHost.setRestartRequired(false); } >>>>>>> <<<<<<< if (status.getSecurityState() != null) { SecurityState prevSecurityState = scHost.getSecurityState(); SecurityState currentSecurityState = SecurityState.valueOf(status.getSecurityState()); if ((prevSecurityState != currentSecurityState)) { if (prevSecurityState.isEndpoint()) { scHost.setSecurityState(currentSecurityState); LOG.info(String.format("Security of service component %s of service %s of cluster %s " + "has changed from %s to %s on host %s", componentName, status.getServiceName(), status.getClusterId(), prevSecurityState, currentSecurityState, hostname)); } else { LOG.debug(String.format("Security of service component %s of service %s of cluster %s " + "has changed from %s to %s on host %s but will be ignored since %s is a " + "transitional state", componentName, status.getServiceName(), status.getClusterId(), prevSecurityState, currentSecurityState, hostname, prevSecurityState)); } } } if (null != status.getStackVersion() && !status.getStackVersion().isEmpty()) { scHost.setStackVersion(gson.fromJson(status.getStackVersion(), StackId.class)); } ======= >>>>>>>
<<<<<<<
import org.apache.ambari.server.controller.internal.DeleteHostComponentStatusMetaData;
=======
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
>>>>>>>
import org.apache.ambari.server.controller.internal.DeleteHostComponentStatusMetaData;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
<<<<<<<
void delete(DeleteHostComponentStatusMetaData deleteMetaData);
=======
void delete() throws AmbariException;
/**
 * This method computes the state of the repository that's associated with the desired
 * version. It is used, for example, when a host component reports its version and the
 * state can be in flux.
 *
 * @param reportedVersion
 * @throws AmbariException
 */
void updateRepositoryState(String reportedVersion) throws AmbariException;
/**
 * @return the repository state for the desired version
 */
RepositoryVersionState getRepositoryState();
>>>>>>>
void delete(DeleteHostComponentStatusMetaData deleteMetaData);
/**
 * This method computes the state of the repository that's associated with the desired
 * version. It is used, for example, when a host component reports its version and the
 * state can be in flux.
 *
 * @param reportedVersion
 * @throws AmbariException
 */
void updateRepositoryState(String reportedVersion) throws AmbariException;
/**
 * @return the repository state for the desired version
 */
RepositoryVersionState getRepositoryState();
<<<<<<<
import java.lang.reflect.Method;
=======
import java.lang.reflect.Field;
>>>>>>>
import java.lang.reflect.Method;
import java.lang.reflect.Field;
<<<<<<<
Method injectKeytabMethod = agentCommandsPublisher.getClass().getDeclaredMethod("injectKeytab", ExecutionCommand.class, String.class, String.class);
injectKeytabMethod.setAccessible(true);
injectKeytabMethod.invoke(agentCommandsPublisher, executionCommand, "SET_KEYTAB", targetHost);
=======
HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
commandparams.put(KerberosServerAction.DATA_DIRECTORY, createTestKeytabData(handler).getAbsolutePath());
handler.injectKeytab(executionCommand, SET_KEYTAB, targetHost);
>>>>>>>
Method injectKeytabMethod = agentCommandsPublisher.getClass().getDeclaredMethod("injectKeytab", ExecutionCommand.class, String.class, String.class);
injectKeytabMethod.setAccessible(true);
commandparams.put(KerberosServerAction.DATA_DIRECTORY, createTestKeytabData(agentCommandsPublisher).getAbsolutePath());
injectKeytabMethod.invoke(agentCommandsPublisher, executionCommand, "SET_KEYTAB", targetHost);
<<<<<<<
Method injectKeytabMethod = agentCommandsPublisher.getClass().getDeclaredMethod("injectKeytab", ExecutionCommand.class, String.class, String.class);
injectKeytabMethod.setAccessible(true);
injectKeytabMethod.invoke(agentCommandsPublisher, executionCommand, "REMOVE_KEYTAB", targetHost);
=======
HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
commandparams.put(KerberosServerAction.DATA_DIRECTORY, createTestKeytabData(handler).getAbsolutePath());
handler.injectKeytab(executionCommand, REMOVE_KEYTAB, targetHost);
>>>>>>>
Method injectKeytabMethod = agentCommandsPublisher.getClass().getDeclaredMethod("injectKeytab", ExecutionCommand.class, String.class, String.class);
injectKeytabMethod.setAccessible(true);
commandparams.put(KerberosServerAction.DATA_DIRECTORY, createTestKeytabData(agentCommandsPublisher).getAbsolutePath());
injectKeytabMethod.invoke(agentCommandsPublisher, executionCommand, "REMOVE_KEYTAB", targetHost);
<<<<<<< ======= * Performs basic configuration of session manager with static values and values from * configuration file. * * @param sessionManager session manager */ protected void configureSessionManager(SessionManager sessionManager) { // use AMBARISESSIONID instead of JSESSIONID to avoid conflicts with // other services (like HDFS) that run on the same context but a different // port sessionManager.getSessionCookieConfig().setName("AMBARISESSIONID"); sessionManager.getSessionCookieConfig().setHttpOnly(true); if (configs.getApiSSLAuthentication()) { sessionManager.getSessionCookieConfig().setSecure(true); } } protected void configureMaxInactiveInterval() { // each request that does not use AMBARISESSIONID will create a new // HashedSession in Jetty; these MUST be reaped after inactivity in order // to prevent a memory leak int sessionInactivityTimeout = configs.getHttpSessionInactiveTimeout(); sessionManager.setMaxInactiveInterval(sessionInactivityTimeout); } /** >>>>>>>
<<<<<<< import org.apache.ambari.server.actionmanager.HostRoleStatus; import org.apache.ambari.server.api.services.AmbariMetaInfo; ======= >>>>>>> <<<<<<< import org.apache.ambari.server.orm.entities.StageEntity; import org.apache.ambari.server.orm.entities.UpgradeEntity; import org.apache.ambari.server.state.stack.upgrade.Direction; import org.apache.ambari.server.state.stack.upgrade.UpgradeType; ======= import org.apache.ambari.server.orm.entities.StackEntity; >>>>>>> import org.apache.ambari.server.orm.entities.StackEntity; <<<<<<< Assert.assertNotNull(serviceComponentDesiredStateEntity); UpgradeEntity upgradeEntity = createUpgradeEntity("2.2.0.0", "2.2.0.1"); ServiceComponentHistoryEntity history = new ServiceComponentHistoryEntity(); history.setFromStack(serviceComponentDesiredStateEntity.getDesiredStack()); history.setToStack(serviceComponentDesiredStateEntity.getDesiredStack()); history.setUpgrade(upgradeEntity); history.setServiceComponentDesiredState(serviceComponentDesiredStateEntity); serviceComponentDesiredStateEntity.addHistory(history); serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.merge( serviceComponentDesiredStateEntity); serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( cluster.getClusterId(), serviceName, componentName); assertEquals(1, serviceComponentDesiredStateEntity.getHistory().size()); // verify that we can retrieve the history directly List<ServiceComponentHistoryEntity> componentHistoryList = serviceComponentDesiredStateDAO.findHistory( sc.getClusterId(), sc.getServiceName(), sc.getName()); assertEquals(1, componentHistoryList.size()); // delete the SC sc.delete(new DeleteHostComponentStatusMetaData()); ======= RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0", "2.2.0.1-1111", "[]"); >>>>>>> RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0", "2.2.0.1-1111", "[]"); <<<<<<< /** * Creates an upgrade entity, asserting it was created correctly. * * @param fromVersion * @param toVersion * @return */ private UpgradeEntity createUpgradeEntity(String fromVersion, String toVersion) { RequestDAO requestDAO = injector.getInstance(RequestDAO.class); RequestEntity requestEntity = new RequestEntity(); requestEntity.setRequestId(99L); requestEntity.setClusterId(cluster.getClusterId()); requestEntity.setStatus(HostRoleStatus.PENDING); requestEntity.setStages(new ArrayList<StageEntity>()); requestDAO.create(requestEntity); UpgradeDAO upgradeDao = injector.getInstance(UpgradeDAO.class); UpgradeEntity upgradeEntity = new UpgradeEntity(); upgradeEntity.setClusterId(cluster.getClusterId()); upgradeEntity.setDirection(Direction.UPGRADE); upgradeEntity.setFromVersion(fromVersion); upgradeEntity.setToVersion(toVersion); upgradeEntity.setUpgradePackage("upgrade_test"); upgradeEntity.setUpgradeType(UpgradeType.ROLLING); upgradeEntity.setRequestEntity(requestEntity); upgradeDao.create(upgradeEntity); List<UpgradeEntity> upgrades = upgradeDao.findUpgrades(cluster.getClusterId()); assertEquals(1, upgrades.size()); return upgradeEntity; } ======= >>>>>>>
<<<<<<< import java.util.Collection; ======= >>>>>>> <<<<<<< import org.apache.ambari.server.orm.entities.PrincipalEntity; import org.apache.ambari.server.orm.entities.UserAuthenticationEntity; import org.apache.ambari.server.orm.entities.UserEntity; ======= >>>>>>> import org.apache.ambari.server.orm.entities.PrincipalEntity; import org.apache.ambari.server.orm.entities.UserAuthenticationEntity; import org.apache.ambari.server.orm.entities.UserEntity; <<<<<<< ======= import org.springframework.security.crypto.password.PasswordEncoder; import org.springframework.security.crypto.password.StandardPasswordEncoder; >>>>>>> import org.springframework.security.crypto.password.PasswordEncoder; import org.springframework.security.crypto.password.StandardPasswordEncoder; <<<<<<< public class AmbariPamAuthenticationProviderTest { private static Injector injector; @Inject private AmbariPamAuthenticationProvider authenticationProvider; @Inject private Configuration configuration; ======= public class AmbariPamAuthenticationProviderTest extends EasyMockSupport { >>>>>>> public class AmbariPamAuthenticationProviderTest extends EasyMockSupport { <<<<<<< expect(unixUser.getUserName()).andReturn(TEST_USER_NAME).atLeastOnce(); UserEntity userEntity = combineUserEntity(); User user = new User(userEntity); UserDAO userDAO = createNiceMock(UserDAO.class); Collection<AmbariGrantedAuthority> userAuthorities = Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class)); expect(pam.authenticate(EasyMock.anyObject(String.class), EasyMock.anyObject(String.class))).andReturn(unixUser).atLeastOnce(); expect(unixUser.getGroups()).andReturn(Collections.singleton("group")).atLeastOnce(); EasyMock.replay(unixUser); EasyMock.replay(pam); Authentication authentication = new AmbariUserAuthentication("userPass", user, userAuthorities); Authentication result = authenticationProvider.authenticateViaPam(pam,authentication); expect(userDAO.findUserByName("userName")).andReturn(null).once(); ======= expect(unixUser.getGroups()).andReturn(Collections.singleton("group")).atLeastOnce(); PAM pam = createMock(PAM.class); expect(pam.authenticate(eq(TEST_USER_NAME), eq(TEST_USER_PASS))) .andReturn(unixUser) .once(); pam.dispose(); expectLastCall().once(); PamAuthenticationFactory pamAuthenticationFactory = injector.getInstance(PamAuthenticationFactory.class); expect(pamAuthenticationFactory.createInstance(anyObject(String.class))).andReturn(pam).once(); replayAll(); Authentication authentication = new UsernamePasswordAuthenticationToken(TEST_USER_NAME, TEST_USER_PASS); AmbariPamAuthenticationProvider authenticationProvider = injector.getInstance(AmbariPamAuthenticationProvider.class); Authentication result = authenticationProvider.authenticate(authentication); verifyAll(); >>>>>>> expect(unixUser.getUserName()).andReturn(TEST_USER_NAME).atLeastOnce(); PAM pam = createMock(PAM.class); expect(pam.authenticate(eq(TEST_USER_NAME), eq(TEST_USER_PASS))).andReturn(unixUser).once(); UserEntity userEntity = combineUserEntity(); UserDAO userDAO = injector.getInstance(UserDAO.class); expect(userDAO.findUserByName(TEST_USER_NAME)).andReturn(userEntity).once(); MemberDAO memberDAO = injector.getInstance(MemberDAO.class); expect(memberDAO.findAllMembersByUser(userEntity)).andReturn(Collections.emptyList()).once(); PrivilegeDAO privilegeDAO = injector.getInstance(PrivilegeDAO.class); expect(privilegeDAO.findAllByPrincipal(anyObject())).andReturn(Collections.emptyList()).once(); replayAll(); Authentication authentication = new 
UsernamePasswordAuthenticationToken(TEST_USER_NAME, TEST_USER_PASS); AmbariPamAuthenticationProvider authenticationProvider = injector.getInstance(AmbariPamAuthenticationProvider.class); Authentication result = authenticationProvider.authenticateViaPam(pam, authentication); <<<<<<< private UserEntity combineUserEntity() { PrincipalEntity principalEntity = new PrincipalEntity(); UserAuthenticationEntity userAuthenticationEntity = new UserAuthenticationEntity(); userAuthenticationEntity.setAuthenticationType(UserAuthenticationType.PAM); userAuthenticationEntity.setAuthenticationKey(TEST_USER_NAME); UserEntity userEntity = new UserEntity(); userEntity.setUserId(1); userEntity.setUserName(UserName.fromString(TEST_USER_NAME).toString()); userEntity.setPrincipal(principalEntity); userEntity.setAuthenticationEntities(Collections.singletonList(userAuthenticationEntity)); return userEntity; } ======= >>>>>>> private UserEntity combineUserEntity() { PrincipalEntity principalEntity = new PrincipalEntity(); UserAuthenticationEntity userAuthenticationEntity = new UserAuthenticationEntity(); userAuthenticationEntity.setAuthenticationType(UserAuthenticationType.PAM); userAuthenticationEntity.setAuthenticationKey(TEST_USER_NAME); UserEntity userEntity = new UserEntity(); userEntity.setUserId(1); userEntity.setUserName(UserName.fromString(TEST_USER_NAME).toString()); userEntity.setPrincipal(principalEntity); userEntity.setAuthenticationEntities(Collections.singletonList(userAuthenticationEntity)); return userEntity; }
<<<<<<<
private final boolean masterComponent;
=======
private final Long m_hostId;
>>>>>>>
private final boolean masterComponent;
private final Long m_hostId;
<<<<<<<
String hostName, boolean recoveryEnabled, boolean masterComponent) {
=======
String hostName, boolean recoveryEnabled, Long hostId) {
>>>>>>>
String hostName, boolean recoveryEnabled, boolean masterComponent, Long hostId) {
<<<<<<<
this.masterComponent = masterComponent;
=======
m_hostId = hostId;
>>>>>>>
this.masterComponent = masterComponent;
m_hostId = hostId;
<<<<<<<
public boolean isMasterComponent() {
  return masterComponent;
}
=======
public Long getHostId() {
  return m_hostId;
}
>>>>>>>
public boolean isMasterComponent() {
  return masterComponent;
}
public Long getHostId() {
  return m_hostId;
}
<<<<<<< import org.apache.ambari.server.state.State; ======= import org.apache.ambari.server.state.UpgradeState; >>>>>>> import org.apache.ambari.server.state.State; import org.apache.ambari.server.state.UpgradeState; <<<<<<< public void restoreComponentsStatuses() throws AmbariException { Long clusterId = null; for (Cluster cluster : clusters.getClustersForHost(getHostName())) { clusterId = cluster.getClusterId(); for (ServiceComponentHost sch : cluster.getServiceComponentHosts(getHostName())) { Service s = cluster.getService(sch.getServiceName()); ServiceComponent sc = s.getServiceComponent(sch.getServiceComponentName()); if (!sc.isClientComponent() && sch.getState().equals(State.UNKNOWN)) { State lastValidState = sch.getLastValidState(); LOG.warn("Restore component state to last valid state for component " + sc.getName() + " on " + getHostName() + " to " + lastValidState); sch.setState(lastValidState); } } } //TODO if (clusterId != null) { calculateHostStatus(clusterId); } } @Override public void calculateHostStatus(Long clusterId) throws AmbariException { //Use actual component status to compute the host status int masterCount = 0; int mastersRunning = 0; int slaveCount = 0; int slavesRunning = 0; StackId stackId; Cluster cluster = clusters.getCluster(clusterId); stackId = cluster.getDesiredStackVersion(); List<ServiceComponentHost> scHosts = cluster.getServiceComponentHosts(hostName); for (ServiceComponentHost scHost : scHosts) { ComponentInfo componentInfo = ambariMetaInfo.getComponent(stackId.getStackName(), stackId.getStackVersion(), scHost.getServiceName(), scHost.getServiceComponentName()); String status = scHost.getState().name(); String category = componentInfo.getCategory(); if (MaintenanceState.OFF == maintenanceStateHelper.getEffectiveState(scHost, this)) { if (category.equals("MASTER")) { ++masterCount; if (status.equals("STARTED")) { ++mastersRunning; } } else if (category.equals("SLAVE")) { ++slaveCount; if (status.equals("STARTED")) { ++slavesRunning; } } } } HostHealthStatus.HealthStatus healthStatus; if (masterCount == mastersRunning && slaveCount == slavesRunning) { healthStatus = HostHealthStatus.HealthStatus.HEALTHY; } else if (masterCount > 0 && mastersRunning < masterCount) { healthStatus = HostHealthStatus.HealthStatus.UNHEALTHY; } else { healthStatus = HostHealthStatus.HealthStatus.ALERT; } setStatus(healthStatus.name()); } @Transactional public void updateHost(HostRegistrationRequestEvent e) { importHostInfo(e.hostInfo); setLastRegistrationTime(e.registrationTime); //Initialize heartbeat time and timeInState with registration time. 
setLastHeartbeatTime(e.registrationTime); setLastAgentEnv(e.agentEnv); setTimeInState(e.registrationTime); setAgentVersion(e.agentVersion); setPublicHostName(e.publicHostName); setTimeInState(System.currentTimeMillis()); setState(HostState.INIT); } ======= /** * {@inheritDoc} */ @Override public boolean isRepositoryVersionCorrect(RepositoryVersionEntity repositoryVersion) throws AmbariException { HostEntity hostEntity = getHostEntity(); Collection<HostComponentStateEntity> hostComponentStates = hostEntity.getHostComponentStateEntities(); // for every host component, if it matches the desired repo and has reported // the correct version then we're good for (HostComponentStateEntity hostComponentState : hostComponentStates) { ServiceComponentDesiredStateEntity desiredComponmentState = hostComponentState.getServiceComponentDesiredStateEntity(); RepositoryVersionEntity desiredRepositoryVersion = desiredComponmentState.getDesiredRepositoryVersion(); ComponentInfo componentInfo = ambariMetaInfo.getComponent( desiredRepositoryVersion.getStackName(), desiredRepositoryVersion.getStackVersion(), hostComponentState.getServiceName(), hostComponentState.getComponentName()); // skip components which don't advertise a version if (!componentInfo.isVersionAdvertised()) { continue; } // we only care about checking the specified repo version for this host if (!repositoryVersion.equals(desiredRepositoryVersion)) { continue; } String versionAdvertised = hostComponentState.getVersion(); if (hostComponentState.getUpgradeState() == UpgradeState.IN_PROGRESS || !StringUtils.equals(versionAdvertised, repositoryVersion.getVersion())) { return false; } } return true; } >>>>>>> public void restoreComponentsStatuses() throws AmbariException { Long clusterId = null; for (Cluster cluster : clusters.getClustersForHost(getHostName())) { clusterId = cluster.getClusterId(); for (ServiceComponentHost sch : cluster.getServiceComponentHosts(getHostName())) { Service s = cluster.getService(sch.getServiceName()); ServiceComponent sc = s.getServiceComponent(sch.getServiceComponentName()); if (!sc.isClientComponent() && sch.getState().equals(State.UNKNOWN)) { State lastValidState = sch.getLastValidState(); LOG.warn("Restore component state to last valid state for component " + sc.getName() + " on " + getHostName() + " to " + lastValidState); sch.setState(lastValidState); } } } //TODO if (clusterId != null) { calculateHostStatus(clusterId); } } @Override public void calculateHostStatus(Long clusterId) throws AmbariException { //Use actual component status to compute the host status int masterCount = 0; int mastersRunning = 0; int slaveCount = 0; int slavesRunning = 0; StackId stackId; Cluster cluster = clusters.getCluster(clusterId); stackId = cluster.getDesiredStackVersion(); List<ServiceComponentHost> scHosts = cluster.getServiceComponentHosts(hostName); for (ServiceComponentHost scHost : scHosts) { ComponentInfo componentInfo = ambariMetaInfo.getComponent(stackId.getStackName(), stackId.getStackVersion(), scHost.getServiceName(), scHost.getServiceComponentName()); String status = scHost.getState().name(); String category = componentInfo.getCategory(); if (MaintenanceState.OFF == maintenanceStateHelper.getEffectiveState(scHost, this)) { if (category.equals("MASTER")) { ++masterCount; if (status.equals("STARTED")) { ++mastersRunning; } } else if (category.equals("SLAVE")) { ++slaveCount; if (status.equals("STARTED")) { ++slavesRunning; } } } } HostHealthStatus.HealthStatus healthStatus; if (masterCount == mastersRunning && 
slaveCount == slavesRunning) { healthStatus = HostHealthStatus.HealthStatus.HEALTHY; } else if (masterCount > 0 && mastersRunning < masterCount) { healthStatus = HostHealthStatus.HealthStatus.UNHEALTHY; } else { healthStatus = HostHealthStatus.HealthStatus.ALERT; } setStatus(healthStatus.name()); } @Transactional public void updateHost(HostRegistrationRequestEvent e) { importHostInfo(e.hostInfo); setLastRegistrationTime(e.registrationTime); //Initialize heartbeat time and timeInState with registration time. setLastHeartbeatTime(e.registrationTime); setLastAgentEnv(e.agentEnv); setTimeInState(e.registrationTime); setAgentVersion(e.agentVersion); setPublicHostName(e.publicHostName); setTimeInState(System.currentTimeMillis()); setState(HostState.INIT); } /** * {@inheritDoc} */ @Override public boolean isRepositoryVersionCorrect(RepositoryVersionEntity repositoryVersion) throws AmbariException { HostEntity hostEntity = getHostEntity(); Collection<HostComponentStateEntity> hostComponentStates = hostEntity.getHostComponentStateEntities(); // for every host component, if it matches the desired repo and has reported // the correct version then we're good for (HostComponentStateEntity hostComponentState : hostComponentStates) { ServiceComponentDesiredStateEntity desiredComponmentState = hostComponentState.getServiceComponentDesiredStateEntity(); RepositoryVersionEntity desiredRepositoryVersion = desiredComponmentState.getDesiredRepositoryVersion(); ComponentInfo componentInfo = ambariMetaInfo.getComponent( desiredRepositoryVersion.getStackName(), desiredRepositoryVersion.getStackVersion(), hostComponentState.getServiceName(), hostComponentState.getComponentName()); // skip components which don't advertise a version if (!componentInfo.isVersionAdvertised()) { continue; } // we only care about checking the specified repo version for this host if (!repositoryVersion.equals(desiredRepositoryVersion)) { continue; } String versionAdvertised = hostComponentState.getVersion(); if (hostComponentState.getUpgradeState() == UpgradeState.IN_PROGRESS || !StringUtils.equals(versionAdvertised, repositoryVersion.getVersion())) { return false; } } return true; }
<<<<<<<
public static final int REMOVE = 2;
public static final int MANAGER = 3;
public static final int SAVE = 4;
=======
public static final int EDIT = 2;
>>>>>>>
public static final int MANAGER = 3;
public static final int SAVE = 4;
public static final int EDIT = 2;
<<<<<<<
// TODO should be a plural string.
menu.add(0, REMOVE, 2, R.string.removeSong).setIcon(android.R.drawable.ic_menu_delete);
menu.add(0, MANAGER, 3, "Manager").setIcon(android.R.drawable.ic_menu_manage);
menu.add(0, SAVE, 4, "Save").setIcon(android.R.drawable.ic_menu_save);
=======
menu.add(0, EDIT, 2, R.string.editPlaylist).setIcon(android.R.drawable.ic_menu_edit);
>>>>>>>
menu.add(0, EDIT, 2, R.string.editPlaylist).setIcon(android.R.drawable.ic_menu_edit);
menu.add(0, MANAGER, 3, "Manager").setIcon(android.R.drawable.ic_menu_manage);
menu.add(0, SAVE, 4, "Save").setIcon(android.R.drawable.ic_menu_save);
<<<<<<<
import com.google.inject.Inject;
=======
import com.google.common.collect.Sets;
>>>>>>>
import com.google.inject.Inject;
import com.google.common.collect.Sets;
<<<<<<<
+ ", request=" + request.toString() + ", current state=" + sc.getDesiredState() + "."));
return;
=======
+ ", request=" + request + ", current state=" + sc.getDesiredState() + ".");
>>>>>>>
+ ", request=" + request.toString() + ", current state=" + sc.getDesiredState() + "."));
return;
<<<<<<< * Gets a lists of hosts with commands in progress given a range of requests. * The range of requests should include all requests with at least 1 stage in * progress. * * @return the list of hosts with commands in progress. * @see HostRoleStatus#IN_PROGRESS_STATUSES */ @RequiresSession public List<String> getHostsWithPendingTasks(long iLowestRequestIdInProgress, long iHighestRequestIdInProgress) { TypedQuery<String> query = entityManagerProvider.get().createNamedQuery( "HostRoleCommandEntity.findHostsByCommandStatus", String.class); query.setParameter("iLowestRequestIdInProgress", iLowestRequestIdInProgress); query.setParameter("iHighestRequestIdInProgress", iHighestRequestIdInProgress); query.setParameter("statuses", HostRoleStatus.IN_PROGRESS_STATUSES); return daoUtils.selectList(query); } /** * Gets a lists of hosts with commands in progress which occurr before the * specified request ID. This will only return commands which are not * {@link AgentCommandType#BACKGROUND_EXECUTION_COMMAND} as thsee commands do * not block future requests. * * @param lowerRequestIdInclusive * the lowest request ID to consider (inclusive) when getting any * blocking hosts. * @param requestId * the request ID to calculate any blocking hosts for (essentially, * the upper limit exclusive) * @return the list of hosts from older running requests which will block * those same hosts in the specified request ID. * @see HostRoleStatus#IN_PROGRESS_STATUSES */ @RequiresSession public List<String> getBlockingHostsForRequest(long lowerRequestIdInclusive, long requestId) { TypedQuery<String> query = entityManagerProvider.get().createNamedQuery( "HostRoleCommandEntity.getBlockingHostsForRequest", String.class); query.setParameter("lowerRequestIdInclusive", lowerRequestIdInclusive); query.setParameter("upperRequestIdExclusive", requestId); query.setParameter("statuses", HostRoleStatus.IN_PROGRESS_STATUSES); return daoUtils.selectList(query); } /** ======= * Gets a lists of hosts with commands in progress given a range of requests. * The range of requests should include all requests with at least 1 stage in * progress. * * @return the list of hosts with commands in progress. * @see HostRoleStatus#IN_PROGRESS_STATUSES */ @RequiresSession public List<String> getHostsWithPendingTasks(long iLowestRequestIdInProgress, long iHighestRequestIdInProgress) { TypedQuery<String> query = entityManagerProvider.get().createNamedQuery( "HostRoleCommandEntity.findHostsByCommandStatus", String.class); query.setParameter("iLowestRequestIdInProgress", iLowestRequestIdInProgress); query.setParameter("iHighestRequestIdInProgress", iHighestRequestIdInProgress); query.setParameter("statuses", HostRoleStatus.IN_PROGRESS_STATUSES); return daoUtils.selectList(query); } /** * Gets a lists of hosts with commands in progress which occurr before the * specified request ID. This will only return commands which are not * {@link AgentCommandType#BACKGROUND_EXECUTION_COMMAND} as thsee commands do * not block future requests. * * @param lowerRequestIdInclusive * the lowest request ID to consider (inclusive) when getting any * blocking hosts. * @param requestId * the request ID to calculate any blocking hosts for (essentially, * the upper limit exclusive) * @return the list of hosts from older running requests which will block * those same hosts in the specified request ID. 
* @see HostRoleStatus#IN_PROGRESS_STATUSES */ @RequiresSession public List<String> getBlockingHostsForRequest(long lowerRequestIdInclusive, long requestId) { TypedQuery<String> query = entityManagerProvider.get().createNamedQuery( "HostRoleCommandEntity.getBlockingHostsForRequest", String.class); query.setParameter("lowerRequestIdInclusive", lowerRequestIdInclusive); query.setParameter("upperRequestIdExclusive", requestId); query.setParameter("statuses", HostRoleStatus.IN_PROGRESS_STATUSES); return daoUtils.selectList(query); } /** * Gets the most recently run service check grouped by the command's role * (which is the only way to identify the service it was for!?) * * @param clusterId * the ID of the cluster to get the service checks for. */ @RequiresSession public List<LastServiceCheckDTO> getLatestServiceChecksByRole(long clusterId) { TypedQuery<LastServiceCheckDTO> query = entityManagerProvider.get().createNamedQuery( "HostRoleCommandEntity.findLatestServiceChecksByRole", LastServiceCheckDTO.class); query.setParameter("clusterId", clusterId); query.setParameter("roleCommand", RoleCommand.SERVICE_CHECK); return daoUtils.selectList(query); } /** >>>>>>> * Gets a lists of hosts with commands in progress given a range of requests. * The range of requests should include all requests with at least 1 stage in * progress. * * @return the list of hosts with commands in progress. * @see HostRoleStatus#IN_PROGRESS_STATUSES */ @RequiresSession public List<String> getHostsWithPendingTasks(long iLowestRequestIdInProgress, long iHighestRequestIdInProgress) { TypedQuery<String> query = entityManagerProvider.get().createNamedQuery( "HostRoleCommandEntity.findHostsByCommandStatus", String.class); query.setParameter("iLowestRequestIdInProgress", iLowestRequestIdInProgress); query.setParameter("iHighestRequestIdInProgress", iHighestRequestIdInProgress); query.setParameter("statuses", HostRoleStatus.IN_PROGRESS_STATUSES); return daoUtils.selectList(query); } /** * Gets a lists of hosts with commands in progress which occurr before the * specified request ID. This will only return commands which are not * {@link AgentCommandType#BACKGROUND_EXECUTION_COMMAND} as thsee commands do * not block future requests. * * @param lowerRequestIdInclusive * the lowest request ID to consider (inclusive) when getting any * blocking hosts. * @param requestId * the request ID to calculate any blocking hosts for (essentially, * the upper limit exclusive) * @return the list of hosts from older running requests which will block * those same hosts in the specified request ID. 
* @see HostRoleStatus#IN_PROGRESS_STATUSES */ @RequiresSession public List<String> getBlockingHostsForRequest(long lowerRequestIdInclusive, long requestId) { TypedQuery<String> query = entityManagerProvider.get().createNamedQuery( "HostRoleCommandEntity.getBlockingHostsForRequest", String.class); query.setParameter("lowerRequestIdInclusive", lowerRequestIdInclusive); query.setParameter("upperRequestIdExclusive", requestId); query.setParameter("statuses", HostRoleStatus.IN_PROGRESS_STATUSES); return daoUtils.selectList(query); } /** <<<<<<< public List<Long> findTaskIdsByRequestStageIds(List<RequestDAO.StageEntityPK> requestStageIds) { EntityManager entityManager = entityManagerProvider.get(); List<Long> taskIds = new ArrayList<Long>(); for (RequestDAO.StageEntityPK requestIds : requestStageIds) { TypedQuery<Long> hostRoleCommandQuery = entityManager.createNamedQuery("HostRoleCommandEntity.findTaskIdsByRequestStageIds", Long.class); hostRoleCommandQuery.setParameter("requestId", requestIds.getRequestId()); hostRoleCommandQuery.setParameter("stageId", requestIds.getStageId()); taskIds.addAll(daoUtils.selectList(hostRoleCommandQuery)); } return taskIds; } ======= public Set<Long> findTaskIdsByRequestStageIds(List<RequestDAO.StageEntityPK> requestStageIds) { EntityManager entityManager = entityManagerProvider.get(); List<Long> taskIds = new ArrayList<>(); for (RequestDAO.StageEntityPK requestIds : requestStageIds) { TypedQuery<Long> hostRoleCommandQuery = entityManager.createNamedQuery("HostRoleCommandEntity.findTaskIdsByRequestStageIds", Long.class); hostRoleCommandQuery.setParameter("requestId", requestIds.getRequestId()); hostRoleCommandQuery.setParameter("stageId", requestIds.getStageId()); taskIds.addAll(daoUtils.selectList(hostRoleCommandQuery)); } return Sets.newHashSet(taskIds); } /** * A simple DTO for storing the most recent service check time for a given * {@link Role}. */ public static class LastServiceCheckDTO { /** * The role. */ public final String role; /** * The time that the service check ended. */ public final long endTime; /** * Constructor. * * @param role * @param endTime */ public LastServiceCheckDTO(String role, long endTime) { this.role = role; this.endTime = endTime; } } >>>>>>> public Set<Long> findTaskIdsByRequestStageIds(List<RequestDAO.StageEntityPK> requestStageIds) { EntityManager entityManager = entityManagerProvider.get(); List<Long> taskIds = new ArrayList<>(); for (RequestDAO.StageEntityPK requestIds : requestStageIds) { TypedQuery<Long> hostRoleCommandQuery = entityManager.createNamedQuery("HostRoleCommandEntity.findTaskIdsByRequestStageIds", Long.class); hostRoleCommandQuery.setParameter("requestId", requestIds.getRequestId()); hostRoleCommandQuery.setParameter("stageId", requestIds.getStageId()); taskIds.addAll(daoUtils.selectList(hostRoleCommandQuery)); } return Sets.newHashSet(taskIds); }
<<<<<<<
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
=======
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
>>>>>>>
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
<<<<<<<
menu.add(0, EDIT, 1, R.string.editPlaylist).setIcon(android.R.drawable.ic_menu_edit);
menu.add(0, CLEAR, 2, R.string.clear).setIcon(android.R.drawable.ic_menu_close_clear_cancel);
=======
menu.add(0, CLEAR, 1, R.string.clear).setIcon(android.R.drawable.ic_menu_close_clear_cancel);
menu.add(0, EDIT, 2, R.string.editPlaylist).setIcon(android.R.drawable.ic_menu_edit);
menu.add(0, MANAGER, 3, "Manager").setIcon(android.R.drawable.ic_menu_manage);
menu.add(0, SAVE, 4, "Save").setIcon(android.R.drawable.ic_menu_save);
>>>>>>>
menu.add(0, EDIT, 1, R.string.editPlaylist).setIcon(android.R.drawable.ic_menu_edit);
menu.add(0, CLEAR, 2, R.string.clear).setIcon(android.R.drawable.ic_menu_close_clear_cancel);
menu.add(0, MANAGER, 3, "Manager").setIcon(android.R.drawable.ic_menu_manage);
menu.add(0, SAVE, 4, "Save").setIcon(android.R.drawable.ic_menu_save);
<<<<<<< String version, String displayName, String upgradePack, String operatingSystems) throws AmbariException { return create(stackEntity, version, displayName, upgradePack, operatingSystems, RepositoryType.STANDARD); } /** * Validates and creates an object. * The version must be unique within this stack name (e.g., HDP, HDPWIN, BIGTOP). * @param stackEntity Stack entity. * @param version Stack version, e.g., 2.2 or 2.2.0.1-885 * @param displayName Unique display name * @param upgradePack Optional upgrade pack, e.g, upgrade-2.2 * @param operatingSystems JSON structure of repository URLs for each OS * @param type the repository type * @return Returns the object created if successful, and throws an exception otherwise. * @throws AmbariException */ @Transactional public RepositoryVersionEntity create(StackEntity stackEntity, String version, String displayName, String upgradePack, String operatingSystems, RepositoryType type) throws AmbariException { ======= String version, String displayName, String operatingSystems) throws AmbariException { >>>>>>> String version, String displayName, String operatingSystems) throws AmbariException { return create(stackEntity, version, displayName, operatingSystems, RepositoryType.STANDARD); } /** * Validates and creates an object. * The version must be unique within this stack name (e.g., HDP, HDPWIN, BIGTOP). * @param stackEntity Stack entity. * @param version Stack version, e.g., 2.2 or 2.2.0.1-885 * @param displayName Unique display name * @param upgradePack Optional upgrade pack, e.g, upgrade-2.2 * @param operatingSystems JSON structure of repository URLs for each OS * @param type the repository type * @return Returns the object created if successful, and throws an exception otherwise. * @throws AmbariException */ @Transactional public RepositoryVersionEntity create(StackEntity stackEntity, String version, String displayName, String upgradePack, String operatingSystems, RepositoryType type) throws AmbariException { <<<<<<< stackEntity, version, displayName, upgradePack, operatingSystems); newEntity.setType(type); ======= stackEntity, version, displayName, operatingSystems); >>>>>>> stackEntity, version, displayName, operatingSystems); newEntity.setType(type);
<<<<<<< import org.apache.ambari.server.security.authorization.LdapServerProperties; ======= import org.apache.ambari.server.security.authorization.UserType; import org.apache.ambari.server.security.authorization.jwt.JwtAuthenticationProperties; >>>>>>> <<<<<<< * Determines whether an existing local users will be updated as LDAP users. */ @Markdown( description = "Determines how to handle username collision while updating from LDAP.", examples = {"skip", "convert", "add"} ) public static final ConfigurationProperty<String> LDAP_SYNC_USERNAME_COLLISIONS_BEHAVIOR = new ConfigurationProperty<>( "ldap.sync.username.collision.behavior", "add"); /** ======= >>>>>>> * Determines whether an existing local users will be updated as LDAP users. */ @Markdown( description = "Determines how to handle username collision while updating from LDAP.", examples = {"skip", "convert", "add"} ) public static final ConfigurationProperty<String> LDAP_SYNC_USERNAME_COLLISIONS_BEHAVIOR = new ConfigurationProperty<>( "ldap.sync.username.collision.behavior", "add"); /** <<<<<<< /** * The maximum number of authentication attempts permitted to a local user. Once the number of failures reaches this limit the user will be locked out. 0 indicates unlimited failures */ @Markdown(description = "The maximum number of authentication attempts permitted to a local user. Once the number of failures reaches this limit the user will be locked out. 0 indicates unlimited failures.") public static final ConfigurationProperty<Integer> MAX_LOCAL_AUTHENTICATION_FAILURES = new ConfigurationProperty<>( "authentication.local.max.failures", 10); /** * A flag to determine whether locked out messages are to be shown to users, if relevant, when authenticating into Ambari */ @Markdown(description = "Show or hide whether the user account is disabled or locked out, if relevant, when an authentication attempt fails.") public static final ConfigurationProperty<String> SHOW_LOCKED_OUT_USER_MESSAGE = new ConfigurationProperty<>( "authentication.local.show.locked.account.messages", "false"); ======= /** * The core pool size of the executor service that runs server side alerts. */ @Markdown(description = "The core pool size of the executor service that runs server side alerts.") public static final ConfigurationProperty<Integer> SERVER_SIDE_ALERTS_CORE_POOL_SIZE = new ConfigurationProperty<>( "alerts.server.side.scheduler.threadpool.size.core", 4); >>>>>>> /** * The maximum number of authentication attempts permitted to a local user. Once the number of failures reaches this limit the user will be locked out. 0 indicates unlimited failures */ @Markdown(description = "The maximum number of authentication attempts permitted to a local user. Once the number of failures reaches this limit the user will be locked out. 0 indicates unlimited failures.") public static final ConfigurationProperty<Integer> MAX_LOCAL_AUTHENTICATION_FAILURES = new ConfigurationProperty<>( "authentication.local.max.failures", 10); /** * A flag to determine whether locked out messages are to be shown to users, if relevant, when authenticating into Ambari */ @Markdown(description = "Show or hide whether the user account is disabled or locked out, if relevant, when an authentication attempt fails.") public static final ConfigurationProperty<String> SHOW_LOCKED_OUT_USER_MESSAGE = new ConfigurationProperty<>( "authentication.local.show.locked.account.messages", "false"); /** * The core pool size of the executor service that runs server side alerts. 
*/ @Markdown(description = "The core pool size of the executor service that runs server side alerts.") public static final ConfigurationProperty<Integer> SERVER_SIDE_ALERTS_CORE_POOL_SIZE = new ConfigurationProperty<>( "alerts.server.side.scheduler.threadpool.size.core", 4); <<<<<<< * Ldap username collision handling behavior. * ADD - append the new LDAP entry to the set of existing authentication methods. * CONVERT - remove all authentication methods except for the new LDAP entry. * SKIP - skip existing local users. */ public enum LdapUsernameCollisionHandlingBehavior { ADD, CONVERT, SKIP; /** * Safely translates a user-supplied behavior name to a {@link LdapUsernameCollisionHandlingBehavior}. * <p> * If the user-supplied value is empty or invalid, the default value is returned. * * @param value a user-supplied behavior name value * @param defaultValue the default value * @return a {@link LdapUsernameCollisionHandlingBehavior} */ public static LdapUsernameCollisionHandlingBehavior translate(String value, LdapUsernameCollisionHandlingBehavior defaultValue) { String processedValue = StringUtils.upperCase(StringUtils.trim(value)); if (StringUtils.isEmpty(processedValue)) { return defaultValue; } else { try { return valueOf(processedValue); } catch (IllegalArgumentException e) { LOG.warn("Invalid LDAP username collision value ({}), using the default value ({})", value, defaultValue.name().toLowerCase()); return defaultValue; } } } } /** ======= >>>>>>> * Ldap username collision handling behavior. * ADD - append the new LDAP entry to the set of existing authentication methods. * CONVERT - remove all authentication methods except for the new LDAP entry. * SKIP - skip existing local users. */ public enum LdapUsernameCollisionHandlingBehavior { ADD, CONVERT, SKIP; /** * Safely translates a user-supplied behavior name to a {@link LdapUsernameCollisionHandlingBehavior}. * <p> * If the user-supplied value is empty or invalid, the default value is returned. 
* * @param value a user-supplied behavior name value * @param defaultValue the default value * @return a {@link LdapUsernameCollisionHandlingBehavior} */ public static LdapUsernameCollisionHandlingBehavior translate(String value, LdapUsernameCollisionHandlingBehavior defaultValue) { String processedValue = StringUtils.upperCase(StringUtils.trim(value)); if (StringUtils.isEmpty(processedValue)) { return defaultValue; } else { try { return valueOf(processedValue); } catch (IllegalArgumentException e) { LOG.warn("Invalid LDAP username collision value ({}), using the default value ({})", value, defaultValue.name().toLowerCase()); return defaultValue; } } } } /** <<<<<<< if (passwdProp != null) { String dbpasswd = readPasswordFromStore(passwdProp); if (dbpasswd != null) { return dbpasswd; } } return readPasswordFromFile(passwdProp, SERVER_JDBC_RCA_USER_PASSWD.getDefaultValue()); } private String readPasswordFromFile(String filePath, String defaultPassword) { if (filePath == null) { LOG.debug("DB password file not specified - using default"); return defaultPassword; } else { LOG.debug("Reading password from file {}", filePath); String password; try { password = FileUtils.readFileToString(new File(filePath)); password = StringUtils.chomp(password); } catch (IOException e) { throw new RuntimeException("Unable to read database password", e); } return password; } } String readPasswordFromStore(String aliasStr) { String password = null; loadCredentialProvider(); if (credentialProvider != null) { char[] result = null; try { result = credentialProvider.getPasswordForAlias(aliasStr); } catch (AmbariException e) { LOG.error("Error reading from credential store."); e.printStackTrace(); } if (result != null) { password = new String(result); } else { if (CredentialProvider.isAliasString(aliasStr)) { LOG.error("Cannot read password for alias = " + aliasStr); } else { LOG.warn("Raw password provided, not an alias. 
It cannot be read from credential store."); } } } return password; } /** * Gets parameters of LDAP server to connect to * * @return LdapServerProperties object representing connection parameters */ public LdapServerProperties getLdapServerProperties() { LdapServerProperties ldapServerProperties = new LdapServerProperties(); ldapServerProperties.setPrimaryUrl(getProperty(LDAP_PRIMARY_URL)); ldapServerProperties.setSecondaryUrl(getProperty(LDAP_SECONDARY_URL)); ldapServerProperties.setUseSsl(Boolean.parseBoolean(getProperty(LDAP_USE_SSL))); ldapServerProperties.setAnonymousBind(Boolean.parseBoolean(getProperty(LDAP_BIND_ANONYMOUSLY))); ldapServerProperties.setManagerDn(getProperty(LDAP_MANAGER_DN)); String ldapPasswordProperty = getProperty(LDAP_MANAGER_PASSWORD); String ldapPassword = null; if (CredentialProvider.isAliasString(ldapPasswordProperty)) { ldapPassword = readPasswordFromStore(ldapPasswordProperty); } if (ldapPassword != null) { ldapServerProperties.setManagerPassword(ldapPassword); } else { if (ldapPasswordProperty != null && new File(ldapPasswordProperty).exists()) { ldapServerProperties.setManagerPassword(readPasswordFromFile(ldapPasswordProperty, "")); } } ldapServerProperties.setBaseDN(getProperty(LDAP_BASE_DN)); ldapServerProperties.setUsernameAttribute(getProperty(LDAP_USERNAME_ATTRIBUTE)); ldapServerProperties.setForceUsernameToLowercase(Boolean.parseBoolean(getProperty(LDAP_USERNAME_FORCE_LOWERCASE))); ldapServerProperties.setUserBase(getProperty(LDAP_USER_BASE)); ldapServerProperties.setUserObjectClass(getProperty(LDAP_USER_OBJECT_CLASS)); ldapServerProperties.setDnAttribute(getProperty(LDAP_DN_ATTRIBUTE)); ldapServerProperties.setGroupBase(getProperty(LDAP_GROUP_BASE)); ldapServerProperties.setGroupObjectClass(getProperty(LDAP_GROUP_OBJECT_CLASS)); ldapServerProperties.setGroupMembershipAttr(getProperty(LDAP_GROUP_MEMBERSHIP_ATTR)); ldapServerProperties.setGroupNamingAttr(getProperty(LDAP_GROUP_NAMING_ATTR)); ldapServerProperties.setAdminGroupMappingRules(getProperty(LDAP_ADMIN_GROUP_MAPPING_RULES)); ldapServerProperties.setAdminGroupMappingMemberAttr(getProperty(LDAP_ADMIN_GROUP_MAPPING_MEMBER_ATTR_DEFAULT)); ldapServerProperties.setUserSearchFilter(getProperty(LDAP_USER_SEARCH_FILTER)); ldapServerProperties.setAlternateUserSearchFilterEnabled(Boolean.parseBoolean(getProperty(LDAP_ALT_USER_SEARCH_ENABLED))); ldapServerProperties.setAlternateUserSearchFilter(getProperty(LDAP_ALT_USER_SEARCH_FILTER)); ldapServerProperties.setGroupSearchFilter(getProperty(LDAP_GROUP_SEARCH_FILTER)); ldapServerProperties.setReferralMethod(getProperty(LDAP_REFERRAL)); ldapServerProperties.setSyncUserMemberReplacePattern(getProperty(LDAP_SYNC_USER_MEMBER_REPLACE_PATTERN)); ldapServerProperties.setSyncGroupMemberReplacePattern(getProperty(LDAP_SYCN_GROUP_MEMBER_REPLACE_PATTERN)); ldapServerProperties.setSyncUserMemberFilter(getProperty(LDAP_SYNC_USER_MEMBER_FILTER)); ldapServerProperties.setSyncGroupMemberFilter(getProperty(LDAP_SYNC_GROUP_MEMBER_FILTER)); ldapServerProperties.setPaginationEnabled( Boolean.parseBoolean(getProperty(LDAP_PAGINATION_ENABLED))); if (properties.containsKey(LDAP_GROUP_BASE) || properties.containsKey(LDAP_GROUP_OBJECT_CLASS) || properties.containsKey(LDAP_GROUP_MEMBERSHIP_ATTR) || properties.containsKey(LDAP_GROUP_NAMING_ATTR) || properties.containsKey(LDAP_ADMIN_GROUP_MAPPING_RULES) || properties.containsKey(LDAP_GROUP_SEARCH_FILTER)) { ldapServerProperties.setGroupMappingEnabled(true); } return ldapServerProperties; } public boolean isLdapConfigured() { return 
Boolean.parseBoolean(getProperty(IS_LDAP_CONFIGURED)); ======= return PasswordUtils.getInstance().readPassword(passwdProp, SERVER_JDBC_RCA_USER_PASSWD.getDefaultValue()); >>>>>>> return PasswordUtils.getInstance().readPassword(passwdProp, SERVER_JDBC_RCA_USER_PASSWD.getDefaultValue()); <<<<<<< /** * Determines whether an existing local users will be skipped on updated during LDAP sync. * * @return true if ambari need to skip existing user during LDAP sync. */ public LdapUsernameCollisionHandlingBehavior getLdapSyncCollisionHandlingBehavior() { return LdapUsernameCollisionHandlingBehavior.translate( getProperty(LDAP_SYNC_USERNAME_COLLISIONS_BEHAVIOR), LdapUsernameCollisionHandlingBehavior.ADD); } ======= >>>>>>> /** * Determines whether an existing local users will be skipped on updated during LDAP sync. * * @return true if ambari need to skip existing user during LDAP sync. */ public LdapUsernameCollisionHandlingBehavior getLdapSyncCollisionHandlingBehavior() { return LdapUsernameCollisionHandlingBehavior.translate( getProperty(LDAP_SYNC_USERNAME_COLLISIONS_BEHAVIOR), LdapUsernameCollisionHandlingBehavior.ADD); } <<<<<<< public int getMaxAuthenticationFailures() { return Integer.parseInt(getProperty(MAX_LOCAL_AUTHENTICATION_FAILURES)); } public boolean showLockedOutUserMessage() { return Boolean.parseBoolean(getProperty(SHOW_LOCKED_OUT_USER_MESSAGE)); } ======= public int getAlertServiceCorePoolSize() { return Integer.parseInt(getProperty(SERVER_SIDE_ALERTS_CORE_POOL_SIZE)); } >>>>>>> public int getMaxAuthenticationFailures() { return Integer.parseInt(getProperty(MAX_LOCAL_AUTHENTICATION_FAILURES)); } public boolean showLockedOutUserMessage() { return Boolean.parseBoolean(getProperty(SHOW_LOCKED_OUT_USER_MESSAGE)); } public int getAlertServiceCorePoolSize() { return Integer.parseInt(getProperty(SERVER_SIDE_ALERTS_CORE_POOL_SIZE)); }
<<<<<<< + ", status=" + status + ", securityState=" + securityState + ", serviceName=" + serviceName + ", clusterId=" + clusterId ======= + ", status=" + status + ", serviceName=" + serviceName + ", clusterName=" + clusterName >>>>>>> + ", status=" + status + ", serviceName=" + serviceName + ", clusterId=" + clusterId
<<<<<<< import org.apache.commons.lang.BooleanUtils; ======= import org.apache.commons.lang.StringUtils; >>>>>>> import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.StringUtils; <<<<<<< public static final String UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("UpgradeChecks", "repository_version"); public static final String UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID = PropertyHelper.getPropertyId("UpgradeChecks", "for_revert"); ======= >>>>>>> public static final String UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("UpgradeChecks", "repository_version"); public static final String UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID = PropertyHelper.getPropertyId("UpgradeChecks", "for_revert"); <<<<<<< UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID, UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID, UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID); ======= UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID, UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID); >>>>>>> UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID, UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID, UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID);
<<<<<<< @SuppressWarnings("unchecked") public class QueuePool<T extends Poolable> implements LifecycledPool<T> { ======= public final class QueuePool<T extends Poolable> implements LifecycledPool<T> { >>>>>>> @SuppressWarnings("unchecked") public final class QueuePool<T extends Poolable> implements LifecycledPool<T> {
<<<<<<< import org.apache.ambari.server.state.State; ======= import org.apache.commons.collections.MapUtils; >>>>>>> import org.apache.commons.collections.MapUtils; import org.apache.ambari.server.state.State; <<<<<<< addHostComponentLastStateTable(); ======= addAmbariConfigurationTable(); >>>>>>> addAmbariConfigurationTable(); addHostComponentLastStateTable(); <<<<<<< protected void addHostComponentLastStateTable() throws SQLException { dbAccessor.addColumn(COMPONENT_STATE_TABLE, new DBAccessor.DBColumnInfo(COMPONENT_LAST_STATE_COLUMN, String.class, 255, State.UNKNOWN, true)); } ======= protected void addAmbariConfigurationTable() throws SQLException { List<DBAccessor.DBColumnInfo> columns = new ArrayList<>(); columns.add(new DBAccessor.DBColumnInfo(AMBARI_CONFIGURATION_CATEGORY_NAME_COLUMN, String.class, 100, null, false)); columns.add(new DBAccessor.DBColumnInfo(AMBARI_CONFIGURATION_PROPERTY_NAME_COLUMN, String.class, 100, null, false)); columns.add(new DBAccessor.DBColumnInfo(AMBARI_CONFIGURATION_PROPERTY_VALUE_COLUMN, String.class, 255, null, true)); dbAccessor.createTable(AMBARI_CONFIGURATION_TABLE, columns); dbAccessor.addPKConstraint(AMBARI_CONFIGURATION_TABLE, "PK_ambari_configuration", AMBARI_CONFIGURATION_CATEGORY_NAME_COLUMN, AMBARI_CONFIGURATION_PROPERTY_NAME_COLUMN); } >>>>>>> protected void addAmbariConfigurationTable() throws SQLException { List<DBAccessor.DBColumnInfo> columns = new ArrayList<>(); columns.add(new DBAccessor.DBColumnInfo(AMBARI_CONFIGURATION_CATEGORY_NAME_COLUMN, String.class, 100, null, false)); columns.add(new DBAccessor.DBColumnInfo(AMBARI_CONFIGURATION_PROPERTY_NAME_COLUMN, String.class, 100, null, false)); columns.add(new DBAccessor.DBColumnInfo(AMBARI_CONFIGURATION_PROPERTY_VALUE_COLUMN, String.class, 255, null, true)); dbAccessor.createTable(AMBARI_CONFIGURATION_TABLE, columns); dbAccessor.addPKConstraint(AMBARI_CONFIGURATION_TABLE, "PK_ambari_configuration", AMBARI_CONFIGURATION_CATEGORY_NAME_COLUMN, AMBARI_CONFIGURATION_PROPERTY_NAME_COLUMN); } protected void addHostComponentLastStateTable() throws SQLException { dbAccessor.addColumn(COMPONENT_STATE_TABLE, new DBAccessor.DBColumnInfo(COMPONENT_LAST_STATE_COLUMN, String.class, 255, State.UNKNOWN, true)); } <<<<<<< updateHostComponentLastStateTable(); ======= updateKerberosConfigurations(); >>>>>>> updateKerberosConfigurations(); updateHostComponentLastStateTable(); <<<<<<< protected void updateHostComponentLastStateTable() throws SQLException { executeInTransaction(new Runnable() { @Override public void run() { try { HostComponentStateDAO hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class); List<HostComponentStateEntity> hostComponentStateEntities = hostComponentStateDAO.findAll(); for (HostComponentStateEntity hostComponentStateEntity : hostComponentStateEntities) { hostComponentStateEntity.setLastLiveState(hostComponentStateEntity.getCurrentState()); hostComponentStateDAO.merge(hostComponentStateEntity); } } catch (Exception e) { LOG.warn("Setting status for stages and Requests threw exception. 
", e); } } }); } ======= protected void updateKerberosConfigurations() throws AmbariException { AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class); Clusters clusters = ambariManagementController.getClusters(); if (clusters != null) { Map<String, Cluster> clusterMap = clusters.getClusters(); if (!MapUtils.isEmpty(clusterMap)) { for (Cluster cluster : clusterMap.values()) { Config config = cluster.getDesiredConfigByType("kerberos-env"); if (config != null) { Map<String, String> properties = config.getProperties(); if (properties.containsKey("group")) { // Covert kerberos-env/group to kerberos-env/ipa_user_group updateConfigurationPropertiesForCluster(cluster, "kerberos-env", Collections.singletonMap("ipa_user_group", properties.get("group")), Collections.singleton("group"), true, false); } } } } } } >>>>>>> protected void updateKerberosConfigurations() throws AmbariException { AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class); Clusters clusters = ambariManagementController.getClusters(); if (clusters != null) { Map<String, Cluster> clusterMap = clusters.getClusters(); if (!MapUtils.isEmpty(clusterMap)) { for (Cluster cluster : clusterMap.values()) { Config config = cluster.getDesiredConfigByType("kerberos-env"); if (config != null) { Map<String, String> properties = config.getProperties(); if (properties.containsKey("group")) { // Covert kerberos-env/group to kerberos-env/ipa_user_group updateConfigurationPropertiesForCluster(cluster, "kerberos-env", Collections.singletonMap("ipa_user_group", properties.get("group")), Collections.singleton("group"), true, false); } } } } } } protected void updateHostComponentLastStateTable() throws SQLException { executeInTransaction(new Runnable() { @Override public void run() { try { HostComponentStateDAO hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class); List<HostComponentStateEntity> hostComponentStateEntities = hostComponentStateDAO.findAll(); for (HostComponentStateEntity hostComponentStateEntity : hostComponentStateEntities) { hostComponentStateEntity.setLastLiveState(hostComponentStateEntity.getCurrentState()); hostComponentStateDAO.merge(hostComponentStateEntity); } } catch (Exception e) { LOG.warn("Setting status for stages and Requests threw exception. ", e); } } }); }
<<<<<<< import org.apache.ambari.server.controller.internal.DeleteHostComponentStatusMetaData; import org.apache.ambari.server.controller.internal.UpgradeResourceProvider; ======= >>>>>>> import org.apache.ambari.server.controller.internal.DeleteHostComponentStatusMetaData; import org.apache.ambari.server.controller.internal.UpgradeResourceProvider; <<<<<<< import org.apache.ambari.server.state.stack.upgrade.Direction; import org.apache.ambari.server.state.svccomphost.ServiceComponentHostSummary; import org.apache.ambari.server.topology.TopologyDeleteFormer; ======= >>>>>>> import org.apache.ambari.server.state.stack.upgrade.Direction; import org.apache.ambari.server.state.svccomphost.ServiceComponentHostSummary; import org.apache.ambari.server.topology.TopologyDeleteFormer; <<<<<<< * Get the ClusterVersionEntity object whose state is CURRENT. * @return */ @Override public ClusterVersionEntity getCurrentClusterVersion() { Collection<ClusterVersionEntity> clusterVersionEntities = getClusterEntity().getClusterVersionEntities(); for (ClusterVersionEntity clusterVersionEntity : clusterVersionEntities) { if (clusterVersionEntity.getState() == RepositoryVersionState.CURRENT) { // TODO assuming there's only 1 current version, return 1st found, exception was expected in previous implementation return clusterVersionEntity; } } return null; } /** * {@inheritDoc} */ @Override public ClusterVersionEntity getEffectiveClusterVersion() throws AmbariException { UpgradeEntity upgradeEntity = getUpgradeInProgress(); if (upgradeEntity == null) { return getCurrentClusterVersion(); } String effectiveVersion = null; switch (upgradeEntity.getUpgradeType()) { case NON_ROLLING: if (upgradeEntity.getDirection() == Direction.UPGRADE) { boolean pastChangingStack = isNonRollingUpgradePastUpgradingStack(upgradeEntity); effectiveVersion = pastChangingStack ? upgradeEntity.getToVersion() : upgradeEntity.getFromVersion(); } else { // Should be the lower value during a Downgrade. effectiveVersion = upgradeEntity.getToVersion(); } break; case ROLLING: default: // Version will be higher on upgrade and lower on downgrade directions. effectiveVersion = upgradeEntity.getToVersion(); break; } if (effectiveVersion == null) { throw new AmbariException("Unable to determine which version to use during Stack Upgrade, effectiveVersion is null."); } // Find the first cluster version whose repo matches the expected version. Collection<ClusterVersionEntity> clusterVersionEntities = getClusterEntity().getClusterVersionEntities(); for (ClusterVersionEntity clusterVersionEntity : clusterVersionEntities) { if (clusterVersionEntity.getRepositoryVersion().getVersion().equals(effectiveVersion)) { return clusterVersionEntity; } } return null; } /** * Given a NonRolling stack upgrade, determine if it has already crossed the point of using the newer version. * @param upgrade Stack Upgrade * @return Return true if should be using to_version, otherwise, false to mean the from_version. 
*/ private boolean isNonRollingUpgradePastUpgradingStack(UpgradeEntity upgrade) { for (UpgradeGroupEntity group : upgrade.getUpgradeGroups()) { if (group.getName().equalsIgnoreCase(UpgradeResourceProvider.CONST_UPGRADE_GROUP_NAME)) { for (UpgradeItemEntity item : group.getItems()) { List<Long> taskIds = hostRoleCommandDAO.findTaskIdsByStage(upgrade.getRequestId(), item.getStageId()); List<HostRoleCommandEntity> commands = hostRoleCommandDAO.findByPKs(taskIds); for (HostRoleCommandEntity command : commands) { if (command.getCustomCommandName() != null && command.getCustomCommandName().equalsIgnoreCase(UpgradeResourceProvider.CONST_CUSTOM_COMMAND_NAME) && command.getStatus() == HostRoleStatus.COMPLETED) { return true; } } } return false; } } return false; } /** * Get all of the ClusterVersionEntity objects for the cluster. * @return */ @Override public Collection<ClusterVersionEntity> getAllClusterVersions() { return clusterVersionDAO.findByCluster(getClusterName()); } /** * During the Finalize Action, want to transition all Host Versions from INSTALLED to CURRENT, and the last CURRENT one to INSTALLED. * @param hostNames Collection of host names * @param currentClusterVersion Entity that contains the cluster's current stack (with its name and version) * @param desiredState Desired state must be {@link RepositoryVersionState#CURRENT} * @throws AmbariException */ @Override public void mapHostVersions(Set<String> hostNames, ClusterVersionEntity currentClusterVersion, RepositoryVersionState desiredState) throws AmbariException { if (currentClusterVersion == null) { throw new AmbariException("Could not find current stack version of cluster " + getClusterName()); } final Set<RepositoryVersionState> validStates = Sets.newHashSet(RepositoryVersionState.CURRENT); if (!validStates.contains(desiredState)) { throw new AmbariException("The state must be one of [" + StringUtils.join(validStates, ", ") + "]"); } clusterGlobalLock.writeLock().lock(); try { StackEntity repoVersionStackEntity = currentClusterVersion.getRepositoryVersion().getStack(); StackId repoVersionStackId = new StackId(repoVersionStackEntity); Map<String, HostVersionEntity> existingHostToHostVersionEntity = new HashMap<>(); List<HostVersionEntity> existingHostVersionEntities = hostVersionDAO.findByClusterStackAndVersion( getClusterName(), repoVersionStackId, currentClusterVersion.getRepositoryVersion().getVersion()); if (existingHostVersionEntities != null) { for (HostVersionEntity entity : existingHostVersionEntities) { existingHostToHostVersionEntity.put(entity.getHostName(), entity); } } Sets.SetView<String> intersection = Sets.intersection( existingHostToHostVersionEntity.keySet(), hostNames); for (String hostname : hostNames) { List<HostVersionEntity> currentHostVersions = hostVersionDAO.findByClusterHostAndState( getClusterName(), hostname, RepositoryVersionState.CURRENT); HostVersionEntity currentHostVersionEntity = (currentHostVersions != null && currentHostVersions.size() == 1) ? currentHostVersions.get(0) : null; // Notice that if any hosts already have the desired stack and version, regardless of the state, we try // to be robust and only insert records for the missing hosts. if (!intersection.contains(hostname)) { // According to the business logic, we don't create objects in a CURRENT state. 
HostEntity hostEntity = hostDAO.findByName(hostname); HostVersionEntity hostVersionEntity = new HostVersionEntity(hostEntity, currentClusterVersion.getRepositoryVersion(), desiredState); hostVersionDAO.create(hostVersionEntity); } else { HostVersionEntity hostVersionEntity = existingHostToHostVersionEntity.get(hostname); if (hostVersionEntity.getState() != desiredState) { hostVersionEntity.setState(desiredState); hostVersionEntity = hostVersionDAO.merge(hostVersionEntity); } // Maintain the invariant that only one HostVersionEntity is allowed // to have a state of CURRENT. if (currentHostVersionEntity != null && !currentHostVersionEntity.getRepositoryVersion().equals( hostVersionEntity.getRepositoryVersion()) && desiredState == RepositoryVersionState.CURRENT && currentHostVersionEntity.getState() == RepositoryVersionState.CURRENT) { currentHostVersionEntity.setState(RepositoryVersionState.INSTALLED); hostVersionDAO.merge(currentHostVersionEntity); } } } } finally { clusterGlobalLock.writeLock().unlock(); } } /** ======= >>>>>>> <<<<<<< * @see #clusterGlobalLock ======= * the stack to remove configurations for (not {@code null}). * @param serviceName * the service name (not {@code null}). * @see #clusterGlobalLock >>>>>>> * the stack to remove configurations for (not {@code null}). * @param serviceName * the service name (not {@code null}). * @see #clusterGlobalLock
<<<<<<< ======= import static org.apache.ambari.server.controller.KerberosHelperImpl.CHECK_KEYTABS; import static org.apache.ambari.server.controller.KerberosHelperImpl.REMOVE_KEYTAB; import static org.apache.ambari.server.controller.KerberosHelperImpl.SET_KEYTAB; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; >>>>>>> <<<<<<< LOG.debug("Recovery configuration set to {}", response.getRecoveryConfig().toString()); ======= LOG.info("Recovery configuration set to {}", response.getRecoveryConfig()); >>>>>>> LOG.debug("Recovery configuration set to {}", response.getRecoveryConfig().toString()); <<<<<<< ======= /** * Adds commands from action queue to a heartbeat response. */ protected void sendCommands(String hostname, HeartBeatResponse response) throws AmbariException { List<AgentCommand> cmds = actionQueue.dequeueAll(hostname); if (cmds != null && !cmds.isEmpty()) { for (AgentCommand ac : cmds) { try { if (LOG.isDebugEnabled()) { LOG.debug("Sending command string = {}", StageUtils.jaxbToString(ac)); } } catch (Exception e) { throw new AmbariException("Could not get jaxb string for command", e); } switch (ac.getCommandType()) { case BACKGROUND_EXECUTION_COMMAND: case EXECUTION_COMMAND: { ExecutionCommand ec = (ExecutionCommand)ac; LOG.info("HeartBeatHandler.sendCommands: sending ExecutionCommand for host {}, role {}, roleCommand {}, and command ID {}, task ID {}", ec.getHostname(), ec.getRole(), ec.getRoleCommand(), ec.getCommandId(), ec.getTaskId()); Map<String, String> hlp = ec.getHostLevelParams(); if (hlp != null) { String customCommand = hlp.get("custom_command"); if (SET_KEYTAB.equalsIgnoreCase(customCommand) || REMOVE_KEYTAB.equalsIgnoreCase(customCommand) || CHECK_KEYTABS.equalsIgnoreCase(customCommand)) { LOG.info(String.format("%s called", customCommand)); try { injectKeytab(ec, customCommand, hostname); } catch (IOException e) { throw new AmbariException("Could not inject keytab into command", e); } } } response.addExecutionCommand((ExecutionCommand) ac); break; } case STATUS_COMMAND: { response.addStatusCommand((StatusCommand) ac); break; } case CANCEL_COMMAND: { response.addCancelCommand((CancelCommand) ac); break; } case ALERT_DEFINITION_COMMAND: { response.addAlertDefinitionCommand((AlertDefinitionCommand) ac); break; } case ALERT_EXECUTION_COMMAND: { response.addAlertExecutionCommand((AlertExecutionCommand) ac); break; } default: LOG.error("There is no action for agent command =" + ac.getCommandType().name()); } } } } >>>>>>> <<<<<<< ======= /** * Insert Kerberos keytab details into the ExecutionCommand for the SET_KEYTAB custom command if * any keytab details and associated data exists for the target host. 
* * @param ec the ExecutionCommand to update * @param command a name of the relevant keytab command * @param targetHost a name of the host the relevant command is destined for * @throws AmbariException */ void injectKeytab(ExecutionCommand ec, String command, String targetHost) throws AmbariException { String dataDir = ec.getCommandParams().get(KerberosServerAction.DATA_DIRECTORY); if(dataDir != null) { KerberosIdentityDataFileReader reader = null; List<Map<String, String>> kcp = ec.getKerberosCommandParams(); try { reader = kerberosIdentityDataFileReaderFactory.createKerberosIdentityDataFileReader(new File(dataDir, KerberosIdentityDataFileReader.DATA_FILE_NAME)); for (Map<String, String> record : reader) { String hostName = record.get(KerberosIdentityDataFileReader.HOSTNAME); if (targetHost.equalsIgnoreCase(hostName)) { if (SET_KEYTAB.equalsIgnoreCase(command)) { String keytabFilePath = record.get(KerberosIdentityDataFileReader.KEYTAB_FILE_PATH); if (keytabFilePath != null) { String sha1Keytab = DigestUtils.sha1Hex(keytabFilePath); File keytabFile = new File(dataDir + File.separator + hostName + File.separator + sha1Keytab); if (keytabFile.canRead()) { Map<String, String> keytabMap = new HashMap<>(); String principal = record.get(KerberosIdentityDataFileReader.PRINCIPAL); String isService = record.get(KerberosIdentityDataFileReader.SERVICE); keytabMap.put(KerberosIdentityDataFileReader.HOSTNAME, hostName); keytabMap.put(KerberosIdentityDataFileReader.SERVICE, isService); keytabMap.put(KerberosIdentityDataFileReader.COMPONENT, record.get(KerberosIdentityDataFileReader.COMPONENT)); keytabMap.put(KerberosIdentityDataFileReader.PRINCIPAL, principal); keytabMap.put(KerberosIdentityDataFileReader.KEYTAB_FILE_PATH, keytabFilePath); keytabMap.put(KerberosIdentityDataFileReader.KEYTAB_FILE_OWNER_NAME, record.get(KerberosIdentityDataFileReader.KEYTAB_FILE_OWNER_NAME)); keytabMap.put(KerberosIdentityDataFileReader.KEYTAB_FILE_OWNER_ACCESS, record.get(KerberosIdentityDataFileReader.KEYTAB_FILE_OWNER_ACCESS)); keytabMap.put(KerberosIdentityDataFileReader.KEYTAB_FILE_GROUP_NAME, record.get(KerberosIdentityDataFileReader.KEYTAB_FILE_GROUP_NAME)); keytabMap.put(KerberosIdentityDataFileReader.KEYTAB_FILE_GROUP_ACCESS, record.get(KerberosIdentityDataFileReader.KEYTAB_FILE_GROUP_ACCESS)); BufferedInputStream bufferedIn = new BufferedInputStream(new FileInputStream(keytabFile)); byte[] keytabContent = null; try { keytabContent = IOUtils.toByteArray(bufferedIn); } finally { bufferedIn.close(); } String keytabContentBase64 = Base64.encodeBase64String(keytabContent); keytabMap.put(KerberosServerAction.KEYTAB_CONTENT_BASE64, keytabContentBase64); kcp.add(keytabMap); } } } else if (REMOVE_KEYTAB.equalsIgnoreCase(command) || CHECK_KEYTABS.equalsIgnoreCase(command)) { Map<String, String> keytabMap = new HashMap<>(); keytabMap.put(KerberosIdentityDataFileReader.HOSTNAME, hostName); keytabMap.put(KerberosIdentityDataFileReader.SERVICE, record.get(KerberosIdentityDataFileReader.SERVICE)); keytabMap.put(KerberosIdentityDataFileReader.COMPONENT, record.get(KerberosIdentityDataFileReader.COMPONENT)); keytabMap.put(KerberosIdentityDataFileReader.PRINCIPAL, record.get(KerberosIdentityDataFileReader.PRINCIPAL)); keytabMap.put(KerberosIdentityDataFileReader.KEYTAB_FILE_PATH, record.get(KerberosIdentityDataFileReader.KEYTAB_FILE_PATH)); kcp.add(keytabMap); } } } } catch (IOException e) { throw new AmbariException("Could not inject keytabs to enable kerberos"); } finally { if (reader != null) { try { reader.close(); } catch 
(Throwable t) { // ignored } } } ec.setKerberosCommandParams(kcp); } } >>>>>>>
<<<<<<< ======= desiredStateEntity.setRecoveryEnabled(false); desiredStateEntityPK = getDesiredStateEntityPK(desiredStateEntity); >>>>>>> desiredStateEntity.setRecoveryEnabled(false);
<<<<<<< import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; ======= >>>>>>> <<<<<<< hostLevelParams.put(REPO_INFO, ambariMetaInfo.getRepoInfoString(cluster, component, host)); ======= try { hostLevelParams.put(REPO_INFO, repoVersionHelper.getRepoInfo(cluster, component, host)); } catch (SystemException e) { throw new AmbariException("", e); } >>>>>>> hostLevelParams.put(REPO_INFO, ambariMetaInfo.getRepoInfoString(cluster, component, host)); <<<<<<< execCmd.setRepositoryFile(ambariMetaInfo.getCommandRepository(cluster, component, host)); ======= >>>>>>> execCmd.setRepositoryFile(ambariMetaInfo.getCommandRepository(cluster, component, host)); <<<<<<< ======= >>>>>>> <<<<<<< public String getStatusCommandTimeout(ServiceInfo serviceInfo) throws AmbariException { String commandTimeout = configs.getDefaultAgentTaskTimeout(false); if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) { // Service check command is not custom command CommandScriptDefinition script = serviceInfo.getCommandScript(); if (script != null) { if (script.getTimeout() > 0) { commandTimeout = String.valueOf(script.getTimeout()); } } else { String message = String.format("Service %s has no command script " + "defined. It is not possible to run service check" + " for this service", serviceInfo.getName()); throw new AmbariException(message); } } // Try to apply overridden service check timeout value if available Long overriddenTimeout = configs.getAgentServiceCheckTaskTimeout(); if (!overriddenTimeout.equals(Configuration.AGENT_SERVICE_CHECK_TASK_TIMEOUT.getDefaultValue())) { commandTimeout = String.valueOf(overriddenTimeout); } return commandTimeout; } ======= >>>>>>> public String getStatusCommandTimeout(ServiceInfo serviceInfo) throws AmbariException { String commandTimeout = configs.getDefaultAgentTaskTimeout(false); if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) { // Service check command is not custom command CommandScriptDefinition script = serviceInfo.getCommandScript(); if (script != null) { if (script.getTimeout() > 0) { commandTimeout = String.valueOf(script.getTimeout()); } } else { String message = String.format("Service %s has no command script " + "defined. It is not possible to run service check" + " for this service", serviceInfo.getName()); throw new AmbariException(message); } } // Try to apply overridden service check timeout value if available Long overriddenTimeout = configs.getAgentServiceCheckTaskTimeout(); if (!overriddenTimeout.equals(Configuration.AGENT_SERVICE_CHECK_TASK_TIMEOUT.getDefaultValue())) { commandTimeout = String.valueOf(overriddenTimeout); } return commandTimeout; }
<<<<<<< import java.util.regex.Matcher; import java.util.regex.Pattern; ======= import javax.xml.bind.JAXBContext; import javax.xml.bind.Unmarshaller; >>>>>>> import java.util.regex.Matcher; import java.util.regex.Pattern; import javax.xml.bind.JAXBContext; import javax.xml.bind.Unmarshaller; <<<<<<< } ======= } JAXBContext jaxbContext = JAXBContext.newInstance(Scan.class); Unmarshaller unmarshaller = jaxbContext.createUnmarshaller(); StringReader reader = new StringReader(sb.toString()); Scan scan = (Scan) unmarshaller.unmarshal(reader); >>>>>>> } JAXBContext jaxbContext = JAXBContext.newInstance(Scan.class); Unmarshaller unmarshaller = jaxbContext.createUnmarshaller(); StringReader reader = new StringReader(sb.toString()); Scan scan = (Scan) unmarshaller.unmarshal(reader);
<<<<<<< new HostRequest(host1, cluster1, null); rInvalid1.setRackInfo(UUID.randomUUID().toString()); ======= new HostRequest(host1, cluster1); >>>>>>> new HostRequest(host1, cluster1); rInvalid1.setRackInfo(UUID.randomUUID().toString());
<<<<<<< import org.apache.ambari.server.security.authorization.LdapServerProperties; ======= import org.apache.ambari.server.security.authorization.UserType; >>>>>>> <<<<<<< public void testGetLdapServerProperties() throws Exception { final Properties ambariProperties = new Properties(); final Configuration configuration = new Configuration(ambariProperties); final File passwordFile = temp.newFile("ldap-password.dat"); final FileOutputStream fos = new FileOutputStream(passwordFile); fos.write("ambaritest\r\n".getBytes()); fos.close(); final String passwordFilePath = temp.getRoot().getAbsolutePath() + File.separator + "ldap-password.dat"; ambariProperties.setProperty(Configuration.LDAP_PRIMARY_URL.getKey(), "1"); ambariProperties.setProperty(Configuration.LDAP_SECONDARY_URL.getKey(), "2"); ambariProperties.setProperty(Configuration.LDAP_USE_SSL.getKey(), "true"); ambariProperties.setProperty(Configuration.LDAP_BIND_ANONYMOUSLY.getKey(), "true"); ambariProperties.setProperty(Configuration.LDAP_MANAGER_DN.getKey(), "5"); ambariProperties.setProperty(Configuration.LDAP_MANAGER_PASSWORD.getKey(), passwordFilePath); ambariProperties.setProperty(Configuration.LDAP_BASE_DN.getKey(), "7"); ambariProperties.setProperty(Configuration.LDAP_USERNAME_ATTRIBUTE.getKey(), "8"); ambariProperties.setProperty(Configuration.LDAP_USER_BASE.getKey(), "9"); ambariProperties.setProperty(Configuration.LDAP_USER_OBJECT_CLASS.getKey(), "10"); ambariProperties.setProperty(Configuration.LDAP_GROUP_BASE.getKey(), "11"); ambariProperties.setProperty(Configuration.LDAP_GROUP_OBJECT_CLASS.getKey(), "12"); ambariProperties.setProperty(Configuration.LDAP_GROUP_MEMBERSHIP_ATTR.getKey(), "13"); ambariProperties.setProperty(Configuration.LDAP_GROUP_NAMING_ATTR.getKey(), "14"); ambariProperties.setProperty(Configuration.LDAP_ADMIN_GROUP_MAPPING_RULES.getKey(), "15"); ambariProperties.setProperty(Configuration.LDAP_GROUP_SEARCH_FILTER.getKey(), "16"); ambariProperties.setProperty(Configuration.LDAP_USER_SEARCH_FILTER.getKey(), "17"); ambariProperties.setProperty(Configuration.LDAP_ALT_USER_SEARCH_ENABLED.getKey(), "true"); ambariProperties.setProperty(Configuration.LDAP_ALT_USER_SEARCH_FILTER.getKey(), "18"); final LdapServerProperties ldapProperties = configuration.getLdapServerProperties(); Assert.assertEquals("1", ldapProperties.getPrimaryUrl()); Assert.assertEquals("2", ldapProperties.getSecondaryUrl()); Assert.assertEquals(true, ldapProperties.isUseSsl()); Assert.assertEquals(true, ldapProperties.isAnonymousBind()); Assert.assertEquals("5", ldapProperties.getManagerDn()); Assert.assertEquals("ambaritest", ldapProperties.getManagerPassword()); Assert.assertEquals("7", ldapProperties.getBaseDN()); Assert.assertEquals("8", ldapProperties.getUsernameAttribute()); Assert.assertEquals("9", ldapProperties.getUserBase()); Assert.assertEquals("10", ldapProperties.getUserObjectClass()); Assert.assertEquals("11", ldapProperties.getGroupBase()); Assert.assertEquals("12", ldapProperties.getGroupObjectClass()); Assert.assertEquals("13", ldapProperties.getGroupMembershipAttr()); Assert.assertEquals("14", ldapProperties.getGroupNamingAttr()); Assert.assertEquals("15", ldapProperties.getAdminGroupMappingRules()); Assert.assertEquals("16", ldapProperties.getGroupSearchFilter()); Assert.assertEquals("17", ldapProperties.getUserSearchFilter(false)); Assert.assertEquals(true, ldapProperties.isAlternateUserSearchFilterEnabled()); Assert.assertEquals("18", ldapProperties.getUserSearchFilter(true)); } @Test ======= >>>>>>>
<<<<<<< ======= import com.google.inject.Provider; import com.google.inject.persist.Transactional; >>>>>>> <<<<<<< public static final String REPOSITORY_VERSION_UPGRADE_PACK_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "upgrade_pack"); public static final String REPOSITORY_VERSION_TYPE_PROPERTY_ID = "RepositoryVersions/type"; public static final String REPOSITORY_VERSION_COMPONENTS = "RepositoryVersions/components"; ======= >>>>>>> public static final String REPOSITORY_VERSION_TYPE_PROPERTY_ID = "RepositoryVersions/type"; public static final String REPOSITORY_VERSION_COMPONENTS = "RepositoryVersions/components"; <<<<<<< public static Set<String> propertyIds = Sets.newHashSet( REPOSITORY_VERSION_ID_PROPERTY_ID, REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID, REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID, REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID, REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID, REPOSITORY_VERSION_UPGRADE_PACK_PROPERTY_ID, SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID, REPOSITORY_VERSION_TYPE_PROPERTY_ID, REPOSITORY_VERSION_COMPONENTS); ======= public static Set<String> propertyIds = new HashSet<String>() { { add(REPOSITORY_VERSION_ID_PROPERTY_ID); add(REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID); add(REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID); add(REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID); add(REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID); add(SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID); } }; >>>>>>> public static Set<String> propertyIds = Sets.newHashSet( REPOSITORY_VERSION_ID_PROPERTY_ID, REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID, REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID, REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID, REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID, SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID, REPOSITORY_VERSION_TYPE_PROPERTY_ID, REPOSITORY_VERSION_COMPONENTS); <<<<<<< entity.setUpgradePackage(repositoryVersionHelper.getUpgradePackageName(stackName, stackVersion, entity.getVersion())); List<RepositoryVersionEntity.Component> components = null; int i = 1; for (Entry<String, Object> entry : properties.entrySet()) { if (entry.getKey().startsWith(REPOSITORY_VERSION_COMPONENTS)) { if (null == components) { components = new ArrayList<>(); } String serviceName = PropertyHelper.getPropertyName(entry.getKey()); Collection<String> componentNames = (Collection<String>) entry.getValue(); for (String componentName : componentNames) { components.add(new RepositoryVersionEntity.Component(serviceName, componentName, i++)); } } } if (null != components) { entity.setType(RepositoryType.PATCH); entity.setComponents(components); } ======= >>>>>>> List<RepositoryVersionEntity.Component> components = null; int i = 1; for (Entry<String, Object> entry : properties.entrySet()) { if (entry.getKey().startsWith(REPOSITORY_VERSION_COMPONENTS)) { if (null == components) { components = new ArrayList<>(); } String serviceName = PropertyHelper.getPropertyName(entry.getKey()); Collection<String> componentNames = (Collection<String>) entry.getValue(); for (String componentName : componentNames) { components.add(new RepositoryVersionEntity.Component(serviceName, componentName, i++)); } } } if (null != components) { entity.setType(RepositoryType.PATCH); entity.setComponents(components); }
<<<<<<< import org.apache.ambari.server.actionmanager.HostRoleCommandFactory; import org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl; import org.apache.ambari.server.actionmanager.StageFactory; import org.apache.ambari.server.actionmanager.StageFactoryImpl; import org.apache.ambari.server.agent.stomp.AgentConfigsHolder; import org.apache.ambari.server.agent.stomp.MetadataHolder; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.audit.AuditLogger; import org.apache.ambari.server.audit.AuditLoggerDefaultImpl; ======= import org.apache.ambari.server.actionmanager.HostRoleStatus; >>>>>>> import org.apache.ambari.server.actionmanager.HostRoleCommandFactory; import org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl; import org.apache.ambari.server.actionmanager.StageFactory; import org.apache.ambari.server.actionmanager.StageFactoryImpl; import org.apache.ambari.server.agent.stomp.AgentConfigsHolder; import org.apache.ambari.server.agent.stomp.MetadataHolder; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.audit.AuditLogger; import org.apache.ambari.server.audit.AuditLoggerDefaultImpl; import org.apache.ambari.server.actionmanager.HostRoleStatus; <<<<<<< import org.apache.ambari.server.controller.KerberosHelper; import org.apache.ambari.server.controller.KerberosHelperImpl; ======= import org.apache.ambari.server.controller.AmbariServer; import org.apache.ambari.server.controller.KerberosHelper; >>>>>>> import org.apache.ambari.server.controller.KerberosHelper; import org.apache.ambari.server.controller.KerberosHelperImpl; import org.apache.ambari.server.controller.AmbariServer; import org.apache.ambari.server.controller.KerberosHelper; <<<<<<< import org.apache.ambari.server.events.MetadataUpdateEvent; import org.apache.ambari.server.hooks.HookService; import org.apache.ambari.server.hooks.users.UserHookService; import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider; import org.apache.ambari.server.metadata.RoleCommandOrderProvider; ======= import org.apache.ambari.server.controller.internal.AmbariServerConfigurationCategory; import org.apache.ambari.server.ldap.domain.AmbariLdapConfigurationKeys; >>>>>>> import org.apache.ambari.server.events.MetadataUpdateEvent; import org.apache.ambari.server.hooks.HookService; import org.apache.ambari.server.hooks.users.UserHookService; import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider; import org.apache.ambari.server.metadata.RoleCommandOrderProvider; import org.apache.ambari.server.controller.internal.AmbariServerConfigurationCategory; import org.apache.ambari.server.ldap.domain.AmbariLdapConfigurationKeys; <<<<<<< import org.apache.ambari.server.scheduler.ExecutionScheduler; import org.apache.ambari.server.security.SecurityHelper; import org.apache.ambari.server.security.encryption.CredentialStoreService; import org.apache.ambari.server.stack.StackManagerFactory; ======= import org.apache.ambari.server.orm.dao.AmbariConfigurationDAO; import org.apache.ambari.server.serveraction.kerberos.PrepareKerberosIdentitiesServerAction; >>>>>>> import org.apache.ambari.server.scheduler.ExecutionScheduler; import org.apache.ambari.server.security.SecurityHelper; import org.apache.ambari.server.security.encryption.CredentialStoreService; import org.apache.ambari.server.stack.StackManagerFactory; import org.apache.ambari.server.orm.dao.AmbariConfigurationDAO; import 
org.apache.ambari.server.serveraction.kerberos.PrepareKerberosIdentitiesServerAction; <<<<<<< Method updateHostComponentLastStateTable = UpgradeCatalog300.class.getDeclaredMethod("updateHostComponentLastStateTable"); ======= Method upgradeLdapConfiguration = UpgradeCatalog300.class.getDeclaredMethod("upgradeLdapConfiguration"); Method createRoleAuthorizations = UpgradeCatalog300.class.getDeclaredMethod("createRoleAuthorizations"); Method addUserAuthenticationSequence = UpgradeCatalog300.class.getDeclaredMethod("addUserAuthenticationSequence"); >>>>>>> Method updateHostComponentLastStateTable = UpgradeCatalog300.class.getDeclaredMethod("updateHostComponentLastStateTable"); Method upgradeLdapConfiguration = UpgradeCatalog300.class.getDeclaredMethod("upgradeLdapConfiguration"); Method createRoleAuthorizations = UpgradeCatalog300.class.getDeclaredMethod("createRoleAuthorizations"); Method addUserAuthenticationSequence = UpgradeCatalog300.class.getDeclaredMethod("addUserAuthenticationSequence"); <<<<<<< .addMockedMethod(updateHostComponentLastStateTable) ======= .addMockedMethod(upgradeLdapConfiguration) .addMockedMethod(createRoleAuthorizations) .addMockedMethod(addUserAuthenticationSequence) >>>>>>> .addMockedMethod(updateHostComponentLastStateTable) .addMockedMethod(upgradeLdapConfiguration) .addMockedMethod(createRoleAuthorizations) .addMockedMethod(addUserAuthenticationSequence) <<<<<<< Module module = new AbstractModule() { @Override public void configure() { PartialNiceMockBinder.newBuilder().addConfigsBindings().addFactoriesInstallBinding().build().configure(binder()); bind(DBAccessor.class).toInstance(dbAccessor); bind(OsFamily.class).toInstance(osFamily); bind(EntityManager.class).toInstance(entityManager); bind(PersistedState.class).toInstance(mock(PersistedStateImpl.class)); bind(Clusters.class).toInstance(mock(ClustersImpl.class)); bind(SecurityHelper.class).toInstance(mock(SecurityHelper.class)); bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class); bind(ActionDBAccessor.class).toInstance(createNiceMock(ActionDBAccessorImpl.class)); bind(UnitOfWork.class).toInstance(createNiceMock(UnitOfWork.class)); bind(RoleCommandOrderProvider.class).to(CachedRoleCommandOrderProvider.class); bind(StageFactory.class).to(StageFactoryImpl.class); bind(AuditLogger.class).toInstance(createNiceMock(AuditLoggerDefaultImpl.class)); bind(PasswordEncoder.class).toInstance(new StandardPasswordEncoder()); bind(HookService.class).to(UserHookService.class); bind(ServiceComponentHostFactory.class).toInstance(createNiceMock(ServiceComponentHostFactory.class)); bind(AbstractRootServiceResponseFactory.class).to(RootServiceResponseFactory.class); bind(CredentialStoreService.class).toInstance(createNiceMock(CredentialStoreService.class)); bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementControllerImpl.class)); bind(ExecutionScheduler.class).toInstance(createNiceMock(ExecutionScheduler.class)); bind(AmbariMetaInfo.class).toInstance(createNiceMock(AmbariMetaInfo.class)); bind(KerberosHelper.class).toInstance(createNiceMock(KerberosHelperImpl.class)); bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class)); install(new FactoryModuleBuilder().implement( Host.class, HostImpl.class).build(HostFactory.class)); install(new FactoryModuleBuilder().implement( Cluster.class, ClusterImpl.class).build(ClusterFactory.class)); install(new FactoryModuleBuilder().build(UpgradeContextFactory.class)); install(new FactoryModuleBuilder().implement( 
Service.class, ServiceImpl.class).build(ServiceFactory.class)); } }; ======= Module module = getTestGuiceModule(); // updateStageTable Capture<DBAccessor.DBColumnInfo> updateStageTableCaptures = newCapture(CaptureType.ALL); dbAccessor.addColumn(eq(STAGE_TABLE), capture(updateStageTableCaptures)); expectLastCall().once(); dbAccessor.addColumn(eq(STAGE_TABLE), capture(updateStageTableCaptures)); expectLastCall().once(); dbAccessor.addColumn(eq(REQUEST_TABLE), capture(updateStageTableCaptures)); expectLastCall().once(); >>>>>>> Module module = new AbstractModule() { @Override public void configure() { PartialNiceMockBinder.newBuilder().addConfigsBindings().addFactoriesInstallBinding().build().configure(binder()); bind(DBAccessor.class).toInstance(dbAccessor); bind(OsFamily.class).toInstance(osFamily); bind(EntityManager.class).toInstance(entityManager); bind(PersistedState.class).toInstance(mock(PersistedStateImpl.class)); bind(Clusters.class).toInstance(mock(ClustersImpl.class)); bind(SecurityHelper.class).toInstance(mock(SecurityHelper.class)); bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class); bind(ActionDBAccessor.class).toInstance(createNiceMock(ActionDBAccessorImpl.class)); bind(UnitOfWork.class).toInstance(createNiceMock(UnitOfWork.class)); bind(RoleCommandOrderProvider.class).to(CachedRoleCommandOrderProvider.class); bind(StageFactory.class).to(StageFactoryImpl.class); bind(AuditLogger.class).toInstance(createNiceMock(AuditLoggerDefaultImpl.class)); bind(PasswordEncoder.class).toInstance(new StandardPasswordEncoder()); bind(HookService.class).to(UserHookService.class); bind(ServiceComponentHostFactory.class).toInstance(createNiceMock(ServiceComponentHostFactory.class)); bind(AbstractRootServiceResponseFactory.class).to(RootServiceResponseFactory.class); bind(CredentialStoreService.class).toInstance(createNiceMock(CredentialStoreService.class)); bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementControllerImpl.class)); bind(ExecutionScheduler.class).toInstance(createNiceMock(ExecutionScheduler.class)); bind(AmbariMetaInfo.class).toInstance(createNiceMock(AmbariMetaInfo.class)); bind(KerberosHelper.class).toInstance(createNiceMock(KerberosHelperImpl.class)); bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class)); install(new FactoryModuleBuilder().implement( Host.class, HostImpl.class).build(HostFactory.class)); install(new FactoryModuleBuilder().implement( Cluster.class, ClusterImpl.class).build(ClusterFactory.class)); install(new FactoryModuleBuilder().build(UpgradeContextFactory.class)); install(new FactoryModuleBuilder().implement( Service.class, ServiceImpl.class).build(ServiceFactory.class)); } }; // updateStageTable Capture<DBAccessor.DBColumnInfo> updateStageTableCaptures = newCapture(CaptureType.ALL); dbAccessor.addColumn(eq(STAGE_TABLE), capture(updateStageTableCaptures)); expectLastCall().once(); dbAccessor.addColumn(eq(STAGE_TABLE), capture(updateStageTableCaptures)); expectLastCall().once(); dbAccessor.addColumn(eq(REQUEST_TABLE), capture(updateStageTableCaptures)); expectLastCall().once(); <<<<<<< Capture<DBAccessor.DBColumnInfo> lastValidColumn = newCapture(); dbAccessor.addColumn(eq(UpgradeCatalog300.COMPONENT_STATE_TABLE), capture(lastValidColumn)); ======= // removeSecurityState >>>>>>> Capture<DBAccessor.DBColumnInfo> lastValidColumn = newCapture(); dbAccessor.addColumn(eq(UpgradeCatalog300.COMPONENT_STATE_TABLE), capture(lastValidColumn)); // removeSecurityState <<<<<<< 
DBAccessor.DBColumnInfo capturedLastValidColumn = lastValidColumn.getValue(); Assert.assertEquals(UpgradeCatalog300.COMPONENT_LAST_STATE_COLUMN, capturedLastValidColumn.getName()); Assert.assertEquals(State.UNKNOWN, capturedLastValidColumn.getDefaultValue()); Assert.assertEquals(String.class, capturedLastValidColumn.getType()); ======= validateCreateUserAuthenticationTable(createUserAuthenticationTableCaptures); validateUpdateGroupMembershipRecords(createMembersTableCaptures); validateUpdateAdminPrivilegeRecords(createAdminPrincipalTableCaptures); validateUpdateUsersTable(updateUserTableCaptures, alterUserTableCaptures); >>>>>>> DBAccessor.DBColumnInfo capturedLastValidColumn = lastValidColumn.getValue(); Assert.assertEquals(UpgradeCatalog300.COMPONENT_LAST_STATE_COLUMN, capturedLastValidColumn.getName()); Assert.assertEquals(State.UNKNOWN, capturedLastValidColumn.getDefaultValue()); Assert.assertEquals(String.class, capturedLastValidColumn.getType()); validateCreateUserAuthenticationTable(createUserAuthenticationTableCaptures); validateUpdateGroupMembershipRecords(createMembersTableCaptures); validateUpdateAdminPrivilegeRecords(createAdminPrincipalTableCaptures); validateUpdateUsersTable(updateUserTableCaptures, alterUserTableCaptures); <<<<<<< expect(injector.getInstance(MetadataHolder.class)).andReturn(createNiceMock(MetadataHolder.class)).anyTimes(); expect(injector.getInstance(AgentConfigsHolder.class)).andReturn(createNiceMock(AgentConfigsHolder.class)).anyTimes(); ======= expect(injector.getInstance(AmbariServer.class)).andReturn(createNiceMock(AmbariServer.class)).anyTimes(); KerberosHelper kerberosHelperMock = createNiceMock(KerberosHelper.class); expect(kerberosHelperMock.createTemporaryDirectory()).andReturn(new File("/invalid/file/path")).times(2); expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelperMock).anyTimes(); >>>>>>> expect(injector.getInstance(MetadataHolder.class)).andReturn(createNiceMock(MetadataHolder.class)).anyTimes(); expect(injector.getInstance(AgentConfigsHolder.class)).andReturn(createNiceMock(AgentConfigsHolder.class)).anyTimes(); expect(injector.getInstance(AmbariServer.class)).andReturn(createNiceMock(AmbariServer.class)).anyTimes(); KerberosHelper kerberosHelperMock = createNiceMock(KerberosHelper.class); expect(kerberosHelperMock.createTemporaryDirectory()).andReturn(new File("/invalid/file/path")).times(2); expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelperMock).anyTimes(); <<<<<<< UpgradeCatalog300 upgradeCatalog300 = new UpgradeCatalog300(injector); field.set(upgradeCatalog300, createNiceMock(Configuration.class)); ======= UpgradeCatalog300 upgradeCatalog300 = createMockBuilder(UpgradeCatalog300.class).addMockedMethod("getPrepareIdentityServerAction").addMockedMethod("executeInTransaction").createMock(); PrepareKerberosIdentitiesServerAction mockAction = createNiceMock(PrepareKerberosIdentitiesServerAction.class); expect(upgradeCatalog300.getPrepareIdentityServerAction()).andReturn(mockAction).times(2); upgradeCatalog300.executeInTransaction(anyObject()); expectLastCall().times(2); upgradeCatalog300.injector = injector; replay(upgradeCatalog300); field.set(upgradeCatalog300, configuration); >>>>>>> UpgradeCatalog300 upgradeCatalog300 = createMockBuilder(UpgradeCatalog300.class).addMockedMethod("getPrepareIdentityServerAction").addMockedMethod("executeInTransaction").createMock(); PrepareKerberosIdentitiesServerAction mockAction = createNiceMock(PrepareKerberosIdentitiesServerAction.class); 
expect(upgradeCatalog300.getPrepareIdentityServerAction()).andReturn(mockAction).times(2); upgradeCatalog300.executeInTransaction(anyObject()); expectLastCall().times(2); upgradeCatalog300.injector = injector; replay(upgradeCatalog300); field.set(upgradeCatalog300, createNiceMock(Configuration.class));
<<<<<<< public static final String COMMAND_PARAM_VERSION = VERSION; public static final String COMMAND_PARAM_CLUSTER_NAME = "clusterName"; public static final String COMMAND_PARAM_DIRECTION = "upgrade_direction"; private static final String COMMAND_PARAM_UPGRADE_PACK = "upgrade_pack"; public static final String COMMAND_PARAM_REQUEST_ID = "request_id"; private static final String COMMAND_PARAM_UPGRADE_TYPE = "upgrade_type"; private static final String COMMAND_PARAM_TASKS = "tasks"; private static final String COMMAND_PARAM_STRUCT_OUT = "structured_out"; private static final String COMMAND_DOWNGRADE_FROM_VERSION = "downgrade_from_version"; /** * The original "current" stack of the cluster before the upgrade started. * This is the same regardless of whether the current direction is * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}. */ public static final String COMMAND_PARAM_ORIGINAL_STACK = "original_stack"; /** * The target upgrade stack before the upgrade started. This is the same * regardless of whether the current direction is {@link Direction#UPGRADE} or * {@link Direction#DOWNGRADE}. */ public static final String COMMAND_PARAM_TARGET_STACK = "target_stack"; /** * The list of supported services put on a command. */ public static final String COMMAND_PARAM_SUPPORTED_SERVICES = "supported_services"; ======= >>>>>>> /** * The list of supported services put on a command. */ public static final String COMMAND_PARAM_SUPPORTED_SERVICES = "supported_services"; <<<<<<< switch (direction) { case UPGRADE: sourceStackId = cluster.getCurrentStackVersion(); RepositoryVersionEntity targetRepositoryVersion = s_repoVersionDAO.findByStackNameAndVersion( sourceStackId.getStackName(), version); // !!! Consult the version definition and add the service names to supportedServices if (targetRepositoryVersion.getType() != RepositoryType.STANDARD) { try { VersionDefinitionXml vdf = targetRepositoryVersion.getRepositoryXml(); supportedServices.addAll(vdf.getAvailableServiceNames()); // !!! better not be, but just in case if (!supportedServices.isEmpty()) { scope = UpgradeScope.PARTIAL; } } catch (Exception e) { String msg = String.format("Could not parse version definition for %s. Upgrade will not proceed.", version); LOG.error(msg, e); throw new AmbariException(msg); } } targetStackId = targetRepositoryVersion.getStackId(); break; case DOWNGRADE: sourceStackId = cluster.getCurrentStackVersion(); targetStackId = cluster.getDesiredStackVersion(); break; } ======= >>>>>>> switch (direction) { case UPGRADE: StackId sourceStackId = cluster.getCurrentStackVersion(); RepositoryVersionEntity targetRepositoryVersion = s_repoVersionDAO.findByStackNameAndVersion( sourceStackId.getStackName(), version); // !!! Consult the version definition and add the service names to supportedServices if (targetRepositoryVersion.getType() != RepositoryType.STANDARD) { try { VersionDefinitionXml vdf = targetRepositoryVersion.getRepositoryXml(); supportedServices.addAll(vdf.getAvailableServiceNames()); // !!! better not be, but just in case if (!supportedServices.isEmpty()) { scope = UpgradeScope.PARTIAL; } } catch (Exception e) { String msg = String.format("Could not parse version definition for %s. 
Upgrade will not proceed.", version); LOG.error(msg, e); throw new AmbariException(msg); } } break; case DOWNGRADE: break; } <<<<<<< Map<String, String> commandParams = getNewParameterMap(request); commandParams.put(COMMAND_PARAM_CLUSTER_NAME, cluster.getClusterName()); commandParams.put(COMMAND_PARAM_VERSION, context.getVersion()); commandParams.put(COMMAND_PARAM_DIRECTION, context.getDirection().name().toLowerCase()); commandParams.put(COMMAND_PARAM_ORIGINAL_STACK, context.getOriginalStackId().getStackId()); commandParams.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId()); commandParams.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion()); commandParams.put(COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName()); commandParams.put(COMMAND_PARAM_SUPPORTED_SERVICES, StringUtils.join(context.getSupportedServices(), ',')); ======= Map<String, String> commandParams = getNewParameterMap(request, context); commandParams.put(UpgradeContext.COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName()); >>>>>>> Map<String, String> commandParams = getNewParameterMap(request, context); commandParams.put(UpgradeContext.COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName()); commandParams.put(COMMAND_PARAM_SUPPORTED_SERVICES, StringUtils.join(context.getSupportedServices(), ','));
<<<<<<< Map<String, String> params = new HashMap<>(); ======= >>>>>>> <<<<<<< final CalculatedStatus status = CalculatedStatus.statusFromRequest(s_hostRoleCommandDAO, topologyManager, entity.getRequestId()); ======= Map<Long, HostRoleCommandStatusSummaryDTO> summary = s_hostRoleCommandDAO.findAggregateCounts(entity.getRequestId()); // get summaries from TopologyManager for logical requests summary.putAll(topologyManager.getStageSummaries(entity.getRequestId())); // summary might be empty due to delete host have cleared all // HostRoleCommands or due to hosts haven't registered yet with the cluster // when the cluster is provisioned with a Blueprint final CalculatedStatus status; LogicalRequest logicalRequest = topologyManager.getRequest(entity.getRequestId()); if (summary.isEmpty() && null != logicalRequest) { // In this case, it appears that there are no tasks but this is a logical // topology request, so it's a matter of hosts simply not registering yet // for tasks to be created ==> status = PENDING. // For a new LogicalRequest there should be at least one HostRequest, // while if they were removed already ==> status = COMPLETED. if (logicalRequest.getHostRequests().isEmpty()) { status = CalculatedStatus.COMPLETED; } else { status = CalculatedStatus.PENDING; } } else { // there are either tasks or this is not a logical request, so do normal // status calculations status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet()); } >>>>>>> Map<Long, HostRoleCommandStatusSummaryDTO> summary = s_hostRoleCommandDAO.findAggregateCounts(entity.getRequestId()); // get summaries from TopologyManager for logical requests summary.putAll(topologyManager.getStageSummaries(entity.getRequestId())); // summary might be empty due to delete host have cleared all // HostRoleCommands or due to hosts haven't registered yet with the cluster // when the cluster is provisioned with a Blueprint final CalculatedStatus status; LogicalRequest logicalRequest = topologyManager.getRequest(entity.getRequestId()); if (summary.isEmpty() && null != logicalRequest) { // In this case, it appears that there are no tasks but this is a logical // topology request, so it's a matter of hosts simply not registering yet // for tasks to be created ==> status = PENDING. // For a new LogicalRequest there should be at least one HostRequest, // while if they were removed already ==> status = COMPLETED. if (logicalRequest.getHostRequests().isEmpty()) { status = CalculatedStatus.COMPLETED; } else { status = CalculatedStatus.PENDING; } } else { // there are either tasks or this is not a logical request, so do normal // status calculations status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet()); }
<<<<<<< public String getDesiredStackId() { return desiredStackId.getStackId(); ======= @ApiModelProperty(hidden = true) public String getDesiredStackVersion() { return desiredStackVersion; >>>>>>> @ApiModelProperty(hidden = true) public String getDesiredStackId() { return desiredStackId.getStackId(); <<<<<<< ======= @ApiModelProperty(name = "maintenance_state") >>>>>>> @ApiModelProperty(name = "maintenance_state")
<<<<<<< import org.apache.ambari.server.security.SecurityHelper; import org.apache.ambari.server.security.authorization.AmbariAuthorizationFilter; ======= >>>>>>> import org.apache.ambari.server.security.authorization.AmbariAuthorizationFilter;
<<<<<<< @Test public void testExecuteDDLUpdates() throws Exception { Method addServiceComponentColumn = UpgradeCatalog300.class .getDeclaredMethod("addServiceComponentColumn"); UpgradeCatalog300 upgradeCatalog300 = createMockBuilder(UpgradeCatalog300.class) .addMockedMethod(addServiceComponentColumn) .createMock(); upgradeCatalog300.addServiceComponentColumn(); replay(upgradeCatalog300); upgradeCatalog300.executeDDLUpdates(); verify(upgradeCatalog300); } ======= @Test public void testExecuteDDLUpdates() throws Exception { Method updateStageTable = UpgradeCatalog300.class.getDeclaredMethod("updateStageTable"); UpgradeCatalog300 upgradeCatalog300 = createMockBuilder(UpgradeCatalog300.class) .addMockedMethod(updateStageTable) .createMock(); upgradeCatalog300.updateStageTable(); replay(upgradeCatalog300); upgradeCatalog300.executeDDLUpdates(); verify(upgradeCatalog300); } >>>>>>> @Test public void testExecuteDDLUpdates() throws Exception { Method updateStageTable = UpgradeCatalog300.class.getDeclaredMethod("updateStageTable"); Method addServiceComponentColumn = UpgradeCatalog300.class .getDeclaredMethod("addServiceComponentColumn"); UpgradeCatalog300 upgradeCatalog300 = createMockBuilder(UpgradeCatalog300.class) .addMockedMethod(addServiceComponentColumn) .addMockedMethod(updateStageTable) .createMock(); upgradeCatalog300.addServiceComponentColumn(); upgradeCatalog300.updateStageTable(); replay(upgradeCatalog300); upgradeCatalog300.executeDDLUpdates(); verify(upgradeCatalog300); }
<<<<<<< import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo; ======= import org.apache.ambari.server.controller.internal.CalculatedStatus; import org.apache.ambari.server.orm.DBAccessor; >>>>>>> import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo; import org.apache.ambari.server.controller.internal.CalculatedStatus; import org.apache.ambari.server.orm.DBAccessor; <<<<<<< addServiceComponentColumn(); ======= updateStageTable(); } protected void updateStageTable() throws SQLException { dbAccessor.addColumn(STAGE_TABLE, new DBAccessor.DBColumnInfo(STAGE_STATUS_COLUMN, String.class, 255, HostRoleStatus.PENDING, false)); dbAccessor.addColumn(STAGE_TABLE, new DBAccessor.DBColumnInfo(STAGE_DISPLAY_STATUS_COLUMN, String.class, 255, HostRoleStatus.PENDING, false)); dbAccessor.addColumn(REQUEST_TABLE, new DBAccessor.DBColumnInfo(REQUEST_DISPLAY_STATUS_COLUMN, String.class, 255, HostRoleStatus.PENDING, false)); >>>>>>> addServiceComponentColumn(); updateStageTable(); } protected void updateStageTable() throws SQLException { dbAccessor.addColumn(STAGE_TABLE, new DBAccessor.DBColumnInfo(STAGE_STATUS_COLUMN, String.class, 255, HostRoleStatus.PENDING, false)); dbAccessor.addColumn(STAGE_TABLE, new DBAccessor.DBColumnInfo(STAGE_DISPLAY_STATUS_COLUMN, String.class, 255, HostRoleStatus.PENDING, false)); dbAccessor.addColumn(REQUEST_TABLE, new DBAccessor.DBColumnInfo(REQUEST_DISPLAY_STATUS_COLUMN, String.class, 255, HostRoleStatus.PENDING, false)); <<<<<<< /** * Updates the {@code servicecomponentdesiredstate} table. * * @throws SQLException */ protected void addServiceComponentColumn() throws SQLException { dbAccessor.addColumn(UpgradeCatalog250.COMPONENT_TABLE, new DBColumnInfo("repo_state", String.class, 255, RepositoryVersionState.INIT.name(), false)); } ======= protected void setStatusOfStagesAndRequests() { executeInTransaction(new Runnable() { @Override public void run() { try { RequestDAO requestDAO = injector.getInstance(RequestDAO.class); StageFactory stageFactory = injector.getInstance(StageFactory.class); EntityManager em = getEntityManagerProvider().get(); List<RequestEntity> requestEntities= requestDAO.findAll(); for (RequestEntity requestEntity: requestEntities) { Collection<StageEntity> stageEntities= requestEntity.getStages(); List <HostRoleStatus> stageDisplayStatuses = new ArrayList<>(); List <HostRoleStatus> stageStatuses = new ArrayList<>(); for (StageEntity stageEntity: stageEntities) { Stage stage = stageFactory.createExisting(stageEntity); List<HostRoleCommand> hostRoleCommands = stage.getOrderedHostRoleCommands(); Map<HostRoleStatus, Integer> statusCount = CalculatedStatus.calculateStatusCountsForTasks(hostRoleCommands); HostRoleStatus stageDisplayStatus = CalculatedStatus.calculateSummaryDisplayStatus(statusCount, hostRoleCommands.size(), stage.isSkippable()); HostRoleStatus stageStatus = CalculatedStatus.calculateStageStatus(hostRoleCommands, statusCount, stage.getSuccessFactors(), stage.isSkippable()); stageEntity.setStatus(stageStatus); stageStatuses.add(stageStatus); stageEntity.setDisplayStatus(stageDisplayStatus); stageDisplayStatuses.add(stageDisplayStatus); em.merge(stageEntity); } HostRoleStatus requestStatus = CalculatedStatus.getOverallStatusForRequest(stageStatuses); requestEntity.setStatus(requestStatus); HostRoleStatus requestDisplayStatus = CalculatedStatus.getOverallDisplayStatusForRequest(stageDisplayStatuses); requestEntity.setDisplayStatus(requestDisplayStatus); em.merge(requestEntity); } } catch (Exception e) { LOG.warn("Setting status for stages 
and Requests threw exception. ", e); } } }); } >>>>>>> /** * Updates the {@code servicecomponentdesiredstate} table. * * @throws SQLException */ protected void addServiceComponentColumn() throws SQLException { dbAccessor.addColumn(UpgradeCatalog250.COMPONENT_TABLE, new DBColumnInfo("repo_state", String.class, 255, RepositoryVersionState.INIT.name(), false)); } protected void setStatusOfStagesAndRequests() { executeInTransaction(new Runnable() { @Override public void run() { try { RequestDAO requestDAO = injector.getInstance(RequestDAO.class); StageFactory stageFactory = injector.getInstance(StageFactory.class); EntityManager em = getEntityManagerProvider().get(); List<RequestEntity> requestEntities= requestDAO.findAll(); for (RequestEntity requestEntity: requestEntities) { Collection<StageEntity> stageEntities= requestEntity.getStages(); List <HostRoleStatus> stageDisplayStatuses = new ArrayList<>(); List <HostRoleStatus> stageStatuses = new ArrayList<>(); for (StageEntity stageEntity: stageEntities) { Stage stage = stageFactory.createExisting(stageEntity); List<HostRoleCommand> hostRoleCommands = stage.getOrderedHostRoleCommands(); Map<HostRoleStatus, Integer> statusCount = CalculatedStatus.calculateStatusCountsForTasks(hostRoleCommands); HostRoleStatus stageDisplayStatus = CalculatedStatus.calculateSummaryDisplayStatus(statusCount, hostRoleCommands.size(), stage.isSkippable()); HostRoleStatus stageStatus = CalculatedStatus.calculateStageStatus(hostRoleCommands, statusCount, stage.getSuccessFactors(), stage.isSkippable()); stageEntity.setStatus(stageStatus); stageStatuses.add(stageStatus); stageEntity.setDisplayStatus(stageDisplayStatus); stageDisplayStatuses.add(stageDisplayStatus); em.merge(stageEntity); } HostRoleStatus requestStatus = CalculatedStatus.getOverallStatusForRequest(stageStatuses); requestEntity.setStatus(requestStatus); HostRoleStatus requestDisplayStatus = CalculatedStatus.getOverallDisplayStatusForRequest(stageDisplayStatuses); requestEntity.setDisplayStatus(requestDisplayStatus); em.merge(requestEntity); } } catch (Exception e) { LOG.warn("Setting status for stages and Requests threw exception. ", e); } } }); }
<<<<<<< private GroupDAO groupDAO; ======= protected GroupDAO groupDAO; @Inject private PamAuthenticationFactory pamAuthenticationFactory; >>>>>>> private GroupDAO groupDAO; @Inject private PamAuthenticationFactory pamAuthenticationFactory; <<<<<<< if (isPamEnabled()) { //Set PAM configuration file (found under /etc/pam.d) String pamConfig = configuration.getPamConfigurationFile(); PAM pam; ======= if(isPamEnabled()){ PAM pam; String userName = String.valueOf(authentication.getPrincipal()); UserEntity existingUser = userDAO.findUserByName(userName); if ((existingUser != null) && (existingUser.getUserType() != UserType.PAM)) { String errorMsg = String.format("%s user exists with the username %s. Cannot authenticate via PAM", existingUser.getUserType(), userName); LOG.error(errorMsg); return null; } try{ //Set PAM configuration file (found under /etc/pam.d) String pamConfig = configuration.getPamConfigurationFile(); pam = pamAuthenticationFactory.createInstance(pamConfig); } catch(PAMException ex) { LOG.error("Unable to Initialize PAM." + ex.getMessage()); throw new AuthenticationServiceException("Unable to Initialize PAM - ", ex); } >>>>>>> if (isPamEnabled()) { //Set PAM configuration file (found under /etc/pam.d) String pamConfig = configuration.getPamConfigurationFile(); PAM pam;
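// Hedged sketch: the resolved branch above creates PAM through a pamAuthenticationFactory so
// tests can substitute a mock instead of reading the real configuration under /etc/pam.d.
// A minimal factory consistent with that call site, assuming jpam's PAM(String) constructor;
// the real Ambari factory may differ in naming and error handling.
import net.sf.jpam.PAM;
import net.sf.jpam.PAMException;

public class PamAuthenticationFactory {
  /** Creates a PAM handle for the given configuration/service name (a file under /etc/pam.d). */
  public PAM createInstance(String pamConfig) throws PAMException {
    return new PAM(pamConfig);
  }
}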
<<<<<<< import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.inject.Inject; import com.google.inject.assistedinject.Assisted; import com.google.inject.assistedinject.AssistedInject; ======= import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; >>>>>>> import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import com.google.inject.Inject; import com.google.inject.assistedinject.Assisted; import com.google.inject.assistedinject.AssistedInject; <<<<<<< public static final String USERNAME_PROPERTY_ID = "user_name"; public static final String DISPLAY_NAME_PROPERTY_ID = "display_name"; public static final String LOCAL_USERNAME_PROPERTY_ID = "local_user_name"; public static final String ACTIVE_PROPERTY_ID = "active"; public static final String CREATE_TIME_PROPERTY_ID = "created"; public static final String CONSECUTIVE_FAILURES_PROPERTY_ID = "consecutive_failures"; public static final String ADMIN_PROPERTY_ID = "admin"; public static final String GROUPS_PROPERTY_ID = "groups"; public static final String USER_USERNAME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + USERNAME_PROPERTY_ID; public static final String USER_DISPLAY_NAME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + DISPLAY_NAME_PROPERTY_ID; public static final String USER_LOCAL_USERNAME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + LOCAL_USERNAME_PROPERTY_ID; public static final String USER_ACTIVE_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + ACTIVE_PROPERTY_ID; public static final String USER_CREATE_TIME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + CREATE_TIME_PROPERTY_ID; public static final String USER_CONSECUTIVE_FAILURES_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + CONSECUTIVE_FAILURES_PROPERTY_ID; public static final String USER_ADMIN_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + ADMIN_PROPERTY_ID; public static final String USER_GROUPS_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + GROUPS_PROPERTY_ID; /* ******************************************************* * Deprecated properties, kept for backwards compatibility and to maintain API V1 contract. * These properties are related to a user's authentication resource. 
* ******************************************************* */ @Deprecated public static final String PASSWORD_PROPERTY_ID = "password"; @Deprecated public static final String OLD_PASSWORD_PROPERTY_ID = "old_password"; @Deprecated public static final String LDAP_USER_PROPERTY_ID = "ldap_user"; @Deprecated public static final String USER_TYPE_PROPERTY_ID = "user_type"; @Deprecated public static final String USER_PASSWORD_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + PASSWORD_PROPERTY_ID; @Deprecated public static final String USER_OLD_PASSWORD_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + OLD_PASSWORD_PROPERTY_ID; @Deprecated public static final String USER_LDAP_USER_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + LDAP_USER_PROPERTY_ID; @Deprecated public static final String USER_USER_TYPE_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + USER_TYPE_PROPERTY_ID; /* ******************************************************* */ private static final Set<String> PK_PROPERTY_IDS = ImmutableSet.of( USER_USERNAME_PROPERTY_ID ); private static final Set<String> PROPERTY_IDS = ImmutableSet.of( USER_USERNAME_PROPERTY_ID, USER_DISPLAY_NAME_PROPERTY_ID, USER_LOCAL_USERNAME_PROPERTY_ID, USER_ACTIVE_PROPERTY_ID, USER_CREATE_TIME_PROPERTY_ID, USER_CONSECUTIVE_FAILURES_PROPERTY_ID, USER_GROUPS_PROPERTY_ID, USER_PASSWORD_PROPERTY_ID, USER_OLD_PASSWORD_PROPERTY_ID, USER_LDAP_USER_PROPERTY_ID, USER_USER_TYPE_PROPERTY_ID, USER_ADMIN_PROPERTY_ID ); private static final Map<Resource.Type, String> KEY_PROPERTY_IDS = ImmutableMap.of( Resource.Type.User, USER_USERNAME_PROPERTY_ID ); @Inject private Users users; ======= public static final String USER_USERNAME_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "user_name"); public static final String USER_PASSWORD_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "password"); public static final String USER_OLD_PASSWORD_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "old_password"); public static final String USER_LDAP_USER_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "ldap_user"); public static final String USER_TYPE_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "user_type"); public static final String USER_ACTIVE_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "active"); public static final String USER_GROUPS_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "groups"); public static final String USER_ADMIN_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "admin"); /** * The key property ids for a User resource. 
*/ private static Map<Resource.Type, String> keyPropertyIds = ImmutableMap.<Resource.Type, String>builder() .put(Resource.Type.User, USER_USERNAME_PROPERTY_ID) .build(); >>>>>>> public static final String USERNAME_PROPERTY_ID = "user_name"; public static final String DISPLAY_NAME_PROPERTY_ID = "display_name"; public static final String LOCAL_USERNAME_PROPERTY_ID = "local_user_name"; public static final String ACTIVE_PROPERTY_ID = "active"; public static final String CREATE_TIME_PROPERTY_ID = "created"; public static final String CONSECUTIVE_FAILURES_PROPERTY_ID = "consecutive_failures"; public static final String ADMIN_PROPERTY_ID = "admin"; public static final String GROUPS_PROPERTY_ID = "groups"; public static final String USER_USERNAME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + USERNAME_PROPERTY_ID; public static final String USER_DISPLAY_NAME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + DISPLAY_NAME_PROPERTY_ID; public static final String USER_LOCAL_USERNAME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + LOCAL_USERNAME_PROPERTY_ID; public static final String USER_ACTIVE_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + ACTIVE_PROPERTY_ID; public static final String USER_CREATE_TIME_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + CREATE_TIME_PROPERTY_ID; public static final String USER_CONSECUTIVE_FAILURES_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + CONSECUTIVE_FAILURES_PROPERTY_ID; public static final String USER_ADMIN_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + ADMIN_PROPERTY_ID; public static final String USER_GROUPS_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + GROUPS_PROPERTY_ID; /* ******************************************************* * Deprecated properties, kept for backwards compatibility and to maintain API V1 contract. * These properties are related to a user's authentication resource. * ******************************************************* */ @Deprecated public static final String PASSWORD_PROPERTY_ID = "password"; @Deprecated public static final String OLD_PASSWORD_PROPERTY_ID = "old_password"; @Deprecated public static final String LDAP_USER_PROPERTY_ID = "ldap_user"; @Deprecated public static final String USER_TYPE_PROPERTY_ID = "user_type"; @Deprecated public static final String USER_PASSWORD_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + PASSWORD_PROPERTY_ID; @Deprecated public static final String USER_OLD_PASSWORD_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + OLD_PASSWORD_PROPERTY_ID; @Deprecated public static final String USER_LDAP_USER_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + LDAP_USER_PROPERTY_ID; @Deprecated public static final String USER_USER_TYPE_PROPERTY_ID = USER_RESOURCE_CATEGORY + "/" + USER_TYPE_PROPERTY_ID; /* ******************************************************* */ <<<<<<< @AssistedInject UserResourceProvider(@Assisted AmbariManagementController managementController) { super(Resource.Type.User, PROPERTY_IDS, KEY_PROPERTY_IDS, managementController); ======= UserResourceProvider(AmbariManagementController managementController) { super(Resource.Type.User, propertyIds, keyPropertyIds, managementController); >>>>>>> @AssistedInject UserResourceProvider(@Assisted AmbariManagementController managementController) { super(Resource.Type.User, propertyIds, keyPropertyIds, managementController); <<<<<<< return PK_PROPERTY_IDS; ======= return new HashSet<>(keyPropertyIds.values()); >>>>>>> return new HashSet<>(keyPropertyIds.values());
<<<<<<< = new LoadParameters(businessName, partKey, dependsOn); ======= = new LoadParameters(partKey, dependsOn, hostWhiteList); >>>>>>> = new LoadParameters(businessName, partKey, dependsOn, hostWhiteList);
<<<<<<< PluginPart(int pluginType, String businessName, File file, File oDexDir, File libraryDir, String[] dependsOn) { ======= PluginPart(int pluginType, File file, File oDexDir, File libraryDir, String[] dependsOn, String[] hostWhiteList) { >>>>>>> PluginPart(int pluginType, String businessName, File file, File oDexDir, File libraryDir, String[] dependsOn, String[] hostWhiteList) {
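// Hedged sketch: a LoadParameters value object consistent with the merged constructor call
// above, new LoadParameters(businessName, partKey, dependsOn, hostWhiteList). Field names,
// ordering and visibility are assumptions for illustration, not the library's actual definition.
public class LoadParameters {
  public final String businessName;    // logical business module the plugin part belongs to
  public final String partKey;         // key identifying the plugin part
  public final String[] dependsOn;     // partKeys this part depends on
  public final String[] hostWhiteList; // host packages/classes the plugin may access directly

  public LoadParameters(String businessName, String partKey,
                        String[] dependsOn, String[] hostWhiteList) {
    this.businessName = businessName;
    this.partKey = partKey;
    this.dependsOn = dependsOn;
    this.hostWhiteList = hostWhiteList;
  }
}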
<<<<<<< public PluginManagerThatUseDynamicLoader(String appId, Context context, ViewCallback viewCallback) { super(appId, context, viewCallback); ======= final private File mSoDirRoot; protected PluginManagerThatUseDynamicLoader(String appId, Context context) { super(context); File dir = context.getDir("dynamic-manager", Context.MODE_PRIVATE); mSoDirRoot = new File(dir, "SoDirRoot"); >>>>>>> protected PluginManagerThatUseDynamicLoader(String appId, Context context) { super(context);
<<<<<<< ======= import com.netflix.servo.aws.constants.Dimensions; >>>>>>> import com.netflix.servo.aws.constants.Dimensions;
<<<<<<< ======= private Configuration configuration = Configuration.DEFAULT; >>>>>>> private Configuration configuration = Configuration.DEFAULT; <<<<<<< * Override the default crouton manager. This is responsible for queueing and showing the Crouton. * * @param manager * A valid manager. You can get a new one via {@link Crouton#getNewManager()} * * @return this {@link Crouton}. */ public Crouton setCroutonManager(final Manager manager) { this.manager = manager; return this; } /** * Creates a manager if one has not be defined. * Unless you this crouton to pass it to another manager it shows on the main one. * * @return The {@link Manager} you have set or the default {@link Manager}. */ Manager getCroutonManager() { if (null == manager) { manager = Manager.getInstance(); } return manager; } /** ======= * Set the configuration on this crouton, idea being you can modify the none visual aspects pre showing it. * * @param configuration a configuration build using {@link Configuration.Builder} * @return this {@link Crouton} */ public Crouton setConfiguration(final Configuration configuration) { if (configuration != null) { this.configuration = configuration; } return this; } /** >>>>>>> * Override the default crouton manager. This is responsible for queueing and showing the Crouton. * * @param manager * A valid manager. You can get a new one via {@link Crouton#getNewManager()} * * @return this {@link Crouton}. */ public Crouton setCroutonManager(final Manager manager) { this.manager = manager; return this; } /** * Creates a manager if one has not be defined. * Unless you this crouton to pass it to another manager it shows on the main one. * * @return The {@link Manager} you have set or the default {@link Manager}. */ Manager getCroutonManager() { if (null == manager) { manager = Manager.getInstance(); } return manager; } /** * Set the configuration on this crouton, idea being you can modify the none visual aspects pre showing it. * * @param configuration a configuration build using {@link Configuration.Builder} * @return this {@link Crouton} */ public Crouton setConfiguration(final Configuration configuration) { if (configuration != null) { this.configuration = configuration; } return this; } /**
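// Hedged usage sketch: the resolution keeps both the manager override and the new
// setConfiguration(...) setter, so a caller can chain them before show(). Style.CONFIRM,
// Configuration.Builder#setDuration (milliseconds) and Crouton.getNewManager() are assumed
// from the library's public API and the javadoc above; "activity" is any foreground Activity.
Crouton.makeText(activity, "Profile saved", Style.CONFIRM)
    .setConfiguration(new Configuration.Builder()
        .setDuration(3000) // display time in milliseconds (assumed unit)
        .build())
    .setCroutonManager(Crouton.getNewManager()) // dedicated queue instead of the shared default
    .show();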
<<<<<<< import hudson.matrix.MatrixProject; import hudson.model.AbstractBuild; import hudson.model.AbstractProject; ======= import hudson.Util; >>>>>>> import hudson.model.AbstractBuild; import hudson.model.AbstractProject; <<<<<<< ======= import hudson.model.AbstractBuild; import hudson.model.AbstractProject; >>>>>>> <<<<<<< import hudson.util.FormValidation; ======= import hudson.tasks.Recorder; >>>>>>> <<<<<<< import net.sf.json.JSONObject; import org.apache.commons.collections.CollectionUtils; import org.kohsuke.stapler.AncestorInPath; import org.kohsuke.stapler.QueryParameter; import org.kohsuke.stapler.StaplerRequest; ======= >>>>>>> import org.apache.commons.collections.CollectionUtils; <<<<<<< return new PlotAction(project, this); ======= return new PlotAction(project, this); } public BuildStepMonitor getRequiredMonitorService() { return BuildStepMonitor.BUILD; >>>>>>> return new PlotAction(project, this); <<<<<<< @Extension public static final DescriptorImpl DESCRIPTOR = new DescriptorImpl(); public static class DescriptorImpl extends BuildStepDescriptor<Publisher> { public DescriptorImpl() { super(PlotPublisher.class); } public String getDisplayName() { return Messages.Plot_Publisher_DisplayName(); } @Override public boolean isApplicable(Class<? extends AbstractProject> jobType) { return AbstractProject.class.isAssignableFrom(jobType) && !MatrixProject.class.isAssignableFrom(jobType); } /** * Called when the user saves the project configuration. */ @Override public Publisher newInstance(StaplerRequest req, JSONObject formData) throws FormException { PlotPublisher publisher = new PlotPublisher(); for (Object data : SeriesFactory.getArray(formData.get("plots"))) { publisher.addPlot(bindPlot((JSONObject) data, req)); } return publisher; } private static Plot bindPlot(JSONObject data, StaplerRequest req) { Plot p = req.bindJSON(Plot.class, data); p.series = SeriesFactory.createSeriesList(data.get("series"), req); return p; } /** * Checks if the series file is valid. */ public FormValidation doCheckSeriesFile(@AncestorInPath AbstractProject<?, ?> project, @QueryParameter String value) throws IOException { return FilePath.validateFileMask(project.getSomeWorkspace(), value); } } ======= public static final PlotDescriptor DESCRIPTOR = new PlotDescriptor(); >>>>>>> public static final PlotDescriptor DESCRIPTOR = new PlotDescriptor();
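// Hedged sketch: the resolution replaces the inline descriptor with a shared PlotDescriptor
// instance. One plausible shape for that extracted class, reusing the logic from the removed
// inner descriptor above; the plugin's real class likely also keeps the form binding and
// series-file validation.
import hudson.Extension;
import hudson.matrix.MatrixProject;
import hudson.model.AbstractProject;
import hudson.tasks.BuildStepDescriptor;
import hudson.tasks.Publisher;

@Extension
public class PlotDescriptor extends BuildStepDescriptor<Publisher> {

  public PlotDescriptor() {
    super(PlotPublisher.class);
  }

  @Override
  public String getDisplayName() {
    return Messages.Plot_Publisher_DisplayName();
  }

  @Override
  public boolean isApplicable(Class<? extends AbstractProject> jobType) {
    // Plots apply to ordinary projects but not to matrix parent projects.
    return AbstractProject.class.isAssignableFrom(jobType)
        && !MatrixProject.class.isAssignableFrom(jobType);
  }
}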
<<<<<<< Mapbox.getInstance(getApplicationContext(), BuildConfig.MAP_ACCESS_TOKEN); dcLocationManager = new DcLocationManager(this); ======= try { DynamicLanguage.setContextLocale(this, DynamicLanguage.getSelectedLocale(this)); } catch (Exception e) { e.printStackTrace(); } >>>>>>> Mapbox.getInstance(getApplicationContext(), BuildConfig.MAP_ACCESS_TOKEN); dcLocationManager = new DcLocationManager(this); try { DynamicLanguage.setContextLocale(this, DynamicLanguage.getSelectedLocale(this)); } catch (Exception e) { e.printStackTrace(); }
<<<<<<< import javax.servlet.ServletContext; ======= import org.springframework.boot.bind.RelaxedPropertyResolver; >>>>>>> <<<<<<< ServletContext servletContext, HeartbeatProperties heartbeatProperties) { ======= List<ConsulRegistrationCustomizer> registrationCustomizers, HeartbeatProperties heartbeatProperties) { RelaxedPropertyResolver propertyResolver = new RelaxedPropertyResolver(context.getEnvironment()); >>>>>>> List<ConsulRegistrationCustomizer> registrationCustomizers, HeartbeatProperties heartbeatProperties) { <<<<<<< ServletContext servletContext, HeartbeatProperties heartbeatProperties) { ======= List<ConsulRegistrationCustomizer> registrationCustomizers, HeartbeatProperties heartbeatProperties) { RelaxedPropertyResolver propertyResolver = new RelaxedPropertyResolver(context.getEnvironment()); >>>>>>> List<ConsulRegistrationCustomizer> registrationCustomizers, HeartbeatProperties heartbeatProperties) {
<<<<<<< ======= @Deprecated @ContextConfiguration(initializers = ConsulTestcontainers.class) >>>>>>> @ContextConfiguration(initializers = ConsulTestcontainers.class)
<<<<<<< @Deprecated interface LocalResolver { ======= public interface LocalResolver { >>>>>>> @Deprecated public interface LocalResolver {
<<<<<<< // TODO: did heartbeatInterval need to be a field? protected Duration computeHearbeatInterval() { ======= /** * @deprecated the joda time {@link Period} will be replaced with java8 duration. * @return the computed heartbeat interval */ @Deprecated protected Period computeHearbeatInterval() { >>>>>>> /** * @return the computed heartbeat interval */ protected Duration computeHeartbeatInterval() { <<<<<<< ======= @Deprecated protected Period computeHeartbeatInterval() { return computeHearbeatInterval(); } public String getTtl() { return this.ttlValue + this.ttlUnit; } >>>>>>>
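// Hedged sketch: one plausible body for the Duration-based computeHeartbeatInterval() kept
// by the resolution above, assuming a ttl field of type java.time.Duration and the common
// pattern of heartbeating at a fixed fraction of the TTL. The ratio and one-second floor are
// illustrative values, not necessarily what HeartbeatProperties actually uses.
import java.time.Duration;

private Duration ttl = Duration.ofSeconds(30); // assumed field: the TTL registered with Consul

protected Duration computeHeartbeatInterval() {
  // Heartbeat well before the TTL expires, but never more often than once per second.
  double intervalRatio = 2.0 / 3.0;
  long intervalMillis = (long) (ttl.toMillis() * intervalRatio);
  return Duration.ofMillis(Math.max(intervalMillis, 1000));
}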
<<<<<<< import io.nuls.tools.constant.TxType; ======= import io.nuls.rpc.util.TimeUtils; >>>>>>> import io.nuls.rpc.util.TimeUtils; import io.nuls.tools.constant.TxType;
<<<<<<< Duration period = properties.computeHearbeatInterval(); ======= Period period = properties.computeHeartbeatInterval(); >>>>>>> Duration period = properties.computeHeartbeatInterval(); <<<<<<< properties.setTtl(Duration.ofSeconds(2)); Duration period = properties.computeHearbeatInterval(); ======= properties.setTtlValue(2); Period period = properties.computeHeartbeatInterval(); >>>>>>> properties.setTtl(Duration.ofSeconds(2)); Duration period = properties.computeHeartbeatInterval();
<<<<<<< "--spring.config.use-legacy-processing=true", ======= "--spring.cloud.consul.host=" + ConsulTestcontainers.getHost(), "--spring.cloud.consul.port=" + ConsulTestcontainers.getPort(), >>>>>>> "--spring.config.use-legacy-processing=true", "--spring.cloud.consul.host=" + ConsulTestcontainers.getHost(), "--spring.cloud.consul.port=" + ConsulTestcontainers.getPort(),
<<<<<<< import com.github.ambry.utils.Utils; import java.io.File; ======= import com.github.ambry.utils.Time; >>>>>>> import com.github.ambry.utils.Time; import com.github.ambry.utils.Utils; <<<<<<< import java.util.HashSet; ======= >>>>>>> import java.util.HashSet; <<<<<<< public MockCluster(NotificationSystem notificationSystem) throws IOException, InstantiationException, URISyntaxException, GeneralSecurityException { this(notificationSystem, false, "", new Properties(), true); } public MockCluster(NotificationSystem notificationSystem, boolean enableSSL, String datacenters, Properties sslProps, boolean enableHardDeletes) throws IOException, InstantiationException, URISyntaxException, GeneralSecurityException { // sslEnabledDatacenters represents comma separated list of datacenters to which ssl should be enabled ======= public MockCluster(NotificationSystem notificationSystem, boolean enableSSL, String sslEnabledDatacentersForDC1, String sslEnabledDatacentersForDC2, String sslEnabledDatacentersForDC3, Time time) throws IOException, InstantiationException { >>>>>>> public MockCluster(NotificationSystem notificationSystem, Time time) throws IOException, InstantiationException, URISyntaxException, GeneralSecurityException { this(notificationSystem, false, "", new Properties(), true, time); } public MockCluster(NotificationSystem notificationSystem, boolean enableSSL, String datacenters, Properties sslProps, boolean enableHardDeletes, Time time) throws IOException, InstantiationException, URISyntaxException, GeneralSecurityException { // sslEnabledDatacenters represents comma separated list of datacenters to which ssl should be enabled <<<<<<< try { for (MockDataNodeId dataNodeId : dataNodes) { if (enableSSL) { String sslEnabledDatacenters = getSSLEnabledDatacenterValue(dataNodeId.getDatacenterName(), datacenterList); sslProps.setProperty("ssl.enabled.datacenters", sslEnabledDatacenters); } initializeServer(dataNodeId, sslProps, enableHardDeletes); ======= for (MockDataNodeId dataNodeId : dataNodes) { if (dataNodeId.getDatacenterName() == "DC1") { startServer(dataNodeId, sslEnabledDatacentersForDC1, time); } else if (dataNodeId.getDatacenterName() == "DC2") { startServer(dataNodeId, sslEnabledDatacentersForDC2, time); } else if (dataNodeId.getDatacenterName() == "DC3") { startServer(dataNodeId, sslEnabledDatacentersForDC3, time); >>>>>>> try { for (MockDataNodeId dataNodeId : dataNodes) { if (enableSSL) { String sslEnabledDatacenters = getSSLEnabledDatacenterValue(dataNodeId.getDatacenterName(), datacenterList); sslProps.setProperty("ssl.enabled.datacenters", sslEnabledDatacenters); } initializeServer(dataNodeId, sslProps, enableHardDeletes, time); <<<<<<< private void initializeServer(DataNodeId dataNodeId, Properties sslProperties, boolean enableHardDeletes) throws IOException, InstantiationException, URISyntaxException { ======= private void startServer(DataNodeId dataNodeId, String sslEnabledDatacenters, Time time) throws IOException, InstantiationException { >>>>>>> private void initializeServer(DataNodeId dataNodeId, Properties sslProperties, boolean enableHardDeletes, Time time) throws IOException, InstantiationException, URISyntaxException { <<<<<<< props.setProperty("store.deleted.message.retention.days", "0"); props.setProperty("store.enable.hard.delete", Boolean.toString(enableHardDeletes)); ======= props.setProperty("store.deleted.message.retention.days", "1"); props.setProperty("store.enable.hard.delete", "true"); >>>>>>> props.setProperty("store.enable.hard.delete", 
Boolean.toString(enableHardDeletes)); props.setProperty("store.deleted.message.retention.days", "1"); <<<<<<< AmbryServer server = new AmbryServer(propverify, clusterMap, notificationSystem); ======= AmbryServer server = new AmbryServer(propverify, clusterMap, notificationSystem, time); server.startup(); >>>>>>> AmbryServer server = new AmbryServer(propverify, clusterMap, notificationSystem, time);
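// Hedged sketch: the getSSLEnabledDatacenterValue helper referenced in the resolution above
// is not shown in this excerpt. One plausible implementation, joining every datacenter except
// the node's own into the comma-separated "ssl.enabled.datacenters" value.
import java.util.List;

private String getSSLEnabledDatacenterValue(String localDatacenter, List<String> datacenterList) {
  StringBuilder sslEnabledDatacenters = new StringBuilder();
  for (String datacenter : datacenterList) {
    if (!datacenter.equals(localDatacenter)) {
      if (sslEnabledDatacenters.length() > 0) {
        sslEnabledDatacenters.append(',');
      }
      sslEnabledDatacenters.append(datacenter);
    }
  }
  return sslEnabledDatacenters.toString();
}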
<<<<<<< restRequest.getMetrics().injectTracker(adminMetrics.postBlobTracker); ======= restRequest.getMetricsTracker().injectMetrics(adminMetrics.postBlobMetrics); AsyncRequestResponseHandler responseHandler = requestResponseHandlerController.getHandler(); >>>>>>> restRequest.getMetricsTracker().injectMetrics(adminMetrics.postBlobMetrics); <<<<<<< restRequest.getMetrics().injectTracker(adminMetrics.deleteBlobTracker); ======= restRequest.getMetricsTracker().injectMetrics(adminMetrics.deleteBlobMetrics); AsyncRequestResponseHandler responseHandler = requestResponseHandlerController.getHandler(); >>>>>>> restRequest.getMetricsTracker().injectMetrics(adminMetrics.deleteBlobMetrics); <<<<<<< restRequest.getMetrics().injectTracker(adminMetrics.headBlobTracker); ======= restRequest.getMetricsTracker().injectMetrics(adminMetrics.headBlobMetrics); AsyncRequestResponseHandler responseHandler = requestResponseHandlerController.getHandler(); >>>>>>> restRequest.getMetricsTracker().injectMetrics(adminMetrics.headBlobMetrics);
<<<<<<< long maxTotalSizeOfEntriesInBytes) { super(RequestOrResponseType.ReplicaMetadataRequest, Request_Response_Version, correlationId, clientId); ======= String hostName, String replicaPath, long maxTotalSizeOfEntriesInBytes) { super(RequestResponseType.ReplicaMetadataRequest, Request_Response_Version, correlationId, clientId); >>>>>>> String hostName, String replicaPath, long maxTotalSizeOfEntriesInBytes) { super(RequestOrResponseType.ReplicaMetadataRequest, Request_Response_Version, correlationId, clientId);