conflict_resolution (string, lengths 27 to 16k)
<<<<<<< ======= if (false == namesystem.isInSafeMode()) { namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); } // Backup node should never do lease recovery, // therefore lease hard limit should never expire. namesystem.leaseManager.setLeasePeriod( HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); >>>>>>> if (false == namesystem.isInSafeMode()) { namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); } // Backup node should never do lease recovery, // therefore lease hard limit should never expire. namesystem.leaseManager.setLeasePeriod( HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); <<<<<<< + nnReg.getAddress() + " expecting " + nnRpcAddress); ======= + nnReg.getAddress() + " expecting " + clientRpcAddress); >>>>>>> + nnReg.getAddress() + " expecting " + nnRpcAddress);
<<<<<<< import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; ======= >>>>>>> import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; <<<<<<< import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.StorageReport; ======= import org.apache.hadoop.hdfs.server.namenode.CachedBlock; >>>>>>> import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; <<<<<<< private final Map<String, DatanodeStorageInfo> storageMap = new HashMap<String, DatanodeStorageInfo>(); ======= /** * A list of CachedBlock objects on this datanode. */ public static class CachedBlocksList extends IntrusiveCollection<CachedBlock> { public enum Type { PENDING_CACHED, CACHED, PENDING_UNCACHED } private final DatanodeDescriptor datanode; private final Type type; CachedBlocksList(DatanodeDescriptor datanode, Type type) { this.datanode = datanode; this.type = type; } public DatanodeDescriptor getDatanode() { return datanode; } public Type getType() { return type; } } /** * The blocks which we want to cache on this DataNode. */ private final CachedBlocksList pendingCached = new CachedBlocksList(this, CachedBlocksList.Type.PENDING_CACHED); /** * The blocks which we know are cached on this datanode. * This list is updated by periodic cache reports. */ private final CachedBlocksList cached = new CachedBlocksList(this, CachedBlocksList.Type.CACHED); /** * The blocks which we want to uncache on this DataNode. */ private final CachedBlocksList pendingUncached = new CachedBlocksList(this, CachedBlocksList.Type.PENDING_UNCACHED); public CachedBlocksList getPendingCached() { return pendingCached; } public CachedBlocksList getCached() { return cached; } public CachedBlocksList getPendingUncached() { return pendingUncached; } /** * Head of the list of blocks on the datanode */ private volatile BlockInfo blockList = null; /** * Number of blocks on the datanode */ private int numBlocks = 0; >>>>>>> private final Map<String, DatanodeStorageInfo> storageMap = new HashMap<String, DatanodeStorageInfo>(); /** * A list of CachedBlock objects on this datanode. */ public static class CachedBlocksList extends IntrusiveCollection<CachedBlock> { public enum Type { PENDING_CACHED, CACHED, PENDING_UNCACHED } private final DatanodeDescriptor datanode; private final Type type; CachedBlocksList(DatanodeDescriptor datanode, Type type) { this.datanode = datanode; this.type = type; } public DatanodeDescriptor getDatanode() { return datanode; } public Type getType() { return type; } } /** * The blocks which we want to cache on this DataNode. */ private final CachedBlocksList pendingCached = new CachedBlocksList(this, CachedBlocksList.Type.PENDING_CACHED); /** * The blocks which we know are cached on this datanode. * This list is updated by periodic cache reports. */ private final CachedBlocksList cached = new CachedBlocksList(this, CachedBlocksList.Type.CACHED); /** * The blocks which we want to uncache on this DataNode. 
*/ private final CachedBlocksList pendingUncached = new CachedBlocksList(this, CachedBlocksList.Type.PENDING_UNCACHED); public CachedBlocksList getPendingCached() { return pendingCached; } public CachedBlocksList getCached() { return cached; } public CachedBlocksList getPendingUncached() { return pendingUncached; } <<<<<<< super(nodeID); ======= this(nodeID, 0L, 0L, 0L, 0L, 0L, 0L, 0, 0); >>>>>>> super(nodeID); <<<<<<< ======= long capacity, long dfsUsed, long remaining, long bpused, long cacheCapacity, long cacheUsed, >>>>>>> long cacheCapacity, long cacheUsed, <<<<<<< updateHeartbeat(StorageReport.EMPTY_ARRAY, xceiverCount, failedVolumes); ======= updateHeartbeat(capacity, dfsUsed, remaining, bpused, cacheCapacity, cacheUsed, xceiverCount, failedVolumes); >>>>>>> updateHeartbeat(StorageReport.EMPTY_ARRAY, cacheCapacity, cacheUsed, xceiverCount, failedVolumes); <<<<<<< boolean removeBlock(String storageID, BlockInfo b) { DatanodeStorageInfo s = getStorageInfo(storageID); if (s != null) { return s.removeBlock(b); } return false; ======= @VisibleForTesting protected BlockInfo getHead(){ return blockList; >>>>>>> boolean removeBlock(String storageID, BlockInfo b) { DatanodeStorageInfo s = getStorageInfo(storageID); if (s != null) { return s.removeBlock(b); } return false; <<<<<<< public void updateHeartbeat(StorageReport[] reports, int xceiverCount, int volFailures) { long totalCapacity = 0; long totalRemaining = 0; long totalBlockPoolUsed = 0; long totalDfsUsed = 0; ======= public void updateHeartbeat(long capacity, long dfsUsed, long remaining, long blockPoolUsed, long cacheCapacity, long cacheUsed, int xceiverCount, int volFailures) { setCapacity(capacity); setRemaining(remaining); setBlockPoolUsed(blockPoolUsed); setDfsUsed(dfsUsed); setCacheCapacity(cacheCapacity); setCacheUsed(cacheUsed); >>>>>>> public void updateHeartbeat(StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int volFailures) { long totalCapacity = 0; long totalRemaining = 0; long totalBlockPoolUsed = 0; long totalDfsUsed = 0; setCacheCapacity(cacheCapacity); setCacheUsed(cacheUsed);
<<<<<<< ClientProtocol, Closeable, ProtocolTranslator { ======= ProtocolMetaInterface, ClientProtocol, Closeable { >>>>>>> ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator { <<<<<<< @Override public Object getUnderlyingProxyObject() { return rpcProxy; } ======= @Override public boolean isMethodSupported(String methodName) throws IOException { return RpcClientUtil.isMethodSupported(rpcProxy, ClientNamenodeProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), methodName); } >>>>>>> @Override public boolean isMethodSupported(String methodName) throws IOException { return RpcClientUtil.isMethodSupported(rpcProxy, ClientNamenodeProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), methodName); } @Override public Object getUnderlyingProxyObject() { return rpcProxy; }
<<<<<<< import com.google.common.base.Optional; import junit.framework.TestCase; ======= >>>>>>>
<<<<<<< finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshot()); NameNode.stateChangeLog.info("DIR* completeFile: " + src + " is closed by " + holder); ======= finalizeINodeFileUnderConstruction(src, pendingFile); >>>>>>> finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshot());
<<<<<<< import org.apache.hadoop.hdfs.StorageType; ======= import org.apache.hadoop.fs.Path; >>>>>>> import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.StorageType;
<<<<<<< inodeId = fsNamesys.allocateNewInodeId(); newFile = fsDir.unprotectedAddFile(inodeId, ======= inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, lastInodeId); newFile = (INodeFile) fsDir.unprotectedAddFile(inodeId, >>>>>>> inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, lastInodeId); newFile = fsDir.unprotectedAddFile(inodeId,
<<<<<<< public abstract class INode implements Comparable<byte[]> { static final ReadOnlyList<INode> EMPTY_READ_ONLY_LIST = ReadOnlyList.Util.emptyList(); /** * The inode name is in java UTF8 encoding; * The name in HdfsFileStatus should keep the same encoding as this. * if this encoding is changed, implicitly getFileInfo and listStatus in * clientProtocol are changed; The decoding at the client * side should change accordingly. */ protected byte[] name; protected INodeDirectory parent; protected long modificationTime; protected long accessTime; ======= abstract class INode implements Comparable<byte[]> { static final List<INode> EMPTY_LIST = Collections.unmodifiableList(new ArrayList<INode>()); >>>>>>> public abstract class INode implements Comparable<byte[]> { static final ReadOnlyList<INode> EMPTY_READ_ONLY_LIST = ReadOnlyList.Util.emptyList();
<<<<<<< private static int runOnce = 0; ======= >>>>>>> <<<<<<< shellState.getReader().println(); for (String n : shellState.getConnector().namespaceOperations().list()) { delim = ""; for (NamespacePermission p : NamespacePermission.values()) { if (p != null && shellState.getConnector().securityOperations().hasNamespacePermission(user, n, p)) { if (runOnce == 0) { shellState.getReader().print("\nNamespace permissions (" + n + "): "); runOnce++; } shellState.getReader().print(delim + "Namespace." + p.name()); delim = ", "; } } runOnce = 0; } shellState.getReader().println(); ======= shellState.getReader().printNewline(); boolean runOnce = true; >>>>>>> shellState.getReader().println(); boolean runOnce = true; for (String n : shellState.getConnector().namespaceOperations().list()) { delim = ""; for (NamespacePermission p : NamespacePermission.values()) { if (p != null && shellState.getConnector().securityOperations().hasNamespacePermission(user, n, p)) { if (runOnce) { shellState.getReader().print("\nNamespace permissions (" + n + "): "); runOnce = false; } shellState.getReader().print(delim + "Namespace." + p.name()); delim = ", "; } } runOnce = true; } shellState.getReader().println(); runOnce = true; <<<<<<< if (runOnce == 0) { shellState.getReader().print("\nTable permissions (" + t + "): "); runOnce++; ======= if (runOnce) { shellState.getReader().printString("\nTable permissions (" + t + "): "); runOnce = false; >>>>>>> if (runOnce) { shellState.getReader().print("\nTable permissions (" + t + "): "); runOnce = false;
<<<<<<< import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto; ======= >>>>>>> import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
<<<<<<< import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto; import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName; ======= >>>>>>> import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto; <<<<<<< public static ImmutableList<AclEntry> loadAclEntries( AclFeatureProto proto, final String[] stringTable) { ImmutableList.Builder<AclEntry> b = ImmutableList.builder(); for (int v : proto.getEntriesList()) { int p = v & ACL_ENTRY_PERM_MASK; int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK; int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK; int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK; String name = stringTable[nid]; b.add(new AclEntry.Builder().setName(name) .setPermission(FSACTION_VALUES[p]) .setScope(ACL_ENTRY_SCOPE_VALUES[s]) .setType(ACL_ENTRY_TYPE_VALUES[t]).build()); } return b.build(); } public static INodeReference loadINodeReference( INodeSection.INodeReference r, FSDirectory dir) throws IOException { long referredId = r.getReferredId(); INode referred = dir.getInode(referredId); WithCount withCount = (WithCount) referred.getParentReference(); if (withCount == null) { withCount = new INodeReference.WithCount(null, referred); } final INodeReference ref; if (r.hasDstSnapshotId()) { // DstReference ref = new INodeReference.DstReference(null, withCount, r.getDstSnapshotId()); } else { ref = new INodeReference.WithName(null, withCount, r.getName() .toByteArray(), r.getLastSnapshotId()); } return ref; } ======= >>>>>>> public static ImmutableList<AclEntry> loadAclEntries( AclFeatureProto proto, final String[] stringTable) { ImmutableList.Builder<AclEntry> b = ImmutableList.builder(); for (int v : proto.getEntriesList()) { int p = v & ACL_ENTRY_PERM_MASK; int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK; int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK; int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK; String name = stringTable[nid]; b.add(new AclEntry.Builder().setName(name) .setPermission(FSACTION_VALUES[p]) .setScope(ACL_ENTRY_SCOPE_VALUES[s]) .setType(ACL_ENTRY_TYPE_VALUES[t]).build()); } return b.build(); }
<<<<<<< markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason), dn, storageID); ======= markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason, Reason.CORRUPTION_REPORTED), dn); >>>>>>> markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason, Reason.CORRUPTION_REPORTED), dn, storageID);
<<<<<<< this(id, null, replication, modTime, preferredBlockSize, BlockInfo.EMPTY_ARRAY, permissions.applyUMask(UMASK), clientName, clientMachine, clientNode); ======= super(id, permissions, BlockInfo.EMPTY_ARRAY, replication, modTime, modTime, preferredBlockSize); this.clientName = clientName; this.clientMachine = clientMachine; this.clientNode = clientNode; >>>>>>> this(id, null, replication, modTime, preferredBlockSize, BlockInfo.EMPTY_ARRAY, permissions, clientName, clientMachine, clientNode);
<<<<<<< /** * Returns one block report per volume. * @param bpid Block Pool Id * @return - a map of StorageID to block report for the volume. */ public Map<String, BlockListAsLongs> getBlockReports(String bpid); ======= /** * Returns the cache report - the full list of cached block IDs of a * block pool. * @param bpid Block Pool Id * @return the cache report - the full list of cached block IDs. */ public List<Long> getCacheReport(String bpid); >>>>>>> /** * Returns one block report per volume. * @param bpid Block Pool Id * @return - a map of StorageID to block report for the volume. */ public Map<String, BlockListAsLongs> getBlockReports(String bpid); /** * Returns the cache report - the full list of cached block IDs of a * block pool. * @param bpid Block Pool Id * @return the cache report - the full list of cached block IDs. */ public List<Long> getCacheReport(String bpid);
<<<<<<< StorageType[] storageTypes = b.getStorageTypes(); if (storageTypes != null) { for (int i = 0; i < storageTypes.length; ++i) { builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i])); } } final String[] storageIDs = b.getStorageIDs(); if (storageIDs != null) { builder.addAllStorageIDs(Arrays.asList(b.getStorageIDs())); } ======= Preconditions.checkArgument(cachedLocs.size() == 0, "Found additional cached replica locations that are not in the set of" + " storage-backed locations!"); >>>>>>> Preconditions.checkArgument(cachedLocs.size() == 0, "Found additional cached replica locations that are not in the set of" + " storage-backed locations!"); StorageType[] storageTypes = b.getStorageTypes(); if (storageTypes != null) { for (int i = 0; i < storageTypes.length; ++i) { builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i])); } } final String[] storageIDs = b.getStorageIDs(); if (storageIDs != null) { builder.addAllStorageIDs(Arrays.asList(b.getStorageIDs())); } <<<<<<< List<StorageTypeProto> storageTypesList = proto.getStorageTypesList(); StorageType[] storageTypes = new StorageType[locs.size()]; // The media should correspond to targets 1:1. If not then // ignore the media information (left as default). if ((storageTypesList != null) && (storageTypesList.size() == locs.size())) { for (int i = 0; i < storageTypesList.size(); ++i) { storageTypes[i] = PBHelper.convertType(storageTypesList.get(i)); } } final int storageIDsCount = proto.getStorageIDsCount(); final String[] storageIDs = storageIDsCount == 0? null : proto.getStorageIDsList().toArray(new String[storageIDsCount]); ======= // Set values from the isCached list, re-using references from loc List<DatanodeInfo> cachedLocs = new ArrayList<DatanodeInfo>(locs.size()); List<Boolean> isCachedList = proto.getIsCachedList(); for (int i=0; i<isCachedList.size(); i++) { if (isCachedList.get(i)) { cachedLocs.add(targets[i]); } } >>>>>>> List<StorageTypeProto> storageTypesList = proto.getStorageTypesList(); StorageType[] storageTypes = new StorageType[locs.size()]; // The media should correspond to targets 1:1. If not then // ignore the media information (left as default). if ((storageTypesList != null) && (storageTypesList.size() == locs.size())) { for (int i = 0; i < storageTypesList.size(); ++i) { storageTypes[i] = PBHelper.convertType(storageTypesList.get(i)); } } final int storageIDsCount = proto.getStorageIDsCount(); final String[] storageIDs = storageIDsCount == 0? null : proto.getStorageIDsList().toArray(new String[storageIDsCount]); // Set values from the isCached list, re-using references from loc List<DatanodeInfo> cachedLocs = new ArrayList<DatanodeInfo>(locs.size()); List<Boolean> isCachedList = proto.getIsCachedList(); for (int i=0; i<isCachedList.size(); i++) { if (isCachedList.get(i)) { cachedLocs.add(targets[i]); } } <<<<<<< storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt()); ======= proto.getOffset(), proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[0])); >>>>>>> storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[0])); <<<<<<< .setStorageUuid(r.getStorageID()).build(); ======= .setStorageID(r.getStorageID()); return builder.build(); >>>>>>> .setStorageUuid(r.getStorageID()); return builder.build();
<<<<<<< d.updateHeartbeat(StorageReport.EMPTY_ARRAY, 0, 0); ======= d.updateHeartbeat(0L, 0L, 0L, 0L, 0L, 0L, 0, 0); >>>>>>> d.updateHeartbeat(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
<<<<<<< import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; ======= import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; >>>>>>> import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; <<<<<<< response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()), request.getCapacity(), request.getDfsUsed(), request.getRemaining(), request.getBlockPoolUsed(), request.getXmitsInProgress(), request.getXceiverCount(), request.getFailedVolumes()); ======= List<StorageReportProto> list = request.getReportsList(); StorageReport[] report = new StorageReport[list.size()]; int i = 0; for (StorageReportProto p : list) { report[i++] = new StorageReport(p.getStorageID(), p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(), p.getBlockPoolUsed()); } cmds = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()), report, request.getXmitsInProgress(), request.getXceiverCount(), request.getFailedVolumes()); >>>>>>> List<StorageReportProto> list = request.getReportsList(); StorageReport[] report = new StorageReport[list.size()]; int i = 0; for (StorageReportProto p : list) { report[i++] = new StorageReport(p.getStorageID(), p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(), p.getBlockPoolUsed()); } response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()), report, request.getXmitsInProgress(), request.getXceiverCount(), request.getFailedVolumes());
<<<<<<< /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY = "hadoop.security.java.secure.random.algorithm"; /** Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */ public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = "SHA1PRNG"; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = "hadoop.security.secure.random.impl"; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = "hadoop.security.random.device.file.path"; public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = "/dev/urandom"; ======= // <!--- KMSClientProvider configurations —> /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE = "hadoop.security.kms.client.encrypted.key.cache.size"; /** Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE */ public static final int KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT = 500; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK = "hadoop.security.kms.client.encrypted.key.cache.low-watermark"; /** Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK */ public static final float KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT = 0.3f; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS = "hadoop.security.kms.client.encrypted.key.cache.num.refill.threads"; /** Default value for KMS_CLIENT_ENC_KEY_NUM_REFILL_THREADS */ public static final int KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT = 2; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS = "hadoop.security.kms.client.encrypted.key.cache.expiry"; /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/ public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000; >>>>>>> // <!--- KMSClientProvider configurations —> /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE = "hadoop.security.kms.client.encrypted.key.cache.size"; /** Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE */ public static final int KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT = 500; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK = "hadoop.security.kms.client.encrypted.key.cache.low-watermark"; /** Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK */ public static final float KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT = 0.3f; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS = "hadoop.security.kms.client.encrypted.key.cache.num.refill.threads"; /** Default value for KMS_CLIENT_ENC_KEY_NUM_REFILL_THREADS */ public static final int KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT = 2; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS = "hadoop.security.kms.client.encrypted.key.cache.expiry"; /** Default value 
for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/ public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY = "hadoop.security.java.secure.random.algorithm"; /** Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */ public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = "SHA1PRNG"; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = "hadoop.security.secure.random.impl"; /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */ public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = "hadoop.security.random.device.file.path"; public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = "/dev/urandom";
<<<<<<< /** make sure that we still have the lease on this file. */ private INodeFileUnderConstruction checkLease(String src, String holder) throws LeaseExpiredException, UnresolvedLinkException { return checkLease(src, holder, dir.getINode(src)); ======= // make sure that we still have the lease on this file. private INodeFileUnderConstruction checkLease(String src, String holder) throws LeaseExpiredException, UnresolvedLinkException, FileNotFoundException { assert hasReadOrWriteLock(); return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder, dir.getINode(src)); >>>>>>> /** make sure that we still have the lease on this file. */ private INodeFileUnderConstruction checkLease(String src, String holder) throws LeaseExpiredException, UnresolvedLinkException, FileNotFoundException { return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder, dir.getINode(src));
<<<<<<< // TODO:HA decide on OperationCategory for this namesystem.refreshNodes(new HdfsConfiguration()); ======= namesystem.getBlockManager().getDatanodeManager().refreshNodes( new HdfsConfiguration()); >>>>>>> // TODO:HA decide on OperationCategory for this namesystem.getBlockManager().getDatanodeManager().refreshNodes( new HdfsConfiguration()); <<<<<<< // TODO:HA decide on OperationCategory for this return namesystem.getEditLogManifest(sinceTxId); ======= return namesystem.getEditLog().getEditLogManifest(sinceTxId); >>>>>>> // TODO:HA decide on OperationCategory for this return namesystem.getEditLog().getEditLogManifest(sinceTxId); <<<<<<< // TODO:HA decide on OperationCategory for this namesystem.setBalancerBandwidth(bandwidth); ======= namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth); >>>>>>> // TODO:HA decide on OperationCategory for this namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
<<<<<<< import org.apache.hadoop.fs.CommonConfigurationKeysPublic; ======= import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; >>>>>>> import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; <<<<<<< conf.getInt("ipc.client.connection.maxidletime", 10000), // 10s conf.getInt("ipc.client.connect.max.retries", 10), conf.getInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT), conf.getBoolean("ipc.client.tcpnodelay", false), ======= conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT), conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT), conf.getBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT), >>>>>>> conf.getInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT), conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT), conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT), conf.getBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT),
<<<<<<< "fakehostname", "fake_datanode_id", 100, 101, 102); ======= "fakehostname", "fake_storage_id", 100, 101, 102, 103); >>>>>>> "fakehostname", "fake_datanode_id", 100, 101, 102, 103); <<<<<<< "fakehostname_" + i, "fake_datanode_id", 100, 101, 102); ======= "fakehostname_" + i, "fake_storage_id", 100, 101, 102, 103); >>>>>>> "fakehostname_" + i, "fake_datanode_id", 100, 101, 102, 103); <<<<<<< "fakehostname_" + i, "fake_datanode_id_" + i, 100, 101, 102); ======= "fakehostname_" + i, "fake_storage_id_" + i, 100, 101, 102, 103); >>>>>>> "fakehostname_" + i, "fake_datanode_id_" + i, 100, 101, 102, 103); <<<<<<< "fakehostname", "fake_datanode_id", 100, 101, 102); ======= "fakehostname", "fake_storage_id", 100, 101, 102, 103); >>>>>>> "fakehostname", "fake_datanode_id", 100, 101, 102, 103); <<<<<<< "fakehostname", "fake_datanode_id", 100, 101, 102); ======= "fakehostname", "fake_storage_id", 100, 101, 102, 103); >>>>>>> "fakehostname", "fake_datanode_id", 100, 101, 102, 103);
<<<<<<< import com.google.common.collect.Lists; ======= import org.apache.commons.io.FileUtils; >>>>>>> import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; <<<<<<< import org.apache.hadoop.fs.permission.AclEntry; ======= >>>>>>> import org.apache.hadoop.fs.permission.AclEntry;
<<<<<<< // Notify all peers of the shutdown and restart. // datanode.shouldRun should still be true and datanode.restarting should // be set true before calling this method. synchronized void restartNotifyPeers() { assert (datanode.shouldRun == true && datanode.shutdownForUpgrade); for (Peer p : peers.keySet()) { // interrupt each and every DataXceiver thread. peers.get(p).interrupt(); } } // Close all peers and clear the map. synchronized void closeAllPeers() { LOG.info("Closing all peers."); for (Peer p : peers.keySet()) { IOUtils.cleanup(LOG, p); } peers.clear(); } // Return the number of peers. synchronized int getNumPeers() { return peers.size(); } ======= synchronized void releasePeer(Peer peer) { peers.remove(peer); } >>>>>>> // Notify all peers of the shutdown and restart. // datanode.shouldRun should still be true and datanode.restarting should // be set true before calling this method. synchronized void restartNotifyPeers() { assert (datanode.shouldRun == true && datanode.shutdownForUpgrade); for (Peer p : peers.keySet()) { // interrupt each and every DataXceiver thread. peers.get(p).interrupt(); } } // Close all peers and clear the map. synchronized void closeAllPeers() { LOG.info("Closing all peers."); for (Peer p : peers.keySet()) { IOUtils.cleanup(LOG, p); } peers.clear(); } // Return the number of peers. synchronized int getNumPeers() { return peers.size(); } synchronized void releasePeer(Peer peer) { peers.remove(peer); }
<<<<<<< import java.util.List; ======= import java.util.EnumSet; >>>>>>> import java.util.EnumSet; import java.util.List;
<<<<<<< private void addToParent(INodeDirectory parent, INode child) { ======= void addToParent(INodeDirectory parent, INode child) { FSDirectory fsDir = namesystem.dir; if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) { throw new HadoopIllegalArgumentException("File name \"" + child.getLocalName() + "\" is reserved. Please " + " change the name of the existing file or directory to another " + "name before upgrading to this release."); } >>>>>>> private void addToParent(INodeDirectory parent, INode child) { FSDirectory fsDir = namesystem.dir; if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) { throw new HadoopIllegalArgumentException("File name \"" + child.getLocalName() + "\" is reserved. Please " + " change the name of the existing file or directory to another " + "name before upgrading to this release."); }
<<<<<<< this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE, RefreshAuthorizationPolicyProtocol.class, this); this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE, RefreshUserMappingsProtocol.class, this); this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE, GetUserMappingsProtocol.class, this); this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE, HAServiceProtocol.class, this); ======= >>>>>>> this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE, HAServiceProtocol.class, this); <<<<<<< this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE, RefreshAuthorizationPolicyProtocol.class, this); this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE, RefreshUserMappingsProtocol.class, this); this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE, GetUserMappingsProtocol.class, this); this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE, HAServiceProtocol.class, this); ======= >>>>>>> this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE, HAServiceProtocol.class, this);
<<<<<<< public void create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize) throws AccessControlException, AlreadyBeingCreatedException, DSQuotaExceededException, FileAlreadyExistsException, FileNotFoundException, NSQuotaExceededException, ParentNotDirectoryException, SafeModeException, UnresolvedLinkException, SnapshotAccessControlException, IOException; ======= public HdfsFileStatus create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize) throws AccessControlException, AlreadyBeingCreatedException, DSQuotaExceededException, FileAlreadyExistsException, FileNotFoundException, NSQuotaExceededException, ParentNotDirectoryException, SafeModeException, UnresolvedLinkException, IOException; >>>>>>> public HdfsFileStatus create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize) throws AccessControlException, AlreadyBeingCreatedException, DSQuotaExceededException, FileAlreadyExistsException, FileNotFoundException, NSQuotaExceededException, ParentNotDirectoryException, SafeModeException, UnresolvedLinkException, SnapshotAccessControlException, IOException;
<<<<<<< registerMBean(datanode.getDatanodeUuid()); } private StorageType getStorageTypeFromLocations( Collection<StorageLocation> dataLocations, File dir) { for (StorageLocation dataLocation : dataLocations) { if (dataLocation.getFile().equals(dir)) { return dataLocation.getStorageType(); } } return StorageType.DEFAULT; ======= cacheManager = new FsDatasetCache(this); registerMBean(storage.getStorageID()); >>>>>>> cacheManager = new FsDatasetCache(this); registerMBean(datanode.getDatanodeUuid()); } private StorageType getStorageTypeFromLocations( Collection<StorageLocation> dataLocations, File dir) { for (StorageLocation dataLocation : dataLocations) { if (dataLocation.getFile().equals(dir)) { return dataLocation.getStorageType(); } } return StorageType.DEFAULT;
<<<<<<< public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg, long capacity, long dfsUsed, long remaining, long blockPoolUsed, int xmitsInProgress, int xceiverCount, int failedVolumes) throws IOException { ======= public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg, StorageReport[] report, int xmitsInProgress, int xceiverCount, int failedVolumes) throws IOException { >>>>>>> public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg, StorageReport[] report, int xmitsInProgress, int xceiverCount, int failedVolumes) throws IOException { <<<<<<< namesystem.getBlockManager().processIncrementalBlockReport( nodeReg, poolId, receivedAndDeletedBlocks); ======= namesystem.getBlockManager().blockReceivedAndDeleted( nodeReg, poolId, receivedAndDeletedBlocks[0].getBlocks()); >>>>>>> namesystem.getBlockManager().processIncrementalBlockReport( nodeReg, poolId, receivedAndDeletedBlocks[0].getBlocks());
<<<<<<< "\t[-refreshSuperUserGroupsConfiguration]\n" + ======= "\t[refreshSuperUserGroupsConfiguration]\n" + "\t[-refreshCallQueue]\n" + >>>>>>> "\t[-refreshSuperUserGroupsConfiguration]\n" + "\t[-refreshCallQueue]\n" +
<<<<<<< /** * Initialize replication queues. */ private void initializeReplQueues() { LOG.info("initializing replication queues"); blockManager.processMisReplicatedBlocks(); initializedReplQueues = true; } ======= private boolean inActiveState() { return haContext != null && haContext.getState().getServiceState() == HAServiceState.ACTIVE; } >>>>>>> private boolean inActiveState() { return haContext != null && haContext.getState().getServiceState() == HAServiceState.ACTIVE; } /** * Initialize replication queues. */ private void initializeReplQueues() { LOG.info("initializing replication queues"); blockManager.processMisReplicatedBlocks(); initializedReplQueues = true; }
<<<<<<< return loadFSImage(target, startOpt, recovery); ======= return loadFSImage(target, recovery, startOpt); >>>>>>> return loadFSImage(target, startOpt, recovery); <<<<<<< this.loadFSImage(target, null, null); ======= >>>>>>> <<<<<<< private boolean loadFSImage(FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException { ======= boolean loadFSImage(FSNamesystem target, MetaRecoveryContext recovery, StartupOption startOpt) throws IOException { >>>>>>> private boolean loadFSImage(FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
<<<<<<< ======= import java.io.IOException; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.log4j.Level; >>>>>>> import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.log4j.Level; <<<<<<< ======= /** * Do parallel read several times with different number of files and threads. * * Note that while this is the only "test" in a junit sense, we're actually * dispatching a lot more. Failures in the other methods (and other threads) * need to be manually collected, which is inconvenient. */ @Test public void testParallelReadCopying() throws IOException { runTestWorkload(new CopyingReadWorkerHelper()); } @Test public void testParallelReadByteBuffer() throws IOException { runTestWorkload(new DirectReadWorkerHelper()); } @Test public void testParallelReadMixed() throws IOException { runTestWorkload(new MixedWorkloadHelper()); } @Test public void testParallelNoChecksums() throws IOException { verifyChecksums = false; runTestWorkload(new MixedWorkloadHelper()); } >>>>>>>
<<<<<<< * Flatten the given map, as returned by other functions in this class, * into a flat list of {@link ConfiguredNNAddress} instances. */ public static List<ConfiguredNNAddress> flattenAddressMap( Map<String, Map<String, InetSocketAddress>> map) { List<ConfiguredNNAddress> ret = Lists.newArrayList(); for (Map.Entry<String, Map<String, InetSocketAddress>> entry : map.entrySet()) { String nsId = entry.getKey(); Map<String, InetSocketAddress> nnMap = entry.getValue(); for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) { String nnId = e2.getKey(); InetSocketAddress addr = e2.getValue(); ret.add(new ConfiguredNNAddress(nsId, nnId, addr)); } } return ret; } /** * Format the given map, as returned by other functions in this class, * into a string suitable for debugging display. The format of this string * should not be considered an interface, and is liable to change. */ public static String addressMapToString( Map<String, Map<String, InetSocketAddress>> map) { StringBuilder b = new StringBuilder(); for (Map.Entry<String, Map<String, InetSocketAddress>> entry : map.entrySet()) { String nsId = entry.getKey(); Map<String, InetSocketAddress> nnMap = entry.getValue(); b.append("Nameservice <").append(nsId).append(">:").append("\n"); for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) { b.append(" NN ID ").append(e2.getKey()) .append(" => ").append(e2.getValue()).append("\n"); } } return b.toString(); } public static String nnAddressesAsString(Configuration conf) { Map<String, Map<String, InetSocketAddress>> addresses = getHaNnRpcAddresses(conf); return addressMapToString(addresses); } /** * Represent one of the NameNodes configured in the cluster. */ public static class ConfiguredNNAddress { private final String nameserviceId; private final String namenodeId; private final InetSocketAddress addr; private ConfiguredNNAddress(String nameserviceId, String namenodeId, InetSocketAddress addr) { this.nameserviceId = nameserviceId; this.namenodeId = namenodeId; this.addr = addr; } public String getNameserviceId() { return nameserviceId; } public String getNamenodeId() { return namenodeId; } public InetSocketAddress getAddress() { return addr; } @Override public String toString() { return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" + "nnId=" + namenodeId + ";addr=" + addr + "]"; } } /** * Get a URI for each configured nameservice. If a nameservice is * HA-enabled, then the logical URI of the nameservice is returned. If the * nameservice is not HA-enabled, then a URI corresponding to an RPC address * of the single NN for that nameservice is returned, preferring the service * RPC address over the client RPC address. * * @param conf configuration * @return a collection of all configured NN URIs, preferring service * addresses */ public static Collection<URI> getNsServiceRpcUris(Configuration conf) { return getNameServiceUris(conf, DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); } /** * Get a URI for each configured nameservice. If a nameservice is * HA-enabled, then the logical URI of the nameservice is returned. If the * nameservice is not HA-enabled, then a URI corresponding to the address of * the single NN for that nameservice is returned. * * @param conf configuration * @param keys configuration keys to try in order to get the URI for non-HA * nameservices * @return a collection of all configured NN URIs */ public static Collection<URI> getNameServiceUris(Configuration conf, String... 
keys) { Set<URI> ret = new HashSet<URI>(); for (String nsId : getNameServiceIds(conf)) { if (HAUtil.isHAEnabled(conf, nsId)) { // Add the logical URI of the nameservice. try { ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId)); } catch (URISyntaxException ue) { throw new IllegalArgumentException(ue); } } else { // Add the URI corresponding to the address of the NN. for (String key : keys) { String addr = conf.get(concatSuffixes(key, nsId)); if (addr != null) { ret.add(createUri(HdfsConstants.HDFS_URI_SCHEME, NetUtils.createSocketAddr(addr))); break; } } } } // Add the generic configuration keys. for (String key : keys) { String addr = conf.get(key); if (addr != null) { ret.add(createUri("hdfs", NetUtils.createSocketAddr(addr))); break; } } return ret; } /** * Given the InetSocketAddress this method returns the nameservice Id * corresponding to the key with matching address, by doing a reverse * lookup on the list of nameservices until it finds a match. * * If null is returned, client should try {@link #isDefaultNamenodeAddress} * to check pre-Federation, non-HA configurations. ======= * Given the InetSocketAddress for any configured communication with a * namenode, this method returns the corresponding nameservice ID, * by doing a reverse lookup on the list of nameservices until it * finds a match. * >>>>>>> * Flatten the given map, as returned by other functions in this class, * into a flat list of {@link ConfiguredNNAddress} instances. */ public static List<ConfiguredNNAddress> flattenAddressMap( Map<String, Map<String, InetSocketAddress>> map) { List<ConfiguredNNAddress> ret = Lists.newArrayList(); for (Map.Entry<String, Map<String, InetSocketAddress>> entry : map.entrySet()) { String nsId = entry.getKey(); Map<String, InetSocketAddress> nnMap = entry.getValue(); for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) { String nnId = e2.getKey(); InetSocketAddress addr = e2.getValue(); ret.add(new ConfiguredNNAddress(nsId, nnId, addr)); } } return ret; } /** * Format the given map, as returned by other functions in this class, * into a string suitable for debugging display. The format of this string * should not be considered an interface, and is liable to change. */ public static String addressMapToString( Map<String, Map<String, InetSocketAddress>> map) { StringBuilder b = new StringBuilder(); for (Map.Entry<String, Map<String, InetSocketAddress>> entry : map.entrySet()) { String nsId = entry.getKey(); Map<String, InetSocketAddress> nnMap = entry.getValue(); b.append("Nameservice <").append(nsId).append(">:").append("\n"); for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) { b.append(" NN ID ").append(e2.getKey()) .append(" => ").append(e2.getValue()).append("\n"); } } return b.toString(); } public static String nnAddressesAsString(Configuration conf) { Map<String, Map<String, InetSocketAddress>> addresses = getHaNnRpcAddresses(conf); return addressMapToString(addresses); } /** * Represent one of the NameNodes configured in the cluster. 
*/ public static class ConfiguredNNAddress { private final String nameserviceId; private final String namenodeId; private final InetSocketAddress addr; private ConfiguredNNAddress(String nameserviceId, String namenodeId, InetSocketAddress addr) { this.nameserviceId = nameserviceId; this.namenodeId = namenodeId; this.addr = addr; } public String getNameserviceId() { return nameserviceId; } public String getNamenodeId() { return namenodeId; } public InetSocketAddress getAddress() { return addr; } @Override public String toString() { return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" + "nnId=" + namenodeId + ";addr=" + addr + "]"; } } /** * Get a URI for each configured nameservice. If a nameservice is * HA-enabled, then the logical URI of the nameservice is returned. If the * nameservice is not HA-enabled, then a URI corresponding to an RPC address * of the single NN for that nameservice is returned, preferring the service * RPC address over the client RPC address. * * @param conf configuration * @return a collection of all configured NN URIs, preferring service * addresses */ public static Collection<URI> getNsServiceRpcUris(Configuration conf) { return getNameServiceUris(conf, DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); } /** * Get a URI for each configured nameservice. If a nameservice is * HA-enabled, then the logical URI of the nameservice is returned. If the * nameservice is not HA-enabled, then a URI corresponding to the address of * the single NN for that nameservice is returned. * * @param conf configuration * @param keys configuration keys to try in order to get the URI for non-HA * nameservices * @return a collection of all configured NN URIs */ public static Collection<URI> getNameServiceUris(Configuration conf, String... keys) { Set<URI> ret = new HashSet<URI>(); for (String nsId : getNameServiceIds(conf)) { if (HAUtil.isHAEnabled(conf, nsId)) { // Add the logical URI of the nameservice. try { ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId)); } catch (URISyntaxException ue) { throw new IllegalArgumentException(ue); } } else { // Add the URI corresponding to the address of the NN. for (String key : keys) { String addr = conf.get(concatSuffixes(key, nsId)); if (addr != null) { ret.add(createUri(HdfsConstants.HDFS_URI_SCHEME, NetUtils.createSocketAddr(addr))); break; } } } } // Add the generic configuration keys. for (String key : keys) { String addr = conf.get(key); if (addr != null) { ret.add(createUri("hdfs", NetUtils.createSocketAddr(addr))); break; } } return ret; } /** * Given the InetSocketAddress this method returns the nameservice Id * corresponding to the key with matching address, by doing a reverse * lookup on the list of nameservices until it finds a match. * <<<<<<< String[] ids = getSuffixIDs(conf, address, keys); return (ids != null) ? ids[0] : null; ======= if (nameserviceIds == null || nameserviceIds.isEmpty()) { return null; } // Get the candidateAddresses for all the configured nameServiceIds for (String nameserviceId : nameserviceIds) { for (String key : keys) { String candidateAddress = conf.get( getNameServiceIdKey(key, nameserviceId)); if (candidateAddress != null && address.equals(NetUtils.createSocketAddr(candidateAddress))) return nameserviceId; } } // didn't find a match return null; >>>>>>> String[] ids = getSuffixIDs(conf, address, keys); return (ids != null) ? 
ids[0] : null; <<<<<<< * Given the InetSocketAddress for any configured communication with a * namenode, this method determines whether it is the configured * communication channel for the "default" namenode. * It does a reverse lookup on the list of default communication parameters * to see if the given address matches any of them. * Since the process of resolving URIs to Addresses is slightly expensive, * this utility method should not be used in performance-critical routines. * * @param conf - configuration * @param address - InetSocketAddress for configured communication with NN. * Configured addresses are typically given as URIs, but we may have to * compare against a URI typed in by a human, or the server name may be * aliased, so we compare unambiguous InetSocketAddresses instead of just * comparing URI substrings. * @param keys - list of configured communication parameters that should * be checked for matches. For example, to compare against RPC addresses, * provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, * DFS_NAMENODE_RPC_ADDRESS_KEY * @return - boolean confirmation if matched generic parameter */ public static boolean isDefaultNamenodeAddress(Configuration conf, InetSocketAddress address, String... keys) { for (String key : keys) { String candidateAddress = conf.get(key); if (candidateAddress != null && address.equals(NetUtils.createSocketAddr(candidateAddress))) return true; } return false; } /** ======= * @return key specific to a nameserviceId from a generic key */ public static String getNameServiceIdKey(String key, String nameserviceId) { return key + "." + nameserviceId; } /** >>>>>>>
<<<<<<< @Override // NameNode protected void checkOperation(OperationCategory op) throws UnsupportedActionException { if (OperationCategory.JOURNAL != op) { String msg = "Operation category " + op + " is not supported at the BackupNode"; throw new UnsupportedActionException(msg); } } ======= @Override protected String getNameServiceId(Configuration conf) { return DFSUtil.getBackupNameServiceId(conf); } >>>>>>> @Override // NameNode protected void checkOperation(OperationCategory op) throws UnsupportedActionException { if (OperationCategory.JOURNAL != op) { String msg = "Operation category " + op + " is not supported at the BackupNode"; throw new UnsupportedActionException(msg); } } @Override protected String getNameServiceId(Configuration conf) { return DFSUtil.getBackupNameServiceId(conf); }
<<<<<<< final void removeFromInodeMap(List<INode> inodes) { if (inodes != null) { for (INode inode : inodes) { inodeMap.remove(inode); ======= private final void removeFromInodeMap(INode inode) { inodeMap.remove(inode); } /** Remove all the inodes under given inode from the map */ private void remvoedAllFromInodesFromMap(INode inode) { removeFromInodeMap(inode); if (!inode.isDirectory()) { return; } INodeDirectory dir = (INodeDirectory) inode; for (INode child : dir.getChildrenList()) { remvoedAllFromInodesFromMap(child); } dir.clearChildren(); } /** Update the count of each directory with quota in the namespace * A directory's count is defined as the total number inodes in the tree * rooted at the directory. * * This is an update of existing state of the filesystem and does not * throw QuotaExceededException. */ void updateCountForINodeWithQuota() { updateCountForINodeWithQuota(this, rootDir, new INode.DirCounts(), new ArrayList<INode>(50)); } /** * Update the count of the directory if it has a quota and return the count * * This does not throw a QuotaExceededException. This is just an update * of of existing state and throwing QuotaExceededException does not help * with fixing the state, if there is a problem. * * @param dir the root of the tree that represents the directory * @param counters counters for name space and disk space * @param nodesInPath INodes for the each of components in the path. */ private static void updateCountForINodeWithQuota(FSDirectory fsd, INodeDirectory dir, INode.DirCounts counts, ArrayList<INode> nodesInPath) { long parentNamespace = counts.nsCount; long parentDiskspace = counts.dsCount; counts.nsCount = 1L;//for self. should not call node.spaceConsumedInTree() counts.dsCount = 0L; /* We don't need nodesInPath if we could use 'parent' field in * INode. using 'parent' is not currently recommended. */ nodesInPath.add(dir); for (INode child : dir.getChildrenList()) { fsd.inodeMap.put(child); if (child.isDirectory()) { updateCountForINodeWithQuota(fsd, (INodeDirectory)child, counts, nodesInPath); } else if (child.isSymlink()) { counts.nsCount += 1; } else { // reduce recursive calls counts.nsCount += 1; counts.dsCount += ((INodeFile)child).diskspaceConsumed(); >>>>>>> private final void removeFromInodeMap(INode inode) { inodeMap.remove(inode); } /** Remove all the inodes under given inode from the map */ private void remvoedAllFromInodesFromMap(INode inode) { removeFromInodeMap(inode); if (!inode.isDirectory()) { return; } INodeDirectory dir = (INodeDirectory) inode; for (INode child : dir.getChildrenList()) { remvoedAllFromInodesFromMap(child); } dir.clearChildren(); } /** Update the count of each directory with quota in the namespace * A directory's count is defined as the total number inodes in the tree * rooted at the directory. * * This is an update of existing state of the filesystem and does not * throw QuotaExceededException. */ void updateCountForINodeWithQuota() { updateCountForINodeWithQuota(this, rootDir, new INode.DirCounts(), new ArrayList<INode>(50)); } /** * Update the count of the directory if it has a quota and return the count * * This does not throw a QuotaExceededException. This is just an update * of of existing state and throwing QuotaExceededException does not help * with fixing the state, if there is a problem. * * @param dir the root of the tree that represents the directory * @param counters counters for name space and disk space * @param nodesInPath INodes for the each of components in the path. 
*/ private static void updateCountForINodeWithQuota(FSDirectory fsd, INodeDirectory dir, INode.DirCounts counts, ArrayList<INode> nodesInPath) { long parentNamespace = counts.nsCount; long parentDiskspace = counts.dsCount; counts.nsCount = 1L;//for self. should not call node.spaceConsumedInTree() counts.dsCount = 0L; /* We don't need nodesInPath if we could use 'parent' field in * INode. using 'parent' is not currently recommended. */ nodesInPath.add(dir); for (INode child : dir.getChildrenList()) { fsd.inodeMap.put(child); if (child.isDirectory()) { updateCountForINodeWithQuota(fsd, (INodeDirectory)child, counts, nodesInPath); } else if (child.isSymlink()) { counts.nsCount += 1; } else { // reduce recursive calls counts.nsCount += 1; counts.dsCount += ((INodeFile)child).diskspaceConsumed(); <<<<<<< return quotaNode.replaceSelf4INodeDirectory(); ======= INodeDirectory newNode = new INodeDirectory(dirNode); INodeDirectory parent = (INodeDirectory)inodes[inodes.length-2]; dirNode = newNode; parent.replaceChild(newNode); // update the inodeMap inodeMap.put(newNode); >>>>>>> return quotaNode.replaceSelf4INodeDirectory(); // update the inodeMap inodeMap.put(newNode); <<<<<<< return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota); ======= INodeDirectoryWithQuota newNode = new INodeDirectoryWithQuota(nsQuota, dsQuota, dirNode); // non-root directory node; parent != null INodeDirectory parent = (INodeDirectory)inodes[inodes.length-2]; dirNode = newNode; parent.replaceChild(newNode); // update the inodeMap inodeMap.put(newNode); >>>>>>> return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota); // update the inodeMap inodeMap.put(newNode);
<<<<<<< import org.apache.hadoop.hdfs.protocol.AclException; ======= import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; >>>>>>> import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer;
<<<<<<< final public FileEncryptionInfo getFileEncryptionInfo() { return feInfo; } final public int getChildrenNum() { ======= public final int getChildrenNum() { >>>>>>> public final FileEncryptionInfo getFileEncryptionInfo() { return feInfo; } public final int getChildrenNum() {
<<<<<<< /** Is this a snapshottable directory? */ public boolean isSnapshottable() { return false; } INode removeChild(INode node) { assert children != null; int low = Collections.binarySearch(children, node.name); if (low >= 0) { return children.remove(low); } else { return null; ======= private void assertChildrenNonNull() { if (children == null) { throw new AssertionError("children is null: " + this); >>>>>>> /** Is this a snapshottable directory? */ public boolean isSnapshottable() { return false; } private void assertChildrenNonNull() { if (children == null) { throw new AssertionError("children is null: " + this); <<<<<<< /** Set the children list. */ public void setChildren(List<INode> children) { this.children = children; } ======= >>>>>>> /** Set the children list. */ public void setChildren(List<INode> children) { this.children = children; }
<<<<<<< import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; ======= import org.apache.hadoop.hdfs.protocol.LayoutFlags; >>>>>>> import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.protocol.LayoutFlags;
<<<<<<< private void checkOwner(INode inode, Snapshot snapshot ) throws AccessControlException { if (inode != null && user.equals(inode.getUserName(snapshot))) { ======= /** Guarded by {@link FSNamesystem#readLock()} */ private void checkOwner(INode inode) throws AccessControlException { if (inode != null && user.equals(inode.getUserName())) { >>>>>>> /** Guarded by {@link FSNamesystem#readLock()} */ private void checkOwner(INode inode, Snapshot snapshot ) throws AccessControlException { if (inode != null && user.equals(inode.getUserName(snapshot))) { <<<<<<< private void checkTraverse(INode[] inodes, int last, Snapshot snapshot ======= /** Guarded by {@link FSNamesystem#readLock()} */ private void checkTraverse(INode[] inodes, int last >>>>>>> /** Guarded by {@link FSNamesystem#readLock()} */ private void checkTraverse(INode[] inodes, int last, Snapshot snapshot <<<<<<< private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access ======= /** Guarded by {@link FSNamesystem#readLock()} */ private void checkSubAccess(INode inode, FsAction access >>>>>>> /** Guarded by {@link FSNamesystem#readLock()} */ private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access <<<<<<< private void check(INode[] inodes, int i, Snapshot snapshot, FsAction access ======= /** Guarded by {@link FSNamesystem#readLock()} */ private void check(INode[] inodes, int i, FsAction access >>>>>>> /** Guarded by {@link FSNamesystem#readLock()} */ private void check(INode[] inodes, int i, Snapshot snapshot, FsAction access <<<<<<< private void check(INode inode, Snapshot snapshot, FsAction access ======= /** Guarded by {@link FSNamesystem#readLock()} */ private void check(INode inode, FsAction access >>>>>>> /** Guarded by {@link FSNamesystem#readLock()} */ private void check(INode inode, Snapshot snapshot, FsAction access <<<<<<< private void checkStickyBit(INode parent, INode inode, Snapshot snapshot ) throws AccessControlException { if(!parent.getFsPermission(snapshot).getStickyBit()) { ======= /** Guarded by {@link FSNamesystem#readLock()} */ private void checkStickyBit(INode parent, INode inode) throws AccessControlException { if(!parent.getFsPermission().getStickyBit()) { >>>>>>> /** Guarded by {@link FSNamesystem#readLock()} */ private void checkStickyBit(INode parent, INode inode, Snapshot snapshot ) throws AccessControlException { if(!parent.getFsPermission(snapshot).getStickyBit()) {
<<<<<<< import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; ======= import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; >>>>>>> import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; <<<<<<< public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, long capacity, long dfsUsed, long remaining, long blockPoolUsed, int xmitsInProgress, int xceiverCount, int failedVolumes) throws IOException { HeartbeatRequestProto req = HeartbeatRequestProto.newBuilder() .setRegistration(PBHelper.convert(registration)).setCapacity(capacity) .setDfsUsed(dfsUsed).setRemaining(remaining) .setBlockPoolUsed(blockPoolUsed).setXmitsInProgress(xmitsInProgress) .setXceiverCount(xceiverCount).setFailedVolumes(failedVolumes).build(); ======= public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration, StorageReport[] reports, int xmitsInProgress, int xceiverCount, int failedVolumes) throws IOException { HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder() .setRegistration(PBHelper.convert(registration)) .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount) .setFailedVolumes(failedVolumes); for (StorageReport r : reports) { builder.addReports(PBHelper.convert(r)); } >>>>>>> public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, StorageReport[] reports, int xmitsInProgress, int xceiverCount, int failedVolumes) throws IOException { HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder() .setRegistration(PBHelper.convert(registration)) .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount) .setFailedVolumes(failedVolumes); for (StorageReport r : reports) { builder.addReports(PBHelper.convert(r)); }
<<<<<<< this(b, locs, null, null, startOffset, corrupt); } public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages) { this(b, storages, -1, false); // startOffset is unknown } public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages, long startOffset, boolean corrupt) { this(b, DatanodeStorageInfo.toDatanodeInfos(storages), DatanodeStorageInfo.toStorageIDs(storages), DatanodeStorageInfo.toStorageTypes(storages), startOffset, corrupt); // startOffset is unknown } public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs, StorageType[] storageTypes, long startOffset, boolean corrupt) { ======= this(b, locs, startOffset, corrupt, EMPTY_LOCS); } public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset, boolean corrupt, DatanodeInfo[] cachedLocs) { >>>>>>> this(b, locs, null, null, startOffset, corrupt, EMPTY_LOCS); } public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages) { this(b, storages, -1, false); // startOffset is unknown } public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages, long startOffset, boolean corrupt) { this(b, DatanodeStorageInfo.toDatanodeInfos(storages), DatanodeStorageInfo.toStorageIDs(storages), DatanodeStorageInfo.toStorageTypes(storages), startOffset, corrupt, EMPTY_LOCS); // startOffset is unknown } public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs, StorageType[] storageTypes, long startOffset, boolean corrupt, DatanodeInfo[] cachedLocs) { <<<<<<< this.storageIDs = storageIDs; this.storageTypes = storageTypes; ======= Preconditions.checkArgument(cachedLocs != null, "cachedLocs should not be null, use a different constructor"); if (cachedLocs.length == 0) { this.cachedLocs = EMPTY_LOCS; } else { this.cachedLocs = cachedLocs; } >>>>>>> this.storageIDs = storageIDs; this.storageTypes = storageTypes; Preconditions.checkArgument(cachedLocs != null, "cachedLocs should not be null, use a different constructor"); if (cachedLocs.length == 0) { this.cachedLocs = EMPTY_LOCS; } else { this.cachedLocs = cachedLocs; }
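The resolved hunk above threads two independent additions through one constructor chain: the storage IDs and storage types added on one branch, and the cachedLocs argument (with its EMPTY_LOCS default and null check) added on the other, so every narrower constructor still delegates to the single widest one. The following self-contained sketch uses hypothetical class and field names (it is not the actual LocatedBlock source) purely to show that merged delegation shape:

// Editorial illustration only: hypothetical stand-in for the merged constructor chain.
import java.util.Arrays;

class MergedLocatedSketch {
    private static final String[] EMPTY_LOCS = {};

    private final String block;
    private final String[] locs;
    private final String[] storageIDs;   // introduced by one side of the merge
    private final String[] storageTypes; // introduced by one side of the merge
    private final String[] cachedLocs;   // introduced by the other side
    private final long startOffset;
    private final boolean corrupt;

    // Narrow constructor kept for old call sites; forwards defaults to the widest one.
    MergedLocatedSketch(String block, String[] locs, long startOffset, boolean corrupt) {
        this(block, locs, null, null, startOffset, corrupt, EMPTY_LOCS);
    }

    // Widest constructor: arguments from both branches meet here.
    MergedLocatedSketch(String block, String[] locs, String[] storageIDs,
                        String[] storageTypes, long startOffset, boolean corrupt,
                        String[] cachedLocs) {
        if (cachedLocs == null) {
            throw new IllegalArgumentException("cachedLocs should not be null");
        }
        this.block = block;
        this.locs = locs;
        this.storageIDs = storageIDs;
        this.storageTypes = storageTypes;
        this.startOffset = startOffset;
        this.corrupt = corrupt;
        this.cachedLocs = cachedLocs.length == 0 ? EMPTY_LOCS : cachedLocs;
    }

    @Override
    public String toString() {
        return block + " locs=" + Arrays.toString(locs)
                + " cached=" + Arrays.toString(cachedLocs);
    }

    public static void main(String[] args) {
        // An old-style call still compiles and picks up both branches' defaults.
        System.out.println(new MergedLocatedSketch("blk_1", new String[] {"dn1"}, 0L, false));
    }
}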
<<<<<<< import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto; ======= >>>>>>> import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
<<<<<<< private int maxRetries; //the max. no. of retries for socket connections // the max. no. of retries for socket connections on time out exceptions private int maxRetriesOnSocketTimeouts; private boolean tcpNoDelay; // if T then disable Nagle's Algorithm private boolean doPing; //do we need to send ping message private int pingInterval; // how often sends ping to the server in msecs ======= private final int maxRetries; //the max. no. of retries for socket connections private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm private final boolean doPing; //do we need to send ping message private final int pingInterval; // how often sends ping to the server in msecs >>>>>>> private final int maxRetries; //the max. no. of retries for socket connections // the max. no. of retries for socket connections on time out exceptions private final int maxRetriesOnSocketTimeouts; private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm private final boolean doPing; //do we need to send ping message private final int pingInterval; // how often sends ping to the server in msecs
<<<<<<< import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.StandbyException; ======= import org.apache.hadoop.io.IOUtils; >>>>>>> import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.ipc.RPC; <<<<<<< @Override public long getProtocolVersion(String protocol, long clientVersion) throws IOException { if (protocol.equals(JournalProtocol.class.getName())) { return JournalProtocol.versionID; } return super.getProtocolVersion(protocol, clientVersion); } ======= ///////////////////////////////////////////////////// // NamenodeProtocol implementation for backup node. ///////////////////////////////////////////////////// @Override // NamenodeProtocol public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) throws IOException { throw new UnsupportedActionException("getBlocks"); } // Only active name-node can register other nodes. @Override // NamenodeProtocol public NamenodeRegistration register(NamenodeRegistration registration ) throws IOException { throw new UnsupportedActionException("register"); } @Override // NamenodeProtocol public NamenodeCommand startCheckpoint(NamenodeRegistration registration) throws IOException { throw new UnsupportedActionException("startCheckpoint"); } @Override // NamenodeProtocol public void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws IOException { throw new UnsupportedActionException("endCheckpoint"); } >>>>>>>
<<<<<<< ======= import static org.apache.hadoop.util.Time.now; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; >>>>>>> import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
<<<<<<< BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0, 0); ======= 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); >>>>>>> BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0);
<<<<<<< /* * Copyright (C) 2012-2017 52°North Initiative for Geospatial Open Source ======= /** * Copyright (C) 2012-2017 52°North Initiative for Geospatial Open Source >>>>>>> /* * Copyright (C) 2012-2017 52°North Initiative for Geospatial Open Source <<<<<<< import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.n52.iceland.exception.ows.concrete.GenericThrowableWrapperException; import org.n52.shetland.ogc.ows.exception.OwsExceptionReport; ======= import org.hibernate.internal.util.collections.CollectionHelper; import org.n52.sos.convert.ConverterRepository; >>>>>>> import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.n52.iceland.exception.ows.concrete.GenericThrowableWrapperException; import org.n52.shetland.ogc.ows.exception.OwsExceptionReport; <<<<<<< private final String procedureId; private final DaoFactory daoFactory; ======= private String procedureId; private ProcedureDAO procedureDAO = new ProcedureDAO(); >>>>>>> private final String procedureId; private final DaoFactory daoFactory; private ProcedureDAO procedureDAO = new ProcedureDAO(); <<<<<<< ProcedureDAO procedureDAO = daoFactory.getProcedureDAO(); ======= >>>>>>> ProcedureDAO procedureDAO = daoFactory.getProcedureDAO(); <<<<<<< AbstractSeriesDAO seriesDAO = daoFactory.getSeriesDAO(); if (isSetTimeExtremaEmpty(pte) && seriesDAO != null) { pte = seriesDAO.getProcedureTimeExtrema(getSession(), procedureId); ======= if (isSetTimeExtremaEmpty(pte) && DaoFactory.getInstance().isSeriesDAO()) { pte = DaoFactory.getInstance().getSeriesDAO().getProcedureTimeExtrema(getSession(), procedureId); >>>>>>> AbstractSeriesDAO seriesDAO = daoFactory.getSeriesDAO(); if (isSetTimeExtremaEmpty(pte) && seriesDAO != null) { pte = seriesDAO.getProcedureTimeExtrema(getSession(), procedureId);
<<<<<<< ======= import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind; >>>>>>> import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind; <<<<<<< import com.google.common.base.Joiner; import com.google.common.collect.Lists; import com.google.common.collect.Maps; ======= import com.google.protobuf.BlockingService; >>>>>>> import com.google.common.base.Joiner; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.protobuf.BlockingService;
<<<<<<< import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp; ======= import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp; >>>>>>> import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
<<<<<<< ======= /** * The time when the last batch of caching directives was sent, in * monotonic milliseconds. */ private long lastCachingDirectiveSentTimeMs; /** * Head of the list of blocks on the datanode */ private volatile BlockInfo blockList = null; /** * Number of blocks on the datanode */ private int numBlocks = 0; >>>>>>> /** * The time when the last batch of caching directives was sent, in * monotonic milliseconds. */ private long lastCachingDirectiveSentTimeMs; /** * Head of the list of blocks on the datanode */ private volatile BlockInfo blockList = null; /** * Number of blocks on the datanode */ private int numBlocks = 0; <<<<<<< @VisibleForTesting public DatanodeStorageInfo updateStorage(DatanodeStorage s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (storage == null) { LOG.info("Adding new storage ID " + s.getStorageID() + " for DN " + getXferAddr()); storage = new DatanodeStorageInfo(this, s); storageMap.put(s.getStorageID(), storage); } else { storage.setState(s.getState()); } return storage; } } } ======= /** * @return The time at which we last sent caching directives to this * DataNode, in monotonic milliseconds. */ public long getLastCachingDirectiveSentTimeMs() { return this.lastCachingDirectiveSentTimeMs; } /** * @param time The time at which we last sent caching directives to this * DataNode, in monotonic milliseconds. */ public void setLastCachingDirectiveSentTimeMs(long time) { this.lastCachingDirectiveSentTimeMs = time; } } >>>>>>> @VisibleForTesting public DatanodeStorageInfo updateStorage(DatanodeStorage s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (storage == null) { LOG.info("Adding new storage ID " + s.getStorageID() + " for DN " + getXferAddr()); storage = new DatanodeStorageInfo(this, s); storageMap.put(s.getStorageID(), storage); } else { storage.setState(s.getState()); } return storage; } } /** * @return The time at which we last sent caching directives to this * DataNode, in monotonic milliseconds. */ public long getLastCachingDirectiveSentTimeMs() { return this.lastCachingDirectiveSentTimeMs; } /** * @param time The time at which we last sent caching directives to this * DataNode, in monotonic milliseconds. */ public void setLastCachingDirectiveSentTimeMs(long time) { this.lastCachingDirectiveSentTimeMs = time; } }
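The merged updateStorage above performs a get-or-create on storageMap under the map's own lock, while the caching-directive timestamp accessors from the other branch are kept alongside it. A minimal sketch of that synchronized get-or-create shape, with hypothetical types instead of the DatanodeDescriptor internals:

// Editorial illustration only: hypothetical types, not the DatanodeDescriptor internals.
import java.util.HashMap;
import java.util.Map;

class StorageMapSketch {
    private final Map<String, StringBuilder> storageMap = new HashMap<>();

    // Look up the entry for the ID, creating it under the map's lock when missing.
    StringBuilder updateStorage(String storageId) {
        synchronized (storageMap) {
            StringBuilder storage = storageMap.get(storageId);
            if (storage == null) {
                storage = new StringBuilder("storage:" + storageId);
                storageMap.put(storageId, storage);
            }
            return storage;
        }
    }

    public static void main(String[] args) {
        StorageMapSketch s = new StorageMapSketch();
        System.out.println(s.updateStorage("DS-1")); // prints "storage:DS-1"
    }
}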
<<<<<<< import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; ======= import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto; >>>>>>> import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto; <<<<<<< @Override public CreateSnapshotResponseProto createSnapshot(RpcController controller, CreateSnapshotRequestProto request) throws ServiceException { try { server.createSnapshot(request.getSnapshotRoot(), request.getSnapshotName()); } catch (IOException e) { throw new ServiceException(e); } return VOID_CREATE_SNAPSHOT_RESPONSE; } @Override public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller, DeleteSnapshotRequestProto request) throws ServiceException { try { server .deleteSnapshot(request.getSnapshotRoot(), request.getSnapshotName()); return VOID_DELETE_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public AllowSnapshotResponseProto allowSnapshot(RpcController controller, AllowSnapshotRequestProto req) throws ServiceException { try { server.allowSnapshot(req.getSnapshotRoot()); return VOID_ALLOW_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public DisallowSnapshotResponseProto disallowSnapshot(RpcController controller, DisallowSnapshotRequestProto req) throws ServiceException { try { server.disallowSnapshot(req.getSnapshotRoot()); return VOID_DISALLOW_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public RenameSnapshotResponseProto renameSnapshot(RpcController controller, RenameSnapshotRequestProto request) throws ServiceException { try { server.renameSnapshot(request.getSnapshotRoot(), request.getSnapshotOldName(), request.getSnapshotNewName()); return VOID_RENAME_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public GetSnapshottableDirListingResponseProto getSnapshottableDirListing( RpcController controller, GetSnapshottableDirListingRequestProto request) throws ServiceException { try { SnapshottableDirectoryStatus[] result = server .getSnapshottableDirListing(); if (result != null) { return GetSnapshottableDirListingResponseProto.newBuilder(). 
setSnapshottableDirList(PBHelper.convert(result)).build(); } else { return NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE; } } catch (IOException e) { throw new ServiceException(e); } } @Override public GetSnapshotDiffReportResponseProto getSnapshotDiffReport( RpcController controller, GetSnapshotDiffReportRequestProto request) throws ServiceException { try { SnapshotDiffReport report = server.getSnapshotDiffReport( request.getSnapshotRoot(), request.getFromSnapshot(), request.getToSnapshot()); return GetSnapshotDiffReportResponseProto.newBuilder() .setDiffReport(PBHelper.convert(report)).build(); } catch (IOException e) { throw new ServiceException(e); } } ======= @Override public IsFileClosedResponseProto isFileClosed( RpcController controller, IsFileClosedRequestProto request) throws ServiceException { try { boolean result = server.isFileClosed(request.getSrc()); return IsFileClosedResponseProto.newBuilder().setResult(result).build(); } catch (IOException e) { throw new ServiceException(e); } } >>>>>>> @Override public CreateSnapshotResponseProto createSnapshot(RpcController controller, CreateSnapshotRequestProto request) throws ServiceException { try { server.createSnapshot(request.getSnapshotRoot(), request.getSnapshotName()); } catch (IOException e) { throw new ServiceException(e); } return VOID_CREATE_SNAPSHOT_RESPONSE; } @Override public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller, DeleteSnapshotRequestProto request) throws ServiceException { try { server .deleteSnapshot(request.getSnapshotRoot(), request.getSnapshotName()); return VOID_DELETE_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public AllowSnapshotResponseProto allowSnapshot(RpcController controller, AllowSnapshotRequestProto req) throws ServiceException { try { server.allowSnapshot(req.getSnapshotRoot()); return VOID_ALLOW_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public DisallowSnapshotResponseProto disallowSnapshot(RpcController controller, DisallowSnapshotRequestProto req) throws ServiceException { try { server.disallowSnapshot(req.getSnapshotRoot()); return VOID_DISALLOW_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public RenameSnapshotResponseProto renameSnapshot(RpcController controller, RenameSnapshotRequestProto request) throws ServiceException { try { server.renameSnapshot(request.getSnapshotRoot(), request.getSnapshotOldName(), request.getSnapshotNewName()); return VOID_RENAME_SNAPSHOT_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } @Override public GetSnapshottableDirListingResponseProto getSnapshottableDirListing( RpcController controller, GetSnapshottableDirListingRequestProto request) throws ServiceException { try { SnapshottableDirectoryStatus[] result = server .getSnapshottableDirListing(); if (result != null) { return GetSnapshottableDirListingResponseProto.newBuilder(). 
setSnapshottableDirList(PBHelper.convert(result)).build(); } else { return NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE; } } catch (IOException e) { throw new ServiceException(e); } } @Override public GetSnapshotDiffReportResponseProto getSnapshotDiffReport( RpcController controller, GetSnapshotDiffReportRequestProto request) throws ServiceException { try { SnapshotDiffReport report = server.getSnapshotDiffReport( request.getSnapshotRoot(), request.getFromSnapshot(), request.getToSnapshot()); return GetSnapshotDiffReportResponseProto.newBuilder() .setDiffReport(PBHelper.convert(report)).build(); } catch (IOException e) { throw new ServiceException(e); } } @Override public IsFileClosedResponseProto isFileClosed( RpcController controller, IsFileClosedRequestProto request) throws ServiceException { try { boolean result = server.isFileClosed(request.getSrc()); return IsFileClosedResponseProto.newBuilder().setResult(result).build(); } catch (IOException e) { throw new ServiceException(e); } }
<<<<<<< public static final String DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval"; public static final int DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000; public static final String DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved"; public static final long DFS_NAMENODE_DU_RESERVED_DEFAULT = 1024 * 1024 * 100; // 100 MB public static final String DFS_NAMENODE_CHECKED_VOLUMES_KEY = "dfs.namenode.resource.checked.volumes"; ======= public static final String DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices"; public static final String DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id"; >>>>>>> public static final String DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices"; public static final String DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id"; public static final String DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval"; public static final int DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000; public static final String DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved"; public static final long DFS_NAMENODE_DU_RESERVED_DEFAULT = 1024 * 1024 * 100; // 100 MB public static final String DFS_NAMENODE_CHECKED_VOLUMES_KEY = "dfs.namenode.resource.checked.volumes";
<<<<<<< trgParent.removeChild(nodeToRemove, trgLatestSnapshot, null); ======= trgParent.removeChild(nodeToRemove); inodeMap.remove(nodeToRemove); >>>>>>> trgParent.removeChild(nodeToRemove, trgLatestSnapshot, null); inodeMap.remove(nodeToRemove); <<<<<<< return removed; ======= removeAllFromInodesFromMap(targetNode); return filesRemoved; >>>>>>> return removed; <<<<<<< /** * This method is always called with writeLock of FSDirectory held. */ public final void addToInodeMap(INode inode) { if (inode instanceof INodeWithAdditionalFields) { inodeMap.put((INodeWithAdditionalFields)inode); ======= /** Remove all the inodes under given inode from the map */ private void removeAllFromInodesFromMap(INode inode) { removeFromInodeMap(inode); if (!inode.isDirectory()) { return; } INodeDirectory dir = (INodeDirectory) inode; for (INode child : dir.getChildrenList()) { removeAllFromInodesFromMap(child); >>>>>>> /** * This method is always called with writeLock of FSDirectory held. */ public final void addToInodeMap(INode inode) { if (inode instanceof INodeWithAdditionalFields) { inodeMap.put((INodeWithAdditionalFields)inode);
<<<<<<< String key = DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId); ======= String key = DFSUtil.getNameServiceIdKey( DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId); >>>>>>> String key = DFSUtil.addKeySuffixes( DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId); <<<<<<< key = DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId); ======= key = DFSUtil.getNameServiceIdKey( DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId); >>>>>>> key = DFSUtil.addKeySuffixes( DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId); <<<<<<< conf.set(DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode ======= conf.set(DFSUtil.getNameServiceIdKey( DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode >>>>>>> conf.set(DFSUtil.addKeySuffixes( DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode <<<<<<< conf.set(DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode ======= conf.set(DFSUtil.getNameServiceIdKey( DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode >>>>>>> conf.set(DFSUtil.addKeySuffixes( DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
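Both sides of this hunk build per-nameservice configuration keys; the resolution standardizes on DFSUtil.addKeySuffixes with the unqualified key constants. As a rough sketch of what such suffixing yields (a hypothetical helper written for illustration, not the actual DFSUtil code), a federated RPC-address key for nameservice ns1 comes out as dfs.namenode.rpc-address.ns1:

// Editorial illustration only: hypothetical helper mirroring per-nameservice key suffixing.
class KeySuffixSketch {
    static String addKeySuffixes(String key, String... suffixes) {
        StringBuilder sb = new StringBuilder(key);
        for (String suffix : suffixes) {
            if (suffix != null && !suffix.isEmpty()) {
                sb.append('.').append(suffix);
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // Expected output: dfs.namenode.rpc-address.ns1
        System.out.println(addKeySuffixes("dfs.namenode.rpc-address", "ns1"));
    }
}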
<<<<<<< public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, long capacity, long dfsUsed, long remaining, long blockPoolUsed, ======= public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration, StorageReport[] reports, >>>>>>> public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, StorageReport[] reports, <<<<<<< * @param blocks - the block list as an array of longs. * Each finalized block is represented as 3 longs. Each under- * construction replica is represented as 4 longs. ======= * @param reports - report of blocks per storage * Each block is represented as 2 longs. >>>>>>> * @param reports - report of blocks per storage * Each finalized block is represented as 3 longs. Each under- * construction replica is represented as 4 longs.
<<<<<<< ======= public static final String DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit"; public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false; public static final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum"; public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT = false; >>>>>>> public static final String DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit"; public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false; public static final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum"; public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT = false; <<<<<<< // HA related configuration public static final String DFS_HA_NAMENODES_KEY = "dfs.ha.namenodes"; public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id"; ======= public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user"; >>>>>>> public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user"; // HA related configuration public static final String DFS_HA_NAMENODES_KEY = "dfs.ha.namenodes"; public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id";
<<<<<<< public abstract class INode implements Comparable<byte[]> { /* ======= abstract class INode implements Comparable<byte[]> { static final List<INode> EMPTY_LIST = Collections.unmodifiableList(new ArrayList<INode>()); /** >>>>>>> public abstract class INode implements Comparable<byte[]> { static final List<INode> EMPTY_LIST = Collections.unmodifiableList(new ArrayList<INode>()); /**
<<<<<<< ======= import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; >>>>>>> import org.apache.hadoop.hdfs.protocol.ExtendedBlock; <<<<<<< cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1) .format(false).build(); ======= cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1) .format(false).build(); >>>>>>> cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1) .format(false).build(); <<<<<<< public void testBlockCorruptionPolicy() throws Exception { ======= public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException { return MiniDFSCluster.corruptBlockOnDataNode(replica, blk); } public void testBlockCorruptionPolicy() throws IOException { >>>>>>> public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException { return MiniDFSCluster.corruptReplica(replica, blk); } public void testBlockCorruptionPolicy() throws IOException { <<<<<<< conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 30L); conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L); ======= conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 30L); conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 30); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30L); >>>>>>> conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 30L); conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L); <<<<<<< Block blk = DFSTestUtil.getFirstBlock(fs, file1); String block = blk.getBlockName(); ======= ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, file1); dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf); blocks = dfsClient.getNamenode(). getBlockLocations(file1.toString(), 0, Long.MAX_VALUE); replicaCount = blocks.get(0).getLocations().length; >>>>>>> ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1); <<<<<<< if (cluster.corruptReplica(block, i)) { ======= if (corruptReplica(blk, i)) >>>>>>> if (corruptReplica(block, i)) { <<<<<<< DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, blk, numCorruptReplicas); ======= int corruptReplicaSize = cluster.getNamesystem(). numCorruptReplicas(blk.getLocalBlock()); while (corruptReplicaSize != numCorruptReplicas) { try { IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true); } catch (IOException e) { } try { LOG.info("Looping until expected " + numCorruptReplicas + " are " + "reported. Current reported " + corruptReplicaSize); Thread.sleep(1000); } catch (InterruptedException ignore) { } corruptReplicaSize = cluster.getNamesystem(). numCorruptReplicas(blk.getLocalBlock()); } >>>>>>> DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, block, numCorruptReplicas); <<<<<<< DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, blk, 0); ======= corruptReplicaSize = cluster.getNamesystem(). numCorruptReplicas(blk.getLocalBlock()); while (corruptReplicaSize != 0 || replicaCount != numReplicas) { try { LOG.info("Looping until corrupt replica is invalidated"); Thread.sleep(1000); } catch (InterruptedException ignore) { } corruptReplicaSize = cluster.getNamesystem(). numCorruptReplicas(blk.getLocalBlock()); blocks = dfsClient.getNamenode(). 
getBlockLocations(file1.toString(), 0, Long.MAX_VALUE); replicaCount = blocks.get(0).getLocations().length; } // Make sure block is healthy assertTrue(corruptReplicaSize == 0); assertTrue(replicaCount == numReplicas); assertTrue(blocks.get(0).isCorrupt() == false); >>>>>>> DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, block, 0); <<<<<<< // Restart the cluster, add a node, and check that the truncated block is // handled correctly cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(REPLICATION_FACTOR) .format(false) .build(); cluster.startDataNodes(conf, 1, true, null, null); cluster.waitActive(); // now we have 3 datanodes ======= // restart the cluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(REPLICATION_FACTOR) .format(false) .build(); cluster.startDataNodes(conf, 1, true, null, null); cluster.waitActive(); // now we have 3 datanodes >>>>>>> // Restart the cluster, add a node, and check that the truncated block is // handled correctly cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(REPLICATION_FACTOR) .format(false) .build(); cluster.startDataNodes(conf, 1, true, null, null); cluster.waitActive(); // now we have 3 datanodes <<<<<<< static boolean changeReplicaLength(String blockName, int dnIndex, int lenDelta) throws IOException { File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data"); for (int i=dnIndex*2; i<dnIndex*2+2; i++) { File blockFile = new File(baseDir, "data" + (i+1) + MiniDFSCluster.FINALIZED_DIR_NAME + blockName); if (blockFile.exists()) { RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw"); long origLen = raFile.length(); raFile.setLength(origLen + lenDelta); raFile.close(); LOG.info("assigned length " + (origLen + lenDelta) + " to block file " + blockFile.getPath() + " on datanode " + dnIndex); return true; } ======= static boolean changeReplicaLength(ExtendedBlock blk, int dnIndex, int lenDelta) throws IOException { File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk); if (blockFile != null && blockFile.exists()) { RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw"); raFile.setLength(raFile.length()+lenDelta); raFile.close(); return true; >>>>>>> static boolean changeReplicaLength(ExtendedBlock blk, int dnIndex, int lenDelta) throws IOException { File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk); if (blockFile != null && blockFile.exists()) { RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw"); raFile.setLength(raFile.length()+lenDelta); raFile.close(); return true; <<<<<<< private static void waitForBlockDeleted(String blockName, int dnIndex, long timeout) throws IOException, TimeoutException, InterruptedException { File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data"); File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1) + MiniDFSCluster.FINALIZED_DIR_NAME + blockName); File blockFile2 = new File(baseDir, "data" + (2*dnIndex+2) + MiniDFSCluster.FINALIZED_DIR_NAME + blockName); long failtime = System.currentTimeMillis() + ((timeout > 0) ? timeout : Long.MAX_VALUE); while (blockFile1.exists() || blockFile2.exists()) { if (failtime < System.currentTimeMillis()) { throw new TimeoutException("waited too long for blocks to be deleted: " + blockFile1.getPath() + (blockFile1.exists() ? " still exists; " : " is absent; ") + blockFile2.getPath() + (blockFile2.exists() ? " still exists." 
: " is absent.")); } ======= private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex) throws IOException, InterruptedException { File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk); while (blockFile != null) { >>>>>>> private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex, long timeout) throws IOException, TimeoutException, InterruptedException { File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk); long failtime = System.currentTimeMillis() + ((timeout > 0) ? timeout : Long.MAX_VALUE); while (blockFile != null && blockFile.exists()) { if (failtime < System.currentTimeMillis()) { throw new TimeoutException("waited too long for blocks to be deleted: " + blockFile.getPath() + (blockFile.exists() ? " still exists; " : " is absent; ")); }
<<<<<<< this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(), nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate, xceiverCount, location, adminState); ======= this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate, xceiverCount, location, adminState); >>>>>>> this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(), nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate, xceiverCount, location, adminState); <<<<<<< final String DatanodeUuid, final int xferPort, final int infoPort, final int ipcPort, ======= final String storageID, final int xferPort, final int infoPort, final int infoSecurePort, final int ipcPort, >>>>>>> final String DatanodeUuid, final int xferPort, final int infoPort, final int infoSecurePort, final int ipcPort, <<<<<<< super(ipAddr, hostName, DatanodeUuid, xferPort, infoPort, ipcPort); ======= super(ipAddr, hostName, storageID, xferPort, infoPort, infoSecurePort, ipcPort); >>>>>>> super(ipAddr, hostName, DatanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort);
<<<<<<< * DO NOT MODIFY. * * Copied verbatim from Jetty code, just so that Maven's qdox can find * all the injection points. All the changes should go to {@link RunMojo}. ======= * AbstractJettyMojo * * Common base class for most jetty mojos. * * >>>>>>> * DO NOT MODIFY. * * Common base class for most jetty mojos. * * * Copied verbatim from Jetty code, just so that Maven's qdox can find * all the injection points. All the changes should go to {@link RunMojo}. <<<<<<< ======= >>>>>>> <<<<<<< /** * The context path for the webapp. Defaults to the * name of the webapp's artifact. * * @deprecated Use &lt;webApp&gt;&lt;contextPath&gt; instead. * @parameter expression="/${project.artifactId}" * @required * @readonly */ @Parameter(readonly = true, required = true, defaultValue = "/${project.artifactId}") protected String contextPath; /** * The temporary directory to use for the webapp. * Defaults to target/tmp. * * @deprecated Use %lt;webApp&gt;&lt;tempDirectory&gt; instead. * @parameter expression="${project.build.directory}/tmp" * @required * @readonly */ @Deprecated @Parameter(defaultValue = "${project.build.directory}/tmp", required = true, readonly = true) protected File tmpDirectory; ======= >>>>>>> /** * The context path for the webapp. Defaults to the * name of the webapp's artifact. * * @deprecated Use &lt;webApp&gt;&lt;contextPath&gt; instead. * @parameter expression="/${project.artifactId}" * @required * @readonly */ @Parameter(readonly = true, required = true, defaultValue = "/${project.artifactId}") protected String contextPath; /** * The temporary directory to use for the webapp. * Defaults to target/tmp. * * @deprecated Use %lt;webApp&gt;&lt;tempDirectory&gt; instead. * @parameter expression="${project.build.directory}/tmp" * @required * @readonly */ @Deprecated @Parameter(defaultValue = "${project.build.directory}/tmp", required = true, readonly = true) protected File tmpDirectory; <<<<<<< @Parameter(defaultValue = "${jetty.reload}") protected String reload = "automatic"; ======= protected String reload; >>>>>>> @Parameter(defaultValue = "${jetty.reload}") protected String reload = "automatic"; <<<<<<< ======= >>>>>>> <<<<<<< @Parameter(defaultValue = "${mojoExecution}", readonly = true) private org.apache.maven.plugin.MojoExecution execution; ======= protected org.apache.maven.plugin.MojoExecution execution; >>>>>>> @Parameter(defaultValue = "${mojoExecution}", readonly = true) protected org.apache.maven.plugin.MojoExecution execution; <<<<<<< @Parameter(defaultValue = "${plugin.artifacts}", readonly = true) private List pluginArtifacts; ======= protected List pluginArtifacts; /** * A ServerConnector to use. * * @parameter */ protected MavenServerConnector httpConnector; >>>>>>> @Parameter(defaultValue = "${plugin.artifacts}", readonly = true) protected List pluginArtifacts; /** * A ServerConnector to use. * * @parameter */ @Parameter protected MavenServerConnector httpConnector; <<<<<<< protected JettyServer server; ======= protected JettyServer server = new JettyServer(); >>>>>>> protected JettyServer server = new JettyServer(); <<<<<<< ======= >>>>>>> <<<<<<< ======= >>>>>>> <<<<<<< public String PORT_SYSPROPERTY = "jetty.port"; ======= /** * <p> * Determines whether or not the server blocks when started. The default * behavior (false) will cause the server to pause other processes * while it continues to handle web requests. This is useful when starting the * server with the intent to work with it interactively. 
This is the * behaviour of the jetty:run, jetty:run-war, jetty:run-war-exploded goals. * </p><p> * If true, the server will not block the execution of subsequent code. This * is the behaviour of the jetty:start and default behaviour of the jetty:deploy goals. * </p> */ protected boolean nonblocking = false; >>>>>>> /** * <p> * Determines whether or not the server blocks when started. The default * behavior (false) will cause the server to pause other processes * while it continues to handle web requests. This is useful when starting the * server with the intent to work with it interactively. This is the * behaviour of the jetty:run, jetty:run-war, jetty:run-war-exploded goals. * </p><p> * If true, the server will not block the execution of subsequent code. This * is the behaviour of the jetty:start and default behaviour of the jetty:deploy goals. * </p> */ protected boolean nonblocking = false; <<<<<<< public abstract void checkPomConfiguration() throws MojoExecutionException; ======= public abstract void checkPomConfiguration() throws MojoExecutionException; >>>>>>> public abstract void checkPomConfiguration() throws MojoExecutionException; <<<<<<< ======= /** * @throws MojoExecutionException */ >>>>>>> /** * @throws MojoExecutionException */ <<<<<<< ======= /** * @param artifact * @return */ >>>>>>> /** * @param artifact * @return */ <<<<<<< ======= /** * @throws Exception */ >>>>>>> /** * @throws Exception */ <<<<<<< for ( File xmlFile : getJettyXmlFiles() ) { getLog().info( "Configuring Jetty from xml configuration file = " + xmlFile.getCanonicalPath() ); XmlConfiguration xmlConfiguration = new XmlConfiguration(Resource.toURL(xmlFile)); xmlConfiguration.configure(this.server); } ======= this.server.applyXmlConfigurations(getJettyXmlFiles()); >>>>>>> this.server.applyXmlConfigurations(getJettyXmlFiles()); <<<<<<< if(stopPort>0 && stopKey!=null) { ShutdownMonitor monitor = ShutdownMonitor.getInstance(); monitor.setPort(stopPort); monitor.setKey(stopKey); monitor.setExitVm(!daemon); } ======= >>>>>>> <<<<<<< ======= if ( dumpOnStart ) { getLog().info(this.server.dump()); } >>>>>>> if ( dumpOnStart ) { getLog().info(this.server.dump()); } <<<<<<< //If no contextPath was specified, go with our default ======= //If no contextPath was specified, go with default of project artifactid >>>>>>> //If no contextPath was specified, go with default of project artifactid <<<<<<< if (!tmpDirectory.exists()) tmpDirectory.mkdirs(); webApp.setTempDirectory(tmpDirectory); ======= File target = new File(project.getBuild().getDirectory()); File tmp = new File(target,"tmp"); if (!tmp.exists()) tmp.mkdirs(); webApp.setTempDirectory(tmp); >>>>>>> File target = new File(project.getBuild().getDirectory()); File tmp = new File(target,"tmp"); if (!tmp.exists()) tmp.mkdirs(); webApp.setTempDirectory(tmp); <<<<<<< ======= >>>>>>> <<<<<<< public Scanner getScanner () { return scanner; } public MavenProject getProject() { return this.project; } public void setProject(MavenProject project) { this.project = project; } public File getTmpDirectory() { return this.tmpDirectory; } public void setTmpDirectory(File tmpDirectory) { this.tmpDirectory = tmpDirectory; } /** * @return Returns the contextPath. */ public String getContextPath() { return this.contextPath; } public void setContextPath(String contextPath) { this.contextPath = contextPath; } /** * @return Returns the scanIntervalSeconds. 
*/ public int getScanIntervalSeconds() { return this.scanIntervalSeconds; } public void setScanIntervalSeconds(int scanIntervalSeconds) { this.scanIntervalSeconds = scanIntervalSeconds; } ======= >>>>>>> <<<<<<< public File getSystemPropertiesFile() { return this.systemPropertiesFile; } ======= >>>>>>> <<<<<<< properties.load(propFile); ======= try (InputStream propFile = new FileInputStream(systemPropertiesFile)) { properties.load(propFile); } >>>>>>> try (InputStream propFile = new FileInputStream(systemPropertiesFile)) { properties.load(propFile); } <<<<<<< for (Enumeration keys = properties.keys(); keys.hasMoreElements(); ) ======= for (Enumeration<?> keys = properties.keys(); keys.hasMoreElements(); ) >>>>>>> for (Enumeration<?> keys = properties.keys(); keys.hasMoreElements(); ) <<<<<<< String[] files = this.jettyXml.split(","); ======= String[] files = StringUtil.csvSplit(this.jettyXml); >>>>>>> String[] files = StringUtil.csvSplit(this.jettyXml); <<<<<<< public JettyServer getServer () { return this.server; } public void setServer (JettyServer server) { this.server = server; } public void setScanList (ArrayList<File> list) { this.scanList = new ArrayList<File>(list); } public ArrayList<File> getScanList () { return this.scanList; } public void setScannerListeners (ArrayList<Scanner.BulkListener> listeners) { this.scannerListeners = new ArrayList<Scanner.BulkListener>(listeners); } public ArrayList getScannerListeners() { return this.scannerListeners; } public JettyWebAppContext getWebAppConfig() { return webApp; } public void setWebAppConfig(JettyWebAppContext webAppConfig) { this.webApp = webAppConfig; } public RequestLog getRequestLog() { return requestLog; } public void setRequestLog(RequestLog requestLog) { this.requestLog = requestLog; } public LoginService[] getLoginServices() { return loginServices; } public void setLoginServices(LoginService[] loginServices) { this.loginServices = loginServices; } public ContextHandler[] getContextHandlers() { return contextHandlers; } public void setContextHandlers(ContextHandler[] contextHandlers) { this.contextHandlers = contextHandlers; } public Connector[] getConnectors() { return connectors; } public void setConnectors(Connector[] connectors) { this.connectors = connectors; } public String getReload() { return reload; } public void setReload(String reload) { this.reload = reload; } public String getJettyConfig() { return jettyXml; } public void setJettyConfig(String jettyConfig) { this.jettyXml = jettyConfig; } public String getWebAppXml() { return contextXml; } public void setWebAppXml(String webAppXml) { this.contextXml = webAppXml; } public boolean isSkip() { return skip; } public void setSkip(boolean skip) { this.skip = skip; } public boolean isDaemon() { return daemon; } public void setDaemon(boolean daemon) { this.daemon = daemon; } public String getStopKey() { return stopKey; } public void setStopKey(String stopKey) { this.stopKey = stopKey; } public int getStopPort() { return stopPort; } public void setStopPort(int stopPort) { this.stopPort = stopPort; } public List getPluginArtifacts() { return pluginArtifacts; } public void setPluginArtifacts(List pluginArtifacts) { this.pluginArtifacts = pluginArtifacts; } ======= /** * @param goal * @return */ >>>>>>> /** * @param goal * @return */
<<<<<<< Properties configProperties = new Properties(); ======= >>>>>>>
<<<<<<< import com.iota.iri.network.TransactionRequesterWorker; ======= import com.iota.iri.network.impl.TipsRequesterImpl; >>>>>>> <<<<<<< private final TransactionRequesterWorker transactionRequesterWorker; private final BundleValidator bundleValidator; ======= public final BundleValidator bundleValidator; >>>>>>> public final BundleValidator bundleValidator; <<<<<<< this.ledgerService = ledgerService; this.spentAddressesProvider = spentAddressesProvider; this.spentAddressesService = spentAddressesService; this.snapshotProvider = snapshotProvider; this.snapshotService = snapshotService; this.localSnapshotManager = localSnapshotManager; this.milestoneService = milestoneService; this.latestMilestoneTracker = latestMilestoneTracker; this.latestSolidMilestoneTracker = latestSolidMilestoneTracker; this.seenMilestonesRetriever = seenMilestonesRetriever; this.milestoneSolidifier = milestoneSolidifier; this.transactionPruner = transactionPruner; this.transactionRequesterWorker = transactionRequesterWorker; this.neighborRouter = neighborRouter; this.txPipeline = transactionProcessingPipeline; this.tipsRequester = tipsRequester; // legacy classes this.bundleValidator = bundleValidator; this.tangle = tangle; this.tipsViewModel = tipsViewModel; this.transactionRequester = transactionRequester; this.transactionValidator = transactionValidator; this.tipsSolidifier = tipsSolidifier; this.tipsSelector = tipsSelector; } private void initDependencies() throws SnapshotException, SpentAddressesException { //snapshot provider must be initialized first //because we check whether spent addresses data exists snapshotProvider.init(); spentAddressesProvider.init(); latestMilestoneTracker.init(); seenMilestonesRetriever.init(); if (transactionPruner != null) { transactionPruner.init(); } ======= // new refactored instances spentAddressesProvider = new SpentAddressesProviderImpl(); spentAddressesService = new SpentAddressesServiceImpl(); snapshotProvider = new SnapshotProviderImpl(); snapshotService = new SnapshotServiceImpl(); localSnapshotManager = configuration.getLocalSnapshotsEnabled() ? new LocalSnapshotManagerImpl() : null; milestoneService = new MilestoneServiceImpl(); latestMilestoneTracker = new LatestMilestoneTrackerImpl(); latestSolidMilestoneTracker = new LatestSolidMilestoneTrackerImpl(); seenMilestonesRetriever = new SeenMilestonesRetrieverImpl(); milestoneSolidifier = new MilestoneSolidifierImpl(); transactionPruner = configuration.getLocalSnapshotsEnabled() && configuration.getLocalSnapshotsPruningEnabled() ? 
new AsyncTransactionPruner() : null; neighborRouter = new NeighborRouter(); txPipeline = new TransactionProcessingPipelineImpl(); tipRequester = new TipsRequesterImpl(); // legacy code bundleValidator = new BundleValidator(); tangle = new Tangle(); tipsViewModel = new TipsViewModel(); transactionRequester = new TransactionRequester(tangle, snapshotProvider); transactionValidator = new TransactionValidator(tangle, snapshotProvider, tipsViewModel, transactionRequester); tipsSolidifier = new TipsSolidifier(tangle, transactionValidator, tipsViewModel, configuration); tipsSelector = createTipSelector(configuration); injectDependencies(); >>>>>>> this.ledgerService = ledgerService; this.spentAddressesProvider = spentAddressesProvider; this.spentAddressesService = spentAddressesService; this.snapshotProvider = snapshotProvider; this.snapshotService = snapshotService; this.localSnapshotManager = localSnapshotManager; this.milestoneService = milestoneService; this.latestMilestoneTracker = latestMilestoneTracker; this.latestSolidMilestoneTracker = latestSolidMilestoneTracker; this.seenMilestonesRetriever = seenMilestonesRetriever; this.milestoneSolidifier = milestoneSolidifier; this.transactionPruner = transactionPruner; this.neighborRouter = neighborRouter; this.txPipeline = transactionProcessingPipeline; this.tipsRequester = tipsRequester; // legacy classes this.bundleValidator = bundleValidator; this.tangle = tangle; this.tipsViewModel = tipsViewModel; this.transactionRequester = transactionRequester; this.transactionValidator = transactionValidator; this.tipsSolidifier = tipsSolidifier; this.tipsSelector = tipsSelector; } private void initDependencies() throws SnapshotException, SpentAddressesException { //snapshot provider must be initialized first //because we check whether spent addresses data exists snapshotProvider.init(); spentAddressesProvider.init(); latestMilestoneTracker.init(); seenMilestonesRetriever.init(); if (transactionPruner != null) { transactionPruner.init(); } <<<<<<< transactionRequester.init(); ======= >>>>>>> <<<<<<< ======= private void injectDependencies() throws SnapshotException, TransactionPruningException, SpentAddressesException { // snapshot provider must be initialized first // because we check whether spent addresses data exists snapshotProvider.init(configuration); spentAddressesProvider.init(configuration, createRocksDbProvider( configuration.getSpentAddressesDbPath(), configuration.getSpentAddressesDbLogPath(), 1000, new HashMap<String, Class<? 
extends Persistable>>(1) {{put("spent-addresses", SpentAddress.class);}}, null) ); spentAddressesService.init(tangle, snapshotProvider, spentAddressesProvider, bundleValidator, configuration); snapshotService.init(tangle, snapshotProvider, spentAddressesService, spentAddressesProvider, configuration); if (localSnapshotManager != null) { localSnapshotManager.init(snapshotProvider, snapshotService, transactionPruner, configuration); } milestoneService.init(tangle, snapshotProvider, snapshotService, bundleValidator, configuration); latestMilestoneTracker.init(tangle, snapshotProvider, milestoneService, milestoneSolidifier, configuration); latestSolidMilestoneTracker.init(tangle, snapshotProvider, milestoneService, ledgerService, latestMilestoneTracker); seenMilestonesRetriever.init(tangle, snapshotProvider, transactionRequester); milestoneSolidifier.init(snapshotProvider, transactionValidator); ledgerService.init(tangle, snapshotProvider, snapshotService, milestoneService, spentAddressesService, bundleValidator); if (transactionPruner != null) { transactionPruner.init(tangle, snapshotProvider, spentAddressesService, spentAddressesProvider, tipsViewModel, configuration); } neighborRouter.init(configuration, configuration, transactionRequester, txPipeline); txPipeline.init(neighborRouter, configuration, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker); tipRequester.init(neighborRouter, tangle, latestMilestoneTracker, transactionRequester); } >>>>>>>
<<<<<<< import com.iota.iri.network.pipeline.TransactionProcessingPipelineImpl; import com.iota.iri.service.ledger.impl.LedgerServiceImpl; import com.iota.iri.service.milestone.impl.LatestMilestoneTrackerImpl; import com.iota.iri.service.milestone.impl.LatestSolidMilestoneTrackerImpl; import com.iota.iri.service.milestone.impl.MilestoneServiceImpl; import com.iota.iri.service.milestone.impl.MilestoneSolidifierImpl; import com.iota.iri.service.milestone.impl.SeenMilestonesRetrieverImpl; ======= import com.iota.iri.service.TipsSolidifier; import com.iota.iri.service.ledger.LedgerService; import com.iota.iri.service.milestone.LatestMilestoneTracker; import com.iota.iri.service.milestone.LatestSolidMilestoneTracker; import com.iota.iri.service.milestone.MilestoneService; import com.iota.iri.service.milestone.MilestoneSolidifier; import com.iota.iri.service.milestone.SeenMilestonesRetriever; import com.iota.iri.service.snapshot.LocalSnapshotManager; >>>>>>> import com.iota.iri.service.ledger.LedgerService; import com.iota.iri.service.milestone.*; import com.iota.iri.service.snapshot.LocalSnapshotManager; <<<<<<< public final Tangle tangle; public final TransactionValidator transactionValidator; public final TransactionRequester transactionRequester; public final TipsRequesterImpl tipRequester; public final TransactionProcessingPipeline txPipeline; public final NeighborRouter neighborRouter; public final IotaConfig configuration; public final TipsViewModel tipsViewModel; public final TipSelector tipsSelector; ======= private final Tangle tangle; private final TransactionValidator transactionValidator; private final TipsSolidifier tipsSolidifier; private final TransactionRequester transactionRequester; private final TipsRequester tipsRequester; private final TransactionProcessingPipeline txPipeline; public final NeighborRouter neighborRouter; // used in test private final IotaConfig configuration; private final TipsViewModel tipsViewModel; private final TipSelector tipsSelector; >>>>>>> private final Tangle tangle; private final TransactionValidator transactionValidator; private final TransactionRequester transactionRequester; private final TipsRequester tipsRequester; private final TransactionProcessingPipeline txPipeline; public final NeighborRouter neighborRouter; // used in test private final IotaConfig configuration; private final TipsViewModel tipsViewModel; private final TipSelector tipsSelector; <<<<<<< // new refactored instances spentAddressesProvider = new SpentAddressesProviderImpl(); spentAddressesService = new SpentAddressesServiceImpl(); snapshotProvider = new SnapshotProviderImpl(); snapshotService = new SnapshotServiceImpl(); localSnapshotManager = configuration.getLocalSnapshotsEnabled() ? new LocalSnapshotManagerImpl() : null; milestoneService = new MilestoneServiceImpl(); latestMilestoneTracker = new LatestMilestoneTrackerImpl(); latestSolidMilestoneTracker = new LatestSolidMilestoneTrackerImpl(); seenMilestonesRetriever = new SeenMilestonesRetrieverImpl(); milestoneSolidifier = new MilestoneSolidifierImpl(); transactionPruner = configuration.getLocalSnapshotsEnabled() && configuration.getLocalSnapshotsPruningEnabled() ? 
new AsyncTransactionPruner() : null; neighborRouter = new NeighborRouter(); txPipeline = new TransactionProcessingPipelineImpl(); tipRequester = new TipsRequesterImpl(); // legacy code bundleValidator = new BundleValidator(); tangle = new Tangle(); tipsViewModel = new TipsViewModel(); transactionRequester = new TransactionRequester(tangle, snapshotProvider); transactionValidator = new TransactionValidator(tangle, snapshotProvider, tipsViewModel, transactionRequester); tipsSelector = createTipSelector(configuration); injectDependencies(); ======= this.ledgerService = ledgerService; this.spentAddressesProvider = spentAddressesProvider; this.spentAddressesService = spentAddressesService; this.snapshotProvider = snapshotProvider; this.snapshotService = snapshotService; this.localSnapshotManager = localSnapshotManager; this.milestoneService = milestoneService; this.latestMilestoneTracker = latestMilestoneTracker; this.latestSolidMilestoneTracker = latestSolidMilestoneTracker; this.seenMilestonesRetriever = seenMilestonesRetriever; this.milestoneSolidifier = milestoneSolidifier; this.transactionPruner = transactionPruner; this.neighborRouter = neighborRouter; this.txPipeline = transactionProcessingPipeline; this.tipsRequester = tipsRequester; // legacy classes this.bundleValidator = bundleValidator; this.tangle = tangle; this.tipsViewModel = tipsViewModel; this.transactionRequester = transactionRequester; this.transactionValidator = transactionValidator; this.tipsSolidifier = tipsSolidifier; this.tipsSelector = tipsSelector; } private void initDependencies() throws SnapshotException, SpentAddressesException { //snapshot provider must be initialized first //because we check whether spent addresses data exists snapshotProvider.init(); spentAddressesProvider.init(); latestMilestoneTracker.init(); seenMilestonesRetriever.init(); if (transactionPruner != null) { transactionPruner.init(); } >>>>>>> this.ledgerService = ledgerService; this.spentAddressesProvider = spentAddressesProvider; this.spentAddressesService = spentAddressesService; this.snapshotProvider = snapshotProvider; this.snapshotService = snapshotService; this.localSnapshotManager = localSnapshotManager; this.milestoneService = milestoneService; this.latestMilestoneTracker = latestMilestoneTracker; this.latestSolidMilestoneTracker = latestSolidMilestoneTracker; this.seenMilestonesRetriever = seenMilestonesRetriever; this.milestoneSolidifier = milestoneSolidifier; this.transactionPruner = transactionPruner; this.neighborRouter = neighborRouter; this.txPipeline = transactionProcessingPipeline; this.tipsRequester = tipsRequester; // legacy classes this.bundleValidator = bundleValidator; this.tangle = tangle; this.tipsViewModel = tipsViewModel; this.transactionRequester = transactionRequester; this.transactionValidator = transactionValidator; this.tipsSelector = tipsSelector; } private void initDependencies() throws SnapshotException, SpentAddressesException { //snapshot provider must be initialized first //because we check whether spent addresses data exists snapshotProvider.init(); boolean assertSpentAddressesExistence = !configuration.isTestnet() && snapshotProvider.getInitialSnapshot().getIndex() != configuration.getMilestoneStartIndex(); spentAddressesProvider.init(assertSpentAddressesExistence); latestMilestoneTracker.init(); seenMilestonesRetriever.init(); if (transactionPruner != null) { transactionPruner.init(); } <<<<<<< transactionValidator.init(configuration.isTestnet(), configuration.getMwm()); ======= 
transactionValidator.init(); tipsSolidifier.init(); >>>>>>> transactionValidator.init(); <<<<<<< private void injectDependencies() throws SnapshotException, TransactionPruningException, SpentAddressesException { // snapshot provider must be initialized first // because we check whether spent addresses data exists snapshotProvider.init(configuration); initSpentAddressesProvider(); spentAddressesService.init(tangle, snapshotProvider, spentAddressesProvider, bundleValidator, configuration); snapshotService.init(tangle, snapshotProvider, spentAddressesService, spentAddressesProvider, configuration); if (localSnapshotManager != null) { localSnapshotManager.init(snapshotProvider, snapshotService, transactionPruner, configuration); } milestoneService.init(tangle, snapshotProvider, snapshotService, bundleValidator, configuration); latestMilestoneTracker.init(tangle, snapshotProvider, milestoneService, milestoneSolidifier, configuration); latestSolidMilestoneTracker.init(tangle, snapshotProvider, milestoneService, ledgerService, latestMilestoneTracker, transactionRequester); seenMilestonesRetriever.init(tangle, snapshotProvider, transactionRequester); milestoneSolidifier.init(snapshotProvider, transactionValidator); ledgerService.init(tangle, snapshotProvider, snapshotService, milestoneService, spentAddressesService, bundleValidator); if (transactionPruner != null) { transactionPruner.init(tangle, snapshotProvider, spentAddressesService, spentAddressesProvider, tipsViewModel, configuration); } neighborRouter.init(configuration, configuration, transactionRequester, txPipeline); txPipeline.init(neighborRouter, configuration, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); tipRequester.init(neighborRouter, tangle, latestMilestoneTracker, transactionRequester); } private void initSpentAddressesProvider() throws SpentAddressesException { PersistenceProvider spentAddressesDbProvider = createRocksDbProvider( configuration.getSpentAddressesDbPath(), configuration.getSpentAddressesDbLogPath(), 1000, new HashMap<String, Class<? extends Persistable>>(1) {{ put("spent-addresses", SpentAddress.class); }}, null); boolean assertSpentAddressesExistence = !configuration.isTestnet() && snapshotProvider.getInitialSnapshot().getIndex() != configuration.getMilestoneStartIndex(); spentAddressesProvider.init(configuration, spentAddressesDbProvider, assertSpentAddressesExistence); } ======= >>>>>>> <<<<<<< tipRequester.shutdown(); ======= tipsSolidifier.shutdown(); tipsRequester.shutdown(); >>>>>>> tipsRequester.shutdown();
<<<<<<< TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(); pipeline.init(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); ======= TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker); >>>>>>> TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); <<<<<<< TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(); pipeline.init(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); ======= TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker); >>>>>>> TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); <<<<<<< TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(); pipeline.init(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); ======= TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker); >>>>>>> TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); <<<<<<< TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(); pipeline.init(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester); ======= TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker); >>>>>>> TransactionProcessingPipeline pipeline = new TransactionProcessingPipelineImpl(neighborRouter, nodeConfig, transactionValidator, tangle, snapshotProvider, tipsViewModel, latestMilestoneTracker, transactionRequester);
<<<<<<< ======= import java.util.List; import java.util.Map; import com.iota.iri.storage.Tangle; import com.iota.iri.storage.PersistenceProvider; import com.iota.iri.storage.LocalSnapshotsPersistenceProvider; import com.iota.iri.storage.rocksDB.RocksDBPersistenceProvider; import com.iota.iri.storage.Indexable; import com.iota.iri.storage.Persistable; import org.apache.commons.lang3.NotImplementedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; >>>>>>>
<<<<<<< iter.seek(range, new ArrayList<>(), false); // BufferOverflowException should be thrown as RowEncodingIterator can't fit the whole row into its buffer. ======= iter.seek(range, new ArrayList<ByteSequence>(), false); // BufferOverflowException should be thrown as RowEncodingIterator can't fit the whole row into // its buffer. >>>>>>> iter.seek(range, new ArrayList<>(), false); // BufferOverflowException should be thrown as RowEncodingIterator can't fit the whole row into // its buffer.
<<<<<<< public static int latestMilestoneIndex = 0; public static int latestSolidSubtangleMilestoneIndex = 0; ======= public static final int MILESTONE_START_INDEX = 6000; public static int latestMilestoneIndex = MILESTONE_START_INDEX; public static int latestSolidSubtangleMilestoneIndex = MILESTONE_START_INDEX; >>>>>>> public static final int MILESTONE_START_INDEX = 0; public static int latestMilestoneIndex = MILESTONE_START_INDEX; public static int latestSolidSubtangleMilestoneIndex = MILESTONE_START_INDEX;
<<<<<<< public static List<List<TransactionViewModel>> validate(Tangle tangle, Snapshot initialSnapshot, Hash tailHash) throws Exception { ======= /** * Fetches a bundle of transactions identified by the {@code tailHash} and validates the transactions. * Bundle is a group of transactions with the same bundle hash chained by their trunks. * <p> * The fetched transactions have the same bundle hash as the transaction identified by {@code tailHash} * The validation does the following semantic checks: * <ol> * <li>The absolute bundle value never exceeds the total, global supply of iotas</li> * <li>The last trit when we convert from binary</li> * <li>Total bundle value is 0 (inputs and outputs are balanced)</li> * <li>Recalculate the bundle hash by absorbing and squeezing the transactions' essence</li> * <li>Validate the signature on input transactions</li> * </ol> * * As well as the following syntactic checks: * <ol> * <li>{@code tailHash} has an index of 0</li> * <li>The transactions' reference order is consistent with the indexes</li> * <li>The last index of each transaction in the bundle matches the last index of the tail transaction</li> * <li>Check that last trit in a valid address hash is 0. We generate addresses using binary Kerl and * we lose the last trit in the process</li> * </ol> * * @implNote if {@code tailHash} was already invalidated/validated by a previous call to this method * then we don't validate it * again. *</p> * @param tangle used to fetch the bundle's transactions from the persistence layer * @param tailHash the hash of the last transaction in a bundle. * @return A list of transactions of the bundle contained in another list. If the bundle is valid then the tail * transaction's {@link TransactionViewModel#getValidity()} will return 1, else * {@link TransactionViewModel#getValidity()} will return -1. * If the bundle is invalid then an empty list will be returned. * @throws Exception if a persistence error occured */ public static List<List<TransactionViewModel>> validate(Tangle tangle, Hash tailHash) throws Exception { >>>>>>> /** * Fetches a bundle of transactions identified by the {@code tailHash} and validates the transactions. * Bundle is a group of transactions with the same bundle hash chained by their trunks. * <p> * The fetched transactions have the same bundle hash as the transaction identified by {@code tailHash} * The validation does the following semantic checks: * <ol> * <li>The absolute bundle value never exceeds the total, global supply of iotas</li> * <li>The last trit when we convert from binary</li> * <li>Total bundle value is 0 (inputs and outputs are balanced)</li> * <li>Recalculate the bundle hash by absorbing and squeezing the transactions' essence</li> * <li>Validate the signature on input transactions</li> * </ol> * * As well as the following syntactic checks: * <ol> * <li>{@code tailHash} has an index of 0</li> * <li>The transactions' reference order is consistent with the indexes</li> * <li>The last index of each transaction in the bundle matches the last index of the tail transaction</li> * <li>Check that last trit in a valid address hash is 0. We generate addresses using binary Kerl and * we lose the last trit in the process</li> * </ol> * * @implNote if {@code tailHash} was already invalidated/validated by a previous call to this method * then we don't validate it * again. *</p> * @param tangle used to fetch the bundle's transactions from the persistence layer * @param tailHash the hash of the last transaction in a bundle. 
* @return A list of transactions of the bundle contained in another list. If the bundle is valid then the tail * transaction's {@link TransactionViewModel#getValidity()} will return 1, else * {@link TransactionViewModel#getValidity()} will return -1. * If the bundle is invalid then an empty list will be returned. * @throws Exception if a persistence error occurred */ public static List<List<TransactionViewModel>> validate(Tangle tangle, Snapshot initialSnapshot, Hash tailHash) throws Exception { <<<<<<< instanceTransactionViewModels.get(0).setValidity(tangle, initialSnapshot, 1); ======= //should only be reached after the above for loop is done instanceTransactionViewModels.get(0).setValidity(tangle, 1); >>>>>>> //should only be reached after the above for loop is done instanceTransactionViewModels.get(0).setValidity(tangle, initialSnapshot, 1); <<<<<<< } else { instanceTransactionViewModels.get(0).setValidity(tangle, initialSnapshot, -1); ======= } //bundle hash verification failed else { instanceTransactionViewModels.get(0).setValidity(tangle, -1); >>>>>>> } //bundle hash verification failed else { instanceTransactionViewModels.get(0).setValidity(tangle, initialSnapshot, -1); <<<<<<< } else { instanceTransactionViewModels.get(0).setValidity(tangle, initialSnapshot, -1); ======= } //total bundle value does not sum to 0 else { instanceTransactionViewModels.get(0).setValidity(tangle, -1); >>>>>>> } //total bundle value does not sum to 0 else { instanceTransactionViewModels.get(0).setValidity(tangle, initialSnapshot, -1);
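The javadoc in the record above enumerates the bundle rules being validated (tail at index 0, matching last indexes, values bounded by the global supply, inputs and outputs balancing to zero). As a rough illustration only, the minimal sketch below applies those value/index rules to a hypothetical Tx type; the class name, the Tx record, and the TOTAL_SUPPLY constant are assumptions for this example and are not IRI's actual TransactionViewModel or its real validator.

// BundleChecksSketch.java -- minimal, self-contained illustration of the value/index checks
// listed in the javadoc above. All names here are hypothetical stand-ins, not IRI types.
import java.util.List;

public final class BundleChecksSketch {

    /** Hypothetical, simplified bundle transaction: value plus its position in the bundle. */
    public record Tx(long value, int currentIndex, int lastIndex) {}

    /** Assumed bound mirroring the "total, global supply of iotas" mentioned above. */
    static final long TOTAL_SUPPLY = 2_779_530_283_277_761L;

    /** Returns true iff the sketched value/index rules hold for the given bundle. */
    public static boolean looksValid(List<Tx> bundle) {
        if (bundle.isEmpty() || bundle.get(0).currentIndex() != 0) {
            return false;                        // the tail transaction must have index 0
        }
        int lastIndex = bundle.get(0).lastIndex();
        long sum = 0;
        for (Tx tx : bundle) {
            if (Math.abs(tx.value()) > TOTAL_SUPPLY) {
                return false;                    // a single value exceeds the total supply
            }
            if (tx.lastIndex() != lastIndex) {
                return false;                    // all transactions share the tail's last index
            }
            sum += tx.value();
        }
        return sum == 0;                         // inputs and outputs must balance to zero
    }

    public static void main(String[] args) {
        // A balanced two-transaction bundle passes; an unbalanced one does not.
        System.out.println(looksValid(List.of(new Tx(-5, 0, 1), new Tx(5, 1, 1))));  // true
        System.out.println(looksValid(List.of(new Tx(-5, 0, 1), new Tx(4, 1, 1))));  // false
    }
}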
<<<<<<< import com.iota.iri.service.tipselection.*; import com.iota.iri.service.tipselection.impl.*; ======= import com.iota.iri.service.spentaddresses.SpentAddressesException; import com.iota.iri.service.spentaddresses.impl.SpentAddressesProviderImpl; import com.iota.iri.service.spentaddresses.impl.SpentAddressesServiceImpl; import com.iota.iri.service.tipselection.*; import com.iota.iri.service.tipselection.impl.*; >>>>>>> import com.iota.iri.service.spentaddresses.SpentAddressesException; import com.iota.iri.service.spentaddresses.impl.SpentAddressesProviderImpl; import com.iota.iri.service.spentaddresses.impl.SpentAddressesServiceImpl; import com.iota.iri.service.tipselection.*; import com.iota.iri.service.tipselection.impl.*; <<<<<<< import com.iota.iri.storage.*; ======= import com.iota.iri.storage.Indexable; import com.iota.iri.storage.Persistable; import com.iota.iri.storage.Tangle; import com.iota.iri.storage.ZmqPublishProvider; >>>>>>> import com.iota.iri.storage.*; import com.iota.iri.storage.Indexable; import com.iota.iri.storage.Persistable; import com.iota.iri.storage.Tangle; import com.iota.iri.storage.ZmqPublishProvider;
<<<<<<< * @return Descriptions#REQUEST_HASH_SIZE ======= * @return {@value ProtocolConfig.Descriptions#TRANSACTION_PACKET_SIZE} */ int getTransactionPacketSize(); /** * Default Value: {@value BaseIotaConfig.Defaults#REQUEST_HASH_SIZE} * * @return {@value ProtocolConfig.Descriptions#REQUEST_HASH_SIZE} >>>>>>> * Default Value: {@value BaseIotaConfig.Defaults#REQUEST_HASH_SIZE} * * @return {@value ProtocolConfig.Descriptions#REQUEST_HASH_SIZE} <<<<<<< ======= /** * Default Value: {@value BaseIotaConfig.Defaults#P_REPLY_RANDOM_TIP} * * @return {@value ProtocolConfig.Descriptions#P_REPLY_RANDOM_TIP} */ double getpReplyRandomTip(); /** * Default Value: {@value BaseIotaConfig.Defaults#P_DROP_TRANSACTION} * * @return {@value ProtocolConfig.Descriptions#P_DROP_TRANSACTION} */ double getpDropTransaction(); /** * Default Value: {@value BaseIotaConfig.Defaults#P_SELECT_MILESTONE_CHILD} * * @return {@value ProtocolConfig.Descriptions#P_SELECT_MILESTONE_CHILD} */ double getpSelectMilestoneChild(); /** * Default Value: {@value BaseIotaConfig.Defaults#P_SEND_MILESTONE} * * @return {@value ProtocolConfig.Descriptions#P_SEND_MILESTONE} */ >>>>>>> /** * Default Value: {@value BaseIotaConfig.Defaults#P_SEND_MILESTONE} * * @return {@value ProtocolConfig.Descriptions#P_SEND_MILESTONE} */ <<<<<<< ======= /** * Default Value: {@value BaseIotaConfig.Defaults#P_PROPAGATE_REQUEST} * * @return {@value ProtocolConfig.Descriptions#P_PROPAGATE_REQUEST} */ double getpPropagateRequest(); >>>>>>> <<<<<<< ======= String P_DROP_TRANSACTION = DescriptionHelper.PROB_OF + "dropping a received transaction. This is used only for testing purposes."; String P_SELECT_MILESTONE_CHILD = DescriptionHelper.PROB_OF + "requesting a milestone transaction from a neighbor. This should be a large since it is imperative that we find milestones to get transactions confirmed"; >>>>>>>
<<<<<<< private static final Logger log = LoggerFactory.getLogger(IRI.class); public static final String NAME = "IRI Testnet"; public static final String VERSION = "1.1.2.1"; public static void main(final String[] args) { log.info("Welcome to {} {}", NAME, VERSION); validateParams(args); shutdownHook(); if (!Configuration.booling(DefaultConfSettings.HEADLESS)) { showIotaLogo(); } try { Storage.instance().init(); Node.instance().init(); TipsManager.instance().init(); API.instance().init(); } catch (final Exception e) { log.error("Exception during IOTA node initialisation: ", e); System.exit(-1); } log.info("IOTA Node initialised correctly."); } private static void validateParams(final String[] args) { if (args.length > 0 && args[0].equalsIgnoreCase("-h")) { printUsage(); } if (args.length < 2) { log.error("Invalid arguments list. Provide api port number and at least one udp node address."); printUsage(); } final CmdLineParser parser = new CmdLineParser(); final Option<String> port = parser.addStringOption('p', "port"); final Option<String> rport = parser.addStringOption('r', "receiver-port"); final Option<String> cors = parser.addStringOption('c', "enabled-cors"); final Option<Boolean> headless = parser.addBooleanOption("headless"); final Option<Boolean> debug = parser.addBooleanOption('d',"debug"); final Option<String> neighbors = parser.addStringOption('n', "neighbors"); final Option<Boolean> help = parser.addBooleanOption('h',"help"); try { parser.parse(args); ======= private static final Logger log = LoggerFactory.getLogger(IRI.class); public static final String NAME = "IRI"; public static final String VERSION = "1.1.2.2"; public static void main(final String[] args) { log.info("Welcome to {} {}", NAME, VERSION); validateParams(args); shutdownHook(); if (!Configuration.booling(DefaultConfSettings.HEADLESS)) { showIotaLogo(); } try { Storage.instance().init(); Node.instance().init(); TipsManager.instance().init(); API.instance().init(); } catch (final Exception e) { log.error("Exception during IOTA node initialisation: ", e); System.exit(-1); } log.info("IOTA Node initialised correctly."); } private static void validateParams(final String[] args) { if (args == null || args.length < 2) { log.error("Invalid arguments list. Provide Api port number (i.e. '-p 14265')."); printUsage(); >>>>>>> private static final Logger log = LoggerFactory.getLogger(IRI.class); public static final String NAME = "IRI Testnet"; public static final String VERSION = "1.1.2.2"; public static void main(final String[] args) { log.info("Welcome to {} {}", NAME, VERSION); validateParams(args); shutdownHook(); if (!Configuration.booling(DefaultConfSettings.HEADLESS)) { showIotaLogo(); } try { Storage.instance().init(); Node.instance().init(); TipsManager.instance().init(); API.instance().init(); } catch (final Exception e) { log.error("Exception during IOTA node initialisation: ", e); System.exit(-1); } log.info("IOTA Node initialised correctly."); } private static void validateParams(final String[] args) { if (args == null || args.length < 2) { log.error("Invalid arguments list. Provide Api port number (i.e. '-p 14265')."); printUsage();
<<<<<<< ======= LatestMilestoneTracker provideLatestMilestoneTracker(Tangle tangle, SnapshotProvider snapshotProvider, MilestoneService milestoneService, MilestoneSolidifier milestoneSolidifier) { return new LatestMilestoneTrackerImpl(tangle, snapshotProvider, milestoneService, milestoneSolidifier, configuration); } @Singleton @Provides LatestSolidMilestoneTracker provideLatestSolidMilestoneTracker(Tangle tangle, SnapshotProvider snapshotProvider, MilestoneService milestoneService, LedgerService ledgerService, LatestMilestoneTracker latestMilestoneTracker, TransactionRequester transactionRequester) { return new LatestSolidMilestoneTrackerImpl(tangle, snapshotProvider, milestoneService, ledgerService, latestMilestoneTracker, transactionRequester, configuration); } @Singleton @Provides >>>>>>> <<<<<<< SnapshotProvider snapshotProvider, SnapshotService snapshotService, @Nullable LocalSnapshotManager localSnapshotManager, MilestoneService milestoneService, SeenMilestonesRetriever seenMilestonesRetriever, LedgerService ledgerService, @Nullable TransactionPruner transactionPruner, MilestoneSolidifier milestoneSolidifier, BundleValidator bundleValidator, Tangle tangle, TransactionValidator transactionValidator, TransactionRequester transactionRequester, NeighborRouter neighborRouter, TransactionProcessingPipeline transactionProcessingPipeline, TipsRequester tipsRequester, TipsViewModel tipsViewModel, TipSelector tipsSelector, LocalSnapshotsPersistenceProvider localSnapshotsDb, CacheManager cacheManager, TransactionSolidifier transactionSolidifier) { ======= SnapshotProvider snapshotProvider, SnapshotService snapshotService, @Nullable LocalSnapshotManager localSnapshotManager, MilestoneService milestoneService, LatestMilestoneTracker latestMilestoneTracker, LatestSolidMilestoneTracker latestSolidMilestoneTracker, SeenMilestonesRetriever seenMilestonesRetriever, LedgerService ledgerService, @Nullable TransactionPruner transactionPruner, MilestoneSolidifier milestoneSolidifier, BundleValidator bundleValidator, Tangle tangle, TransactionValidator transactionValidator, TransactionRequester transactionRequester, NeighborRouter neighborRouter, TransactionProcessingPipeline transactionProcessingPipeline, TipsRequester tipsRequester, TipsViewModel tipsViewModel, TipSelector tipsSelector, LocalSnapshotsPersistenceProvider localSnapshotsDb, TransactionSolidifier transactionSolidifier) { >>>>>>> SnapshotProvider snapshotProvider, SnapshotService snapshotService, @Nullable LocalSnapshotManager localSnapshotManager, MilestoneService milestoneService, SeenMilestonesRetriever seenMilestonesRetriever, LedgerService ledgerService, @Nullable TransactionPruner transactionPruner, MilestoneSolidifier milestoneSolidifier, BundleValidator bundleValidator, Tangle tangle, TransactionValidator transactionValidator, TransactionRequester transactionRequester, NeighborRouter neighborRouter, TransactionProcessingPipeline transactionProcessingPipeline, TipsRequester tipsRequester, TipsViewModel tipsViewModel, TipSelector tipsSelector, LocalSnapshotsPersistenceProvider localSnapshotsDb, TransactionSolidifier transactionSolidifier) { <<<<<<< localSnapshotManager, milestoneService, seenMilestonesRetriever, ledgerService, transactionPruner, milestoneSolidifier, bundleValidator, tangle, transactionValidator, transactionRequester, neighborRouter, transactionProcessingPipeline, tipsRequester, tipsViewModel, tipsSelector, localSnapshotsDb, cacheManager, transactionSolidifier); ======= localSnapshotManager, milestoneService, 
latestMilestoneTracker, latestSolidMilestoneTracker, seenMilestonesRetriever, ledgerService, transactionPruner, milestoneSolidifier, bundleValidator, tangle, transactionValidator, transactionRequester, neighborRouter, transactionProcessingPipeline, tipsRequester, tipsViewModel, tipsSelector, localSnapshotsDb, transactionSolidifier); >>>>>>> localSnapshotManager, milestoneService, seenMilestonesRetriever, ledgerService, transactionPruner, milestoneSolidifier, bundleValidator, tangle, transactionValidator, transactionRequester, neighborRouter, transactionProcessingPipeline, tipsRequester, tipsViewModel, tipsSelector, localSnapshotsDb, transactionSolidifier); <<<<<<< SpentAddressesService spentAddressesService, Tangle tangle, BundleValidator bundleValidator, SnapshotProvider snapshotProvider, LedgerService ledgerService, NeighborRouter neighborRouter, TipSelector tipsSelector, TipsViewModel tipsViewModel, TransactionValidator transactionValidator, MilestoneSolidifier milestoneSolidifier, TransactionProcessingPipeline txPipeline, TransactionSolidifier transactionSolidifier) { return new API(configuration, ixi, transactionRequester, spentAddressesService, tangle, bundleValidator, snapshotProvider, ledgerService, neighborRouter, tipsSelector, tipsViewModel, transactionValidator, milestoneSolidifier, txPipeline, transactionSolidifier); ======= SpentAddressesService spentAddressesService, Tangle tangle, BundleValidator bundleValidator, SnapshotProvider snapshotProvider, LedgerService ledgerService, NeighborRouter neighborRouter, TipSelector tipsSelector, TipsViewModel tipsViewModel, TransactionValidator transactionValidator, LatestMilestoneTracker latestMilestoneTracker, TransactionProcessingPipeline txPipeline, TransactionSolidifier transactionSolidifier) { return new API(configuration, ixi, transactionRequester, spentAddressesService, tangle, bundleValidator, snapshotProvider, ledgerService, neighborRouter, tipsSelector, tipsViewModel, transactionValidator, latestMilestoneTracker, txPipeline, transactionSolidifier); >>>>>>> SpentAddressesService spentAddressesService, Tangle tangle, BundleValidator bundleValidator, SnapshotProvider snapshotProvider, LedgerService ledgerService, NeighborRouter neighborRouter, TipSelector tipsSelector, TipsViewModel tipsViewModel, TransactionValidator transactionValidator, MilestoneSolidifier milestoneSolidifier, TransactionProcessingPipeline txPipeline, TransactionSolidifier transactionSolidifier) { return new API(configuration, ixi, transactionRequester, spentAddressesService, tangle, bundleValidator, snapshotProvider, ledgerService, neighborRouter, tipsSelector, tipsViewModel, transactionValidator, milestoneSolidifier, txPipeline, transactionSolidifier);
<<<<<<< ======= public double getpReplyRandomTip() { return pReplyRandomTip; } @JsonProperty @Parameter(names = {"--p-reply-random"}, description = ProtocolConfig.Descriptions.P_REPLY_RANDOM_TIP) protected void setpReplyRandomTip(double pReplyRandomTip) { this.pReplyRandomTip = pReplyRandomTip; } @Override public double getpDropTransaction() { return pDropTransaction; } @JsonProperty @Parameter(names = {"--p-drop-transaction"}, description = ProtocolConfig.Descriptions.P_DROP_TRANSACTION) protected void setpDropTransaction(double pDropTransaction) { this.pDropTransaction = pDropTransaction; } @Override public double getpSelectMilestoneChild() { return pSelectMilestoneChild; } @JsonProperty @Parameter(names = {"--p-select-milestone"}, description = ProtocolConfig.Descriptions.P_SELECT_MILESTONE_CHILD) protected void setpSelectMilestoneChild(double pSelectMilestoneChild) { this.pSelectMilestoneChild = pSelectMilestoneChild; } @Override >>>>>>> <<<<<<< int REQ_HASH_SIZE = 46; ======= int PACKET_SIZE = 1650; int REQUEST_HASH_SIZE = 46; >>>>>>> int REQUEST_HASH_SIZE = 46;
<<<<<<< ConsensusConfig config) { ======= BundleValidator bundleValidator, MessageQ messageQ, ConsensusConfig config) { >>>>>>> BundleValidator bundleValidator, ConsensusConfig config) { <<<<<<< ======= this.bundleValidator = bundleValidator; this.messageQ = messageQ; >>>>>>> this.bundleValidator = bundleValidator;
<<<<<<< import com.iota.iri.conf.MainnetConfig; import com.iota.iri.hash.SpongeFactory; ======= import com.iota.iri.crypto.SpongeFactory; >>>>>>> import com.iota.iri.conf.MainnetConfig; import com.iota.iri.crypto.SpongeFactory;
<<<<<<< private static final String NAME_POLYLINE = "route"; private static final String NAME_POLYGON = "route"; private static final String NAME_MARKER = "reverse_geocode"; private static final String PROP_COLOR = "color"; private static final String COLOR_DEFAULT = "#D2655F"; ======= >>>>>>> private static final String NAME_POLYLINE = "route"; private static final String NAME_POLYGON = "route"; private static final String NAME_MARKER = "reverse_geocode"; private static final String PROP_COLOR = "color"; private static final String COLOR_DEFAULT = "#D2655F"; <<<<<<< /** * Adds a polyline to the map. * @param polyline */ public MapData addPolyline(Polyline polyline) { if (polylineMapData == null) { polylineMapData = mapController.addDataLayer(NAME_POLYLINE); } List<LngLat> line = new ArrayList<>(); for (LatLng coordinate : polyline.getCoordinates()) { line.add(new LngLat(coordinate.getLongitude(), coordinate.getLatitude())); } return polylineMapData.addPolyline(line, null); } /** * Adds a polygon to the map. * @param polygon */ public MapData addPolygon(Polygon polygon) { if (polygonMapData == null) { polygonMapData = mapController.addDataLayer(NAME_POLYGON); } List<LngLat> coords = new ArrayList<>(); for (LatLng coordinate : polygon.getCoordinates()) { coords.add(new LngLat(coordinate.getLongitude(), coordinate.getLatitude())); } LatLng first = polygon.getCoordinates().get(0); coords.add(new LngLat(first.getLongitude(), first.getLatitude())); List allCoords = new ArrayList(); allCoords.add(coords); return polygonMapData.addPolygon(allCoords, null); } /** * Add a point to the map for the marker. * @param marker * @return */ public MapData addMarker(Marker marker) { if (markerMapData == null) { markerMapData = mapController.addDataLayer(NAME_MARKER); } LngLat lngLat = new LngLat(marker.getLocation().getLongitude(), marker.getLocation().getLatitude()); return markerMapData.addPoint(lngLat, null); } /** * You must call this method from your activity or fragment. */ public void onDestroy() { if (currentLocationMapData != null) { currentLocationMapData.clear(); } ======= private void addCurrentLocationMapDataToMap() { currentLocationMapData = mapController.addDataLayer(NAME_CURRENT_LOCATION); >>>>>>> /** * Adds a polyline to the map. * @param polyline */ public MapData addPolyline(Polyline polyline) { if (polylineMapData == null) { polylineMapData = mapController.addDataLayer(NAME_POLYLINE); } List<LngLat> line = new ArrayList<>(); for (LatLng coordinate : polyline.getCoordinates()) { line.add(new LngLat(coordinate.getLongitude(), coordinate.getLatitude())); } return polylineMapData.addPolyline(line, null); } /** * Adds a polygon to the map. * @param polygon */ public MapData addPolygon(Polygon polygon) { if (polygonMapData == null) { polygonMapData = mapController.addDataLayer(NAME_POLYGON); } List<LngLat> coords = new ArrayList<>(); for (LatLng coordinate : polygon.getCoordinates()) { coords.add(new LngLat(coordinate.getLongitude(), coordinate.getLatitude())); } LatLng first = polygon.getCoordinates().get(0); coords.add(new LngLat(first.getLongitude(), first.getLatitude())); List allCoords = new ArrayList(); allCoords.add(coords); return polygonMapData.addPolygon(allCoords, null); } /** * Add a point to the map for the marker. 
* @param marker * @return */ public MapData addMarker(Marker marker) { if (markerMapData == null) { markerMapData = mapController.addDataLayer(NAME_MARKER); } LngLat lngLat = new LngLat(marker.getLocation().getLongitude(), marker.getLocation().getLatitude()); return markerMapData.addPoint(lngLat, null); } /** * You must call this method from your activity or fragment. */ public void onDestroy() { if (currentLocationMapData != null) { currentLocationMapData.clear(); } } private void addCurrentLocationMapDataToMap() { currentLocationMapData = mapController.addDataLayer(NAME_CURRENT_LOCATION); <<<<<<< LngLat lngLat = new LngLat(location.getLongitude(), location.getLatitude()); mapController.setPosition(lngLat, ANIMATION_DURATION_MILLIS); ======= final LngLat lngLat = new LngLat(location.getLongitude(), location.getLatitude()); mapController.setPosition(lngLat, ANIMATION_DURATION_MILLIS); >>>>>>> final LngLat lngLat = new LngLat(location.getLongitude(), location.getLatitude()); mapController.setPosition(lngLat, ANIMATION_DURATION_MILLIS);
<<<<<<< QueryWriteStatusResponse response = bsBlockingStub.get() .queryWriteStatus(QueryWriteStatusRequest.newBuilder() .setResourceName(resourceName) .build()); if (response.getComplete()) { writeFuture.set(null); } return response; ======= >>>>>>> <<<<<<< checkNotNull(deadlineAfterUnits); writeObserver = bsStub.get() .withDeadlineAfter(deadlineAfter, deadlineAfterUnits) .write( new StreamObserver<WriteResponse>() { @Override public void onNext(WriteResponse response) { writtenBytes += response.getCommittedSize() - getCommittedSize(); writeFuture.set(null); } @Override public void onError(Throwable t) { writeFuture.setException(t); } @Override public void onCompleted() { writeObserver = null; } }); ======= writeObserver = bsStub.get() .write( new StreamObserver<WriteResponse>() { @Override public void onNext(WriteResponse response) { writeFuture.set(response.getCommittedSize()); } @Override public void onError(Throwable t) { writeFuture.setException(t); } @Override public void onCompleted() { writeObserver = null; } }); >>>>>>> checkNotNull(deadlineAfterUnits); writeObserver = bsStub.get() .withDeadlineAfter(deadlineAfter, deadlineAfterUnits) .write( new StreamObserver<WriteResponse>() { @Override public void onNext(WriteResponse response) { writeFuture.set(response.getCommittedSize()); } @Override public void onError(Throwable t) { writeFuture.setException(t); } @Override public void onCompleted() { writeObserver = null; } }); <<<<<<< if (isComplete() || checkComplete()) { throw new WriteCompleteException(); ======= if (isComplete()) { throw new WriteCompleteException(); >>>>>>> if (isComplete()) { throw new WriteCompleteException(); <<<<<<< if (isComplete() || checkComplete()) { throw new WriteCompleteException(); ======= if (isComplete()) { throw new WriteCompleteException(); >>>>>>> if (isComplete()) { throw new WriteCompleteException();
<<<<<<< import static build.buildfarm.instance.Utils.getBlob; import static build.buildfarm.worker.CASFileCache.getInterruptiblyOrIOException; import static build.buildfarm.worker.Utils.removeDirectory; ======= import static build.buildfarm.common.IOUtils.formatIOError; >>>>>>> import static build.buildfarm.common.IOUtils.formatIOError; import static build.buildfarm.instance.Utils.getBlob; import static build.buildfarm.worker.CASFileCache.getInterruptiblyOrIOException; import static build.buildfarm.worker.Utils.removeDirectory; <<<<<<< import static com.google.common.util.concurrent.Futures.allAsList; import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService; import static java.lang.String.format; import static java.util.concurrent.TimeUnit.DAYS; import static java.util.logging.Level.SEVERE; ======= import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static com.google.common.util.concurrent.MoreExecutors.shutdownAndAwaitTermination; import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor; import static java.util.concurrent.TimeUnit.MINUTES; >>>>>>> import static com.google.common.util.concurrent.Futures.allAsList; import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService; import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static com.google.common.util.concurrent.MoreExecutors.shutdownAndAwaitTermination; import static java.lang.String.format; import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor; import static java.util.concurrent.TimeUnit.DAYS; import static java.util.concurrent.TimeUnit.MINUTES; import static java.util.concurrent.TimeUnit.SECONDS; import static java.util.logging.Level.SEVERE; <<<<<<< import build.buildfarm.common.InputStreamFactory; import build.buildfarm.common.Poller; import build.buildfarm.common.grpc.Retrier; import build.buildfarm.common.grpc.Retrier.Backoff; ======= import build.buildfarm.common.function.InterruptingConsumer; import build.buildfarm.common.grpc.Retrier; import build.buildfarm.common.grpc.Retrier.Backoff; >>>>>>> import build.buildfarm.common.InputStreamFactory; import build.buildfarm.common.Poller; import build.buildfarm.common.grpc.Retrier; import build.buildfarm.common.grpc.Retrier.Backoff; <<<<<<< import build.buildfarm.instance.stub.Chunker; ======= >>>>>>> import build.buildfarm.instance.stub.Chunker; <<<<<<< import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; ======= >>>>>>> <<<<<<< DigestUtil digestUtil) { return newStubInstance( ======= DigestUtil digestUtil, Retrier retrier) { return createInstance( >>>>>>> DigestUtil digestUtil) { return newStubInstance( <<<<<<< digestUtil); ======= null, retrier); >>>>>>> digestUtil); <<<<<<< ManagedChannel channel, DigestUtil digestUtil) { return new StubInstance( name, "", digestUtil, channel, 60 /* FIXME CONFIG */, TimeUnit.SECONDS); ======= DigestUtil digestUtil, Channel channel, ByteStreamUploader uploader, Retrier retrier) { return new StubInstance(name, digestUtil, channel, uploader, retrier, retryScheduler); >>>>>>> ManagedChannel channel, DigestUtil digestUtil) { return new StubInstance( name, /* identifier=*/ "", digestUtil, channel, 60 /* FIXME CONFIG */, SECONDS, retrier, retryScheduler); <<<<<<< ManagedChannel casChannel = createChannel(casEndpoint.getTarget()); uploader = createStubUploader(casChannel); casInstance = newStubInstance(casEndpoint.getInstanceName(), casChannel, digestUtil); 
acInstance = newStubInstance(config.getActionCache(), digestUtil); operationQueueInstance = newStubInstance(config.getOperationQueue(), digestUtil); ======= Channel casChannel = createChannel(casEndpoint.getTarget()); Retrier retrier = createStubRetrier(); uploader = createStubUploader(casChannel, retrier); casInstance = createInstance(casEndpoint.getInstanceName(), digestUtil, casChannel, uploader, retrier); acInstance = createInstance(config.getActionCache(), digestUtil, retrier); operationQueueInstance = createInstance(config.getOperationQueue(), digestUtil, retrier); >>>>>>> ManagedChannel casChannel = createChannel(casEndpoint.getTarget()); uploader = createStubUploader(casChannel, retrier); casInstance = newStubInstance(casEndpoint.getInstanceName(), casChannel, digestUtil); acInstance = newStubInstance(config.getActionCache(), digestUtil); operationQueueInstance = newStubInstance(config.getOperationQueue(), digestUtil); <<<<<<< public InputStream newInput(Digest digest, long offset) throws IOException, InterruptedException { return casInstance.newStreamInput(casInstance.getBlobName(digest), offset); ======= public InputStream newInput(Digest digest, long offset) throws IOException { return casInstance.newBlobInput(digest, offset); >>>>>>> public InputStream newInput(Digest digest, long offset) throws IOException { return casInstance.newBlobInput(digest, offset); <<<<<<< public OutputStream getStreamOutput(String name) { return operationQueueInstance.getStreamOutput(name, -1); ======= public OutputStream getOperationStreamOutput(String name) throws IOException { return operationQueueInstance.getOperationStreamWrite(name).getOutput(); >>>>>>> public OutputStream getOperationStreamOutput(String name) throws IOException { return operationQueueInstance.getOperationStreamWrite(name).getOutput();
<<<<<<< import build.buildfarm.common.Watcher; ======= import build.buildfarm.common.Write; >>>>>>> import build.buildfarm.common.Watcher; import build.buildfarm.common.Write;
<<<<<<< import com.google.common.io.ByteStreams; ======= import com.google.common.util.concurrent.SettableFuture; >>>>>>> import com.google.common.collect.Iterables; import com.google.common.io.ByteStreams; import com.google.common.util.concurrent.SettableFuture; <<<<<<< import io.grpc.Status; import io.grpc.Status.Code; ======= import io.grpc.Status; >>>>>>> import io.grpc.Status; import io.grpc.Status.Code; <<<<<<< import java.util.concurrent.ExecutionException; ======= import java.util.concurrent.TimeUnit; >>>>>>> import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; <<<<<<< test1Name, test1DigestUtil, /* channel=*/ null); ======= test1Name, test1DigestUtil, /* channel=*/ null, /* uploader=*/ null, NO_RETRIES, /* retryScheduler=*/ null); >>>>>>> test1Name, test1DigestUtil, /* channel=*/ null); <<<<<<< test2Name, test2DigestUtil, /* channel=*/ null); ======= test2Name, test2DigestUtil, /* channel=*/ null, /* uploader=*/ null, NO_RETRIES, /* retryScheduler=*/ null); >>>>>>> test2Name, test2DigestUtil, /* channel=*/ null); <<<<<<< public void getActionResultReturnsNullForNotFound() throws InterruptedException { AtomicReference<GetActionResultRequest> reference = new AtomicReference<>(); serviceRegistry.addService( new ActionCacheImplBase() { @Override public void getActionResult(GetActionResultRequest request, StreamObserver<ActionResult> responseObserver) { reference.set(request); responseObserver.onError(Status.NOT_FOUND.asException()); } }); Instance instance = newStubInstance("test"); ActionKey actionKey = DIGEST_UTIL.computeActionKey(Action.getDefaultInstance()); assertThat(instance.getActionResult(actionKey)).isNull(); GetActionResultRequest request = reference.get(); assertThat(request.getInstanceName()).isEqualTo(instance.getName()); assertThat(request.getActionDigest()).isEqualTo(actionKey.getDigest()); instance.stop(); ======= public void getActionResultReturnsNull() { Instance instance = new StubInstance( "test", DIGEST_UTIL, /* channel=*/ null, /* uploader=*/ null, NO_RETRIES, /* retryScheduler=*/ null); assertThat(instance.getActionResult(DIGEST_UTIL.computeActionKey(Action.getDefaultInstance()))).isNull(); >>>>>>> public void getActionResultReturnsNullForNotFound() throws InterruptedException { AtomicReference<GetActionResultRequest> reference = new AtomicReference<>(); serviceRegistry.addService( new ActionCacheImplBase() { @Override public void getActionResult(GetActionResultRequest request, StreamObserver<ActionResult> responseObserver) { reference.set(request); responseObserver.onError(Status.NOT_FOUND.asException()); } }); Instance instance = newStubInstance("test"); ActionKey actionKey = DIGEST_UTIL.computeActionKey(Action.getDefaultInstance()); assertThat(instance.getActionResult(actionKey)).isNull(); GetActionResultRequest request = reference.get(); assertThat(request.getInstanceName()).isEqualTo(instance.getName()); assertThat(request.getActionDigest()).isEqualTo(actionKey.getDigest()); instance.stop(); <<<<<<< Instance instance = newStubInstance(instanceName); ======= Instance instance = new StubInstance( instanceName, DIGEST_UTIL, InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(), /* uploader=*/ null, NO_RETRIES, /* retryScheduler=*/ null); >>>>>>> Instance instance = newStubInstance(instanceName); <<<<<<< Instance instance = newStubInstance("findMissingBlobs-test"); ======= String instanceName = "findMissingBlobs-test"; Instance instance = new StubInstance( instanceName, DIGEST_UTIL, 
InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(), /* uploader=*/ null, NO_RETRIES, /* retryScheduler=*/ null); >>>>>>> Instance instance = newStubInstance("findMissingBlobs-test"); <<<<<<< Instance instance = newStubInstance("early-completed-outputStream-test"); String resourceName = "early-completed-output-stream-test"; ======= String instanceName = "early-completed-outputStream-test"; Instance instance = new StubInstance( instanceName, DIGEST_UTIL, InProcessChannelBuilder.forName(fakeServerName).directExecutor().build(), /* uploader=*/ null, NO_RETRIES, /* retryScheduler=*/ null); >>>>>>> Instance instance = newStubInstance("early-completed-outputStream-test");
<<<<<<< import build.buildfarm.common.Watchdog; import build.buildfarm.common.Watcher; ======= import build.buildfarm.common.Write; import build.buildfarm.common.function.InterruptingPredicate; >>>>>>> import build.buildfarm.common.Watchdog; import build.buildfarm.common.Watcher; import build.buildfarm.common.Write;
<<<<<<< import static build.buildfarm.instance.Utils.putBlob; import build.buildfarm.common.Watchdog; import build.buildfarm.common.ContentAddressableStorage; ======= import static com.google.common.collect.Multimaps.synchronizedSetMultimap; import static com.google.common.util.concurrent.Futures.addCallback; import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static java.util.Collections.synchronizedSortedMap; import static java.util.concurrent.Executors.newCachedThreadPool; import build.buildfarm.ac.ActionCache; import build.buildfarm.ac.GrpcActionCache; import build.buildfarm.cas.ContentAddressableStorage; import build.buildfarm.cas.ContentAddressableStorages; >>>>>>> import static build.buildfarm.instance.Utils.putBlob; import static com.google.common.collect.Multimaps.synchronizedSetMultimap; import static com.google.common.util.concurrent.Futures.addCallback; import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static java.util.Collections.synchronizedSortedMap; import static java.util.concurrent.Executors.newCachedThreadPool; import build.buildfarm.ac.ActionCache; import build.buildfarm.ac.GrpcActionCache; import build.buildfarm.cas.ContentAddressableStorage; import build.buildfarm.cas.ContentAddressableStorages; <<<<<<< import build.buildfarm.instance.TreeIterator; import build.buildfarm.instance.TreeIterator.DirectoryEntry; ======= import build.buildfarm.v1test.ActionCacheConfig; import build.buildfarm.v1test.GrpcACConfig; >>>>>>> import build.buildfarm.instance.TreeIterator; import build.buildfarm.instance.TreeIterator.DirectoryEntry; import build.buildfarm.v1test.ActionCacheConfig; import build.buildfarm.v1test.GrpcACConfig; <<<<<<< import com.google.common.util.concurrent.MoreExecutors; import com.google.devtools.remoteexecution.v1test.Action; import com.google.devtools.remoteexecution.v1test.ActionResult; import com.google.devtools.remoteexecution.v1test.Digest; import com.google.devtools.remoteexecution.v1test.Directory; import com.google.devtools.remoteexecution.v1test.ExecuteOperationMetadata; import com.google.devtools.remoteexecution.v1test.Platform; ======= import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.Platform; >>>>>>> import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.Platform; <<<<<<< import com.google.protobuf.util.Durations; import com.google.rpc.PreconditionFailure; import io.grpc.StatusException; import 
java.io.IOException; ======= import io.grpc.Channel; import io.grpc.netty.NegotiationType; import io.grpc.netty.NettyChannelBuilder; >>>>>>> import com.google.protobuf.util.Durations; import com.google.rpc.PreconditionFailure; import io.grpc.StatusException; import io.grpc.Channel; import io.grpc.netty.NegotiationType; import io.grpc.netty.NettyChannelBuilder; import java.io.IOException; <<<<<<< /*contentAddressableStorage=*/ new MemoryLRUContentAddressableStorage(config.getCasMaxSizeBytes()), /*watchers=*/ new ConcurrentHashMap<String, List<Predicate<Operation>>>(), /*outstandingOperations=*/ new TreeMap<String, Operation>()); ======= ContentAddressableStorages.create(config.getCasConfig()), /* watchers=*/ synchronizedSetMultimap( MultimapBuilder .hashKeys() .hashSetValues(/* expectedValuesPerKey=*/ 1) .build()), /* watcherService=*/ newCachedThreadPool(), new OutstandingOperations()); >>>>>>> ContentAddressableStorages.create(config.getCasConfig()), /* watchers=*/ synchronizedSetMultimap( MultimapBuilder .hashKeys() .hashSetValues(/* expectedValuesPerKey=*/ 1) .build()), /* watcherService=*/ newCachedThreadPool(), new OutstandingOperations()); <<<<<<< Map<String, List<Predicate<Operation>>> watchers, Map<String, Operation> outstandingOperations) { ======= SetMultimap<String, Predicate<Operation>> watchers, ExecutorService watcherService, OperationsMap outstandingOperations) { >>>>>>> SetMultimap<String, Predicate<Operation>> watchers, ExecutorService watcherService, OperationsMap outstandingOperations) { <<<<<<< /*completedOperations=*/ new DelegateCASMap<String, Operation>(contentAddressableStorage, Operation.parser(), digestUtil), /*activeBlobWrites=*/ new ConcurrentHashMap<Digest, ByteString>()); ======= MemoryInstance.createCompletedOperationMap(contentAddressableStorage, digestUtil)); >>>>>>> MemoryInstance.createCompletedOperationMap(contentAddressableStorage, digestUtil), /*activeBlobWrites=*/ new ConcurrentHashMap<Digest, ByteString>()); <<<<<<< this.watchers = watchers; streams = new HashMap<String, ByteStringStreamSource>(); queuedOperations = new ArrayList<Operation>(); workers = new ArrayList<Worker>(); requeuers = new HashMap<String, Watchdog>(); operationTimeoutDelays = new HashMap<String, Watchdog>(); ======= this.watchers = watchers; >>>>>>> this.watchers = watchers; <<<<<<< synchronized (queuedOperations) { Preconditions.checkState(!Iterables.any(queuedOperations, (queuedOperation) -> queuedOperation.getName().equals(operation.getName()))); ======= synchronized (queuedOperations) { >>>>>>> synchronized (queuedOperations) { Preconditions.checkState(!Iterables.any(queuedOperations, (queuedOperation) -> queuedOperation.getName().equals(operation.getName()))); <<<<<<< protected void updateOperationWatchers(Operation operation) { synchronized (watchers) { List<Predicate<Operation>> operationWatchers = watchers.get(operation.getName()); if (operationWatchers != null) { if (operation.getDone()) { watchers.remove(operation.getName()); } long unfilteredWatcherCount = operationWatchers.size(); super.updateOperationWatchers(operation); ImmutableList.Builder<Predicate<Operation>> filteredWatchers = new ImmutableList.Builder<>(); long filteredWatcherCount = 0; for (Predicate<Operation> watcher : operationWatchers) { if (watcher.test(operation)) { filteredWatchers.add(watcher); filteredWatcherCount++; ======= protected void updateOperationWatchers(Operation operation) throws InterruptedException { super.updateOperationWatchers(operation); Set<Predicate<Operation>> operationWatchers = 
watchers.get(operation.getName()); synchronized (watchers) { for (Predicate<Operation> watcher : operationWatchers) { ListenableFuture<Boolean> stillWatchingFuture = watcherService.submit(new Callable<Boolean>() { @Override public Boolean call() { return watcher.test(operation) && !operation.getDone(); >>>>>>> protected void updateOperationWatchers(Operation operation) throws InterruptedException { super.updateOperationWatchers(operation); Set<Predicate<Operation>> operationWatchers = watchers.get(operation.getName()); synchronized (watchers) { for (Predicate<Operation> watcher : operationWatchers) { ListenableFuture<Boolean> stillWatchingFuture = watcherService.submit(new Callable<Boolean>() { @Override public Boolean call() { return watcher.test(operation) && !operation.getDone(); <<<<<<< protected void onQueue(Operation operation, Action action) throws IOException, InterruptedException, StatusException { putBlob(this, digestUtil.compute(action), action.toByteString()); ======= protected void onQueue(Operation operation, Action action) throws InterruptedException { putBlob(action.toByteString()); >>>>>>> protected void onQueue(Operation operation, Action action) throws IOException, InterruptedException, StatusException { putBlob(this, digestUtil.compute(action), action.toByteString()); <<<<<<< if (timeout.getSeconds() > maximum.getSeconds() || (timeout.getSeconds() == maximum.getSeconds() && timeout.getNanos() > maximum.getNanos())) { preconditionFailure.addViolationsBuilder() .setType(VIOLATION_TYPE_INVALID) .setSubject(TIMEOUT_OUT_OF_BOUNDS) .setDescription(Durations.toString(timeout) + " > " + Durations.toString(maximum)); } ======= Preconditions.checkState( timeout.getSeconds() < maximum.getSeconds() || (timeout.getSeconds() == maximum.getSeconds() && timeout.getNanos() < maximum.getNanos())); >>>>>>> if (timeout.getSeconds() > maximum.getSeconds() || (timeout.getSeconds() == maximum.getSeconds() && timeout.getNanos() > maximum.getNanos())) { preconditionFailure.addViolationsBuilder() .setType(VIOLATION_TYPE_INVALID) .setSubject(TIMEOUT_OUT_OF_BOUNDS) .setDescription(Durations.toString(timeout) + " > " + Durations.toString(maximum)); } <<<<<<< Watchdog requeuer = new Watchdog(timeout, () -> { System.out.println("REQUEUEING " + operation.getName()); try { putOperation(operation); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } }); ======= Watchdog requeuer = new Watchdog(timeout, () -> requeueOperation(operation)); >>>>>>> Watchdog requeuer = new Watchdog(timeout, () -> { logger.info("REQUEUEING " + operation.getName()); requeueOperation(operation); }); <<<<<<< private void matchSynchronized(Platform platform, MatchListener listener) throws InterruptedException { ImmutableList.Builder<Operation> rejectedOperations = new ImmutableList.Builder<Operation>(); boolean matched = false; while (!matched && !queuedOperations.isEmpty()) { Operation operation = queuedOperations.remove(0); if (satisfiesRequirements(platform, operation)) { matched = true; if (listener.onOperation(operation)) { onDispatched(operation); /* for this context, we need to make the requeue go into a bucket during onOperation } else if (!requeueOnFailure) { rejectedOperations.add(operation); */ ======= @Override public void match( Platform platform, boolean requeueOnFailure, Predicate<Operation> onMatch) throws InterruptedException { synchronized (queuedOperations) { ImmutableList.Builder<Operation> rejectedOperations = new ImmutableList.Builder<Operation>(); boolean matched = false; while 
(!matched && !queuedOperations.isEmpty()) { Operation operation = queuedOperations.remove(0); Command command = expectCommand(operation); if (command == null) { cancelOperation(operation.getName()); } else { if (satisfiesRequirements(platform, command)) { matched = true; if (onMatch.test(operation)) { onDispatched(operation); } else if (!requeueOnFailure) { rejectedOperations.add(operation); } } else { rejectedOperations.add(operation); } >>>>>>> private void matchSynchronized( Platform platform, MatchListener listener) throws InterruptedException { ImmutableList.Builder<Operation> rejectedOperations = ImmutableList.builder(); boolean matched = false; while (!matched && !queuedOperations.isEmpty()) { Operation operation = queuedOperations.remove(0); Command command = expectCommand(operation); if (command == null) { cancelOperation(operation.getName()); } else if (satisfiesRequirements(platform, command)) { matched = true; if (listener.onOperation(operation)) { onDispatched(operation);
<<<<<<< import build.buildfarm.common.DigestUtil; import build.buildfarm.common.DigestUtil.ActionKey; import build.buildfarm.common.Watcher; import build.buildfarm.v1test.QueueEntry; import build.buildfarm.v1test.QueuedOperation; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; ======= import build.buildfarm.common.DigestUtil; import build.buildfarm.common.DigestUtil.ActionKey; import build.buildfarm.common.Write; import build.buildfarm.common.function.InterruptingPredicate; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; >>>>>>> import build.buildfarm.common.DigestUtil; import build.buildfarm.common.DigestUtil.ActionKey; import build.buildfarm.common.Watcher; import build.buildfarm.common.Write; import build.buildfarm.v1test.QueueEntry; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; <<<<<<< import java.util.concurrent.Executor; import javax.annotation.Nullable; ======= import java.util.UUID; import java.util.function.Predicate; >>>>>>> import java.util.List; import java.util.UUID; import java.util.concurrent.Executor; import javax.annotation.Nullable; <<<<<<< ListenableFuture<Iterable<Digest>> findMissingBlobs(Iterable<Digest> digests, Executor executor); ======= Iterable<Digest> findMissingBlobs(Iterable<Digest> digests); boolean containsBlob(Digest digest); Iterable<Digest> putAllBlobs(Iterable<ByteString> blobs) throws IOException, IllegalArgumentException, InterruptedException; >>>>>>> ListenableFuture<Iterable<Digest>> findMissingBlobs(Iterable<Digest> digests, Executor executor); boolean containsBlob(Digest digest); <<<<<<< void getBlob(Digest blobDigest, long offset, long limit, StreamObserver<ByteString> blobObserver); ChunkObserver getWriteBlobObserver(Digest blobDigest); ChunkObserver getWriteOperationStreamObserver(String operationStream); ======= InputStream newBlobInput(Digest digest, long offset) throws IOException; Write getBlobWrite(Digest digest, UUID uuid); ByteString getBlob(Digest blobDigest); ListenableFuture<Iterable<Response>> getAllBlobsFuture(Iterable<Digest> digests); ByteString getBlob(Digest blobDigest, long offset, long limit); Digest putBlob(ByteString blob) throws IOException, IllegalArgumentException, InterruptedException; >>>>>>> void getBlob(Digest blobDigest, long offset, long limit, StreamObserver<ByteString> blobObserver); InputStream newBlobInput(Digest digest, long offset) throws IOException; ListenableFuture<Iterable<Response>> getAllBlobsFuture(Iterable<Digest> digests); <<<<<<< ImmutableList.Builder<Directory> directories); CommittingOutputStream getStreamOutput(String name, long expectedSize); InputStream newStreamInput(String name, long offset) throws IOException; ======= ImmutableList.Builder<Directory> directories) throws IOException, InterruptedException; Write getOperationStreamWrite(String name); InputStream newOperationStreamInput(String name, long offset) throws IOException; >>>>>>> ImmutableList.Builder<Directory> directories) throws IOException, InterruptedException; Write getBlobWrite(Digest digest, UUID uuid); Iterable<Digest> putAllBlobs(Iterable<ByteString> blobs) throws IOException, IllegalArgumentException, InterruptedException; Write getOperationStreamWrite(String name); InputStream newOperationStreamInput(String name, long offset) throws IOException;
<<<<<<<
  private final Consumer<Digest> onPut;
=======
  @GuardedBy("this")
>>>>>>>
  private final Consumer<Digest> onPut;
  @GuardedBy("this")
<<<<<<<
  public InputStream newInput(Digest digest, long offset) throws IOException {
    // implicit int bounds compare against size bytes
    if (offset < 0 || offset > digest.getSizeBytes()) {
      throw new IndexOutOfBoundsException(
          String.format(
              "%d is out of bounds for blob %s",
              offset,
              DigestUtil.toString(digest)));
=======
  public synchronized InputStream newInput(Digest digest, long offset) throws IOException {
    Entry e = storage.get(digest);
    if (e == null) {
      throw new NoSuchFileException(digest.getHash());
>>>>>>>
  public synchronized InputStream newInput(Digest digest, long offset) throws IOException {
    // implicit int bounds compare against size bytes
    if (offset < 0 || offset > digest.getSizeBytes()) {
      throw new IndexOutOfBoundsException(
          String.format(
              "%d is out of bounds for blob %s",
              offset,
              DigestUtil.toString(digest)));
<<<<<<< import static java.util.concurrent.TimeUnit.DAYS; import static java.util.concurrent.TimeUnit.MICROSECONDS; import build.buildfarm.v1test.ExecutingOperationMetadata; ======= import static com.google.common.collect.Iterables.filter; import static com.google.common.collect.Iterables.transform; import static build.buildfarm.v1test.ExecutionPolicy.PolicyCase.WRAPPER; import static java.util.concurrent.TimeUnit.MICROSECONDS; import build.buildfarm.v1test.ExecutionPolicy; import com.google.common.base.Stopwatch; >>>>>>> import static com.google.common.collect.Iterables.filter; import static com.google.common.collect.Iterables.transform; import static build.buildfarm.v1test.ExecutionPolicy.PolicyCase.WRAPPER; import static java.util.concurrent.TimeUnit.DAYS; import static java.util.concurrent.TimeUnit.MICROSECONDS; import static java.util.logging.Level.SEVERE; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecuteResponse; import build.bazel.remote.execution.v2.Platform; import build.bazel.remote.execution.v2.Platform.Property; import build.buildfarm.v1test.ExecutingOperationMetadata; import build.buildfarm.v1test.ExecutionPolicy; import com.google.common.base.Stopwatch; <<<<<<< private final PipelineStage owner; private static final OutputStream nullOutputStream = ByteStreams.nullOutputStream(); ======= private final ExecuteActionStage owner; private int exitCode = INCOMPLETE_EXIT_CODE; >>>>>>> private final ExecuteActionStage owner; private int exitCode = INCOMPLETE_EXIT_CODE; <<<<<<< private long runInterruptible() throws InterruptedException { ======= private long runInterruptible(Stopwatch stopwatch) throws InterruptedException { >>>>>>> private long runInterruptible(Stopwatch stopwatch) throws InterruptedException { <<<<<<< resultBuilder); } catch (IOException e) { e.printStackTrace(); try { workerContext.destroyExecDir(operationContext.execDir); } catch (IOException destroyActionRootException) { destroyActionRootException.printStackTrace(); } ======= resultBuilder, policies.build()); } catch (IOException ex) { >>>>>>> resultBuilder, policies.build()); } catch (IOException e) { logger.log(SEVERE, "error executing command for " + operation.getName(), e); try { workerContext.destroyExecDir(operationContext.execDir); } catch (IOException destroyExecDirException) { logger.log( SEVERE, "error destroying exec dir " + operationContext.execDir.toString(), destroyExecDirException); } <<<<<<< Duration executedIn = Durations.fromNanos(System.nanoTime() - executeStartAt); workerContext.logInfo("Executor: Operation " + operation.getName() + " Executed command: exit code " + resultBuilder.getExitCode()); ======= logger.fine( String.format( "Executor::executeCommand(%s): Completed command: exit code %d", operation.getName(), resultBuilder.getExitCode())); >>>>>>> Duration executedIn = Durations.fromNanos(System.nanoTime() - executeStartAt); logger.info( String.format( "Executor::executeCommand(%s): Completed command: exit code %d", operation.getName(), resultBuilder.getExitCode())); <<<<<<< long waitStartTime = System.nanoTime(); operation = operation.toBuilder() .setResponse(Any.pack(ExecuteResponse.newBuilder() .setResult(resultBuilder.build()) .setStatus(com.google.rpc.Status.newBuilder() .setCode(statusCode.getNumber()) .build()) .build())) .build(); OperationContext reportOperationContext = operationContext.toBuilder() 
.setOperation(operation) .setExecutedIn(executedIn) ======= long executeUSecs = stopwatch.elapsed(MICROSECONDS); if (owner.output().claim()) { operation = operation.toBuilder() .setResponse(Any.pack(ExecuteResponse.newBuilder() .setResult(resultBuilder) .setStatus(com.google.rpc.Status.newBuilder() .setCode(statusCode.getNumber())) .build())) >>>>>>> long executeUSecs = stopwatch.elapsed(MICROSECONDS); operation = operation.toBuilder() .setResponse(Any.pack(ExecuteResponse.newBuilder() .setResult(resultBuilder.build()) .setStatus(com.google.rpc.Status.newBuilder() .setCode(statusCode.getNumber()) .build()) .build())) .build(); OperationContext reportOperationContext = operationContext.toBuilder() .setOperation(operation) .setExecutedIn(executedIn) <<<<<<< if (owner.output().claim()) { try { owner.output().put(reportOperationContext); } catch (InterruptedException e) { owner.output().release(); throw e; } ======= owner.output().put(new OperationContext( operation, operationContext.execDir, operationContext.metadata, operationContext.action, operationContext.command)); >>>>>>> if (owner.output().claim()) { try { owner.output().put(reportOperationContext); } catch (InterruptedException e) { owner.output().release(); throw e; } <<<<<<< long waitTime = System.nanoTime() - waitStartTime; return waitTime; ======= return stopwatch.elapsed(MICROSECONDS) - executeUSecs; >>>>>>> return stopwatch.elapsed(MICROSECONDS) - executeUSecs; <<<<<<< } catch (Exception e) { e.printStackTrace(); try { owner.error().put(operationContext); } catch (InterruptedException errorEx) { errorEx.printStackTrace(); } throw e; } finally { owner.release(); ======= } finally { owner.releaseExecutor( operationContext.operation.getName(), stopwatch.elapsed(MICROSECONDS), stallUSecs, exitCode); >>>>>>> } catch (Exception e) { logger.log(SEVERE, "errored while executing " + operationContext.operation.getName(), e); try { owner.error().put(operationContext); } catch (InterruptedException errorEx) { logger.log(SEVERE, "interrupted while erroring " + operationContext.operation.getName(), errorEx); } throw e; } finally { owner.releaseExecutor( operationContext.operation.getName(), stopwatch.elapsed(MICROSECONDS), stallUSecs, exitCode);
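A brief hedged sketch of the timing change adopted above: the resolution measures the execute and report phases as deltas of a single Guava Stopwatch rather than separate System.nanoTime() captures. The class and method names below are illustrative only, not the Buildfarm Executor API.

import static java.util.concurrent.TimeUnit.MICROSECONDS;

import com.google.common.base.Stopwatch;

// One stopwatch for the whole run; per-phase durations are differences of
// successive elapsed() readings, as in the resolved runInterruptible(stopwatch).
final class PhaseTiming {
  static long[] timePhases(Runnable execute, Runnable report) {
    Stopwatch stopwatch = Stopwatch.createStarted();
    execute.run();
    long executeUSecs = stopwatch.elapsed(MICROSECONDS);
    report.run();
    long reportUSecs = stopwatch.elapsed(MICROSECONDS) - executeUSecs;
    return new long[] {executeUSecs, reportUSecs};
  }
}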
<<<<<<<
=======
import build.buildfarm.common.function.InterruptingConsumer;
import build.buildfarm.v1test.WorkerConfig;
>>>>>>>
<<<<<<<
    public DigestUtil getDigestUtil() {
      return null;
=======
    public void match(InterruptingConsumer<Operation> onMatch) throws InterruptedException {
      onMatch.accept(queue.remove(0));
    }

    @Override
    public void requeue(Operation operation) {
      assertThat(operation.getName()).isEqualTo("bad");
>>>>>>>
    public DigestUtil getDigestUtil() {
      return null;
    }

    @Override
    public void match(MatchListener listener) throws InterruptedException {
      listener.onOperation(queue.remove(0));
<<<<<<<
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.devtools.remoteexecution.v1test.Digest;
=======
import build.bazel.remote.execution.v2.Digest;
>>>>>>>
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
<<<<<<< import static java.util.concurrent.TimeUnit.DAYS; ======= import static com.google.common.collect.Maps.uniqueIndex; >>>>>>> import static com.google.common.collect.Maps.uniqueIndex; import static java.util.concurrent.TimeUnit.DAYS; import static java.util.logging.Level.SEVERE; <<<<<<< import com.google.devtools.remoteexecution.v1test.Action; import com.google.devtools.remoteexecution.v1test.ActionResult; import com.google.devtools.remoteexecution.v1test.Digest; import com.google.devtools.remoteexecution.v1test.Directory; import com.google.devtools.remoteexecution.v1test.DirectoryNode; import com.google.devtools.remoteexecution.v1test.ExecuteOperationMetadata; import com.google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.Stage; import com.google.devtools.remoteexecution.v1test.FileNode; ======= import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.DirectoryNode; import build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage; import build.bazel.remote.execution.v2.FileNode; import build.bazel.remote.execution.v2.Platform; >>>>>>> import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.DirectoryNode; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage; import build.bazel.remote.execution.v2.FileNode; import build.bazel.remote.execution.v2.Platform; <<<<<<< digestUtil, createChannel(instanceEndpoint.getTarget()), null); } private static Instance createInstance( String name, DigestUtil digestUtil, ManagedChannel channel, ByteStreamUploader uploader) { return new StubInstance( name, digestUtil, channel, 60 /* FIXME CONFIG */, TimeUnit.SECONDS, createStubRetrier(), uploader); ======= createChannel(instanceEndpoint.getTarget()), null, digestUtil); } private static Instance createInstance( String name, Channel channel, ByteStreamUploader uploader, DigestUtil digestUtil) { return new StubInstance(name, digestUtil, channel, uploader); >>>>>>> createChannel(instanceEndpoint.getTarget()), null, digestUtil); } private static Instance createInstance( String name, ManagedChannel channel, ByteStreamUploader uploader, DigestUtil digestUtil) { return new StubInstance( name, digestUtil, channel, 60 /* FIXME CONFIG */, TimeUnit.SECONDS, createStubRetrier(), uploader); <<<<<<< DigestUtil digestUtil = new DigestUtil(hashFunction); InstanceEndpoint casEndpoint = config.getContentAddressableStorage(); ManagedChannel casChannel = createChannel(casEndpoint.getTarget()); uploader = createStubUploader(casChannel); casInstance = createInstance(casEndpoint.getInstanceName(), digestUtil, casChannel, uploader); ======= digestUtil = new DigestUtil(hashFunction); InstanceEndpoint casEndpoint = config.getContentAddressableStorage(); Channel casChannel = createChannel(casEndpoint.getTarget()); uploader = createStubUploader(casChannel); casInstance = createInstance(casEndpoint.getInstanceName(), casChannel, uploader, digestUtil); >>>>>>> DigestUtil digestUtil = new DigestUtil(hashFunction); InstanceEndpoint casEndpoint = config.getContentAddressableStorage(); 
ManagedChannel casChannel = createChannel(casEndpoint.getTarget()); uploader = createStubUploader(casChannel); casInstance = createInstance(casEndpoint.getInstanceName(), casChannel, uploader, digestUtil); <<<<<<< WorkerContext context = new WorkerContext() { @Override public String getName() { try { return java.net.InetAddress.getLocalHost().getHostName(); } catch (java.net.UnknownHostException e) { throw new RuntimeException(e); } } ======= WorkerContext workerContext = new WorkerContext() { Map<String, ExecutionPolicy> policies = uniqueIndex( config.getExecutionPoliciesList(), (policy) -> policy.getName()); >>>>>>> WorkerContext context = new WorkerContext() { Map<String, ExecutionPolicy> policies = uniqueIndex( config.getExecutionPoliciesList(), (policy) -> policy.getName()); @Override public String getName() { try { return java.net.InetAddress.getLocalHost().getHostName(); } catch (java.net.UnknownHostException e) { throw new RuntimeException(e); } } <<<<<<< public void requeue(Operation operation) throws InterruptedException { try { deactivate(operation); ExecuteOperationMetadata metadata = operation.getMetadata().unpack(ExecuteOperationMetadata.class); ExecuteOperationMetadata executingMetadata = metadata.toBuilder() .setStage(ExecuteOperationMetadata.Stage.QUEUED) .build(); operation = operation.toBuilder() .setMetadata(Any.pack(executingMetadata)) .build(); operationQueueInstance.putOperation(operation); } catch (InvalidProtocolBufferException e) { e.printStackTrace(); } } @Override public void deactivate(Operation operation) { activeOperations.remove(operation.getName()); } @Override public boolean putOperation(Operation operation, Action action) throws InterruptedException { return operationQueueInstance.putOperation(operation); ======= public void match(Predicate<Operation> onMatch) throws InterruptedException { operationQueueInstance.match( getPlatform(), config.getRequeueOnFailure(), onMatch); >>>>>>> public void requeue(Operation operation) throws InterruptedException { try { deactivate(operation); ExecuteOperationMetadata metadata = operation.getMetadata().unpack(ExecuteOperationMetadata.class); ExecuteOperationMetadata executingMetadata = metadata.toBuilder() .setStage(ExecuteOperationMetadata.Stage.QUEUED) .build(); operation = operation.toBuilder() .setMetadata(Any.pack(executingMetadata)) .build(); operationQueueInstance.putOperation(operation); } catch (InvalidProtocolBufferException e) { logger.log(SEVERE, "error unpacking execute operation metadata for " + operation.getName(), e); } } @Override public void deactivate(Operation operation) { activeOperations.remove(operation.getName()); <<<<<<< fileCache.removeDirectoryAsync(execDir); ======= CASFileCache.removeDirectory(root); } @Override public Path getRoot() { return root; } @Override public ExecutionPolicy getExecutionPolicy(String name) { return policies.get(name); } @Override public void removeDirectory(Path path) throws IOException { CASFileCache.removeDirectory(path); } @Override public boolean putOperation(Operation operation) throws InterruptedException { return operationQueueInstance.putOperation(operation); >>>>>>> fileCache.removeDirectoryAsync(execDir); } @Override public ExecutionPolicy getExecutionPolicy(String name) { return policies.get(name); } @Override public boolean putOperation(Operation operation, Action action) throws InterruptedException { return operationQueueInstance.putOperation(operation); <<<<<<< return operationQueueInstance.getStreamOutput(name, -1); } @Override public void 
putActionResult(ActionKey actionKey, ActionResult actionResult) { ======= return operationQueueInstance.getStreamOutput(name); } @Override public void putActionResult(ActionKey actionKey, ActionResult actionResult) throws InterruptedException { >>>>>>> return operationQueueInstance.getStreamOutput(name, -1); } @Override public void putActionResult(ActionKey actionKey, ActionResult actionResult) throws InterruptedException { <<<<<<< PipelineStage completeStage = new PutOperationStage(context::deactivate); PipelineStage errorStage = completeStage; /* new ErrorStage(); */ PipelineStage reportResultStage = new ReportResultStage(context, completeStage, errorStage); PipelineStage executeActionStage = new ExecuteActionStage(context, reportResultStage, errorStage); ======= PipelineStage errorStage = new ReportResultStage.NullStage("ErrorStage"); /* ErrorStage(); */ PipelineStage reportResultStage = new ReportResultStage(workerContext, errorStage); PipelineStage executeActionStage = new ExecuteActionStage(workerContext, reportResultStage, errorStage); >>>>>>> PipelineStage completeStage = new PutOperationStage(context::deactivate); PipelineStage errorStage = completeStage; /* new ErrorStage(); */ PipelineStage reportResultStage = new ReportResultStage(context, completeStage, errorStage); PipelineStage executeActionStage = new ExecuteActionStage(context, reportResultStage, errorStage);
<<<<<<< import build.buildfarm.common.DigestUtil; ======= import build.buildfarm.common.DigestUtil; import build.buildfarm.common.Write; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; >>>>>>> import build.buildfarm.common.DigestUtil; import build.buildfarm.common.Write; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; <<<<<<< import com.google.protobuf.ByteString; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.file.NoSuchFileException; ======= import com.google.common.collect.Maps; import com.google.common.util.concurrent.ListenableFuture; import com.google.protobuf.ByteString; import com.google.rpc.Code; import com.google.rpc.Status; import io.grpc.protobuf.StatusProto; import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; >>>>>>> import com.google.common.collect.Maps; import com.google.common.util.concurrent.ListenableFuture; import com.google.protobuf.ByteString; import com.google.rpc.Code; import com.google.rpc.Status; import io.grpc.protobuf.StatusProto; import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; <<<<<<< import java.util.function.Consumer; ======= import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.function.Function; >>>>>>> import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; <<<<<<< private Consumer<Digest> onPut; private final Map<Digest, Entry> storage = new HashMap<>(); private final Map<Digest, Object> mutexes = new HashMap<>(); private final Entry header = new SentinelEntry(); private transient long sizeInBytes = 0; ======= private final Map<Digest, Entry> storage; private final Entry header; private long sizeInBytes; >>>>>>> private final Consumer<Digest> onPut; private final Map<Digest, Entry> storage; private final Entry header; private long sizeInBytes; <<<<<<< this.onPut = onPut; ======= sizeInBytes = 0; header = new SentinelEntry(); header.before = header.after = header; storage = Maps.newHashMap(); } @Override public synchronized boolean contains(Digest digest) { return get(digest) != null; >>>>>>> this.onPut = onPut; sizeInBytes = 0; header = new SentinelEntry(); header.before = header.after = header; storage = Maps.newHashMap(); } @Override public synchronized boolean contains(Digest digest) { return get(digest) != null;
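The resolution above keeps the in-memory CAS bookkeeping (a storage map, a sentinel-linked entry list, and a running sizeInBytes). As a hedged, JDK-only illustration of the same idea, here is a size-bounded LRU keyed by digest hash; every name is invented and this is not the Buildfarm memory CAS itself.

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

// Size-bounded LRU standing in for the storage map + linked entry list above.
// Values are blob sizes in bytes; access order makes get() refresh recency.
final class ByteSizeLru {
  private final long maxSizeInBytes;
  private long sizeInBytes = 0;
  private final LinkedHashMap<String, Long> storage =
      new LinkedHashMap<>(16, 0.75f, /* accessOrder= */ true);

  ByteSizeLru(long maxSizeInBytes) {
    this.maxSizeInBytes = maxSizeInBytes;
  }

  synchronized boolean contains(String digestHash) {
    return storage.get(digestHash) != null; // also refreshes LRU order
  }

  synchronized void put(String digestHash, long blobSize) {
    Long previous = storage.put(digestHash, blobSize);
    sizeInBytes += blobSize - (previous == null ? 0 : previous);
    // evict least-recently-used entries until the byte budget fits again
    Iterator<Map.Entry<String, Long>> it = storage.entrySet().iterator();
    while (sizeInBytes > maxSizeInBytes && it.hasNext()) {
      Map.Entry<String, Long> eldest = it.next();
      if (eldest.getKey().equals(digestHash)) {
        continue; // never evict the entry we just inserted
      }
      sizeInBytes -= eldest.getValue();
      it.remove();
    }
  }
}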
<<<<<<<
    try {
      getInstanceTree(
          instance,
          request.getRootDigest(),
          request.getPageToken(),
          pageSize,
          responseObserver);
    } catch (IOException e) {
      responseObserver.onError(Status.fromThrowable(e).asException());
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
=======
    String pageToken = request.getPageToken();
    try {
      do {
        ImmutableList.Builder<Directory> directories = new ImmutableList.Builder<>();
        String nextPageToken = instance.getTree(
            request.getRootDigest(), pageSize, pageToken, directories);
        responseObserver.onNext(GetTreeResponse.newBuilder()
            .addAllDirectories(directories.build())
            .setNextPageToken(nextPageToken)
            .build());
        pageToken = nextPageToken;
      } while (!pageToken.isEmpty());
      responseObserver.onCompleted();
    } catch (IOException e) {
      responseObserver.onError(Status.fromThrowable(e).asException());
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
>>>>>>>
    getInstanceTree(
        instance,
        request.getRootDigest(),
        request.getPageToken(),
        pageSize,
        responseObserver);
<<<<<<< import build.buildfarm.v1test.CompletedOperationMetadata; import com.google.common.collect.Iterables; import com.google.devtools.remoteexecution.v1test.ActionResult; import com.google.devtools.remoteexecution.v1test.ExecuteOperationMetadata; import com.google.devtools.remoteexecution.v1test.ExecuteResponse; ======= import build.buildfarm.instance.stub.Chunker; import build.buildfarm.v1test.CASInsertionPolicy; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecuteResponse; import build.bazel.remote.execution.v2.FileNode; import build.bazel.remote.execution.v2.OutputDirectory; import build.bazel.remote.execution.v2.OutputFile; import build.bazel.remote.execution.v2.Tree; >>>>>>> import build.buildfarm.instance.stub.Chunker; import build.buildfarm.v1test.CompletedOperationMetadata; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; <<<<<<< import com.google.protobuf.util.Durations; import io.grpc.Deadline; import io.grpc.StatusRuntimeException; ======= import com.google.rpc.Status; import com.google.rpc.Code; >>>>>>> import com.google.protobuf.util.Durations; import com.google.rpc.Status; import com.google.rpc.Code; import io.grpc.Deadline; import io.grpc.StatusRuntimeException; <<<<<<< ======= import java.io.InputStream; import java.nio.file.FileVisitResult; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Stack; import java.util.function.Consumer; import java.util.concurrent.BlockingQueue; >>>>>>> import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; <<<<<<< import java.util.concurrent.BlockingQueue; ======= import java.util.logging.Logger; >>>>>>> import java.util.concurrent.BlockingQueue; import java.util.logging.Logger; <<<<<<< ======= @VisibleForTesting public void uploadOutputs( ActionResult.Builder result, Path execRoot, Collection<String> outputFiles, Collection<String> outputDirs) throws IOException, InterruptedException { UploadManifest manifest = new UploadManifest( getDigestUtil(), result, execRoot, /* allowSymlinks= */ true, workerContext.getInlineContentLimit()); manifest.addFiles( Iterables.transform(outputFiles, (file) -> execRoot.resolve(file)), workerContext.getFileCasPolicy()); manifest.addDirectories( Iterables.transform(outputDirs, (dir) -> execRoot.resolve(dir))); /* put together our outputs and update the result */ if (result.getStdoutRaw().size() > 0) { manifest.addContent( result.getStdoutRaw(), workerContext.getStdoutCasPolicy(), result::setStdoutRaw, result::setStdoutDigest); } if (result.getStderrRaw().size() > 0) { manifest.addContent( result.getStderrRaw(), workerContext.getStderrCasPolicy(), result::setStderrRaw, result::setStderrDigest); } List<Chunker> filesToUpload = new 
ArrayList<>(); Map<Digest, Path> digestToFile = manifest.getDigestToFile(); Map<Digest, Chunker> digestToChunkers = manifest.getDigestToChunkers(); Collection<Digest> digests = new ArrayList<>(); digests.addAll(digestToFile.keySet()); digests.addAll(digestToChunkers.keySet()); for (Digest digest : digests) { Chunker chunker; Path file = digestToFile.get(digest); if (file != null) { chunker = new Chunker(file, digest); } else { chunker = digestToChunkers.get(digest); } if (chunker != null) { filesToUpload.add(chunker); } } if (!filesToUpload.isEmpty()) { workerContext.getUploader().uploadBlobs(filesToUpload); } } >>>>>>>
<<<<<<< MockitoAnnotations.initMocks(this); blobs = Maps.newHashMap(); putService = newSingleThreadExecutor(); storage = Maps.newConcurrentMap(); ======= blobs = new HashMap<Digest, ByteString>(); >>>>>>> MockitoAnnotations.initMocks(this); blobs = Maps.newHashMap(); putService = newSingleThreadExecutor(); storage = Maps.newConcurrentMap(); <<<<<<< DIGEST_UTIL, /* expireService=*/ newDirectExecutorService(), storage, onPut, onExpire) { @Override protected InputStream newExternalInput(Digest digest, long offset) { ByteString content = blobs.get(digest); if (content == null) { return new BrokenInputStream(new IOException("NOT_FOUND: " + DigestUtil.toString(digest))); } return content.substring((int) offset).newInput(); } }; } @After public void tearDown() { if (!shutdownAndAwaitTermination(putService, 1, SECONDS)) { throw new RuntimeException("could not shut down put service"); } ======= DIGEST_UTIL); >>>>>>> DIGEST_UTIL, /* expireService=*/ newDirectExecutorService(), storage, onPut, onExpire) { @Override protected InputStream newExternalInput(Digest digest, long offset) { ByteString content = blobs.get(digest); if (content == null) { return new BrokenInputStream(new IOException("NOT_FOUND: " + DigestUtil.toString(digest))); } return content.substring((int) offset).newInput(); } }; } @After public void tearDown() { if (!shutdownAndAwaitTermination(putService, 1, SECONDS)) { throw new RuntimeException("could not shut down put service"); } <<<<<<< CASFileCache fileCache = new CASFileCache(root, /* maxSizeInBytes=*/ 1024, DIGEST_UTIL, /* expireService=*/ newDirectExecutorService()) { @Override protected InputStream newExternalInput(Digest digest, long offset) throws IOException, InterruptedException { return mockInputStreamFactory.newInput(digest, offset); } }; ======= CASFileCache fileCache = new CASFileCache( mockInputStreamFactory, root, /* maxSizeInBytes=*/ 1024, DIGEST_UTIL); >>>>>>> CASFileCache fileCache = new CASFileCache( root, /* maxSizeInBytes=*/ 1024, DIGEST_UTIL, /* expireService=*/ newDirectExecutorService()) { @Override protected InputStream newExternalInput(Digest digest, long offset) throws IOException, InterruptedException { return mockInputStreamFactory.newInput(digest, offset); } };
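The test resolution above overrides newExternalInput to serve blob bytes from an in-memory map and to fail for unknown digests. A hedged sketch of that test double in isolation, using plain string keys in place of Digest protos:

import com.google.protobuf.ByteString;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;

// Minimal in-memory input factory for tests: serve stored content from the
// requested offset, or fail with a NOT_FOUND-style IOException for missing keys.
final class FakeExternalInput {
  private final Map<String, ByteString> blobs = new HashMap<>();

  void put(String key, ByteString content) {
    blobs.put(key, content);
  }

  InputStream newInput(String key, long offset) throws IOException {
    ByteString content = blobs.get(key);
    if (content == null) {
      throw new IOException("NOT_FOUND: " + key);
    }
    return content.substring((int) offset).newInput();
  }
}

For example, put("abc", ByteString.copyFromUtf8("Hello, World")) followed by newInput("abc", 7) yields a stream over "World".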
<<<<<<< import static com.google.common.base.Preconditions.checkState; import static java.util.logging.Level.SEVERE; ======= import build.bazel.remote.execution.v2.ActionCacheGrpc; import build.bazel.remote.execution.v2.ActionCacheGrpc.ActionCacheBlockingStub; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc.ContentAddressableStorageBlockingStub; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecutionPolicy; import build.bazel.remote.execution.v2.FindMissingBlobsRequest; import build.bazel.remote.execution.v2.FindMissingBlobsResponse; import build.bazel.remote.execution.v2.GetTreeRequest; import build.bazel.remote.execution.v2.GetTreeResponse; import build.bazel.remote.execution.v2.Platform; import build.bazel.remote.execution.v2.ResultsCachePolicy; import build.bazel.remote.execution.v2.ServerCapabilities; import build.bazel.remote.execution.v2.UpdateActionResultRequest; >>>>>>> import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.Futures.immediateFuture; import static com.google.common.util.concurrent.Futures.transform; import static java.util.logging.Level.SEVERE; import build.bazel.remote.execution.v2.ActionCacheGrpc; import build.bazel.remote.execution.v2.ActionCacheGrpc.ActionCacheBlockingStub; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc.ContentAddressableStorageBlockingStub; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc.ContentAddressableStorageFutureStub; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecutionPolicy; import build.bazel.remote.execution.v2.FindMissingBlobsRequest; import build.bazel.remote.execution.v2.FindMissingBlobsResponse; import build.bazel.remote.execution.v2.GetActionResultRequest; import build.bazel.remote.execution.v2.GetTreeRequest; import build.bazel.remote.execution.v2.GetTreeResponse; import build.bazel.remote.execution.v2.Platform; import build.bazel.remote.execution.v2.RequestMetadata; import build.bazel.remote.execution.v2.ResultsCachePolicy; import build.bazel.remote.execution.v2.ServerCapabilities; import build.bazel.remote.execution.v2.UpdateActionResultRequest; <<<<<<< import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import com.google.longrunning.CancelOperationRequest; import com.google.longrunning.DeleteOperationRequest; import com.google.longrunning.GetOperationRequest; import com.google.longrunning.ListOperationsRequest; import com.google.longrunning.ListOperationsResponse; import com.google.longrunning.OperationsGrpc; import com.google.longrunning.OperationsGrpc.OperationsBlockingStub; import build.bazel.remote.execution.v2.ActionCacheGrpc; import build.bazel.remote.execution.v2.ActionCacheGrpc.ActionCacheBlockingStub; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc; import 
build.bazel.remote.execution.v2.ContentAddressableStorageGrpc.ContentAddressableStorageBlockingStub; import build.bazel.remote.execution.v2.ContentAddressableStorageGrpc.ContentAddressableStorageFutureStub; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecuteRequest; import build.bazel.remote.execution.v2.ExecutionGrpc; import build.bazel.remote.execution.v2.ExecutionGrpc.ExecutionFutureStub; import build.bazel.remote.execution.v2.ExecutionPolicy; import build.bazel.remote.execution.v2.FindMissingBlobsRequest; import build.bazel.remote.execution.v2.FindMissingBlobsResponse; import build.bazel.remote.execution.v2.GetActionResultRequest; import build.bazel.remote.execution.v2.GetTreeRequest; import build.bazel.remote.execution.v2.GetTreeResponse; import build.bazel.remote.execution.v2.Platform; import build.bazel.remote.execution.v2.RequestMetadata; import build.bazel.remote.execution.v2.ResultsCachePolicy; import build.bazel.remote.execution.v2.ServerCapabilities; import build.bazel.remote.execution.v2.UpdateActionResultRequest; ======= import com.google.common.util.concurrent.SettableFuture; >>>>>>> import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import com.google.longrunning.CancelOperationRequest; import com.google.longrunning.DeleteOperationRequest; import com.google.longrunning.GetOperationRequest; import com.google.longrunning.ListOperationsRequest; import com.google.longrunning.ListOperationsResponse; import com.google.longrunning.OperationsGrpc; import com.google.longrunning.OperationsGrpc.OperationsBlockingStub; <<<<<<< import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; ======= import java.util.concurrent.ExecutionException; >>>>>>> import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; <<<<<<< long writtenBytes = 0; ======= long written_bytes = 0; SettableFuture<WriteResponse> writeResponseFuture = SettableFuture.create(); >>>>>>> long writtenBytes = 0; SettableFuture<WriteResponse> writeResponseFuture = SettableFuture.create(); <<<<<<< public void onNext(WriteResponse reply) { checkState(reply.getCommittedSize() == writtenBytes); requestObserver.onCompleted(); committedFuture.set(reply.getCommittedSize()); ======= public void onNext(WriteResponse response) { writeResponseFuture.set(response); >>>>>>> public void onNext(WriteResponse response) { writeResponseFuture.set(response); <<<<<<< committedFuture.setException(t); ======= writeResponseFuture.setException( new StatusRuntimeException(Status.fromThrowable(t))); >>>>>>> writeResponseFuture.setException( new StatusRuntimeException(Status.fromThrowable(t))); <<<<<<< if (!closed) { logger.severe("Server closed connection before output stream for " + resourceName + " at " + writtenBytes); // FIXME(werkt) better error, status committedFuture.setException( new RuntimeException("Server closed connection before output stream.")); ======= if (!closed && !writeResponseFuture.isDone()) { writeResponseFuture.setException( new RuntimeException("Server closed connection before output stream.")); >>>>>>> if (!closed && !writeResponseFuture.isDone()) { logger.severe("Server closed connection before output stream for 
" + resourceName + " at " + writtenBytes); // FIXME(werkt) better error, status writeResponseFuture.setException( new RuntimeException("Server closed connection before output stream.")); <<<<<<< public void close() { if (!closed) { closed = true; requestObserver.onNext(WriteRequest.newBuilder() .setFinishWrite(true) .build()); } ======= public void close() throws IOException { boolean finish = !closed && !writeResponseFuture.isDone(); if (finish) { closed = true; requestObserver.onNext(WriteRequest.newBuilder() .setResourceName(resourceName) .setFinishWrite(true) .build()); requestObserver.onCompleted(); } if (checkWriteResponse() != written_bytes) { throw new IOException("committed_size did not match bytes written"); } >>>>>>> public void close() throws IOException { boolean finish = !closed && !writeResponseFuture.isDone(); if (finish) { closed = true; if (expectedSize < 0 || writtenBytes < expectedSize) { WriteRequest.Builder builder = WriteRequest.newBuilder() .setFinishWrite(true); if (writtenBytes == 0) { builder.setResourceName(resourceName); } requestObserver.onNext(builder.build()); } requestObserver.onCompleted(); } if (checkWriteResponse() != writtenBytes) { throw new IOException("committed_size did not match bytes written"); } <<<<<<< throw new IOException("stream is closed"); } closed = isFinishWrite(writtenBytes + len); WriteRequest request = createWriteRequest(ByteString.copyFrom(b, off, len), closed); requestObserver.onNext(request); writtenBytes += len; } @Override public ListenableFuture<Long> getCommittedFuture() { return committedFuture; ======= throw new IOException(); } if (writeResponseFuture.isDone()) { long committedSize = checkWriteResponse(); throw new IOException("write response with committed_size " + committedSize + " received before write"); } requestObserver.onNext(WriteRequest.newBuilder() .setResourceName(resourceName) .setData(ByteString.copyFrom(b, off, len)) .setWriteOffset(written_bytes) .setFinishWrite(false) .build()); written_bytes += len; >>>>>>> throw new IOException("stream is closed"); } if (writeResponseFuture.isDone()) { long committedSize = checkWriteResponse(); throw new IOException("write response with committed_size " + committedSize + " received before write"); } if (expectedSize >= 0 && writtenBytes + len > expectedSize) { throw new IOException("write of " + len + " would exceed expectedSize by " + (writtenBytes + len - expectedSize)); } WriteRequest request = createWriteRequest( ByteString.copyFrom(b, off, len), isFinishWrite(writtenBytes + len)); requestObserver.onNext(request); writtenBytes += len; } @Override public ListenableFuture<Long> getCommittedFuture() { return transform(writeResponseFuture, (writeResponse) -> writeResponse.getCommittedSize());
<<<<<<< import build.buildfarm.instance.TokenizableIterator; import build.buildfarm.instance.TreeIterator; import build.buildfarm.instance.TreeIterator.DirectoryEntry; ======= >>>>>>> <<<<<<< import com.google.common.util.concurrent.MoreExecutors; ======= >>>>>>> <<<<<<< import com.google.protobuf.util.Durations; import com.google.rpc.PreconditionFailure; ======= import com.google.rpc.PreconditionFailure; import com.google.rpc.PreconditionFailure.Violation; >>>>>>> import com.google.rpc.PreconditionFailure; <<<<<<< import io.grpc.StatusException; ======= import java.io.IOException; >>>>>>> import java.io.IOException; <<<<<<< protected void validateAction( Action action, PreconditionFailure.Builder preconditionFailure) throws InterruptedException { ======= protected void validateAction( Action action, PreconditionFailure.Builder preconditionFailure) { >>>>>>> protected void validateAction( Action action, PreconditionFailure.Builder preconditionFailure) throws InterruptedException, StatusException { <<<<<<< protected TokenizableIterator<DirectoryEntry> createTreeIterator( Digest rootDigest, String pageToken) throws IOException, InterruptedException { ExecutorService service = newDirectExecutorService(); return new TreeIterator((digest) -> expect(digest, Directory.parser(), service), rootDigest, pageToken); ======= protected TokenizableIterator<DirectoryEntry> createTreeIterator( Digest rootDigest, String pageToken) { return new TreeIterator(this::getBlob, rootDigest, pageToken); >>>>>>> protected TokenizableIterator<DirectoryEntry> createTreeIterator( Digest rootDigest, String pageToken) { ExecutorService service = newDirectExecutorService(); return new TreeIterator((digest) -> expect(digest, Directory.parser(), service), rootDigest, pageToken);
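Two resolutions in this section (the timeout bounds check and validateAction) move from failing fast toward accumulating entries on a PreconditionFailure.Builder. A minimal, JDK-only sketch of that accumulate-then-fail pattern, with invented names and plain strings in place of Violation protos:

import java.util.ArrayList;
import java.util.List;

// Accumulate validation problems instead of failing on the first one, then raise
// a single error carrying all of them, mirroring the builder usage above.
final class Validator {
  static final class ViolationsException extends RuntimeException {
    final List<String> violations;

    ViolationsException(List<String> violations) {
      super(String.join("; ", violations));
      this.violations = violations;
    }
  }

  static void validateTimeout(long timeoutSeconds, long maximumSeconds) {
    List<String> violations = new ArrayList<>();
    if (timeoutSeconds < 0) {
      violations.add("timeout is negative");
    }
    if (timeoutSeconds > maximumSeconds) {
      violations.add("timeout " + timeoutSeconds + "s exceeds maximum " + maximumSeconds + "s");
    }
    if (!violations.isEmpty()) {
      throw new ViolationsException(violations);
    }
  }
}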
<<<<<<< import build.buildfarm.instance.Instance.MatchListener; import build.buildfarm.instance.stub.ByteStreamUploader; ======= import build.buildfarm.instance.stub.ByteStreamUploader; >>>>>>> import build.buildfarm.instance.Instance.MatchListener; <<<<<<< import com.google.devtools.remoteexecution.v1test.Action; import com.google.devtools.remoteexecution.v1test.ActionResult; import com.google.devtools.remoteexecution.v1test.Digest; import com.google.devtools.remoteexecution.v1test.Directory; import com.google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.Stage; ======= import build.buildfarm.v1test.ExecutionPolicy; import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage; >>>>>>> import build.buildfarm.v1test.ExecutionPolicy; import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Command; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage; <<<<<<< ======= @Override public ExecutionPolicy getExecutionPolicy(String name) { throw new UnsupportedOperationException(); } @Override public int getInlineContentLimit() { throw new UnsupportedOperationException(); } >>>>>>> @Override public ExecutionPolicy getExecutionPolicy(String name) { throw new UnsupportedOperationException(); } <<<<<<< @Override public Path createExecDir(String operationName, Map<Digest, Directory> directoriesIndex, Action action) { throw new UnsupportedOperationException(); } @Override public void destroyExecDir(Path execDir) { throw new UnsupportedOperationException(); } @Override public void uploadOutputs(ActionResult.Builder resultBuilder, Path actionRoot, Iterable<String> outputFiles, Iterable<String> outputDirs) { throw new UnsupportedOperationException(); } @Override public boolean putOperation(Operation operation, Action action) { throw new UnsupportedOperationException(); } ======= @Override public ByteStreamUploader getUploader() { throw new UnsupportedOperationException(); } @Override public ByteString getBlob(Digest digest) { throw new UnsupportedOperationException(); } @Override public void createActionRoot(Path root, Action action, Command command) { throw new UnsupportedOperationException(); } @Override public void destroyActionRoot(Path root) { throw new UnsupportedOperationException(); } @Override public Path getRoot() { throw new UnsupportedOperationException(); } @Override public void removeDirectory(Path path) { throw new UnsupportedOperationException(); } @Override public boolean putOperation(Operation operation) { throw new UnsupportedOperationException(); } >>>>>>> @Override public Path createExecDir(String operationName, Map<Digest, Directory> directoriesIndex, Action action, Command command) { throw new UnsupportedOperationException(); } @Override public void destroyExecDir(Path execDir) { throw new UnsupportedOperationException(); } @Override public void uploadOutputs(ActionResult.Builder resultBuilder, Path actionRoot, Iterable<String> outputFiles, Iterable<String> outputDirs) { throw new UnsupportedOperationException(); } @Override public boolean putOperation(Operation operation, Action action) { throw new UnsupportedOperationException(); }
<<<<<<<
  QueuedOperation getQueuedOperation(QueueEntry queueEntry) throws IOException, InterruptedException;
  Path createExecDir(String operationName, Iterable<Directory> directories, Action action, Command command) throws IOException, InterruptedException;
  void destroyExecDir(Path execDir) throws IOException, InterruptedException;
  void uploadOutputs(ActionResult.Builder resultBuilder, Path actionRoot, Iterable<String> outputFiles, Iterable<String> outputDirs) throws IOException, InterruptedException;
  boolean putOperation(Operation operation, Action Action) throws IOException, InterruptedException;
  OutputStream getOperationStreamOutput(String name) throws IOException;
  void putActionResult(ActionKey actionKey, ActionResult actionResult) throws IOException, InterruptedException;
=======
  ByteStreamUploader getUploader();
  ByteString getBlob(Digest digest);
  void createActionRoot(Path root, Action action, Command command) throws IOException, InterruptedException;
  void destroyActionRoot(Path root) throws IOException;
  Path getRoot();
  boolean putOperation(Operation operation) throws InterruptedException;
  Write getOperationStreamWrite(String name) throws IOException;
  void putActionResult(ActionKey actionKey, ActionResult actionResult) throws InterruptedException;
>>>>>>>
  QueuedOperation getQueuedOperation(QueueEntry queueEntry) throws IOException, InterruptedException;
  Path createExecDir(String operationName, Iterable<Directory> directories, Action action, Command command) throws IOException, InterruptedException;
  void destroyExecDir(Path execDir) throws IOException, InterruptedException;
  void uploadOutputs(ActionResult.Builder resultBuilder, Path actionRoot, Iterable<String> outputFiles, Iterable<String> outputDirs) throws IOException, InterruptedException;
  boolean putOperation(Operation operation, Action Action) throws IOException, InterruptedException;
  void putActionResult(ActionKey actionKey, ActionResult actionResult) throws IOException, InterruptedException;
  Write getOperationStreamWrite(String name) throws IOException;
<<<<<<< ======= import com.google.common.collect.Maps; import com.google.common.hash.HashCode; import build.bazel.remote.execution.v2.Action; import build.bazel.remote.execution.v2.ActionResult; import build.bazel.remote.execution.v2.Digest; import build.bazel.remote.execution.v2.Directory; import build.bazel.remote.execution.v2.ExecuteOperationMetadata; import build.bazel.remote.execution.v2.ExecuteResponse; import build.bazel.remote.execution.v2.FileNode; import build.bazel.remote.execution.v2.OutputDirectory; import build.bazel.remote.execution.v2.OutputFile; import build.bazel.remote.execution.v2.Tree; >>>>>>> import com.google.common.collect.Maps; import com.google.common.hash.HashCode; <<<<<<< ======= @VisibleForTesting public void uploadOutputs( ActionResult.Builder result, Path execRoot, Collection<String> outputFiles, Collection<String> outputDirs) throws IOException, InterruptedException { UploadManifest manifest = new UploadManifest( getDigestUtil(), result, execRoot, /* allowSymlinks= */ true, workerContext.getInlineContentLimit()); manifest.addFiles( Iterables.transform(outputFiles, (file) -> execRoot.resolve(file)), workerContext.getFileCasPolicy()); manifest.addDirectories( Iterables.transform(outputDirs, (dir) -> execRoot.resolve(dir))); /* put together our outputs and update the result */ if (result.getStdoutRaw().size() > 0) { manifest.addContent( result.getStdoutRaw(), workerContext.getStdoutCasPolicy(), result::setStdoutRaw, result::setStdoutDigest); } if (result.getStderrRaw().size() > 0) { manifest.addContent( result.getStderrRaw(), workerContext.getStderrCasPolicy(), result::setStderrRaw, result::setStderrDigest); } Map<HashCode, Chunker> filesToUpload = Maps.newHashMap(); Map<Digest, Path> digestToFile = manifest.getDigestToFile(); Map<Digest, Chunker> digestToChunkers = manifest.getDigestToChunkers(); ImmutableList.Builder<Digest> digests = ImmutableList.builder(); digests.addAll(digestToFile.keySet()); digests.addAll(digestToChunkers.keySet()); for (Digest digest : digests.build()) { Chunker chunker; Path file = digestToFile.get(digest); if (file != null) { chunker = Chunker.builder() .setInput(digest.getSizeBytes(), file) .build(); } else { chunker = digestToChunkers.get(digest); } if (chunker != null) { filesToUpload.put(HashCode.fromString(digest.getHash()), chunker); } } if (!filesToUpload.isEmpty()) { workerContext.getUploader().uploadBlobs(filesToUpload); } } >>>>>>> <<<<<<< } catch (InvalidProtocolBufferException e) { logger.log(SEVERE, "error unpacking execute response", e); ======= } catch (InvalidProtocolBufferException e) { poller.stop(); logger.log(SEVERE, "invalid ExecuteResponse for " + operationName, e); >>>>>>> } catch (InvalidProtocolBufferException e) { logger.log(SEVERE, "invalid ExecuteResponse for " + operationName, e); <<<<<<< } catch (IOException e) { logger.log(SEVERE, "error uploading outputs", e); return null; } ExecuteOperationMetadata metadata; try { metadata = operationContext.operation .getMetadata() .unpack(ExecutingOperationMetadata.class) .getExecuteOperationMetadata(); } catch (InvalidProtocolBufferException e) { logger.log(SEVERE, "invalid execute operation metadata", e); ======= } catch (IllegalStateException e) { status .setCode(Code.FAILED_PRECONDITION.getNumber()) .setMessage(e.getMessage()); } catch (IOException e) { poller.stop(); logger.log(SEVERE, "error while uploading outputs for " + operationName, e); >>>>>>> } catch (IOException e) { logger.log(SEVERE, "error while uploading outputs for " + operationName, e); return 
null; } ExecuteOperationMetadata metadata; try { metadata = operation .getMetadata() .unpack(ExecutingOperationMetadata.class) .getExecuteOperationMetadata(); } catch (InvalidProtocolBufferException e) { logger.log(SEVERE, "invalid execute operation metadata", e); <<<<<<< try { if (!workerContext.putOperation(operation, operationContext.action)) { return null; } } catch (IOException e) { logger.log(SEVERE, "error reporting complete operation " + operation.getName(), e); ======= if (!workerContext.putOperation(operation)) { logger.severe("could not put operation " + operationName); >>>>>>> try { if (!workerContext.putOperation(doneOperation, operationContext.action)) { return null; } } catch (IOException e) { logger.log(SEVERE, "error reporting complete operation " + operationName, e);