name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_ConnectionContext_isIdle_rdh | /**
* Check if the connection is idle. It checks if the connection is not used
* by another thread.
*
* @return True if the connection is not used by another thread.
*/
public synchronized boolean isIdle() {
return (!isActive()) && (!isClosed());
} | 3.26 |
hadoop_ConnectionContext_getClient_rdh | /**
* Get the connection client.
*
* @return Connection client.
*/
public synchronized ProxyAndInfo<?> getClient() {
this.numThreads++;
this.lastActiveTs = Time.monotonicNow();
return this.client;
} | 3.26 |
hadoop_ConnectionContext_hasAvailableConcurrency_rdh | /**
* Return true if this connection context still has available concurrency,
* else return false.
 */
private synchronized boolean hasAvailableConcurrency() {
return this.numThreads < maxConcurrencyPerConn;
} | 3.26 |
hadoop_ConnectionContext_isClosed_rdh | /**
* Check if the connection is closed.
*
* @return If the connection is closed.
*/
public synchronized boolean isClosed() {
return this.closed;
} | 3.26 |
hadoop_ConnectionContext_release_rdh | /**
* Release this connection.
*/
public synchronized void release() {
if (this.numThreads > 0) {
this.numThreads--;
}
} | 3.26 |
hadoop_ConnectionContext_isActive_rdh | /**
* Check if the connection is active.
*
* @return True if the connection is active.
*/
public synchronized boolean isActive() {
return this.numThreads > 0;
} | 3.26 |
hadoop_ConnectionContext_isUsable_rdh | /**
* Check if the connection can be used. It checks if the connection is used by
* another thread or already closed.
*
* @return True if the connection can be used.
*/
public synchronized boolean isUsable() {
return hasAvailableConcurrency() && (!isClosed());
} | 3.26 |
hadoop_ConnectionContext_close_rdh | /**
* Close a connection. Only idle connections can be closed since
* the RPC proxy would be shut down immediately.
*
* @param force
* whether the connection should be closed anyway.
*/
public synchronized void close(boolean force) {
if ((!force) && (this.numThreads > 0)) {
// this is an erroneous case, but we have to close the connection
// anyway since there will be connection leak if we don't do so
// the connection has been moved out of the pool
LOG.error("Active connection with {} handlers will be closed, ConnectionContext is {}", this.numThreads, this);}
this.closed = true;
Object proxy = this.client.getProxy();
// Nobody should be using this anymore, so it should close right away
RPC.stopProxy(proxy);
} | 3.26 |
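
Read together, the ConnectionContext rows above describe an acquire/release lifecycle: check isUsable() before borrowing the proxy, getClient() to take it, release() when the call finishes, and close() only once the context is idle (or force-closed). The sketch below is a hypothetical illustration of that composition, assuming Hadoop's ConnectionContext and ProxyAndInfo classes are on the classpath; the RpcCall interface and wrapper class are invented for the example and are not part of the dataset.

```java
import java.io.IOException;

// Hypothetical caller-side sketch of how the ConnectionContext methods above
// compose. A pool cleaner could later call close(false) on contexts that
// report isIdle(), as described in the close(boolean) snippet.
interface RpcCall {
  Object invoke(Object proxy) throws IOException;
}

final class ConnectionContextUsageSketch {
  static Object invokeIfUsable(ConnectionContext ctx, RpcCall call) throws IOException {
    if (!ctx.isUsable()) {               // closed, or no spare concurrency left
      return null;
    }
    ProxyAndInfo<?> proxyInfo = ctx.getClient(); // increments numThreads, updates lastActiveTs
    try {
      return call.invoke(proxyInfo.getProxy());
    } finally {
      ctx.release();                     // decrements numThreads
    }
  }
}
```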
hadoop_ResourceUsageMatcher_getProgress_rdh | /**
* Returns the average progress.
*/
@Override
public float getProgress() {
if (emulationPlugins.size() > 0) {
// return the average progress
float progress = 0.0F;
for (ResourceUsageEmulatorPlugin emulator : emulationPlugins) {
// consider weighted progress of each emulator
progress += emulator.getProgress();
}
return progress / emulationPlugins.size();
}
// if no emulators are configured then return 1
return 1.0F;
} | 3.26 |
hadoop_ResourceUsageMatcher_configure_rdh | /**
* Configure the {@link ResourceUsageMatcher} to load the configured plugins
* and initialize them.
*/
@SuppressWarnings("unchecked")
public void configure(Configuration conf, ResourceCalculatorPlugin monitor, ResourceUsageMetrics metrics, Progressive progress) {
Class[] plugins = conf.getClasses(RESOURCE_USAGE_EMULATION_PLUGINS);
if (plugins == null) {
System.out.println("No resource usage emulator plugins configured.");
} else {
for (Class clazz : plugins) {
if (clazz != null) {
if (ResourceUsageEmulatorPlugin.class.isAssignableFrom(clazz)) {
ResourceUsageEmulatorPlugin plugin = ((ResourceUsageEmulatorPlugin) (ReflectionUtils.newInstance(clazz, conf)));
emulationPlugins.add(plugin);
} else {
throw new RuntimeException((((("Misconfigured resource usage plugins. " + "Class ") + clazz.getName()) + " is not a resource ") + "usage plugin as it does not extend ") + ResourceUsageEmulatorPlugin.class.getName());
}
}
}
}
// initialize the emulators once all the configured emulator plugins are
// loaded
for (ResourceUsageEmulatorPlugin emulator : emulationPlugins) {
emulator.initialize(conf, metrics, monitor, progress);
}
} | 3.26 |
hadoop_CombinedHostsFileWriter_writeFile_rdh | /**
* Serialize a set of DatanodeAdminProperties to a json file.
*
* @param hostsFile
* the json file name.
* @param allDNs
* the set of DatanodeAdminProperties
* @throws IOException
*/
public static void writeFile(final String hostsFile, final Set<DatanodeAdminProperties> allDNs) throws IOException {
final ObjectMapper objectMapper = new ObjectMapper();
try (Writer output = new OutputStreamWriter(Files.newOutputStream(Paths.get(hostsFile)), StandardCharsets.UTF_8)) {
objectMapper.writeValue(output, allDNs);
}
} | 3.26 |
hadoop_SystemErasureCodingPolicies_getByName_rdh | /**
* Get a policy by policy name.
*
* @return ecPolicy, or null if not found
*/
public static ErasureCodingPolicy getByName(String name) {
return SYSTEM_POLICIES_BY_NAME.get(name);
} | 3.26 |
hadoop_SystemErasureCodingPolicies_getPolicies_rdh | /**
* Get system defined policies.
*
* @return system policies
*/
public static List<ErasureCodingPolicy> getPolicies() {
return SYS_POLICIES;
} | 3.26 |
hadoop_SystemErasureCodingPolicies_getByID_rdh | /**
* Get a policy by policy ID.
*
* @return ecPolicy, or null if not found
*/
public static ErasureCodingPolicy getByID(byte id) {
return SYSTEM_POLICIES_BY_ID.get(id);
} | 3.26 |
hadoop_KerberosAuthException_getInitialMessage_rdh | /**
*
* @return The initial message, or null if not set.
*/
public String getInitialMessage() {
return f1;
} | 3.26 |
hadoop_KerberosAuthException_getTicketCacheFile_rdh | /**
*
* @return The ticket cache file path, or null if not set.
*/
public String getTicketCacheFile() {
return f0;
} | 3.26 |
hadoop_KerberosAuthException_getPrincipal_rdh | /**
*
* @return The principal, or null if not set.
*/
public String getPrincipal() {
return principal;
} | 3.26 |
hadoop_KerberosAuthException_getKeytabFile_rdh | /**
*
* @return The keytab file path, or null if not set.
*/
public String getKeytabFile() {
return keytabFile;
} | 3.26 |
hadoop_KerberosAuthException_getUser_rdh | /**
*
* @return The user, or null if not set.
*/
public String getUser() {
return user;
} | 3.26 |
hadoop_XDR_verifyLength_rdh | /**
 * Check if the remaining data has at least len bytes.
*
* @param xdr
* XDR message
* @param len
* minimum remaining length
 * @return true if the remaining length is at least len bytes.
*/
public static boolean verifyLength(XDR xdr, int len) {
return xdr.f0.remaining() >= len;
} | 3.26 |
hadoop_XDR_writeMessageUdp_rdh | /**
* Write an XDR message to a UDP ChannelBuffer.
*
* @param response
* XDR response
* @return UDP buffer
*/
public static ByteBuf writeMessageUdp(XDR response) {
Preconditions.checkState(response.state == XDR.State.READING);
// TODO: Investigate whether making a copy of the buffer is necessary.
return Unpooled.copiedBuffer(response.f0);
} | 3.26 |
hadoop_SysInfoWindows_getCpuFrequency_rdh | /**
* {@inheritDoc }
*/
@Override
public long getCpuFrequency() {
refreshIfNeeded();
return cpuFrequencyKhz;
} | 3.26 |
hadoop_SysInfoWindows_getAvailableVirtualMemorySize_rdh | /**
* {@inheritDoc }
*/
@Override
public long getAvailableVirtualMemorySize() {
refreshIfNeeded();
return vmemAvailable;
} | 3.26 |
hadoop_SysInfoWindows_getPhysicalMemorySize_rdh | /**
* {@inheritDoc }
*/
@Override
public long getPhysicalMemorySize() {
refreshIfNeeded();
return memSize;
} | 3.26 |
hadoop_SysInfoWindows_getNetworkBytesWritten_rdh | /**
* {@inheritDoc }
*/
@Override
public long getNetworkBytesWritten() {
refreshIfNeeded();
return netBytesWritten;
} | 3.26 |
hadoop_SysInfoWindows_getVirtualMemorySize_rdh | /**
* {@inheritDoc }
*/
@Override
public long getVirtualMemorySize() {
refreshIfNeeded();
return vmemSize;
} | 3.26 |
hadoop_SysInfoWindows_getNetworkBytesRead_rdh | /**
* {@inheritDoc }
*/
@Override
public long getNetworkBytesRead() {
refreshIfNeeded();
return netBytesRead;
} | 3.26 |
hadoop_SysInfoWindows_getCpuUsagePercentage_rdh | /**
* {@inheritDoc }
 */
@Override
public synchronized float getCpuUsagePercentage() {
refreshIfNeeded();
float ret = cpuUsage;
if (ret != (-1)) {
ret = ret / numProcessors;
}
return ret;
} | 3.26 |
hadoop_SysInfoWindows_m1_rdh | /**
* {@inheritDoc }
*/
@Override
public long m1() {
refreshIfNeeded();
return cumulativeCpuTimeMs;
} | 3.26 |
hadoop_SysInfoWindows_getNumProcessors_rdh | /**
* {@inheritDoc }
*/
@Override
public synchronized int getNumProcessors() {
refreshIfNeeded();
return numProcessors;
} | 3.26 |
hadoop_SysInfoWindows_getAvailablePhysicalMemorySize_rdh | /**
* {@inheritDoc }
*/
@Override
public long getAvailablePhysicalMemorySize() {
refreshIfNeeded();
return memAvailable;
} | 3.26 |
hadoop_SysInfoWindows_getNumVCoresUsed_rdh | /**
* {@inheritDoc }
*/
@Override
public synchronized float getNumVCoresUsed() {
refreshIfNeeded();
float ret = cpuUsage;
if (ret != (-1)) {
ret = ret / 100.0F;
}
return ret;
} | 3.26 |
hadoop_LocalCacheCleaner_cleanCache_rdh | /**
* Delete resources from the cache in the sorted order generated by the
* Comparator used to construct this class.
*
* @return stats about what was cleaned up during this call of cleanCache
*/
public LocalCacheCleanerStats cleanCache() {
LocalCacheCleanerStats stats = new LocalCacheCleanerStats(currentSize);
for (Iterator<Map.Entry<LocalizedResource, LocalResourcesTracker>> i = resourceMap.entrySet().iterator(); ((currentSize - stats.totalDelSize) > targetSize) && i.hasNext();) {
Map.Entry<LocalizedResource, LocalResourcesTracker> rsrc = i.next();
LocalizedResource resource = rsrc.getKey();
LocalResourcesTracker tracker = rsrc.getValue();
if (tracker.remove(resource, delService)) {
stats.incDelSize(tracker.getUser(), resource.getSize());
}
}
this.resourceMap.clear();
return stats;
} | 3.26 |
hadoop_LocalCacheCleaner_addResources_rdh | /**
* Adds resources from the passed LocalResourceTracker that are candidates for
* deletion from the cache.
*
* @param newTracker
* add all resources being tracked by the passed
* LocalResourcesTracker to the LocalCacheCleaner.
*/
public void addResources(LocalResourcesTracker newTracker) {
for (LocalizedResource resource : newTracker) {
currentSize += resource.getSize();
if (resource.getRefCount() > 0) {
// Do not delete resources that are still in use
continue;
}
resourceMap.put(resource, newTracker);
}
} | 3.26 |
hadoop_BufferedIOStatisticsOutputStream_hsync_rdh | /**
* If the inner stream is Syncable, flush the buffer and then
* invoke the inner stream's hsync() operation.
*
* Otherwise: throw an exception, unless the stream was constructed with
* {@link #downgradeSyncable} set to true, in which case the stream
* is just flushed.
*
* @throws IOException
* IO Problem
* @throws UnsupportedOperationException
* if the inner class is not syncable
*/
@Override
public void hsync() throws IOException {
if (out instanceof Syncable) {
flush();
((Syncable) (out)).hsync();
} else if (!downgradeSyncable) {
throw new UnsupportedOperationException("hsync not supported by " + out);
} else {
flush();
}
} | 3.26 |
hadoop_BufferedIOStatisticsOutputStream_m0_rdh | /**
* If the inner stream is Syncable, flush the buffer and then
* invoke the inner stream's hflush() operation.
*
* Otherwise: throw an exception, unless the stream was constructed with
* {@link #downgradeSyncable} set to true, in which case the stream
* is just flushed.
*
* @throws IOException
* IO Problem
* @throws UnsupportedOperationException
* if the inner class is not syncable
*/
@Override
public void m0() throws IOException {
if (out instanceof Syncable) {
flush();
((Syncable) (out)).hflush();
} else if (!downgradeSyncable) {
throw new UnsupportedOperationException("hflush not supported by " + out);
} else {
flush();
}
} | 3.26 |
hadoop_BufferedIOStatisticsOutputStream_hasCapability_rdh | /**
* If the inner stream supports {@link StreamCapabilities},
* forward the probe to it.
* Otherwise: return false.
*
* @param capability
* string to query the stream support for.
* @return true if a capability is known to be supported.
*/
@Override
public boolean hasCapability(final String capability) {
if (out instanceof StreamCapabilities) {
return ((StreamCapabilities) (out)).hasCapability(capability);
} else {
return false;
}
} | 3.26 |
hadoop_MySQLDBRecordReader_executeQuery_rdh | // Execute statements for mysql in unbuffered mode.
protected ResultSet executeQuery(String query) throws SQLException {
statement = getConnection().prepareStatement(query, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
statement.setFetchSize(Integer.MIN_VALUE); // MySQL: read row-at-a-time.
return statement.executeQuery();
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_updateToken_rdh | /**
* Updates the TokenInformation of an existing TokenIdentifier in
* the SQL database.
*
* @param ident
* Existing TokenIdentifier in the SQL database.
* @param tokenInfo
* Updated DelegationTokenInformation associated with the TokenIdentifier.
*/
@Override
protected void updateToken(TokenIdent ident, DelegationTokenInformation tokenInfo) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
try (DataOutputStream dos = new DataOutputStream(bos)) {
tokenInfo.write(dos);
// Update token in SQL database
updateToken(ident.getSequenceNumber(), ident.getBytes(), bos.toByteArray());
// Update token in local cache
super.updateToken(ident, tokenInfo);
}
} catch (SQLException e) {
throw new IOException("Failed to update token in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_storeToken_rdh | /**
* Persists a TokenIdentifier and its corresponding TokenInformation into
* the SQL database. The TokenIdentifier is expected to be unique and any
* duplicate token attempts will result in an IOException.
*
* @param ident
* TokenIdentifier to persist.
* @param tokenInfo
* DelegationTokenInformation associated with the TokenIdentifier.
*/
@Override
protected void storeToken(TokenIdent ident, DelegationTokenInformation tokenInfo) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();DataOutputStream dos = new DataOutputStream(bos)) {
tokenInfo.write(dos);
// Add token to SQL database
m1(ident.getSequenceNumber(), ident.getBytes(), bos.toByteArray());
// Add token to local cache
super.storeToken(ident, tokenInfo);
} catch (SQLException e) {
throw new IOException("Failed to store token in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_storeDelegationKey_rdh | /**
* Persists a DelegationKey into the SQL database. The delegation keyId
* is expected to be unique and any duplicate key attempts will result
* in an IOException.
*
* @param key
* DelegationKey to persist into the SQL database.
*/
@Override
protected void storeDelegationKey(DelegationKey key) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();DataOutputStream v22 = new DataOutputStream(bos)) {
key.write(v22);
// Add delegation key to SQL database
insertDelegationKey(key.getKeyId(), bos.toByteArray());
// Add delegation key to local cache
super.storeDelegationKey(key);
} catch (SQLException e) {
throw new IOException("Failed to store delegation key in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_setDelegationTokenSeqNum_rdh | /**
* Updates the value of the last reserved sequence number.
*
* @param seqNum
* Value to update the sequence number to.
*/
@Override
public void setDelegationTokenSeqNum(int seqNum) {
try {
updateSequenceNum(seqNum);
} catch (SQLException e) {
throw new RuntimeException("Failed to update token sequence number in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_m0_rdh | /**
* Updates an existing DelegationKey in the SQL database.
*
* @param key
* Updated DelegationKey.
*/
@Override
protected void m0(DelegationKey key) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();DataOutputStream dos = new DataOutputStream(bos)) {
key.write(dos);
// Update delegation key in SQL database
updateDelegationKey(key.getKeyId(), bos.toByteArray());
// Update delegation key in local cache
super.updateDelegationKey(key);
} catch (SQLException e) {
throw new IOException("Failed to update delegation key in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_incrementCurrentKeyId_rdh | /**
* Obtains the next available delegation key id that can be allocated to a DelegationKey.
* Delegation key id need to be reserved using the shared delegationKeyIdCounter,
* which handles keyId allocation concurrently with other secret managers.
*
* @return Next available delegation key id.
*/
@Override
public int incrementCurrentKeyId() {
try {
return incrementKeyId(1) + 1;
} catch (SQLException e) {
throw new RuntimeException("Failed to increment delegation key id in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_getDelegationTokenSeqNum_rdh | /**
* Obtains the value of the last reserved sequence number.
*
* @return Last reserved sequence number.
*/
@Override
public int getDelegationTokenSeqNum() {
try {
return selectSequenceNum();
} catch (SQLException e) {
throw new RuntimeException("Failed to get token sequence number in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_cancelToken_rdh | /**
* Cancels a token by removing it from the SQL database. This will
* call the corresponding method in {@link AbstractDelegationTokenSecretManager}
* to perform validation and remove the token from the cache.
*
* @return Identifier of the canceled token
*/
@Override
public synchronized TokenIdent cancelToken(Token<TokenIdent> token, String canceller) throws IOException {
TokenIdent v6 = createTokenIdent(token.getIdentifier());
// Calling getTokenInfo to load token into local cache if not present.
// super.cancelToken() requires token to be present in local cache.
getTokenInfo(v6);
return super.cancelToken(token, canceller);
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_incrementDelegationTokenSeqNum_rdh | /**
* Obtains the next available sequence number that can be allocated to a Token.
* Sequence numbers need to be reserved using the shared sequenceNumberCounter once
* the local batch has been exhausted, which handles sequenceNumber allocation
* concurrently with other secret managers.
* This method ensures that sequence numbers are incremental in a single secret manager,
* but not across secret managers.
*
* @return Next available sequence number.
*/
@Override
public synchronized int incrementDelegationTokenSeqNum() {
if (currentSeqNum >= currentMaxSeqNum) {
try {
// Request a new batch of sequence numbers and use the
// lowest one available.
currentSeqNum = incrementSequenceNum(seqNumBatchSize);
currentMaxSeqNum = currentSeqNum + seqNumBatchSize;
} catch (SQLException e) {
throw new RuntimeException("Failed to increment token sequence number in SQL secret manager", e);
}
}
return ++currentSeqNum;
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_removeStoredMasterKey_rdh | /**
* Removes the existing DelegationKey from the SQL database to
* invalidate it.
*
* @param key
* DelegationKey to remove from the SQL database.
*/
@Override
protected void removeStoredMasterKey(DelegationKey key) {
try {
deleteDelegationKey(key.getKeyId());
} catch (SQLException e) {
LOG.warn("Failed to remove delegation key in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_setCurrentKeyId_rdh | /**
* Updates the value of the last delegation key id.
*
* @param keyId
* Value to update the delegation key id to.
*/
@Override
public void setCurrentKeyId(int keyId) {
try {
updateKeyId(keyId);
} catch (SQLException e) {
throw new RuntimeException("Failed to set delegation key id in SQL secret manager", e);
}
} | 3.26 |
hadoop_SQLDelegationTokenSecretManager_getCurrentKeyId_rdh | /**
* Obtains the value of the last delegation key id.
*
* @return Last delegation key id.
*/
@Override
public int getCurrentKeyId() {
try {
return selectKeyId();
} catch (SQLException e) {
throw new RuntimeException("Failed to get delegation key id in SQL secret manager", e);
}
} | 3.26 |
hadoop_MySQLDataDrivenDBRecordReader_m0_rdh | // Execute statements for mysql in unbuffered mode.
protected ResultSet m0(String query) throws SQLException {
statement = getConnection().prepareStatement(query, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
statement.setFetchSize(Integer.MIN_VALUE);// MySQL: read row-at-a-time.
return statement.executeQuery();
} | 3.26 |
hadoop_S3ARemoteInputStream_setInputPolicy_rdh | /**
* Set/update the input policy of the stream.
* This updates the stream statistics.
*
* @param inputPolicy
* new input policy.
*/
private void setInputPolicy(S3AInputPolicy inputPolicy) {
this.inputPolicy = inputPolicy;
streamStatistics.inputPolicySet(inputPolicy.ordinal());
} | 3.26 |
hadoop_S3ARemoteInputStream_getIOStatistics_rdh | /**
* Gets the internal IO statistics.
*
* @return the internal IO statistics.
*/
@Override
public IOStatistics getIOStatistics() {
return ioStatistics;
} | 3.26 |
hadoop_S3ARemoteInputStream_mark_rdh | // Unsupported functions.
@Override
public void mark(int readlimit) {
throw new UnsupportedOperationException("mark not supported");
} | 3.26 |
hadoop_S3ARemoteInputStream_getPos_rdh | /**
* Gets the current position.
*
* @return the current position.
* @throws IOException
* if there is an IO error during this operation.
*/
public long getPos() throws IOException {
throwIfClosed();
return nextReadPos;
} | 3.26 |
hadoop_S3ARemoteInputStream_available_rdh | /**
 * Returns the number of bytes that can be read from this stream without blocking.
 */
@Override
public int available() throws IOException {
throwIfClosed();
// Update the current position in the current buffer, if possible.
if (!fpos.setAbsolute(nextReadPos)) {
return 0;
}
return fpos.buffer().remaining();
} | 3.26 |
hadoop_S3ARemoteInputStream_hasCapability_rdh | /**
* Indicates whether the given {@code capability} is supported by this stream.
*
* @param capability
* the capability to check.
* @return true if the given {@code capability} is supported by this stream, false otherwise.
*/
@Override
public boolean hasCapability(String capability) {
return capability.equalsIgnoreCase(StreamCapabilities.IOSTATISTICS) ||
capability.equalsIgnoreCase(StreamCapabilities.READAHEAD);
} | 3.26 |
hadoop_S3ARemoteInputStream_seek_rdh | /**
* Moves the current read position so that the next read will occur at {@code pos}.
*
* @param pos
* the absolute position to seek to.
* @throws IOException
* if there an error during this operation.
* @throws IllegalArgumentException
* if pos is outside of the range [0, file size].
*/
public void seek(long pos) throws IOException {
throwIfClosed();
m0(pos);
nextReadPos = pos;
} | 3.26 |
hadoop_S3ARemoteInputStream_read_rdh | /**
* Reads up to {@code len} bytes from this stream and copies them into
* the given {@code buffer} starting at the given {@code offset}.
* Returns the number of bytes actually copied in to the given buffer.
*
* @param buffer
* the buffer to copy data into.
* @param offset
* data is copied starting at this offset.
* @param len
* max number of bytes to copy.
* @return the number of bytes actually copied in to the given buffer.
* @throws IOException
* if there is an IO error during this operation.
*/
@Override
public int read(byte[] buffer, int offset, int len) throws IOException {
throwIfClosed();
if (len == 0) {
return 0;
}
if ((remoteObject.size() == 0) || (nextReadPos >= remoteObject.size())) {
return -1;
}
if (!ensureCurrentBuffer()) {
return -1;
}
int numBytesRead = 0;
int numBytesRemaining = len;
while (numBytesRemaining > 0) {
if (!ensureCurrentBuffer()) {
break;
}
ByteBuffer buf = fpos.buffer();
int bytesToRead = Math.min(numBytesRemaining, buf.remaining());
buf.get(buffer, offset, bytesToRead);
nextReadPos += bytesToRead;
incrementBytesRead(bytesToRead);
offset += bytesToRead;
numBytesRemaining -= bytesToRead;
numBytesRead += bytesToRead;
}
return numBytesRead;
} | 3.26 |
hadoop_S3ARemoteInputStream_setReadahead_rdh | /**
* Sets the number of bytes to read ahead each time.
*
* @param readahead
 * the number of bytes to read ahead each time.
*/
@Override
public synchronized void setReadahead(Long readahead) {
// We support readahead via prefetching, so we ignore the supplied value.
if (readahead != null) {
Validate.checkNotNegative(readahead, "readahead");
}
} | 3.26 |
hadoop_S3ARemoteInputStream_close_rdh | /**
* Closes this stream and releases all acquired resources.
*
* @throws IOException
* if there is an IO error during this operation.
*/
@Override
public void close() throws IOException {
if (f0) {
return;
}
f0 = true;
blockData = null;
reader.close();
reader = null;
remoteObject = null;
fpos.invalidate();
try {
client.close();
} finally {
streamStatistics.close();
}
client = null;
} | 3.26 |
hadoop_S3ARemoteInputStream_getS3AStreamStatistics_rdh | /**
* Access the input stream statistics.
* This is for internal testing and may be removed without warning.
*
* @return the statistics for this input stream
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public S3AInputStreamStatistics getS3AStreamStatistics() {
return streamStatistics;
} | 3.26 |
hadoop_DynamicIOStatistics_addMinimumFunction_rdh | /**
* add a mapping of a key to a minimum function.
*
* @param key
* the key
* @param eval
* the evaluator
*/
void addMinimumFunction(String key, Function<String, Long> eval) {
minimums.addFunction(key, eval);
} | 3.26 |
hadoop_DynamicIOStatistics_addMaximumFunction_rdh | /**
* add a mapping of a key to a maximum function.
*
* @param key
* the key
* @param eval
* the evaluator
*/
void addMaximumFunction(String key, Function<String, Long> eval) {
maximums.addFunction(key, eval);
} | 3.26 |
hadoop_DynamicIOStatistics_addMeanStatisticFunction_rdh | /**
* add a mapping of a key to a meanStatistic function.
*
* @param key
* the key
* @param eval
* the evaluator
*/
void addMeanStatisticFunction(String key, Function<String, MeanStatistic> eval) {
meanStatistics.addFunction(key, eval);
} | 3.26 |
hadoop_DynamicIOStatistics_addGaugeFunction_rdh | /**
* add a mapping of a key to a gauge function.
*
* @param key
* the key
* @param eval
* the evaluator
*/
void addGaugeFunction(String key, Function<String, Long> eval) {
gauges.addFunction(key, eval);
} | 3.26 |
hadoop_DynamicIOStatistics_addCounterFunction_rdh | /**
* add a mapping of a key to a counter function.
*
* @param key
* the key
* @param eval
* the evaluator
*/
void addCounterFunction(String key, Function<String, Long> eval) {
counters.addFunction(key, eval);
} | 3.26 |
hadoop_WrappedIOStatistics_getWrapped_rdh | /**
* Get at the wrapped inner statistics.
*
* @return the wrapped value
*/
protected IOStatistics getWrapped() {
return wrapped;
} | 3.26 |
hadoop_WrappedIOStatistics_toString_rdh | /**
* Return the statistics dump of the wrapped statistics.
*
* @return the statistics for logging.
*/
@Override
public String toString() {
return ioStatisticsToString(wrapped);
} | 3.26 |
hadoop_WrappedIOStatistics_setWrapped_rdh | /**
* Set the wrapped statistics.
* Will fail if the field is already set.
*
* @param wrapped
* new value
 */
protected void setWrapped(final IOStatistics wrapped) {
Preconditions.checkState(this.wrapped == null, "Attempted to overwrite existing wrapped statistics");
this.wrapped = wrapped;
} | 3.26 |
hadoop_WebServiceClient_createSSLFactory_rdh | /**
* Start SSL factory.
*
* @param conf
* configuration.
* @return SSL factory.
* @throws Exception
 * if an error occurs.
*/
private static SSLFactory createSSLFactory(Configuration conf) throws Exception {
sslFactory = new SSLFactory(Mode.CLIENT, conf);
sslFactory.init();
return sslFactory;
} | 3.26 |
hadoop_WebServiceClient_initialize_rdh | /**
* Construct a new WebServiceClient based on the configuration. It will try to
* load SSL certificates when it is specified.
*
* @param conf
* configuration.
* @throws Exception
 * if an error occurs.
 */
public static void initialize(Configuration conf) throws Exception {
if (instance == null) {
synchronized (WebServiceClient.class) {
if (instance == null) {
isHttps = YarnConfiguration.useHttps(conf);
if (isHttps) {
createSSLFactory(conf);
}
instance = new WebServiceClient();
}
}
}
} | 3.26 |
hadoop_WebServiceClient_createClient_rdh | /**
* Create a client based on http conf.
*
* @return Client
*/
public Client createClient() {
return new Client(new URLConnectionClientHandler(getHttpURLConnectionFactory()));
} | 3.26 |
hadoop_CachedDNSToSwitchMapping_cacheResolvedHosts_rdh | /**
* Caches the resolved host:rack mappings. The two list
* parameters must be of equal size.
*
* @param uncachedHosts
* a list of hosts that were uncached
* @param resolvedHosts
* a list of resolved host entries where the element
* at index(i) is the resolved value for the entry in uncachedHosts[i]
*/
private void cacheResolvedHosts(List<String> uncachedHosts, List<String> resolvedHosts) {
// Cache the result
if (resolvedHosts != null) {
for (int i = 0; i < uncachedHosts.size(); i++) {
cache.put(uncachedHosts.get(i), resolvedHosts.get(i));
}
}
} | 3.26 |
hadoop_CachedDNSToSwitchMapping_getSwitchMap_rdh | /**
* Get the (host x switch) map.
*
* @return a copy of the cached map of hosts to rack
*/
@Override
public Map<String, String> getSwitchMap() {
return new HashMap<>(cache);
} | 3.26 |
hadoop_CachedDNSToSwitchMapping_isSingleSwitch_rdh | /**
* Delegate the switch topology query to the raw mapping, via
* {@link AbstractDNSToSwitchMapping#isMappingSingleSwitch(DNSToSwitchMapping)}
*
* @return true iff the raw mapper is considered single-switch.
*/
@Override
public boolean isSingleSwitch() {
return isMappingSingleSwitch(rawMapping);
} | 3.26 |
hadoop_BlockMovementAttemptFinished_getTargetDatanode_rdh | /**
*
* @return the target datanode where it moved the block.
*/
public DatanodeInfo getTargetDatanode() {
return target;
} | 3.26 |
hadoop_BlockMovementAttemptFinished_getTargetType_rdh | /**
*
* @return target storage type.
*/
public StorageType getTargetType() {
return targetType;
} | 3.26 |
hadoop_BlockMovementAttemptFinished_getStatus_rdh | /**
*
* @return block movement status code.
*/
public BlockMovementStatus getStatus() {
return status;
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getDataFileIoSampleCount_rdh | // Based on dataFileIoRate
public long getDataFileIoSampleCount() {
return dataFileIoRate.lastStat().numSamples();
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getNativeCopyIoSampleCount_rdh | // Based on nativeCopyIoRate
public long getNativeCopyIoSampleCount() {
return nativeCopyIoRate.lastStat().numSamples();
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getReadIoSampleCount_rdh | // Based on readIoRate
public long getReadIoSampleCount() {
return f1.lastStat().numSamples();
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getTransferIoSampleCount_rdh | // Based on transferIoRate
public long getTransferIoSampleCount() {
return transferIoRate.lastStat().numSamples();
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getWriteIoSampleCount_rdh | // Based on writeIoRate
public long getWriteIoSampleCount() {
return writeIoRate.lastStat().numSamples();
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getFlushIoSampleCount_rdh | // Based on flushIoRate
public long getFlushIoSampleCount() {
return flushIoRate.lastStat().numSamples();
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getMetadataOperationSampleCount_rdh | // Based on metadataOperationRate
public long getMetadataOperationSampleCount() {
return metadataOperationRate.lastStat().numSamples();
} | 3.26 |
hadoop_DataNodeVolumeMetrics_getSyncIoSampleCount_rdh | // Based on syncIoRate
public long getSyncIoSampleCount() {
return syncIoRate.lastStat().numSamples();
} | 3.26 |
hadoop_TaskRuntimeEstimator_hasStagnatedProgress_rdh | /**
* Returns true if the estimator has no updates records for a threshold time
* window. This helps to identify task attempts that are stalled at the
* beginning of execution.
*
* @param id
* the {@link TaskAttemptId} of the attempt we are asking about
* @param timeStamp
* the time of the report we compare with
* @return true if the task attempt has no progress for a given time window
*/
default boolean hasStagnatedProgress(TaskAttemptId id, long timeStamp) {
return false;
} | 3.26 |
hadoop_SpillCallBackInjector_getAndSet_rdh | /**
* Sets the global SpillFilesCBInjector to the new value, returning the old
* value.
*
* @param spillInjector
* the new implementation for the spill injector.
* @return the previous implementation.
*/
public static SpillCallBackInjector getAndSet(SpillCallBackInjector spillInjector) {
SpillCallBackInjector prev = instance;
instance = spillInjector;
return prev;
} | 3.26 |
hadoop_BCFile_getAPIVersion_rdh | /**
* Get version of BCFile API.
*
* @return version of BCFile API.
*/
public Version getAPIVersion() {
return API_VERSION;
} | 3.26 |
hadoop_BCFile_getStartPos_rdh | /**
* Get the starting position of the block in the file.
*
* @return the starting position of the block in the file.
*/
public long getStartPos() {
return rBlkState.getBlockRegion().getOffset();
} | 3.26 |
hadoop_BCFile_getCurrentPos_rdh | /**
* Get the current position in file.
*
* @return The current byte offset in underlying file.
* @throws IOException
*/
long getCurrentPos() throws IOException {
return fsOut.getPos() + fsBufferedOutput.size();
} | 3.26 |
hadoop_BCFile_m0_rdh | /**
* Stream access to a Meta Block.
*
* @param name
* meta block name
* @return BlockReader input stream for reading the meta block.
* @throws IOException
* @throws MetaBlockDoesNotExist
* The Meta Block with the given name does not exist.
*/
public BlockReader m0(String name) throws IOException, MetaBlockDoesNotExist {
MetaIndexEntry imeBCIndex = metaIndex.getMetaByName(name);
if (imeBCIndex == null) {
throw new MetaBlockDoesNotExist("name=" + name);
}
BlockRegion region = imeBCIndex.getRegion();
return createReader(imeBCIndex.getCompressionAlgorithm(), region);
} | 3.26 |
hadoop_BCFile_getRawSize_rdh | /**
* Get the uncompressed size of the block.
*
* @return uncompressed size of the block.
*/
public long getRawSize() {
return rBlkState.getBlockRegion().getRawSize();
} | 3.26 |
hadoop_BCFile_finish_rdh | /**
* Finishing up the current block.
*/
public void finish() throws IOException {
try {
if (out != null) {
out.flush();
out = null;
}
} finally {
compressAlgo.returnCompressor(compressor);
compressor = null;
}
} | 3.26 |
hadoop_BCFile_getBlockCount_rdh | /**
* Get the number of data blocks.
*
* @return the number of data blocks.
*/
public int getBlockCount() {
return dataIndex.getBlockRegionList().size();
} | 3.26 |
hadoop_BCFile_prepareMetaBlock_rdh | /**
* Create a Meta Block and obtain an output stream for adding data into the
* block. The Meta Block will be compressed with the same compression
* algorithm as data blocks. There can only be one BlockAppender stream
* active at any time. Regular Blocks may not be created after the first
* Meta Blocks. The caller must call BlockAppender.close() to conclude the
* block creation.
*
* @param name
* The name of the Meta Block. The name must not conflict with
* existing Meta Blocks.
* @return The BlockAppender stream
* @throws MetaBlockAlreadyExists
* If the meta block with the name already exists.
* @throws IOException
*/
public BlockAppender prepareMetaBlock(String name) throws IOException, MetaBlockAlreadyExists {
return prepareMetaBlock(name, getDefaultCompressionAlgorithm());
} | 3.26 |
hadoop_BCFile_getDataBlock_rdh | /**
* Stream access to a Data Block.
*
* @param blockIndex
* 0-based data block index.
* @return BlockReader input stream for reading the data block.
* @throws IOException
*/
public BlockReader getDataBlock(int blockIndex) throws IOException {
if ((blockIndex < 0) || (blockIndex >= getBlockCount())) {
throw new IndexOutOfBoundsException(String.format("blockIndex=%d, numBlocks=%d", blockIndex, getBlockCount()));
}
BlockRegion region = dataIndex.getBlockRegionList().get(blockIndex);
return createReader(dataIndex.getDefaultCompressionAlgorithm(), region);
} | 3.26 |
hadoop_BCFile_getInputStream_rdh | /**
 * Get the input stream holding the block data.
 *
 * @return the input stream suitable for reading the block data.
*/
public InputStream getInputStream() {
return in;
} | 3.26 |
hadoop_BCFile_prepareDataBlock_rdh | /**
* Create a Data Block and obtain an output stream for adding data into the
* block. There can only be one BlockAppender stream active at any time.
* Data Blocks may not be created after the first Meta Blocks. The caller
* must call BlockAppender.close() to conclude the block creation.
*
* @return The BlockAppender stream
* @throws IOException
*/
public BlockAppender prepareDataBlock() throws IOException {
if (blkInProgress == true) {
throw new IllegalStateException("Cannot create Data Block until previous block is closed.");
}
if (metaBlkSeen == true) {
throw new IllegalStateException("Cannot create Data Block after Meta Blocks.");
}
DataBlockRegister dbr = new DataBlockRegister();
WBlockState wbs = new WBlockState(getDefaultCompressionAlgorithm(), out, fsOutputBuffer, conf);
BlockAppender ba = new BlockAppender(dbr, wbs);
blkInProgress = true;
return ba;
} | 3.26 |