name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_TypedBytesInput_readRawBytes_rdh | /**
* Reads the raw bytes following a <code>Type.BYTES</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawBytes() throws IOException {
return readRawBytes(BYTES.code);
} | 3.26 |
hadoop_TypedBytesInput_m0_rdh | /**
* Reads the raw bytes following a <code>Type.BOOL</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] m0() throws IOException {
byte[] bytes = new byte[2];
bytes[0] = ((byte) (BOOL.code));
in.readFully(bytes, 1, 1);
return bytes;
} | 3.26 |
hadoop_TypedBytesInput_readRawDouble_rdh | /**
* Reads the raw bytes following a <code>Type.DOUBLE</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawDouble() throws IOException {
byte[] bytes = new byte[9];
bytes[0] = ((byte) (DOUBLE.code));
in.readFully(bytes, 1, 8);
return bytes;
} | 3.26 |
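The readRaw* snippets above all use the same framing: a one-byte type code followed by a fixed-size payload, reassembled into a single byte array with DataInput.readFully. Below is a minimal, self-contained JDK-only sketch of that framing; the type-code constant is a hypothetical stand-in, not the real Type.DOUBLE.code.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RawDoubleFramingSketch {
  // Hypothetical stand-in for Type.DOUBLE.code; the real constant lives in the
  // typed-bytes Type enum, which is not shown in the snippets above.
  static final byte DOUBLE_CODE = 6;

  public static void main(String[] args) throws IOException {
    // Producer side: one type-code byte followed by an 8-byte double payload.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeByte(DOUBLE_CODE);
    out.writeDouble(3.14);

    // Consumer side, mirroring readRawDouble(): keep the code in bytes[0]
    // and fill the remaining 8 bytes with DataInput.readFully.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    byte[] bytes = new byte[9];
    bytes[0] = in.readByte();    // the type code (consumed by the dispatcher in the real reader)
    in.readFully(bytes, 1, 8);   // the raw payload
    System.out.println("type code = " + bytes[0] + ", total length = " + bytes.length);
  }
}
```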
hadoop_TypedBytesInput_skipType_rdh | /**
* Skips a type byte.
*
* @return true iff the end of the file was not reached
* @throws IOException
*/
public boolean skipType() throws IOException {
try {
in.readByte();
return true;
} catch (EOFException eof) {
return false;
}
} | 3.26 |
hadoop_TypedBytesInput_readByte_rdh | /**
* Reads the byte following a <code>Type.BYTE</code> code.
*
* @return the obtained byte
* @throws IOException
*/
public byte readByte() throws IOException {
return in.readByte();
} | 3.26 |
hadoop_LimitInputStream_mark_rdh | // it's okay to mark even if mark isn't supported, as reset won't work
@Override
public synchronized void mark(int readLimit) {
in.mark(readLimit);
mark = left;
} | 3.26 |
hadoop_FederationMethodWrapper_getTypes_rdh | /**
* Get the calling types for this method.
*
* @return An array of calling types.
*/
public Class<?>[] getTypes() {
return Arrays.copyOf(this.types, this.types.length);
} | 3.26 |
hadoop_HsNavBlock_render_rdh | /* (non-Javadoc)
@see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
*/
@Override
protected void render(Block html) {
DIV<Hamlet> nav = html.div("#nav").h3("Application").ul().li().a(url("about"), "About").__().li().a(url("app"), "Jobs").__().__();
if (app.getJob() != null) {
String jobid = MRApps.toString(app.getJob().getID());
nav.h3("Job").ul().li().a(url("job", jobid), "Overview").__().li().a(url("jobcounters",
jobid), "Counters").__().li().a(url("conf", jobid), "Configuration").__().li().a(url("tasks", jobid, "m"), "Map tasks").__().li().a(url("tasks", jobid, "r"), "Reduce tasks").__().__();
if (app.getTask() != null) {
String taskid = MRApps.toString(app.getTask().getID());
nav.h3("Task").ul().li().a(url("task", taskid), "Task Overview").__().li().a(url("taskcounters", taskid), "Counters").__().__();
}
}
Hamlet.UL<DIV<Hamlet>> tools = WebPageUtils.appendToolSection(nav, conf);
if (tools != null) {
tools.__().__();
}
} | 3.26 |
hadoop_SimpleBufferedOutputStream_size_rdh | // Get the size of internal buffer being used.
public int size() {
return count;
} | 3.26 |
hadoop_CrcComposer_newCrcComposer_rdh | /**
* Returns a CrcComposer which will collapse all ingested CRCs into a single
* value.
*
* @param type
* type.
* @param bytesPerCrcHint
* bytesPerCrcHint.
* @throws IOException
* raised on errors performing I/O.
* @return a CrcComposer which will collapse all ingested CRCs into a single value.
*/
public static CrcComposer newCrcComposer(DataChecksum.Type type, long bytesPerCrcHint) throws IOException {
return newStripedCrcComposer(type, bytesPerCrcHint, Long.MAX_VALUE);
} | 3.26 |
hadoop_CrcComposer_digest_rdh | /**
* Returns byte representation of composed CRCs; if no stripeLength was
* specified, the digest should be of length equal to exactly one CRC.
* Otherwise, the number of CRCs in the returned array is equal to the
* total sum bytesPerCrc divided by stripeLength. If the sum of bytesPerCrc
* is not a multiple of stripeLength, then the last CRC in the array
* corresponds to totalLength % stripeLength underlying data bytes.
*
* @return byte representation of composed CRCs.
*/
public byte[] digest() {
if (curPositionInStripe > 0) {
digestOut.write(CrcUtil.intToBytes(curCompositeCrc), 0, CRC_SIZE_BYTES);
curCompositeCrc = 0;
curPositionInStripe = 0;
}
byte[] digestValue = digestOut.toByteArray();
digestOut.reset();
return digestValue;
} | 3.26 |
hadoop_CrcComposer_update_rdh | /**
* Updates with a single additional CRC which corresponds to an underlying
* data size of {@code bytesPerCrc}.
*
* @param crcB
* crcB.
* @param bytesPerCrc
* bytesPerCrc.
* @throws IOException
* raised on errors performing I/O.
*/
public void update(int crcB, long bytesPerCrc) throws IOException {
if (curCompositeCrc == 0) {
curCompositeCrc = crcB;
} else if (bytesPerCrc == bytesPerCrcHint) {
curCompositeCrc = CrcUtil.composeWithMonomial(curCompositeCrc, crcB, f0, crcPolynomial);
} else {
curCompositeCrc = CrcUtil.compose(curCompositeCrc, crcB, bytesPerCrc, crcPolynomial);
}
curPositionInStripe += bytesPerCrc;
if (curPositionInStripe > stripeLength) {
throw new IOException(String.format("Current position in stripe '%d' after advancing by bytesPerCrc '%d' " + "exceeds stripeLength '%d' without stripe alignment.", curPositionInStripe, bytesPerCrc, stripeLength));
} else if (curPositionInStripe == stripeLength) {
// Hit a stripe boundary; flush the curCompositeCrc and reset for next
// stripe.
digestOut.write(CrcUtil.intToBytes(curCompositeCrc), 0, CRC_SIZE_BYTES);
curCompositeCrc = 0;
curPositionInStripe = 0;
}
} | 3.26 |
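A hedged usage sketch of the striped composition described by the digest() and update() javadoc above. It assumes the hadoop-common packaging (org.apache.hadoop.util.CrcComposer and DataChecksum) and that the striped factory referenced by newCrcComposer is public; with a stripe of two chunks, four ingested per-chunk CRCs should collapse into two 4-byte composed CRCs.

```java
import org.apache.hadoop.util.CrcComposer;
import org.apache.hadoop.util.DataChecksum;

public class CrcComposerSketch {
  public static void main(String[] args) throws Exception {
    long bytesPerCrc = 512;
    // One composed CRC per 1024 bytes of underlying data (two chunks per stripe).
    CrcComposer composer = CrcComposer.newStripedCrcComposer(
        DataChecksum.Type.CRC32C, bytesPerCrc, 2 * bytesPerCrc);

    // Feed four per-chunk CRCs (placeholder values, not real checksums).
    for (int crc : new int[] {0x1111, 0x2222, 0x3333, 0x4444}) {
      composer.update(crc, bytesPerCrc);
    }

    // 4 chunks / 2 chunks per stripe = 2 composed CRCs of 4 bytes each.
    byte[] digest = composer.digest();
    System.out.println("digest length = " + digest.length); // expected 8
  }
}
```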
hadoop_BlockMovementStatus_getStatusCode_rdh | /**
*
* @return the status code.
*/
int getStatusCode() {
return code;
} | 3.26 |
hadoop_S3ABlockManager_read_rdh | /**
* Reads into the given {@code buffer} {@code size} bytes from the underlying file
* starting at {@code startOffset}.
*
* @param buffer
* the buffer to read data in to.
* @param startOffset
* the offset at which reading starts.
* @param size
* the number of bytes to read.
* @return number of bytes read.
*/
@Override
public int read(ByteBuffer buffer, long startOffset, int size) throws IOException {
return reader.read(buffer, startOffset, size);
} | 3.26 |
hadoop_RouterHeartbeatService_updateStateStore_rdh | /**
* Update the state of the Router in the State Store.
*/
@VisibleForTesting
synchronized void updateStateStore() {
String routerId = router.getRouterId();
if (routerId == null) {
LOG.error("Cannot heartbeat for router: unknown router id");
return;
}
if (isStoreAvailable()) {
RouterStore routerStore = router.getRouterStateManager();
try {
RouterState record = RouterState.newInstance(routerId, router.getStartTime(), router.getRouterState());
StateStoreVersion stateStoreVersion = StateStoreVersion.newInstance(getStateStoreVersion(MembershipStore.class), getStateStoreVersion(MountTableStore.class));
record.setStateStoreVersion(stateStoreVersion);
// if admin server not started then hostPort will be empty
String hostPort = StateStoreUtils.getHostPortString(router.getAdminServerAddress());
record.setAdminAddress(hostPort);
RouterHeartbeatRequest request = RouterHeartbeatRequest.newInstance(record);
RouterHeartbeatResponse response = routerStore.routerHeartbeat(request);
if (!response.getStatus()) {
LOG.warn("Cannot heartbeat router {}", routerId);
} else {
LOG.debug("Router heartbeat for router {}", routerId);
}
} catch (IOException e) {
LOG.error("Cannot heartbeat router {}", routerId, e);
}
} else {
LOG.warn("Cannot heartbeat router {}: State Store unavailable", routerId);}
} | 3.26 |
hadoop_RouterHeartbeatService_getStateStoreVersion_rdh | /**
* Get the version of the data in the State Store.
*
* @param clazz
* Class in the State Store.
* @return Version of the data.
*/
private <R extends BaseRecord, S extends RecordStore<R>> long getStateStoreVersion(final Class<S> clazz) {
long version = -1;
try {
StateStoreService stateStore = router.getStateStore();
S recordStore = stateStore.getRegisteredRecordStore(clazz);
if (recordStore != null) {
if (recordStore instanceof CachedRecordStore) {
CachedRecordStore<R> cachedRecordStore = ((CachedRecordStore<R>) (recordStore));
List<R> records = cachedRecordStore.getCachedRecords();
for (BaseRecord record : records) {
if (record.getDateModified() > version) {
version = record.getDateModified();
}
}
}
}
} catch (Exception e) {
LOG.error("Cannot get version for {}", clazz, e);
}
return version;
} | 3.26 |
hadoop_RouterHeartbeatService_updateStateAsync_rdh | /**
* Trigger the update of the Router state asynchronously.
*/
protected void updateStateAsync() {
Thread thread = new Thread(this::updateStateStore, "Router Heartbeat Async");
thread.setDaemon(true);
thread.start();
} | 3.26 |
hadoop_CachedSASToken_getExpiry_rdh | /**
* Parse the sasExpiry from the SAS token. The sasExpiry is the minimum
* of the ske and se parameters. The se parameter is required and the
* ske parameter is optional.
*
* @param token
* an Azure Storage SAS token
* @return the sasExpiry or OffsetDateTime.MIN if invalid.
*/
private static OffsetDateTime getExpiry(String token) {
// return MIN for all invalid input, including a null token
if (token == null) {
return OffsetDateTime.MIN;
}
String signedExpiry = "se=";
int signedExpiryLen = 3;
int start = token.indexOf(signedExpiry);
// return MIN if the required se parameter is absent
if (start == (-1)) {
return OffsetDateTime.MIN;
}
start += signedExpiryLen;
// extract the value of se parameter
int end = token.indexOf("&", start);
String seValue = (end == (-1)) ? token.substring(start) : token.substring(start, end);
try {
seValue = URLDecoder.decode(seValue, "utf-8");
} catch (UnsupportedEncodingException ex) {
LOG.error("Error decoding se query parameter ({}) from SAS.", seValue, ex);
return OffsetDateTime.MIN;
}
// parse the ISO 8601 date value; return MIN if invalid
OffsetDateTime seDate = OffsetDateTime.MIN;
try {
seDate = OffsetDateTime.parse(seValue, DateTimeFormatter.ISO_DATE_TIME);
} catch (DateTimeParseException ex) {
LOG.error("Error parsing se query parameter ({}) from SAS.", seValue, ex);
}
String signedKeyExpiry = "ske=";
int signedKeyExpiryLen = 4;
// if ske is present, the sasExpiry is the minimum of ske and se
start = token.indexOf(signedKeyExpiry);
// return seDate if ske is absent
if (start == (-1)) {
return seDate;
}
start += signedKeyExpiryLen;
// extract the value of ske parameter
end = token.indexOf("&", start);
String skeValue = (end == (-1)) ? token.substring(start) : token.substring(start, end);
try {
skeValue = URLDecoder.decode(skeValue, "utf-8");
} catch (UnsupportedEncodingException ex) {
LOG.error("Error decoding ske query parameter ({}) from SAS.", skeValue, ex);
return OffsetDateTime.MIN;
}
// parse the ISO 8601 date value; return MIN if invalid
OffsetDateTime skeDate = OffsetDateTime.MIN;
try {
skeDate = OffsetDateTime.parse(skeValue, DateTimeFormatter.ISO_DATE_TIME);
} catch (DateTimeParseException ex) {
LOG.error("Error parsing ske query parameter ({}) from SAS.", skeValue, ex);
return OffsetDateTime.MIN;
}
return skeDate.isBefore(seDate) ? skeDate : seDate;
} | 3.26 |
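The expiry rule above (effective expiry is the earlier of the required se and optional ske parameters) can be exercised with JDK classes alone. A minimal sketch follows, using a hypothetical token string; the parameter extraction is simplified relative to the snippet.

```java
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;

public class SasExpirySketch {
  // Extract and decode the value of a query parameter, or null if absent.
  static String param(String token, String name) {
    int start = token.indexOf(name + "=");
    if (start < 0) {
      return null;
    }
    start += name.length() + 1;
    int end = token.indexOf('&', start);
    String raw = end < 0 ? token.substring(start) : token.substring(start, end);
    return URLDecoder.decode(raw, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    // Hypothetical SAS token fragment with both se and ske parameters.
    String token = "sig=abc&se=2030-01-01T00%3A00%3A00Z&ske=2029-06-01T00%3A00%3A00Z";
    OffsetDateTime se = OffsetDateTime.parse(param(token, "se"), DateTimeFormatter.ISO_DATE_TIME);
    String skeValue = param(token, "ske");
    OffsetDateTime expiry = se;
    if (skeValue != null) {
      OffsetDateTime ske = OffsetDateTime.parse(skeValue, DateTimeFormatter.ISO_DATE_TIME);
      expiry = ske.isBefore(se) ? ske : se;  // effective expiry is the earlier of the two
    }
    System.out.println("effective expiry = " + expiry);
  }
}
```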
hadoop_CachedSASToken_get_rdh | /**
* Gets the token if still valid.
*
* @return the token or null if it is expired or near sasExpiry.
*/
public String get() {
// quickly return null if not set
if (sasToken == null) {
return null;
}
String token;
OffsetDateTime exp;
synchronized(this) {
token = sasToken;
exp = sasExpiry;
}
boolean isInvalid = isNearExpiry(exp, minExpirationInSeconds);
return isInvalid ? null : token;
} | 3.26 |
hadoop_CachedSASToken_update_rdh | /**
* Updates the cached SAS token and expiry. If the token is invalid, the cached value
* is cleared by setting it to null and the expiry to MIN.
*
* @param token
* an Azure Storage SAS token
*/
public void update(String token) {
// quickly return if token and cached sasToken are the same reference
// Note: use of operator == is intentional
if (token == sasToken) {
return;
}
OffsetDateTime newExpiry = getExpiry(token);
boolean isInvalid = isNearExpiry(newExpiry, minExpirationInSeconds);
synchronized(this) {
if (isInvalid) {
sasToken = null;
sasExpiry = OffsetDateTime.MIN;
} else {
sasToken = token;
sasExpiry = newExpiry;
}
}
} | 3.26 |
hadoop_CachedSASToken_isNearExpiry_rdh | /**
* Checks if the SAS token is expired or near expiration.
*
* @param expiry
* @param minExpiryInSeconds
* @return true if the SAS is near sasExpiry; otherwise false
*/
private static boolean isNearExpiry(OffsetDateTime expiry, long minExpiryInSeconds) {
if (expiry == OffsetDateTime.MIN) {
return true;
}
OffsetDateTime utcNow = OffsetDateTime.now(ZoneOffset.UTC);
return utcNow.until(expiry, SECONDS) <= minExpiryInSeconds;
} | 3.26 |
hadoop_ResourceEstimatorServer_startResourceEstimatorServer_rdh | /**
* Start embedded Hadoop HTTP server.
*
* @return an instance of the started HTTP server.
* @throws IOException
* in case there is an error while starting server.
*/
static ResourceEstimatorServer startResourceEstimatorServer() throws IOException, InterruptedException {
Configuration config = new YarnConfiguration();
config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
ResourceEstimatorServer resourceEstimatorServer = null;
try {
resourceEstimatorServer = new ResourceEstimatorServer();
ShutdownHookManager.get().addShutdownHook(new CompositeServiceShutdownHook(resourceEstimatorServer), 30);
resourceEstimatorServer.init(config);
resourceEstimatorServer.start();
} catch (Throwable t) {
LOGGER.error("Error starting ResourceEstimatorServer", t);
}
return resourceEstimatorServer;
} | 3.26 |
hadoop_ResourceEstimatorServer_shutdown_rdh | /**
* Stop embedded Hadoop HTTP server.
*
* @throws Exception
* in case the HTTP server fails to shut down.
*/
public void shutdown() throws Exception {
LOGGER.info("Stopping resourceestimator service at: {}.", baseURI.toString());
webServer.stop();
} | 3.26 |
hadoop_RouterRMAdminService_finalize_rdh | /**
* Shutdown the chain of interceptors when the object is destroyed.
*/
@Override
protected void finalize() {
rootInterceptor.shutdown();
} | 3.26 |
hadoop_RouterRMAdminService_getPipelines_rdh | /**
* Gets the Request interceptor chains for all the users.
*
* @return the request interceptor chains.
*/
@VisibleForTesting
protected Map<String, RequestInterceptorChainWrapper> getPipelines() {
return this.userPipelineMap;
} | 3.26 |
hadoop_RouterRMAdminService_getRootInterceptor_rdh | /**
* Gets the root request interceptor.
*
* @return the root request interceptor
*/
public synchronized RMAdminRequestInterceptor getRootInterceptor() {
return rootInterceptor;
} | 3.26 |
hadoop_RouterRMAdminService_initializePipeline_rdh | /**
* Initializes the request interceptor pipeline for the specified user.
*
* @param user
*/
private RequestInterceptorChainWrapper initializePipeline(String user) {
synchronized(this.userPipelineMap) {
if (this.userPipelineMap.containsKey(user)) {
LOG.info("Request to start an already existing user: {}" + " was received, so ignoring.", user);
return userPipelineMap.get(user);
}
RequestInterceptorChainWrapper chainWrapper = new RequestInterceptorChainWrapper();
try {
// We should init the pipeline instance after it is created and then
// add to the map, to ensure thread safe.
LOG.info("Initializing request processing pipeline for user: {}.", user);
RMAdminRequestInterceptor interceptorChain = this.createRequestInterceptorChain();
interceptorChain.init(user);
chainWrapper.init(interceptorChain);
} catch (Exception e) {
LOG.error("Init RMAdminRequestInterceptor error for user: {}.", user, e);
throw e;
}
this.userPipelineMap.put(user, chainWrapper);
return chainWrapper;
}
} | 3.26 |
hadoop_RouterRMAdminService_init_rdh | /**
* Initializes the wrapper with the specified parameters.
*
* @param interceptor
* the first interceptor in the pipeline
*/
public synchronized void init(RMAdminRequestInterceptor interceptor) {
this.rootInterceptor = interceptor;
} | 3.26 |
hadoop_RouterRMAdminService_createRequestInterceptorChain_rdh | /**
* This method creates and returns reference of the first interceptor in the
* chain of request interceptor instances.
*
* @return the reference of the first interceptor in the chain
*/
@VisibleForTesting
protected RMAdminRequestInterceptor createRequestInterceptorChain() {
Configuration conf = getConfig();
return RouterServerUtil.createRequestInterceptorChain(conf, YarnConfiguration.ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE, YarnConfiguration.DEFAULT_ROUTER_RMADMIN_INTERCEPTOR_CLASS, RMAdminRequestInterceptor.class);
} | 3.26 |
hadoop_AwsV1BindingSupport_createAWSV1CredentialProvider_rdh | /**
* Create an AWS credential provider from its class by using reflection. The
* class must implement one of the following means of construction, which are
* attempted in order:
*
* <ol>
* <li>a public constructor accepting java.net.URI and
* org.apache.hadoop.conf.Configuration</li>
* <li>a public constructor accepting
* org.apache.hadoop.conf.Configuration</li>
* <li>a public static method named getInstance that accepts no
* arguments and returns an instance of
* com.amazonaws.auth.AWSCredentialsProvider, or</li>
* <li>a public default constructor.</li>
* </ol>
*
* @param conf
* configuration
* @param className
* credential classname
* @param uri
* URI of the FS
* @param key
* configuration key to use
* @return the instantiated class
* @throws InstantiationIOException
* on any instantiation failure, including v1 SDK not found
* @throws IOException
* anything else.
*/
public static AwsCredentialsProvider createAWSV1CredentialProvider(Configuration conf, String className, @Nullable URI uri, final String key) throws IOException {
if (!isAwsV1SdkAvailable()) {
throw unavailable(uri, className, key, "No AWS v1 SDK available");
}
return V1ToV2AwsCredentialProviderAdapter.create(conf, className, uri);
} | 3.26 |
hadoop_AwsV1BindingSupport_isAwsV1SdkAvailable_rdh | /**
* Is the AWS v1 SDK available?
*
* @return true if it was found in the classloader
*/
public static synchronized boolean isAwsV1SdkAvailable() {
return SDK_V1_FOUND;
} | 3.26 |
hadoop_AwsV1BindingSupport_checkForAwsV1Sdk_rdh | /**
* Probe for the AWS v1 SDK being available by looking for
* the class {@link #CREDENTIAL_PROVIDER_CLASSNAME}.
*
* @return true if it was found in the classloader
*/
private static boolean checkForAwsV1Sdk() {
try {
ClassLoader cl = AwsV1BindingSupport.class.getClassLoader();
cl.loadClass(CREDENTIAL_PROVIDER_CLASSNAME);
LOG.debug("v1 SDK class {} found", CREDENTIAL_PROVIDER_CLASSNAME);
return true;
} catch (Exception e) {
LOG.debug("v1 SDK class {} not found", CREDENTIAL_PROVIDER_CLASSNAME, e);
return false;
}
} | 3.26 |
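The probe above is the usual "optional dependency" pattern: try to load a marker class through the same classloader and cache the answer. A self-contained sketch with a hypothetical marker class name:

```java
public class OptionalDependencyProbe {
  // Hypothetical marker class of an optional library (not a real Hadoop/AWS class).
  private static final String MARKER_CLASS = "com.example.optional.SomeProvider";

  // Probe once and cache the result, mirroring the lazy static check above.
  private static volatile Boolean available;

  public static boolean isAvailable() {
    if (available == null) {
      synchronized (OptionalDependencyProbe.class) {
        if (available == null) {
          available = probe();
        }
      }
    }
    return available;
  }

  private static boolean probe() {
    try {
      // Use this class's loader so the probe sees the same classpath.
      OptionalDependencyProbe.class.getClassLoader().loadClass(MARKER_CLASS);
      return true;
    } catch (ClassNotFoundException | LinkageError e) {
      return false;
    }
  }

  public static void main(String[] args) {
    System.out.println("optional library present: " + isAvailable());
  }
}
```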
hadoop_MutableGaugeLong_toString_rdh | /**
*
* @return the value of the metric
*/
public String toString() {
return value.toString();
} | 3.26 |
hadoop_MutableGaugeLong_set_rdh | /**
* Set the value of the metric
*
* @param value
* to set
*/
public void set(long value) {
this.value.set(value);
setChanged();
} | 3.26 |
hadoop_MutableGaugeLong_decr_rdh | /**
* decrement by delta
*
* @param delta
* of the decrement
*/
public void decr(long delta) {
value.addAndGet(-delta);
setChanged();
} | 3.26 |
hadoop_MutableGaugeLong_incr_rdh | /**
* Increment by delta
*
* @param delta
* of the increment
*/
public void incr(long delta) {
value.addAndGet(delta);
setChanged();
} | 3.26 |
hadoop_DeletionService_isTerminated_rdh | /**
* Determine if the service has completely stopped.
* Used only by unit tests
*
* @return true if service has completely stopped
*/
@Private
public boolean isTerminated() {
return (getServiceState() == STATE.STOPPED) && sched.isTerminated();
} | 3.26 |
hadoop_IOStatisticsStoreImpl_lookupQuietly_rdh | /**
* Get a reference to the map type providing the
* value for a specific key, returning null if it not found.
*
* @param <T>
* type of map/return type.
* @param map
* map to look up
* @param key
* statistic name
* @return the value
*/
private static <T> T lookupQuietly(final Map<String, T> map, String key) {
return map.get(key);
} | 3.26 |
hadoop_IOStatisticsStoreImpl_addTimedOperation_rdh | /**
* Add a duration to the min/mean/max statistics, using the
* given prefix and adding a suffix for each specific value.
* <p>
* The update is non-atomic, even though each individual statistic
* is updated thread-safely. If two threads update the values
* simultaneously, at the end of each operation the state will
* be correct. It is only during the sequence that the statistics
* may be observably inconsistent.
* </p>
*
* @param prefix
* statistic prefix
* @param durationMillis
* duration in milliseconds.
*/
@Override
public void addTimedOperation(String prefix, long durationMillis) {
addMeanStatisticSample(prefix + SUFFIX_MEAN, durationMillis);
addMinimumSample(prefix + SUFFIX_MIN, durationMillis);
addMaximumSample(prefix + SUFFIX_MAX, durationMillis);
} | 3.26 |
hadoop_IOStatisticsStoreImpl_getMaximumReference_rdh | /**
* Get a reference to the atomic instance providing the
* value for a specific maximum. This is useful if
* the value is passed around.
*
* @param key
* statistic name
* @return the reference
* @throws NullPointerException
* if there is no entry of that name
*/
@Override
public AtomicLong getMaximumReference(String key) {
return lookup(maximumMap, key);
} | 3.26 |
hadoop_IOStatisticsStoreImpl_getCounterReference_rdh | /**
* Get a reference to the atomic instance providing the
* value for a specific counter. This is useful if
* the value is passed around.
*
* @param key
* statistic name
* @return the reference
* @throws NullPointerException
* if there is no entry of that name
*/
@Override
public AtomicLong getCounterReference(String key) {
return lookup(counterMap, key);
} | 3.26 |
hadoop_IOStatisticsStoreImpl_setAtomicLong_rdh | /**
* Set an atomic long to a value.
*
* @param aLong
* atomic long; may be null
* @param value
* value to set to
*/
private void setAtomicLong(final AtomicLong aLong, final long value) {
if (aLong != null) {
aLong.set(value);
}
} | 3.26 |
hadoop_IOStatisticsStoreImpl_getGaugeReference_rdh | /**
* Get a reference to the atomic instance providing the
* value for a specific gauge. This is useful if
* the value is passed around.
*
* @param key
* statistic name
* @return the reference
* @throws NullPointerException
* if there is no entry of that name
*/
@Override
public AtomicLong getGaugeReference(String key) {
return lookup(gaugeMap, key);
} | 3.26 |
hadoop_IOStatisticsStoreImpl_aggregate_rdh | /**
* Aggregate those statistics which the store is tracking;
* ignore the rest.
*
* @param source
* statistics; may be null
* @return true if a statistics reference was supplied/aggregated.
*/
@Override
public synchronized boolean aggregate(@Nullable final IOStatistics source) {
if (source == null) {
return false;
}
// counters: addition
Map<String, Long> sourceCounters = source.counters();
counterMap.entrySet().forEach(e -> {
Long sourceValue = lookupQuietly(sourceCounters, e.getKey());
if (sourceValue != null) {
e.getValue().addAndGet(sourceValue);
}
});
// gauge: add positive values only
Map<String, Long> sourceGauges = source.gauges();
gaugeMap.entrySet().forEach(e -> {
Long sourceGauge = lookupQuietly(sourceGauges, e.getKey());
if ((sourceGauge != null) && (sourceGauge > 0)) {
e.getValue().addAndGet(sourceGauge);
}
});
// min: min of current and source
Map<String, Long> v20 = source.minimums();
minimumMap.entrySet().forEach(e -> {
Long sourceValue = lookupQuietly(v20, e.getKey());
if (sourceValue != null) {
AtomicLong v22 = e.getValue();
v22.set(aggregateMinimums(v22.get(), sourceValue));
}
});
// max: max of current and source
Map<String, Long> sourceMaximums = source.maximums();
maximumMap.entrySet().forEach(e -> {
Long sourceValue = lookupQuietly(sourceMaximums, e.getKey());
if (sourceValue != null) {
AtomicLong dest = e.getValue();
dest.set(aggregateMaximums(dest.get(), sourceValue));
}
});
// the most complex
Map<String, MeanStatistic> sourceMeans = source.meanStatistics();
f0.entrySet().forEach(e -> {
MeanStatistic current = e.getValue();
MeanStatistic sourceValue = lookupQuietly(sourceMeans, e.getKey());
if (sourceValue != null) {
current.add(sourceValue);
}
});
return true;
} | 3.26 |
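A self-contained sketch of the aggregation rules spelled out in aggregate() above (counters add, minimums keep the smaller value, maximums keep the larger), using plain AtomicLong maps instead of the IOStatistics types:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class StatsAggregationSketch {
  public static void main(String[] args) {
    Map<String, AtomicLong> counters = new HashMap<>();
    Map<String, AtomicLong> minimums = new HashMap<>();
    Map<String, AtomicLong> maximums = new HashMap<>();
    counters.put("op.count", new AtomicLong(10));
    minimums.put("op.min", new AtomicLong(5));
    maximums.put("op.max", new AtomicLong(20));

    // Incoming snapshot to fold in; only keys the store already tracks are used.
    Map<String, Long> sourceCounters = Map.of("op.count", 7L, "other", 99L);
    Map<String, Long> sourceMinimums = Map.of("op.min", 3L);
    Map<String, Long> sourceMaximums = Map.of("op.max", 15L);

    // counters: addition
    counters.forEach((k, v) -> {
      Long s = sourceCounters.get(k);
      if (s != null) {
        v.addAndGet(s);
      }
    });
    // min: keep the smaller of current and source
    minimums.forEach((k, v) -> {
      Long s = sourceMinimums.get(k);
      if (s != null) {
        v.set(Math.min(v.get(), s));
      }
    });
    // max: keep the larger of current and source
    maximums.forEach((k, v) -> {
      Long s = sourceMaximums.get(k);
      if (s != null) {
        v.set(Math.max(v.get(), s));
      }
    });

    System.out.println("op.count = " + counters.get("op.count")); // 17
    System.out.println("op.min   = " + minimums.get("op.min"));   // 3
    System.out.println("op.max   = " + maximums.get("op.max"));   // 20
  }
}
```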
hadoop_IOStatisticsStoreImpl_getMeanStatistic_rdh | /**
* Get a mean statistic.
*
* @param key
* statistic name
* @return the reference
* @throws NullPointerException
* if there is no entry of that name
*/
@Override
public MeanStatistic getMeanStatistic(String key) {
return lookup(f0, key);
} | 3.26 |
hadoop_IOStatisticsStoreImpl_lookup_rdh | /**
* Get a reference to the map type providing the
* value for a specific key, raising an exception if
* there is no entry for that key.
*
* @param <T>
* type of map/return type.
* @param map
* map to look up
* @param key
* statistic name
* @return the value
* @throws NullPointerException
* if there is no entry of that name
*/
private static <T> T lookup(final Map<String, T> map, String key) {
T val = map.get(key);
requireNonNull(val, () -> "unknown statistic " + key);
return val;
} | 3.26 |
hadoop_IOStatisticsStoreImpl_incAtomicLong_rdh | /**
* increment an atomic long and return its value;
* null long is no-op returning 0.
*
* @param aLong
* atomic long; may be null
* @param increment
* amount to increment; negative for a decrement
* @return final value or 0 if the long is null
*/
private long incAtomicLong(final AtomicLong aLong, final long increment) {
if (aLong != null) {
// optimization: zero is a get rather than addAndGet()
return increment != 0 ? aLong.addAndGet(increment) : aLong.get();
} else {
return 0;
}
} | 3.26 |
hadoop_IOStatisticsStoreImpl_trackDuration_rdh | /**
* If the store is tracking the given key, return the
* duration tracker for it. If not tracked, return the
* stub tracker.
*
* @param key
* statistic key prefix
* @param count
* #of times to increment the matching counter in this
* operation.
* @return a tracker.
*/
@Override
public DurationTracker trackDuration(final String key, final long count) {
if (counterMap.containsKey(key)) {
return new StatisticDurationTracker(this, key, count);
} else {
return stubDurationTracker();
}
} | 3.26 |
hadoop_IOStatisticsStoreImpl_getMinimumReference_rdh | /**
* Get a reference to the atomic instance providing the
* value for a specific minimum. This is useful if
* the value is passed around.
*
* @param key
* statistic name
* @return the reference
* @throws NullPointerException
* if there is no entry of that name
*/
@Override
public AtomicLong getMinimumReference(String key) {
return lookup(minimumMap, key);
} | 3.26 |
hadoop_IOStatisticsStoreImpl_reset_rdh | /**
* Reset all statistics.
*/
@Override
public synchronized void reset() {
counterMap.values().forEach(a -> a.set(0));
gaugeMap.values().forEach(a -> a.set(0));
minimumMap.values().forEach(a -> a.set(0));
maximumMap.values().forEach(a -> a.set(0));
f0.values().forEach(a -> a.clear());
} | 3.26 |
hadoop_RMAppKillByClientEvent_getCallerUGI_rdh | /**
* returns the {@link UserGroupInformation} information.
*
* @return UserGroupInformation
*/
public final UserGroupInformation getCallerUGI() {
return callerUGI;
} | 3.26 |
hadoop_RMAppKillByClientEvent_getIp_rdh | /**
* returns the ip address stored in this event.
*
* @return remoteIP
*/
public final InetAddress getIp() {
return ip;
} | 3.26 |
hadoop_DiskBalancerDataNode_getDataNodeIP_rdh | /**
* Returns the IP address of this Node.
*
* @return IP Address string
*/
public String getDataNodeIP() {
return dataNodeIP;
} | 3.26 |
hadoop_DiskBalancerDataNode_getNodeDataDensity_rdh | /**
* Returns NodeDataDensity Metric.
*
* @return double
*/
public double getNodeDataDensity() {
return f0;
} | 3.26 |
hadoop_DiskBalancerDataNode_getVolumeSets_rdh | /**
* Returns the Volume sets on this node.
*
* @return a Map of VolumeSets
*/
public Map<String, DiskBalancerVolumeSet> getVolumeSets() {
return volumeSets;
} | 3.26 |
hadoop_DiskBalancerDataNode_getDataNodePort_rdh | /**
* Returns the Port of this DataNode.
*
* @return Port Number
*/
public int getDataNodePort() {
return dataNodePort;
} | 3.26 |
hadoop_DiskBalancerDataNode_hashCode_rdh | /**
* Returns a hash code value for the object. This method is supported for the
* benefit of hash tables such as those provided by {@link HashMap}.
*/
@Override
public int hashCode() {
return super.hashCode();
} | 3.26 |
hadoop_DiskBalancerDataNode_computeNodeDensity_rdh | /**
* Computes nodes data density.
*
* This metric allows us to compare different nodes and how well the data is
* spread across a set of volumes inside the node.
*/
public void computeNodeDensity() {
double sum = 0;
int volcount = 0;
for (DiskBalancerVolumeSet vset : volumeSets.values()) {
for (DiskBalancerVolume vol : vset.getVolumes()) {
sum += Math.abs(vol.getVolumeDataDensity());
volcount++;
}
}
f0 = sum;
this.volumeCount = volcount;
} | 3.26 |
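A standalone sketch of the node data density metric computed above: the node score is the sum of the absolute per-volume densities, so the more a node's volumes deviate from even utilization, the larger the score. The density values here are hypothetical.

```java
public class NodeDensitySketch {
  public static void main(String[] args) {
    // Hypothetical per-volume data densities (how far each volume's used%
    // sits from the volume set's ideal used%); the sign indicates direction.
    double[] volumeDensities = {0.10, -0.25, 0.05, -0.02};

    double nodeDataDensity = 0;
    int volumeCount = 0;
    for (double d : volumeDensities) {
      nodeDataDensity += Math.abs(d);  // direction does not matter for the node score
      volumeCount++;
    }
    // Larger values mean data is spread less evenly across the node's volumes.
    System.out.printf("volumes=%d nodeDataDensity=%.2f%n", volumeCount, nodeDataDensity);
  }
}
```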
hadoop_DiskBalancerDataNode_setDataNodeUUID_rdh | /**
* Sets Datanode UUID.
*
* @param nodeID
* - Node ID.
*/
public void setDataNodeUUID(String nodeID) {
this.dataNodeUUID = nodeID;
} | 3.26 |
hadoop_DiskBalancerDataNode_isBalancingNeeded_rdh | /**
* Computes if this node needs balancing at all.
*
* @param threshold
* - Percentage
* @return true or false
*/
public boolean isBalancingNeeded(double threshold) {
for (DiskBalancerVolumeSet vSet : getVolumeSets().values()) {
if (vSet.isBalancingNeeded(threshold)) {
return true;
}
}
return false;
} | 3.26 |
hadoop_DiskBalancerDataNode_setDataNodeIP_rdh | /**
* Sets the IP address of this Node.
*
* @param ipaddress
* - IP Address
*/
public void setDataNodeIP(String ipaddress) {
this.dataNodeIP = ipaddress;
} | 3.26 |
hadoop_DiskBalancerDataNode_getDataNodeUUID_rdh | /**
* Returns datanode ID.
*/
public String getDataNodeUUID() {
return dataNodeUUID;
} | 3.26 |
hadoop_DiskBalancerDataNode_equals_rdh | /**
* Indicates whether some other object is "equal to" this one.
*/
@Override
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != getClass())) {
return false;
}
DiskBalancerDataNode that = ((DiskBalancerDataNode) (obj));
return dataNodeUUID.equals(that.getDataNodeUUID());
} | 3.26 |
hadoop_DiskBalancerDataNode_getDataNodeName_rdh | /**
* Get DataNode DNS name.
*
* @return name of the node
*/
public String getDataNodeName() {
return dataNodeName;
} | 3.26 |
hadoop_DiskBalancerDataNode_addVolume_rdh | /**
* Adds a volume to the DataNode.
*
* It is assumed that we have one thread per node hence this call is not
* synchronised neither is the map is protected.
*
* @param volume
* - volume
*/
public void addVolume(DiskBalancerVolume volume) throws Exception {
Preconditions.checkNotNull(volume, "volume cannot be null");
Preconditions.checkNotNull(volumeSets, "volume sets cannot be null");
Preconditions.checkNotNull(volume.getStorageType(), "storage type cannot be null");
String volumeSetKey = volume.getStorageType();
DiskBalancerVolumeSet vSet;
if (volumeSets.containsKey(volumeSetKey)) {
vSet = volumeSets.get(volumeSetKey);
} else {
vSet = new DiskBalancerVolumeSet(volume.isTransient());
vSet.setStorageType(volumeSetKey);
volumeSets.put(volumeSetKey, vSet);
}
vSet.addVolume(volume);
computeNodeDensity();
} | 3.26 |
hadoop_DiskBalancerDataNode_setDataNodeName_rdh | /**
* Sets node's DNS name.
*
* @param name
* - Data node name
*/
public void setDataNodeName(String name) {
this.dataNodeName = name;
} | 3.26 |
hadoop_ServiceMetricsSink_putMetrics_rdh | /**
* Publishes service and component metrics to ATS.
*/
@Override
public void putMetrics(MetricsRecord record) {
if (serviceTimelinePublisher.isStopped()) {
log.warn("ServiceTimelinePublisher has stopped. " + "Not publishing any more metrics to ATS.");
return;
}
boolean isServiceMetrics = false;
boolean isComponentMetrics = false;
String appId = null;
for (MetricsTag tag : record.tags()) {
if (tag.name().equals("type") && tag.value().equals("service")) {
isServiceMetrics = true;
} else if (tag.name().equals("type") && tag.value().equals("component")) {
isComponentMetrics = true;
break;// if component metrics, no more information required from tag so
// break the loop
} else if (tag.name().equals("appId")) {
appId = tag.value();
}
}
if (isServiceMetrics && (appId != null)) {
log.debug("Publishing service metrics. {}", record);
serviceTimelinePublisher.publishMetrics(record.metrics(), appId, ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(), record.timestamp());
} else if (isComponentMetrics) {
log.debug("Publishing Component metrics. {}", record);
serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(), ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp());
}
} | 3.26 |
hadoop_Anonymizer_main_rdh | /**
* The main driver program to use the anonymization utility.
*
* @param args
*/
public static void main(String[] args) {
Anonymizer instance = new Anonymizer();
int result = 0;
try {
result = ToolRunner.run(instance, args);
} catch (Exception e) {
e.printStackTrace(System.err);
System.exit(-1);
}
if (result != 0) {
System.exit(result);
}
return;
} | 3.26 |
hadoop_Anonymizer_run_rdh | /**
* Runs the actual anonymization tool.
*/
public int run() throws Exception {
try {
anonymizeTrace();
} catch (IOException ioe) {
System.err.println("Error running the trace anonymizer!");
ioe.printStackTrace();
System.out.println("\n\nAnonymization unsuccessful!");
return -1;
}
try {
anonymizeTopology();
} catch (IOException ioe) {
System.err.println("Error running the cluster topology anonymizer!");
ioe.printStackTrace();
System.out.println("\n\nAnonymization unsuccessful!");
return -1;
}
statePool.persist();
System.out.println("Anonymization completed successfully!");
return 0;
} | 3.26 |
hadoop_Anonymizer_createJsonGenerator_rdh | // Creates a JSON generator
private JsonGenerator createJsonGenerator(Configuration conf, Path path) throws IOException {
FileSystem outFS = path.getFileSystem(conf);
CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(path);
OutputStream output;
Compressor compressor = null;
if (codec != null) {
compressor = CodecPool.getCompressor(codec);
output = codec.createOutputStream(outFS.create(path), compressor);
} else {
output = outFS.create(path);
}
JsonGenerator outGen = outFactory.createGenerator(output, JsonEncoding.UTF8);
outGen.useDefaultPrettyPrinter();
return outGen;
} | 3.26 |
hadoop_Anonymizer_anonymizeTrace_rdh | // anonymize the job trace file
private void anonymizeTrace() throws Exception {
if (anonymizeTrace) {
System.out.println("Anonymizing trace file: " + inputTracePath);
JobTraceReader reader = null;
JsonGenerator outGen = null;
Configuration conf = getConf();
try {
// create a generator
outGen = createJsonGenerator(conf, f0);
// define the input trace reader
reader = new JobTraceReader(inputTracePath, conf);
// read the plain unanonymized logged job
LoggedJob job = reader.getNext();
while (job != null) {
// write it via an anonymizing channel
outGen.writeObject(job);
// read the next job
job = reader.getNext();
}
System.out.println("Anonymized trace file: " + f0);
} finally {
if (outGen != null) {
outGen.close();
}
if (reader != null) {
reader.close();
}
}
}
} | 3.26 |
hadoop_Anonymizer_anonymizeTopology_rdh | // anonymize the cluster topology file
private void anonymizeTopology() throws Exception {
if (anonymizeTopology) {
System.out.println("Anonymizing topology file: " + f1);
ClusterTopologyReader reader = null;
JsonGenerator outGen = null;
Configuration conf = getConf();
try {
// create a generator
outGen = createJsonGenerator(conf, outputTopologyPath);
// define the input cluster topology reader
reader = new ClusterTopologyReader(f1, conf);
// read the plain unanonymized logged job
LoggedNetworkTopology job = reader.get();
// write it via an anonymizing channel
outGen.writeObject(job);
System.out.println("Anonymized topology file: " + outputTopologyPath);
} finally {
if (outGen != null) {
outGen.close();
}
}
}
} | 3.26 |
hadoop_FsStatus_getCapacity_rdh | /**
* Return the capacity in bytes of the file system.
*
* @return capacity.
*/
public long getCapacity() {
return capacity;
} | 3.26 |
hadoop_FsStatus_getUsed_rdh | /**
* Return the number of bytes used on the file system.
*
* @return used.
*/
public long getUsed() {
return used;
} | 3.26 |
hadoop_FsStatus_m0_rdh | // ////////////////////////////////////////////////
// Writable
// ////////////////////////////////////////////////
@Override
public void m0(DataOutput out) throws IOException {
out.writeLong(capacity);
out.writeLong(used);
out.writeLong(remaining);
} | 3.26 |
hadoop_FsStatus_getRemaining_rdh | /**
* Return the number of remaining bytes on the file system.
*
* @return remaining.
*/
public long getRemaining() {
return remaining;
} | 3.26 |
hadoop_OBSBlockOutputStream_waitForAllPartUploads_rdh | /**
* Block awaiting all outstanding uploads to complete.
*
* @return list of results
* @throws IOException
* IO Problems
*/
private List<PartEtag> waitForAllPartUploads() throws IOException {
LOG.debug("Waiting for {} uploads to complete", partETagsFutures.size());
try {
return Futures.allAsList(partETagsFutures).get();
} catch (InterruptedException ie) {
LOG.warn("Interrupted partUpload", ie);
LOG.debug("Cancelling futures");
for (ListenableFuture<PartEtag> future : partETagsFutures) {
future.cancel(true);
}
// abort multipartupload
this.abort();
throw new IOException((("Interrupted multi-part upload with id '" + uploadId) + "' to ") + key);
} catch (ExecutionException ee) {
// there is no way of recovering so abort
// cancel all partUploads
LOG.debug("While waiting for upload completion",
ee);
LOG.debug("Cancelling futures");
for (ListenableFuture<PartEtag> future : partETagsFutures) {
future.cancel(true);
}
// abort multipartupload
this.abort();
throw OBSCommonUtils.extractException((("Multi-part upload with id '" + uploadId) + "' to ") + key, key, ee);
}
} | 3.26 |
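A standalone sketch of the same "wait for every part, cancel and abort on failure" pattern, using the JDK's CompletableFuture rather than the Guava ListenableFuture used by the snippet; the part results are hypothetical etag strings.

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

public class WaitForPartsSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);

    // Hypothetical part uploads; each future yields a part "etag" string.
    List<CompletableFuture<String>> parts = List.of(
        CompletableFuture.supplyAsync(() -> "etag-1", pool),
        CompletableFuture.supplyAsync(() -> "etag-2", pool),
        CompletableFuture.supplyAsync(() -> "etag-3", pool));

    try {
      // Block until every part has completed, then collect the results in order.
      CompletableFuture.allOf(parts.toArray(new CompletableFuture[0])).get();
      List<String> etags = parts.stream().map(CompletableFuture::join).collect(Collectors.toList());
      System.out.println("all parts uploaded: " + etags);
      // a real client would now complete the multipart upload with these etags
    } catch (InterruptedException | ExecutionException e) {
      // One upload failed or the wait was interrupted: cancel the rest and
      // abort the multipart upload so no orphaned parts are left behind.
      parts.forEach(f -> f.cancel(true));
      System.err.println("aborting multipart upload: " + e);
    } finally {
      pool.shutdown();
    }
  }
}
```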
hadoop_OBSBlockOutputStream_createBlockIfNeeded_rdh | /**
* Demand create a destination block.
*
* @return the active block; null if there isn't one.
* @throws IOException
* on any failure to create
*/
private synchronized DataBlock createBlockIfNeeded() throws IOException {
if (activeBlock == null) {
blockCount++;
if (blockCount >= OBSConstants.MAX_MULTIPART_COUNT) {
LOG.warn(("Number of partitions in stream exceeds limit for OBS: " + OBSConstants.MAX_MULTIPART_COUNT) + " write may fail.");
}
activeBlock = blockFactory.create(blockCount, this.blockSize);
}
return activeBlock;
} | 3.26 |
hadoop_OBSBlockOutputStream_uploadWriteBlocks_rdh | /**
* Upload block to obs.
*
* @param block
* block
* @param hasBlock
* whether the stream currently has an active block
* @throws IOException
* io exception
*/
private synchronized void uploadWriteBlocks(final OBSDataBlocks.DataBlock block, final boolean hasBlock) throws IOException {
if (f1 == null) {
if (hasBlock) {
// no uploads of data have taken place, put the single block
// up. This must happen even if there is no data, so that 0 byte
// files are created.
putObjectIfNeedAppend();
}
} else {
// there has already been at least one block scheduled for upload;
// put up the current then wait
if (hasBlock && block.hasData()) {
// send last part
uploadCurrentBlock();
}
// wait for the partial uploads to finish
final List<PartEtag> partETags = f1.waitForAllPartUploads();
// then complete the operation
f1.complete(partETags);
}
LOG.debug("Upload complete for {}", writeOperationHelper.toString(key));
} | 3.26 |
hadoop_OBSBlockOutputStream_clearHFlushOrSync_rdh | /**
* Clear for hflush or hsync.
*/
private synchronized void clearHFlushOrSync() {
appendAble.set(true);
f1 = null;
} | 3.26 |
hadoop_OBSBlockOutputStream_appendFsFile_rdh | /**
* Append posix file.
*
* @throws IOException
* any problem
*/
private synchronized void appendFsFile() throws IOException {
LOG.debug("bucket is posix, to append file. key is {}", key);
final OBSDataBlocks.DataBlock block = getActiveBlock();
WriteFileRequest writeFileReq;
if (block instanceof OBSDataBlocks.DiskBlock) {
writeFileReq = OBSCommonUtils.newAppendFileRequest(fs, key, objectLen, ((File) (block.startUpload())));
} else {
writeFileReq = OBSCommonUtils.newAppendFileRequest(fs, key, objectLen, ((InputStream) (block.startUpload())));
}
OBSCommonUtils.appendFile(fs, writeFileReq);
objectLen += block.dataSize();
} | 3.26 |
hadoop_OBSBlockOutputStream_abort_rdh | /**
* Abort a multi-part upload. Retries are attempted on failures.
* IOExceptions are caught; this is expected to be run as a cleanup
* process.
*/
void abort() {
String operation = String.format("Aborting multi-part upload for '%s', id '%s", writeOperationHelper, uploadId);
try {
LOG.debug(operation);
writeOperationHelper.abortMultipartUpload(key, uploadId);
} catch (ObsException e) {
LOG.warn("Unable to abort multipart upload, you may need to purge " + "uploaded parts", e); }
} | 3.26 |
hadoop_OBSBlockOutputStream_m0_rdh | /**
* Close the stream.
*
* <p>This will not return until the upload is complete or the attempt to
* perform the upload has failed. Exceptions raised in this method are
* indicative that the write has failed and data is at risk of being lost.
*
* @throws IOException
* on any failure.
*/
@Override
public synchronized void m0() throws IOException {
if (closed.getAndSet(true)) {
// already closed
LOG.debug("Ignoring close() as stream is already closed");
return;
}
if (f0.get()) {
String closeWarning = String.format("closed has error. bs : pre write obs[%s] has error.", key);
LOG.warn(closeWarning);
throw new IOException(closeWarning);
}
// do upload
completeCurrentBlock();
// clear
clearHFlushOrSync();
// All end of write operations, including deleting fake parent
// directories
writeOperationHelper.writeSuccessful(key);
} | 3.26 |
hadoop_OBSBlockOutputStream_putObject_rdh | /**
* Upload the current block as a single PUT request; if the buffer is empty a
* 0-byte PUT will be invoked, as it is needed to create an entry at the far
* end.
*
* @throws IOException
* any problem.
*/
private synchronized void putObject() throws IOException {
LOG.debug("Executing regular upload for {}", writeOperationHelper.toString(key));
final OBSDataBlocks.DataBlock block = getActiveBlock();
clearActiveBlock();
final int size = block.dataSize();
final PutObjectRequest putObjectRequest;
if (block instanceof OBSDataBlocks.DiskBlock) {
putObjectRequest = writeOperationHelper.newPutRequest(key, ((File) (block.startUpload())));
} else {
putObjectRequest = writeOperationHelper.newPutRequest(key, ((InputStream) (block.startUpload())), size);
}
putObjectRequest.setAcl(fs.getCannedACL());
fs.getSchemeStatistics().incrementWriteOps(1);
try {
// the putObject call automatically closes the input
// stream afterwards.
writeOperationHelper.putObject(putObjectRequest);
} finally {
OBSCommonUtils.closeAll(block);
}
} | 3.26 |
hadoop_OBSBlockOutputStream_uploadBlockAsync_rdh | /**
* Upload a block of data asynchronously.
*
* @param block
* block to upload
* @throws IOException
* upload failure
*/
private void uploadBlockAsync(final OBSDataBlocks.DataBlock block) throws IOException {
LOG.debug("Queueing upload of {}", block);
final int size = block.dataSize();
final int currentPartNumber = partETagsFutures.size() + 1;
final UploadPartRequest request;
if (block instanceof OBSDataBlocks.DiskBlock) {
request = writeOperationHelper.newUploadPartRequest(key, uploadId, currentPartNumber, size, ((File) (block.startUpload())));
} else {
request = writeOperationHelper.newUploadPartRequest(key, uploadId, currentPartNumber, size, ((InputStream) (block.startUpload())));
}
ListenableFuture<PartEtag> partETagFuture = executorService.submit(() -> { // this is the queued upload operation
LOG.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
// do the upload
PartEtag partETag = null;
try {
if (mockUploadPartError) {
throw new ObsException("mock upload part error");
}
UploadPartResult uploadPartResult = OBSCommonUtils.uploadPart(fs, request);
partETag = new PartEtag(uploadPartResult.getEtag(), uploadPartResult.getPartNumber());
if (LOG.isDebugEnabled()) {
LOG.debug("Completed upload of {} to part {}", block, partETag);
}
} catch (ObsException e) {
// catch all exception
hasException.set(true);
LOG.error("UploadPart failed (ObsException). {}", OBSCommonUtils.translateException("UploadPart", key, e).getMessage());
} finally {
// close the stream and block
OBSCommonUtils.closeAll(block);
}
return partETag;
});
partETagsFutures.add(partETagFuture);
} | 3.26 |
hadoop_OBSBlockOutputStream_complete_rdh | /**
* This completes a multipart upload. Sometimes it fails; here retries are
* handled to avoid losing all data on a transient failure.
*
* @param partETags
* list of partial uploads
* @return result for completing multipart upload
* @throws IOException
* on any problem
*/
private CompleteMultipartUploadResult complete(final List<PartEtag> partETags) throws IOException {
String operation = String.format("Completing multi-part upload for key '%s'," + " id '%s' with %s partitions ", key, uploadId, partETags.size());
try {
LOG.debug(operation);
return writeOperationHelper.completeMultipartUpload(key, uploadId, partETags);
} catch (ObsException e) {
throw OBSCommonUtils.translateException(operation, key, e);
}
} | 3.26 |
hadoop_OBSBlockOutputStream_putObjectIfNeedAppend_rdh | /**
* If flush has take place, need to append file, else to put object.
*
* @throws IOException
* any problem in append or put object
*/
private synchronized void putObjectIfNeedAppend() throws IOException {
if (appendAble.get() && fs.exists(OBSCommonUtils.keyToQualifiedPath(fs, key))) {
appendFsFile();
} else {
putObject();
}
} | 3.26 |
hadoop_OBSBlockOutputStream_clearActiveBlock_rdh | /**
* Clear the active block.
*/
private synchronized void clearActiveBlock() {
if (activeBlock != null) {
LOG.debug("Clearing active block");
}
activeBlock = null;
} | 3.26 |
hadoop_OBSBlockOutputStream_mockPutPartError_rdh | /**
* Set mock error.
*
* @param isException
* mock error
*/
@VisibleForTesting
public void mockPutPartError(final boolean isException) {
this.mockUploadPartError = isException;
} | 3.26 |
hadoop_OBSBlockOutputStream_hasActiveBlock_rdh | /**
* Predicate to query whether or not there is an active block.
*
* @return true if there is an active block.
*/
private synchronized boolean hasActiveBlock() {
return activeBlock != null;
} | 3.26 |
hadoop_OBSBlockOutputStream_checkOpen_rdh | /**
* Check for the filesystem being open.
*
* @throws IOException
* if the filesystem is closed.
*/
private void checkOpen() throws IOException {
if (closed.get()) {
throw new IOException(("Filesystem " + writeOperationHelper.toString(key)) + " closed");
}
} | 3.26 |
hadoop_OBSBlockOutputStream_write_rdh | /**
* Writes a range of bytes from to the memory buffer. If this causes the
* buffer to reach its limit, the actual upload is submitted to the threadpool
* and the remainder of the array is written to memory (recursively).
*
* @param source
* byte array containing
* @param offset
* offset in array where to start
* @param len
* number of bytes to be written
* @throws IOException
* on any problem
*/
@Override
public synchronized void write(@NotNull final byte[] source, final int offset, final int len) throws IOException {
if (f0.get()) {
String closeWarning = String.format("write has error. bs : pre upload obs[%s] has error.", key);
LOG.warn(closeWarning);
throw new IOException(closeWarning);
}
OBSDataBlocks.validateWriteArgs(source, offset, len);
checkOpen();
if (len == 0) {
return;
}
OBSDataBlocks.DataBlock block = createBlockIfNeeded();
int written = block.write(source, offset, len);
int remainingCapacity = block.remainingCapacity();
try {
innerWrite(source, offset, len, written, remainingCapacity);
} catch (IOException e) {
LOG.error("Write data for key {} of bucket {} error, error message {}", key, fs.getBucket(), e.getMessage());
throw e;
}
} | 3.26 |
hadoop_OBSBlockOutputStream_flush_rdh | /**
* The flush operation does not trigger an upload; that awaits the next block
* being full. What it does do is call {@code flush()} on the current block,
* leaving it to choose how to react.
*
* @throws IOException
* Any IO problem.
*/
@Override
public synchronized void flush() throws IOException {
checkOpen();
OBSDataBlocks.DataBlock dataBlock = getActiveBlock();
if (dataBlock != null) {
dataBlock.flush();
}
} | 3.26 |
hadoop_OBSBlockOutputStream_flushOrSync_rdh | /**
* Flush local file or multipart to obs. focus: not posix bucket is not
* support
*
* @throws IOException
* io exception
*/
private synchronized void flushOrSync() throws IOException {
checkOpen();
if (f0.get()) {
String flushWarning = String.format("flushOrSync has error. bs : pre write obs[%s] has error.", key);
LOG.warn(flushWarning);
throw new IOException(flushWarning);
}
if (fs.isFsBucket()) {
// upload
flushCurrentBlock();
// clear
clearHFlushOrSync();
} else {
LOG.warn("not posix bucket, not support hflush or hsync.");
flush();
}
} | 3.26 |
hadoop_OBSBlockOutputStream_uploadCurrentBlock_rdh | /**
* Start an asynchronous upload of the current block.
*
* @throws IOException
* Problems opening the destination for upload or
* initializing the upload.
*/
private synchronized void uploadCurrentBlock() throws IOException {
Preconditions.checkState(hasActiveBlock(), "No active block");
LOG.debug("Writing block # {}", blockCount);
try {
if (f1 == null) {
LOG.debug("Initiating Multipart upload");
f1 = new MultiPartUpload();
}
f1.uploadBlockAsync(getActiveBlock());
} catch (IOException e) {
f0.set(true);
LOG.error("Upload current block on ({}/{}) failed.", fs.getBucket(), key, e);
throw e;
} finally {
// set the block to null, so the next write will create a new block.
clearActiveBlock();
}
} | 3.26 |
hadoop_OBSBlockOutputStream_getActiveBlock_rdh | /**
* Synchronized accessor to the active block.
*
* @return the active block; null if there isn't one.
*/
synchronized DataBlock getActiveBlock() {
return activeBlock;
} | 3.26 |
hadoop_SnappyDecompressor_setInput_rdh | /**
* Sets input data for decompression.
* This should be called if and only if {@link #needsInput()} returns
* <code>true</code> indicating that more input data is required.
* (Both native and non-native versions of various Decompressors require
* that the data passed in via <code>b[]</code> remain unmodified until
* the caller is explicitly notified--via {@link #needsInput()}--that the
* buffer may be safely modified. With this requirement, an extra
* buffer-copy can be avoided.)
*
* @param b
* Input data
* @param off
* Start offset
* @param len
* Length
*/
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (((off < 0) || (len < 0)) || (off > (b.length - len))) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
m0();
// Reinitialize snappy's output direct-buffer
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
} | 3.26 |
hadoop_SnappyDecompressor_setDictionary_rdh | /**
* Does nothing.
*/
@Override
public void setDictionary(byte[] b, int off, int len) {
// do nothing
}
/**
* Returns true if the input data buffer is empty and
* {@link #setInput(byte[], int, int)} should be called to
* provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
{@link #setInput(byte[], int, int)} | 3.26 |
hadoop_SnappyDecompressor_m0_rdh | /**
* If a write would exceed the capacity of the direct buffers, it is set
* aside to be loaded by this function while the compressed data are
* consumed.
*/
void m0() {
compressedDirectBufLen = Math.min(userBufLen, directBufferSize);
// Reinitialize snappy's input direct buffer
compressedDirectBuf.rewind();
((ByteBuffer) (compressedDirectBuf)).put(userBuf, userBufOff, compressedDirectBufLen);
// Note how much data is being fed to snappy
userBufOff += compressedDirectBufLen;
userBufLen -= compressedDirectBufLen;
} | 3.26 |
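A standalone sketch of the chunked copy shown in the m0() snippet above: copy at most directBufferSize bytes of the user buffer into a direct ByteBuffer, then advance the offset and shrink the remaining length. The buffer sizes here are arbitrary.

```java
import java.nio.ByteBuffer;

public class DirectBufferFeedSketch {
  public static void main(String[] args) {
    byte[] userBuf = new byte[10_000];   // pretend this is compressed input
    int userBufOff = 0;
    int userBufLen = userBuf.length;

    int directBufferSize = 4096;
    ByteBuffer compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);

    // Feed the direct buffer one slice at a time, like the m0() snippet above.
    while (userBufLen > 0) {
      int chunk = Math.min(userBufLen, directBufferSize);
      compressedDirectBuf.rewind();
      compressedDirectBuf.put(userBuf, userBufOff, chunk);
      userBufOff += chunk;   // note how much data has been consumed
      userBufLen -= chunk;
      // ... a decompressor would now consume 'chunk' bytes from the direct buffer
      System.out.println("fed " + chunk + " bytes, " + userBufLen + " remaining");
    }
  }
}
```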
hadoop_SnappyDecompressor_end_rdh | /**
* Resets decompressor and input and output buffers so that a new set of
* input data can be processed.
*/
@Override
public void end() {
// do nothing
} | 3.26 |