name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_Chain_setup_rdh | /**
* Setup the chain.
*
* @param jobConf
* chain job's {@link Configuration}.
*/
@SuppressWarnings("unchecked")
void setup(Configuration jobConf) {
String prefix = getPrefix(isMap);
int index = jobConf.getInt(prefix + CHAIN_MAPPER_SIZE, 0);
for (int i = 0; i < index; i++) {
Class<? extends Mapper> klass = jobConf.getClass((prefix + CHAIN_MAPPER_CLASS) + i, null, Mapper.class);
Configuration mConf = getChainElementConf(jobConf, (prefix + CHAIN_MAPPER_CONFIG) + i);
confList.add(mConf);
Mapper mapper = ReflectionUtils.newInstance(klass, mConf);
mappers.add(mapper);
}
Class<? extends Reducer> klass = jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null, Reducer.class);
if (klass != null) {
rConf = getChainElementConf(jobConf, prefix + CHAIN_REDUCER_CONFIG);
reducer = ReflectionUtils.newInstance(klass, rConf);
}
} | 3.26 |
hadoop_Chain_close_rdh | /**
* Close this <code>RecordWriter</code> to future operations.
*
* @param context
* the context of the task
* @throws IOException
*/
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
if (outputQueue != null) {
// write end of input
outputQueue.enqueue(new KeyValuePair<KEYOUT, VALUEOUT>(true));
}
} | 3.26 |
hadoop_MagicCommitTracker_outputImmediatelyVisible_rdh | /**
* Flag to indicate whether the output becomes visible as soon as the stream
* is closed.
*
* @return false: the output only becomes visible once the commit completes.
*/
@Override
public boolean outputImmediatelyVisible() {
return false;
} | 3.26 |
hadoop_MagicCommitTracker_aboutToComplete_rdh | /**
* Complete operation: generate the final commit data, put it.
*
* @param uploadId
* Upload ID
* @param parts
* list of parts
* @param bytesWritten
* bytes written
* @param iostatistics
* nullable IO statistics
* @return false, indicating that the multipart upload must not be completed here.
* @throws IOException
* any IO problem.
* @throws IllegalArgumentException
* bad argument
*/
@Override
public boolean aboutToComplete(String uploadId, List<CompletedPart> parts, long bytesWritten, final IOStatistics iostatistics) throws IOException {
Preconditions.checkArgument(StringUtils.isNotEmpty(uploadId), "empty/null upload ID: " + uploadId);
Preconditions.checkArgument(parts != null, "No uploaded parts list");
Preconditions.checkArgument(!parts.isEmpty(), "No uploaded parts to save");
// put a 0-byte file with the name of the original under-magic path
// Add the final file length as a header
// this is done before the task commit, so its duration can be
// included in the statistics
Map<String, String> headers = new HashMap<>();
headers.put(X_HEADER_MAGIC_MARKER, Long.toString(bytesWritten));
PutObjectRequest v1 = writer.createPutObjectRequest(originalDestKey, 0, new PutObjectOptions(true, null, headers), false);
upload(v1, new ByteArrayInputStream(EMPTY));
// build the commit summary
SinglePendingCommit commitData = new SinglePendingCommit();
commitData.touch(System.currentTimeMillis());
commitData.setDestinationKey(getDestKey());
commitData.setBucket(bucket);
commitData.setUri(f0.toUri().toString());
commitData.setUploadId(uploadId);
commitData.setText("");
commitData.setLength(bytesWritten);
commitData.bindCommitData(parts);
commitData.setIOStatistics(new IOStatisticsSnapshot(iostatistics));
byte[] bytes = commitData.toBytes(SinglePendingCommit.serializer());
LOG.info("Uncommitted data pending to file {};" + " commit metadata for {} parts in {}. size: {} byte(s)", f0.toUri(), parts.size(), pendingPartKey, bytesWritten);
LOG.debug("Closed MPU to {}, saved commit information to {}; data=:\n{}", f0, pendingPartKey, commitData);
PutObjectRequest put = writer.createPutObjectRequest(pendingPartKey, bytes.length, null, false);
upload(put, new ByteArrayInputStream(bytes));
return false;
} | 3.26 |
hadoop_MagicCommitTracker_initialize_rdh | /**
* Initialize the tracker.
*
* @return true, indicating that the multipart commit must start.
* @throws IOException
* any IO problem.
*/
@Override
public boolean initialize() throws IOException {
return true;
} | 3.26 |
hadoop_MagicCommitTracker_upload_rdh | /**
* PUT an object.
*
* @param request
* the request
* @param inputStream
* input stream of data to be uploaded
* @throws IOException
* on problems
*/
@Retries.RetryTranslated
private void upload(PutObjectRequest request, InputStream inputStream) throws IOException {
trackDurationOfInvocation(trackerStatistics, COMMITTER_MAGIC_MARKER_PUT.getSymbol(), () -> writer.putObject(request, PutObjectOptions.keepingDirs(), new S3ADataBlocks.BlockUploadData(inputStream), false, null));
} | 3.26 |
hadoop_ConfigurationBasicValidator_validate_rdh | /**
* This method handles the base case where the configValue is null: depending on throwIfInvalid it either throws or returns the defaultVal;
* otherwise it returns null, indicating that the configValue needs further validation.
*
* @param configValue
* the configuration value set by the user
* @return the defaultVal if the configValue is null and not required to be set; null if the configValue is not null
* @throws InvalidConfigurationValueException
* in case the configValue is null and required to be set
*/
public T validate(final String configValue) throws InvalidConfigurationValueException {
if (configValue == null) {
if (this.throwIfInvalid) {
throw new InvalidConfigurationValueException(this.configKey);
}
return this.defaultVal;
}
return null;
} | 3.26 |
hadoop_IOStatisticsContext_enabled_rdh | /**
* Static probe to check whether thread-level IO statistics are enabled.
*
* @return true if thread-level IO statistics are enabled.
*/
static boolean enabled() {
return IOStatisticsContextIntegration.isIOStatisticsThreadLevelEnabled();
} | 3.26 |
hadoop_IOStatisticsContext_setThreadIOStatisticsContext_rdh | /**
* Set the IOStatisticsContext for the current thread.
*
* @param statisticsContext
* IOStatistics context instance for the
* current thread. If null, the context is reset.
*/
static void setThreadIOStatisticsContext(IOStatisticsContext statisticsContext) {
IOStatisticsContextIntegration.setThreadIOStatisticsContext(statisticsContext);
} | 3.26 |
hadoop_IOStatisticsContext_getCurrentIOStatisticsContext_rdh | /**
* Get the context's IOStatisticsContext.
*
* @return instance of IOStatisticsContext for the context.
*/
static IOStatisticsContext getCurrentIOStatisticsContext() {
// the null check is just a safety check to highlight exactly where a null value would
// be returned if HADOOP-18456 has resurfaced.
return requireNonNull(IOStatisticsContextIntegration.getCurrentIOStatisticsContext(), "Null IOStatisticsContext");
} | 3.26 |
hadoop_Signer_verifyAndExtract_rdh | /**
* Verifies a signed string and extracts the original string.
*
* @param signedStr
* the signed string to verify and extract.
* @return the extracted original string.
* @throws SignerException
* thrown if the given string is not a signed string or if the signature is invalid.
*/
public String verifyAndExtract(String signedStr) throws SignerException {
int index = signedStr.lastIndexOf(SIGNATURE);
if (index == (-1)) {
throw new SignerException("Invalid signed text: " + signedStr);
}
String originalSignature = signedStr.substring(index + SIGNATURE.length());
String rawValue = signedStr.substring(0, index);
checkSignatures(rawValue, originalSignature);
return rawValue;
} | 3.26 |
hadoop_Signer_sign_rdh | /**
* Returns a signed string.
*
* @param str
* string to sign.
* @return the signed string.
*/
public synchronized String sign(String str) {
if ((str == null) || (str.length() == 0)) {
throw new IllegalArgumentException("NULL or empty string to sign");
}
byte[] secret = secretProvider.getCurrentSecret();
String signature = computeSignature(secret, str);
return (str + SIGNATURE) + signature;
} | 3.26 |
hadoop_Signer_computeSignature_rdh | /**
* Returns the signature of a string.
*
* @param secret
* The secret to use
* @param str
* string to sign.
* @return the signature for the string.
*/
protected String computeSignature(byte[] secret, String str) {
try {
SecretKeySpec key = new SecretKeySpec(secret, SIGNING_ALGORITHM);
Mac mac = Mac.getInstance(SIGNING_ALGORITHM);
mac.init(key);
byte[] sig = mac.doFinal(StringUtils.getBytesUtf8(str));
return new Base64(0).encodeToString(sig);
} catch (NoSuchAlgorithmException | InvalidKeyException ex) {
throw new RuntimeException("It should not happen, " + ex.getMessage(), ex);
}
} | 3.26 |
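The computeSignature method above is a standard HMAC construction over the UTF-8 bytes of the input, Base64-encoded. A minimal JDK-only sketch of the same pattern (HmacSHA256, the key bytes, and the signed string are illustrative assumptions, not taken from the snippet):

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class HmacSignatureSketch {
  // Compute a Base64-encoded HMAC signature of str under secret, mirroring computeSignature.
  static String sign(byte[] secret, String str) throws Exception {
    Mac mac = Mac.getInstance("HmacSHA256");
    mac.init(new SecretKeySpec(secret, "HmacSHA256"));
    byte[] sig = mac.doFinal(str.getBytes(StandardCharsets.UTF_8));
    return Base64.getEncoder().encodeToString(sig);
  }

  public static void main(String[] args) throws Exception {
    byte[] secret = "not-a-real-secret".getBytes(StandardCharsets.UTF_8);  // placeholder key
    System.out.println(sign(secret, "u=alice&t=kerberos&e=1700000000000"));
  }
}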
hadoop_StreamCapabilitiesPolicy_unbuffer_rdh | /**
* Implement the policy for {@link CanUnbuffer#unbuffer()}.
*
* @param in
* the input stream
*/
public static void unbuffer(InputStream in) {
try {
if ((in instanceof StreamCapabilities) && ((StreamCapabilities) (in)).hasCapability(StreamCapabilities.UNBUFFER)) {
((CanUnbuffer) (in)).unbuffer();
} else {
LOG.debug(((in.getClass().getName() + ":") + " does not implement StreamCapabilities") + " and the unbuffer capability");
}
} catch (ClassCastException e) {
throw new UnsupportedOperationException((in.getClass().getName() + ": ") + CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE);
}
} | 3.26 |
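A hedged usage sketch for the policy above: callers normally probe hasCapability on the stream before relying on unbuffer. The filesystem URI and path here are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

public class UnbufferSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.dat"))) {
      in.read(new byte[4096]);
      if (in.hasCapability(StreamCapabilities.UNBUFFER)) {
        in.unbuffer(); // free buffers and connections while keeping the stream open
      }
    }
  }
}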
hadoop_RBFMetrics_m1_rdh | // NameNodeMXBean
@Override
public String m1() {
double median = 0;
double max = 0;
double min = 0;
double dev = 0;
final Map<String, Map<String, Object>> info = new HashMap<>();
try {
DatanodeInfo[] live = null;
if (this.enableGetDNUsage) {
RouterRpcServer rpcServer = this.router.getRpcServer();
live = rpcServer.getDatanodeReport(DatanodeReportType.LIVE, false, timeOut);
} else {
LOG.debug("Getting node usage is disabled.");
}
if ((live != null) && (live.length > 0)) {
double[] usages = new double[live.length];
int i = 0;
for (DatanodeInfo dn : live) {
usages[i++] = dn.getDfsUsedPercent();
}
Arrays.sort(usages);
median = usages[usages.length / 2];
max = usages[usages.length - 1];
min = usages[0];
StandardDeviation deviation = new StandardDeviation();
dev = deviation.evaluate(usages);
}
} catch (IOException e) {
LOG.error("Cannot get the live nodes: {}", e.getMessage());
}
final Map<String, Object> innerInfo = new HashMap<>();
innerInfo.put("min", StringUtils.format("%.2f%%", min));
innerInfo.put("median", StringUtils.format("%.2f%%", median));
innerInfo.put("max", StringUtils.format("%.2f%%", max));
innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev));
info.put("nodeUsage", innerInfo);
return JSON.toString(info);
}
@Override
@Metric({ "NumBlocks",
"Total number of blocks" } | 3.26 |
hadoop_RBFMetrics_getNameserviceAggregatedLong_rdh | /**
* Get the aggregated value for a method for all nameservices.
*
* @param f
* Method reference
* @return Aggregated long.
*/
private long getNameserviceAggregatedLong(ToLongFunction<MembershipStats> f) {
try {
return getActiveNamenodeRegistrations().stream().map(MembershipState::getStats).collect(Collectors.summingLong(f));
} catch (IOException e) {
LOG.error("Unable to extract metrics: {}", e.getMessage());
return 0;
}
} | 3.26 |
hadoop_RBFMetrics_m5_rdh | /**
* Returns all serializable fields in the object.
*
* @return Map with the fields.
*/
private static Map<String, Class<?>> m5(BaseRecord record) {
Map<String, Class<?>> getters = new HashMap<>();
for (Method m : record.getClass().getDeclaredMethods()) {
if (m.getName().startsWith("get")) {
try {
Class<?> type = m.getReturnType();
char[] c = m.getName().substring(3).toCharArray();
c[0] = Character.toLowerCase(c[0]);
String key = new String(c);
getters.put(key, type);
} catch (Exception e) {
LOG.error("Cannot execute getter {} on {}", m.getName(), record);
}
}
}
return getters;
} | 3.26 |
hadoop_RBFMetrics_close_rdh | /**
* Unregister the JMX beans.
*/
public void close() {
if (this.routerBeanName != null) {
MBeans.unregister(routerBeanName);
}
if (this.federationBeanName != null) {
MBeans.unregister(federationBeanName);
}
MetricsSystem ms = DefaultMetricsSystem.instance();
ms.unregisterSource(RBFMetrics.class.getName());
} | 3.26 |
hadoop_RBFMetrics_getSecondsSince_rdh | /**
* Get the number of seconds passed since a date.
*
* @param timeMs
* to use as a reference.
* @return Seconds since the date.
*/
private static long getSecondsSince(long timeMs) {
if (timeMs < 0) {
return -1;
}
return (now() - timeMs) / 1000;
} | 3.26 |
hadoop_RBFMetrics_locateGetter_rdh | /**
* Finds the appropriate getter for a field name.
*
* @param fieldName
* The legacy name of the field.
* @return The matching getter or null if not found.
*/
private static Method locateGetter(BaseRecord record, String fieldName) {
for (Method m : record.getClass().getMethods()) {
if (m.getName().equalsIgnoreCase("get" + fieldName)) {
return m;
}
}
return null;
} | 3.26 |
hadoop_RBFMetrics_getNameserviceAggregatedInt_rdh | /**
* Get the aggregated value for a method for all nameservices.
*
* @param f
* Method reference
* @return Aggregated integer.
*/
private int getNameserviceAggregatedInt(ToIntFunction<MembershipStats> f) {
try {
return getActiveNamenodeRegistrations().stream().map(MembershipState::getStats).collect(Collectors.summingInt(f));
} catch (IOException e) {
LOG.error("Unable to extract metrics: {}", e.getMessage());
return 0;
}
} | 3.26 |
hadoop_RBFMetrics_setStateStoreVersions_rdh | /**
* Populate the map with the State Store versions.
*
* @param map
* Map with the information.
* @param version
* State Store versions.
*/
private static void setStateStoreVersions(Map<String, Object> map, StateStoreVersion version) {
long membershipVersion = version.getMembershipVersion();
String lastMembershipUpdate = getDateString(membershipVersion);
map.put("lastMembershipUpdate", lastMembershipUpdate);
long mountTableVersion = version.getMountTableVersion();
String lastMountTableDate = getDateString(mountTableVersion);
map.put("lastMountTableUpdate", lastMountTableDate);
} | 3.26 |
hadoop_RBFMetrics_getField_rdh | /**
* Fetches the value for a field name.
*
* @param fieldName
* the legacy name of the field.
* @return The field data or null if not found.
*/
private static Object getField(BaseRecord record, String fieldName) {
Object result = null;
Method m = locateGetter(record, fieldName);
if (m != null) {
try {
result = m.invoke(record);
} catch (Exception e) {
LOG.error("Cannot get field {} on {}", fieldName, record);
}
}
return result;
} | 3.26 |
hadoop_RBFMetrics_getActiveNamenodeRegistrations_rdh | /**
* Fetches the most active namenode memberships for all known nameservices.
* The fetched membership may or may not be active. Excludes expired
* memberships.
*
* @throws IOException
* if the query could not be performed.
* @return List of the most active NNs from each known nameservice.
*/
private List<MembershipState> getActiveNamenodeRegistrations() throws IOException {
List<MembershipState> resultList = new ArrayList<>();
if (membershipStore == null) {
return resultList;
}
GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
GetNamespaceInfoResponse response = membershipStore.getNamespaceInfo(request);
for (FederationNamespaceInfo nsInfo : response.getNamespaceInfo()) {
// Fetch the most recent namenode registration
String nsId = nsInfo.getNameserviceId();
List<? extends FederationNamenodeContext> nns = namenodeResolver.getNamenodesForNameserviceId(nsId, false);
if (nns != null) {
FederationNamenodeContext v95 = nns.get(0);
if (v95 instanceof MembershipState) {
resultList.add(((MembershipState) (v95)));
}
}
}
return resultList;
} | 3.26 |
hadoop_RBFMetrics_getJson_rdh | /**
* Get JSON for this record.
*
* @return Map representing the data for the JSON representation.
*/
private static Map<String, Object> getJson(BaseRecord record) {
Map<String, Object> json = new HashMap<>();
Map<String, Class<?>> v99 = m5(record);
for (String fieldName : v99.keySet()) {
if (!fieldName.equalsIgnoreCase("proto")) {
try {
Object value = getField(record, fieldName);
if (value instanceof BaseRecord) {
BaseRecord recordField = ((BaseRecord) (value));
json.putAll(getJson(recordField));
} else {
json.put(fieldName, value == null ? JSONObject.NULL : value);
}
} catch (Exception e) {
throw new IllegalArgumentException(("Cannot serialize field " + fieldName) + " into JSON");
}
}
}
return json;
} | 3.26 |
hadoop_RBFMetrics_getDateString_rdh | /**
* Get time as a date string.
*
* @param time
* Milliseconds since the epoch (1970).
* @return String representing the date.
*/
@VisibleForTesting
static String getDateString(long time) {
if (time <= 0) {
return "-";
}
Date date = new Date(time);
SimpleDateFormat sdf = new SimpleDateFormat(DATE_FORMAT);
return sdf.format(date);
} | 3.26 |
hadoop_ClientToAMTokenSecretManagerInRM_m0_rdh | // Only for RM recovery
public synchronized SecretKey m0(ApplicationAttemptId applicationAttemptID, byte[] keyData) {
SecretKey key = createSecretKey(keyData);
registerApplication(applicationAttemptID, key);
return key;
} | 3.26 |
hadoop_AppCollectorData_happensBefore_rdh | /**
* Returns whether one collector data item happens before another. A null data
* item happens before any non-null item. A non-null item A happens before
* another non-null item B when A's rmIdentifier is less than B's, or, if the
* rmIdentifiers are equal, when A's version is less than B's.
*
* @param dataA
* first collector data item.
* @param dataB
* second collector data item.
* @return true if dataA happens before dataB.
*/
public static boolean happensBefore(AppCollectorData dataA, AppCollectorData dataB) {
if ((dataA == null) && (dataB == null)) {
return false;
} else if ((dataA == null) || (dataB == null)) {
return dataA == null;
}
return (dataA.getRMIdentifier() < dataB.getRMIdentifier()) || ((dataA.getRMIdentifier() == dataB.getRMIdentifier()) && (dataA.getVersion() < dataB.getVersion()));
} | 3.26 |
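The comparison above is a lexicographic ordering on (rmIdentifier, version), with null items sorting first. A standalone sketch of the same rule on plain longs (class and method names here are illustrative, not part of the Hadoop API):

public class HappensBeforeSketch {
  // Lexicographic (rmIdentifier, version) comparison; the "smaller" item happens before.
  static boolean happensBefore(long rmIdA, long versionA, long rmIdB, long versionB) {
    return rmIdA < rmIdB || (rmIdA == rmIdB && versionA < versionB);
  }

  public static void main(String[] args) {
    System.out.println(happensBefore(1, 5, 2, 0)); // true: lower RM identifier wins
    System.out.println(happensBefore(2, 3, 2, 4)); // true: same RM identifier, lower version
    System.out.println(happensBefore(2, 4, 2, 4)); // false: identical items
  }
}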
hadoop_StripedBlockReconstructor_clearBuffers_rdh | /**
* Clear all associated buffers.
*/
private void clearBuffers() {
getStripedReader().clearBuffers();
stripedWriter.clearBuffers();
} | 3.26 |
hadoop_ZookeeperClient_checkNotNull_rdh | // Preconditions allowed to be imported from hadoop-common, but that results
// in a circular dependency
private void checkNotNull(Object reference, String errorMessage) {
if (reference == null) {
throw new NullPointerException(errorMessage);
}
} | 3.26 |
hadoop_StageExecutionIntervalByDemand_calcWeight_rdh | // Weight = total memory consumption of stage
protected double calcWeight(ReservationRequest stage) {
return (stage.getDuration() * stage.getCapability().getMemorySize()) * stage.getNumContainers();
} | 3.26 |
hadoop_S3ALocatedFileStatus_toS3AFileStatus_rdh | /**
* Generate an S3AFileStatus instance, including etag and
* version ID, if present.
*
* @return the S3A status.
*/
public S3AFileStatus toS3AFileStatus() {
return new S3AFileStatus(getPath(), isDirectory(), isEmptyDirectory, getLen(), getModificationTime(), getBlockSize(), getOwner(), getEtag(), getVersionId());
} | 3.26 |
hadoop_DatanodeAdminProperties_getUpgradeDomain_rdh | /**
* Get the upgrade domain of the datanode.
*
* @return the upgrade domain of the datanode.
*/
public String getUpgradeDomain() {
return upgradeDomain;
} | 3.26 |
hadoop_DatanodeAdminProperties_setHostName_rdh | /**
* Set the host name of the datanode.
*
* @param hostName
* the host name of the datanode.
*/
public void setHostName(final String hostName) {
this.hostName = hostName;
} | 3.26 |
hadoop_DatanodeAdminProperties_getAdminState_rdh | /**
* Get the admin state of the datanode.
*
* @return the admin state of the datanode.
*/
public AdminStates getAdminState() {
return adminState;
} | 3.26 |
hadoop_DatanodeAdminProperties_getMaintenanceExpireTimeInMS_rdh | /**
* Get the maintenance expiration time in milliseconds.
*
* @return the maintenance expiration time in milliseconds.
*/
public long getMaintenanceExpireTimeInMS() {
return this.maintenanceExpireTimeInMS;
} | 3.26 |
hadoop_DatanodeAdminProperties_setPort_rdh | /**
* Set the port number of the datanode.
*
* @param port
* the port number of the datanode.
*/
public void setPort(final int port) {
this.port = port;
} | 3.26 |
hadoop_DatanodeAdminProperties_setMaintenanceExpireTimeInMS_rdh | /**
* Set the maintenance expiration time in milliseconds.
*
* @param maintenanceExpireTimeInMS
* the maintenance expiration time in milliseconds.
*/
public void setMaintenanceExpireTimeInMS(final long maintenanceExpireTimeInMS) {
this.maintenanceExpireTimeInMS = maintenanceExpireTimeInMS;
} | 3.26 |
hadoop_DatanodeAdminProperties_getPort_rdh | /**
* Get the port number of the datanode.
*
* @return the port number of the datanode.
*/
public int getPort() {
return port;
} | 3.26 |
hadoop_DatanodeAdminProperties_setUpgradeDomain_rdh | /**
* Set the upgrade domain of the datanode.
*
* @param upgradeDomain
* the upgrade domain of the datanode.
*/
public void setUpgradeDomain(final String upgradeDomain) {
this.upgradeDomain = upgradeDomain;
} | 3.26 |
hadoop_DatanodeAdminProperties_getHostName_rdh | /**
* Return the host name of the datanode.
*
* @return the host name of the datanode.
*/
public String getHostName() {
return hostName;
} | 3.26 |
hadoop_DatanodeAdminProperties_setAdminState_rdh | /**
* Set the admin state of the datanode.
*
* @param adminState
* the admin state of the datanode.
*/
public void setAdminState(final AdminStates adminState) {
this.adminState = adminState;
} | 3.26 |
hadoop_DefaultNoHARMFailoverProxyProvider_performFailover_rdh | /**
* PerformFailover does nothing in this class.
*
* @param currentProxy
* currentProxy.
*/
@Override
public void performFailover(T currentProxy) {
// Nothing to do.
} | 3.26 |
hadoop_DefaultNoHARMFailoverProxyProvider_init_rdh | /**
* Initialize internal data structures, invoked right after instantiation.
*
* @param conf
* Configuration to use
* @param proxy
* The {@link RMProxy} instance to use
* @param protocol
* The communication protocol to use
*/
@Override
public void init(Configuration conf, RMProxy<T> proxy, Class<T> protocol) {
this.protocol = protocol;
try {
YarnConfiguration yarnConf = new YarnConfiguration(conf);
InetSocketAddress rmAddress = proxy.getRMAddress(yarnConf, protocol);
LOG.info("Connecting to ResourceManager at {}", rmAddress);
this.proxy = proxy.getProxy(yarnConf, protocol, rmAddress);
} catch (IOException ioe) {
LOG.error("Unable to create proxy to the ResourceManager ", ioe);
}
} | 3.26 |
hadoop_DefaultNoHARMFailoverProxyProvider_close_rdh | /**
* Close the current proxy.
*
* @throws IOException
* if an IO error occurs.
*/
@Override
public void close() throws IOException {
RPC.stopProxy(proxy);
} | 3.26 |
hadoop_MemoryPlacementConstraintManager_addConstraintToMap_rdh | /**
* Helper method that adds a constraint to a map for a given source tag.
* Assumes there is already a lock on the constraint map.
*
* @param constraintMap
* constraint map to which the constraint will be added
* @param sourceTags
* the source tags that will enable this constraint
* @param placementConstraint
* the new constraint to be added
* @param replace
* if true, an existing constraint for these sourceTags will be
* replaced with the new one
*/
private void addConstraintToMap(Map<String, PlacementConstraint> constraintMap, Set<String> sourceTags, PlacementConstraint placementConstraint, boolean replace) {
if (validateConstraint(sourceTags, placementConstraint)) {
String sourceTag = getValidSourceTag(sourceTags);
if ((constraintMap.get(sourceTag) == null) || replace) {
if (replace) {
LOG.info("Replacing the constraint associated with tag {} with {}.", sourceTag, placementConstraint);
}
constraintMap.put(sourceTag, placementConstraint);
} else {
LOG.info("Constraint {} will not be added. There is already a " + "constraint associated with tag {}.", placementConstraint, sourceTag);
}
}
} | 3.26 |
hadoop_CloseableTaskPoolSubmitter_getPool_rdh | /**
* Get the pool.
*
* @return the pool.
*/
public ExecutorService getPool() {
return pool;
} | 3.26 |
hadoop_CloseableTaskPoolSubmitter_close_rdh | /**
* Shut down the pool.
*/
@Override
public void close() {
if (pool != null) {
pool.shutdown();
pool = null;
}
} | 3.26 |
hadoop_BoundedResourcePool_close_rdh | /**
* Derived classes may implement a way to cleanup each item.
*/
@Override
protected synchronized void close(T item) {
// Do nothing in this class. Allow overriding classes to take any cleanup action.
} | 3.26 |
hadoop_BoundedResourcePool_tryAcquire_rdh | /**
* Acquires a resource if one is immediately available; otherwise returns null without blocking.
*/
@Override
public T tryAcquire() {
return this.acquireHelper(false);
} | 3.26 |
hadoop_BoundedResourcePool_toString_rdh | // For debugging purposes.
@Override
public synchronized String toString() {
return String.format("size = %d, #created = %d, #in-queue = %d, #available = %d", size, numCreated(), items.size(), m0());
} | 3.26 |
hadoop_BoundedResourcePool_m0_rdh | /**
* Number of items available to be acquired. Mostly for testing purposes.
*
* @return the number available.
*/
public synchronized int m0() {
return (size - numCreated()) + items.size();
} | 3.26 |
hadoop_BoundedResourcePool_numCreated_rdh | /**
* Number of items created so far. Mostly for testing purposes.
*
* @return the count.
*/
public int numCreated() {
synchronized(createdItems) {
return createdItems.size();
}
} | 3.26 |
hadoop_BoundedResourcePool_acquire_rdh | /**
* Acquires a resource blocking if necessary until one becomes available.
*/
@Override
public T acquire() {
return this.acquireHelper(true);
} | 3.26 |
hadoop_BoundedResourcePool_release_rdh | /**
* Releases a previously acquired resource.
*
* @throws IllegalArgumentException
* if item is null.
*/
@Override
public void release(T item) {
checkNotNull(item, "item");
synchronized(createdItems) {
if (!createdItems.contains(item)) {
throw new IllegalArgumentException("This item is not a part of this pool");
}
}
// Return if this item was released earlier.
// We cannot use items.contains() because that check is not based on reference equality.
for (T entry : items) {
if (entry == item) {
return;
}
}
try {
items.put(item);
} catch (InterruptedException e) {
throw new IllegalStateException("release() should never block", e);
}
} | 3.26 |
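A hedged usage sketch for the pool API above. The subclass, the import path, and the pool size are assumptions (recent Hadoop keeps BoundedResourcePool under org.apache.hadoop.fs.impl.prefetch; check the version in use); the essential point is pairing acquire() with release() in a finally block.

import org.apache.hadoop.fs.impl.prefetch.BoundedResourcePool;

// Hypothetical subclass: createNew() is the only method a concrete pool must supply.
public class BuilderPool extends BoundedResourcePool<StringBuilder> {
  public BuilderPool(int size) {
    super(size);
  }

  @Override
  protected StringBuilder createNew() {
    return new StringBuilder(1024);
  }

  public static void main(String[] args) {
    BuilderPool pool = new BuilderPool(4);
    StringBuilder sb = pool.acquire(); // blocks until an item is available
    try {
      sb.append("work");
    } finally {
      pool.release(sb); // always return the item so other callers can acquire it
    }
    System.out.println(pool); // size / #created / #available counters
  }
}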
hadoop_ContainerReapContext_getContainer_rdh | /**
* Get the container set for the context.
*
* @return the {@link Container} set in the context.
*/
public Container getContainer() {
return container;
} | 3.26 |
hadoop_ContainerReapContext_setContainer_rdh | /**
* Set the container within the context.
*
* @param container
* the {@link Container}.
* @return the Builder with the container set.
*/
public Builder setContainer(Container container) {
this.builderContainer = container;
return this;
} | 3.26 |
hadoop_ContainerReapContext_m0_rdh | /**
* Get the user set for the context.
*
* @return the user set in the context.
*/
public String m0() {
return user;
} | 3.26 |
hadoop_ContainerReapContext_setUser_rdh | /**
* Set the user within the context.
*
* @param user
* the user.
* @return the Builder with the user set.
*/
public Builder setUser(String user) {
this.builderUser = user;
return this;
} | 3.26 |
hadoop_ContainerReapContext_build_rdh | /**
* Builds the context with the attributes set.
*
* @return the context.
*/
public ContainerReapContext build() {
return new ContainerReapContext(this);
} | 3.26 |
hadoop_ServiceTimelinePublisher_publishMetrics_rdh | /**
* Called from ServiceMetricsSink at regular intervals.
*
* @param metrics
* of service or components
* @param entityId
* Id of entity
* @param entityType
* Type of entity
* @param timestamp
*/
public void publishMetrics(Iterable<AbstractMetric> metrics, String entityId, String entityType, long timestamp) {
TimelineEntity entity = createTimelineEntity(entityId, entityType);
Set<TimelineMetric> entityMetrics = new HashSet<TimelineMetric>();
for (AbstractMetric metric : metrics) {
TimelineMetric timelineMetric = new TimelineMetric();
timelineMetric.setId(metric.name());
timelineMetric.addValue(timestamp, metric.value());
entityMetrics.add(timelineMetric);
}
entity.setMetrics(entityMetrics);
putEntity(entity);
} | 3.26 |
hadoop_JobMonitor_join_rdh | /**
* Wait for the monitor to halt, assuming shutdown or abort have been
* called. Note that, since submission may be sporadic, this will hang
* if no form of shutdown has been requested.
*/
public void join(long millis) throws InterruptedException {
executor.awaitTermination(millis, TimeUnit.MILLISECONDS);
} | 3.26 |
hadoop_JobMonitor_start_rdh | /**
* Start the internal, monitoring thread.
*/
public void start() {
for (int i = 0; i < numPollingThreads; ++i) {
executor.execute(new MonitorThread(i));
}
} | 3.26 |
hadoop_JobMonitor_submissionFailed_rdh | /**
* Add the status of a job whose submission failed, so that the failure can be
* communicated back to the serial submitter.
* TODO: Cleaner solution for this problem
*
* @param job
*/
public void submissionFailed(JobStats job) {
String jobID = job.getJob().getConfiguration().get(Gridmix.ORIGINAL_JOB_ID);
LOG.info("Job submission failed notification for job " + jobID);
synchronized(statistics) {
this.statistics.add(job);
}
} | 3.26 |
hadoop_JobMonitor_add_rdh | /**
* Add a running job's status to the polling queue.
*/
public void add(JobStats job) throws InterruptedException {
f0.put(job);
} | 3.26 |
hadoop_JobMonitor_shutdown_rdh | /**
* When all monitored jobs have completed, stop the monitoring thread.
* Upstream submitter is assumed dead.
*/
public void shutdown() {
synchronized(mJobs) {
graceful = true;
shutdown = true;
}
executor.shutdown();
} | 3.26 |
hadoop_JobMonitor_onSuccess_rdh | /**
* Temporary hook for recording job success.
*/
protected void onSuccess(Job job) {
LOG.info((((job.getJobName() + " (") + job.getJobID()) + ")") + " success");
} | 3.26 |
hadoop_JobMonitor_getRemainingJobs_rdh | /**
* If shutdown before all jobs have completed, any still-running jobs
* may be extracted from the component.
*
* @throws IllegalStateException
* If monitoring thread is still running.
* @return Any jobs submitted and not known to have completed.
*/
List<JobStats> getRemainingJobs() {
synchronized(mJobs) {
return new ArrayList<JobStats>(mJobs);
}
} | 3.26 |
hadoop_JobMonitor_onFailure_rdh | /**
* Temporary hook for recording job failure.
*/
protected void onFailure(Job job) {
LOG.info((((job.getJobName() + " (") + job.getJobID()) + ")") + " failure");
} | 3.26 |
hadoop_JobMonitor_abort_rdh | /**
* Drain all submitted jobs to a queue and stop the monitoring thread.
* Upstream submitter is assumed dead.
*/
public void abort() {
synchronized(mJobs) {
graceful = false;
shutdown = true;
}
executor.shutdown();
} | 3.26 |
hadoop_RequestFactoryImpl_withStorageClass_rdh | /**
* Storage class.
*
* @param value
* new value
* @return the builder
*/
public RequestFactoryBuilder withStorageClass(final StorageClass value) {
storageClass = value;
return this;
} | 3.26 |
hadoop_RequestFactoryImpl_copyEncryptionParameters_rdh | /**
* Propagate encryption parameters from source file if set else use the
* current filesystem encryption settings.
*
* @param copyObjectRequestBuilder
* copy object request builder.
* @param srcom
* source object metadata.
*/
protected void copyEncryptionParameters(HeadObjectResponse srcom, CopyObjectRequest.Builder copyObjectRequestBuilder) {
final S3AEncryptionMethods algorithm = getServerSideEncryptionAlgorithm();
String sourceKMSId = srcom.ssekmsKeyId();
if (isNotEmpty(sourceKMSId)) {
// source KMS ID is propagated
LOG.debug("Propagating SSE-KMS settings from source {}", sourceKMSId);
copyObjectRequestBuilder.ssekmsKeyId(sourceKMSId);
return;
}
switch (algorithm) {
case SSE_S3 :
copyObjectRequestBuilder.serverSideEncryption(algorithm.getMethod());
break;
case SSE_KMS :
copyObjectRequestBuilder.serverSideEncryption(ServerSideEncryption.AWS_KMS);
// Set the KMS key if present, else S3 uses AWS managed key.
EncryptionSecretOperations.getSSEAwsKMSKey(encryptionSecrets).ifPresent(copyObjectRequestBuilder::ssekmsKeyId);
break;
case DSSE_KMS :
copyObjectRequestBuilder.serverSideEncryption(ServerSideEncryption.AWS_KMS_DSSE);
EncryptionSecretOperations.getSSEAwsKMSKey(encryptionSecrets).ifPresent(copyObjectRequestBuilder::ssekmsKeyId);
break;
case SSE_C :
EncryptionSecretOperations.getSSECustomerKey(encryptionSecrets).ifPresent(base64customerKey -> copyObjectRequestBuilder.copySourceSSECustomerAlgorithm(ServerSideEncryption.AES256.name()).copySourceSSECustomerKey(base64customerKey).copySourceSSECustomerKeyMD5(Md5Utils.md5AsBase64(Base64.getDecoder().decode(base64customerKey))).sseCustomerAlgorithm(ServerSideEncryption.AES256.name()).sseCustomerKey(base64customerKey).sseCustomerKeyMD5(Md5Utils.md5AsBase64(Base64.getDecoder().decode(base64customerKey))));
break;
case CSE_KMS :
case CSE_CUSTOM :
case NONE :
break;
default :
LOG.warn((UNKNOWN_ALGORITHM + ": ") + algorithm);
}
} | 3.26 |
hadoop_RequestFactoryImpl_getServerSideEncryptionAlgorithm_rdh | /**
* Get the encryption algorithm of this endpoint.
*
* @return the encryption algorithm.
*/
@Override
public S3AEncryptionMethods getServerSideEncryptionAlgorithm() {
return encryptionSecrets.getEncryptionMethod();
} | 3.26 |
hadoop_RequestFactoryImpl_withContentEncoding_rdh | /**
* Content encoding.
*
* @param value
* new value
* @return the builder
*/
public RequestFactoryBuilder withContentEncoding(final String value) {
contentEncoding = value;
return this;
} | 3.26 |
hadoop_RequestFactoryImpl_getBucket_rdh | /**
* Get the target bucket.
*
* @return the bucket.
*/
protected String getBucket() {
return bucket;
} | 3.26 |
hadoop_RequestFactoryImpl_withMultipartPartCountLimit_rdh | /**
* Multipart limit.
*
* @param value
* new value
* @return the builder
*/
public RequestFactoryBuilder withMultipartPartCountLimit(final long value) {
multipartPartCountLimit = value;
return this;
} | 3.26 |
hadoop_RequestFactoryImpl_withBucket_rdh | /**
* Target bucket.
*
* @param value
* new value
* @return the builder
*/
public RequestFactoryBuilder withBucket(final String value) {
bucket = value;
return this;
} | 3.26 |
hadoop_RequestFactoryImpl_getContentEncoding_rdh | /**
* Get the content encoding (e.g. gzip) or return null if none.
*
* @return content encoding
*/
@Override
public String getContentEncoding() {
return contentEncoding;
} | 3.26 |
hadoop_RequestFactoryImpl_getStorageClass_rdh | /**
* Get the object storage class, return null if none.
*
* @return storage class
*/
@Override
public StorageClass getStorageClass() {
return storageClass;
} | 3.26 |
hadoop_RequestFactoryImpl_builder_rdh | /**
* Create a builder.
*
* @return new builder.
*/
public static RequestFactoryBuilder builder() {
return new RequestFactoryBuilder();
} | 3.26 |
hadoop_RequestFactoryImpl_build_rdh | /**
* Build the request factory.
*
* @return the factory
*/
public RequestFactory build() {
return new RequestFactoryImpl(this);
} | 3.26 |
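The builder methods documented in this section compose in the usual fluent style. A hedged sketch (the bucket name, limits, and import paths are assumptions; encryption secrets and other collaborators are left at their defaults):

import org.apache.hadoop.fs.s3a.api.RequestFactory;
import org.apache.hadoop.fs.s3a.impl.RequestFactoryImpl;
import software.amazon.awssdk.services.s3.model.StorageClass;

public class RequestFactorySketch {
  public static void main(String[] args) {
    RequestFactory factory = RequestFactoryImpl.builder()
        .withBucket("example-bucket")            // target bucket (placeholder)
        .withContentEncoding("gzip")             // content encoding for new objects
        .withStorageClass(StorageClass.STANDARD) // storage class for new objects
        .withMultipartPartCountLimit(10000)      // upper bound on multipart part count
        .build();
    System.out.println(factory.getContentEncoding());
  }
}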
hadoop_RequestFactoryImpl_withMultipartUploadEnabled_rdh | /**
* Multipart upload enabled.
*
* @param value
* new value
* @return the builder
*/
public RequestFactoryBuilder withMultipartUploadEnabled(final boolean value) {
this.isMultipartUploadEnabled = value;
return this;
} | 3.26 |
hadoop_RequestFactoryImpl_m0_rdh | /**
* ACL For new objects.
*
* @param value
* new value
* @return the builder
*/
public RequestFactoryBuilder m0(final String value) {
cannedACL = value;
return this;
} | 3.26 |
hadoop_RequestFactoryImpl_uploadPartEncryptionParameters_rdh | /**
* Sets server side encryption parameters to the part upload
* request when encryption is enabled.
*
* @param builder
* upload part request builder
*/
protected void uploadPartEncryptionParameters(UploadPartRequest.Builder builder) {
// need to set key to get objects encrypted with SSE_C
EncryptionSecretOperations.getSSECustomerKey(encryptionSecrets).ifPresent(base64customerKey -> {
builder.sseCustomerAlgorithm(ServerSideEncryption.AES256.name()).sseCustomerKey(base64customerKey).sseCustomerKeyMD5(Md5Utils.md5AsBase64(Base64.getDecoder().decode(base64customerKey)));
});
} | 3.26 |
hadoop_ChangeDetectionPolicy_createPolicy_rdh | /**
* Create a policy.
*
* @param mode
* mode of checks
* @param source
* source of change
* @param requireVersion
* throw exception when no version available?
* @return the policy
*/
@VisibleForTesting
public static ChangeDetectionPolicy createPolicy(final Mode mode, final Source source, final boolean requireVersion) {
switch (source) {
case ETag :
return new ETagChangeDetectionPolicy(mode, requireVersion);
case VersionId :
return new VersionIdChangeDetectionPolicy(mode, requireVersion);
default :
return new NoChangeDetection();
}
} | 3.26 |
hadoop_ChangeDetectionPolicy_toString_rdh | /**
* String value for logging.
*
* @return source and mode.
*/
@Override
public String toString() {
return (("Policy " + getSource()) + "/") + getMode();
}
/**
* Pulls the attribute this policy uses to detect change out of the S3 object
* metadata. The policy generically refers to this attribute as
* {@code revisionId} | 3.26 |
hadoop_ChangeDetectionPolicy_getPolicy_rdh | /**
* Reads the change detection policy from Configuration.
*
* @param configuration
* the configuration
* @return the policy
*/
public static ChangeDetectionPolicy getPolicy(Configuration configuration) {
Mode mode = Mode.fromConfiguration(configuration);
Source source = Source.fromConfiguration(configuration);
boolean requireVersion = configuration.getBoolean(CHANGE_DETECT_REQUIRE_VERSION, CHANGE_DETECT_REQUIRE_VERSION_DEFAULT);
return createPolicy(mode, source, requireVersion);
} | 3.26 |
hadoop_CreateFlag_validateForAppend_rdh | /**
* Validate the CreateFlag for the append operation. The flag must contain
* APPEND, and cannot contain OVERWRITE.
*
* @param flag
* enum set flag.
*/
public static void validateForAppend(EnumSet<CreateFlag> flag) {
validate(flag);
if (!flag.contains(APPEND)) {
throw new HadoopIllegalArgumentException(flag + " does not contain APPEND");
}
} | 3.26 |
hadoop_CreateFlag_validate_rdh | /**
* Validate the CreateFlag for create operation
*
* @param path
* Object representing the path; usually String or {@link Path}
* @param pathExists
* pass true if the path exists in the file system
* @param flag
* set of CreateFlag
* @throws IOException
* on error
* @throws HadoopIllegalArgumentException
* if the CreateFlag is invalid
*/
public static void validate(Object path, boolean pathExists, EnumSet<CreateFlag> flag) throws IOException {
validate(flag);
final boolean append = flag.contains(APPEND);
final boolean overwrite = flag.contains(OVERWRITE);
if (pathExists) {
if (!(append || overwrite)) {
throw new FileAlreadyExistsException((("File already exists: " + path.toString()) + ". Append or overwrite option must be specified in ") + flag);
}
} else if (!flag.contains(CREATE)) {
throw new FileNotFoundException((("Non existing file: " + path.toString()) + ". Create option is not specified in ") + flag);
}
} | 3.26 |
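A hedged example of the validation above (the path string is arbitrary): an existing path passes with CREATE|OVERWRITE but is rejected with CREATE alone.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;

public class CreateFlagSketch {
  public static void main(String[] args) throws IOException {
    // Existing path with OVERWRITE set: accepted.
    CreateFlag.validate("/data/out.txt", true,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));

    // Existing path with only CREATE set: rejected (a FileAlreadyExistsException in practice).
    try {
      CreateFlag.validate("/data/out.txt", true, EnumSet.of(CreateFlag.CREATE));
    } catch (IOException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}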
hadoop_XException_format_rdh | /**
* Creates a message using a error message template and arguments.
* <p>
* The template must be in JDK <code>MessageFormat</code> syntax
* (using {#} positional parameters).
*
* @param error
* error code, to get the template from.
* @param args
* arguments to use for creating the message.
* @return the resolved error message.
*/
private static String format(ERROR error, Object... args) {
String template = error.getTemplate();
if (template == null) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < args.length; i++) {
sb.append(" {").append(i).append("}");
}
template = sb.deleteCharAt(0).toString();
}
return (error + ": ") + MessageFormat.format(template, args);
} | 3.26 |
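The template mechanism above is plain java.text.MessageFormat with positional {0}, {1} parameters. A minimal JDK-only sketch of the same fallback-and-format behaviour (error codes and templates here are invented for illustration):

import java.text.MessageFormat;

public class XExceptionFormatSketch {
  // Mirrors XException.format: use the template if present, otherwise build
  // "{0} {1} ..." so every argument still appears in the message.
  static String format(String errorCode, String template, Object... args) {
    if (template == null) {
      StringBuilder sb = new StringBuilder();
      for (int i = 0; i < args.length; i++) {
        sb.append(" {").append(i).append("}");
      }
      template = sb.deleteCharAt(0).toString();
    }
    return errorCode + ": " + MessageFormat.format(template, args);
  }

  public static void main(String[] args) {
    System.out.println(format("E01", "could not open {0} after {1} attempts", "site.xml", 3));
    System.out.println(format("E02", null, "fallback", "message"));
  }
}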
hadoop_XException_getError_rdh | /**
* Returns the error code of the exception.
*
* @return the error code of the exception.
*/
public ERROR getError() {
return f0;
} | 3.26 |
hadoop_HadoopLogsAnalyzer_main_rdh | /**
*
* @param args
* Last arg is the input file. That file can be a directory, in which
* case you get all the files in sorted order. We will decompress
* files whose names end in .gz.
*
* switches: -c collect line types.
*
* -d debug mode
*
* -delays print out the delays [interval between job submit time and
* launch time]
*
* -runtimes print out the job runtimes
*
* -spreads print out the ratio of 10%ile and 90%ile, of both the
* successful map task attempt run times and the the successful
* reduce task attempt run times
*
* -tasktimes prints out individual task time distributions
*
* collects all the line types and prints the first example of each
* one
*/
public static void main(String[] args) {
try {
HadoopLogsAnalyzer analyzer = new HadoopLogsAnalyzer();
int result = ToolRunner.run(analyzer, args);
if (result == 0) {
return;
}
System.exit(result);
} catch (FileNotFoundException e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(1);
} catch (IOException e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(2);
} catch (Exception e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(3);
}
} | 3.26 |
hadoop_HadoopLogsAnalyzer_readBalancedLine_rdh | // This can return either the Pair of the !!file line and the XMLconf
// file, or null and an ordinary line. Returns just null if there's
// no more input.
private Pair<String, String> readBalancedLine() throws IOException {
String line = readCountedLine();
if (line == null) {
return null;
}
while (line.indexOf('\f') > 0) {
line = line.substring(line.indexOf('\f'));
}
if ((line.length() != 0) && (line.charAt(0) == '\f')) {
String subjectLine = readCountedLine();
if ((((subjectLine != null) && (subjectLine.length() != 0)) && apparentConfFileHeader(line)) && apparentXMLFileStart(subjectLine)) {
StringBuilder sb = new StringBuilder();
while ((subjectLine != null) && (subjectLine.indexOf('\f') > 0)) {
subjectLine = subjectLine.substring(subjectLine.indexOf('\f'));
}
while ((subjectLine != null) && ((subjectLine.length() == 0) || (subjectLine.charAt(0) != '\f'))) {
sb.append(subjectLine);
subjectLine = readCountedLine();
}
if (subjectLine != null) {
unreadCountedLine(subjectLine);
}
return new Pair<String, String>(line, sb.toString());
}
// here we had a file line, but it introduced a log segment, not
// a conf file. We want to just ignore the file line.
return readBalancedLine();
}
String endlineString = (version == 0) ? " " : " .";
if (line.length() < endlineString.length()) {
return new Pair<String, String>(null, line);
}
if (!endlineString.equals(line.substring(line.length() - endlineString.length()))) {
StringBuilder sb = new StringBuilder(line);
String addedLine;
do {
addedLine = readCountedLine();
if (addedLine == null) {
return new Pair<String, String>(null, sb.toString());
}
while (addedLine.indexOf('\f') > 0) {
addedLine = addedLine.substring(addedLine.indexOf('\f'));
}
if ((addedLine.length() > 0) && (addedLine.charAt(0) == '\f')) {
unreadCountedLine(addedLine);
return new Pair<String, String>(null, sb.toString());
}
sb.append("\n");
sb.append(addedLine);
} while (!endlineString.equals(addedLine.substring(addedLine.length() - endlineString.length())) );
line = sb.toString();
}
return new Pair<String, String>(null, line);
} | 3.26 |
hadoop_HadoopLogsAnalyzer_initializeHadoopLogsAnalyzer_rdh | /**
*
* @param args
* string arguments. See {@code usage()}
* @throws FileNotFoundException
* @throws IOException
*/
private int initializeHadoopLogsAnalyzer(String[] args) throws FileNotFoundException, IOException {
Path jobTraceFilename = null;
Path topologyFilename = null;
if ((args.length == 0) || (args[args.length - 1].charAt(0) == '-')) {
throw new IllegalArgumentException("No input specified.");
} else {
inputFilename = args[args.length - 1];
}
for (int i = 0; i < (args.length - (inputFilename == null ? 0 : 1)); ++i) {
if (StringUtils.equalsIgnoreCase("-h", args[i]) || StringUtils.equalsIgnoreCase("-help", args[i])) {
usage();
return 0;
}
if (StringUtils.equalsIgnoreCase("-c", args[i]) || StringUtils.equalsIgnoreCase("-collect-prefixes", args[i])) {
collecting = true;
continue;
}
// these control the job digest
if (StringUtils.equalsIgnoreCase("-write-job-trace", args[i])) {
++i;
jobTraceFilename = new Path(args[i]);
continue;
}
if (StringUtils.equalsIgnoreCase("-single-line-job-traces", args[i])) {
prettyprintTrace = false;
continue;
}
if (StringUtils.equalsIgnoreCase("-omit-task-details", args[i])) {
omitTaskDetails = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-write-topology", args[i])) {
++i;
topologyFilename = new Path(args[i]);
continue;
}
if (StringUtils.equalsIgnoreCase("-job-digest-spectra", args[i])) {
ArrayList<Integer> values = new ArrayList<Integer>();
++i;
while ((i < args.length) && Character.isDigit(args[i].charAt(0))) {
values.add(Integer.parseInt(args[i]));
++i;
}
if (values.size() == 0) {
throw new IllegalArgumentException("Empty -job-digest-spectra list");
}
attemptTimesPercentiles = new int[values.size()];
int lastValue = 0;
for (int j = 0; j < attemptTimesPercentiles.length; ++j) {
if ((values.get(j) <= lastValue) || (values.get(j) >= 100)) {
throw new IllegalArgumentException("Bad -job-digest-spectra percentiles list");
}
attemptTimesPercentiles[j] = values.get(j);
}
--i;
continue;
}
if (StringUtils.equalsIgnoreCase("-d", args[i]) || StringUtils.equalsIgnoreCase("-debug", args[i])) {
debug = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-spreads", args[i])) {
int min = Integer.parseInt(args[i + 1]);
int max = Integer.parseInt(args[i + 2]);
if (((min < max) && (min < 1000)) && (max < 1000)) {
spreadMin = min;
spreadMax = max;
spreading = true;
i += 2;
}
continue;
}
// These control log-wide CDF outputs
if (StringUtils.equalsIgnoreCase("-delays", args[i])) {
delays = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-runtimes", args[i])) {
runtimes = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-tasktimes", args[i])) {
collectTaskTimes = true;
continue;
}
if (StringUtils.equalsIgnoreCase("-v1", args[i])) {
version = 1;
continue;
}
throw new IllegalArgumentException("Unrecognized argument: " + args[i]);
}
runTimeDists = newDistributionBlock();
delayTimeDists = newDistributionBlock();
mapTimeSpreadDists = newDistributionBlock("map-time-spreads");
shuffleTimeSpreadDists = newDistributionBlock();
sortTimeSpreadDists = newDistributionBlock();
reduceTimeSpreadDists = newDistributionBlock();
mapTimeDists = newDistributionBlock();
shuffleTimeDists = newDistributionBlock();
sortTimeDists = newDistributionBlock();
reduceTimeDists = newDistributionBlock();
taskAttemptStartTimes = new HashMap<String, Long>();
taskReduceAttemptShuffleEndTimes = new HashMap<String, Long>();
taskReduceAttemptSortEndTimes = new HashMap<String, Long>();
taskMapAttemptFinishTimes = new HashMap<String, Long>();
taskReduceAttemptFinishTimes = new HashMap<String, Long>();
final Path inputPath = new Path(inputFilename);
f1 = pathIsDirectory(inputPath);
if ((jobTraceFilename != null) && (attemptTimesPercentiles == null)) {
attemptTimesPercentiles = new int[19];
for (int i = 0; i < 19; ++i) {
attemptTimesPercentiles[i] = (i + 1) * 5;
}
}
if (!f1) {
input = maybeUncompressedPath(inputPath);
} else {
inputDirectoryPath = inputPath;
FileSystem fs = inputPath.getFileSystem(getConf());
FileStatus[] statuses = fs.listStatus(inputPath);
inputDirectoryFiles = new String[statuses.length];
for (int i = 0; i < statuses.length; ++i) {
inputDirectoryFiles[i] = statuses[i].getPath().getName();
}
// filter out the .crc files, if any
int dropPoint = 0;
for (int i = 0; i < inputDirectoryFiles.length; ++i) {
String name = inputDirectoryFiles[i];
if (!((name.length() >= 4) && ".crc".equals(name.substring(name.length() - 4)))) {
inputDirectoryFiles[dropPoint++] = name;
}
}
LOG.info(("We dropped " + (inputDirectoryFiles.length - dropPoint)) + " crc files.");
String[] new_inputDirectoryFiles = new String[dropPoint];
System.arraycopy(inputDirectoryFiles, 0, new_inputDirectoryFiles, 0, dropPoint);
inputDirectoryFiles = new_inputDirectoryFiles;
Arrays.sort(inputDirectoryFiles);
if (!setNextDirectoryInputStream()) {
throw new FileNotFoundException("Empty directory specified.");
}
}
if (jobTraceFilename != null) {
jobTraceGen = new DefaultOutputter<LoggedJob>();
jobTraceGen.init(jobTraceFilename, getConf());
if (topologyFilename != null) {
topologyGen = new DefaultOutputter<LoggedNetworkTopology>();
topologyGen.init(topologyFilename, getConf());
}
}
return 0;
} | 3.26 |
hadoop_ResourceRequestSet_m0_rdh | /**
* Add a {@link ResourceRequest} into the requestSet. If there's already an RR
* with the same resource name, override it and update accordingly.
*
* @param ask
* the new {@link ResourceRequest}
* @throws YarnException
* indicates exceptions from yarn servers.
*/
public void m0(ResourceRequest ask) throws YarnException {
if (!this.f0.equals(new ResourceRequestSetKey(ask))) {
throw new YarnException((("None compatible asks: \n" + ask) + "\n") + this.f0);
}
// Override directly if exists
this.asks.put(ask.getResourceName(), ask);
if (this.f0.getExeType().equals(ExecutionType.GUARANTEED)) {
// For G requestSet, update the numContainers only for ANY RR
if (ask.getResourceName().equals(ResourceRequest.ANY)) {
this.numContainers = ask.getNumContainers();
this.relaxable = ask.getRelaxLocality();
}
} else {
// The assumption we made about O asks is that all RR in a requestSet has
// the same numContainers value. So we just take the value of the last RR
this.numContainers = ask.getNumContainers();
}
if (this.numContainers < 0) {
throw new YarnException((((("numContainers becomes " + this.numContainers) + " when adding ask ") + ask) + "\n requestSet: ") + toString());
}
} | 3.26 |
hadoop_ResourceRequestSet_addAndOverrideRRSet_rdh | /**
* Merge a requestSet into this one.
*
* @param requestSet
* the requestSet to merge
* @throws YarnException
* indicates exceptions from yarn servers.
*/
public void addAndOverrideRRSet(ResourceRequestSet requestSet) throws YarnException {
if (requestSet == null) {
return;
}
for (ResourceRequest rr : requestSet.getRRs()) {
m0(rr);
}
} | 3.26 |
hadoop_ResourceRequestSet_setNumContainers_rdh | /**
* Force set the # of containers to ask for this requestSet to a given value.
*
* @param newValue
* the new # of containers value
* @throws YarnException
* indicates exceptions from yarn servers.
*/
public void setNumContainers(int newValue) throws YarnException {
if (this.numContainers == 0) {
throw new YarnException((("should not set numContainers to " + newValue) + " for a cancel requestSet: ") + toString());
}
// Clone the ResourceRequest object whenever we need to change it
int oldValue = this.numContainers;
this.numContainers = newValue;
if (this.f0.getExeType().equals(ExecutionType.OPPORTUNISTIC)) {
// The assumption we made about O asks is that all RR in a requestSet has
// the same numContainers value
Map<String, ResourceRequest> newAsks = new HashMap<>();
for (ResourceRequest rr : this.asks.values()) {
ResourceRequest clone = ResourceRequest.clone(rr);
clone.setNumContainers(newValue);
newAsks.put(clone.getResourceName(), clone);
}
this.asks = newAsks;
} else {
ResourceRequest rr = this.asks.get(ResourceRequest.ANY);
if (rr == null) {
throw new YarnException("No ANY RR found in requestSet with numContainers=" + oldValue);
}
ResourceRequest clone = ResourceRequest.clone(rr);
clone.setNumContainers(newValue);
this.asks.put(ResourceRequest.ANY, clone);
}
} | 3.26 |
hadoop_ResourceRequestSet_isANYRelaxable_rdh | /**
* Whether the request set is relaxable at ANY level.
*
* @return whether the request set is relaxable at ANY level
*/
public boolean isANYRelaxable() {
return this.relaxable;
} | 3.26 |
hadoop_ResourceRequestSet_cleanupZeroNonAnyRR_rdh | /**
* Remove all non-ANY ResourceRequests whose container count has dropped to
* zero. This is necessary cleanup to avoid the requestSet getting too big.
*/
public void cleanupZeroNonAnyRR() {
Iterator<Entry<String, ResourceRequest>> iter = this.asks.entrySet().iterator();
while (iter.hasNext()) {
Entry<String, ResourceRequest> entry = iter.next();
if (entry.getKey().equals(ResourceRequest.ANY)) {
// Do not delete ANY RR
continue;
}
if (entry.getValue().getNumContainers() == 0) {
iter.remove();
}
}
} | 3.26 |
hadoop_VersionedWritable_write_rdh | // javadoc from Writable
@Override
public void write(DataOutput out) throws IOException {
out.writeByte(getVersion());// store version
} | 3.26 |