name | code_snippet | score |
---|---|---|
hadoop_HAServiceTarget_getZKFCProxy_rdh | /**
*
* @return a proxy to the ZKFC which is associated with this HA service.
* @param conf
* configuration.
* @param timeoutMs
* timeout in milliseconds.
* @throws IOException
* raised on errors performing I/O.
*/
public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
throws IOException {
Configuration confCopy = new Configuration(conf);
    // Lower the connect retry count so we quickly fail to connect
confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
return new ZKFCProtocolClientSideTranslatorPB(getZKFCAddress(), confCopy, factory, timeoutMs);
} | 3.26 |
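A hedged usage sketch for the snippet above. The wrapper class, the 5000 ms proxy timeout, and the 10 second cede window are illustrative assumptions; only HAServiceTarget.getZKFCProxy and ZKFCProtocol.cedeActive are taken from the Hadoop HA API, and hadoop-common must be on the classpath.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.ZKFCProtocol;

public final class ZkfcProxyExample {
  /** Ask the ZKFC guarding this target to cede its active state briefly. */
  public static void cedeBriefly(HAServiceTarget target) throws IOException {
    Configuration conf = new Configuration();
    // The proxy itself lowers connect retries, so an unreachable ZKFC fails fast.
    ZKFCProtocol zkfc = target.getZKFCProxy(conf, 5000);
    zkfc.cedeActive(10000); // cede active state for 10 seconds
  }
}
```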
hadoop_HAServiceTarget_addFencingParameters_rdh | /**
* Hook to allow subclasses to add any parameters they would like to
* expose to fencing implementations/scripts. Fencing methods are free
* to use this map as they see fit -- notably, the shell script
* implementation takes each entry, prepends 'target_', substitutes
* '_' for '.', and adds it to the environment of the script.
*
* Subclass implementations should be sure to delegate to the superclass
* implementation as well as adding their own keys.
*
* @param ret
* map which can be mutated to pass parameters to the fencer
*/
protected void addFencingParameters(Map<String, String> ret) {
ret.put(ADDRESS_SUBST_KEY, String.valueOf(getAddress()));
ret.put(HOST_SUBST_KEY, getAddress().getHostName());
ret.put(PORT_SUBST_KEY, String.valueOf(getAddress().getPort()));
} | 3.26 |
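A minimal, plain-Java sketch (not Hadoop code) of the transformation the javadoc above describes: each fencing parameter key is prefixed with "target_" and '.' is replaced by '_' before being exported to the fencing script's environment. The class and method names are illustrative.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public final class FencingEnvExample {
  /** Convert fencing parameters into the environment keys a fencing script would see. */
  static Map<String, String> toScriptEnv(Map<String, String> fencingParams) {
    Map<String, String> env = new LinkedHashMap<>();
    for (Map.Entry<String, String> e : fencingParams.entrySet()) {
      env.put("target_" + e.getKey().replace('.', '_'), e.getValue());
    }
    return env;
  }

  public static void main(String[] args) {
    Map<String, String> params = new LinkedHashMap<>();
    params.put("address", "nn1.example.com:8020");
    params.put("host", "nn1.example.com");
    params.put("port", "8020");
    // {target_address=nn1.example.com:8020, target_host=nn1.example.com, target_port=8020}
    System.out.println(toScriptEnv(params));
  }
}
```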
hadoop_SimpleTcpClientHandler_channelRead_rdh | /**
* Shutdown connection by default. Subclass can override this method to do
* more interaction with the server.
*/
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ctx.channel().close();
} | 3.26 |
hadoop_TimelineDomain_getCreatedTime_rdh | /**
* Get the created time of the domain
*
* @return the created time of the domain
*/
@XmlElement(name = "createdtime")
public Long getCreatedTime() {
return createdTime;
} | 3.26 |
hadoop_TimelineDomain_getReaders_rdh | /**
* Get the reader (and/or reader group) list string
*
* @return the reader (and/or reader group) list string
*/
@XmlElement(name = "readers")
public String getReaders() {
return readers;
} | 3.26 |
hadoop_TimelineDomain_setModifiedTime_rdh | /**
* Set the modified time of the domain
*
* @param modifiedTime
* the modified time of the domain
*/
public void setModifiedTime(Long modifiedTime) {
this.modifiedTime = modifiedTime;
} | 3.26 |
hadoop_TimelineDomain_getOwner_rdh | /**
* Get the domain owner
*
* @return the domain owner
*/
@XmlElement(name = "owner")
public String getOwner() {
return owner;
} | 3.26 |
hadoop_TimelineDomain_setReaders_rdh | /**
* Set the reader (and/or reader group) list string
*
* @param readers
* the reader (and/or reader group) list string
*/
public void setReaders(String readers) {
this.readers = readers;
} | 3.26 |
hadoop_TimelineDomain_setDescription_rdh | /**
* Set the domain description
*
* @param description
* the domain description
*/
public void setDescription(String description) {
this.f0 = description;
} | 3.26 |
hadoop_TimelineDomain_setWriters_rdh | /**
* Set the writer (and/or writer group) list string
*
* @param writers
* the writer (and/or writer group) list string
 */
public void setWriters(String writers) {
this.writers = writers;
} | 3.26 |
hadoop_TimelineDomain_setCreatedTime_rdh | /**
* Set the created time of the domain
*
* @param createdTime
* the created time of the domain
*/
public void setCreatedTime(Long createdTime) {
this.createdTime = createdTime;
} | 3.26 |
hadoop_TimelineDomain_setOwner_rdh | /**
 * Set the domain owner. The user doesn't need to set it; it will
 * automatically be set to the user who puts the domain.
*
* @param owner
* the domain owner
*/
public void setOwner(String owner) {
this.owner = owner;
} | 3.26 |
hadoop_TimelineDomain_setId_rdh | /**
* Set the domain ID
*
* @param id
* the domain ID
*/
public void setId(String id) {
this.id = id;
} | 3.26 |
hadoop_TimelineDomain_getWriters_rdh | /**
* Get the writer (and/or writer group) list string
*
* @return the writer (and/or writer group) list string
*/
@XmlElement(name = "writers")
public String getWriters() {
return writers;
} | 3.26 |
hadoop_TimelineDomain_getDescription_rdh | /**
* Get the domain description
*
* @return the domain description
*/
@XmlElement(name = "description")
public String getDescription() {
return f0;
} | 3.26 |
hadoop_RpcProgram_m0_rdh | /**
* Unregister this program with the local portmapper.
*
* @param transport
* transport layer for port map
* @param boundPort
* port number of bounded RPC program
*/
public void m0(int transport, int boundPort) {
if (boundPort != port) {
LOG.info((("The bound port is " + boundPort) + ", different with configured port ") + port);
port = boundPort;
}
// Unregister all the program versions with portmapper for a given transport
    for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport, port);
register(mapEntry, false);
}
} | 3.26 |
hadoop_RpcProgram_m1_rdh | // Start extra daemons or services
public void m1() {
} | 3.26 |
hadoop_RpcProgram_register_rdh | /**
* Register the program with Portmap or Rpcbind.
*
* @param mapEntry
* port map entries
* @param set
* specifies registration or not
*/
protected void register(PortmapMapping mapEntry, boolean set) {
XDR mappingRequest = PortmapRequest.create(mapEntry, set);
SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT, mappingRequest, true, registrationSocket, portmapUdpTimeoutMillis);
try {
registrationClient.run();
} catch (IOException e) {
String request = (set) ? "Registration" : "Unregistration";
LOG.error((((((request + " failure with ") +
host) + ":") + port) + ", portmap entry: ") + mapEntry);
throw new RuntimeException(request + " failure", e);
}
} | 3.26 |
hadoop_Summarizer_toString_rdh | /**
* Summarizes the current {@link Gridmix} run and the cluster used.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(executionSummarizer.toString());
builder.append(clusterSummarizer.toString());
return builder.toString();
} | 3.26 |
hadoop_Summarizer_finalize_rdh | /**
* This finalizes the summarizer.
*/
@SuppressWarnings("unchecked")
void finalize(JobFactory factory, String path, long size, UserResolver resolver, DataStatistics stats, Configuration conf) throws IOException {
executionSummarizer.finalize(factory, path, size,
resolver, stats, conf);
} | 3.26 |
hadoop_TimelineReaderAuthenticationFilterInitializer_m0_rdh | /**
* Filter initializer to initialize {@link AuthenticationFilter}
* for ATSv2 timeline reader server with timeline service specific
* configurations.
 */
public class TimelineReaderAuthenticationFilterInitializer extends TimelineAuthenticationFilterInitializer {
/**
* Initializes {@link AuthenticationFilter}
* <p>
* Propagates to {@link AuthenticationFilter} configuration all
* YARN configuration properties prefixed with
* {@value org.apache.hadoop.yarn.conf.YarnConfiguration#TIMELINE_HTTP_AUTH_PREFIX}.
*
* @param container
* The filter container
* @param conf
* Configuration for run-time parameters
*/
@Override
  public void m0(FilterContainer container, Configuration conf) {
setAuthFilterConfig(conf);
container.addGlobalFilter("Timeline Reader Authentication Filter", AuthenticationFilter.class.getName(), getFilterConfig());
} | 3.26 |
hadoop_TFileDumper_dumpInfo_rdh | /**
* Dump information about TFile.
*
* @param file
* Path string of the TFile
* @param out
* PrintStream to output the information.
* @param conf
* The configuration object.
* @throws IOException
*/
public static void dumpInfo(String file, PrintStream out, Configuration conf) throws IOException {
final int maxKeySampleLen = 16;
Path path = new Path(file);
    FileSystem fs = path.getFileSystem(conf);
    long length = fs.getFileStatus(path).getLen();
FSDataInputStream fsdis = fs.open(path);
TFile.Reader reader = new TFile.Reader(fsdis, length, conf);
try {
LinkedHashMap<String, String> properties = new LinkedHashMap<String, String>();
int blockCnt = reader.readerBCF.getBlockCount();
int metaBlkCnt = reader.readerBCF.metaIndex.index.size();
properties.put("BCFile Version", reader.readerBCF.version.toString());
properties.put("TFile Version", reader.tfileMeta.version.toString());
properties.put("File Length", Long.toString(length));
properties.put("Data Compression", reader.readerBCF.getDefaultCompressionName());
properties.put("Record Count", Long.toString(reader.getEntryCount()));
properties.put("Sorted", Boolean.toString(reader.isSorted()));
if (reader.isSorted()) {
properties.put("Comparator", reader.getComparatorName());
}
properties.put("Data Block Count", Integer.toString(blockCnt));
long dataSize = 0;
long dataSizeUncompressed = 0;
if (blockCnt > 0) {
for (int i = 0; i < blockCnt; ++i) {
BlockRegion region = reader.readerBCF.dataIndex.getBlockRegionList().get(i);
dataSize += region.getCompressedSize();
dataSizeUncompressed += region.getRawSize();
      }
      properties.put("Data Block Bytes", Long.toString(dataSize));
if (!reader.readerBCF.getDefaultCompressionName().equals("none")) {
properties.put("Data Block Uncompressed Bytes", Long.toString(dataSizeUncompressed));
properties.put("Data Block Compression Ratio", String.format("1:%.1f", ((double) (dataSizeUncompressed)) / dataSize));
}
}
properties.put("Meta Block Count", Integer.toString(metaBlkCnt));
long metaSize = 0;
long metaSizeUncompressed = 0;
if (metaBlkCnt > 0) {
Collection<MetaIndexEntry> metaBlks = reader.readerBCF.metaIndex.index.values();
      boolean calculateCompression = false;
for (Iterator<MetaIndexEntry> it = metaBlks.iterator(); it.hasNext();) {
MetaIndexEntry e = it.next();
metaSize += e.getRegion().getCompressedSize();
metaSizeUncompressed += e.getRegion().getRawSize();
if (e.getCompressionAlgorithm() != Algorithm.NONE) {
calculateCompression = true;
}
}
properties.put("Meta Block Bytes", Long.toString(metaSize));
if (calculateCompression) {
properties.put("Meta Block Uncompressed Bytes", Long.toString(metaSizeUncompressed));
properties.put("Meta Block Compression Ratio", String.format("1:%.1f", ((double) (metaSizeUncompressed)) / metaSize));
}
}
properties.put("Meta-Data Size Ratio", String.format("1:%.1f", ((double) (dataSize)) / metaSize));
long v22 = (length - dataSize) - metaSize;
long miscSize = ((BCFile.Magic.size() * 2) + (Long.SIZE / Byte.SIZE)) + Version.size();
long metaIndexSize = v22 - miscSize;
properties.put("Meta Block Index Bytes", Long.toString(metaIndexSize));
properties.put("Headers Etc Bytes", Long.toString(miscSize));// Now output the properties table.
int maxKeyLength = 0;
Set<Map.Entry<String, String>> entrySet = properties.entrySet();
for (Iterator<Map.Entry<String, String>>
it = entrySet.iterator(); it.hasNext();) {
Map.Entry<String, String> e
= it.next();
if (e.getKey().length() > maxKeyLength) {
maxKeyLength = e.getKey().length();
}}for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it.hasNext();) {
Map.Entry<String, String> e = it.next();
out.printf("%s : %s%n", Align.format(e.getKey(), maxKeyLength, Align.LEFT), e.getValue());
}
out.println();
reader.checkTFileDataIndex();
    if (blockCnt > 0) {
      String blkID = "Data-Block";
int blkIDWidth = Align.calculateWidth(blkID, blockCnt);
int blkIDWidth2 = Align.calculateWidth("", blockCnt);
String v34 = "Offset";
int offsetWidth = Align.calculateWidth(v34, length);
String blkLen = "Length";
int blkLenWidth = Align.calculateWidth(blkLen, (dataSize / blockCnt) * 10);
String rawSize = "Raw-Size";
int rawSizeWidth = Align.calculateWidth(rawSize, (dataSizeUncompressed / blockCnt) * 10);
String records = "Records";
int recordsWidth = Align.calculateWidth(records, (reader.getEntryCount() / blockCnt) * 10);
String endKey = "End-Key";
int endKeyWidth = Math.max(endKey.length(), (maxKeySampleLen * 2) + 5);
out.printf("%s %s %s %s %s %s%n", Align.format(blkID, blkIDWidth, Align.CENTER), Align.format(v34, offsetWidth, Align.CENTER), Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(rawSize, rawSizeWidth, Align.CENTER), Align.format(records, recordsWidth, Align.CENTER), Align.format(endKey, endKeyWidth, Align.LEFT));
for (int i = 0; i < blockCnt; ++i) {
BlockRegion region = reader.readerBCF.dataIndex.getBlockRegionList().get(i);
TFileIndexEntry indexEntry = reader.tfileIndex.getEntry(i);
out.printf("%s %s %s %s %s ", Align.format(Align.format(i, blkIDWidth2, Align.ZERO_PADDED), blkIDWidth, Align.LEFT), Align.format(region.getOffset(), offsetWidth, Align.LEFT), Align.format(region.getCompressedSize(), blkLenWidth, Align.LEFT), Align.format(region.getRawSize(), rawSizeWidth, Align.LEFT), Align.format(indexEntry.kvEntries, recordsWidth, Align.LEFT));
byte[] key = indexEntry.key;
boolean asAscii = true;
int sampleLen = Math.min(maxKeySampleLen, key.length);
for (int j = 0; j < sampleLen; ++j) {
byte b = key[j];
if (((b < 32) && (b != 9)) || (b == 127)) {
asAscii = false;
}
}
if (!asAscii) {
out.print("0X");
for (int j = 0; j < sampleLen; ++j) {
          byte b = key[j];
out.printf("%X", b);
}
} else {
out.print(new String(key, 0, sampleLen, StandardCharsets.UTF_8));
}
if (sampleLen < key.length) {
out.print("...");
}
out.println();
}
}
out.println();
if (metaBlkCnt > 0) {
String name = "Meta-Block";
int maxNameLen = 0;
Set<Map.Entry<String, MetaIndexEntry>> metaBlkEntrySet = reader.readerBCF.metaIndex.index.entrySet();
for (Iterator<Map.Entry<String, MetaIndexEntry>> it = metaBlkEntrySet.iterator(); it.hasNext();) {
        Map.Entry<String, MetaIndexEntry> e = it.next();
if (e.getKey().length() > maxNameLen) {
maxNameLen = e.getKey().length();
}
}
      int nameWidth = Math.max(name.length(), maxNameLen);
      String offset = "Offset";
      int offsetWidth = Align.calculateWidth(offset, length);
String blkLen = "Length";
int blkLenWidth = Align.calculateWidth(blkLen, (metaSize / metaBlkCnt) * 10);
String v64 = "Raw-Size";
int rawSizeWidth = Align.calculateWidth(v64, (metaSizeUncompressed / metaBlkCnt) * 10);
String compression = "Compression";
int compressionWidth = compression.length();
out.printf("%s %s %s %s %s%n", Align.format(name, nameWidth, Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER), Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(v64, rawSizeWidth, Align.CENTER), Align.format(compression,
compressionWidth, Align.LEFT));
for (Iterator<Map.Entry<String, MetaIndexEntry>> it = metaBlkEntrySet.iterator(); it.hasNext();) {
Map.Entry<String, MetaIndexEntry> e = it.next();
String blkName = e.getValue().getMetaName();
BlockRegion region = e.getValue().getRegion();
String blkCompression = e.getValue().getCompressionAlgorithm().getName();
out.printf("%s %s %s %s %s%n", Align.format(blkName, nameWidth, Align.LEFT), Align.format(region.getOffset(), offsetWidth, Align.LEFT), Align.format(region.getCompressedSize(), blkLenWidth, Align.LEFT), Align.format(region.getRawSize(), rawSizeWidth, Align.LEFT), Align.format(blkCompression, compressionWidth, Align.LEFT));
}
    }
  } finally {
IOUtils.cleanupWithLogger(LOG, reader, fsdis);
}
} | 3.26 |
hadoop_ExecutingStoreOperation_executeOnlyOnce_rdh | /**
* Check that the operation has not been invoked twice.
* This is an atomic check.
* After the check: activates the span.
*
* @throws IllegalStateException
* on a second invocation.
*/
protected void executeOnlyOnce() {
Preconditions.checkState(!executed.getAndSet(true), "Operation attempted twice");
    activateAuditSpan();
  } | 3.26 |
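A stand-alone sketch of the once-only guard used above: an AtomicBoolean flips exactly once, so a second invocation fails fast instead of re-running the operation. Names are illustrative, not taken from the Hadoop class.

```java
import java.util.concurrent.atomic.AtomicBoolean;

public final class OnceOnlyOperation {
  private final AtomicBoolean executed = new AtomicBoolean(false);

  public void execute() {
    // getAndSet is atomic: only the first caller sees 'false' here.
    if (executed.getAndSet(true)) {
      throw new IllegalStateException("Operation attempted twice");
    }
    // ... do the real work exactly once ...
  }
}
```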
hadoop_ExecutingStoreOperation_apply_rdh | /**
* Apply calls {@link #execute()}.
*
* @return the result.
* @throws IOException
* IO problem
*/
@Override
public final T apply() throws IOException {
return execute();
} | 3.26 |
hadoop_CounterGroupFactory_updateFrameworkGroupMapping_rdh | // Update static mappings (c2i, i2s) of framework groups
private static synchronized void updateFrameworkGroupMapping(Class<?> cls) {
    String name = cls.getName();
    Integer v1 = s2i.get(name);
if (v1 != null)
return;
i2s.add(name);
    s2i.put(name, i2s.size() - 1);
  } | 3.26 |
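An illustrative, self-contained sketch (names are not from the Hadoop factory) of the two-way mapping maintained above: a list gives index-to-name, a map gives name-to-index, and registering the same name twice is a no-op.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class GroupIndexExample {
  private final List<String> indexToName = new ArrayList<>();
  private final Map<String, Integer> nameToIndex = new HashMap<>();

  /** Register a group name, assigning it the next free index; repeated calls are ignored. */
  synchronized void register(String name) {
    if (nameToIndex.containsKey(name)) {
      return; // already registered
    }
    indexToName.add(name);
    nameToIndex.put(name, indexToName.size() - 1);
  }

  /** Look up the index previously assigned to a group name. */
  synchronized int idOf(String name) {
    Integer i = nameToIndex.get(name);
    if (i == null) {
      throw new IllegalArgumentException("Unknown framework group: " + name);
    }
    return i;
  }
}
```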
hadoop_CounterGroupFactory_newGroup_rdh | /**
* Create a new counter group
*
* @param name
* of the group
* @param displayName
* of the group
* @param limits
* the counters limits policy object
* @return a new counter group
*/
  public G newGroup(String name, String displayName, Limits limits) {
    FrameworkGroupFactory<G> gf = fmap.get(name);
if (gf != null) return gf.newGroup(name);
if (name.equals(FS_GROUP_NAME)) {
return newFileSystemGroup();
} else if (s2i.get(name) != null) {
return newFrameworkGroup(s2i.get(name));
}
return newGenericGroup(name, displayName, limits);
} | 3.26 |
hadoop_CounterGroupFactory_version_rdh | /**
*
* @return the counter factory version
 */
public int version() {
return VERSION;
} | 3.26 |
hadoop_CounterGroupFactory_isFrameworkGroup_rdh | /**
* Check whether a group name is a name of a framework group (including
* the filesystem group).
*
* @param name
* to check
* @return true for framework group names
*/
public static synchronized boolean isFrameworkGroup(String name) {
return (s2i.get(name) != null) || name.equals(FS_GROUP_NAME);
} | 3.26 |
hadoop_CounterGroupFactory_getFrameworkGroupId_rdh | /**
* Get the id of a framework group
*
* @param name
* of the group
* @return the framework group id
*/
public static synchronized int getFrameworkGroupId(String name) {
    Integer i = s2i.get(name);
if (i == null)
throwBadFrameworkGroupNameException(name);
return i;
} | 3.26 |
hadoop_CounterGroupFactory_addFrameworkGroup_rdh | // Initialize the framework counter group mapping
private synchronized <T extends Enum<T>> void addFrameworkGroup(final Class<T> cls) {
    updateFrameworkGroupMapping(cls);
    fmap.put(cls.getName(), newFrameworkGroupFactory(cls));
} | 3.26 |
hadoop_CounterGroupFactory_newFrameworkGroup_rdh | /**
* Create a new framework group
*
* @param id
* of the group
* @return a new framework group
*/
public G newFrameworkGroup(int id) {
String name;
synchronized(CounterGroupFactory.class) {
if ((id < 0) || (id >= i2s.size()))
throwBadFrameGroupIdException(id);
name = i2s.get(id);// should not throw here.
}
FrameworkGroupFactory<G> gf = fmap.get(name);
if (gf == null)
throwBadFrameGroupIdException(id);
return gf.newGroup(name);
} | 3.26 |
hadoop_CommitResponse_m0_rdh | /**
* Create a Commit Response.
*
* @return Commit Response.
*/
@Private
@Unstable
public static CommitResponse m0() {
return Records.newRecord(CommitResponse.class);
} | 3.26 |
hadoop_MutableQuantiles_getEstimator_rdh | /**
* Get the quantile estimator.
*
* @return the quantile estimator
*/
@VisibleForTesting
public synchronized QuantileEstimator getEstimator() {
return estimator;
} | 3.26 |
hadoop_MutableQuantiles_addQuantileInfo_rdh | /**
* Add entry to quantileInfos array.
*
* @param i
* array index.
* @param info
* info to be added to quantileInfos array.
*/
public synchronized void addQuantileInfo(int i, MetricsInfo info) {
this.quantileInfos[i] = info;
} | 3.26 |
hadoop_MutableQuantiles_setQuantiles_rdh | /**
* Sets quantileInfo.
*
* @param ucName
* capitalized name of the metric
* @param uvName
* capitalized type of the values
* @param desc
* uncapitalized long-form textual description of the metric
* @param lvName
* uncapitalized type of the values
* @param pDecimalFormat
* Number formatter for percentile value
 */
void setQuantiles(String ucName, String uvName, String desc, String lvName, DecimalFormat pDecimalFormat) {
for (int i = 0; i < QUANTILES.length; i++) {
double percentile = 100 * QUANTILES[i].quantile;
String nameTemplate = ((ucName + pDecimalFormat.format(percentile)) + "thPercentile") + uvName;
String descTemplate = (((((pDecimalFormat.format(percentile) + " percentile ") + lvName) + " with ") + getInterval()) + " second interval for ") + desc;
addQuantileInfo(i, info(nameTemplate, descTemplate));
}
} | 3.26 |
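An illustrative sketch (not the Hadoop class) of the metric-name template built above. The metric name "Op", value name "Latency", and the quantile set are assumptions; the point is the "&lt;name&gt;&lt;percentile&gt;thPercentile&lt;value&gt;" pattern.

```java
import java.text.DecimalFormat;

public final class QuantileNameExample {
  public static void main(String[] args) {
    DecimalFormat fmt = new DecimalFormat("###.####");
    double[] quantiles = {0.50, 0.75, 0.90, 0.95, 0.99};
    for (double q : quantiles) {
      double percentile = 100 * q;
      // e.g. Op50thPercentileLatency, Op75thPercentileLatency, ...
      System.out.println("Op" + fmt.format(percentile) + "thPercentile" + "Latency");
    }
  }
}
```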
hadoop_MutableQuantiles_getInterval_rdh | /**
* Get the rollover interval (in seconds) of the estimator.
*
* @return intervalSecs of the estimator.
*/
public synchronized int getInterval() {
return intervalSecs;
} | 3.26 |
hadoop_MutableQuantiles_setNumInfo_rdh | /**
* Set info about the metrics.
*
* @param pNumInfo
* info about the metrics.
*/
public synchronized void setNumInfo(MetricsInfo pNumInfo) {
this.numInfo = pNumInfo;
} | 3.26 |
hadoop_MutableQuantiles_getQuantiles_rdh | /**
* Returns the array of Quantiles declared in MutableQuantiles.
*
* @return array of Quantiles
*/
public synchronized Quantile[] getQuantiles() {
return QUANTILES;
} | 3.26 |
hadoop_MutableQuantiles_setQuantileInfos_rdh | /**
* Initialize quantileInfos array.
*
* @param length
* of the quantileInfos array.
*/
public synchronized void setQuantileInfos(int length) {
    this.quantileInfos = new MetricsInfo[length];
  } | 3.26 |
hadoop_MutableQuantiles_setInterval_rdh | /**
* Set the rollover interval (in seconds) of the estimator.
*
* @param pIntervalSecs
* of the estimator.
*/
public synchronized void setInterval(int pIntervalSecs) {
    this.intervalSecs = pIntervalSecs;
} | 3.26 |
hadoop_LocalSASKeyGeneratorImpl_getStorageAccountInstance_rdh | /**
* Helper method that creates CloudStorageAccount Instance using the
* storage account key.
*
* @param accountName
* Name of the storage account
* @param accountKey
* Storage Account key
* @return CloudStorageAccount instance for the storage account.
* @throws SASKeyGenerationException
*/
  private CloudStorageAccount getStorageAccountInstance(String accountName,
      String accountKey) throws SASKeyGenerationException {
    if (!storageAccountMap.containsKey(accountName)) {
      if ((accountKey == null) || accountKey.isEmpty()) {
        throw new SASKeyGenerationException("No key for Storage account " + accountName);
      }
CloudStorageAccount account = null;
try {
account = new CloudStorageAccount(new StorageCredentialsAccountAndKey(accountName, accountKey));
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException(("Encountered URISyntaxException " + "for account ") + accountName, uriSyntaxEx);
}
storageAccountMap.put(accountName, account);
}
return storageAccountMap.get(accountName);
} | 3.26 |
hadoop_LocalSASKeyGeneratorImpl_getRelativeBlobSASUri_rdh | /**
* Implementation for generation of Relative Path Blob SAS Uri.
*/
@Override
public URI getRelativeBlobSASUri(String accountName, String container, String relativePath) throws SASKeyGenerationException {
CloudBlobContainer sc = null;
CloudBlobClient client = null;
CachedSASKeyEntry cacheKey = null;
try {
cacheKey = new CachedSASKeyEntry(accountName, container, relativePath);
URI cacheResult = cache.get(cacheKey);
if (cacheResult != null) {
        return cacheResult;
}
CloudStorageAccount account = getSASKeyBasedStorageAccountInstance(accountName);
client = account.createCloudBlobClient();
sc = client.getContainerReference(container);
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException(((("Encountered URISyntaxException " + "while getting container references for container ") + container) + " inside storage account : ") + accountName, uriSyntaxEx);
} catch (StorageException stoEx) {
throw new SASKeyGenerationException(((("Encountered StorageException while " + "getting container references for container ") + container) + " inside storage account : ") + accountName, stoEx);
}
    CloudBlockBlob blob = null;
try {
blob = sc.getBlockBlobReference(relativePath);
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException(((("Encountered URISyntaxException while " + "getting Block Blob references for container ") + container) + " inside storage account : ") + accountName, uriSyntaxEx);
} catch (StorageException stoEx) {
throw new SASKeyGenerationException(((("Encountered StorageException while " + "getting Block Blob references for container ") + container) + " inside storage account : ") + accountName, stoEx);
}
try {
URI sasKey = client.getCredentials().transformUri(blob.getUri());
cache.put(cacheKey, sasKey);
return sasKey;
} catch (StorageException stoEx) {
throw new SASKeyGenerationException((((((("Encountered StorageException while " + "generating SAS key for Blob: ") + relativePath) + " inside ") + "container : ") + container) + " in Storage Account : ") + accountName, stoEx);} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException((((((("Encountered URISyntaxException " + "while generating SAS key for Blob: ") + relativePath) + " inside ") + "container: ") + container) + " in Storage Account : ") + accountName, uriSyntaxEx);
}
} | 3.26 |
hadoop_LocalSASKeyGeneratorImpl_getDefaultAccountAccessPolicy_rdh | /**
* Helper method to generate Access Policy for the Storage Account SAS Key
*
* @return SharedAccessAccountPolicy
*/
private SharedAccessAccountPolicy getDefaultAccountAccessPolicy() {
SharedAccessAccountPolicy ap = new SharedAccessAccountPolicy();
Calendar cal = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
cal.setTime(new Date());
cal.add(Calendar.HOUR, ((int) (getSasKeyExpiryPeriod())) * HOURS_IN_DAY);
ap.setSharedAccessExpiryTime(cal.getTime());
ap.setPermissions(getDefaultAccoutSASKeyPermissions());
ap.setResourceTypes(EnumSet.of(SharedAccessAccountResourceType.CONTAINER, SharedAccessAccountResourceType.OBJECT));
ap.setServices(EnumSet.of(SharedAccessAccountService.BLOB));
return ap;
} | 3.26 |
hadoop_LocalSASKeyGeneratorImpl_getContainerSASUri_rdh | /**
* Implementation to generate SAS Key for a container
*/
@Override
public URI getContainerSASUri(String accountName, String container) throws SASKeyGenerationException {
LOG.debug("Retrieving Container SAS URI For {}@{}", container, accountName);
try {
CachedSASKeyEntry cacheKey = new CachedSASKeyEntry(accountName, container, "/");
      URI cacheResult = cache.get(cacheKey);
if (cacheResult != null) {
return cacheResult;
}
      CloudStorageAccount account = getSASKeyBasedStorageAccountInstance(accountName);
CloudBlobClient client = account.createCloudBlobClient();
URI sasKey = client.getCredentials().transformUri(client.getContainerReference(container).getUri());
cache.put(cacheKey, sasKey);
return sasKey;
} catch (StorageException stoEx) {
throw new SASKeyGenerationException((((("Encountered StorageException while" + " generating SAS Key for container ") + container) + " inside ") + "storage account ") + accountName, stoEx);
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException((((("Encountered URISyntaxException while" + " generating SAS Key for container ") + container) + " inside storage") + " account ") + accountName, uriSyntaxEx);
}
} | 3.26 |
hadoop_LocalSASKeyGeneratorImpl_getAccountNameWithoutDomain_rdh | /**
* Helper method that returns the Storage account name without
* the domain name suffix.
*
* @param fullAccountName
* Storage account name with domain name suffix
* @return String
*/
private String getAccountNameWithoutDomain(String fullAccountName) {
StringTokenizer tokenizer = new StringTokenizer(fullAccountName, ".");
return tokenizer.nextToken();
} | 3.26 |
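A tiny plain-Java check of the behaviour described above: the account name is the first dot-separated token of the fully qualified storage account name. The sample account name is made up.

```java
import java.util.StringTokenizer;

public final class AccountNameExample {
  public static void main(String[] args) {
    String full = "myaccount.blob.core.windows.net";
    // prints "myaccount"
    System.out.println(new StringTokenizer(full, ".").nextToken());
  }
}
```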
hadoop_FilterFs_getDelegationTokens_rdh | // AbstractFileSystem
@Override
  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
return myFs.getDelegationTokens(renewer);
} | 3.26 |
hadoop_FilterFs_getCanonicalServiceName_rdh | // AbstractFileSystem
@Override
  public String getCanonicalServiceName() {
return myFs.getCanonicalServiceName();
} | 3.26 |
hadoop_RemoteSASKeyGeneratorImpl_makeRemoteRequest_rdh | /**
* Helper method to make a remote request.
*
* @param urls
* - Urls to use for the remote request
 * @param path
 * 		- path to use for the remote request
* @param queryParams
* - queryParams to be used.
* @return RemoteSASKeyGenerationResponse
*/
private RemoteSASKeyGenerationResponse makeRemoteRequest(String[] urls, String path, List<NameValuePair> queryParams) throws SASKeyGenerationException {
try {
String responseBody = remoteCallHelper.makeRemoteRequest(urls, path, queryParams, HttpGet.METHOD_NAME);
return RESPONSE_READER.readValue(responseBody);
} catch (WasbRemoteCallException remoteCallEx) {
throw new SASKeyGenerationException("Encountered RemoteCallException" + " while retrieving SAS key from remote service", remoteCallEx);
} catch (JsonParseException jsonParserEx) {
throw new SASKeyGenerationException(("Encountered JsonParseException " + "while parsing the response from remote") + " service into RemoteSASKeyGenerationResponse object", jsonParserEx);
} catch (JsonMappingException jsonMappingEx) {
throw new SASKeyGenerationException(("Encountered JsonMappingException" + " while mapping the response from remote service into ") + "RemoteSASKeyGenerationResponse object", jsonMappingEx);
} catch (IOException ioEx) {
throw new SASKeyGenerationException("Encountered IOException while " + "accessing remote service to retrieve SAS Key", ioEx);
    }
  } | 3.26 |
hadoop_FifoCandidatesSelector_preemptAMContainers_rdh | /**
 * As more resources are needed for preemption, the saved AMContainers have to
 * be rescanned. Such AMContainers can be preemption candidates based on
 * resToObtain, but maxAMCapacityForThisQueue resources will still be retained.
*
* @param clusterResource
* @param preemptMap
* @param skippedAMContainerlist
* @param skippedAMSize
* @param maxAMCapacityForThisQueue
*/
private void preemptAMContainers(Resource clusterResource, Map<ApplicationAttemptId, Set<RMContainer>> preemptMap, Map<ApplicationAttemptId, Set<RMContainer>> curCandidates, List<RMContainer> skippedAMContainerlist, Map<String, Resource> resToObtainByPartition, Resource skippedAMSize, Resource maxAMCapacityForThisQueue, Resource totalPreemptionAllowed) {
for (RMContainer c : skippedAMContainerlist) {
// Got required amount of resources for preemption, can stop now
if (resToObtainByPartition.isEmpty()) {
break;
}
// Once skippedAMSize reaches down to maxAMCapacityForThisQueue,
// container selection iteration for preemption will be stopped.
      if (Resources.lessThanOrEqual(rc, clusterResource, skippedAMSize, maxAMCapacityForThisQueue)) {
        break;
}
boolean preempted = CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, preemptMap, curCandidates, totalPreemptionAllowed, preemptionContext.getCrossQueuePreemptionConservativeDRF());
if (preempted) {
Resources.subtractFrom(skippedAMSize, c.getAllocatedResource());
}
}
skippedAMContainerlist.clear();
} | 3.26 |
hadoop_ResourceBundles_getValue_rdh | /**
* Get a resource given bundle name and key
*
* @param <T>
* type of the resource
* @param bundleName
* name of the resource bundle
* @param key
* to lookup the resource
* @param suffix
* for the key to lookup
* @param defaultValue
* of the resource
* @return the resource or the defaultValue
* @throws ClassCastException
* if the resource found doesn't match T
*/
@SuppressWarnings("unchecked")
public static synchronized <T> T getValue(String bundleName, String key, String suffix, T defaultValue) {
T value;
try {
ResourceBundle bundle = getBundle(bundleName);
      value = ((T) (bundle.getObject(getLookupKey(key, suffix))));
    } catch (Exception e) {
      return defaultValue;
    }
    return value;
} | 3.26 |
hadoop_ResourceBundles_getCounterName_rdh | /**
* Get the counter display name
*
* @param group
* the counter group name for the counter
* @param counter
* the counter name to lookup
* @param defaultValue
* of the counter
* @return the counter display name
*/
public static String getCounterName(String group, String counter, String defaultValue) {
    return getValue(group, counter, ".name", defaultValue);
} | 3.26 |
hadoop_ResourceBundles_getCounterGroupName_rdh | /**
* Get the counter group display name
*
* @param group
* the group name to lookup
* @param defaultValue
* of the group
* @return the group display name
*/
  public static String getCounterGroupName(String group, String defaultValue) {
return getValue(group, "CounterGroupName", "", defaultValue);
} | 3.26 |
hadoop_ExportedBlockKeys_write_rdh | /**
*/
@Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(isBlockTokenEnabled);
out.writeLong(keyUpdateInterval);
out.writeLong(tokenLifetime);
currentKey.write(out);
out.writeInt(allKeys.length);
for (int i = 0; i < allKeys.length; i++) {
allKeys[i].write(out);
}
} | 3.26 |
hadoop_ExportedBlockKeys_readFields_rdh | /**
*/
@Override
public void readFields(DataInput in) throws IOException {
isBlockTokenEnabled = in.readBoolean();
keyUpdateInterval = in.readLong();
tokenLifetime = in.readLong();
currentKey.readFields(in);
    this.allKeys = new BlockKey[in.readInt()];
    for (int i = 0; i < allKeys.length; i++) {
      allKeys[i] = new BlockKey();
      allKeys[i].readFields(in);
}
} | 3.26 |
hadoop_FsAction_and_rdh | /**
* AND operation.
*
* @param that
* FsAction that.
* @return FsAction.
*/
public FsAction and(FsAction that) {
    return vals[ordinal() & that.ordinal()];
} | 3.26 |
hadoop_FsAction_implies_rdh | /**
* Return true if this action implies that action.
*
* @param that
* FsAction that.
 * @return true if this action implies that action, false otherwise.
*/
public boolean implies(FsAction that) {
if (that != null) {
return (ordinal() & that.ordinal()) == that.ordinal();
}
return false;
} | 3.26 |
hadoop_FsAction_not_rdh | /**
* NOT operation.
*
* @return FsAction.
*/
public FsAction not() {
    return vals[7 - ordinal()];
  } | 3.26 |
hadoop_FsAction_or_rdh | /**
* OR operation.
*
* @param that
* FsAction that.
* @return FsAction.
*/
public FsAction or(FsAction that) {
return vals[ordinal() | that.ordinal()];
} | 3.26 |
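A minimal stand-alone sketch of the ordinal-as-bitmask trick the FsAction snippets above rely on: the enum is declared in rwx-bit order, so ordinal() is the permission mask and implies/and/or/not reduce to bit operations. This is an illustrative re-implementation, not the Hadoop enum.

```java
public enum MiniFsAction {
  NONE, EXECUTE, WRITE, WRITE_EXECUTE, READ, READ_EXECUTE, READ_WRITE, ALL;

  private static final MiniFsAction[] VALS = values();

  public boolean implies(MiniFsAction that) {
    return (ordinal() & that.ordinal()) == that.ordinal();
  }

  public MiniFsAction and(MiniFsAction that) { return VALS[ordinal() & that.ordinal()]; }
  public MiniFsAction or(MiniFsAction that) { return VALS[ordinal() | that.ordinal()]; }
  public MiniFsAction not() { return VALS[7 - ordinal()]; }

  public static void main(String[] args) {
    System.out.println(READ_WRITE.implies(READ)); // true
    System.out.println(READ.or(WRITE));           // READ_WRITE
    System.out.println(READ_EXECUTE.not());       // WRITE
  }
}
```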
hadoop_AMRunner_startAMFromRumenTrace_rdh | /**
* Parse workload from a rumen trace file.
*/
private void startAMFromRumenTrace(String inputTrace, long baselineTimeMS) throws IOException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "file:///");
File fin = new File(inputTrace);
try (JobTraceReader reader =
new JobTraceReader(new Path(fin.getAbsolutePath()), conf)) {
LoggedJob job = reader.getNext();
while (job != null) {
try {
          AMDefinitionRumen amDef = AMDefinitionFactory.createFromRumenTrace(job, baselineTimeMS, slsRunner);
          startAMs(amDef);
} catch (Exception e) {
LOG.error("Failed to create an AM", e);
}
job = reader.getNext();
}
}
} | 3.26 |
hadoop_AMRunner_startAMFromSLSTrace_rdh | /**
* Parse workload from a SLS trace file.
*/
private void startAMFromSLSTrace(String inputTrace) throws IOException {
JsonFactory jsonF = new JsonFactory();
ObjectMapper mapper = new ObjectMapper();
try (Reader input = new InputStreamReader(new FileInputStream(inputTrace), StandardCharsets.UTF_8)) {
JavaType type = mapper.getTypeFactory().constructMapType(Map.class, String.class, String.class);
Iterator<Map<String, String>> jobIter = mapper.readValues(jsonF.createParser(input), type);
while (jobIter.hasNext()) {
try {
          Map<String, String> jsonJob = jobIter.next();
          AMDefinitionSLS amDef = AMDefinitionFactory.createFromSlsTrace(jsonJob, slsRunner);
startAMs(amDef);
} catch (Exception e) {
LOG.error("Failed to create an AM: {}", e.getMessage());
}
}
}
} | 3.26 |
hadoop_AMRunner_startAMFromSynthGenerator_rdh | /**
* parse workload information from synth-generator trace files.
*/
private void startAMFromSynthGenerator() throws YarnException, IOException {
Configuration localConf = new Configuration();
localConf.set("fs.defaultFS", "file:///");
// if we use the nodeFile this could have been not initialized yet.
if (slsRunner.getStjp() == null) {
slsRunner.setStjp(new SynthTraceJobProducer(conf, new Path(inputTraces[0])));
}
SynthJob job;
// we use stjp, a reference to the job producer instantiated during node
// creation
    while ((job = ((SynthJob) (slsRunner.getStjp().getNextJob()))) != null) {
ReservationId reservationId = null;
if (job.hasDeadline()) {
reservationId = ReservationId.newInstance(rm.getStartTime(), AM_ID);
}
AMDefinitionSynth amDef = AMDefinitionFactory.createFromSynth(job, slsRunner);
startAMs(amDef, reservationId, job.getParams(), job.getDeadline());
}
} | 3.26 |
hadoop_ContainerSchedulerEvent_getContainer_rdh | /**
* Get the container associated with the event.
*
* @return Container.
*/
public Container getContainer() {
return container;
} | 3.26 |
hadoop_AMRMClientAsyncImpl_getAvailableResources_rdh | /**
* Get the currently available resources in the cluster.
* A valid value is available after a call to allocate has been made
*
* @return Currently available resources
*/
public Resource getAvailableResources() {
return client.getAvailableResources();
} | 3.26 |
hadoop_AMRMClientAsyncImpl_getClusterNodeCount_rdh | /**
* Get the current number of nodes in the cluster.
 * A valid value is available after a call to allocate has been made
*
* @return Current number of nodes in the cluster
*/
public int getClusterNodeCount() {
return client.getClusterNodeCount();
} | 3.26 |
hadoop_AMRMClientAsyncImpl_removeContainerRequest_rdh | /**
* Remove previous container request. The previous container request may have
* already been sent to the ResourceManager. So even after the remove request
* the app must be prepared to receive an allocation for the previous request
* even after the remove request
*
* @param req
* Resource request
*/
public void removeContainerRequest(T req) {
client.removeContainerRequest(req);
} | 3.26 |
hadoop_AMRMClientAsyncImpl_releaseAssignedContainer_rdh | /**
 * Release containers assigned by the Resource Manager. If the app cannot use
 * the container or wants to give up the container then it can release it.
 * The app needs to make new requests for the released resource capability if
 * it still needs it, e.g. if it released non-local resources.
 *
 * @param containerId
 *          the ID of the container to release
 */
public void releaseAssignedContainer(ContainerId containerId) {
client.releaseAssignedContainer(containerId);
} | 3.26 |
hadoop_AMRMClientAsyncImpl_updateBlacklist_rdh | /**
* Update application's blacklist with addition or removal resources.
*
* @param blacklistAdditions
* list of resources which should be added to the
* application blacklist
* @param blacklistRemovals
* list of resources which should be removed from the
* application blacklist
*/
public void updateBlacklist(List<String> blacklistAdditions, List<String> blacklistRemovals) {
client.updateBlacklist(blacklistAdditions, blacklistRemovals);
} | 3.26 |
hadoop_AMRMClientAsyncImpl_addContainerRequest_rdh | /**
* Request containers for resources before calling <code>allocate</code>
*
* @param req
* Resource request
*/
  public void addContainerRequest(T req) {
client.addContainerRequest(req);
} | 3.26 |
hadoop_AMRMClientAsyncImpl_registerApplicationMaster_rdh | /**
* Registers this application master with the resource manager. On successful
* registration, starts the heartbeating thread.
*
* @param appHostName
* Name of the host on which master is running
* @param appHostPort
* Port master is listening on
* @param appTrackingUrl
* URL at which the master info can be seen
* @param placementConstraintsMap
* Placement Constraints Mapping.
* @return Register AM Response.
* @throws YarnException
* @throws IOException
*/
  public RegisterApplicationMasterResponse registerApplicationMaster(String appHostName, int appHostPort, String appTrackingUrl, Map<Set<String>, PlacementConstraint> placementConstraintsMap) throws YarnException, IOException {
RegisterApplicationMasterResponse response = client.registerApplicationMaster(appHostName, appHostPort, appTrackingUrl, placementConstraintsMap);
heartbeatThread.start();
return response;
} | 3.26 |
hadoop_AMRMClientAsyncImpl_m1_rdh | /**
* Unregister the application master. This must be called in the end.
*
* @param appStatus
* Success/Failure status of the master
* @param appMessage
* Diagnostics message on failure
* @param appTrackingUrl
* New URL to get master info
* @throws YarnException
* @throws IOException
*/
public void m1(FinalApplicationStatus appStatus, String appMessage, String appTrackingUrl) throws YarnException, IOException {
synchronized(unregisterHeartbeatLock) {
keepRunning = false;
client.unregisterApplicationMaster(appStatus, appMessage, appTrackingUrl);
}
} | 3.26 |
hadoop_AMRMClientAsyncImpl_serviceStop_rdh | /**
* Tells the heartbeat and handler threads to stop and waits for them to
* terminate.
*/
@Override
protected void serviceStop() throws Exception {
    keepRunning = false;
    heartbeatThread.interrupt();
try {
heartbeatThread.join();
} catch (InterruptedException ex) {
LOG.error("Error joining with heartbeat thread", ex);
}
client.stop();
handlerThread.interrupt();
super.serviceStop();
} | 3.26 |
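A plain-Java sketch of the shutdown pattern in serviceStop above: clear the running flag, interrupt the worker, then join it so stop() does not return while the thread is still alive. Class and field names are illustrative.

```java
public final class StoppableWorker {
  private volatile boolean keepRunning = true;
  private final Thread worker = new Thread(() -> {
    while (keepRunning) {
      try {
        Thread.sleep(1000); // stand-in for one heartbeat interval
      } catch (InterruptedException ie) {
        // interrupted by stop(); the loop condition ends the thread
      }
    }
  });

  public void start() {
    worker.start();
  }

  public void stop() throws InterruptedException {
    keepRunning = false;
    worker.interrupt();
    worker.join(); // wait for the worker to terminate before returning
  }
}
```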
hadoop_DirectoryPolicy_getOptionName_rdh | /**
* Get the option name.
*
* @return name of the option
*/
public String getOptionName() {
return optionName;
} | 3.26 |
hadoop_WindowsGetSpaceUsed_refresh_rdh | /**
* Override to hook in DUHelper class.
*/
@Override
protected void refresh() {
used.set(DUHelper.getFolderUsage(getDirPath()));
} | 3.26 |
hadoop_HCFSMountTableConfigLoader_m0_rdh | /**
* Loads the mount-table configuration from hadoop compatible file system and
* add the configuration items to given configuration. Mount-table
* configuration format should be suffixed with version number.
* Format: {@literal mount-table.<versionNumber>.xml}
* Example: mount-table.1.xml
 * When a user wants to update the mount-table, the expectation is to upload a
 * new mount-table configuration file with a monotonically increasing integer
 * as the version number. This API loads the file with the highest version
 * number. A single file path can also be configured directly.
*
* @param mountTableConfigPath
* : A directory path where mount-table files
* stored or a mount-table file path. We recommend to configure
* directory with the mount-table version files.
* @param conf
* : to add the mount table as resource.
*/
@Override
public void m0(String mountTableConfigPath, Configuration conf) throws IOException {
this.mountTable = new Path(mountTableConfigPath);
String scheme = mountTable.toUri().getScheme();
FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme);
try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) {
RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(mountTable, false);
LocatedFileStatus lfs = null;
int higherVersion = -1;
      while (listFiles.hasNext()) {
        LocatedFileStatus curLfs = listFiles.next();
        String cur = curLfs.getPath().getName();
        String[] nameParts = cur.split(REGEX_DOT);
        if (nameParts.length < 2) {
          logInvalidFileNameFormat(cur);
          continue; // invalid file name
        }
        int v9 = higherVersion;
        try {
          v9 = Integer.parseInt(nameParts[nameParts.length - 2]);
        } catch (NumberFormatException nfe) {
          logInvalidFileNameFormat(cur);
          continue;
        }
        if (v9 > higherVersion) {
          higherVersion = v9;
          lfs = curLfs;
        }
      }
if (lfs == null) {
// No valid mount table file found.
// TODO: Should we fail? Currently viewfs init will fail if no mount
// links anyway.
LOGGER.warn(("No valid mount-table file exist at: {}. At least one " + "mount-table file should present with the name format: ") + "mount-table.<versionNumber>.xml", mountTableConfigPath);
return;
}
// Latest version file.
Path latestVersionMountTable = lfs.getPath();
      if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Loading the mount-table {} into configuration.", latestVersionMountTable);
}
try (FSDataInputStream open = fs.open(latestVersionMountTable)) {
Configuration newConf = new Configuration(false);
newConf.addResource(open);
// This will add configuration props as resource, instead of stream
// itself. So, that stream can be closed now.
conf.addResource(newConf);
}
}
} | 3.26 |
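A stand-alone sketch (illustrative names, not the Hadoop loader) of the version selection described above: among files named mount-table.&lt;N&gt;.xml, pick the one with the highest integer N.

```java
import java.util.Arrays;
import java.util.Comparator;
import java.util.Optional;

public final class MountTableVersionExample {
  /** Return the mount-table file with the highest version number, if any. */
  static Optional<String> pickLatest(String[] fileNames) {
    return Arrays.stream(fileNames)
        .filter(n -> n.matches("mount-table\\.\\d+\\.xml"))
        .max(Comparator.comparingInt(
            (String n) -> Integer.parseInt(n.split("\\.")[1])));
  }

  public static void main(String[] args) {
    String[] files = {"mount-table.1.xml", "mount-table.3.xml", "mount-table.2.xml"};
    // prints Optional[mount-table.3.xml]
    System.out.println(pickLatest(files));
  }
}
```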
hadoop_DefaultAnonymizableDataType_needsAnonymization_rdh | // Determines if the contained data needs anonymization
protected boolean needsAnonymization(Configuration conf) {
return true;
} | 3.26 |
hadoop_DynoInfraUtils_getNameNodeWebUri_rdh | /**
* Get the URI that can be used to access the launched NameNode's web UI, e.g.
* for JMX calls.
*
* @param nameNodeProperties
* The set of properties representing the
* information about the launched NameNode.
* @return The URI to the web UI.
*/
static URI getNameNodeWebUri(Properties nameNodeProperties) {
return URI.create(String.format("http://%s:%s/", nameNodeProperties.getProperty(DynoConstants.NN_HOSTNAME), nameNodeProperties.getProperty(DynoConstants.NN_HTTP_PORT)));
} | 3.26 |
hadoop_DynoInfraUtils_fetchHadoopTarball_rdh | /**
* If a file matching {@value HADOOP_TAR_FILENAME_FORMAT} and {@code version}
* is found in {@code destinationDir}, return its path. Otherwise, first
* download the tarball from an Apache mirror. If the
* {@value APACHE_DOWNLOAD_MIRROR_KEY} configuration or system property
* (checked in that order) is set, use that as the mirror; else use
* {@value APACHE_DOWNLOAD_MIRROR_DEFAULT}.
*
* @param destinationDir
* destination directory to save a tarball
* @param version
* The version of Hadoop to download, like "2.7.4"
* or "3.0.0-beta1"
* @param conf
* configuration
* @param log
* logger instance
* @return The path to the tarball.
* @throws IOException
* on failure
*/
  public static File fetchHadoopTarball(File destinationDir, String version, Configuration conf, Logger log) throws IOException {
log.info("Looking for Hadoop tarball for version: " + version);
File destinationFile = new File(destinationDir, String.format(HADOOP_TAR_FILENAME_FORMAT, version));
if (destinationFile.exists()) {
log.info("Found tarball at: " + destinationFile.getAbsolutePath());
return destinationFile;
}
String apacheMirror = conf.get(APACHE_DOWNLOAD_MIRROR_KEY);
if (apacheMirror == null) {
apacheMirror = System.getProperty(APACHE_DOWNLOAD_MIRROR_KEY, APACHE_DOWNLOAD_MIRROR_DEFAULT);
}
if (!destinationDir.exists()) {
if (!destinationDir.mkdirs()) {
throw new IOException("Unable to create local dir: " + destinationDir);
}
}
URL downloadURL = new URL(apacheMirror + String.format(APACHE_DOWNLOAD_MIRROR_SUFFIX_FORMAT, version, version));
log.info("Downloading tarball from: <{}> to <{}>", downloadURL, destinationFile.getAbsolutePath());
FileUtils.copyURLToFile(downloadURL, destinationFile, 10000, 60000);
log.info("Completed downloading of Hadoop tarball");
return destinationFile;
} | 3.26 |
hadoop_DynoInfraUtils_getNameNodeHdfsUri_rdh | /**
* Get the URI that can be used to access the launched NameNode for HDFS RPCs.
*
* @param nameNodeProperties
* The set of properties representing the
* information about the launched NameNode.
* @return The HDFS URI.
*/
static URI getNameNodeHdfsUri(Properties nameNodeProperties) {
return URI.create(String.format("hdfs://%s:%s/", nameNodeProperties.getProperty(DynoConstants.NN_HOSTNAME), nameNodeProperties.getProperty(DynoConstants.NN_RPC_PORT)));
} | 3.26 |
hadoop_DynoInfraUtils_m0_rdh | /**
* Wait for the launched NameNode to finish starting up. Continues until
* {@code shouldExit} returns true.
*
* @param nameNodeProperties
* The set of properties containing information
* about the NameNode.
* @param shouldExit
* Should return true iff this should stop waiting.
* @param log
* Where to log information.
*/
static void m0(Properties nameNodeProperties, Supplier<Boolean> shouldExit, Logger log) throws IOException, InterruptedException {
if (shouldExit.get()) {
return;
}
log.info("Waiting for NameNode to finish starting up...");
waitForNameNodeJMXValue("Startup progress", NAMENODE_STARTUP_PROGRESS_JMX_QUERY, "PercentComplete", 1.0, 0.01, false, nameNodeProperties, shouldExit, log);
log.info("NameNode has started!");
} | 3.26 |
hadoop_DynoInfraUtils_fetchNameNodeJMXValue_rdh | /**
* Fetch a value from the launched NameNode's JMX.
*
* @param nameNodeProperties
* The set of properties containing information
* about the NameNode.
* @param jmxBeanQuery
* The JMX bean query to execute; should return a
* JMX property matching {@code jmxProperty}.
* @param property
* The name of the JMX property whose value should be polled.
* @return The value associated with the property.
*/
  static String fetchNameNodeJMXValue(Properties nameNodeProperties, String jmxBeanQuery, String property) throws IOException {
URI nnWebUri = getNameNodeWebUri(nameNodeProperties);
URL queryURL;
try {
queryURL = new URL(nnWebUri.getScheme(), nnWebUri.getHost(), nnWebUri.getPort(), "/jmx?qry=" + jmxBeanQuery);
} catch (MalformedURLException e) {
throw new IllegalArgumentException(((("Invalid JMX query: \"" + jmxBeanQuery) + "\" against ") + "NameNode URI: ")
+ nnWebUri);
}
    HttpURLConnection conn = ((HttpURLConnection) (queryURL.openConnection()));
if (conn.getResponseCode() != 200) {
throw new IOException("Unable to retrieve JMX: "
+ conn.getResponseMessage());
}
InputStream in = conn.getInputStream();
JsonFactory v39 = new JsonFactory();
JsonParser parser = v39.createParser(in);
    if (((((parser.nextToken() != JsonToken.START_OBJECT) || (parser.nextToken() != JsonToken.FIELD_NAME)) || (!parser.getCurrentName().equals("beans"))) || (parser.nextToken() != JsonToken.START_ARRAY)) || (parser.nextToken() != JsonToken.START_OBJECT)) {
      throw new IOException("Unexpected format of JMX JSON response for: " + jmxBeanQuery);
}
int objectDepth = 1;
String ret = null;
while (objectDepth > 0) {
JsonToken tok = parser.nextToken();
if (tok == JsonToken.START_OBJECT) {
objectDepth++;
} else if (tok == JsonToken.END_OBJECT) {
objectDepth--;
} else if (tok == JsonToken.FIELD_NAME) {
if (parser.getCurrentName().equals(property)) {
parser.nextToken();
ret = parser.getText();
break;
}
}
}
parser.close();
in.close();
conn.disconnect();
if (ret == null) {
throw new IOException((("Property " + property) + " not found within ") + jmxBeanQuery);
} else {
return ret;
}
} | 3.26 |
hadoop_DynoInfraUtils_getNameNodeServiceRpcAddr_rdh | /**
* Get the URI that can be used to access the launched NameNode for HDFS
* Service RPCs (i.e. from DataNodes).
*
* @param nameNodeProperties
* The set of properties representing the
* information about the launched NameNode.
* @return The service RPC URI.
*/
static URI getNameNodeServiceRpcAddr(Properties nameNodeProperties) {
return URI.create(String.format("hdfs://%s:%s/", nameNodeProperties.getProperty(DynoConstants.NN_HOSTNAME), nameNodeProperties.getProperty(DynoConstants.NN_SERVICERPC_PORT)));
} | 3.26 |
hadoop_DynoInfraUtils_waitForNameNodeReadiness_rdh | /**
* Wait for the launched NameNode to be ready, i.e. to have at least 99% of
* its DataNodes register, have fewer than 0.01% of its blocks missing, and
* less than 1% of its blocks under replicated. Continues until the criteria
* have been met or {@code shouldExit} returns true.
*
* @param nameNodeProperties
* The set of properties containing information
* about the NameNode.
* @param numTotalDataNodes
* Total expected number of DataNodes to register.
* @param shouldExit
* Should return true iff this should stop waiting.
* @param log
* Where to log information.
*/
static void waitForNameNodeReadiness(final Properties nameNodeProperties, int numTotalDataNodes, boolean triggerBlockReports, Supplier<Boolean> shouldExit, final Configuration conf, final Logger log) throws IOException, InterruptedException {
if (shouldExit.get()) {
return;
}
int minDataNodes = ((int) (conf.getFloat(DATANODE_LIVE_MIN_FRACTION_KEY, DATANODE_LIVE_MIN_FRACTION_DEFAULT) * numTotalDataNodes));
log.info(String.format("Waiting for %d DataNodes to register with the NameNode...", minDataNodes));
waitForNameNodeJMXValue("Number of live DataNodes", FSNAMESYSTEM_STATE_JMX_QUERY, f0, minDataNodes, numTotalDataNodes * 0.001, false, nameNodeProperties, shouldExit, log);
final int totalBlocks = Integer.parseInt(fetchNameNodeJMXValue(nameNodeProperties, FSNAMESYSTEM_STATE_JMX_QUERY, JMX_BLOCKS_TOTAL));
    final AtomicBoolean doneWaiting = new AtomicBoolean(false);
if (triggerBlockReports) {
// This will be significantly lower than the actual expected number of
// blocks because it does not
// take into account replication factor. However the block reports are
// pretty binary; either a full
// report has been received or it hasn't. Thus we don't mind the large
// underestimate here.
      final int blockThreshold = (totalBlocks / numTotalDataNodes) * 2;
// The Configuration object here is based on the host cluster, which may
// have security enabled; we need to disable it to talk to the Dyno NN
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "false");
final DistributedFileSystem dfs = ((DistributedFileSystem) (FileSystem.get(getNameNodeHdfsUri(nameNodeProperties), conf)));
log.info(("Launching thread to trigger block reports for Datanodes with <" + blockThreshold) + " blocks reported");
Thread blockReportThread = new Thread(() -> {
// Here we count both Missing and UnderReplicated within under
// replicated
long lastUnderRepBlocks = Long.MAX_VALUE;
try {
while (true) {
// this will eventually exit via an interrupt
try {
Thread.sleep(TimeUnit.MINUTES.toMillis(1));
long underRepBlocks = Long.parseLong(fetchNameNodeJMXValue(nameNodeProperties, FSNAMESYSTEM_JMX_QUERY, JMX_MISSING_BLOCKS)) + Long.parseLong(fetchNameNodeJMXValue(nameNodeProperties,
FSNAMESYSTEM_STATE_JMX_QUERY, JMX_UNDER_REPLICATED_BLOCKS));
long blockDecrease = lastUnderRepBlocks - underRepBlocks;
lastUnderRepBlocks = underRepBlocks;
if ((blockDecrease < 0) || (blockDecrease > (totalBlocks * 0.001))) {
continue;
}
String liveNodeListString = fetchNameNodeJMXValue(nameNodeProperties,
NAMENODE_INFO_JMX_QUERY, JMX_LIVE_NODES_LIST);
Set<String> datanodesToReport = parseStaleDataNodeList(liveNodeListString, blockThreshold, log);
if (datanodesToReport.isEmpty() && doneWaiting.get()) {
log.info("BlockReportThread exiting; all DataNodes have " + "reported blocks");
break;
}
log.info("Queueing {} Datanodes for block report: {}", datanodesToReport.size(), Joiner.on(",").join(datanodesToReport));
DatanodeInfo[] datanodes = dfs.getDataNodeStats();
            int cnt = 0;
            for (DatanodeInfo datanode : datanodes) {
if (datanodesToReport.contains(datanode.getXferAddr(true))) {
Thread.sleep(1);// to throw an interrupt if one is found
triggerDataNodeBlockReport(conf, datanode.getIpcAddr(true));
cnt++;
Thread.sleep(1000);
}
}
if (cnt != datanodesToReport.size()) {
log.warn("Found {} Datanodes to queue block reports for but "
+ "was only able to trigger {}", datanodesToReport.size(), cnt);
}
} catch (IOException ioe) {
log.warn("Exception encountered in block report thread", ioe);
}
}
} catch (InterruptedException ie) {
// Do nothing; just exit
}
log.info("Block reporting thread exiting");
});
blockReportThread.setDaemon(true);
blockReportThread.setUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
blockReportThread.start();
    }
    float maxMissingBlocks = totalBlocks * conf.getFloat(MISSING_BLOCKS_MAX_FRACTION_KEY, MISSING_BLOCKS_MAX_FRACTION_DEFAULT);
log.info("Waiting for MissingBlocks to fall below {}...", maxMissingBlocks);
waitForNameNodeJMXValue("Number of missing blocks", FSNAMESYSTEM_JMX_QUERY, JMX_MISSING_BLOCKS, maxMissingBlocks, totalBlocks * 1.0E-4, true, nameNodeProperties, shouldExit, log);
float maxUnderreplicatedBlocks = totalBlocks * conf.getFloat(UNDERREPLICATED_BLOCKS_MAX_FRACTION_KEY, UNDERREPLICATED_BLOCKS_MAX_FRACTION_DEFAULT);
log.info("Waiting for UnderReplicatedBlocks to fall below {}...", maxUnderreplicatedBlocks);
waitForNameNodeJMXValue("Number of under replicated blocks", FSNAMESYSTEM_STATE_JMX_QUERY, JMX_UNDER_REPLICATED_BLOCKS, maxUnderreplicatedBlocks, totalBlocks * 0.001, true, nameNodeProperties, shouldExit, log);log.info("NameNode is ready for use!");
doneWaiting.set(true);
} | 3.26 |
hadoop_DynoInfraUtils_triggerDataNodeBlockReport_rdh | /**
* Trigger a block report on a given DataNode.
*
* @param conf
* Configuration
* @param dataNodeTarget
* The target; should be like {@code <host>:<port>}
*/
private static void triggerDataNodeBlockReport(Configuration conf, String dataNodeTarget) throws IOException {
InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(dataNodeTarget);
    ClientDatanodeProtocol dnProtocol = DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, UserGroupInformation.getCurrentUser(), conf, NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
    dnProtocol.triggerBlockReport(new BlockReportOptions.Factory().build());
} | 3.26 |
hadoop_DynoInfraUtils_waitForNameNodeJMXValue_rdh | /**
* Poll the launched NameNode's JMX for a specific value, waiting for it to
* cross some threshold. Continues until the threshold has been crossed or
* {@code shouldExit} returns true. Periodically logs the current value.
*
* @param valueName
* The human-readable name of the value which is being
* polled (for printing purposes only).
* @param jmxBeanQuery
* The JMX bean query to execute; should return a JMX
* property matching {@code jmxProperty}.
* @param jmxProperty
* The name of the JMX property whose value should be
* polled.
* @param threshold
* The threshold value to wait for the JMX property to be
* above/below.
* @param printThreshold
* The threshold between each log statement; controls
* how frequently the value is printed. For example,
* if this was 10, a statement would be logged every
* time the value has changed by more than 10.
* @param decreasing
* True iff the property's value is decreasing and this
* should wait until it is lower than threshold; else the
* value is treated as increasing and will wait until it
* is higher than threshold.
* @param nameNodeProperties
* The set of properties containing information
* about the NameNode.
* @param shouldExit
* Should return true iff this should stop waiting.
* @param log
* Where to log information.
*/
@SuppressWarnings("checkstyle:parameternumber")
private static void waitForNameNodeJMXValue(String valueName, String jmxBeanQuery, String jmxProperty, double threshold, double printThreshold, boolean decreasing, Properties nameNodeProperties, Supplier<Boolean> shouldExit, Logger log) throws InterruptedException {
double lastPrintedValue = (decreasing) ? Double.MAX_VALUE : Double.MIN_VALUE;
double value;
int retryCount = 0;
long startTime = Time.monotonicNow();
while (!shouldExit.get()) {
try {
value = Double.parseDouble(fetchNameNodeJMXValue(nameNodeProperties, jmxBeanQuery, jmxProperty));
if ((decreasing && (value <= threshold)) || ((!decreasing) && (value >= threshold))) {
log.info(String.format("%s = %.2f; %s threshold of %.2f; done waiting after %d ms.", valueName, value, decreasing ? "below" : "above", threshold, Time.monotonicNow() - startTime));
break;
} else if (Math.abs(value - lastPrintedValue) >= printThreshold) {
log.info(String.format("%s: %.2f", valueName, value));
lastPrintedValue = value;
}
} catch (IOException ioe) {
if (((++retryCount) % 20) == 0) {
log.warn("Unable to fetch {}; retried {} times / waited {} ms", valueName, retryCount, Time.monotonicNow() - startTime, ioe);
}
}
Thread.sleep(3000);
}
} | 3.26 |
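The waiting loop above is a generic poll-until-threshold pattern. The sketch below strips it to that pattern for the decreasing case, assuming a caller-supplied DoubleSupplier in place of the internal fetchNameNodeJMXValue helper; all names here are illustrative, not from the original.

import java.util.function.DoubleSupplier;

public final class ThresholdWaiter {
    // Poll a metric until it falls to or below the threshold, logging when it has
    // moved by at least printThreshold since the last log line.
    static void waitUntilAtMost(String name, DoubleSupplier metric, double threshold,
                                double printThreshold, long pollMillis) throws InterruptedException {
        double lastPrinted = Double.MAX_VALUE;
        while (true) {
            double value = metric.getAsDouble();
            if (value <= threshold) {
                System.out.printf("%s = %.2f; below threshold of %.2f; done waiting%n", name, value, threshold);
                return;
            }
            if (Math.abs(value - lastPrinted) >= printThreshold) {
                System.out.printf("%s: %.2f%n", name, value);
                lastPrinted = value;
            }
            Thread.sleep(pollMillis);
        }
    }
}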
hadoop_Chain_createBlockingQueue_rdh | /**
* Creates a ChainBlockingQueue with KeyValuePair as element
*
* @return the ChainBlockingQueue
*/
ChainBlockingQueue<KeyValuePair<?, ?>> createBlockingQueue() {
return new ChainBlockingQueue<KeyValuePair<?, ?>>();
} | 3.26 |
hadoop_Chain_getCurrentValue_rdh | /**
* Get the current value.
*
* @return the value object that was read
* @throws IOException
* @throws InterruptedException
*/
public VALUEIN getCurrentValue() throws IOException, InterruptedException {
return this.value;
} | 3.26 |
hadoop_Chain_write_rdh | /**
* Writes a key/value pair.
*
* @param key
* the key to write.
* @param value
* the value to write.
* @throws IOException
*/
public void write(KEYOUT key, VALUEOUT value) throws IOException, InterruptedException {
if (outputQueue != null) {
writeToQueue(key, value);
} else {
outputContext.write(key, value);
}
} | 3.26 |
hadoop_Chain_getPrefix_rdh | /**
* Returns the prefix to use for the configuration of the chain depending if
* it is for a Mapper or a Reducer.
*
* @param isMap
* TRUE for Mapper, FALSE for Reducer.
* @return the prefix to use.
*/
protected static String getPrefix(boolean isMap) {
return isMap ? CHAIN_MAPPER : CHAIN_REDUCER;
} | 3.26 |
hadoop_Chain_addMapper_rdh | /**
* Adds a Mapper class to the chain job.
*
* <p>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Mapper.
*
* @param isMap
* indicates if the Chain is for a Mapper or for a Reducer.
* @param job
* chain job.
* @param klass
* the Mapper class to add.
* @param inputKeyClass
* mapper input key class.
* @param inputValueClass
* mapper input value class.
* @param outputKeyClass
* mapper output key class.
* @param outputValueClass
* mapper output value class.
* @param mapperConf
* a configuration for the Mapper class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
@SuppressWarnings("unchecked")
protected static void addMapper(boolean isMap, Job job, Class<? extends Mapper> klass, Class<?> inputKeyClass, Class<?> inputValueClass, Class<?> outputKeyClass, Class<?> outputValueClass, Configuration mapperConf) {
String prefix = getPrefix(isMap);
Configuration jobConf = job.getConfiguration();
// for a reducer chain, check that the Reducer has already been set
checkReducerAlreadySet(isMap, jobConf, prefix, true);
// set the mapper class
int index = getIndex(jobConf, prefix);
jobConf.setClass((prefix + CHAIN_MAPPER_CLASS) + index, klass, Mapper.class);
m2(isMap, jobConf, inputKeyClass, inputValueClass, outputKeyClass, outputValueClass, index, prefix);
setMapperConf(isMap, jobConf, inputKeyClass, inputValueClass, outputKeyClass, outputValueClass, mapperConf, index, prefix);
} | 3.26 |
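The protected helper above sits behind the public ChainMapper API. Below is a hedged sketch of typical map-side chaining; the two trivial mappers are defined inline purely for illustration and are not part of the original code.

import java.io.IOException;
import java.util.Locale;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;

public class ChainJobSetup {

    // Illustrative mapper: emits each line keyed by its upper-cased text.
    public static class UpperCaseMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context ctx)
                throws IOException, InterruptedException {
            ctx.write(new Text(value.toString().toUpperCase(Locale.ROOT)), value);
        }
    }

    // Illustrative mapper: trims the value of each pair it receives.
    public static class TrimMapper extends Mapper<Text, Text, Text, Text> {
        @Override
        protected void map(Text key, Text value, Context ctx)
                throws IOException, InterruptedException {
            ctx.write(key, new Text(value.toString().trim()));
        }
    }

    public static Job buildMapChain() throws IOException {
        Job job = Job.getInstance(new Configuration(), "chain example");
        // Each mapper gets its own Configuration without defaults, as the Javadoc recommends.
        ChainMapper.addMapper(job, UpperCaseMapper.class, LongWritable.class, Text.class,
            Text.class, Text.class, new Configuration(false));
        // The second mapper's input types must match the first mapper's output types.
        ChainMapper.addMapper(job, TrimMapper.class, Text.class, Text.class,
            Text.class, Text.class, new Configuration(false));
        return job;
    }
}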
hadoop_Chain_checkReducerAlreadySet_rdh | // for a reducer chain, check whether the Reducer has already been set
protected static void checkReducerAlreadySet(boolean isMap, Configuration jobConf, String prefix, boolean shouldSet) {
if (!isMap) {
if (shouldSet) {
if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null) == null) {
throw new IllegalStateException("A Mapper can be added to the chain only after the Reducer has " + "been set");
}
} else if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null) != null) {
throw new IllegalStateException("Reducer has been already set");
}
}
} | 3.26 |
hadoop_Chain_startAllThreads_rdh | // start all the threads
void startAllThreads() {
for (Thread thread : threads) {
thread.start();
}
} | 3.26 |
hadoop_Chain_joinAllThreads_rdh | // wait till all threads finish
void joinAllThreads() throws IOException, InterruptedException {
for (Thread thread : threads) {
thread.join();
}
Throwable th = getThrowable();
if (th != null) {
if (th instanceof IOException) {
throw ((IOException) (th));
} else if (th instanceof InterruptedException) {
throw ((InterruptedException) (th));
} else {
throw new RuntimeException(th);
}
}
} | 3.26
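joinAllThreads illustrates a common join-then-rethrow pattern. The self-contained sketch below shows that pattern, assuming the first worker failure is recorded in an AtomicReference; the chain's own getThrowable() bookkeeping is not shown in this snippet, so that part is an assumption.

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

public final class JoinAndRethrow {
    // Wait for all workers, then surface the first recorded failure with its original type.
    static void joinAll(List<Thread> threads, AtomicReference<Throwable> firstFailure)
            throws IOException, InterruptedException {
        for (Thread t : threads) {
            t.join();
        }
        Throwable th = firstFailure.get();
        if (th == null) {
            return;
        }
        if (th instanceof IOException) {
            throw (IOException) th;
        }
        if (th instanceof InterruptedException) {
            throw (InterruptedException) th;
        }
        throw new RuntimeException(th);
    }
}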
hadoop_Chain_addReducer_rdh | /**
* Add reducer that reads from context and writes to a queue
*/
@SuppressWarnings("unchecked")
void addReducer(TaskInputOutputContext inputContext, ChainBlockingQueue<KeyValuePair<?, ?>> outputQueue) throws IOException, InterruptedException {
Class<?> keyOutClass = rConf.getClass(REDUCER_OUTPUT_KEY_CLASS, Object.class);
Class<?> valueOutClass = rConf.getClass(REDUCER_OUTPUT_VALUE_CLASS, Object.class);
RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, outputQueue, rConf);
Reducer.Context reducerContext = createReduceContext(rw, ((ReduceContext) (inputContext)), rConf);
ReduceRunner runner = new ReduceRunner(reducerContext, reducer, rw);
threads.add(runner);
} | 3.26 |
hadoop_Chain_createMapContext_rdh | /**
* Create a map context that is based on ChainMapContext and the given record
* reader and record writer
*/
private <KEYIN, VALUEIN, KEYOUT, VALUEOUT> Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createMapContext(RecordReader<KEYIN, VALUEIN> rr, RecordWriter<KEYOUT, VALUEOUT> rw, TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context, Configuration conf) {
MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext = new ChainMapContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(context, rr, rw, conf);
Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context mapperContext = new WrappedMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>().getMapContext(mapContext);
return mapperContext;
} | 3.26 |
hadoop_Chain_createReduceContext_rdh | /**
* Create a reduce context that is based on ChainMapContext and the given
* record writer
*/
private <KEYIN, VALUEIN, KEYOUT, VALUEOUT> Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createReduceContext(RecordWriter<KEYOUT, VALUEOUT> rw, ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context, Configuration conf) {
ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext = new ChainReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(context, rw, conf);
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context reducerContext = new WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>().getReducerContext(reduceContext);
return reducerContext;
} | 3.26 |
hadoop_Chain_setReducer_rdh | /**
* Sets the Reducer class to the chain job.
*
* <p>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Reducer.
*
* @param job
* the chain job.
* @param klass
* the Reducer class to add.
* @param inputKeyClass
* reducer input key class.
* @param inputValueClass
* reducer input value class.
* @param outputKeyClass
* reducer output key class.
* @param outputValueClass
* reducer output value class.
* @param reducerConf
* a configuration for the Reducer class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
@SuppressWarnings("unchecked")
protected static void setReducer(Job job, Class<? extends Reducer> klass, Class<?> inputKeyClass, Class<?> inputValueClass, Class<?> outputKeyClass, Class<?> outputValueClass, Configuration reducerConf) {
String prefix = getPrefix(false);
Configuration jobConf = job.getConfiguration();
checkReducerAlreadySet(false, jobConf, prefix, false);
jobConf.setClass(prefix + CHAIN_REDUCER_CLASS, klass, Reducer.class);
setReducerConf(jobConf, inputKeyClass, inputValueClass, outputKeyClass, outputValueClass, reducerConf, prefix);
} | 3.26 |
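As with addMapper, this helper backs the public ChainReducer API. A hedged continuation of the earlier hypothetical chain job follows; the framework's identity Reducer and Mapper stand in for real classes purely to keep the sketch compilable.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;

public class ChainReduceSetup {
    // job: the Job built in the earlier ChainMapper sketch.
    public static void addReduceStage(Job job) throws IOException {
        // Install the single Reducer of the chain.
        ChainReducer.setReducer(job, Reducer.class, Text.class, Text.class,
            Text.class, Text.class, new Configuration(false));
        // Mappers added through ChainReducer run after the Reducer, on its output.
        ChainReducer.addMapper(job, Mapper.class, Text.class, Text.class,
            Text.class, Text.class, new Configuration(false));
    }
}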
hadoop_Chain_getCurrentKey_rdh | /**
* Get the current key.
*
* @return the current key object or null if there isn't one
* @throws IOException
* @throws InterruptedException
*/
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return this.key;
} | 3.26 |
hadoop_Chain_getReducer_rdh | /**
* Returns the Reducer instance in the chain.
*
* @return the Reducer instance in the chain or NULL if none.
*/
Reducer<?, ?, ?, ?> getReducer() {
return reducer;
} | 3.26 |
hadoop_Chain_interruptAllThreads_rdh | // interrupt all threads
private synchronized void interruptAllThreads() {
for (Thread th : threads) {
th.interrupt();
}
for (ChainBlockingQueue<?> queue : blockingQueues) {
queue.interrupt();
}
} | 3.26 |
hadoop_Chain_runReducer_rdh | // Run the reducer directly.
@SuppressWarnings("unchecked")
<KEYIN, VALUEIN, KEYOUT, VALUEOUT> void runReducer(TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context) throws IOException, InterruptedException {
RecordWriter<KEYOUT, VALUEOUT> rw = new ChainRecordWriter<KEYOUT, VALUEOUT>(context);
Reducer.Context reducerContext = createReduceContext(rw, ((ReduceContext) (context)), rConf);
reducer.run(reducerContext);
rw.close(context);
} | 3.26 |