name stringlengths 12-178 | code_snippet stringlengths 8-36.5k | score float64 3.26-3.68 |
---|---|---|
hadoop_AbfsOutputStreamStatisticsImpl_timeSpentTaskWait_rdh | /**
* {@inheritDoc }
*
* Records the total time spent waiting for a task to complete.
*
 * When the thread executor's task queue
 * ({@link java.util.concurrent.BlockingQueue}) reaches a size greater than
 * or equal to 2 times maxConcurrentRequestCounts, it waits for a task in
 * that queue to finish before taking up the next task in the queue.
 *
 * The time spent waiting for the task to complete is recorded in this
 * counter.
*/
@Override
public DurationTracker timeSpentTaskWait() {
return ioStatisticsStore.trackDuration(StreamStatisticNames.TIME_SPENT_ON_TASK_WAIT);
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_blockReleased_rdh | /**
* Increment the counter to indicate a block has been released.
*/
@Override
public void blockReleased() {
blocksReleased.incrementAndGet();
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_blockAllocated_rdh | /**
* Increment the counter to indicate a block has been allocated.
*/
@Override
public void blockAllocated() {
blocksAllocated.incrementAndGet();
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_getTimeSpentOnPutRequest_rdh | /**
* Getter for mean value of time taken to complete a PUT request by
* AbfsOutputStream.
*
* @return mean value.
*/
@VisibleForTesting
public double getTimeSpentOnPutRequest() {
return ioStatisticsStore.meanStatistics().get(StreamStatisticNames.TIME_SPENT_ON_PUT_REQUEST + StoreStatisticNames.SUFFIX_MEAN).mean();
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_uploadFailed_rdh | /**
* Records the total bytes failed to upload through AbfsOutputStream.
*
* @param bytes
* number of bytes failed to upload. Negative bytes are ignored.
*/
@Override
public void uploadFailed(long bytes) {
ioStatisticsStore.incrementCounter(StreamStatisticNames.BYTES_UPLOAD_FAILED, bytes);
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_writeCurrentBuffer_rdh | /**
* {@inheritDoc }
*
 * Records the number of times AbfsOutputStream writes the current buffer
 * to the service, i.e. appends the buffer via the AbfsClient.
*/
@Override
public void writeCurrentBuffer() {
writeCurrentBufferOps.incrementAndGet();
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_uploadSuccessful_rdh | /**
* Records the total bytes successfully uploaded through AbfsOutputStream.
*
* @param bytes
* number of bytes that were successfully uploaded. Negative
* bytes are ignored.
*/
@Override
public void uploadSuccessful(long bytes) {
bytesUploadedSuccessfully.addAndGet(bytes);
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_getIOStatistics_rdh | /**
* {@inheritDoc }
*
* A getter for IOStatisticsStore instance which extends IOStatistics.
*
* @return IOStatisticsStore instance.
*/
@Override
public IOStatistics getIOStatistics() {
return ioStatisticsStore;
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_toString_rdh | /**
* String to show AbfsOutputStream statistics values in AbfsOutputStream.
*
* @return String with AbfsOutputStream statistics.
*/
@Override
public String toString() {
final StringBuilder outputStreamStats = new StringBuilder("OutputStream Statistics{");
outputStreamStats.append(ioStatisticsStore.toString());
outputStreamStats.append("}");
return outputStreamStats.toString();
} | 3.26 |
hadoop_FileRange_createFileRange_rdh | /**
* Factory method to create a FileRange object.
*
* @param offset
* starting offset of the range.
* @param length
* length of the range.
* @param reference
* nullable reference to store in the range.
* @return a new instance of FileRangeImpl.
*/
static FileRange createFileRange(long offset, int length, Object reference) {
return new FileRangeImpl(offset, length, reference);
} | 3.26 |
hadoop_AuthenticationToken_m0_rdh | /**
* Parses a string into an authentication token.
*
* @param tokenStr
* string representation of a token.
* @return the parsed authentication token.
* @throws AuthenticationException
* thrown if the string representation could not be parsed into
* an authentication token.
*/
public static AuthenticationToken m0(String tokenStr) throws AuthenticationException {
return new AuthenticationToken(AuthToken.parse(tokenStr));
} | 3.26 |
hadoop_AuthenticationToken_setMaxInactives_rdh | /**
* Sets the max inactive time of the token.
*
* @param maxInactives
* inactive time of the token in milliseconds
* since the epoch.
*/
public void setMaxInactives(long maxInactives) {
if (this != AuthenticationToken.ANONYMOUS) {
super.setMaxInactives(maxInactives);
}
} | 3.26 |
hadoop_AuthenticationToken_setExpires_rdh | /**
* Sets the expiration of the token.
*
* @param expires
* expiration time of the token in milliseconds since the epoch.
*/
public void setExpires(long expires) {
if (this != AuthenticationToken.ANONYMOUS) {
super.setExpires(expires);
}
} | 3.26 |
hadoop_AuthenticationToken_isExpired_rdh | /**
* Returns true if the token has expired.
*
* @return true if the token has expired.
*/
public boolean isExpired() {
return super.isExpired();
} | 3.26 |
hadoop_ConnectionPool_getNumActiveConnectionsRecently_rdh | /**
* Number of active connections recently in the pool.
*
* @return Number of active connections recently.
*/
protected int getNumActiveConnectionsRecently() {
int ret = 0;
List<ConnectionContext> tmpConnections = this.connections;
for (ConnectionContext conn : tmpConnections) {
if (conn.isActiveRecently()) {
ret++;
}
}
return ret;
} | 3.26 |
hadoop_ConnectionPool_getNumActiveConnections_rdh | /**
* Number of active connections in the pool.
*
* @return Number of active connections.
*/
protected int getNumActiveConnections() {
int ret = 0;
List<ConnectionContext> tmpConnections = this.connections;
for (ConnectionContext conn : tmpConnections) {
if (conn.isActive()) {
ret++;
}
}
return ret;
} | 3.26 |
hadoop_ConnectionPool_getNumConnections_rdh | /**
* Number of connections in the pool.
*
* @return Number of connections.
*/
protected int getNumConnections() {
return this.connections.size();
} | 3.26 |
hadoop_ConnectionPool_addConnection_rdh | /**
* Add a connection to the current pool. It uses a Copy-On-Write approach.
*
* @param conn
* New connection to add to the pool.
*/
public synchronized void addConnection(ConnectionContext conn) {
List<ConnectionContext> tmpConnections = new ArrayList<>(this.connections);
tmpConnections.add(conn);
this.connections = tmpConnections;
this.lastActiveTime = Time.now();
} | 3.26 |
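
A minimal sketch (hypothetical `CowList` class, not part of Hadoop) of the copy-on-write update that `addConnection()` above relies on: the writer builds a fresh copy under the lock and swaps the reference, so readers can iterate a stable snapshot without locking.

```java
import java.util.ArrayList;
import java.util.List;

/** Sketch of a copy-on-write list: writers copy-and-swap, readers iterate a snapshot. */
class CowList<T> {
  private volatile List<T> items = new ArrayList<>();

  synchronized void add(T item) {
    List<T> copy = new ArrayList<>(this.items);
    copy.add(item);
    this.items = copy; // readers see either the old or the new snapshot, never a partial one
  }

  List<T> snapshot() {
    return this.items; // safe to iterate without holding the lock
  }
}
```
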
hadoop_ConnectionPool_getConnection_rdh | /**
* Return the next connection round-robin.
*
* @return Connection context.
*/
protected ConnectionContext getConnection() {
this.lastActiveTime = Time.now();
List<ConnectionContext> tmpConnections = this.connections;
for (ConnectionContext tmpConnection : tmpConnections) {
if ((tmpConnection != null) && tmpConnection.isUsable()) {
return tmpConnection;
}
}
ConnectionContext conn = null;
// We return a connection even if it's busy
int size = tmpConnections.size();
if (size > 0) {
// Get a connection from the pool following round-robin
// Inc and mask off sign bit, lookup index should be non-negative int
int v6 = this.clientIndex.getAndIncrement() & 0x7fffffff;
conn = tmpConnections.get(v6 % size);
}
return conn;
} | 3.26 |
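
The sign-bit mask in `getConnection()` is what keeps the lookup index non-negative once the shared counter overflows. A standalone sketch (hypothetical `RoundRobinPicker`, not Hadoop code) of the same idiom:

```java
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

/** Sketch: round-robin selection whose index stays non-negative across int overflow. */
class RoundRobinPicker<T> {
  private final AtomicInteger counter = new AtomicInteger();

  /** Returns the next element in round-robin order, or null if the list is empty. */
  T pick(List<T> items) {
    int size = items.size();
    if (size == 0) {
      return null;
    }
    // Masking off the sign bit maps Integer.MIN_VALUE..-1 back into 0..MAX_VALUE,
    // so the modulo below never sees a negative operand.
    int index = counter.getAndIncrement() & 0x7fffffff;
    return items.get(index % size);
  }
}
```
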
hadoop_ConnectionPool_getLastActiveTime_rdh | /**
* Get the last time the connection pool was used.
*
* @return Last time the connection pool was used.
*/
protected long getLastActiveTime() {
return this.lastActiveTime;
} | 3.26 |
hadoop_ConnectionPool_getPoolAlignmentContext_rdh | /**
* Get the alignment context for this pool.
*
* @return Alignment context
*/
public PoolAlignmentContext getPoolAlignmentContext() {
return this.alignmentContext;
} | 3.26 |
hadoop_ConnectionPool_close_rdh | /**
* Close the connection pool.
*/
protected synchronized void close() {
long v12 = TimeUnit.MILLISECONDS.toSeconds(Time.now() - getLastActiveTime());
LOG.debug("Shutting down connection pool \"{}\" used {} seconds ago", this.connectionPoolId, v12);
for (ConnectionContext connection : this.connections) {
connection.close(true);
}
this.connections.clear();
} | 3.26 |
hadoop_ConnectionPool_getClientIndex_rdh | /**
* Get the clientIndex used to calculate index for lookup.
*
* @return Client index.
*/
@VisibleForTesting
public AtomicInteger getClientIndex() {
return this.clientIndex;
} | 3.26 |
hadoop_ConnectionPool_getMinSize_rdh | /**
* Get the minimum number of connections in this pool.
*
* @return Minimum number of connections.
*/
protected int getMinSize() {
return this.minSize;
} | 3.26 |
hadoop_ConnectionPool_getConnectionPoolId_rdh | /**
* Get the connection pool identifier.
*
* @return Connection pool identifier.
*/
protected ConnectionPoolId getConnectionPoolId() {
return this.connectionPoolId;
} | 3.26 |
hadoop_ConnectionPool_getJSON_rdh | /**
* JSON representation of the connection pool.
*
* @return String representation of the JSON.
*/
public String getJSON() {
final Map<String, String> v23 = new LinkedHashMap<>();
v23.put("active", Integer.toString(getNumActiveConnections()));
v23.put("recent_active", Integer.toString(getNumActiveConnectionsRecently()));
v23.put("idle", Integer.toString(getNumIdleConnections()));
v23.put("total", Integer.toString(getNumConnections()));
if (LOG.isDebugEnabled()) {
List<ConnectionContext> tmpConnections = this.connections;
for (int i = 0; i < tmpConnections.size(); i++) {
ConnectionContext connection = tmpConnections.get(i);
v23.put(i + " active", Boolean.toString(connection.isActive()));
v23.put(i + " recent_active", Integer.toString(getNumActiveConnectionsRecently())); v23.put(i + " idle", Boolean.toString(connection.isUsable()));
v23.put(i + " closed", Boolean.toString(connection.isClosed()));
}
}
return JSON.toString(v23);
} | 3.26 |
hadoop_ConnectionPool_getNumIdleConnections_rdh | /**
 * Number of idle connections, i.e. usable connections with no active thread.
*
* @return Number of idle connections
*/
protected int getNumIdleConnections() {
int ret = 0;
List<ConnectionContext> tmpConnections = this.connections;
for (ConnectionContext conn : tmpConnections) {
if (conn.isIdle()) {
ret++;
}
}
return ret;
} | 3.26 |
hadoop_ConnectionPool_removeConnections_rdh | /**
* Remove connections from the current pool.
*
* @param num
* Number of connections to remove.
* @return Removed connections.
*/
public synchronized List<ConnectionContext> removeConnections(int num) {
List<ConnectionContext> removed = new LinkedList<>();
if (this.connections.size() > this.minSize) {
int targetCount = Math.min(num, this.connections.size() - this.minSize);
// Remove and close targetCount of connections
List<ConnectionContext> tmpConnections = new ArrayList<>();
for (ConnectionContext conn : this.connections) {
// Only pick idle connections to close
if ((removed.size() < targetCount) && conn.isIdle()) {
removed.add(conn);
} else {
tmpConnections.add(conn);
}
}
this.connections = tmpConnections;
}
LOG.debug("Expected to remove {} connection and actually removed {} connections " + "for connectionPool: {}", num, removed.size(), connectionPoolId);
return removed;
} | 3.26 |
hadoop_ConnectionPool_getMaxSize_rdh | /**
* Get the maximum number of connections allowed in this pool.
*
* @return Maximum number of connections.
*/
protected int getMaxSize() {
return this.maxSize;
} | 3.26 |
hadoop_RpcProgramPortmap_unset_rdh | /**
* When a program becomes unavailable, it should unregister itself with the
* port mapper program on the same machine. The parameters and results have
* meanings identical to those of "PMAPPROC_SET". The protocol and port number
* fields of the argument are ignored.
*/
private XDR unset(int xid, XDR in, XDR out) {
PortmapMapping v4 = PortmapRequest.mapping(in);
String key = PortmapMapping.key(v4);
if (LOG.isDebugEnabled())
LOG.debug("Portmap remove key=" + key);
map.remove(key);
return PortmapResponse.booleanReply(out, xid, true);
} | 3.26 |
hadoop_RpcProgramPortmap_set_rdh | /**
* When a program first becomes available on a machine, it registers itself
* with the port mapper program on the same machine. The program passes its
* program number "prog", version number "vers", transport protocol number
* "prot", and the port "port" on which it awaits service request. The
* procedure returns a boolean reply whose value is "TRUE" if the procedure
* successfully established the mapping and "FALSE" otherwise. The procedure
* refuses to establish a mapping if one already exists for the tuple
* "(prog, vers, prot)".
*/
private XDR set(int xid, XDR in, XDR out) {
PortmapMapping mapping = PortmapRequest.mapping(in);
String v3 = PortmapMapping.key(mapping);
if (LOG.isDebugEnabled()) {
LOG.debug("Portmap set key=" + v3);
}
map.put(v3, mapping);
return PortmapResponse.intReply(out, xid, mapping.getPort());
} | 3.26 |
hadoop_RpcProgramPortmap_nullOp_rdh | /**
* This procedure does no work. By convention, procedure zero of any protocol
* takes no parameters and returns no results.
*/
private XDR nullOp(int xid, XDR in, XDR out) {
return PortmapResponse.voidReply(out, xid);
} | 3.26 |
hadoop_RpcProgramPortmap_dump_rdh | /**
* This procedure enumerates all entries in the port mapper's database. The
* procedure takes no parameters and returns a list of program, version,
* protocol, and port values.
 */
private XDR dump(int xid, XDR in, XDR out) {
PortmapMapping[] pmapList = map.values().toArray(new PortmapMapping[0]);
return PortmapResponse.pmapList(out, xid, pmapList);
} | 3.26 |
hadoop_RpcProgramPortmap_getport_rdh | /**
* Given a program number "prog", version number "vers", and transport
* protocol number "prot", this procedure returns the port number on which the
* program is awaiting call requests. A port value of zeros means the program
* has not been registered. The "port" field of the argument is ignored.
*/
private XDR getport(int xid, XDR in, XDR out) {
PortmapMapping v6 = PortmapRequest.mapping(in);
String key = PortmapMapping.key(v6);
if (LOG.isDebugEnabled()) {
LOG.debug((("Portmap GETPORT key=" + key) + " ") + v6);
}
PortmapMapping value = map.get(key);
int res = 0;
if (value != null) {
res = value.getPort();
if (LOG.isDebugEnabled()) {
LOG.debug((("Found mapping for key: " + key) + " port:") + res);
}
} else {
LOG.warn("Warning, no mapping for key: " + key);
}
return PortmapResponse.intReply(out, xid, res);
} | 3.26 |
hadoop_VersionInfoMojo_computeMD5_rdh | /**
* Computes and returns an MD5 checksum of the contents of all files in the
* input Maven FileSet.
*
* @return String containing hexadecimal representation of MD5 checksum
* @throws Exception
* if there is any error while computing the MD5 checksum
*/
private String computeMD5() throws Exception {
List<File> files = FileSetUtils.convertFileSetToFiles(source);
// File order of MD5 calculation is significant. Sorting is done on
// unix-format names, case-folded, in order to get a platform-independent
// sort and calculate the same MD5 on all platforms.
Collections.sort(files, new MD5Comparator());
byte[] md5 = computeMD5(files);
String md5str = byteArrayToString(md5);
getLog().info("Computed MD5: " + md5str);
return md5str;
} | 3.26 |
hadoop_VersionInfoMojo_getBuildTime_rdh | /**
* Returns a string representing current build time.
*
* @return String representing current build time
*/
private String getBuildTime() {
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'");
dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
return dateFormat.format(new Date());
} | 3.26 |
hadoop_VersionInfoMojo_determineSCM_rdh | /**
* Determines which SCM is in use (git or none) and captures
* output of the SCM command for later parsing.
*
* @return SCM in use for this build
* @throws Exception
* if any error occurs attempting to determine SCM
*/
private SCM determineSCM() throws Exception {
Exec exec = new Exec(this);
SCM scm = SCM.NONE;
scmOut = new ArrayList<String>();
int ret;
ret = exec.run(Arrays.asList(gitCommand, "branch"), scmOut);
if (ret == 0) {
ret = exec.run(Arrays.asList(gitCommand, "remote", "-v"), scmOut);
if (ret != 0) {
scm = SCM.NONE;
scmOut = null;
} else {
ret = exec.run(Arrays.asList(gitCommand, "log", "-n", "1"), scmOut);
if (ret != 0) {
scm = SCM.NONE;
scmOut = null;
} else {
scm = SCM.GIT;
}
}
}
if (scmOut != null) {
getLog().debug(scmOut.toString());
}
getLog().info("SCM: " + scm);
return scm;
} | 3.26 |
hadoop_VersionInfoMojo_getSCMUri_rdh | /**
* Parses SCM output and returns URI of SCM.
*
* @param scm
* SCM in use for this build
* @return String URI of SCM
 */
private String getSCMUri(SCM scm) {
String uri = "Unknown";
switch (scm) {
case GIT :
for (String s : scmOut) {
if (s.startsWith("origin") && s.endsWith("(fetch)")) {
uri = s.substring("origin".length());
uri = uri.substring(0, uri.length() - "(fetch)".length());
break;
}
}
break;
}
return uri.trim();
} | 3.26 |
hadoop_VersionInfoMojo_byteArrayToString_rdh | /**
* Converts bytes to a hexadecimal string representation and returns it.
*
* @param array
* byte[] to convert
* @return String containing hexadecimal representation of bytes
*/
private String byteArrayToString(byte[] array) {
StringBuilder sb = new StringBuilder();
for (byte b : array) {
sb.append(Integer.toHexString(0xff & b));
}
return sb.toString();
} | 3.26 |
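
Note that `Integer.toHexString(0xff & b)` yields a single digit for bytes below 0x10, so the resulting string is not fixed-width. A zero-padded variant (a sketch, not the plugin's code) would be:

```java
/** Sketch: fixed-width hex encoding where every byte becomes exactly two digits. */
static String toPaddedHex(byte[] array) {
  StringBuilder sb = new StringBuilder(array.length * 2);
  for (byte b : array) {
    sb.append(String.format("%02x", b));
  }
  return sb.toString();
}
```
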
hadoop_VersionInfoMojo_getSCMBranch_rdh | /**
* Parses SCM output and returns branch of SCM.
*
* @param scm
* SCM in use for this build
* @return String branch of SCM
*/
private String getSCMBranch(SCM scm) {
String v9 = "Unknown";
switch (scm) {
case GIT :
for (String s : scmOut) {
if (s.startsWith("*")) {
v9 = s.substring("*".length());
break;
}
}
break;
}
return v9.trim();
} | 3.26 |
hadoop_VersionInfoMojo_readFile_rdh | /**
* Reads and returns the full contents of the specified file.
*
* @param file
* File to read
* @return byte[] containing full contents of file
* @throws IOException
* if there is an I/O error while reading the file
*/
private byte[] readFile(File file) throws IOException {
RandomAccessFile raf = new RandomAccessFile(file, "r");
byte[] buffer = new byte[((int) (raf.length()))];
raf.readFully(buffer);
raf.close();
return buffer;
} | 3.26 |
hadoop_SCMController_overview_rdh | /**
* It is referenced in SCMWebServer.SCMWebApp.setup()
*/
@SuppressWarnings("unused")
public void overview() {
render(SCMOverviewPage.class);
} | 3.26 |
hadoop_ServiceRegistryUtils_registryDNSLookupExists_rdh | /**
* Determine whether a DNS lookup exists for a given name. If a DNS server
* address is provided, the lookup will be performed against this DNS
* server. This option is provided because it may be desirable to perform
* the lookup against Registry DNS directly to avoid caching of negative
* responses that may be performed by other DNS servers, thereby allowing the
* lookup to succeed sooner.
*
* @param addr
* host:port dns address, or null
* @param name
* name to look up
* @return true if a lookup succeeds for the specified name
 */
public static boolean registryDNSLookupExists(String addr, String name) {
if (addr == null) {
try {
InetAddress.getByName(name);
return true;
} catch (UnknownHostException e) {
return false;
}
}
String dnsURI = String.format("dns://%s", addr);
Hashtable<String, Object> env = new Hashtable<>();
env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory");
env.put(Context.PROVIDER_URL, dnsURI);
try {
DirContext ictx = new InitialDirContext(env);
Attributes attrs = ictx.getAttributes(name, new String[]{ "A" });
if (attrs.size() > 0) {
return true;
}
} catch (NameNotFoundException e) {
// this doesn't need to be logged
} catch (NamingException e) {
LOG.error("Got exception when performing DNS lookup", e);
}
return false;
} | 3.26 |
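
For reference, a self-contained sketch of the JNDI lookup used above when an explicit server address is given. The server address `127.0.0.1:5353` and the record name are placeholders, not values from the source.

```java
import java.util.Hashtable;
import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.directory.Attributes;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;

/** Sketch: query a specific DNS server for an A record via JNDI. */
public class DnsLookupSketch {
  public static void main(String[] args) throws NamingException {
    Hashtable<String, Object> env = new Hashtable<>();
    env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory");
    env.put(Context.PROVIDER_URL, "dns://127.0.0.1:5353"); // placeholder DNS server
    DirContext ctx = new InitialDirContext(env);
    Attributes attrs = ctx.getAttributes("web.app1.user1.example.com", new String[]{ "A" });
    System.out.println("A records found: " + attrs.size());
  }
}
```
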
hadoop_ServiceRegistryUtils_m0_rdh | /**
* Get the registry path for an instance under the user's home node
*
* @param instanceName
* application instance
* @return a path to the registry location for this application instance.
*/
public static String m0(String instanceName) {
return RegistryUtils.servicePath(RegistryUtils.currentUser(), YarnServiceConstants.APP_TYPE, instanceName);
} | 3.26 |
hadoop_ServiceRegistryUtils_mkServiceHomePath_rdh | /**
* Build the path to a service folder
*
* @param username
* user name
* @param serviceName
* service name
* @return the home path to the service
*/
public static String mkServiceHomePath(String username, String serviceName) {
return (mkUserHomePath(username) + "/") + serviceName;
} | 3.26 |
hadoop_ServiceRegistryUtils_mkUserHomePath_rdh | /**
 * Build the path to a user's home folder.
 *
 * @param username
 * user name
 * @return the path to the user's home folder.
 */
public static String mkUserHomePath(String username) {
return (SVC_USERS + "/") + username;
} | 3.26 |
hadoop_SaslInputStream_readMoreData_rdh | /**
 * Read more data and have it processed. <br>
* Entry condition: ostart = ofinish <br>
* Exit condition: ostart <= ofinish <br>
*
* return (ofinish-ostart) (we have this many bytes for you), 0 (no data now,
* but could have more later), or -1 (absolutely no more data)
*/
private int readMoreData() throws IOException {
try {
inStream.readFully(lengthBuf);
int length = unsignedBytesToInt(lengthBuf);
if (LOG.isDebugEnabled())
LOG.debug("Actual length is " + length);
saslToken = new byte[length];
inStream.readFully(saslToken);
} catch (EOFException e) {
return -1;
}
try {
if (saslServer != null) {
// using saslServer
obuffer = saslServer.unwrap(saslToken, 0, saslToken.length);
} else {
// using saslClient
obuffer = saslClient.unwrap(saslToken, 0, saslToken.length);
}
} catch (SaslException se) {
try {
disposeSasl();
} catch (SaslException ignored) {}
throw se;
}
ostart = 0;
if (obuffer == null)
ofinish = 0;
else
ofinish = obuffer.length;
return ofinish;
} | 3.26 |
hadoop_SaslInputStream_skip_rdh | /**
* Skips <code>n</code> bytes of input from the bytes that can be read from
* this input stream without blocking.
*
* <p>
* Fewer bytes than requested might be skipped. The actual number of bytes
* skipped is equal to <code>n</code> or the result of a call to
* {@link #available()}, whichever is smaller. If
* <code>n</code> is less than zero, no bytes are skipped.
*
* <p>
* The actual number of bytes skipped is returned.
*
* @param n
* the number of bytes to be skipped.
* @return the actual number of bytes skipped.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public long skip(long n) throws IOException {
if (!useWrap) {
return inStream.skip(n);
}
int available = ofinish - ostart;
if (n > available) {
n = available;
}
if (n < 0) {
return 0;
}
ostart += n;
return n;
} | 3.26 |
hadoop_SaslInputStream_close_rdh | /**
* Closes this input stream and releases any system resources associated with
* the stream.
* <p>
* The <code>close</code> method of <code>SASLInputStream</code> calls the
* <code>close</code> method of its underlying input stream.
*
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void close() throws IOException {
disposeSasl();
ostart = 0;
ofinish = 0;
inStream.close();
isOpen = false;
} | 3.26 |
hadoop_SaslOutputStream_write_rdh | /**
* Writes <code>len</code> bytes from the specified byte array starting at
* offset <code>off</code> to this output stream.
*
* @param inBuf
* the data.
* @param off
* the start offset in the data.
* @param len
* the number of bytes to write.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void write(byte[] inBuf, int off, int len) throws IOException {
if (!useWrap) {
outStream.write(inBuf, off, len);
return;
}
try {
if (saslServer != null) {
// using saslServer
saslToken = saslServer.wrap(inBuf, off, len);
} else {
// using saslClient
saslToken = saslClient.wrap(inBuf, off, len);
}
} catch (SaslException se) {
try {
disposeSasl();
} catch (SaslException ignored) {
}
throw se;
}
if (saslToken != null) {
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
DataOutputStream dout = new DataOutputStream(byteOut);
dout.writeInt(saslToken.length);
outStream.write(byteOut.toByteArray());
outStream.write(saslToken, 0, saslToken.length);
saslToken = null;
}
} | 3.26 |
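
A small sketch of the wire framing used by this write path (and unwrapped by `readMoreData()` earlier): each wrapped SASL token is preceded by its length as a 4-byte big-endian integer.

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

/** Sketch: frame a wrapped SASL token as a 4-byte length prefix followed by the payload. */
final class SaslFraming {
  static byte[] frame(byte[] token) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream(4 + token.length);
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(token.length); // big-endian, matching DataOutputStream/DataInputStream
    out.write(token);
    out.flush();
    return bytes.toByteArray();
  }

  private SaslFraming() {
  }
}
```
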
hadoop_SaslOutputStream_flush_rdh | /**
* Flushes this output stream
*
* @exception IOException
* if an I/O error occurs.
 */
@Override
public void flush() throws IOException {
outStream.flush();
} | 3.26 |
hadoop_SaslOutputStream_disposeSasl_rdh | /**
* Disposes of any system resources or security-sensitive information Sasl
* might be using.
*
* @exception SaslException
* if a SASL error occurs.
 */
private void disposeSasl() throws SaslException {
if (saslClient != null) {
saslClient.dispose();
}
if (saslServer != null) {
saslServer.dispose();
}
} | 3.26 |
hadoop_SaslOutputStream_close_rdh | /**
* Closes this output stream and releases any system resources associated with
* this stream.
*
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void close() throws IOException {
disposeSasl();
outStream.close();
} | 3.26 |
hadoop_PathLocation_getSourcePath_rdh | /**
* Get the source path in the global namespace for this path location.
*
* @return The path in the global namespace.
*/
public String getSourcePath() {
return this.sourcePath;
} | 3.26 |
hadoop_PathLocation_orderedNamespaces_rdh | /**
* Prioritize a location/destination by its name space/nameserviceId.
* This destination might be used by other threads, so the source is not
* modifiable.
*
* @param original
* List of destinations to order.
* @param nsId
* The name space/nameserviceID to prioritize.
 * @return Prioritized list of destinations that cannot be modified.
*/
private static List<RemoteLocation> orderedNamespaces(final List<RemoteLocation> original, final String nsId) {
if (original.size() <= 1) {
return original;
}
LinkedList<RemoteLocation> newDestinations = new LinkedList<>();
boolean found = false;
for (RemoteLocation dest : original) {
if (dest.getNameserviceId().equals(nsId)) {
found = true;
newDestinations.addFirst(dest);
} else {
newDestinations.add(dest);
}
}
if (!found) {
LOG.debug("Cannot find location with namespace {} in {}", nsId, original);
}
return Collections.unmodifiableList(newDestinations);
} | 3.26 |
hadoop_PathLocation_getDestinations_rdh | /**
* Get the list of locations found in the mount table.
* The first result is the highest priority path.
*
* @return List of remote locations.
*/
public List<RemoteLocation> getDestinations() {
return Collections.unmodifiableList(this.destinations);
} | 3.26 |
hadoop_PathLocation_getDefaultLocation_rdh | /**
* Get the default or highest priority location.
*
* @return The default location.
*/
public RemoteLocation getDefaultLocation() {
if (destinations.isEmpty() || (destinations.get(0).getDest() == null)) {
throw new UnsupportedOperationException(("Unsupported path " + sourcePath) + " please check mount table");
}
return destinations.get(0);
} | 3.26 |
hadoop_PathLocation_getNamespaces_rdh | /**
* Get the subclusters defined for the destinations.
*
* @return Set containing the subclusters.
 */
public Set<String> getNamespaces() {
Set<String> namespaces = new HashSet<>();
List<RemoteLocation> locations = this.getDestinations();
for (RemoteLocation location : locations) {
String nsId = location.getNameserviceId();
namespaces.add(nsId);
}
return namespaces;
} | 3.26 |
hadoop_PathLocation_getDestinationOrder_rdh | /**
* Get the order for the destinations.
*
* @return Order for the destinations.
*/
public DestinationOrder getDestinationOrder() {
return this.destOrder;
} | 3.26 |
hadoop_PathLocation_prioritizeDestination_rdh | /**
* Return a path location with the prioritized destinations based on
* the current path location.
*
* @param base
* The base path location we'd like to prioritize on.
* @param firstNsId
* Identifier of the namespace to place first.
* @return path location with the prioritized destinations.
*/
public static PathLocation prioritizeDestination(PathLocation base, String firstNsId) {
List<RemoteLocation> prioritizedDestinations = orderedNamespaces(base.destinations, firstNsId);
return new PathLocation(base.sourcePath, prioritizedDestinations, base.destOrder);
} | 3.26 |
hadoop_PathLocation_hasMultipleDestinations_rdh | /**
* Check if this location supports multiple clusters/paths.
*
* @return If it has multiple destinations.
*/
public boolean hasMultipleDestinations() {
return this.destinations.size() > 1;
} | 3.26 |
hadoop_NMTokenSecretManagerInNM_retrievePassword_rdh | /**
* This method will be used to verify NMTokens generated by different master
* keys.
*/
@Override
public synchronized byte[] retrievePassword(NMTokenIdentifier identifier) throws InvalidToken {
int keyId = identifier.getKeyId();
ApplicationAttemptId appAttemptId = identifier.getApplicationAttemptId();
/* MasterKey used for retrieving password will be as follows. 1) By default
older saved master key will be used. 2) If identifier's master key id
matches that of previous master key id then previous key will be used. 3)
If identifier's master key id matches that of current master key id then
current key will be used.
*/
MasterKeyData oldMasterKey = oldMasterKeys.get(appAttemptId);
MasterKeyData masterKeyToUse = oldMasterKey;
if ((previousMasterKey != null) && (keyId == previousMasterKey.getMasterKey().getKeyId())) {
masterKeyToUse = previousMasterKey;
} else if (keyId == currentMasterKey.getMasterKey().getKeyId()) {
masterKeyToUse = currentMasterKey;
}
if ((nodeId != null) && (!identifier.getNodeId().equals(nodeId))) {
throw new InvalidToken(((((("Given NMToken for application : " + appAttemptId.toString()) + " is not valid for current node manager.") + "expected : ") + nodeId.toString()) + " found : ") + identifier.getNodeId().toString());
}
if (masterKeyToUse != null) {
byte[] password = retrivePasswordInternal(identifier, masterKeyToUse);
LOG.debug("NMToken password retrieved successfully!!");
return password;
}
throw new InvalidToken(("Given NMToken for application : " + appAttemptId.toString()) + " seems to have been generated illegally.");
} | 3.26 |
hadoop_NMTokenSecretManagerInNM_generateNMToken_rdh | /**
* Used by the Distributed Scheduler framework to generate NMTokens
*
* @param applicationSubmitter
* @param container
* @return NMToken
*/
public NMToken generateNMToken(String applicationSubmitter, Container container) {
this.readLock.lock();
try {
Token token = createNMToken(container.getId().getApplicationAttemptId(), container.getNodeId(), applicationSubmitter);
return NMToken.newInstance(container.getNodeId(), token);
} finally {
this.readLock.unlock();
}
} | 3.26 |
hadoop_NMTokenSecretManagerInNM_setMasterKey_rdh | /**
* Used by NodeManagers to create a token-secret-manager with the key
* obtained from the RM. This can happen during registration or when the RM
* rolls the master-key and signal the NM.
*/
@Private
public synchronized void setMasterKey(MasterKey masterKey) {
// Update keys only if the key has changed.
if ((super.currentMasterKey == null) || (super.currentMasterKey.getMasterKey().getKeyId() != masterKey.getKeyId())) {
LOG.info("Rolling master-key for container-tokens, got key with id " + masterKey.getKeyId());
if (super.currentMasterKey != null) {
updatePreviousMasterKey(super.currentMasterKey);
}
updateCurrentMasterKey(new MasterKeyData(masterKey, createSecretKey(masterKey.getBytes().array())));
}
} | 3.26 |
hadoop_NMTokenSecretManagerInNM_appAttemptStartContainer_rdh | /**
* This will be called by startContainer. It will add the master key into
* the cache used for starting this container. This should be called before
* validating the startContainer request.
*/
public synchronized void appAttemptStartContainer(NMTokenIdentifier identifier) throws InvalidToken {
ApplicationAttemptId appAttemptId = identifier.getApplicationAttemptId();
if (!appToAppAttemptMap.containsKey(appAttemptId.getApplicationId())) {
// First application attempt for the given application
appToAppAttemptMap.put(appAttemptId.getApplicationId(), new ArrayList<ApplicationAttemptId>());
}
MasterKeyData oldKey = oldMasterKeys.get(appAttemptId);
if (oldKey == null) {
// This is a new application attempt.
appToAppAttemptMap.get(appAttemptId.getApplicationId()).add(appAttemptId);
}
if ((oldKey == null) || (oldKey.getMasterKey().getKeyId() != identifier.getKeyId())) {
// Update key only if it is modified.
LOG.debug("NMToken key updated for application attempt : {}", identifier.getApplicationAttemptId().toString());
if (identifier.getKeyId() == currentMasterKey.getMasterKey().getKeyId()) {
updateAppAttemptKey(appAttemptId, currentMasterKey);
} else if ((previousMasterKey != null) && (identifier.getKeyId() == previousMasterKey.getMasterKey().getKeyId())) {
updateAppAttemptKey(appAttemptId, previousMasterKey);
} else {
throw new InvalidToken("Older NMToken should not be used while starting the container.");
}
}
} | 3.26 |
hadoop_ProxyUtils_notFound_rdh | /**
* Output 404 with appropriate message.
*
* @param resp
* the http response.
* @param message
* the message to include on the page.
* @throws IOException
* on any error.
*/
public static void notFound(HttpServletResponse resp, String message) throws IOException {
resp.setStatus(HttpServletResponse.SC_NOT_FOUND);
resp.setContentType(MimeType.HTML);
Page p = new Page(resp.getWriter());
p.html().h1(message).__();
} | 3.26 |
hadoop_ProxyUtils_sendRedirect_rdh | /**
* Handle redirects with a status code that can in future support verbs other
* than GET, thus supporting full REST functionality.
* <p>
* The target URL is included in the redirect text returned
* <p>
* At the end of this method, the output stream is closed.
*
* @param request
* request (hence: the verb and any other information
* relevant to a redirect)
* @param response
* the response
* @param target
* the target URL -unencoded
*/
public static void sendRedirect(HttpServletRequest request, HttpServletResponse response, String target) throws IOException {
LOG.debug("Redirecting {} {} to {}", request.getMethod(), request.getRequestURI(), target);
String location = response.encodeRedirectURL(target);
response.setStatus(HttpServletResponse.SC_FOUND);
response.setHeader(LOCATION, location);
response.setContentType(MimeType.HTML);
PrintWriter writer = response.getWriter();
Page p = new Page(writer);
p.html().head().title("Moved").__().body().h1("Moved").div().__("Content has moved ").a(location, "here").__().__().__();
writer.close();
} | 3.26 |
hadoop_ProxyUtils_rejectNonHttpRequests_rdh | /**
* Reject any request that isn't from an HTTP servlet
*
* @param req
* request
* @throws ServletException
* if the request is of the wrong type
*/
public static void rejectNonHttpRequests(ServletRequest req) throws ServletException {
if (!(req instanceof HttpServletRequest)) {
throw new ServletException(E_HTTP_HTTPS_ONLY);
}
} | 3.26 |
hadoop_CSQueuePreemptionSettings_isQueueHierarchyPreemptionDisabled_rdh | /**
* The specified queue is cross-queue preemptable if system-wide cross-queue
* preemption is turned on unless any queue in the <em>qPath</em> hierarchy
* has explicitly turned cross-queue preemption off.
* NOTE: Cross-queue preemptability is inherited from a queue's parent.
*
* @param q
* queue to check preemption state
* @param configuration
* capacity scheduler config
* @return true if queue has cross-queue preemption disabled, false otherwise
*/
private boolean isQueueHierarchyPreemptionDisabled(CSQueue q, CapacitySchedulerConfiguration configuration) {
boolean systemWidePreemption = configuration.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS);
CSQueue parentQ = q.getParent();
// If the system-wide preemption switch is turned off, all of the queues in
// the qPath hierarchy have preemption disabled, so return true.
if (!systemWidePreemption)
return true;
// If q is the root queue and the system-wide preemption switch is turned
// on, then q does not have preemption disabled (default=false, below)
// unless the preemption_disabled property is explicitly set.
if (parentQ == null) {
return configuration.getPreemptionDisabled(q.getQueuePath(), false);
}
// If this is not the root queue, inherit the default value for the
// preemption_disabled property from the parent. Preemptability will be
// inherited from the parent's hierarchy unless explicitly overridden at
// this level.
return configuration.getPreemptionDisabled(q.getQueuePath(), parentQ.getPreemptionDisabled());
} | 3.26 |
hadoop_CSQueuePreemptionSettings_isIntraQueueHierarchyPreemptionDisabled_rdh | /**
* The specified queue is intra-queue preemptable if
* 1) system-wide intra-queue preemption is turned on
* 2) no queue in the <em>qPath</em> hierarchy has explicitly turned off intra
* queue preemption.
* NOTE: Intra-queue preemptability is inherited from a queue's parent.
*
* @param q
* queue to check intra-queue preemption state
* @param configuration
* capacity scheduler config
* @return true if queue has intra-queue preemption disabled, false otherwise
*/
private boolean isIntraQueueHierarchyPreemptionDisabled(CSQueue q, CapacitySchedulerConfiguration configuration) {
boolean systemWideIntraQueuePreemption = configuration.getBoolean(CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, CapacitySchedulerConfiguration.DEFAULT_INTRAQUEUE_PREEMPTION_ENABLED);
// Intra-queue preemption is disabled for this queue if the system-wide
// intra-queue preemption flag is false
if (!systemWideIntraQueuePreemption)
return true;
// Check if this is the root queue and the root queue's intra-queue
// preemption disable switch is set
CSQueue parentQ = q.getParent();
if (parentQ == null) {
return configuration.getIntraQueuePreemptionDisabled(q.getQueuePath(), false);
}
// At this point, the master preemption switch is enabled down to this
// queue's level. Determine whether intra-queue preemption is enabled
// down to this queue's level and return that value.
return configuration.getIntraQueuePreemptionDisabled(q.getQueuePath(), parentQ.getIntraQueuePreemptionDisabledInHierarchy());
} | 3.26 |
hadoop_BlockStorageMovementTracker_stopTracking_rdh | /**
* Sets running flag to false.
*/
public void stopTracking() {
running = false;
} | 3.26 |
hadoop_StageConfig_currentManifestSerializer_rdh | /**
* Get a thread local task manifest serializer.
*
* @return a serializer.
*/
public JsonSerialization<TaskManifest> currentManifestSerializer() {
return threadLocalSerializer.get();
} | 3.26 |
hadoop_StageConfig_getProgressable_rdh | /**
* Get optional progress callback.
*
* @return callback or null
*/
public Progressable getProgressable() {
return progressable;
} | 3.26 |
hadoop_StageConfig_withProgressable_rdh | /**
* Optional progress callback.
*
* @param value
* new value
* @return this
 */
public StageConfig withProgressable(final Progressable value) {
checkOpen();
progressable = value;
return this;
} | 3.26 |
hadoop_StageConfig_withJobAttemptTaskSubDir_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public StageConfig withJobAttemptTaskSubDir(Path value) {
jobAttemptTaskSubDir = value;
return this;
} | 3.26 |
hadoop_StageConfig_getName_rdh | /**
* Get name of task/job.
*
* @return name for logging.
*/
public String getName() {
return name;
} | 3.26 |
hadoop_StageConfig_withWriterQueueCapacity_rdh | /**
* Set writer queue capacity.
*
* @param value
* new value
* @return the builder
*/
public StageConfig withWriterQueueCapacity(final int value) {
writerQueueCapacity = value;
return this;
} | 3.26 |
hadoop_StageConfig_withJobId_rdh | /**
* Set job ID with no attempt included.
*
* @param value
* new value
* @return this
*/
public StageConfig withJobId(final String value) {
checkOpen();
jobId = value;
return this;
} | 3.26 |
hadoop_StageConfig_getJobAttemptDir_rdh | /**
* Job attempt dir.
*/
public Path getJobAttemptDir() {
return jobAttemptDir;
} | 3.26 |
hadoop_StageConfig_withJobAttemptNumber_rdh | /**
* Set the job attempt number.
*
* @param value
* new value
* @return this
*/
public StageConfig withJobAttemptNumber(final int value) {
checkOpen();
jobAttemptNumber = value;
return this;
} | 3.26 |
hadoop_StageConfig_getTaskAttemptId_rdh | /**
* ID of this specific attempt at a task.
*/
public String getTaskAttemptId() {
return taskAttemptId;
} | 3.26 |
hadoop_StageConfig_getIOStatistics_rdh | /**
* IOStatistics to update.
*/
public IOStatisticsStore getIOStatistics() {
return iostatistics;
} | 3.26 |
hadoop_StageConfig_withIOStatistics_rdh | /**
* Set IOStatistics store.
*
* @param store
* new store
* @return this
*/
public StageConfig withIOStatistics(final IOStatisticsStore store) {
checkOpen();
iostatistics = store;
return this;
} | 3.26 |
hadoop_StageConfig_getJobAttemptNumber_rdh | /**
* Get the job attempt number.
*
* @return the value
*/
public int getJobAttemptNumber() {
return jobAttemptNumber;
} | 3.26 |
hadoop_StageConfig_getConf_rdh | /**
* Get configuration.
*
* @return the configuration
*/
public Configuration getConf() {
return conf;
} | 3.26 |
hadoop_StageConfig_build_rdh | /**
* The build command makes the config immutable.
* Idempotent.
*
* @return the now-frozen config
*/
public StageConfig build() {
frozen = true;
return this;
} | 3.26 |
hadoop_StageConfig_withOperations_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return this
*/
public StageConfig withOperations(final ManifestStoreOperations value) {
checkOpen();
operations = value;
return this;
} | 3.26 |
hadoop_StageConfig_getEnterStageEventHandler_rdh | /**
* Handler for stage entry events.
*
* @return the handler.
*/
public StageEventCallbacks getEnterStageEventHandler() {
return enterStageEventHandler;
} | 3.26 |
hadoop_StageConfig_getTaskId_rdh | /**
* ID of the task.
*/
public String getTaskId() {
return taskId;
} | 3.26 |
hadoop_StageConfig_withOutputTempSubDir_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return this
*/
public StageConfig withOutputTempSubDir(final Path value) {
checkOpen();
outputTempSubDir = value;
return this;
} | 3.26 |
hadoop_StageConfig_withConfiguration_rdh | /**
* Set configuration.
*
* @param value
* new value
* @return the builder
*/
public StageConfig withConfiguration(Configuration value) {
conf = value;
return this;
} | 3.26 |
hadoop_StageConfig_withDestinationDir_rdh | /**
* Set job destination dir.
*
* @param dir
* new dir
* @return this
*/
public StageConfig withDestinationDir(final Path dir) {
destinationDir = dir;
return this;
} | 3.26 |
hadoop_StageConfig_withJobIdSource_rdh | /**
* Set the Job ID source.
*
* @param value
* new value
* @return this
*/
public StageConfig withJobIdSource(final String value) {
checkOpen();
f0 = value;
return this;
} | 3.26 |
hadoop_StageConfig_getOperations_rdh | /**
* Callbacks to update store.
* This is not made visible to the stages; they must
* go through the wrapper classes in this class, which
* add statistics and logging.
*/
public ManifestStoreOperations getOperations() {
return operations;
} | 3.26 |
hadoop_StageConfig_getJobId_rdh | /**
* Job ID.
*/
public String getJobId() {
return jobId;
} | 3.26 |
hadoop_StageConfig_getIoProcessors_rdh | /**
* Submitter for doing IO against the store other than
* manifest processing.
*/
public Submitter getIoProcessors() {
return ioProcessors;
} | 3.26 |
hadoop_StageConfig_getWriterQueueCapacity_rdh | /**
* Get writer queue capacity.
*
* @return the queue capacity
*/
public int getWriterQueueCapacity() {
return writerQueueCapacity;
} | 3.26 |
hadoop_StageConfig_withTaskId_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return this
*/
public StageConfig withTaskId(final String value) {
checkOpen();
taskId = value;
return this;
} | 3.26 |
hadoop_StageConfig_withStageEventCallbacks_rdh | /**
 * Set handler for stage entry events.
*
* @param value
* new value
* @return this
*/
public StageConfig withStageEventCallbacks(StageEventCallbacks value) {
checkOpen();
enterStageEventHandler = value;
return this;
} | 3.26 |
hadoop_StageConfig_withDeleteTargetPaths_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public StageConfig withDeleteTargetPaths(boolean value) {
checkOpen();
deleteTargetPaths = value;
return this;
} | 3.26 |
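
The StageConfig snippets above follow a freeze-on-build pattern: every `withX()` setter calls `checkOpen()` and returns `this`, and `build()` flips a flag so later mutation fails. A minimal sketch of that pattern (hypothetical `FrozenConfig` class, not the Hadoop implementation):

```java
/** Sketch: a fluent builder that becomes immutable once build() is called. */
class FrozenConfig {
  private boolean frozen;
  private String jobId;

  FrozenConfig withJobId(String value) {
    checkOpen();
    this.jobId = value;
    return this;
  }

  /** Idempotent: freezing an already-frozen config is a no-op. */
  FrozenConfig build() {
    frozen = true;
    return this;
  }

  private void checkOpen() {
    if (frozen) {
      throw new IllegalStateException("config is already built");
    }
  }

  String getJobId() {
    return jobId;
  }
}
```
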