name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---
hadoop_PerGpuTemperature_getSlowThresholdGpuTemp_rdh | /**
 * Get the GPU temperature (in Celsius) above which the GPU may run slower
*
* @return temperature
*/
@XmlJavaTypeAdapter(StrToFloatBeforeSpaceAdapter.class)
@XmlElement(name = "gpu_temp_slow_threshold")
public Float getSlowThresholdGpuTemp() {
return slowThresholdGpuTemp;
} | 3.26 |
hadoop_PerGpuTemperature_getCurrentGpuTemp_rdh | /**
 * Get the current GPU temperature in Celsius
*
* @return temperature
*/
@XmlJavaTypeAdapter(StrToFloatBeforeSpaceAdapter.class)
@XmlElement(name = "gpu_temp")
public Float getCurrentGpuTemp() {
return currentGpuTemp;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionGauge_rdh | /**
* Add a new evaluator to the gauge statistics.
*
* @param key
* key of this statistic
* @param eval
* evaluator for the statistic
* @return the builder.
*/
  public DynamicIOStatisticsBuilder withLongFunctionGauge(String key, ToLongFunction<String> eval) {
activeInstance().addGaugeFunction(key, eval::applyAsLong);
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withMeanStatisticFunction_rdh | /**
* Add a new evaluator to the mean statistics.
*
* This is a function which must return the mean and the sample count.
*
* @param key
* key of this statistic
* @param eval
* evaluator for the statistic
* @return the builder.
*/
public DynamicIOStatisticsBuilder withMeanStatisticFunction(String key, Function<String, MeanStatistic> eval) {
activeInstance().addMeanStatisticFunction(key, eval);
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withAtomicLongMinimum_rdh | /**
* Add a minimum statistic to dynamically return the
* latest value of the source.
*
* @param key
* key of this statistic
* @param source
* atomic long minimum
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicLongMinimum(String key, AtomicLong source) {
withLongFunctionMinimum(key, s -> source.get());
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_build_rdh | /**
* Build the IOStatistics instance.
*
* @return an instance.
* @throws IllegalStateException
* if the builder has already been built.
*/
public IOStatistics build() {
final DynamicIOStatistics stats = activeInstance();
// stop the builder from working any more.
instance = null;
return stats;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withAtomicIntegerCounter_rdh | /**
* Add a counter statistic to dynamically return the
* latest value of the source.
*
* @param key
* key of this statistic
* @param source
* atomic int counter
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicIntegerCounter(String key, AtomicInteger source) {
withLongFunctionCounter(key, s -> source.get());
    return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withAtomicLongMaximum_rdh | /**
* Add a maximum statistic to dynamically return the
* latest value of the source.
*
* @param key
* key of this statistic
* @param source
* atomic long maximum
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicLongMaximum(String key, AtomicLong source) {
withLongFunctionMaximum(key, s -> source.get());
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionCounter_rdh | /**
* Add a new evaluator to the counter statistics.
*
* @param key
* key of this statistic
* @param eval
* evaluator for the statistic
* @return the builder.
*/
public DynamicIOStatisticsBuilder withLongFunctionCounter(String key, ToLongFunction<String> eval) {
activeInstance().addCounterFunction(key, eval::applyAsLong);
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withAtomicIntegerMinimum_rdh | /**
* Add a minimum statistic to dynamically return the
* latest value of the source.
*
* @param key
* key of this statistic
* @param source
* atomic int minimum
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicIntegerMinimum(String key, AtomicInteger source) {
withLongFunctionMinimum(key, s -> source.get());
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withMutableCounter_rdh | /**
* Build a dynamic counter statistic from a
* {@link MutableCounterLong}.
*
* @param key
* key of this statistic
* @param source
* mutable long counter
* @return the builder.
*/
public DynamicIOStatisticsBuilder withMutableCounter(String key, MutableCounterLong source) {
withLongFunctionCounter(key, s -> source.value());
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionMinimum_rdh | /**
* Add a new evaluator to the minimum statistics.
*
* @param key
* key of this statistic
* @param eval
* evaluator for the statistic
* @return the builder.
*/
public DynamicIOStatisticsBuilder withLongFunctionMinimum(String key, ToLongFunction<String> eval) {
activeInstance().addMinimumFunction(key, eval::applyAsLong);
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionMaximum_rdh | /**
* Add a new evaluator to the maximum statistics.
*
* @param key
* key of this statistic
* @param eval
* evaluator for the statistic
* @return the builder.
   */
  public DynamicIOStatisticsBuilder withLongFunctionMaximum(String key, ToLongFunction<String> eval) {
activeInstance().addMaximumFunction(key, eval::applyAsLong);
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withAtomicLongGauge_rdh | /**
* Add a gauge statistic to dynamically return the
* latest value of the source.
*
* @param key
* key of this statistic
* @param source
* atomic long gauge
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicLongGauge(String key, AtomicLong source) {
withLongFunctionGauge(key, s -> source.get());
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withAtomicIntegerMaximum_rdh | /**
* Add a maximum statistic to dynamically return the
* latest value of the source.
*
* @param key
* key of this statistic
* @param source
* atomic int maximum
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicIntegerMaximum(String key, AtomicInteger source) {
withLongFunctionMaximum(key, s -> source.get());
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_withAtomicLongCounter_rdh | /**
* Add a counter statistic to dynamically return the
* latest value of the source.
*
* @param key
* key of this statistic
* @param source
* atomic long counter
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicLongCounter(String key, AtomicLong source) {
withLongFunctionCounter(key, s -> source.get());
return this;
} | 3.26 |
hadoop_DynamicIOStatisticsBuilder_activeInstance_rdh | /**
* Get the statistics instance.
*
* @return the instance to build/return
* @throws IllegalStateException
* if the builder has already been built.
*/
private DynamicIOStatistics activeInstance() {
    checkState(instance != null, "Already built");
return instance;
} | 3.26 |
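Taken together, the DynamicIOStatisticsBuilder rows above describe a fluent builder whose evaluators are re-read on every statistics lookup. A minimal usage sketch, assuming the `IOStatisticsBinding.dynamicIOStatistics()` factory that normally hands out this builder:

```java
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.fs.statistics.IOStatistics;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.dynamicIOStatistics;

public final class DynamicStatsExample {
  public static void main(String[] args) {
    AtomicLong bytesRead = new AtomicLong();     // backing source for a counter
    Queue<String> pending = new ArrayDeque<>();  // backing source for a gauge

    // Chain the builder methods from the rows above; build() invalidates the builder.
    IOStatistics stats = dynamicIOStatistics()
        .withAtomicLongCounter("bytes_read", bytesRead)
        .withLongFunctionGauge("pending_items", key -> pending.size())
        .build();

    bytesRead.addAndGet(4096);
    // Evaluators are consulted on each lookup, so the latest value is returned.
    System.out.println(stats.counters().get("bytes_read"));
  }
}
```

Because build() nulls the internal instance, any further builder call fails the checkState in activeInstance().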
hadoop_AllocateResponse_setUpdateErrors_rdh | /**
* Set the list of container update errors to inform the
* Application Master about the container updates that could not be
* satisfied due to error.
*
* @param updateErrors
* list of <code>UpdateContainerError</code> for
* containers updates requests that were in error
*/
@Public
  @Unstable
  public void setUpdateErrors(List<UpdateContainerError> updateErrors) {
} | 3.26 |
hadoop_AllocateResponse_containersFromPreviousAttempt_rdh | /**
* Set the <code>containersFromPreviousAttempt</code> of the response.
*
* @see AllocateResponse#setContainersFromPreviousAttempts(List)
* @param containersFromPreviousAttempt
* <code>containersFromPreviousAttempt</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder containersFromPreviousAttempt(List<Container> containersFromPreviousAttempt) {
allocateResponse.setContainersFromPreviousAttempts(containersFromPreviousAttempt);
return this;
} | 3.26 |
hadoop_AllocateResponse_amRmToken_rdh | /**
* Set the <code>amRmToken</code> of the response.
*
* @see AllocateResponse#setAMRMToken(Token)
* @param amRmToken
* <code>amRmToken</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder amRmToken(Token amRmToken) {
allocateResponse.setAMRMToken(amRmToken);
return this;
} | 3.26 |
hadoop_AllocateResponse_getUpdateErrors_rdh | /**
* Get the list of container update errors to inform the
* Application Master about the container updates that could not be
* satisfied due to error.
*
* @return List of Update Container Errors.
*/
@Public
@Unstable
public List<UpdateContainerError> getUpdateErrors() {
return new ArrayList<>();
} | 3.26 |
hadoop_AllocateResponse_numClusterNodes_rdh | /**
* Set the <code>numClusterNodes</code> of the response.
*
* @see AllocateResponse#setNumClusterNodes(int)
* @param numClusterNodes
* <code>numClusterNodes</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder numClusterNodes(int numClusterNodes) {
    allocateResponse.setNumClusterNodes(numClusterNodes);
    return this;
  } | 3.26 |
hadoop_AllocateResponse_updatedContainers_rdh | /**
* Set the <code>updatedContainers</code> of the response.
*
* @see AllocateResponse#setUpdatedContainers(List)
* @param updatedContainers
* <code>updatedContainers</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder updatedContainers(List<UpdatedContainer> updatedContainers) {
    allocateResponse.setUpdatedContainers(updatedContainers);
    return this;
} | 3.26 |
hadoop_AllocateResponse_updatedNodes_rdh | /**
* Set the <code>updatedNodes</code> of the response.
*
* @see AllocateResponse#setUpdatedNodes(List)
* @param updatedNodes
* <code>updatedNodes</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder updatedNodes(List<NodeReport> updatedNodes) {
allocateResponse.setUpdatedNodes(updatedNodes);
return this;
} | 3.26 |
hadoop_AllocateResponse_availableResources_rdh | /**
* Set the <code>availableResources</code> of the response.
*
* @see AllocateResponse#setAvailableResources(Resource)
* @param availableResources
* <code>availableResources</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder availableResources(Resource availableResources) {
allocateResponse.setAvailableResources(availableResources);
return this;
} | 3.26 |
hadoop_AllocateResponse_completedContainersStatuses_rdh | /**
* Set the <code>completedContainersStatuses</code> of the response.
*
* @see AllocateResponse#setCompletedContainersStatuses(List)
* @param completedContainersStatuses
* <code>completedContainersStatuses</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder completedContainersStatuses(List<ContainerStatus> completedContainersStatuses) {
allocateResponse.setCompletedContainersStatuses(completedContainersStatuses);
    return this;
  } | 3.26 |
hadoop_AllocateResponse_responseId_rdh | /**
* Set the <code>responseId</code> of the response.
*
* @see AllocateResponse#setResponseId(int)
* @param responseId
* <code>responseId</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder responseId(int responseId) {
allocateResponse.setResponseId(responseId);
return this;
} | 3.26 |
hadoop_AllocateResponse_allocatedContainers_rdh | /**
* Set the <code>allocatedContainers</code> of the response.
*
* @see AllocateResponse#setAllocatedContainers(List)
* @param allocatedContainers
* <code>allocatedContainers</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder allocatedContainers(List<Container> allocatedContainers) {
allocateResponse.setAllocatedContainers(allocatedContainers);
return this;
} | 3.26 |
hadoop_AllocateResponse_collectorInfo_rdh | /**
* Set the <code>collectorInfo</code> of the response.
*
* @see AllocateResponse#setCollectorInfo(CollectorInfo)
* @param collectorInfo
* <code>collectorInfo</code> of the response which
* contains collector address, RM id, version and collector token.
* @return {@link AllocateResponseBuilder}
   */
  @Private
@Unstable
public AllocateResponseBuilder collectorInfo(CollectorInfo collectorInfo) {
allocateResponse.setCollectorInfo(collectorInfo);
return this;
} | 3.26 |
hadoop_AllocateResponse_preemptionMessage_rdh | /**
* Set the <code>preemptionMessage</code> of the response.
*
* @see AllocateResponse#setPreemptionMessage(PreemptionMessage)
* @param preemptionMessage
* <code>preemptionMessage</code> of the response
* @return {@link AllocateResponseBuilder}
   */
  @Private
@Unstable
public AllocateResponseBuilder preemptionMessage(PreemptionMessage preemptionMessage) {
allocateResponse.setPreemptionMessage(preemptionMessage);
return this;
} | 3.26 |
hadoop_AllocateResponse_updateErrors_rdh | /**
* Set the <code>updateErrors</code> of the response.
*
* @see AllocateResponse#setUpdateErrors(List)
* @param updateErrors
* <code>updateErrors</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder updateErrors(List<UpdateContainerError> updateErrors) {
allocateResponse.setUpdateErrors(updateErrors);
return this;
} | 3.26 |
hadoop_AllocateResponse_nmTokens_rdh | /**
* Set the <code>nmTokens</code> of the response.
*
* @see AllocateResponse#setNMTokens(List)
* @param nmTokens
* <code>nmTokens</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder nmTokens(List<NMToken> nmTokens) {
allocateResponse.setNMTokens(nmTokens);
return this;
} | 3.26 |
hadoop_AllocateResponse_applicationPriority_rdh | /**
* Set the <code>applicationPriority</code> of the response.
*
* @see AllocateResponse#setApplicationPriority(Priority)
* @param applicationPriority
* <code>applicationPriority</code> of the response
* @return {@link AllocateResponseBuilder}
   */
  @Private
@Unstable
public AllocateResponseBuilder applicationPriority(Priority applicationPriority) {
allocateResponse.setApplicationPriority(applicationPriority);
return this;
} | 3.26 |
hadoop_AllocateResponse_amCommand_rdh | /**
* Set the <code>amCommand</code> of the response.
*
* @see AllocateResponse#setAMCommand(AMCommand)
* @param amCommand
* <code>amCommand</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder amCommand(AMCommand amCommand) {
allocateResponse.setAMCommand(amCommand);
return this;
} | 3.26 |
hadoop_AllocateResponse_setRejectedSchedulingRequests_rdh | /**
* Add a list of rejected SchedulingRequests to the AllocateResponse.
*
* @param rejectedRequests
* List of Rejected Scheduling Requests.
*/
@Private
@Unstable
  public void setRejectedSchedulingRequests(List<RejectedSchedulingRequest> rejectedRequests) {
} | 3.26 |
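The AllocateResponse rows above are the builder-style setters used when assembling an allocation response. A hedged sketch of chaining them, assuming the `AllocateResponse.newBuilder()` factory and a `build()` terminal call that accompany AllocateResponseBuilder:

```java
import java.util.Collections;

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.Resource;

public final class AllocateResponseExample {
  // Assembles a response with the builder methods listed above.
  public static AllocateResponse emptyHeartbeatResponse() {
    return AllocateResponse.newBuilder()                     // assumed factory for the builder
        .responseId(1)
        .numClusterNodes(4)
        .availableResources(Resource.newInstance(8192, 4))   // 8 GiB / 4 vcores of headroom
        .allocatedContainers(Collections.emptyList())
        .completedContainersStatuses(Collections.emptyList())
        .updatedNodes(Collections.emptyList())
        .amCommand(AMCommand.AM_RESYNC)                      // illustrative AM command
        .build();                                            // assumed terminal call
  }
}
```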
hadoop_Matcher_match_rdh | /**
* Match any datanode with any other datanode.
   */
  public static final Matcher ANY_OTHER = new Matcher() {
@Override
public boolean match(NetworkTopology cluster, Node left, Node right) {
return left != right;
} | 3.26 |
hadoop_AMRMProxyMetrics_getMetrics_rdh | /**
* Initialize the singleton instance.
*
* @return the singleton
*/
public static AMRMProxyMetrics getMetrics() {
    synchronized (AMRMProxyMetrics.class) {
      if (instance == null) {
instance = DefaultMetricsSystem.instance().register("AMRMProxyMetrics", "Metrics for the Yarn AMRMProxy", new AMRMProxyMetrics());
}
}
return instance;
} | 3.26 |
hadoop_NativeTaskOutputFiles_getOutputIndexFileForWrite_rdh | /**
* Create a local map output index file name.
*
* @param size
* the size of the file
*/
public Path getOutputIndexFileForWrite(long size) throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_getInputFile_rdh | /**
* Return a local reduce input file created earlier
*
* @param mapId
* a map task id
*/
public Path getInputFile(int mapId) throws IOException {
return lDirAlloc.getLocalPathToRead(String.format(REDUCE_INPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, Integer.valueOf(mapId)), conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_m0_rdh | /**
* Return the path to local map output file created earlier
*/
public Path m0() throws IOException {
String path = String.format(OUTPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_getSpillIndexFile_rdh | /**
* Return a local map spill index file created earlier
*
* @param spillNumber
* the number
*/
public Path getSpillIndexFile(int spillNumber) throws IOException {
    String path = String.format(SPILL_INDEX_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_getOutputIndexFile_rdh | /**
* Return the path to a local map output index file created earlier
*/
public Path getOutputIndexFile() throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_m1_rdh | /**
* Create a local reduce input file name.
*
* @param mapId
* a map task id
* @param size
* the size of the file
*/
  public Path m1(TaskID mapId, long size, Configuration conf) throws IOException {
return lDirAlloc.getLocalPathForWrite(String.format(REDUCE_INPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, mapId.getId()), size, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_getSpillFileForWrite_rdh | /**
* Create a local map spill file name.
*
* @param spillNumber
* the number
* @param size
* the size of the file
*/
public Path getSpillFileForWrite(int spillNumber, long size) throws IOException {
String path = String.format(SPILL_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_getOutputFileForWrite_rdh | /**
* Create a local map output file name.
*
* @param size
* the size of the file
*/
public Path getOutputFileForWrite(long size) throws IOException {
String path = String.format(OUTPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_getSpillFile_rdh | /**
* Return a local map spill file created earlier.
*
* @param spillNumber
* the number
*/
public Path getSpillFile(int spillNumber) throws IOException {
String path = String.format(SPILL_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_getSpillIndexFileForWrite_rdh | /**
* Create a local map spill index file name.
*
* @param spillNumber
* the number
* @param size
* the size of the file
*/
  public Path getSpillIndexFileForWrite(int spillNumber, long size) throws IOException {
    String path = String.format(SPILL_INDEX_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_NativeTaskOutputFiles_removeAll_rdh | /**
* Removes all of the files related to a task.
*/
public void removeAll() throws IOException {
conf.deleteLocalFiles(TASKTRACKER_OUTPUT);
} | 3.26 |
hadoop_MonotonicClock_getTime_rdh | /**
* Get current time from some arbitrary time base in the past, counting in
* milliseconds, and not affected by settimeofday or similar system clock
* changes.
*
 * @return the current monotonic time in milliseconds.
*/
public long getTime() {
return Time.monotonicNow();
} | 3.26 |
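Since getTime() is documented as unaffected by settimeofday-style clock changes, it is the right primitive for measuring durations. A small sketch, assuming MonotonicClock lives at `org.apache.hadoop.yarn.util.MonotonicClock`:

```java
import org.apache.hadoop.yarn.util.MonotonicClock;

public final class ElapsedTimeExample {
  public static void main(String[] args) throws InterruptedException {
    MonotonicClock clock = new MonotonicClock();  // assumed package: org.apache.hadoop.yarn.util
    long start = clock.getTime();
    Thread.sleep(100);                            // stand-in for real work
    long elapsedMs = clock.getTime() - start;     // cannot go negative on wall-clock adjustments
    System.out.println("elapsed ms: " + elapsedMs);
  }
}
```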
hadoop_EntityTypeReader_getNextRowKey_rdh | /**
 * Gets the next possible row key prefix given the current prefix and entity type.
*
* @param currRowKeyPrefix
* The current prefix that contains user, cluster,
* flow, run, and application id.
* @param entityType
* Current entity type.
 * @return A new prefix for the next possible row key.
*/
private static byte[] getNextRowKey(byte[] currRowKeyPrefix, String entityType) {
if ((currRowKeyPrefix == null) || (entityType == null)) {
return null;
}
byte[] entityTypeEncoded = Separator.QUALIFIERS.join(Separator.encode(entityType, Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Separator.EMPTY_BYTES);
byte[] currRowKey = new byte[currRowKeyPrefix.length + entityTypeEncoded.length];
System.arraycopy(currRowKeyPrefix, 0, currRowKey, 0, currRowKeyPrefix.length);
System.arraycopy(entityTypeEncoded, 0, currRowKey, currRowKeyPrefix.length, entityTypeEncoded.length);
return HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(currRowKey);
} | 3.26 |
hadoop_LogParserUtil_stringToUnixTimestamp_rdh | /**
 * Converts a String date to a Unix timestamp. Note that we assume the time in
 * the logs has the same time zone as the machine that runs the
 * {@link RmSingleLineParser}.
*
* @param date
* The String date.
* @return Unix time stamp.
* @throws ParseException
* if data conversion from String to unix timestamp
* fails.
*/
public long stringToUnixTimestamp(final String date) throws ParseException {
return format.parse(date).getTime();
} | 3.26 |
hadoop_LogParserUtil_setLogParser_rdh | /**
* Set the {@link LogParser} to use.
*
* @param logParser
* the {@link LogParser} to use.
*/
  public void setLogParser(final LogParser logParser) {
    this.logParser = logParser;
  } | 3.26 |
hadoop_LogParserUtil_setDateFormat_rdh | /**
* Set date format for the {@link LogParser}.
*
* @param datePattern
* the date pattern in the log.
*/
public void setDateFormat(final String datePattern) {
    this.format = new SimpleDateFormat(datePattern);
  } | 3.26 |
hadoop_LogParserUtil_parseLog_rdh | /**
* Parse the log file/directory.
*
* @param logFile
* the file/directory of the log.
* @throws SkylineStoreException
* if fails to addHistory to
* {@link SkylineStore}.
* @throws IOException
* if fails to parse the log.
* @throws ResourceEstimatorException
* if the {@link LogParser}
* is not initialized.
*/
public final void parseLog(final String logFile) throws SkylineStoreException, IOException, ResourceEstimatorException {
if (logParser == null) {
throw new ResourceEstimatorException("The log parser is not initialized," + " please try again after initializing.");
}
InputStream inputStream = null;
try {
inputStream = new FileInputStream(logFile);
logParser.parseStream(inputStream);
} finally {
      if (inputStream != null) {
        inputStream.close();
      }
}
} | 3.26 |
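The three LogParserUtil rows above combine as: configure a date pattern, convert log timestamps, then feed whole files to the parser. A hedged sketch of the date handling only; the package path and the no-argument constructor are assumed from the resource estimator module:

```java
import java.text.ParseException;

import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil; // assumed package

public final class TimestampExample {
  public static void main(String[] args) throws ParseException {
    LogParserUtil util = new LogParserUtil();
    util.setDateFormat("dd/MM/yyyy HH:mm:ss");                 // must match the log's timestamps
    long unixMillis = util.stringToUnixTimestamp("15/01/2024 10:30:00");
    System.out.println(unixMillis);
  }
}
```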
hadoop_AuditingIntegration_exitStage_rdh | /**
* Remove stage from common audit context.
*/
public static void exitStage() {
currentAuditContext().remove(CONTEXT_ATTR_STAGE);
} | 3.26 |
hadoop_AuditingIntegration_enterStage_rdh | /**
* Callback on stage entry.
* Sets the activeStage and updates the
* common context.
*
* @param stage
* new stage
*/
public static void enterStage(String stage) {
currentAuditContext().put(CONTEXT_ATTR_STAGE, stage);
} | 3.26 |
hadoop_AuditingIntegration_updateCommonContextOnCommitterEntry_rdh | /**
* Add jobID to current context; also
* task attempt ID if set.
*/
public static void updateCommonContextOnCommitterEntry(ManifestCommitterConfig committerConfig) {
CommonAuditContext context = currentAuditContext();
context.put(PARAM_JOB_ID, committerConfig.getJobUniqueId());
// maybe the task attempt ID.
if (!committerConfig.getTaskAttemptId().isEmpty()) {
context.put(CONTEXT_ATTR_TASK_ATTEMPT_ID, committerConfig.getTaskAttemptId());
}
} | 3.26 |
hadoop_AuditingIntegration_updateCommonContextOnCommitterExit_rdh | /**
* Remove commit info at the end of the task or job.
*/
public static void updateCommonContextOnCommitterExit() {
currentAuditContext().remove(PARAM_JOB_ID);
currentAuditContext().remove(CONTEXT_ATTR_TASK_ATTEMPT_ID);
} | 3.26 |
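enterStage() and exitStage() are intended to be paired so the stage attribute never leaks into later audit spans. A hedged sketch of that pairing, with the AuditingIntegration package assumed from the manifest committer module:

```java
import org.apache.hadoop.mapreduce.lib.output.committer.manifest.impl.AuditingIntegration; // assumed package

public final class StageAuditExample {
  public static void runStage(String stageName, Runnable work) {
    AuditingIntegration.enterStage(stageName);  // publish the stage into the audit context
    try {
      work.run();
    } finally {
      AuditingIntegration.exitStage();          // always remove it, even on failure
    }
  }
}
```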
hadoop_AuditReplayCommand_isPoison_rdh | /**
* If true, the thread which consumes this item should not process any further
* items and instead simply terminate itself.
*/
boolean isPoison() {
return false;
} | 3.26 |
hadoop_QueueStateManager_canDelete_rdh | /**
* Whether this queue can be deleted.
*
* @param queueName
* the queue name
* @return true if the queue can be deleted
*/
@SuppressWarnings("unchecked")
public boolean canDelete(String queueName) {
SchedulerQueue<T> queue = queueManager.getQueue(queueName);
if (queue == null) {
LOG.info(("The specified queue:" + queueName) + " does not exist!");
return false;
}
if (queue.getState() == QueueState.STOPPED) {
return true;
}
LOG.info(("Need to stop the specific queue:" + queueName) + " first.");
return false;
} | 3.26 |
hadoop_QueueStateManager_stopQueue_rdh | /**
* Stop the queue.
*
* @param queueName
* the queue name
* @throws YarnException
* if the queue does not exist
*/
@SuppressWarnings("unchecked")
public synchronized void stopQueue(String queueName) throws YarnException {
SchedulerQueue<T> queue = queueManager.getQueue(queueName);
if (queue == null) {
throw new YarnException(("The specified queue:" + queueName) + " does not exist!"); }
queue.stopQueue();
} | 3.26 |
hadoop_QueueStateManager_activateQueue_rdh | /**
* Active the queue.
*
* @param queueName
* the queue name
* @throws YarnException
* if the queue does not exist
* or the queue can not be activated.
*/
@SuppressWarnings("unchecked")
public synchronized void activateQueue(String queueName) throws YarnException {
SchedulerQueue<T> queue = queueManager.getQueue(queueName);
if (queue == null) {
throw new YarnException(("The specified queue:" + queueName) + " does not exist!");
}
queue.activateQueue();
} | 3.26 |
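canDelete() only returns true for a STOPPED queue, so deletion is normally preceded by stopQueue(). A hedged sketch using only the methods shown above; the package of QueueStateManager is assumed and its generic parameters are elided via a raw type, with an already-initialized manager supplied by the caller:

```java
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueStateManager; // assumed package

public final class QueueLifecycleExample {
  @SuppressWarnings({"rawtypes", "unchecked"})
  public static boolean stopAndCheckDeletable(QueueStateManager manager, String queueName)
      throws YarnException {
    manager.stopQueue(queueName);         // a queue must be STOPPED before deletion
    return manager.canDelete(queueName);  // true only once the queue is stopped
  }
}
```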
hadoop_AzureNativeFileSystemStore_isAtomicRenameKey_rdh | /**
 * Checks whether the given key in Azure storage should have synchronized
 * atomic folder rename (createNonRecursive) implemented.
*/
@Override
public boolean isAtomicRenameKey(String key) {
return isKeyForDirectorySet(key, atomicRenameDirs);
} | 3.26 |
hadoop_AzureNativeFileSystemStore_getInstrumentedContext_rdh | /**
* Creates a new OperationContext for the Azure Storage operation that has
* listeners hooked to it that will update the metrics for this file system.
*
* @param bindConcurrentOOBIo
* - bind to intercept send request call backs to handle OOB I/O.
* @return The OperationContext object to use.
*/
private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {
OperationContext v81 = new OperationContext();
// Set User-Agent
v81.getSendingRequestEventHandler().addListener(new StorageEvent<SendingRequestEvent>() {
@Override
public void eventOccurred(SendingRequestEvent eventArg) {
HttpURLConnection connection = ((HttpURLConnection) (eventArg.getConnectionObject()));
String userAgentInfo = String.format(Utility.LOCALE_US, "WASB/%s (%s) %s", VersionInfo.getVersion(), userAgentId, BaseRequest.getUserAgent());
connection.setRequestProperty(HeaderConstants.USER_AGENT, userAgentInfo);
}
});
if (selfThrottlingEnabled) {
SelfThrottlingIntercept.hook(v81, f5, selfThrottlingWriteFactor);
} else if (autoThrottlingEnabled) {
ClientThrottlingIntercept.hook(v81);
}
    if (bandwidthGaugeUpdater != null) {
// bandwidthGaugeUpdater is null when we config to skip azure metrics
ResponseReceivedMetricUpdater.hook(v81, instrumentation, bandwidthGaugeUpdater);
}
// Bind operation context to receive send request callbacks on this operation.
// If reads concurrent to OOB writes are allowed, the interception will reset
// the conditional header on all Azure blob storage read requests.
if (bindConcurrentOOBIo) {
      SendRequestIntercept.bind(v81);
    }
    if (testHookOperationContext != null) {
      v81 = testHookOperationContext.modifyOperationContext(v81);
    }
ErrorMetricUpdater.hook(v81, instrumentation);
// Return the operation context.
return v81;
} | 3.26 |
hadoop_AzureNativeFileSystemStore_suppressRetryPolicyInClientIfNeeded_rdh | /**
* If we're asked by unit tests to not retry, set the retry policy factory in
* the client accordingly.
*/
private void suppressRetryPolicyInClientIfNeeded() {
if (suppressRetryPolicy) {
storageInteractionLayer.setRetryPolicyFactory(new RetryNoRetry());
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_checkContainer_rdh | /**
* This should be called from any method that does any modifications to the
* underlying container: it makes sure to put the WASB current version in the
* container's metadata if it's not already there.
*/
private ContainerState checkContainer(ContainerAccessType accessType) throws StorageException, AzureException {
    synchronized (containerStateLock) {
      if (isOkContainerState(accessType)) {
return currentKnownContainerState;
}
if (currentKnownContainerState == ContainerState.ExistsAtWrongVersion) {
String containerVersion = retrieveVersionAttribute(f0);
throw wrongVersionException(containerVersion);
}
// This means I didn't check it before or it didn't exist or
// we need to stamp the version. Since things may have changed by
// other machines since then, do the check again and don't depend
// on past information.
// Sanity check: we don't expect this at this point.
if (currentKnownContainerState == ContainerState.ExistsAtRightVersion) {
throw new AssertionError("Unexpected state: " + currentKnownContainerState);
}
// Download the attributes - doubles as an existence check with just
// one service call
try {
f0.downloadAttributes(getInstrumentedContext());
currentKnownContainerState = ContainerState.Unknown;
} catch (StorageException ex) {
if (StorageErrorCodeStrings.CONTAINER_NOT_FOUND.toString().equals(ex.getErrorCode())) {
currentKnownContainerState = ContainerState.DoesntExist;
} else {
throw ex;
}
}
if (currentKnownContainerState == ContainerState.DoesntExist) {
// If the container doesn't exist and we intend to write to it,
// create it now.
if (needToCreateContainer(accessType)) {
storeVersionAttribute(f0);
f0.create(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// The container exists, check the version.
String containerVersion = retrieveVersionAttribute(f0);
if (containerVersion != null) {
if (containerVersion.equals(FIRST_WASB_VERSION)) {
// It's the version from when WASB was called ASV, just
// fix the version attribute if needed and proceed.
// We should be good otherwise.
if (needToStampVersion(accessType)) {
storeVersionAttribute(f0);
f0.uploadMetadata(getInstrumentedContext());
}
} else if (!containerVersion.equals(CURRENT_WASB_VERSION)) {
// Don't know this version - throw.
currentKnownContainerState = ContainerState.ExistsAtWrongVersion;
throw wrongVersionException(containerVersion);
} else {
// It's our correct version.
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// No version info exists.
currentKnownContainerState = ContainerState.ExistsNoVersion;
if (needToStampVersion(accessType)) {
// Need to stamp the version
storeVersionAttribute(f0);
f0.uploadMetadata(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
}
}
return currentKnownContainerState;
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_delete_rdh | /**
* API implementation to delete a blob in the back end azure storage.
*/
@Override
  public boolean delete(String key) throws IOException {
try {
return delete(key, null);
} catch (IOException e) {
Throwable t = e.getCause();
if (t instanceof StorageException) {
StorageException se = ((StorageException) (t));
if ("LeaseIdMissing".equals(se.getErrorCode())) {
SelfRenewingLease lease = null;
try {
lease = acquireLease(key);
return delete(key, lease);
          } catch (AzureException e3) {
LOG.warn((("Got unexpected exception trying to acquire lease on " + key) + ".") + e3.getMessage());
throw e3;
} finally {
try {
if (lease != null) {
lease.free();
}
} catch (Exception e4) {
LOG.error("Unable to free lease on " + key, e4);}
}
} else {
throw e;
}
} else {
throw e;
}
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_connectUsingAnonymousCredentials_rdh | /**
* Connect to Azure storage using anonymous credentials.
*
* @param uri
* - URI to target blob (R/O access to public blob)
* @throws StorageException
* raised on errors communicating with Azure storage.
* @throws IOException
* raised on errors performing I/O or setting up the session.
* @throws URISyntaxException
* raised on creating mal-formed URI's.
   */
  private void connectUsingAnonymousCredentials(final URI uri) throws StorageException, IOException, URISyntaxException {
// Use an HTTP scheme since the URI specifies a publicly accessible
// container. Explicitly create a storage URI corresponding to the URI
// parameter for use in creating the service client.
String accountName = getAccountFromAuthority(uri);
URI storageUri = new URI((((getHTTPScheme() + ":") + PATH_DELIMITER) + PATH_DELIMITER) + accountName);
// Create the service client with anonymous credentials.
String containerName = getContainerFromAuthority(uri);
storageInteractionLayer.createBlobClient(storageUri);
suppressRetryPolicyInClientIfNeeded();
// Capture the container reference.
f0 = storageInteractionLayer.getContainerReference(containerName);
rootDirectory = f0.getDirectoryReference("");
// Check for container existence, and our ability to access it.
boolean canAccess;
try {
canAccess = f0.exists(getInstrumentedContext());
} catch (StorageException ex) {
LOG.error("Service returned StorageException when checking existence " + "of container {} in account {}", containerName, accountName, ex);
canAccess = false;
    }
    if (!canAccess) {
throw new AzureException(String.format(NO_ACCESS_TO_CONTAINER_MSG, accountName, containerName));
}
// Accessing the storage server unauthenticated using
// anonymous credentials.
isAnonymousCredentials = true;
} | 3.26 |
hadoop_AzureNativeFileSystemStore_getHadoopBlockSize_rdh | /**
* Returns the file block size. This is a fake value used for integration
* of the Azure store with Hadoop.
*/
@Override
public long getHadoopBlockSize() {
return hadoopBlockSize;
} | 3.26 |
hadoop_AzureNativeFileSystemStore_connectToAzureStorageInSecureMode_rdh | /**
* Method to set up the Storage Interaction layer in Secure mode.
*
* @param accountName
* - Storage account provided in the initializer
* @param containerName
* - Container name provided in the initializer
* @param sessionUri
* - URI provided in the initializer
*/
private void connectToAzureStorageInSecureMode(String accountName, String containerName, URI sessionUri) throws AzureException, StorageException, URISyntaxException {
LOG.debug("Connecting to Azure storage in Secure Mode");
// Assertion: storageInteractionLayer instance has to be a SecureStorageInterfaceImpl
if (!(this.storageInteractionLayer instanceof SecureStorageInterfaceImpl)) {
throw new AssertionError("connectToAzureStorageInSecureMode() should be called only" + " for SecureStorageInterfaceImpl instances");
}
((SecureStorageInterfaceImpl) (this.storageInteractionLayer)).setStorageAccountName(accountName);
connectingUsingSAS = true;
f0 = storageInteractionLayer.getContainerReference(containerName);
rootDirectory = f0.getDirectoryReference("");
canCreateOrModifyContainer = true;
} | 3.26 |
hadoop_AzureNativeFileSystemStore_isOkContainerState_rdh | // Determines whether we have to pull the container information again
// or we can work based off what we already have.
private boolean isOkContainerState(ContainerAccessType accessType) {
switch (currentKnownContainerState) {
case Unknown :
// When using SAS, we can't discover container attributes
// so just live with Unknown state and fail later if it
// doesn't exist.
return connectingUsingSAS;
case DoesntExist :
        return false; // the container could have been created
      case ExistsAtRightVersion :
        return true; // fine to optimize
case ExistsAtWrongVersion :
return false;
case ExistsNoVersion :
// If there's no version, it's OK if we don't need to stamp the version
// or we can't anyway even if we wanted to.
return !needToStampVersion(accessType);
default :
throw new AssertionError("Unknown access type: " + accessType);
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_m0_rdh | /**
* Checks if the given key in Azure Storage should be stored as a page
* blob instead of block blob.
*/
public boolean m0(String key) {
return isKeyForDirectorySet(key, pageBlobDirs);
} | 3.26 |
hadoop_AzureNativeFileSystemStore_setToString_rdh | /**
* Helper to format a string for log output from Set<String>
   */
  private String setToString(Set<String> set) {
    StringBuilder sb = new StringBuilder();
int i = 1;
for (String s : set) {
sb.append("/" + s);
if (i != set.size()) {
sb.append(", ");
}
i++;
}
return sb.toString();
} | 3.26 |
hadoop_AzureNativeFileSystemStore_getDirectorySet_rdh | /**
* Take a comma-separated list of directories from a configuration variable
* and transform it to a set of directories.
*/
private Set<String> getDirectorySet(final String configVar) throws AzureException {
    String[] rawDirs = sessionConfiguration.getStrings(configVar, new String[0]);
    Set<String> directorySet = new HashSet<String>();
for (String currentDir : rawDirs) {
String myDir;
try {
myDir = verifyAndConvertToStandardFormat(currentDir.trim());
} catch (URISyntaxException ex) {
throw new AzureException(String.format("The directory %s specified in the configuration entry %s is not" + " a valid URI.", currentDir, configVar));
}
if (myDir != null) {
directorySet.add(myDir);
}
}
return directorySet;
} | 3.26 |
hadoop_AzureNativeFileSystemStore_finalize_rdh | // Finalizer to ensure complete shutdown
@Override
  protected void finalize() throws Throwable {
    LOG.debug("finalize() called");
close();
super.finalize();
} | 3.26 |
hadoop_AzureNativeFileSystemStore_getContainerFromAuthority_rdh | /**
* Method to extract the container name from an Azure URI.
*
* @param uri
* -- WASB blob URI
 * @return containerName -- the container name for the URI. May be null.
* @throws URISyntaxException
* if the uri does not have an authority it is badly formed.
*/
private String getContainerFromAuthority(URI uri) throws URISyntaxException {
// Check to make sure that the authority is valid for the URI.
//
    String authority = uri.getRawAuthority();
if (null == authority) {
// Badly formed or illegal URI.
//
throw new URISyntaxException(uri.toString(), "Expected URI with a valid authority");
    }
    // The URI has a valid authority. Extract the container name. It is the
// second component of the WASB URI authority.
if (!authority.contains(WASB_AUTHORITY_DELIMITER)) {
// The authority does not have a container name. Use the default container by
// setting the container name to the default Azure root container.
//
return AZURE_ROOT_CONTAINER;
}
// Split off the container name and the authority.
    String[] authorityParts = authority.split(WASB_AUTHORITY_DELIMITER, 2);
// Because the string contains an '@' delimiter, a container must be
// specified.
if ((authorityParts.length < 2) || "".equals(authorityParts[0])) {
// Badly formed WASB authority since there is no container.
final String v12 = String.format("URI '%s' has a malformed WASB authority, expected container name." + "Authority takes the form wasb://[<container name>@]<account name>", uri.toString());
throw new IllegalArgumentException(v12);
}
// Set the container name from the first entry for the split parts of the
// authority.
return authorityParts[0];
} | 3.26 |
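getContainerFromAuthority() (and getAccountFromAuthority() further down) split a WASB authority of the form `wasb://[<container>@]<account>`. An illustrative sketch of that decomposition using plain `java.net.URI` rather than the private methods themselves; the `$root` default container name is an assumption:

```java
import java.net.URI;
import java.net.URISyntaxException;

public final class WasbAuthorityExample {
  public static void main(String[] args) throws URISyntaxException {
    URI uri = new URI("wasb://mycontainer@myaccount.blob.core.windows.net/data/file.txt");
    String authority = uri.getRawAuthority();           // "mycontainer@myaccount.blob.core.windows.net"
    String[] parts = authority.split("@", 2);
    String container = parts.length == 2 ? parts[0] : "$root";   // assumed default root container
    String account   = parts.length == 2 ? parts[1] : authority;
    System.out.println(container + " @ " + account);
  }
}
```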
hadoop_AzureNativeFileSystemStore_isBlockBlobWithCompactionKey_rdh | /**
* Checks if the given key in Azure Storage should be stored as a block blobs
* with compaction enabled instead of normal block blob.
*
* @param key
* blob name
* @return true, if the file is in directory with block compaction enabled.
*/
public boolean isBlockBlobWithCompactionKey(String key) {
return isKeyForDirectorySet(key, blockBlobWithCompationDirs);
} | 3.26 |
hadoop_AzureNativeFileSystemStore_acquireLease_rdh | /**
* Get a lease on the blob identified by key. This lease will be renewed
* indefinitely by a background thread.
*/
@Override
public SelfRenewingLease acquireLease(String key) throws AzureException {
LOG.debug("acquiring lease on {}", key);
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
return blob.acquireLease();
} catch (Exception e) {
// Caught exception while attempting to get lease. Re-throw as an
// Azure storage exception.
throw new AzureException(e);
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_getHTTPScheme_rdh | /**
 * Return the appropriate scheme for communicating with
 * Azure depending on whether wasb or wasbs is specified in the target URI.
 *
 * @return scheme - HTTPS or HTTP as appropriate.
*/
private String getHTTPScheme() {
String sessionScheme = sessionUri.getScheme();
// Check if we're on a secure URI scheme: wasbs or the legacy asvs scheme.
if ((sessionScheme != null) && (sessionScheme.equalsIgnoreCase("asvs") || sessionScheme.equalsIgnoreCase("wasbs"))) {
return HTTPS_SCHEME;
} else {
// At this point the scheme should be either null or asv or wasb.
// Intentionally I'm not going to validate it though since I don't feel
// it's this method's job to ensure a valid URI scheme for this file
// system.
return HTTP_SCHEME;
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_changePermissionStatus_rdh | /**
* Changes the permission status on the given key.
*/
@Override
public void changePermissionStatus(String key, PermissionStatus newPermission) throws AzureException {
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
storePermissionStatus(blob, newPermission);
blob.uploadMetadata(getInstrumentedContext());
} catch (Exception e) {
throw new AzureException(e);
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_getDataLength_rdh | /**
* Return the actual data length of the blob with the specified properties.
* If it is a page blob, you can't rely on the length from the properties
* argument and you must get it from the file. Otherwise, you can.
*/
  private long getDataLength(CloudBlobWrapper blob, BlobProperties properties) throws AzureException {
    if (blob instanceof CloudPageBlobWrapper) {
try {
return PageBlobInputStream.getPageBlobDataSize(((CloudPageBlobWrapper) (blob)), getInstrumentedContext(isConcurrentOOBAppendAllowed()));
} catch (Exception e) {
throw new AzureException("Unexpected exception getting page blob actual data size.", e);
}
}
return properties.getLength();
} | 3.26 |
hadoop_AzureNativeFileSystemStore_safeDelete_rdh | /**
* Deletes the given blob, taking special care that if we get a
* blob-not-found exception upon retrying the operation, we just
* swallow the error since what most probably happened is that
* the first operation succeeded on the server.
*
* @param blob
* The blob to delete.
* @param lease
* Azure blob lease, or null if no lease is to be used.
* @throws StorageException
   */
  private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {
OperationContext operationContext = getInstrumentedContext();
try {
blob.delete(operationContext, lease);
} catch (StorageException e) {
if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
LOG.error("Encountered Storage Exception for delete on Blob: {}" + ", Exception Details: {} Error Code: {}", blob.getUri(), e.getMessage(), e.getErrorCode());
}
// On exception, check that if:
// 1. It's a BlobNotFound exception AND
// 2. It got there after one-or-more retries THEN
// we swallow the exception.
if ((((e.getErrorCode() != null) && "BlobNotFound".equals(e.getErrorCode())) && (operationContext.getRequestResults().size() > 1)) && (operationContext.getRequestResults().get(0).getException() != null)) {
LOG.debug("Swallowing delete exception on retry: {}", e.getMessage());
return;
} else {
throw e;
}
} finally {
if (lease != null) {
lease.free();
}
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_getLinkInFileMetadata_rdh | /**
* If the blob with the given key exists and has a link in its metadata to a
* temporary file (see storeEmptyLinkFile), this method returns the key to
* that temporary file. Otherwise, returns null.
*/
@Override
public String getLinkInFileMetadata(String key) throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format("Storage session expected for URI '%s' but does not exist.", sessionUri);throw new AssertionError(errMsg);
}
try {
checkContainer(ContainerAccessType.PureRead);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
return getLinkAttributeValue(blob);
} catch (Exception e) {
// Caught exception while attempting download. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_trim_rdh | /**
* Trims a suffix/prefix from the given string. For example if
* s is given as "/xy" and toTrim is "/", this method returns "xy"
*/
private static String trim(String s, String toTrim) {
return StringUtils.removeEnd(StringUtils.removeStart(s, toTrim), toTrim);
} | 3.26 |
hadoop_AzureNativeFileSystemStore_defaultPermissionNoBlobMetadata_rdh | /**
* Default permission to use when no permission metadata is found.
*
* @return The default permission to use.
*/
private static PermissionStatus defaultPermissionNoBlobMetadata() {
return new PermissionStatus("", "", FsPermission.getDefault());
} | 3.26 |
hadoop_AzureNativeFileSystemStore_openOutputStream_rdh | /**
* Opens a new output stream to the given blob (page or block blob)
* to populate it from scratch with data.
*/
private OutputStream openOutputStream(final CloudBlobWrapper blob) throws StorageException {
    if (blob instanceof CloudPageBlobWrapper) {
      return new PageBlobOutputStream(((CloudPageBlobWrapper) (blob)), getInstrumentedContext(), sessionConfiguration);
} else {
// Handle both ClouldBlockBlobWrapperImpl and (only for the test code path)
// MockCloudBlockBlobWrapper.
return ((CloudBlockBlobWrapper) (blob)).openOutputStream(getUploadOptions(), getInstrumentedContext());
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_verifyAndConvertToStandardFormat_rdh | /**
* Checks if the given rawDir belongs to this account/container, and
* if so returns the canonicalized path for it. Otherwise return null.
*/
private String verifyAndConvertToStandardFormat(String rawDir) throws URISyntaxException {
URI asUri = new URI(rawDir);
if ((asUri.getAuthority() == null) || asUri.getAuthority().toLowerCase(Locale.ENGLISH).equalsIgnoreCase(sessionUri.getAuthority().toLowerCase(Locale.ENGLISH))) {
// Applies to me.
return trim(asUri.getPath(), "/");
    } else {
      // Doesn't apply to me.
return null;
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_normalizeKey_rdh | /**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param directory
* - adjust the key to this directory to a path relative to the root
* directory
 * @return normKey
*/
private String normalizeKey(CloudBlobDirectoryWrapper directory) {
String dirKey = normalizeKey(directory.getUri());
// Strip the last delimiter
if (dirKey.endsWith(PATH_DELIMITER)) {
      dirKey = dirKey.substring(0, dirKey.length() - 1);
}
return dirKey;
} | 3.26 |
hadoop_AzureNativeFileSystemStore_configureAzureStorageSession_rdh | /**
* Set the configuration parameters for this client storage session with
* Azure.
*
* @throws AzureException
*/
  private void configureAzureStorageSession() throws AzureException {
    // Assertion: Target session URI already should have been captured.
if (sessionUri == null) {
throw new AssertionError("Expected a non-null session URI when configuring storage session");
}
// Assertion: A client session already should have been established with
// Azure.
if (storageInteractionLayer == null) {
throw new AssertionError(String.format("Cannot configure storage session for URI '%s' " + "if storage session has not been established.", sessionUri.toString()));
}
// Determine whether or not reads are allowed concurrent with OOB writes.
tolerateOobAppends = sessionConfiguration.getBoolean(KEY_READ_TOLERATE_CONCURRENT_APPEND, DEFAULT_READ_TOLERATE_CONCURRENT_APPEND);
// Retrieve configuration for the minimum stream read and write block size.
//
this.downloadBlockSizeBytes = sessionConfiguration.getInt(KEY_STREAM_MIN_READ_SIZE, DEFAULT_DOWNLOAD_BLOCK_SIZE);
this.uploadBlockSizeBytes = sessionConfiguration.getInt(KEY_WRITE_BLOCK_SIZE, DEFAULT_UPLOAD_BLOCK_SIZE);
this.hadoopBlockSize = sessionConfiguration.getLong(HADOOP_BLOCK_SIZE_PROPERTY_NAME, DEFAULT_HADOOP_BLOCK_SIZE);
this.inputStreamVersion = sessionConfiguration.getInt(KEY_INPUT_STREAM_VERSION, DEFAULT_INPUT_STREAM_VERSION);
// The job may want to specify a timeout to use when engaging the
// storage service. The default is currently 90 seconds. It may
// be necessary to increase this value for long latencies in larger
// jobs. If the timeout specified is greater than zero seconds use
// it, otherwise use the default service client timeout.
int storageConnectionTimeout = sessionConfiguration.getInt(KEY_STORAGE_CONNECTION_TIMEOUT, 0);
if (0 < storageConnectionTimeout) {
storageInteractionLayer.setTimeoutInMs(storageConnectionTimeout * 1000);
}
// Set the concurrency values equal to the that specified in the
// configuration file. If it does not exist, set it to the default
// value calculated as double the number of CPU cores on the client
// machine. The concurrency value is minimum of double the cores and
// the read/write property.
int cpuCores = 2 * Runtime.getRuntime().availableProcessors();
concurrentWrites = sessionConfiguration.getInt(KEY_CONCURRENT_CONNECTION_VALUE_OUT, Math.min(cpuCores, DEFAULT_CONCURRENT_WRITES));
// Set up the exponential retry policy.
//
minBackoff = sessionConfiguration.getInt(KEY_MIN_BACKOFF_INTERVAL, DEFAULT_MIN_BACKOFF_INTERVAL);
maxBackoff = sessionConfiguration.getInt(KEY_MAX_BACKOFF_INTERVAL, DEFAULT_MAX_BACKOFF_INTERVAL);
deltaBackoff = sessionConfiguration.getInt(KEY_BACKOFF_INTERVAL, DEFAULT_BACKOFF_INTERVAL);
maxRetries = sessionConfiguration.getInt(KEY_MAX_IO_RETRIES, DEFAULT_MAX_RETRY_ATTEMPTS);
storageInteractionLayer.setRetryPolicyFactory(new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
// read the self-throttling config.
selfThrottlingEnabled = sessionConfiguration.getBoolean(f3, DEFAULT_SELF_THROTTLE_ENABLE);
f5 = sessionConfiguration.getFloat(KEY_SELF_THROTTLE_READ_FACTOR, DEFAULT_SELF_THROTTLE_READ_FACTOR);
selfThrottlingWriteFactor = sessionConfiguration.getFloat(KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR);
if (!selfThrottlingEnabled) {
autoThrottlingEnabled = sessionConfiguration.getBoolean(KEY_AUTO_THROTTLE_ENABLE, DEFAULT_AUTO_THROTTLE_ENABLE);
if (autoThrottlingEnabled) {
ClientThrottlingIntercept.initializeSingleton();
}
} else {
// cannot enable both self-throttling and client-throttling
autoThrottlingEnabled = false;
}
OperationContext.setLoggingEnabledByDefault(sessionConfiguration.getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false));
LOG.debug("AzureNativeFileSystemStore init. Settings={},{},{},{{},{},{},{}},{{},{},{}}", concurrentWrites, tolerateOobAppends, storageConnectionTimeout > 0 ? storageConnectionTimeout : STORAGE_CONNECTION_TIMEOUT_DEFAULT, minBackoff, deltaBackoff, maxBackoff, maxRetries, selfThrottlingEnabled, f5, selfThrottlingWriteFactor);
} | 3.26 |
hadoop_AzureNativeFileSystemStore_connectUsingConnectionStringCredentials_rdh | /**
* Connect to Azure storage using account key credentials.
*/
private void connectUsingConnectionStringCredentials(final String accountName, final String containerName, final String accountKey) throws InvalidKeyException, StorageException, IOException, URISyntaxException {
// If the account name is "acc.blob.core.windows.net", then the
// rawAccountName is just "acc"
String rawAccountName = accountName.split("\\.")[0];
StorageCredentials credentials = new StorageCredentialsAccountAndKey(rawAccountName, accountKey);
connectUsingCredentials(accountName, credentials, containerName);
} | 3.26 |
hadoop_AzureNativeFileSystemStore_openInputStream_rdh | /**
* Opens a new input stream for the given blob (page or block blob)
* to read its data.
*/
private InputStream openInputStream(CloudBlobWrapper blob, Optional<Configuration> options) throws StorageException, IOException {
    if (blob instanceof CloudBlockBlobWrapper) {
LOG.debug("Using stream seek algorithm {}", inputStreamVersion);
switch (inputStreamVersion) {
case 1 :
return blob.openInputStream(getDownloadOptions(), getInstrumentedContext(isConcurrentOOBAppendAllowed()));
      case 2 :
boolean bufferedPreadDisabled = options.map(c -> c.getBoolean(f4, false)).orElse(false);
return new BlockBlobInputStream(((CloudBlockBlobWrapper) (blob)), getDownloadOptions(),
getInstrumentedContext(isConcurrentOOBAppendAllowed()), bufferedPreadDisabled);
default :
throw new IOException("Unknown seek algorithm: " + inputStreamVersion);
}
} else {
return new PageBlobInputStream(((CloudPageBlobWrapper) (blob)), getInstrumentedContext(isConcurrentOOBAppendAllowed()));
}
} | 3.26 |
hadoop_AzureNativeFileSystemStore_addTestHookToOperationContext_rdh | /**
* Add a test hook to modify the operation context we use for Azure Storage
* operations.
*
* @param testHook
* The test hook, or null to unset previous hooks.
*/
@VisibleForTesting
void addTestHookToOperationContext(TestHookOperationContext testHook) {
this.testHookOperationContext = testHook;
} | 3.26 |
hadoop_AzureNativeFileSystemStore_listRootBlobs_rdh | /**
* This private method uses the root directory or the original container to
* list blobs under the directory or container given a specified prefix for
* the directory depending on whether the original file system object was
* constructed with a short- or long-form URI. It also uses the specified flat
* or hierarchical option, listing details options, request options, and
* operation context.
*
* @param aPrefix
* string name representing the prefix of containing blobs.
* @param useFlatBlobListing
* - the list is flat if true, or hierarchical otherwise.
* @param listingDetails
 * - determine whether snapshots, metadata, and committed/uncommitted
 * data are included in the listing
* @param options
* - object specifying additional options for the request. null =
* default options
* @param opContext
* - context of the current operation
 * @return blobItems : iterable collection of blob items.
* @throws URISyntaxException
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean useFlatBlobListing, EnumSet<BlobListingDetails> listingDetails, BlobRequestOptions options, OperationContext opContext) throws StorageException, URISyntaxException {
CloudBlobDirectoryWrapper v76 = this.f0.getDirectoryReference(aPrefix);
    return v76.listBlobs(null, useFlatBlobListing, listingDetails, options, opContext);
  } | 3.26 |
hadoop_AzureNativeFileSystemStore_connectUsingSASCredentials_rdh | /**
* Connect to Azure storage using shared access signature credentials.
*/
private void connectUsingSASCredentials(final String accountName, final String containerName, final String sas) throws InvalidKeyException, StorageException, IOException, URISyntaxException {
StorageCredentials credentials = new StorageCredentialsSharedAccessSignature(sas);
connectingUsingSAS = true;
connectUsingCredentials(accountName, credentials, containerName);
} | 3.26 |
hadoop_AzureNativeFileSystemStore_initialize_rdh | /**
* Method for the URI and configuration object necessary to create a storage
* session with an Azure session. It parses the scheme to ensure it matches
* the storage protocol supported by this file system.
*
* @param uri
* - URI for target storage blob.
* @param conf
* - reference to configuration object.
* @param instrumentation
* - the metrics source that will keep track of operations here.
* @throws IllegalArgumentException
* if URI or job object is null, or invalid scheme.
*/
@Override
public void initialize(URI uri, Configuration conf, AzureFileSystemInstrumentation instrumentation) throws IllegalArgumentException, AzureException, IOException {
if (null == instrumentation) {
throw new IllegalArgumentException("Null instrumentation");
}
this.instrumentation = instrumentation;
// Check that URI exists.
//
if (null == uri) {
throw new IllegalArgumentException("Cannot initialize WASB file system, URI is null");
}
// Check that configuration object is non-null.
//
if (null == conf) {
throw new IllegalArgumentException("Cannot initialize WASB file system, conf is null");
}
if (!conf.getBoolean(NativeAzureFileSystem.SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
// If not skip azure metrics, create bandwidthGaugeUpdater
this.bandwidthGaugeUpdater = new BandwidthGaugeUpdater(instrumentation);
}
// Incoming parameters validated. Capture the URI and the job configuration
// object.
//
sessionUri = uri;
sessionConfiguration = conf;
useSecureMode = conf.getBoolean(KEY_USE_SECURE_MODE, DEFAULT_USE_SECURE_MODE);
    useLocalSasKeyMode = conf.getBoolean(KEY_USE_LOCAL_SAS_KEY_MODE, DEFAULT_USE_LOCAL_SAS_KEY_MODE);
if (null == this.storageInteractionLayer) {
if (!useSecureMode) {this.storageInteractionLayer = new StorageInterfaceImpl();
} else {
this.storageInteractionLayer = new SecureStorageInterfaceImpl(useLocalSasKeyMode, conf);
}
}
// Configure Azure storage session.
configureAzureStorageSession();
// Start an Azure storage session.
//
createAzureStorageSession();
// Extract the directories that should contain page blobs
pageBlobDirs = getDirectorySet(KEY_PAGE_BLOB_DIRECTORIES);
LOG.debug("Page blob directories: {}", setToString(pageBlobDirs));
// User-agent
userAgentId = conf.get(USER_AGENT_ID_KEY, USER_AGENT_ID_DEFAULT);
// Extract the directories that should contain block blobs with compaction
blockBlobWithCompationDirs = getDirectorySet(KEY_BLOCK_BLOB_WITH_COMPACTION_DIRECTORIES);
LOG.debug("Block blobs with compaction directories: {}", setToString(blockBlobWithCompationDirs));
// Extract directories that should have atomic rename applied.
atomicRenameDirs = getDirectorySet(KEY_ATOMIC_RENAME_DIRECTORIES);
String hbaseRoot;
try {
// Add to this the hbase root directory, or /hbase if that is not set.
hbaseRoot = verifyAndConvertToStandardFormat(sessionConfiguration.get("hbase.rootdir", "hbase"));
if (hbaseRoot != null) {
atomicRenameDirs.add(hbaseRoot);
}
} catch (URISyntaxException e) {
LOG.warn("Unable to initialize HBase root as an atomic rename directory.");
}
LOG.debug("Atomic rename directories: {} ", setToString(atomicRenameDirs));
metadataKeyCaseSensitive = conf.getBoolean(KEY_BLOB_METADATA_KEY_CASE_SENSITIVE, true);
if (!metadataKeyCaseSensitive) {
LOG.info("{} configured as false. Blob metadata will be treated case insensitive.", KEY_BLOB_METADATA_KEY_CASE_SENSITIVE);
}
} | 3.26 |
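A minimal client-side sketch of the configuration this initialization reads; the key strings fs.azure.page.blob.dir and fs.azure.atomic.rename.dir are assumed values of the KEY_PAGE_BLOB_DIRECTORIES and KEY_ATOMIC_RENAME_DIRECTORIES constants above, and the account and container names are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class WasbInitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed key names for the directory sets read during initialize().
    conf.set("fs.azure.page.blob.dir", "/hbase/WALs,/hbase/oldWALs");
    conf.set("fs.azure.atomic.rename.dir", "/data");
    // FileSystem.get() triggers NativeAzureFileSystem.initialize(), which in turn
    // initializes the AzureNativeFileSystemStore shown above.
    FileSystem fs = FileSystem.get(new URI("wasb://container@account.blob.core.windows.net/"), conf);
    System.out.println(fs.getUri());
  }
}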
hadoop_AzureNativeFileSystemStore_getAccountFromAuthority_rdh | /**
* Method to extract the account name from an Azure URI.
*
* @param uri
* -- WASB blob URI
 * @return the account name for the URI.
* @throws URISyntaxException
 * if the URI does not have an authority, i.e. it is badly formed.
*/
private String getAccountFromAuthority(URI uri) throws URISyntaxException {
// Check to make sure that the authority is valid for the URI.
//
String authority = uri.getRawAuthority();
if (null == authority) {
// Badly formed or illegal URI.
//
throw new URISyntaxException(uri.toString(), "Expected URI with a valid authority");
}
// Check if the authority contains the delimiter separating the account name
// from the container.
//
if (!authority.contains(WASB_AUTHORITY_DELIMITER)) {
return authority;
}
// Split off the container name and the authority.
//
String[] authorityParts = authority.split(WASB_AUTHORITY_DELIMITER, 2);
// Because the string contains an '@' delimiter, a container must be
// specified.
//
if ((authorityParts.length < 2) || "".equals(authorityParts[0])) {
// Badly formed WASB authority since there is no container.
//
final String errMsg = String.format("URI '%s' has a malformed WASB authority, expected container name. " + "Authority takes the form wasb://[<container name>@]<account name>", uri.toString());
throw new IllegalArgumentException(errMsg);
}
// Return with the account name. It is possible that this name is NULL.
//
return authorityParts[1];
} | 3.26 |
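The same authority parsing can be shown on a plain string with only JDK types; a sketch assuming '@' as the value of WASB_AUTHORITY_DELIMITER, with placeholder account and container names:

public class WasbAuthorityParseSketch {
  private static final String WASB_AUTHORITY_DELIMITER = "@"; // assumed value of the constant

  static String accountFromAuthority(String authority) {
    if (!authority.contains(WASB_AUTHORITY_DELIMITER)) {
      return authority; // no container given, the authority is the account itself
    }
    String[] parts = authority.split(WASB_AUTHORITY_DELIMITER, 2);
    if (parts.length < 2 || parts[0].isEmpty()) {
      throw new IllegalArgumentException("Malformed WASB authority: " + authority);
    }
    return parts[1];
  }

  public static void main(String[] args) {
    // Prints "account.blob.core.windows.net"
    System.out.println(accountFromAuthority("container@account.blob.core.windows.net"));
  }
}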
hadoop_AzureNativeFileSystemStore_createAzureStorageSession_rdh | /**
* Establish a session with Azure blob storage based on the target URI. The
* method determines whether or not the URI target contains an explicit
* account or an implicit default cluster-wide account.
*
* @throws AzureException
* @throws IOException
*/
private void createAzureStorageSession() throws AzureException, IOException {
// Make sure this object was properly initialized with references to
// the sessionUri and sessionConfiguration.
if ((null == sessionUri) || (null == sessionConfiguration)) {
throw new AzureException("Filesystem object not initialized properly. " + "Unable to start session with Azure Storage server.");
}
// File system object initialized, attempt to establish a session
// with the Azure storage service for the target URI string.
try {
// Inspect the URI authority to determine the account and use the account
// to start an Azure blob client session using an account key for the
// the account or anonymously.
// For all URI's do the following checks in order:
// 1. Validate that <account> can be used with the current Hadoop
// cluster by checking it exists in the list of configured accounts
// for the cluster.
// 2. Look up the AccountKey in the list of configured accounts for the
// cluster.
// 3. If there is no AccountKey, assume anonymous public blob access
// when accessing the blob.
//
// If the URI does not specify a container use the default root container
// under the account name.
// Assertion: Container name on the session Uri should be non-null.
if (getContainerFromAuthority(sessionUri) == null) {
throw new AssertionError(String.format("Non-null container expected from session URI: %s.", sessionUri.toString()));
}
// Get the account name.
String accountName = getAccountFromAuthority(sessionUri);
if (null == accountName) {
// Account name is not specified as part of the URI. Throw indicating
// an invalid account name.
final String errMsg = String.format("Cannot load WASB file system account name not" + " specified in URI: %s.", sessionUri.toString());
throw new AzureException(errMsg);
}
instrumentation.setAccountName(accountName);
String containerName = getContainerFromAuthority(sessionUri);
instrumentation.setContainerName(containerName);
// Check whether this is a storage emulator account.
if (isStorageEmulatorAccount(accountName)) {
// It is an emulator account, connect to it with no credentials.
connectUsingCredentials(accountName, null, containerName);
return;
}
// If the securemode flag is set, WASB uses SecureStorageInterfaceImpl instance
// to communicate with Azure storage. In SecureStorageInterfaceImpl SAS keys
// are used to communicate with Azure storage, so connectToAzureStorageInSecureMode
// instantiates the default container using a SAS Key.
if (useSecureMode) {
connectToAzureStorageInSecureMode(accountName, containerName, sessionUri);
return;
}
// Check whether we have a shared access signature for that container.
String propertyValue = sessionConfiguration.get(((f1 + containerName) + ".") + accountName);
if (propertyValue != null) {
// SAS was found. Connect using that.
connectUsingSASCredentials(accountName, containerName, propertyValue);
return;
}
// Check whether the account is configured with an account key.
propertyValue = getAccountKeyFromConfiguration(accountName, sessionConfiguration);
if (StringUtils.isNotEmpty(propertyValue)) {
// Account key was found.
// Create the Azure storage session using the account key and container.
connectUsingConnectionStringCredentials(getAccountFromAuthority(sessionUri), getContainerFromAuthority(sessionUri), propertyValue);
} else {
LOG.debug("The account access key is not configured for {}. " + "Now try anonymous access.", sessionUri);
connectUsingAnonymousCredentials(sessionUri);
}
} catch (Exception e) {
// Caught exception while attempting to initialize the Azure File
// System store, re-throw the exception.
throw new AzureException(e);
}
} | 3.26 |
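The lookup order above (emulator account, secure mode, per-container SAS, account key, anonymous) can be summarized in a small hypothetical helper; the enum and method below are illustration only and not part of the Hadoop API:

public class WasbCredentialOrderSketch {
  enum CredentialSource { EMULATOR, SECURE_MODE_SAS, CONTAINER_SAS, ACCOUNT_KEY, ANONYMOUS }

  // Mirrors the decision order in createAzureStorageSession(); the inputs are assumed
  // to have been looked up from the Configuration beforehand.
  static CredentialSource resolve(boolean emulatorAccount, boolean secureMode,
                                  String containerSas, String accountKey) {
    if (emulatorAccount) {
      return CredentialSource.EMULATOR;
    }
    if (secureMode) {
      return CredentialSource.SECURE_MODE_SAS;
    }
    if (containerSas != null) {
      return CredentialSource.CONTAINER_SAS;
    }
    if (accountKey != null && !accountKey.isEmpty()) {
      return CredentialSource.ACCOUNT_KEY;
    }
    return CredentialSource.ANONYMOUS;
  }
}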
hadoop_AzureNativeFileSystemStore_buildUpList_rdh | /**
 * Build up a metadata list of blobs in an Azure blob directory. This method
 * uses an in-order traversal of the blob directory structure to maintain
 * the sorted order of the blob names.
*
* @param aCloudBlobDirectory
* Azure blob directory
* @param metadataHashMap
* a map of file metadata objects for each
* non-directory blob.
 * @param maxListingCount
 * maximum length of the built-up list.
 * @param maxListingDepth
 * maximum depth of directory recursion; a negative value means unbounded.
 */
private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory, HashMap<String, FileMetadata> metadataHashMap, final int maxListingCount, final int maxListingDepth) throws Exception {
// Push the blob directory onto the stack.
//
AzureLinkedStack<Iterator<ListBlobItem>> dirIteratorStack = new AzureLinkedStack<Iterator<ListBlobItem>>();
Iterable<ListBlobItem> blobItems = aCloudBlobDirectory.listBlobs(null, false, EnumSet.of(BlobListingDetails.METADATA), null, getInstrumentedContext());
Iterator<ListBlobItem> blobItemIterator = blobItems.iterator();
if ((0 == maxListingDepth) || (0 == maxListingCount)) {
// Recurrence depth and listing count are already exhausted. Return
// immediately.
return;
}
// The directory listing depth is unbounded if the maximum listing depth
// is negative.
final boolean isUnboundedDepth = maxListingDepth < 0;
// Reset the current directory listing depth.
int listingDepth = 1;
// Loop until all directories have been traversed in-order. Loop only while
// the following conditions are satisfied:
// (1) The stack is not empty, and
// (2) maxListingCount > 0 implies that the number of items in the
// metadata list is less than the max listing count.
while ((null != blobItemIterator) && ((maxListingCount <= 0) || (metadataHashMap.size() < maxListingCount))) {
while (blobItemIterator.hasNext()) {
// Check if the count of items on the list exhausts the maximum
// listing count.
//
if ((0 < maxListingCount) && (metadataHashMap.size() >= maxListingCount)) {
break;
}
ListBlobItem blobItem = blobItemIterator.next();
// Add the file metadata to the list if this is not a blob
// directory item.
//
if ((blobItem instanceof CloudBlockBlobWrapper) || (blobItem instanceof CloudPageBlobWrapper)) {
String blobKey = null;
CloudBlobWrapper blob = ((CloudBlobWrapper) (blobItem));
BlobProperties properties = blob.getProperties();
// Determine format of the blob name depending on whether an absolute
// path is being used or not.
blobKey = normalizeKey(blob);
FileMetadata metadata;
if (retrieveFolderAttribute(blob)) {
metadata = new FileMetadata(blobKey, properties.getLastModified().getTime(), getPermissionStatus(blob),
BlobMaterialization.Explicit, hadoopBlockSize);
} else {
metadata = new FileMetadata(blobKey, getDataLength(blob, properties), properties.getLastModified().getTime(), getPermissionStatus(blob), hadoopBlockSize);
}
// Add the metadata but remove duplicates. Note that the azure
// storage java SDK returns two types of entries: CloudBlobWrapper
// and CloudDirectoryWrapper. In the case where WASB generated the
// data, there will be an empty blob for each "directory", and we will
// receive a CloudBlobWrapper. If there are also files within this
// "directory", we will also receive a CloudDirectoryWrapper. To
// complicate matters, the data may not be generated by WASB, in
// which case we may not have an empty blob for each "directory".
// So, sometimes we receive both a CloudBlobWrapper and a
// CloudDirectoryWrapper for each directory, and sometimes we receive
// one or the other but not both. We remove duplicates, but
// prefer CloudBlobWrapper over CloudDirectoryWrapper.
// Furthermore, it is very unfortunate that the list results are not
// ordered, and it is a partial list which uses continuation. So
// the HashMap is the best structure to remove the duplicates, despite
// its potential large size.
metadataHashMap.put(blobKey, metadata);
} else if (blobItem instanceof CloudBlobDirectoryWrapper) {
CloudBlobDirectoryWrapper directory = ((CloudBlobDirectoryWrapper) (blobItem));
// This is a directory blob, push the current iterator onto
// the stack of iterators and start iterating through the current
// directory.
if (isUnboundedDepth || (maxListingDepth > listingDepth)) {
// Push the current directory on the stack and increment the listing
// depth.
dirIteratorStack.push(blobItemIterator);
++listingDepth;
// The current blob item represents the new directory. Get
// an iterator for this directory and continue by iterating through
// this directory.
blobItems = directory.listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, getInstrumentedContext());
blobItemIterator = blobItems.iterator();
} else {
// Determine format of directory name depending on whether an
// absolute path is being used or not.
String dirKey = normalizeKey(directory);
// Add the directory metadata to the list only if it's not already
// there. See earlier note, we prefer CloudBlobWrapper over
// CloudDirectoryWrapper because it may have additional metadata (
// properties and ACLs).
if (!metadataHashMap.containsKey(dirKey)) {
// Reached the targeted listing depth. Return metadata for the
// directory using default permissions.
//
// Note: Something smarter should be done about permissions. Maybe
// inherit the permissions of the first non-directory blob.
// Also, getting a proper value for last-modified is tricky.
//
FileMetadata directoryMetadata = new FileMetadata(dirKey, 0, defaultPermissionNoBlobMetadata(), BlobMaterialization.Implicit, hadoopBlockSize);
// Add the directory metadata to the list.
metadataHashMap.put(dirKey, directoryMetadata);
}
}
}
}
// Traversal of directory tree
// Check if the iterator stack is empty. If it is, set the next blob
// iterator to null. This will act as a terminator for the loop.
// Otherwise pop the next iterator from the stack and continue looping.
//
if (dirIteratorStack.isEmpty()) {
blobItemIterator = null;
} else {
// Pop the next directory item from the stack and decrement the
// depth.
blobItemIterator = dirIteratorStack.pop();
--listingDepth;
// Assertion: Listing depth should not be less than zero.
if (listingDepth < 0) {
throw new AssertionError("Non-negative listing depth expected");
}
}
}
} | 3.26 |
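The explicit-stack, depth-bounded traversal pattern used above can be demonstrated on a plain directory tree with only JDK types; a sketch of the pattern, not the WASB implementation:

import java.io.File;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;

public class DepthBoundedTraversalSketch {
  // Visits files in-order up to maxDepth directory levels; maxDepth < 0 means unbounded,
  // mirroring the isUnboundedDepth convention in buildUpList().
  static void traverse(File root, int maxDepth) {
    Deque<Iterator<File>> stack = new ArrayDeque<>();
    Iterator<File> it = children(root);
    int depth = 1;
    while (it != null) {
      while (it.hasNext()) {
        File f = it.next();
        if (f.isFile()) {
          System.out.println(f.getPath());
        } else if (f.isDirectory() && (maxDepth < 0 || depth < maxDepth)) {
          stack.push(it);     // remember where we were in the parent directory
          depth++;
          it = children(f);   // descend into the subdirectory
        }
      }
      if (stack.isEmpty()) {
        it = null;            // terminator, traversal complete
      } else {
        it = stack.pop();     // resume the parent directory listing
        depth--;
      }
    }
  }

  private static Iterator<File> children(File dir) {
    File[] files = dir.listFiles();
    List<File> list = (files == null) ? Collections.<File>emptyList() : Arrays.asList(files);
    return list.iterator();
  }
}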
hadoop_AzureNativeFileSystemStore_createPermissionJsonSerializer_rdh | /**
* Creates a JSON serializer that can serialize a PermissionStatus object into
* the JSON string we want in the blob metadata.
*
* @return The JSON serializer.
*/
private static JSON createPermissionJsonSerializer() {
org.eclipse.jetty.util.log.Log.getProperties().setProperty("org.eclipse.jetty.util.log.announce", "false");
JSON serializer = new JSON();
serializer.addConvertor(PermissionStatus.class, new PermissionStatusJsonSerializer());
return serializer;
} | 3.26 |
hadoop_ZStandardCompressor_setInputFromSavedData_rdh | // copy enough data from userBuf to uncompressedDirectBuf
private void setInputFromSavedData() {
int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
uncompressedDirectBuf.put(userBuf, userBufOff, len);
userBufLen -= len;
userBufOff += len;
uncompressedDirectBufLen = uncompressedDirectBuf.position();
} | 3.26 |
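The same partial-copy pattern can be shown with JDK classes alone; a sketch of the buffering step only, not the ZStandard JNI path:

import java.nio.ByteBuffer;

public class DirectBufferCopySketch {
  public static void main(String[] args) {
    byte[] userBuf = new byte[300];
    int userBufOff = 0;
    int userBufLen = userBuf.length;

    // A small direct buffer forces a partial copy, as in setInputFromSavedData().
    ByteBuffer uncompressedDirectBuf = ByteBuffer.allocateDirect(128);

    int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
    uncompressedDirectBuf.put(userBuf, userBufOff, len);
    userBufLen -= len;
    userBufOff += len;

    // position() now reflects how many bytes are staged for compression.
    System.out.println("staged=" + uncompressedDirectBuf.position()
        + " remainingUserBytes=" + userBufLen);
  }
}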