name | code_snippet | score |
---|---|---|
hadoop_OBSObjectBucketUtils_renameFolder | /**
* Implement rename folder.
*
* @param owner OBS File System instance
* @param srcKey source folder key
* @param dstKey destination folder key
* @throws IOException any problem with rename folder
*/
static void renameFolder(final OBSFileSystem owner, final String srcKey,
final String dstKey)
throws IOException {
long startTime = System.nanoTime();
List<KeyAndVersion> keysToDelete = new ArrayList<>();
createFakeDirectory(owner, dstKey);
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(owner.getBucket());
request.setPrefix(srcKey);
request.setMaxKeys(owner.getMaxKeys());
ObjectListing objects = OBSCommonUtils.listObjects(owner, request);
List<Future<CopyObjectResult>> copyfutures = new LinkedList<>();
while (true) {
for (ObsObject summary : objects.getObjects()) {
if (summary.getObjectKey().equals(srcKey)) {
// skip prefix itself
continue;
}
keysToDelete.add(new KeyAndVersion(summary.getObjectKey()));
String newDstKey = dstKey + summary.getObjectKey()
.substring(srcKey.length());
// copyFile(summary.getObjectKey(), newDstKey,
// summary.getMetadata().getContentLength());
copyfutures.add(
copyFileAsync(owner, summary.getObjectKey(), newDstKey,
summary.getMetadata().getContentLength()));
if (keysToDelete.size() == owner.getMaxEntriesToDelete()) {
waitAllCopyFinished(copyfutures);
copyfutures.clear();
}
}
if (!objects.isTruncated()) {
if (!keysToDelete.isEmpty()) {
waitAllCopyFinished(copyfutures);
copyfutures.clear();
}
break;
}
objects = OBSCommonUtils.continueListObjects(owner, objects);
}
keysToDelete.add(new KeyAndVersion(srcKey));
DeleteObjectsRequest deleteObjectsRequest = new DeleteObjectsRequest(
owner.getBucket());
deleteObjectsRequest.setKeyAndVersions(
keysToDelete.toArray(new KeyAndVersion[0]));
OBSCommonUtils.deleteObjects(owner, deleteObjectsRequest);
if (LOG.isDebugEnabled()) {
long delay = System.nanoTime() - startTime;
LOG.debug(
"OBSFileSystem rename: "
+ ", {src="
+ srcKey
+ ", dst="
+ dstKey
+ ", delay="
+ delay
+ "}");
}
} | 3.68 |
hadoop_RequestFactoryImpl_withContentEncoding | /**
* Content encoding.
* @param value new value
* @return the builder
*/
public RequestFactoryBuilder withContentEncoding(final String value) {
contentEncoding = value;
return this;
} | 3.68 |
morf_RemoveIndex_getTableName | /**
* @return The name of the table to remove the index from.
*/
public String getTableName() {
return tableName;
} | 3.68 |
framework_JSR356WebsocketInitializer_isAtmosphereAvailable | /**
* Checks if Atmosphere is available on the classpath.
*
* @return <code>true</code> if Atmosphere is available, <code>false</code>
* otherwise
*/
public static boolean isAtmosphereAvailable() {
return atmosphereAvailable;
} | 3.68 |
flink_BaseMappingExtractor_correctVarArgMethod | /**
* Special case for Scala which generates two methods when using var-args (a {@code Seq < String
* >} and {@code String...}). This method searches for the Java-like variant.
*/
static Method correctVarArgMethod(Method method) {
final int paramCount = method.getParameterCount();
final Class<?>[] paramClasses = method.getParameterTypes();
if (paramCount > 0
&& paramClasses[paramCount - 1].getName().equals("scala.collection.Seq")) {
final Type[] paramTypes = method.getGenericParameterTypes();
final ParameterizedType seqType = (ParameterizedType) paramTypes[paramCount - 1];
final Type varArgType = seqType.getActualTypeArguments()[0];
return ExtractionUtils.collectMethods(method.getDeclaringClass(), method.getName())
.stream()
.filter(Method::isVarArgs)
.filter(candidate -> candidate.getParameterCount() == paramCount)
.filter(
candidate -> {
final Type[] candidateParamTypes =
candidate.getGenericParameterTypes();
for (int i = 0; i < paramCount - 1; i++) {
if (candidateParamTypes[i] != paramTypes[i]) {
return false;
}
}
final Class<?> candidateVarArgType =
candidate.getParameterTypes()[paramCount - 1];
return candidateVarArgType.isArray()
&&
// check for Object is needed in case of Scala primitives
// (e.g. Int)
(varArgType == Object.class
|| candidateVarArgType.getComponentType()
== varArgType);
})
.findAny()
.orElse(method);
}
return method;
} | 3.68 |
streampipes_PipelineManager_addPipeline | /**
* Adds a new pipeline for the user identified by the given principal ID to the storage
*
* @param principalSid the ID of the owner principal
* @param pipeline to be added
* @return pipelineId of the stored pipeline
*/
public static String addPipeline(String principalSid,
Pipeline pipeline) {
String pipelineId = Objects.isNull(pipeline.getPipelineId())
? UUIDGenerator.generateUuid()
: pipeline.getPipelineId();
preparePipelineBasics(principalSid, pipeline, pipelineId);
Operations.storePipeline(pipeline);
Permission permission = new PermissionManager().makePermission(pipeline, principalSid);
getPermissionStorage().addPermission(permission);
return pipelineId;
} | 3.68 |
hadoop_ListResultEntrySchema_lastModified | /**
* Get the lastModified value.
*
* @return the lastModified value
*/
public String lastModified() {
return lastModified;
} | 3.68 |
framework_VScrollTable_getNavigationPageDownKey | /**
* Get the key that moves the selection one page down in the table. By
* default this is the Page Down key but by overriding this you can change
* the key to whatever you want.
*
* @return the key code that moves the selection one page down
*/
protected int getNavigationPageDownKey() {
return KeyCodes.KEY_PAGEDOWN;
} | 3.68 |
hadoop_AMRMProxyService_getPipelines | /**
* Gets the Request interceptor chains for all the applications.
*
* @return the request interceptor chains.
*/
protected Map<ApplicationId, RequestInterceptorChainWrapper> getPipelines() {
return this.applPipelineMap;
} | 3.68 |
hibernate-validator_AnnotationMetaDataProvider_findConstraints | /**
* Finds all constraint annotations defined for the given constrainable and returns them in a list of constraint
* descriptors.
*
* @param constrainable The constrainable element (will be the executable for a method parameter).
* @param annotations The annotations.
* @param kind The constraint location kind.
*
* @return A list of constraint descriptors for all constraint specified for the given member.
*/
private List<ConstraintDescriptorImpl<?>> findConstraints(Constrainable constrainable, Annotation[] annotations,
ConstraintLocationKind kind) {
if ( annotations.length == 0 ) {
return Collections.emptyList();
}
List<ConstraintDescriptorImpl<?>> metaData = newArrayList();
for ( Annotation annotation : annotations ) {
metaData.addAll( findConstraintAnnotations( constrainable, annotation, kind ) );
}
return metaData;
} | 3.68 |
hmily_Resource_getResult | /**
* Gets result.
*
* @param r the result code
* @return the result
*/
public static Result getResult(final int r) {
Result rs = READONLY;
switch (r) {
case 0:
rs = COMMIT;
break;
case 1:
rs = ROLLBACK;
break;
case 2:
rs = READONLY;
break;
default:
break;
}
return rs;
} | 3.68 |
hadoop_HttpReferrerAuditHeader_withPath2 | /**
* Set Path2 of operation.
* @param value new value
* @return the builder
*/
public Builder withPath2(final String value) {
path2 = value;
return this;
} | 3.68 |
flink_PrioritizedOperatorSubtaskState_getPrioritizedRawKeyedState | /**
* Returns an immutable list with all alternative snapshots to restore the raw keyed state, in
* the order in which we should attempt to restore.
*/
@Nonnull
public List<StateObjectCollection<KeyedStateHandle>> getPrioritizedRawKeyedState() {
return prioritizedRawKeyedState;
} | 3.68 |
flink_UnsortedGrouping_combineGroup | /**
* Applies a GroupCombineFunction on a grouped {@link DataSet}. A GroupCombineFunction is
* similar to a GroupReduceFunction but does not perform a full data exchange. Instead, the
* CombineFunction calls the combine method once per partition for combining a group of results.
* This operator is suitable for combining values into an intermediate format before doing a
proper groupReduce where the data is shuffled across the nodes for further reduction. The
* GroupReduce operator can also be supplied with a combiner by implementing the RichGroupReduce
* function. The combine method of the RichGroupReduce function demands input and output type to
be the same. The CombineFunction, on the other hand, can have an arbitrary output type.
*
* @param combiner The GroupCombineFunction that is applied on the DataSet.
* @return A GroupCombineOperator which represents the combined DataSet.
*/
public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) {
if (combiner == null) {
throw new NullPointerException("GroupCombine function must not be null.");
}
TypeInformation<R> resultType =
TypeExtractor.getGroupCombineReturnTypes(
combiner,
this.getInputDataSet().getType(),
Utils.getCallLocationName(),
true);
return new GroupCombineOperator<T, R>(
this, resultType, inputDataSet.clean(combiner), Utils.getCallLocationName());
} | 3.68 |
framework_BasicDateClickHandler_setDates | /**
* Set the start and end dates for the event.
*
* @param event
* The event that the start and end dates should be set
* @param start
* The start date
* @param end
* The end date
*/
protected void setDates(DateClickEvent event, Date start, Date end) {
event.getComponent().setStartDate(start);
event.getComponent().setEndDate(end);
} | 3.68 |
framework_VAbsoluteLayout_setWidgetCaption | /**
* Sets a caption for a contained widget.
*
* @param child
* The child widget to set the caption for
* @param caption
* The caption of the widget
*/
public void setWidgetCaption(Widget child, VCaption caption) {
AbsoluteWrapper wrapper = getChildWrapper(child);
if (wrapper != null) {
if (caption != null) {
if (!getChildren().contains(caption)) {
super.add(caption, canvas);
}
wrapper.setCaption(caption);
caption.updateCaption();
wrapper.updateCaptionPosition();
} else if (wrapper.getCaption() != null) {
wrapper.setCaption(null);
}
}
} | 3.68 |
flink_Tuple18_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
* @param f13 The value for field 13
* @param f14 The value for field 14
* @param f15 The value for field 15
* @param f16 The value for field 16
* @param f17 The value for field 17
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
} | 3.68 |
framework_GridElement_getFooterCells | /**
* Gets list of footer cell elements on given row.
*
* @param rowIndex
* Row index
* @return Footer cell elements on given row.
*/
public List<GridCellElement> getFooterCells(int rowIndex) {
List<GridCellElement> footers = new ArrayList<GridCellElement>();
for (TestBenchElement e : TestBenchElement.wrapElements(
getSubPart("#footer[" + rowIndex + "]").findElements(
By.xpath("./td")),
getCommandExecutor())) {
footers.add(e.wrap(GridCellElement.class));
}
return footers;
} | 3.68 |
framework_Label_getPropertyDataSource | /**
* Gets the viewing data-source property.
*
* @return the data source property.
* @see Property.Viewer#getPropertyDataSource()
*/
@Override
public Property getPropertyDataSource() {
return dataSource;
} | 3.68 |
flink_TableConfig_getMaxIdleStateRetentionTime | /**
* NOTE: Currently the concept of min/max idle state retention has been deprecated and only idle
* state retention time is supported. The min idle state retention is regarded as idle state
* retention and the max idle state retention is derived from idle state retention as 1.5 x idle
* state retention.
*
* @return The maximum time until state which was not updated will be retained.
* @deprecated use {@link #getIdleStateRetention()} instead.
*/
@Deprecated
public long getMaxIdleStateRetentionTime() {
return getMinIdleStateRetentionTime() * 3 / 2;
} | 3.68 |
hadoop_CommitUtils_validateCollectionClass | /**
* Verify that all instances in a collection are of the given class.
* @param it iterator
* @param classname classname to require
* @throws ValidationFailure on a failure
*/
public static void validateCollectionClass(Iterable it, Class classname)
throws ValidationFailure {
for (Object o : it) {
verify(o.getClass().equals(classname),
"Collection element is not a %s: %s", classname, o.getClass());
}
} | 3.68 |
framework_VTooltip_isTooltipOpen | /**
* For assistive tooltips to work correctly we must have the tooltip visible
* and attached to the DOM well in advance. For this reason both isShowing
* and isVisible return false positives. We can't override either of them as
* external code may depend on this behavior.
*
* @return true if the tooltip is currently open and positioned on screen, false otherwise
*/
public boolean isTooltipOpen() {
return super.isShowing() && super.isVisible() && getPopupLeft() > 0
&& getPopupTop() > 0;
} | 3.68 |
hadoop_TFileDumper_dumpInfo | /**
* Dump information about TFile.
*
* @param file
* Path string of the TFile
* @param out
* PrintStream to output the information.
* @param conf
* The configuration object.
* @throws IOException
*/
static public void dumpInfo(String file, PrintStream out, Configuration conf)
throws IOException {
final int maxKeySampleLen = 16;
Path path = new Path(file);
FileSystem fs = path.getFileSystem(conf);
long length = fs.getFileStatus(path).getLen();
FSDataInputStream fsdis = fs.open(path);
TFile.Reader reader = new TFile.Reader(fsdis, length, conf);
try {
LinkedHashMap<String, String> properties =
new LinkedHashMap<String, String>();
int blockCnt = reader.readerBCF.getBlockCount();
int metaBlkCnt = reader.readerBCF.metaIndex.index.size();
properties.put("BCFile Version", reader.readerBCF.version.toString());
properties.put("TFile Version", reader.tfileMeta.version.toString());
properties.put("File Length", Long.toString(length));
properties.put("Data Compression", reader.readerBCF
.getDefaultCompressionName());
properties.put("Record Count", Long.toString(reader.getEntryCount()));
properties.put("Sorted", Boolean.toString(reader.isSorted()));
if (reader.isSorted()) {
properties.put("Comparator", reader.getComparatorName());
}
properties.put("Data Block Count", Integer.toString(blockCnt));
long dataSize = 0, dataSizeUncompressed = 0;
if (blockCnt > 0) {
for (int i = 0; i < blockCnt; ++i) {
BlockRegion region =
reader.readerBCF.dataIndex.getBlockRegionList().get(i);
dataSize += region.getCompressedSize();
dataSizeUncompressed += region.getRawSize();
}
properties.put("Data Block Bytes", Long.toString(dataSize));
if (!reader.readerBCF.getDefaultCompressionName().equals("none")) {
properties.put("Data Block Uncompressed Bytes", Long
.toString(dataSizeUncompressed));
properties.put("Data Block Compression Ratio", String.format(
"1:%.1f", (double) dataSizeUncompressed / dataSize));
}
}
properties.put("Meta Block Count", Integer.toString(metaBlkCnt));
long metaSize = 0, metaSizeUncompressed = 0;
if (metaBlkCnt > 0) {
Collection<MetaIndexEntry> metaBlks =
reader.readerBCF.metaIndex.index.values();
boolean calculateCompression = false;
for (Iterator<MetaIndexEntry> it = metaBlks.iterator(); it.hasNext();) {
MetaIndexEntry e = it.next();
metaSize += e.getRegion().getCompressedSize();
metaSizeUncompressed += e.getRegion().getRawSize();
if (e.getCompressionAlgorithm() != Compression.Algorithm.NONE) {
calculateCompression = true;
}
}
properties.put("Meta Block Bytes", Long.toString(metaSize));
if (calculateCompression) {
properties.put("Meta Block Uncompressed Bytes", Long
.toString(metaSizeUncompressed));
properties.put("Meta Block Compression Ratio", String.format(
"1:%.1f", (double) metaSizeUncompressed / metaSize));
}
}
properties.put("Meta-Data Size Ratio", String.format("1:%.1f",
(double) dataSize / metaSize));
long leftOverBytes = length - dataSize - metaSize;
long miscSize =
BCFile.Magic.size() * 2 + Long.SIZE / Byte.SIZE + Version.size();
long metaIndexSize = leftOverBytes - miscSize;
properties.put("Meta Block Index Bytes", Long.toString(metaIndexSize));
properties.put("Headers Etc Bytes", Long.toString(miscSize));
// Now output the properties table.
int maxKeyLength = 0;
Set<Map.Entry<String, String>> entrySet = properties.entrySet();
for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it
.hasNext();) {
Map.Entry<String, String> e = it.next();
if (e.getKey().length() > maxKeyLength) {
maxKeyLength = e.getKey().length();
}
}
for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it
.hasNext();) {
Map.Entry<String, String> e = it.next();
out.printf("%s : %s%n", Align.format(e.getKey(), maxKeyLength,
Align.LEFT), e.getValue());
}
out.println();
reader.checkTFileDataIndex();
if (blockCnt > 0) {
String blkID = "Data-Block";
int blkIDWidth = Align.calculateWidth(blkID, blockCnt);
int blkIDWidth2 = Align.calculateWidth("", blockCnt);
String offset = "Offset";
int offsetWidth = Align.calculateWidth(offset, length);
String blkLen = "Length";
int blkLenWidth =
Align.calculateWidth(blkLen, dataSize / blockCnt * 10);
String rawSize = "Raw-Size";
int rawSizeWidth =
Align.calculateWidth(rawSize, dataSizeUncompressed / blockCnt * 10);
String records = "Records";
int recordsWidth =
Align.calculateWidth(records, reader.getEntryCount() / blockCnt
* 10);
String endKey = "End-Key";
int endKeyWidth = Math.max(endKey.length(), maxKeySampleLen * 2 + 5);
out.printf("%s %s %s %s %s %s%n", Align.format(blkID, blkIDWidth,
Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER),
Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(
rawSize, rawSizeWidth, Align.CENTER), Align.format(records,
recordsWidth, Align.CENTER), Align.format(endKey, endKeyWidth,
Align.LEFT));
for (int i = 0; i < blockCnt; ++i) {
BlockRegion region =
reader.readerBCF.dataIndex.getBlockRegionList().get(i);
TFileIndexEntry indexEntry = reader.tfileIndex.getEntry(i);
out.printf("%s %s %s %s %s ", Align.format(Align.format(i,
blkIDWidth2, Align.ZERO_PADDED), blkIDWidth, Align.LEFT), Align
.format(region.getOffset(), offsetWidth, Align.LEFT), Align
.format(region.getCompressedSize(), blkLenWidth, Align.LEFT),
Align.format(region.getRawSize(), rawSizeWidth, Align.LEFT),
Align.format(indexEntry.kvEntries, recordsWidth, Align.LEFT));
byte[] key = indexEntry.key;
boolean asAscii = true;
int sampleLen = Math.min(maxKeySampleLen, key.length);
for (int j = 0; j < sampleLen; ++j) {
byte b = key[j];
if ((b < 32 && b != 9) || (b == 127)) {
asAscii = false;
}
}
if (!asAscii) {
out.print("0X");
for (int j = 0; j < sampleLen; ++j) {
byte b = key[j];
out.printf("%X", b);
}
} else {
out.print(new String(key, 0, sampleLen, StandardCharsets.UTF_8));
}
if (sampleLen < key.length) {
out.print("...");
}
out.println();
}
}
out.println();
if (metaBlkCnt > 0) {
String name = "Meta-Block";
int maxNameLen = 0;
Set<Map.Entry<String, MetaIndexEntry>> metaBlkEntrySet =
reader.readerBCF.metaIndex.index.entrySet();
for (Iterator<Map.Entry<String, MetaIndexEntry>> it =
metaBlkEntrySet.iterator(); it.hasNext();) {
Map.Entry<String, MetaIndexEntry> e = it.next();
if (e.getKey().length() > maxNameLen) {
maxNameLen = e.getKey().length();
}
}
int nameWidth = Math.max(name.length(), maxNameLen);
String offset = "Offset";
int offsetWidth = Align.calculateWidth(offset, length);
String blkLen = "Length";
int blkLenWidth =
Align.calculateWidth(blkLen, metaSize / metaBlkCnt * 10);
String rawSize = "Raw-Size";
int rawSizeWidth =
Align.calculateWidth(rawSize, metaSizeUncompressed / metaBlkCnt
* 10);
String compression = "Compression";
int compressionWidth = compression.length();
out.printf("%s %s %s %s %s%n", Align.format(name, nameWidth,
Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER),
Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(
rawSize, rawSizeWidth, Align.CENTER), Align.format(compression,
compressionWidth, Align.LEFT));
for (Iterator<Map.Entry<String, MetaIndexEntry>> it =
metaBlkEntrySet.iterator(); it.hasNext();) {
Map.Entry<String, MetaIndexEntry> e = it.next();
String blkName = e.getValue().getMetaName();
BlockRegion region = e.getValue().getRegion();
String blkCompression =
e.getValue().getCompressionAlgorithm().getName();
out.printf("%s %s %s %s %s%n", Align.format(blkName, nameWidth,
Align.LEFT), Align.format(region.getOffset(), offsetWidth,
Align.LEFT), Align.format(region.getCompressedSize(),
blkLenWidth, Align.LEFT), Align.format(region.getRawSize(),
rawSizeWidth, Align.LEFT), Align.format(blkCompression,
compressionWidth, Align.LEFT));
}
}
} finally {
IOUtils.cleanupWithLogger(LOG, reader, fsdis);
}
} | 3.68 |
framework_CaptionLeak_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
VerticalLayout root = new VerticalLayout();
root.setSizeFull();
root.setMargin(false);
root.setSpacing(false);
HorizontalLayout layout = new HorizontalLayout();
Panel parent = new Panel();
Button setLeakyContent = makeButton("Set leaky content", parent,
VerticalLayout.class);
Button setNonLeakyContent = makeButton("Set non leaky content", parent,
CssLayout.class);
layout.addComponent(setLeakyContent);
layout.addComponent(setNonLeakyContent);
root.addComponent(layout);
root.addComponent(parent);
setContent(root);
} | 3.68 |
framework_ServerRpcQueue_size | /**
* Returns the current size of the queue.
*
* @return the number of invocations in the queue
*/
public int size() {
return pendingInvocations.size();
} | 3.68 |
framework_RefreshRenderedCellsOnlyIfAttached_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "There shouldn't be any attempts to refresh table's cells if the table isn't attached.";
} | 3.68 |
hbase_IPCUtil_wrapException | /**
* Takes an Exception, the address, and if pertinent, the RegionInfo for the Region we were trying
* to connect to and returns an IOException with the input exception as the cause. The new
* exception provides the stack trace of the place where the exception is thrown and some extra
* diagnostics information.
* <p/>
* Notice that we will try our best to keep the original exception type when creating a new
* exception, especially for the 'connection' exceptions, as it is used to determine whether this
* is a network issue or the remote side tells us clearly what is wrong, which is important
* deciding whether to retry. If it is not possible to create a new exception with the same type,
* for example, the {@code error} is not an {@link IOException}, an {@link IOException} will be
* created.
* @param addr target address
* @param error the relevant exception
* @return an exception to throw
* @see ClientExceptionsUtil#isConnectionException(Throwable)
*/
static IOException wrapException(Address addr, RegionInfo regionInfo, Throwable error) {
if (error instanceof ConnectException) {
// connection refused; include the host:port in the error
return (IOException) new ConnectException(
"Call to " + getCallTarget(addr, regionInfo) + " failed on connection exception: " + error)
.initCause(error);
} else if (error instanceof SocketTimeoutException) {
return (IOException) new SocketTimeoutException(
"Call to " + getCallTarget(addr, regionInfo) + " failed because " + error).initCause(error);
} else if (error instanceof ConnectionClosingException) {
return new ConnectionClosingException(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error,
error);
} else if (error instanceof ServerTooBusyException) {
// we already have address in the exception message
return (IOException) error;
} else if (error instanceof DoNotRetryIOException) {
// try our best to keep the original exception type
try {
return (IOException) error.getClass().asSubclass(DoNotRetryIOException.class)
.getConstructor(String.class)
.newInstance(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error)
.initCause(error);
} catch (InstantiationException | IllegalAccessException | IllegalArgumentException
| InvocationTargetException | NoSuchMethodException | SecurityException e) {
// just ignore, will just new a DoNotRetryIOException instead below
}
return new DoNotRetryIOException(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error,
error);
} else if (error instanceof ConnectionClosedException) {
return new ConnectionClosedException(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error,
error);
} else if (error instanceof CallTimeoutException) {
return new CallTimeoutException(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error,
error);
} else if (error instanceof ClosedChannelException) {
// ClosedChannelException does not have a constructor which takes a String but it is a
// connection exception so we keep its original type
return (IOException) error;
} else if (error instanceof TimeoutException) {
// TimeoutException is not an IOException, let's convert it to TimeoutIOException.
return new TimeoutIOException(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error,
error);
} else {
// try our best to keep the original exception type
if (error instanceof IOException) {
try {
return (IOException) error.getClass().asSubclass(IOException.class)
.getConstructor(String.class)
.newInstance(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error)
.initCause(error);
} catch (InstantiationException | IllegalAccessException | IllegalArgumentException
| InvocationTargetException | NoSuchMethodException | SecurityException e) {
// just ignore, will just new an IOException instead below
}
}
return new HBaseIOException(
"Call to " + getCallTarget(addr, regionInfo) + " failed on local exception: " + error,
error);
}
} | 3.68 |
hadoop_ServerWebApp_setHomeDirForCurrentThread | /**
* Method for testing purposes.
*/
public static void setHomeDirForCurrentThread(String homeDir) {
HOME_DIR_TL.set(homeDir);
} | 3.68 |
morf_ConnectionResourcesBean_setDatabaseType | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setDatabaseType(java.lang.String)
*/
@Override
public void setDatabaseType(String databaseType) {
this.databaseType = databaseType;
} | 3.68 |
graphhopper_AbstractAverageSpeedParser_isValidSpeed | /**
* @return <i>true</i> if the given speed is not {@link Double#NaN}
*/
protected static boolean isValidSpeed(double speed) {
return !Double.isNaN(speed);
} | 3.68 |
pulsar_ProducerConfigurationData_isEncryptionEnabled | /**
*
* Returns true if encryption keys are added and a crypto key reader is configured.
*
*/
@JsonIgnore
public boolean isEncryptionEnabled() {
return (this.encryptionKeys != null) && !this.encryptionKeys.isEmpty() && (this.cryptoKeyReader != null);
} | 3.68 |
flink_Broker_getAndRemove | /** Blocking retrieval and removal of the object to share. */
public V getAndRemove(String key) {
try {
V objToShare = retrieveSharedQueue(key).take();
mediations.remove(key);
return objToShare;
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} | 3.68 |
framework_ContainerOrderedWrapper_removeFromOrderWrapper | /**
* Removes the specified Item from the wrapper's internal hierarchy
* structure.
* <p>
* Note : The Item is not removed from the underlying Container.
* </p>
*
* @param id
* the ID of the Item to be removed from the ordering.
*/
private void removeFromOrderWrapper(Object id) {
if (id != null) {
final Object pid = prev.get(id);
final Object nid = next.get(id);
if (first.equals(id)) {
first = nid;
}
if (last.equals(id)) {
last = pid;
}
if (nid != null) {
if (pid == null) {
prev.remove(nid);
} else {
prev.put(nid, pid);
}
}
if (pid != null) {
if (nid == null) {
next.remove(pid);
} else {
next.put(pid, nid);
}
}
next.remove(id);
prev.remove(id);
}
} | 3.68 |
hbase_HMobStore_getConfiguration | /**
* Gets current config.
*/
public Configuration getConfiguration() {
return this.conf;
} | 3.68 |
pulsar_TopicsBase_lookUpBrokerForTopic | // Look up topic owner for non-partitioned topic or single topic partition.
private CompletableFuture<Void> lookUpBrokerForTopic(TopicName partitionedTopicName,
boolean authoritative, List<String> redirectAddresses) {
CompletableFuture<Void> future = new CompletableFuture<>();
if (!pulsar().getBrokerService().getLookupRequestSemaphore().tryAcquire()) {
if (log.isDebugEnabled()) {
log.debug("Too many concurrent lookup request.");
}
future.completeExceptionally(new BrokerServiceException.TooManyRequestsException("Too many "
+ "concurrent lookup request"));
return future;
}
CompletableFuture<Optional<LookupResult>> lookupFuture = pulsar().getNamespaceService()
.getBrokerServiceUrlAsync(partitionedTopicName,
LookupOptions.builder().authoritative(authoritative).loadTopicsInBundle(false).build());
lookupFuture.thenAccept(optionalResult -> {
if (optionalResult == null || !optionalResult.isPresent()) {
if (log.isDebugEnabled()) {
log.debug("Fail to lookup topic for rest produce message request for topic {}.",
partitionedTopicName);
}
completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future);
return;
}
LookupResult result = optionalResult.get();
String httpUrl = result.getLookupData().getHttpUrl();
String httpUrlTls = result.getLookupData().getHttpUrlTls();
if ((StringUtils.isNotBlank(httpUrl) && httpUrl.equals(pulsar().getWebServiceAddress()))
|| (StringUtils.isNotBlank(httpUrlTls) && httpUrlTls.equals(pulsar().getWebServiceAddressTls()))) {
// Current broker owns the topic, add to owning topic.
if (log.isDebugEnabled()) {
log.debug("Complete topic look up for rest produce message request for topic {}, "
+ "current broker is owner broker: {}",
partitionedTopicName, result.getLookupData());
}
pulsar().getBrokerService().getOwningTopics().computeIfAbsent(partitionedTopicName
.getPartitionedTopicName(),
(key) -> ConcurrentOpenHashSet.<Integer>newBuilder().build())
.add(partitionedTopicName.getPartitionIndex());
completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future);
} else {
// Current broker doesn't own the topic or doesn't know who own the topic.
if (log.isDebugEnabled()) {
log.debug("Complete topic look up for rest produce message request for topic {}, "
+ "current broker is not owner broker: {}",
partitionedTopicName, result.getLookupData());
}
if (result.isRedirect()) {
// Redirect lookup.
completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), false), redirectAddresses, future);
} else {
// Found owner for topic.
completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), true), redirectAddresses, future);
}
}
}).exceptionally(exception -> {
if (log.isDebugEnabled()) {
log.debug("Fail to lookup broker with rest produce message request for topic {}: {}",
partitionedTopicName, exception.getMessage());
}
completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future);
return null;
});
return future;
} | 3.68 |
hbase_HttpServer_stop | /**
* stop the server
*/
public void stop() throws Exception {
MultiException exception = null;
for (ListenerInfo li : listeners) {
if (!li.isManaged) {
continue;
}
try {
li.listener.close();
} catch (Exception e) {
LOG.error("Error while stopping listener for webapp" + webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
}
try {
// clear & stop webAppContext attributes to avoid memory leaks.
webAppContext.clearAttributes();
webAppContext.stop();
} catch (Exception e) {
LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(),
e);
exception = addMultiException(exception, e);
}
try {
webServer.stop();
} catch (Exception e) {
LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
if (exception != null) {
exception.ifExceptionThrow();
}
} | 3.68 |
framework_HasValue_getValue | /**
* Returns the new value that triggered this value change event.
*
* @return the new value
*/
public V getValue() {
return value;
} | 3.68 |
dubbo_StringUtils_repeat | /**
* <p>Returns padding using the specified delimiter repeated
* to a given length.</p>
*
* <pre>
* StringUtils.repeat('e', 0) = ""
* StringUtils.repeat('e', 3) = "eee"
* StringUtils.repeat('e', -2) = ""
* </pre>
*
* <p>Note: this method does not support padding with
* <a href="http://www.unicode.org/glossary/#supplementary_character">Unicode Supplementary Characters</a>
* as they require a pair of {@code char}s to be represented.
If you need to support full I18N of your applications
* consider using {@link #repeat(String, int)} instead.
* </p>
*
* @param ch character to repeat
* @param repeat number of times to repeat char, negative treated as zero
* @return String with repeated character
* @see #repeat(String, int)
*/
public static String repeat(final char ch, final int repeat) {
// a negative (or zero) repeat count yields an empty string, as documented above
if (repeat <= 0) {
return "";
}
final char[] buf = new char[repeat];
for (int i = repeat - 1; i >= 0; i--) {
buf[i] = ch;
}
return new String(buf);
} | 3.68 |
flink_BlobClient_sendGetHeader | /**
* Constructs and writes the header data for a GET operation to the given output stream.
*
* @param outputStream the output stream to write the header data to
* @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey blob key associated with the requested file
* @throws IOException thrown if an I/O error occurs while writing the header data to the output
* stream
*/
private static void sendGetHeader(
OutputStream outputStream, @Nullable JobID jobId, BlobKey blobKey) throws IOException {
checkNotNull(blobKey);
checkArgument(
jobId != null || blobKey instanceof TransientBlobKey,
"permanent BLOBs must be job-related");
// Signal type of operation
outputStream.write(GET_OPERATION);
// Send job ID and key
if (jobId == null) {
outputStream.write(JOB_UNRELATED_CONTENT);
} else {
outputStream.write(JOB_RELATED_CONTENT);
outputStream.write(jobId.getBytes());
}
blobKey.writeToOutputStream(outputStream);
} | 3.68 |
hadoop_CacheDirectiveStats_hasExpired | /**
* @return Whether this directive has expired.
*/
public boolean hasExpired() {
return hasExpired;
} | 3.68 |
flink_InFlightRequestTracker_deregisterRequest | /** Deregisters an in-flight request. */
public void deregisterRequest() {
phaser.arriveAndDeregister();
} | 3.68 |
flink_PartitionRequestQueue_addCreditOrResumeConsumption | /**
* Adds unannounced credits from the consumer or resumes data consumption after an exactly-once
* checkpoint and enqueues the corresponding reader for this consumer (if not enqueued yet).
*
* @param receiverId The input channel id to identify the consumer.
* @param operation The operation to be performed (add credit or resume data consumption).
*/
void addCreditOrResumeConsumption(
InputChannelID receiverId, Consumer<NetworkSequenceViewReader> operation)
throws Exception {
if (fatalError) {
return;
}
NetworkSequenceViewReader reader = obtainReader(receiverId);
operation.accept(reader);
enqueueAvailableReader(reader);
} | 3.68 |
pulsar_LeastResourceUsageWithWeight_getMaxResourceUsageWithWeight | // A broker's max resource usage with weight using its historical load and short-term load data with weight.
private double getMaxResourceUsageWithWeight(final String broker, final BrokerData brokerData,
final ServiceConfiguration conf) {
final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
final double maxUsageWithWeight =
updateAndGetMaxResourceUsageWithWeight(broker, brokerData, conf);
if (maxUsageWithWeight > overloadThreshold) {
final LocalBrokerData localData = brokerData.getLocalData();
log.warn(
"Broker {} is overloaded, max resource usage with weight percentage: {}%, "
+ "CPU: {}%, MEMORY: {}%, DIRECT MEMORY: {}%, BANDWIDTH IN: {}%, "
+ "BANDWIDTH OUT: {}%, CPU weight: {}, MEMORY weight: {}, DIRECT MEMORY weight: {}, "
+ "BANDWIDTH IN weight: {}, BANDWIDTH OUT weight: {}",
broker, maxUsageWithWeight * 100,
localData.getCpu().percentUsage(), localData.getMemory().percentUsage(),
localData.getDirectMemory().percentUsage(), localData.getBandwidthIn().percentUsage(),
localData.getBandwidthOut().percentUsage(), conf.getLoadBalancerCPUResourceWeight(),
conf.getLoadBalancerMemoryResourceWeight(), conf.getLoadBalancerDirectMemoryResourceWeight(),
conf.getLoadBalancerBandwithInResourceWeight(),
conf.getLoadBalancerBandwithOutResourceWeight());
}
if (log.isDebugEnabled()) {
log.debug("Broker {} has max resource usage with weight percentage: {}%",
brokerData.getLocalData().getWebServiceUrl(), maxUsageWithWeight * 100);
}
return maxUsageWithWeight;
} | 3.68 |
framework_BeanPropertySet_getParent | /**
* Gets the parent property definition.
*
* @return the property definition for the parent
*/
public PropertyDefinition<T, ?> getParent() {
return parent;
} | 3.68 |
framework_ComboBoxElement_selectByText | /**
* Selects the first option in the ComboBox which matches the given text.
*
* @param text
* the text of the option to select
*/
public void selectByText(String text) {
if (isReadOnly()) {
throw new ReadOnlyException();
}
if (!isTextInputAllowed()) {
selectByTextFromPopup(text);
return;
}
clear();
sendKeys(text);
selectSuggestion(text);
} | 3.68 |
AreaShop_RegionFeature_shutdownFeature | /**
* Destroy the feature and deregister everything.
*/
public void shutdownFeature() {
HandlerList.unregisterAll(this);
shutdown();
} | 3.68 |
hbase_ZKMainServer_runCmdLine | /**
* Run the command-line args passed. Calls System.exit when done.
* @throws IOException in case of a network failure
* @throws InterruptedException if the ZooKeeper client closes
* @throws CliException if the ZooKeeper exception happens in cli command
*/
void runCmdLine() throws IOException, InterruptedException, CliException {
processCmd(this.cl);
System.exit(0);
} | 3.68 |
flink_HiveParserBaseSemanticAnalyzer_getVariablesSetForFilter | /**
* traverse the given node to find all correlated variables, the main logic is from {@link
* HiveFilter#getVariablesSet()}.
*/
public static Set<CorrelationId> getVariablesSetForFilter(RexNode rexNode) {
Set<CorrelationId> correlationVariables = new HashSet<>();
if (rexNode instanceof RexSubQuery) {
RexSubQuery rexSubQuery = (RexSubQuery) rexNode;
// we expect correlated variables in Filter only for now.
// also check case where operator has 0 inputs .e.g TableScan
if (rexSubQuery.rel.getInputs().isEmpty()) {
return correlationVariables;
}
RelNode input = rexSubQuery.rel.getInput(0);
while (input != null
&& !(input instanceof LogicalFilter)
&& input.getInputs().size() >= 1) {
// we don't expect corr vars within UNION for now
if (input.getInputs().size() > 1) {
if (input instanceof LogicalJoin) {
correlationVariables.addAll(
findCorrelatedVar(((LogicalJoin) input).getCondition()));
}
// todo: throw Unsupported exception when the input isn't LogicalJoin and
// contains correlate variables in FLINK-28317
return correlationVariables;
}
input = input.getInput(0);
}
if (input instanceof LogicalFilter) {
correlationVariables.addAll(
findCorrelatedVar(((LogicalFilter) input).getCondition()));
}
return correlationVariables;
}
// AND, NOT etc
if (rexNode instanceof RexCall) {
int numOperands = ((RexCall) rexNode).getOperands().size();
for (int i = 0; i < numOperands; i++) {
RexNode op = ((RexCall) rexNode).getOperands().get(i);
correlationVariables.addAll(getVariablesSetForFilter(op));
}
}
return correlationVariables;
} | 3.68 |
flink_CliUtils_createFile | /** Create the file as well as the parent directory. */
public static boolean createFile(final Path filePath) {
try {
final Path parent = filePath.getParent();
if (parent == null) {
return false;
}
if (Files.notExists(parent)) {
Files.createDirectories(parent);
}
if (Files.notExists(filePath)) {
Files.createFile(filePath);
}
return true;
} catch (final Exception e) {
return false;
}
} | 3.68 |
hbase_SnapshotDescriptionUtils_isWithinDefaultWorkingDir | /**
* Determines if the given workingDir is a subdirectory of the default working snapshot directory
* @param workingDir a directory to check
* @param conf configuration for the HBase cluster
* @return true if the given workingDir is a subdirectory of the default working directory for
* snapshots, false otherwise
* @throws IOException if we can't get the root dir
*/
public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf)
throws IOException {
Path defaultWorkingDir = getDefaultWorkingSnapshotDir(CommonFSUtils.getRootDir(conf));
return workingDir.equals(defaultWorkingDir) || isSubDirectoryOf(workingDir, defaultWorkingDir);
} | 3.68 |
hadoop_SaslInputStream_markSupported | /**
* Tests if this input stream supports the <code>mark</code> and
* <code>reset</code> methods, which it does not.
*
* @return <code>false</code>, since this class does not support the
* <code>mark</code> and <code>reset</code> methods.
*/
@Override
public boolean markSupported() {
return false;
} | 3.68 |
flink_MailboxProcessor_suspend | /** Suspend the running of the loop which was started by {@link #runMailboxLoop()}. */
public void suspend() {
sendPoisonMail(() -> suspended = true);
} | 3.68 |
flink_HashPartitionIterator_advanceAndRead | /* jump to the next partition and continue reading from that */
private BT advanceAndRead() throws IOException {
if (!partitions.hasNext()) {
return null;
}
currentPartition = partitions.next();
currentPartition.setReadPosition(0);
try {
return serializer.deserialize(currentPartition);
} catch (EOFException e) {
return advanceAndRead();
}
} | 3.68 |
hbase_SegmentFactory_createMutableSegment | // create mutable segment
public MutableSegment createMutableSegment(final Configuration conf, CellComparator comparator,
MemStoreSizing memstoreSizing) {
MemStoreLAB memStoreLAB = MemStoreLAB.newInstance(conf);
return generateMutableSegment(conf, comparator, memStoreLAB, memstoreSizing);
} | 3.68 |
hbase_MasterObserver_preRegionOffline | /**
* Called prior to marking a given region as offline.
* @param ctx the environment to interact with the framework and master
*/
default void preRegionOffline(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo regionInfo) throws IOException {
} | 3.68 |
hudi_TypedProperties_fromMap | /**
* This method is introduced to get rid of the scala compile error:
* <pre>
* <code>
* ambiguous reference to overloaded definition,
* both method putAll in class Properties of type (x$1: java.util.Map[_, _])Unit
* and method putAll in class Hashtable of type (x$1: java.util.Map[_ <: Object, _ <: Object])Unit
* match argument types (java.util.HashMap[Nothing,Nothing])
* properties.putAll(new java.util.HashMap())
* </code>
* </pre>
*
* @param items The new items to put
*/
public static TypedProperties fromMap(Map<?, ?> items) {
TypedProperties props = new TypedProperties();
props.putAll(items);
return props;
} | 3.68 |
framework_VaadinSession_setState | /**
* Sets the lifecycle state of this session. The allowed transitions are
* OPEN to CLOSING and CLOSING to CLOSED.
*
* @since 7.2
* @param state
* the new state
*/
protected void setState(State state) {
assert hasLock();
assert this.state.isValidChange(state) : "Invalid session state change "
+ this.state + "->" + state;
this.state = state;
} | 3.68 |
hadoop_AsyncDataService_shutdown | /**
* Gracefully shut down the ThreadPool. Will wait for all data tasks to
* finish.
*/
synchronized void shutdown() {
if (executor == null) {
LOG.warn("AsyncDataService has already shut down.");
} else {
LOG.info("Shutting down all async data service threads...");
executor.shutdown();
// clear the executor so that calling execute again will fail.
executor = null;
LOG.info("All async data service threads have been shut down");
}
} | 3.68 |
hbase_MasterObserver_postGetNamespaceDescriptor | /**
* Called after a getNamespaceDescriptor request has been processed.
* @param ctx the environment to interact with the framework and master
* @param ns the NamespaceDescriptor
*/
default void postGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
} | 3.68 |
AreaShop_GeneralRegion_saveNow | /**
* Save this region to disk now, using this method could slow down the plugin, normally saveRequired() should be used.
* @return true if the region is saved successfully, otherwise false
*/
public boolean saveNow() {
if(isDeleted()) {
return false;
}
saveRequired = false;
File file = new File(plugin.getFileManager().getRegionFolder() + File.separator + getName().toLowerCase() + ".yml");
try {
config.save(file);
return true;
} catch(IOException e) {
return false;
}
} | 3.68 |
MagicPlugin_BaseSpell_getPlayerBlock | /**
* Get the block the player is standing on.
*
* @return The Block the player is standing on
*/
@Nullable
public Block getPlayerBlock() {
Location location = getLocation();
if (location == null) return null;
if (!CompatibilityLib.getCompatibilityUtils().isChunkLoaded(location)) return null;
return location.getBlock().getRelative(BlockFace.DOWN);
} | 3.68 |
flink_Tuple17_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple17)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple17 tuple = (Tuple17) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
return true;
} | 3.68 |
hudi_TimeWait_waitFor | /**
* Wait for an interval time.
*/
public void waitFor() {
try {
if (waitingTime > timeout) {
throw new HoodieException("Timeout(" + waitingTime + "ms) while waiting for " + action);
}
TimeUnit.MILLISECONDS.sleep(interval);
waitingTime += interval;
} catch (InterruptedException e) {
throw new HoodieException("Error while waiting for " + action, e);
}
} | 3.68 |
hadoop_BaseService_postInit | /**
* Post initializes the service. This method is called by the
* {@link Server} after all services of the server have been initialized.
* <p>
* This method does a NOP.
*
* @throws ServiceException thrown if the service could not be
* post-initialized.
*/
@Override
public void postInit() throws ServiceException {
} | 3.68 |
hadoop_AbfsOutputStreamStatisticsImpl_queueShrunk | /**
* {@inheritDoc}
*
* Records the number of times AbfsOutputStream tries to remove the completed
* write operations from the beginning of write operation task queue.
*/
@Override
public void queueShrunk() {
ioStatisticsStore.incrementCounter(StreamStatisticNames.QUEUE_SHRUNK_OPS);
} | 3.68 |
flink_ModifyKindSet_toChangelogMode | /** Returns the default {@link ChangelogMode} from this {@link ModifyKindSet}. */
public ChangelogMode toChangelogMode() {
ChangelogMode.Builder builder = ChangelogMode.newBuilder();
if (this.contains(ModifyKind.INSERT)) {
builder.addContainedKind(RowKind.INSERT);
}
if (this.contains(ModifyKind.UPDATE)) {
builder.addContainedKind(RowKind.UPDATE_BEFORE);
builder.addContainedKind(RowKind.UPDATE_AFTER);
}
if (this.contains(ModifyKind.DELETE)) {
builder.addContainedKind(RowKind.DELETE);
}
return builder.build();
} | 3.68 |
framework_VCalendarPanel_buildCalendarHeader | /**
* Builds the top buttons and current month and year header.
*
* @param needsMonth
* Should the month buttons be visible?
*/
private void buildCalendarHeader(boolean needsMonth) {
getRowFormatter().addStyleName(0,
parent.getStylePrimaryName() + "-calendarpanel-header");
if (prevMonth == null && needsMonth) {
prevMonth = new VEventButton();
prevMonth.setHTML("‹");
prevMonth.setStyleName("v-button-prevmonth");
prevMonth.setTabIndex(-1);
nextMonth = new VEventButton();
nextMonth.setHTML("›");
nextMonth.setStyleName("v-button-nextmonth");
nextMonth.setTabIndex(-1);
setWidget(0, 3, nextMonth);
setWidget(0, 1, prevMonth);
} else if (prevMonth != null && !needsMonth) {
// Remove month traverse buttons
remove(prevMonth);
remove(nextMonth);
prevMonth = null;
nextMonth = null;
}
if (prevYear == null) {
prevYear = new VEventButton();
prevYear.setHTML("«");
prevYear.setStyleName("v-button-prevyear");
prevYear.setTabIndex(-1);
nextYear = new VEventButton();
nextYear.setHTML("»");
nextYear.setStyleName("v-button-nextyear");
nextYear.setTabIndex(-1);
setWidget(0, 0, prevYear);
setWidget(0, 4, nextYear);
}
updateControlButtonRangeStyles(needsMonth);
final String monthName = needsMonth
? getDateTimeService().getMonth(displayedMonth.getMonth())
: "";
final int year = displayedMonth.getYear() + 1900;
getFlexCellFormatter().setStyleName(0, 2,
parent.getStylePrimaryName() + "-calendarpanel-month");
getFlexCellFormatter().setStyleName(0, 0,
parent.getStylePrimaryName() + "-calendarpanel-prevyear");
getFlexCellFormatter().setStyleName(0, 4,
parent.getStylePrimaryName() + "-calendarpanel-nextyear");
getFlexCellFormatter().setStyleName(0, 3,
parent.getStylePrimaryName() + "-calendarpanel-nextmonth");
getFlexCellFormatter().setStyleName(0, 1,
parent.getStylePrimaryName() + "-calendarpanel-prevmonth");
setHTML(0, 2,
"<span class=\"" + parent.getStylePrimaryName()
+ "-calendarpanel-month\">" + monthName + " " + year
+ "</span>");
} | 3.68 |
framework_ConnectorTracker_isHierarchyComplete | /**
* Checks that the connector hierarchy is consistent.
*
* @return <code>true</code> if the hierarchy is consistent,
* <code>false</code> otherwise
* @since 8.1
*/
private boolean isHierarchyComplete() {
boolean noErrors = true;
Set<ClientConnector> danglingConnectors = new HashSet<>(
connectorIdToConnector.values());
LinkedList<ClientConnector> stack = new LinkedList<>();
stack.add(uI);
while (!stack.isEmpty()) {
ClientConnector connector = stack.pop();
danglingConnectors.remove(connector);
Iterable<? extends ClientConnector> children = AbstractClientConnector
.getAllChildrenIterable(connector);
for (ClientConnector child : children) {
stack.add(child);
if (!connector.equals(child.getParent())) {
noErrors = false;
getLogger().log(Level.WARNING,
"{0} claims that {1} is its child, but the child claims {2} is its parent.",
new Object[] { getConnectorString(connector),
getConnectorString(child),
getConnectorString(child.getParent()) });
}
}
}
for (ClientConnector dangling : danglingConnectors) {
noErrors = false;
getLogger().log(Level.WARNING,
"{0} claims that {1} is its parent, but the parent does not acknowledge the parenthood.",
new Object[] { getConnectorString(dangling),
getConnectorString(dangling.getParent()) });
}
return noErrors;
} | 3.68 |
framework_VMenuBar_onFocus | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.FocusHandler#onFocus(com.google.gwt.event
* .dom.client.FocusEvent)
*/
@Override
public void onFocus(FocusEvent event) {
if (!ignoreFocus && getSelected() == null) {
selectFirstItem();
}
} | 3.68 |
hbase_SingleColumnValueExcludeFilter_hasFilterRow | // We cleaned result row in FilterRow to be consistent with scanning process.
@Override
public boolean hasFilterRow() {
return true;
} | 3.68 |
hbase_CompactionProgress_getTotalCompactedSize | /** Returns the total data size processed by the currently running compaction, in bytes */
public long getTotalCompactedSize() {
return totalCompactedSize;
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_setStatsRefreshInterval | /**
* Sets {@code statsRefreshInterval}.
*
* @param statsRefreshInterval Time interval after which the available thread info stats are
* deprecated and need to be refreshed.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setStatsRefreshInterval(Duration statsRefreshInterval) {
this.statsRefreshInterval = statsRefreshInterval;
return this;
} | 3.68 |
hadoop_LogAggregationWebUtils_verifyAndGetContainerId | /**
* Verify and parse containerId.
* @param html the html
* @param containerIdStr the containerId string
* @return the {@link ContainerId}
*/
public static ContainerId verifyAndGetContainerId(Block html,
String containerIdStr) {
if (containerIdStr == null || containerIdStr.isEmpty()) {
html.h1().__("Cannot get container logs without a ContainerId").__();
return null;
}
ContainerId containerId = null;
try {
containerId = ContainerId.fromString(containerIdStr);
} catch (IllegalArgumentException e) {
html.h1()
.__("Cannot get container logs for invalid containerId: "
+ containerIdStr).__();
return null;
}
return containerId;
} | 3.68 |
hbase_ProcedureEvent_wakeEvents | /**
* Wakes up all the given events and puts the procedures waiting on them back into
* ProcedureScheduler queues.
*/
public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent... events) {
scheduler.wakeEvents(events);
} | 3.68 |
pulsar_BacklogQuotaManager_advanceSlowestSystemCursor | /**
* Advances the slowest cursor if that is a system cursor.
*
* @param persistentTopic
* @return true if the slowest cursor is a system cursor
*/
private boolean advanceSlowestSystemCursor(PersistentTopic persistentTopic) {
ManagedLedgerImpl mLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger();
ManagedCursor slowestConsumer = mLedger.getSlowestConsumer();
if (slowestConsumer == null) {
return false;
}
if (PersistentTopic.isDedupCursorName(slowestConsumer.getName())) {
persistentTopic.getMessageDeduplication().takeSnapshot();
return true;
}
// We may need to check other system cursors here : replicator, compaction
return false;
} | 3.68 |
hudi_FlinkHoodieBackedTableMetadataWriter_validateTimelineBeforeSchedulingCompaction | /**
* Validates the timeline for both main and metadata tables to ensure compaction on MDT can be scheduled.
*/
@Override
protected boolean validateTimelineBeforeSchedulingCompaction(Option<String> inFlightInstantTimestamp, String latestDeltaCommitTimeInMetadataTable) {
// Allows compaction of the metadata table to run regardless of inflight instants
return true;
} | 3.68 |
flink_MessageParameters_isResolved | /**
* Returns whether all mandatory parameters have been resolved.
*
* @return true, if all mandatory parameters have been resolved, false otherwise
*/
public final boolean isResolved() {
return getPathParameters().stream()
.filter(MessageParameter::isMandatory)
.allMatch(MessageParameter::isResolved)
&& getQueryParameters().stream()
.filter(MessageParameter::isMandatory)
.allMatch(MessageParameter::isResolved);
} | 3.68 |
hbase_KeyValue_getValueOffset | /** Returns the value offset */
@Override
public int getValueOffset() {
int voffset = getKeyOffset() + getKeyLength();
return voffset;
} | 3.68 |
graphhopper_VectorTile_getKeysList | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public com.google.protobuf.ProtocolStringList
getKeysList() {
return keys_.getUnmodifiableView();
} | 3.68 |
hadoop_TaggedInputSplit_getMapperClass | /**
* Retrieves the Mapper class to use for this split.
*
* @return The Mapper class to use
*/
public Class<? extends Mapper> getMapperClass() {
return mapperClass;
} | 3.68 |
flink_HiveParserSemanticAnalyzer_genAllExprNodeDesc | /**
* Generates all of the expression node descriptors for the expression and children of it passed
* in the arguments. This function uses the row resolver and the metadata information that are
* passed as arguments to resolve the column names to internal names.
*/
@SuppressWarnings("nls")
public Map<HiveParserASTNode, ExprNodeDesc> genAllExprNodeDesc(
HiveParserASTNode expr, HiveParserRowResolver input, HiveParserTypeCheckCtx tcCtx)
throws SemanticException {
// Create the walker and the rules dispatcher.
tcCtx.setUnparseTranslator(unparseTranslator);
Map<HiveParserASTNode, ExprNodeDesc> nodeOutputs =
HiveParserTypeCheckProcFactory.genExprNode(expr, tcCtx);
ExprNodeDesc desc = nodeOutputs.get(expr);
if (desc == null) {
String errMsg = tcCtx.getError();
if (errMsg == null) {
errMsg = "Error in parsing ";
}
throw new SemanticException(errMsg);
}
if (desc instanceof HiveParserExprNodeColumnListDesc) {
throw new SemanticException("TOK_ALLCOLREF is not supported in current context");
}
if (!unparseTranslator.isEnabled()) {
// Not creating a view, so no need to track view expansions.
return nodeOutputs;
}
Map<ExprNodeDesc, String> nodeToText = new HashMap<>();
List<HiveParserASTNode> fieldDescList = new ArrayList<>();
for (Map.Entry<HiveParserASTNode, ExprNodeDesc> entry : nodeOutputs.entrySet()) {
if (!(entry.getValue() instanceof ExprNodeColumnDesc)) {
// we need to translate the ExprNodeFieldDesc too, e.g., identifiers in
// struct<>.
if (entry.getValue() instanceof ExprNodeFieldDesc) {
fieldDescList.add(entry.getKey());
}
continue;
}
HiveParserASTNode node = entry.getKey();
ExprNodeColumnDesc columnDesc = (ExprNodeColumnDesc) entry.getValue();
if ((columnDesc.getTabAlias() == null) || (columnDesc.getTabAlias().length() == 0)) {
// These aren't real column refs; instead, they are special
// internal expressions used in the representation of aggregation.
continue;
}
String[] tmp = input.reverseLookup(columnDesc.getColumn());
// in subquery case, tmp may be from outside.
if (tmp[0] != null
&& columnDesc.getTabAlias() != null
&& !tmp[0].equals(columnDesc.getTabAlias())
&& tcCtx.getOuterRR() != null) {
tmp = tcCtx.getOuterRR().reverseLookup(columnDesc.getColumn());
}
StringBuilder replacementText = new StringBuilder();
replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
replacementText.append(".");
replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
nodeToText.put(columnDesc, replacementText.toString());
unparseTranslator.addTranslation(node, replacementText.toString());
}
for (HiveParserASTNode node : fieldDescList) {
Map<HiveParserASTNode, String> map = translateFieldDesc(node);
for (Entry<HiveParserASTNode, String> entry : map.entrySet()) {
unparseTranslator.addTranslation(entry.getKey(), entry.getValue());
}
}
return nodeOutputs;
} | 3.68 |
framework_VDateField_sendBufferedValuesWithDelay | /**
* Puts the {@link #bufferedDateString} and {@link #bufferedResolutions}
* changes into the rpc queue and clears their values.
* <p>
* Note: The value will not be sent to the server immediately. It will be
* sent when a non {@link com.vaadin.shared.annotations.Delayed} annotated
* rpc is triggered.
* </p>
*
* @since 8.9
*/
public void sendBufferedValuesWithDelay() {
rpc.updateValueWithDelay(bufferedDateString,
bufferedResolutions.entrySet().stream().collect(
Collectors.toMap(entry -> entry.getKey().name(),
entry -> entry.getValue())));
bufferedDateString = null;
bufferedResolutions.clear();
} | 3.68 |
hbase_MetricsRegionAggregateSourceImpl_getMetrics | /**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the collector.
* @param collector the collector
* @param all get all the metrics regardless of when they last changed.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder mrb = collector.addRecord(metricsName);
if (regionSources != null) {
for (MetricsRegionSource regionMetricSource : regionSources) {
if (regionMetricSource instanceof MetricsRegionSourceImpl) {
((MetricsRegionSourceImpl) regionMetricSource).snapshot(mrb, all);
}
}
metricsRegistry.snapshot(mrb, all);
}
} | 3.68 |
rocketmq-connect_MetricUtils_getHistogramValue | /**
     * Get the histogram value selected by the type encoded in the metric name.
     *
     * @param name the metric name whose type selects which snapshot statistic to read
     * @param histogram the histogram to read from
     * @return the selected statistic value
*/
public static Double getHistogramValue(MetricName name, Histogram histogram) {
if (name.getType().equals(Stat.NoneType.none.name())) {
throw new IllegalArgumentException("Histogram type configuration error");
}
Stat.HistogramType histogramType = Stat.HistogramType.valueOf(name.getType());
switch (histogramType) {
case Min:
return Double.valueOf(histogram.getSnapshot().getMin());
case Avg:
return Double.valueOf(histogram.getSnapshot().getMean());
case Max:
return Double.valueOf(histogram.getSnapshot().getMax());
case Percentile_75th:
return histogram.getSnapshot().get75thPercentile();
case Percentile_95th:
return histogram.getSnapshot().get95thPercentile();
case Percentile_98th:
return histogram.getSnapshot().get98thPercentile();
case Percentile_99th:
return histogram.getSnapshot().get99thPercentile();
case Percentile_999th:
return histogram.getSnapshot().get999thPercentile();
default:
return 0.0;
}
} | 3.68 |
framework_VAbsoluteLayout_getWidgetCaption | /**
* Get the caption for a widget.
*
     * @param child
     *            The child widget to get the caption of
     * @return the caption of the given widget, or {@code null} if the widget
     *         has no wrapper in this layout
     */
public VCaption getWidgetCaption(Widget child) {
AbsoluteWrapper wrapper = getChildWrapper(child);
if (wrapper != null) {
return wrapper.getCaption();
}
return null;
} | 3.68 |
hbase_Superusers_isSuperUser | /**
   * Check if the given user is a super user.
   * @param user the user name to check
   * @return true if the given user is a super user, false otherwise.
   */
public static boolean isSuperUser(String user) {
return superUsers.contains(user) || superGroups.contains(user);
} | 3.68 |
framework_Table_isColumnCollapsingAllowed | /**
* Checks if column collapsing is allowed.
*
* @return true if columns can be collapsed; false otherwise.
*/
public boolean isColumnCollapsingAllowed() {
return columnCollapsingAllowed;
} | 3.68 |
framework_VaadinServletService_getContextRootRelativePath | /**
* Gets a relative path you can use to refer to the context root.
*
* @param request
* the request for which the location should be determined
* @return A relative path to the context root. Never ends with a slash (/).
*
* @since 8.0.3
*/
public static String getContextRootRelativePath(VaadinRequest request) {
VaadinServletRequest servletRequest = (VaadinServletRequest) request;
// Generate location from the request by finding how many "../" should
// be added to the servlet path before we get to the context root
String servletPath = servletRequest.getServletPath();
if (servletPath == null) {
// Not allowed by the spec but servers are servers...
servletPath = "";
}
String pathInfo = servletRequest.getPathInfo();
if (pathInfo != null && !pathInfo.isEmpty()) {
servletPath += pathInfo;
}
return getCancelingRelativePath(servletPath);
} | 3.68 |
dubbo_DubboBootstrapStopedEvent_getDubboBootstrap | /**
* Get {@link org.apache.dubbo.config.bootstrap.DubboBootstrap} instance
*
* @return non-null
*/
public DubboBootstrap getDubboBootstrap() {
return (DubboBootstrap) super.getSource();
} | 3.68 |
hadoop_Time_monotonicNow | /**
* Current time from some arbitrary time base in the past, counting in
* milliseconds, and not affected by settimeofday or similar system clock
* changes. This is appropriate to use when computing how much longer to
* wait for an interval to expire.
* This function can return a negative value and it must be handled correctly
* by callers. See the documentation of System#nanoTime for caveats.
* @return a monotonic clock that counts in milliseconds.
*/
public static long monotonicNow() {
return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
} | 3.68 |
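A minimal sketch of the interval-measurement pattern this method is intended for; `doWork()` and `timeoutMs` are placeholders, not part of the API above:

long start = Time.monotonicNow();
doWork();                                            // operation being timed (placeholder)
long elapsedMs = Time.monotonicNow() - start;        // differences are meaningful even if a single reading is negative
if (elapsedMs > timeoutMs) {
    // the interval has expired; unaffected by settimeofday or NTP clock steps
}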
hadoop_LoggedLocation_compareStrings | // I'll treat this as an atomic object type
private void compareStrings(List<NodeName> c1, List<NodeName> c2,
TreePath loc, String eltname)
throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recursePath = new TreePath(loc, eltname);
if (c1 == null || c2 == null || (c1.size() != c2.size())) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
}
for (NodeName n1 : c1) {
boolean found = false;
for (NodeName n2 : c2) {
if (n1.getValue().equals(n2.getValue())) {
found = true;
break;
}
}
if (!found) {
throw new DeepInequalityException(eltname
+ " miscompared [" + n1.getValue() +"]", recursePath);
}
}
} | 3.68 |
hadoop_AbfsOutputStream_createBlockIfNeeded | /**
   * Create a destination block on demand.
   *
   * @return the active block, creating a new one if none is currently active.
* @throws IOException on any failure to create
*/
private synchronized DataBlocks.DataBlock createBlockIfNeeded()
throws IOException {
if (activeBlock == null) {
blockCount++;
activeBlock = blockFactory
.create(blockCount, this.blockSize, outputStreamStatistics);
}
return activeBlock;
} | 3.68 |
querydsl_OrderSpecifier_getNullHandling | /**
* Get the null handling
*
* @return null handling
*/
public NullHandling getNullHandling() {
return nullHandling;
} | 3.68 |
hadoop_CSQueueStore_updateGetMapForShortName | /**
* This method will update the getMap for the short name provided, depending
* on how many queues are present with the same shortname.
* @param shortName The short name of the queue to be updated
*/
private void updateGetMapForShortName(String shortName) {
//we protect the root, since root can be both a full path and a short name
//we simply deny adding root as a shortname to the getMap.
if (shortName.equals(CapacitySchedulerConfiguration.ROOT)) {
return;
}
//getting all queues with the same short name
Set<String> fullNames = this.shortNameToLongNames.get(shortName);
//if there is only one queue we add it to the getMap
if (fullNames != null && fullNames.size() == 1) {
getMap.put(shortName,
fullNameQueues.get(fullNames.iterator().next()));
} else {
      //in all other cases using only shortName cannot unambiguously identify
      //a queue
getMap.remove(shortName);
}
} | 3.68 |
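For illustration (hypothetical queue names): while root.a.compute is the only queue ending in "compute", the short name "compute" resolves to it through getMap; once root.b.compute is added, the set of full names for that short name has size two, so the short name is dropped from getMap and lookups must use the full path.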
hadoop_IOStatisticsBinding_entryToString | /**
* Convert entry values to the string format used in logging.
*
* @param <E> type of values.
* @param name statistic name
* @param value stat value
* @return formatted string
*/
public static <E> String entryToString(
final String name, final E value) {
return String.format(
ENTRY_PATTERN,
name,
value);
} | 3.68 |
framework_PropertysetItem_fireItemPropertySetChange | /**
* Sends a Property set change event to all interested listeners.
*/
private void fireItemPropertySetChange() {
if (propertySetChangeListeners != null) {
final Item.PropertySetChangeEvent event = new PropertysetItem.PropertySetChangeEvent(
this);
for (Object l : propertySetChangeListeners.toArray()) {
((Item.PropertySetChangeListener) l)
.itemPropertySetChange(event);
}
}
} | 3.68 |
flink_ProcessAllWindowFunction_clear | /**
* Deletes any state in the {@code Context} when the Window expires (the watermark passes its
* {@code maxTimestamp} + {@code allowedLateness}).
*
* @param context The context to which the window is being evaluated
* @throws Exception The function may throw exceptions to fail the program and trigger recovery.
*/
public void clear(Context context) throws Exception {} | 3.68 |
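A hedged sketch of a concrete override that releases per-window state when the window expires; this is a fragment of a ProcessAllWindowFunction subclass, and the state descriptor name and type are illustrative:

private final ValueStateDescriptor<Long> countDesc =
        new ValueStateDescriptor<>("window-count", Long.class);

@Override
public void clear(Context context) throws Exception {
    // drop the per-window state so it does not linger after the window is purged
    context.windowState().getState(countDesc).clear();
}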
hbase_ByteBuff_readLong | /**
   * Read a long which was written to fitInBytes bytes and increment the position.
* @param fitInBytes In how many bytes given long is stored.
* @return The value of parsed long.
*/
public static long readLong(ByteBuff in, final int fitInBytes) {
long tmpLength = 0;
for (int i = 0; i < fitInBytes; ++i) {
tmpLength |= (in.get() & 0xffl) << (8l * i);
}
return tmpLength;
} | 3.68 |
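For illustration, the matching little-endian layout can be produced as below (a sketch, not necessarily the exact HBase writer); `out` and `in` stand for ByteBuffs positioned at the encoded value:

int fitInBytes = 3;                          // caller knows the value fits in 3 bytes
long value = 0x0A0B0CL;
for (int i = 0; i < fitInBytes; ++i) {
    out.put((byte) (value >>> (8 * i)));     // least-significant byte first
}
// ... later, reading it back:
long decoded = ByteBuff.readLong(in, fitInBytes);   // decoded == value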
framework_VaadinSession_getConfiguration | /**
* Gets the configuration for this session.
*
* @return the deployment configuration
*/
public DeploymentConfiguration getConfiguration() {
assert hasLock();
return configuration;
} | 3.68 |
hadoop_PlacementConstraints_cardinality | /**
   * Similar to {@link #cardinality(String, int, int, String...)}, but lets you
* attach a namespace to the given allocation tags.
*
* @param scope the scope of the constraint
* @param namespace the namespace of the allocation tags
* @param minCardinality determines the minimum number of allocations within
* the scope
* @param maxCardinality determines the maximum number of allocations within
* the scope
* @param allocationTags allocation tags
* @return the resulting placement constraint
*/
public static AbstractConstraint cardinality(String scope, String namespace,
int minCardinality, int maxCardinality, String... allocationTags) {
return new SingleConstraint(scope, minCardinality, maxCardinality,
PlacementTargets.allocationTagWithNamespace(namespace, allocationTags));
} | 3.68 |
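A hedged usage sketch; the NODE scope constant and the "self" namespace string are assumptions about the surrounding PlacementConstraints API rather than facts established by the snippet:

// At most one "hbase-rs" allocation per node, counting only this application's own tags.
AbstractConstraint constraint = PlacementConstraints.cardinality(
        PlacementConstraints.NODE,   // scope: cardinality is evaluated per node
        "self",                      // namespace attached to the allocation tags (assumed value)
        0,                           // minCardinality
        1,                           // maxCardinality
        "hbase-rs");                 // allocation tag being counted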