name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
---|---|---|
flink_ZooKeeperStateHandleStore_releaseAndTryRemove | /**
* Releases the lock for the given state node and tries to remove the state node if it is no
* longer locked.
*
* @param pathInZooKeeper Path of state handle to remove
* @return {@code true} if the state handle could be deleted; {@code false}, if the handle is
* locked by another connection.
* @throws Exception If the ZooKeeper operation or discarding the state handle fails
*/
@Override
public boolean releaseAndTryRemove(String pathInZooKeeper) throws Exception {
checkNotNull(pathInZooKeeper, "Path in ZooKeeper");
final String path = normalizePath(pathInZooKeeper);
RetrievableStateHandle<T> stateHandle = null;
try {
stateHandle = get(path, false);
} catch (Exception e) {
LOG.warn("Could not retrieve the state handle from node {}.", path, e);
}
release(pathInZooKeeper);
try {
deleteIfExists(getRootLockPath(path));
} catch (KeeperException.NotEmptyException ignored) {
LOG.debug(
"Could not delete znode {} because it is still locked.", getRootLockPath(path));
return false;
}
if (stateHandle != null) {
stateHandle.discardState();
}
// we can now commit the deletion by removing the parent node
deleteIfExists(path);
return true;
} | 3.68 |
framework_VaadinSession_setCurrent | /**
* Sets the thread local for the current session. This method is used by the
* framework to set the current session whenever a new request is processed
* and it is cleared when the request has been processed.
* <p>
* The application developer can also use this method to define the current
* session outside the normal request handling and in threads started from
* request handling threads, e.g. when initiating custom background threads.
* <p>
* The session is stored using a weak reference to avoid leaking memory in
* case it is not explicitly cleared.
*
* @param session
* the session to set as current
*
* @see #getCurrent()
* @see ThreadLocal
*
* @since 7.0
*/
public static void setCurrent(VaadinSession session) {
CurrentInstance.set(VaadinSession.class, session);
} | 3.68 |
hbase_HttpServer_getAttribute | /**
* Get the value in the webapp context.
* @param name The name of the attribute
* @return The value of the attribute
*/
public Object getAttribute(String name) {
return webAppContext.getAttribute(name);
} | 3.68 |
dubbo_Bytes_short2bytes | /**
* Write a short value into a byte array in big-endian byte order.
*
* @param v value.
* @param b byte array.
* @param off offset into the byte array at which to write.
*/
public static void short2bytes(short v, byte[] b, int off) {
b[off + 1] = (byte) v;
b[off + 0] = (byte) (v >>> 8);
} | 3.68 |
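A minimal usage sketch of Bytes.short2bytes above; the value 0x1234 and the offset 2 are hypothetical:
byte[] buf = new byte[4];
// Writes the short big-endian: high byte at off, low byte at off + 1.
Bytes.short2bytes((short) 0x1234, buf, 2);
// buf is now { 0x00, 0x00, 0x12, 0x34 }: buf[2] == 0x12, buf[3] == 0x34.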
morf_AbstractSqlDialectTest_expectedSqlForMathOperations7 | /**
* @return expected SQL for math operation 7
*/
protected String expectedSqlForMathOperations7() {
return "(a + b) / (c - d)";
} | 3.68 |
hbase_RegionState_convert | /**
* Convert a protobuf HBaseProtos.RegionState to a RegionState
* @return the RegionState
*/
public static RegionState convert(ClusterStatusProtos.RegionState proto) {
return new RegionState(ProtobufUtil.toRegionInfo(proto.getRegionInfo()),
State.convert(proto.getState()), proto.getStamp(), null);
} | 3.68 |
hadoop_RolloverSignerSecretProvider_init | /**
* Initialize the SignerSecretProvider. It initializes the current secret
* and starts the scheduler for the rollover to run at an interval of
* tokenValidity.
* @param config configuration properties
* @param servletContext servlet context
* @param tokenValidity The amount of time a token is valid for
* @throws Exception thrown if an error occurred
*/
@Override
public void init(Properties config, ServletContext servletContext,
long tokenValidity) throws Exception {
initSecrets(generateNewSecret(), null);
startScheduler(tokenValidity, tokenValidity);
} | 3.68 |
flink_CommonExecLegacySink_translateToTransformation | /**
* Translates {@link TableSink} into a {@link Transformation}.
*
* @param withChangeFlag Set to true to emit records with change flags.
* @return The {@link Transformation} that corresponds to the translated {@link TableSink}.
*/
@SuppressWarnings("unchecked")
private Transformation<T> translateToTransformation(
PlannerBase planner, ExecNodeConfig config, boolean withChangeFlag) {
// if no change flags are requested, verify table is an insert-only (append-only) table.
if (!withChangeFlag && needRetraction) {
throw new TableException(
"Table is not an append-only table. "
+ "Use the toRetractStream() in order to handle add and retract messages.");
}
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform =
(Transformation<RowData>) inputEdge.translateToPlan(planner);
final RowType inputRowType = (RowType) inputEdge.getOutputType();
final RowType convertedInputRowType = checkAndConvertInputTypeIfNeeded(inputRowType);
final DataType resultDataType = tableSink.getConsumedDataType();
if (CodeGenUtils.isInternalClass(resultDataType)) {
return (Transformation<T>) inputTransform;
} else {
final int rowtimeIndex = getRowtimeIndex(inputRowType);
final DataType physicalOutputType =
TableSinkUtils.inferSinkPhysicalDataType(
resultDataType, convertedInputRowType, withChangeFlag);
final TypeInformation<T> outputTypeInfo =
SinkCodeGenerator.deriveSinkOutputTypeInfo(
tableSink, physicalOutputType, withChangeFlag);
final CodeGenOperatorFactory<T> converterOperator =
SinkCodeGenerator.generateRowConverterOperator(
new CodeGeneratorContext(
config, planner.getFlinkContext().getClassLoader()),
convertedInputRowType,
tableSink,
physicalOutputType,
withChangeFlag,
"SinkConversion",
rowtimeIndex);
final String description =
"SinkConversion To " + resultDataType.getConversionClass().getSimpleName();
return ExecNodeUtil.createOneInputTransformation(
inputTransform,
createFormattedTransformationName(description, "SinkConversion", config),
createFormattedTransformationDescription(description, config),
converterOperator,
outputTypeInfo,
inputTransform.getParallelism(),
false);
}
} | 3.68 |
hbase_TerminatedWrapper_terminatorPosition | /**
* Return the position at which {@code term} begins within {@code src}, or {@code -1} if
* {@code term} is not found.
*/
protected int terminatorPosition(PositionedByteRange src) {
byte[] a = src.getBytes();
final int offset = src.getOffset();
int i;
SKIP: for (i = src.getPosition(); i < src.getLength(); i++) {
if (a[offset + i] != term[0]) {
continue;
}
int j;
for (j = 1; j < term.length && offset + j < src.getLength(); j++) {
if (a[offset + i + j] != term[j]) {
continue SKIP;
}
}
if (j == term.length) {
return i; // success
}
}
return -1;
} | 3.68 |
framework_ApplicationConnection_isActive | /**
* Checks if there is some work to be done on the client side.
*
* @return true if the client has some work to be done, false otherwise
*/
private boolean isActive() {
return !getMessageHandler().isInitialUidlHandled() || isWorkPending()
|| getMessageSender().hasActiveRequest()
|| isExecutingDeferredCommands();
} | 3.68 |
morf_HumanReadableStatementProducer_analyseTable | /** @see org.alfasoftware.morf.upgrade.SchemaEditor#analyseTable(String) **/
@Override
public void analyseTable(String tableName) {
consumer.schemaChange(HumanReadableStatementHelper.generateAnalyseTableFromString(tableName));
} | 3.68 |
hbase_AbstractStateMachineRegionProcedure_setRegion | /**
* Used when deserializing. Otherwise, DON'T TOUCH IT!
*/
protected void setRegion(final RegionInfo hri) {
this.hri = hri;
} | 3.68 |
hbase_MetricsSnapshot_addSnapshot | /**
* Record a single instance of a snapshot
* @param time time that the snapshot took
*/
public void addSnapshot(long time) {
source.updateSnapshotTime(time);
} | 3.68 |
flink_ChecksumUtils_convertChecksumToString | /**
* Converts an int crc32 checksum to the string format used by Google storage, which is the
* base64 string for the int in big-endian format.
*
* @param checksum The int checksum
* @return The string checksum
*/
public static String convertChecksumToString(int checksum) {
ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
buffer.order(ByteOrder.BIG_ENDIAN);
buffer.putInt(checksum);
return BASE64_ENCODER.encodeToString(buffer.array());
} | 3.68 |
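A standalone sketch of the same conversion, assuming the BASE64_ENCODER constant above is an ordinary java.util.Base64 encoder; the checksum value is hypothetical:
int checksum = 0x0000002A;                                  // hypothetical crc32 value
ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
buffer.order(ByteOrder.BIG_ENDIAN);
buffer.putInt(checksum);
String encoded = java.util.Base64.getEncoder().encodeToString(buffer.array());
// encoded == "AAAAKg==" for the big-endian bytes 00 00 00 2A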
flink_BatchTask_initBroadcastInputsSerializers | /** Creates all the serializers and iterators for the broadcast inputs. */
protected void initBroadcastInputsSerializers(int numBroadcastInputs) {
this.broadcastInputSerializers = new TypeSerializerFactory<?>[numBroadcastInputs];
ClassLoader userCodeClassLoader = getUserCodeClassLoader();
for (int i = 0; i < numBroadcastInputs; i++) {
// ---------------- create the serializer first ---------------------
final TypeSerializerFactory<?> serializerFactory =
this.config.getBroadcastInputSerializer(i, userCodeClassLoader);
this.broadcastInputSerializers[i] = serializerFactory;
}
} | 3.68 |
hadoop_AuxServiceConfiguration_properties | /**
* A blob of key-value pairs of common service properties.
**/
public AuxServiceConfiguration properties(Map<String, String> props) {
this.properties = props;
return this;
} | 3.68 |
flink_BinaryStringDataUtil_toTimestamp | /** Used by {@code CAST(x as TIMESTAMP_LTZ)}. */
public static TimestampData toTimestamp(
BinaryStringData input, int precision, TimeZone timeZone) throws DateTimeException {
return DateTimeUtils.parseTimestampData(input.toString(), precision, timeZone);
} | 3.68 |
framework_ScrollbarBundle_addVisibilityHandler | /**
* Adds a handler for the scrollbar handle visibility.
*
* @param handler
* the {@link VisibilityHandler} to add
* @return {@link HandlerRegistration} used to remove the handler
*/
public HandlerRegistration addVisibilityHandler(
final VisibilityHandler handler) {
return getHandlerManager().addHandler(VisibilityChangeEvent.TYPE,
handler);
} | 3.68 |
framework_FileUploadHandler_doHandleSimpleMultipartFileUpload | /**
* Method used to stream content from a multipart request (either from
* servlet or portlet request) to the given StreamVariable.
* <p>
* This method takes care of locking the session as needed and does not
* assume the caller has locked the session. This allows the session to be
* locked only when needed and not when handling the upload data.
* </p>
*
* @param session
* The session containing the stream variable
* @param request
* The upload request
* @param response
* The upload response
* @param streamVariable
* The destination stream variable
* @param variableName
* The name of the destination stream variable
* @param owner
* The owner of the stream variable
* @param boundary
* The mime boundary used in the upload request
* @throws IOException
* If there is a problem reading the request or writing the
* response
*/
protected void doHandleSimpleMultipartFileUpload(VaadinSession session,
VaadinRequest request, VaadinResponse response,
StreamVariable streamVariable, String variableName,
ClientConnector owner, String boundary) throws IOException {
// multipart parsing, supports only one file for request, but that is
// fine for our current terminal
final InputStream inputStream = request.getInputStream();
long contentLength = getContentLength(request);
boolean atStart = false;
boolean firstFileFieldFound = false;
String rawfilename = "unknown";
String rawMimeType = "application/octet-stream";
/*
* Read the stream until the actual file starts (empty line). Read
* filename and content type from multipart headers.
*/
while (!atStart) {
String readLine = readLine(inputStream);
contentLength -= (readLine.getBytes(UTF_8).length + CRLF.length());
if (readLine.startsWith("Content-Disposition:")
&& readLine.indexOf("filename=") > 0) {
rawfilename = readLine.replaceAll(".*filename=", "");
char quote = rawfilename.charAt(0);
rawfilename = rawfilename.substring(1);
rawfilename = rawfilename.substring(0,
rawfilename.indexOf(quote));
firstFileFieldFound = true;
} else if (firstFileFieldFound && readLine.isEmpty()) {
atStart = true;
} else if (readLine.startsWith("Content-Type")) {
rawMimeType = readLine.split(": ")[1];
}
}
contentLength -= (boundary.length() + CRLF.length()
+ 2 * DASHDASH.length() + CRLF.length());
/*
* Reads bytes from the underlying stream. Compares the read bytes to
* the boundary string and returns -1 if met.
*
* The matching happens so that if the read byte equals to the first
* char of boundary string, the stream goes to "buffering mode". In
* buffering mode bytes are read until the character does not match the
* corresponding from boundary string or the full boundary string is
* found.
*
* Note: if this is someday needed elsewhere, don't shoot yourself in the
* foot; split this into a top-level helper class.
*/
InputStream simpleMultiPartReader = new SimpleMultiPartInputStream(
inputStream, boundary);
/*
* Should report only the filename even if the browser sends the path
*/
final String filename = removePath(rawfilename);
final String mimeType = rawMimeType;
try {
handleFileUploadValidationAndData(session, simpleMultiPartReader,
streamVariable, filename, mimeType, contentLength, owner,
variableName);
} catch (UploadException e) {
session.getCommunicationManager()
.handleConnectorRelatedException(owner, e);
}
sendUploadResponse(request, response);
} | 3.68 |
hbase_RegionStateStore_hasMergeRegions | /**
* Check whether the given {@code region} has any 'info:merge*' columns.
*/
public boolean hasMergeRegions(RegionInfo region) throws IOException {
return CatalogFamilyFormat.hasMergeRegions(getRegionCatalogResult(region).rawCells());
} | 3.68 |
flink_SqlNodeConverter_supportedSqlKinds | /**
* Returns the {@link SqlKind SqlKinds} of {@link SqlNode SqlNodes} that the {@link
* SqlNodeConverter} supports to convert.
*
* <p>If a {@link SqlNodeConverter} returns a non-empty set of SqlKinds, the conversion framework
* will find the corresponding converter by matching the SqlKind of the SqlNode instead of the
* class of the SqlNode.
*
* @see SqlQueryConverter
*/
default Optional<EnumSet<SqlKind>> supportedSqlKinds() {
return Optional.empty();
} | 3.68 |
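A hypothetical override sketch of supportedSqlKinds; the specific kinds (SELECT, UNION) are chosen purely for illustration:
@Override
public Optional<EnumSet<SqlKind>> supportedSqlKinds() {
    // Match converters by SqlKind rather than by the concrete SqlNode class.
    return Optional.of(EnumSet.of(SqlKind.SELECT, SqlKind.UNION));
}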
flink_PrioritizedDeque_getAndRemove | /**
* Find first element matching the {@link Predicate}, remove it from the {@link
* PrioritizedDeque} and return it.
*
* @return removed element
*/
public T getAndRemove(Predicate<T> preCondition) {
Iterator<T> iterator = deque.iterator();
for (int i = 0; iterator.hasNext(); i++) {
T next = iterator.next();
if (preCondition.test(next)) {
if (i < numPriorityElements) {
numPriorityElements--;
}
iterator.remove();
return next;
}
}
throw new NoSuchElementException();
} | 3.68 |
hbase_QuotaTableUtil_extractQuotaSnapshot | /**
* Extracts the {@link SpaceViolationPolicy} and {@link TableName} from the provided
* {@link Result} and adds them to the given {@link Map}. If the result does not contain the
* expected information or the serialized policy in the value is invalid, this method will throw
* an {@link IllegalArgumentException}.
* @param result A row from the quota table.
* @param snapshots A map of snapshots to add the result of this method into.
*/
public static void extractQuotaSnapshot(Result result,
Map<TableName, SpaceQuotaSnapshot> snapshots) {
byte[] row = Objects.requireNonNull(result).getRow();
if (row == null || row.length == 0) {
throw new IllegalArgumentException("Provided result had a null row");
}
final TableName targetTableName = getTableFromRowKey(row);
Cell c = result.getColumnLatestCell(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY);
if (c == null) {
throw new IllegalArgumentException("Result did not contain the expected column "
+ QUOTA_POLICY_COLUMN + ", " + result.toString());
}
ByteString buffer =
UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength());
try {
QuotaProtos.SpaceQuotaSnapshot snapshot = QuotaProtos.SpaceQuotaSnapshot.parseFrom(buffer);
snapshots.put(targetTableName, SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot));
} catch (InvalidProtocolBufferException e) {
throw new IllegalArgumentException(
"Result did not contain a valid SpaceQuota protocol buffer message", e);
}
} | 3.68 |
hadoop_TaskPool_stopRevertsOnFailure | /**
* Stop trying to revert if one operation fails.
* @return the builder
*/
public Builder<I> stopRevertsOnFailure() {
this.stopRevertsOnFailure = true;
return this;
} | 3.68 |
flink_ResolveCallByArgumentsRule_adaptArguments | /** Adapts the arguments according to the properties of the {@link Result}. */
private List<ResolvedExpression> adaptArguments(
Result inferenceResult, List<ResolvedExpression> resolvedArgs) {
return IntStream.range(0, resolvedArgs.size())
.mapToObj(
pos -> {
final ResolvedExpression argument = resolvedArgs.get(pos);
final DataType argumentType = argument.getOutputDataType();
final DataType expectedType =
inferenceResult.getExpectedArgumentTypes().get(pos);
if (!supportsAvoidingCast(
argumentType.getLogicalType(),
expectedType.getLogicalType())) {
return resolutionContext
.postResolutionFactory()
.cast(argument, expectedType);
}
return argument;
})
.collect(Collectors.toList());
} | 3.68 |
flink_StringUtils_readString | /**
* Reads a non-null String from the given input.
*
* @param in The input to read from
* @return The deserialized String
* @throws IOException Thrown, if the reading or the deserialization fails.
*/
public static String readString(DataInputView in) throws IOException {
return StringValue.readString(in);
} | 3.68 |
graphhopper_VectorTile_removeFeatures | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public Builder removeFeatures(int index) {
if (featuresBuilder_ == null) {
ensureFeaturesIsMutable();
features_.remove(index);
onChanged();
} else {
featuresBuilder_.remove(index);
}
return this;
} | 3.68 |
flink_FileRegionWriteReadUtils_readHsInternalRegionFromFile | /**
* Read {@link InternalRegion} from {@link FileChannel}.
*
* <p>Note that this type of region's length may be variable because it contains an array to
* indicate each buffer's release state.
*
* @param channel the channel to read.
* @param headerBuffer the buffer to read {@link InternalRegion}'s header.
* @param fileOffset the file offset to start read.
* @return the {@link InternalRegion} that read from this channel.
*/
public static InternalRegion readHsInternalRegionFromFile(
FileChannel channel, ByteBuffer headerBuffer, long fileOffset) throws IOException {
headerBuffer.clear();
BufferReaderWriterUtil.readByteBufferFully(channel, headerBuffer, fileOffset);
headerBuffer.flip();
int firstBufferIndex = headerBuffer.getInt();
int numBuffers = headerBuffer.getInt();
long firstBufferOffset = headerBuffer.getLong();
ByteBuffer payloadBuffer = allocateAndConfigureBuffer(numBuffers);
BufferReaderWriterUtil.readByteBufferFully(
channel, payloadBuffer, fileOffset + InternalRegion.HEADER_SIZE);
boolean[] released = new boolean[numBuffers];
payloadBuffer.flip();
for (int i = 0; i < numBuffers; i++) {
released[i] = payloadBuffer.get() != 0;
}
return new InternalRegion(firstBufferIndex, firstBufferOffset, numBuffers, released);
} | 3.68 |
framework_Profiler_leave | /**
* Leaves a named block. There should always be a matching invocation of
* {@link #enter(String)} when entering the block. Calls to this method will
* be removed by the compiler unless profiling is enabled.
*
* @param name
* the name of the left block
*/
public static void leave(String name) {
if (isEnabled()) {
logGwtEvent(name, "end");
}
} | 3.68 |
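A hedged usage sketch of the matching enter/leave pairing described above; the block name and the renderRows() call are illustrative:
Profiler.enter("renderRows");   // hypothetical block name
renderRows();                   // assumed application code being profiled
Profiler.leave("renderRows");   // must match the enter() call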
framework_AbstractComponent_detach | /*
* Detach the component from application. Don't add a JavaDoc comment here,
* we use the default documentation from implemented interface.
*/
@Override
public void detach() {
super.detach();
if (actionManager != null) {
// Remove any existing viewer. UI cast is just to make the
// compiler happy
actionManager.setViewer((UI) null);
}
} | 3.68 |
morf_AbstractSelectStatement_copyOnWriteOrMutate | /**
* Either uses {@link #shallowCopy()} and mutates the result, returning it,
* or mutates the statement directly, depending on
* {@link AliasedField#immutableDslEnabled()}.
*
* TODO for removal along with mutable behaviour.
*
* @param transform A transform which modifies the shallow copy builder.
* @param mutator Code which applies the local changes instead.
* @param <U> The builder type.
* @return The result (which may be {@code this}).
*/
@SuppressWarnings({ "unchecked" })
protected <U extends AbstractSelectStatementBuilder<T, ?>> T copyOnWriteOrMutate(Function<U, U> transform, Runnable mutator) {
if (AliasedField.immutableDslEnabled()) {
return transform.apply((U) shallowCopy()).build();
} else {
mutator.run();
return castToChild(this);
}
} | 3.68 |
hadoop_AMRMProxyTokenSecretManager_recover | /**
* Recover secretManager from state store. Called after serviceInit before
* serviceStart.
*
* @param state the state to recover from
*/
public void recover(RecoveredAMRMProxyState state) {
if (state != null) {
// recover the current master key
MasterKey currentKey = state.getCurrentMasterKey();
if (currentKey != null) {
this.currentMasterKey = new MasterKeyData(currentKey,
createSecretKey(currentKey.getBytes().array()));
} else {
LOG.warn("No current master key recovered from NM StateStore"
+ " for AMRMProxyTokenSecretManager");
}
// recover the next master key if not null
MasterKey nextKey = state.getNextMasterKey();
if (nextKey != null) {
this.nextMasterKey = new MasterKeyData(nextKey,
createSecretKey(nextKey.getBytes().array()));
this.timer.schedule(new NextKeyActivator(), this.activationDelay);
}
}
} | 3.68 |
hadoop_LoadManifestsStage_add | /**
* Add all statistics; synchronized.
* @param manifest manifest to add.
*/
public synchronized void add(TaskManifest manifest) {
manifestCount.incrementAndGet();
iostatistics.aggregate(manifest.getIOStatistics());
fileCount.addAndGet(manifest.getFilesToCommit().size());
directoryCount.addAndGet(manifest.getDestDirectories().size());
totalFileSize.addAndGet(manifest.getTotalFileSize());
taskIDs.add(manifest.getTaskID());
taskAttemptIDs.add(manifest.getTaskAttemptID());
} | 3.68 |
morf_MySqlMetaDataProvider_setAdditionalColumnMetadata | /**
* MySQL can (and must) provide the auto-increment start value from the column remarks.
*
* @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#setAdditionalColumnMetadata(RealName, ColumnBuilder, ResultSet)
*/
@Override
protected ColumnBuilder setAdditionalColumnMetadata(RealName tableName, ColumnBuilder columnBuilder, ResultSet columnMetaData) throws SQLException {
columnBuilder = super.setAdditionalColumnMetadata(tableName, columnBuilder, columnMetaData);
if (columnBuilder.isAutoNumbered()) {
int startValue = getAutoIncrementStartValue(columnMetaData.getString(COLUMN_REMARKS));
return columnBuilder.autoNumbered(startValue == -1 ? 1 : startValue);
} else {
return columnBuilder;
}
} | 3.68 |
pulsar_OneStageAuthenticationState_authenticateAsync | /**
* Warning: this method is not intended to be called concurrently.
*/
@Override
public CompletableFuture<AuthData> authenticateAsync(AuthData authData) {
if (authRole != null) {
// Authentication is already completed
return CompletableFuture.completedFuture(null);
}
this.authenticationDataSource = new AuthenticationDataCommand(
new String(authData.getBytes(), UTF_8), remoteAddress, sslSession);
return provider
.authenticateAsync(authenticationDataSource)
.thenApply(role -> {
this.authRole = role;
// Single stage authentication always returns null
return null;
});
} | 3.68 |
hadoop_OBSCommonUtils_maybeDeleteBeginningSlash | /**
* Delete the leading '/' of an OBS key, if present.
*
* @param key object key
* @return new key
*/
static String maybeDeleteBeginningSlash(final String key) {
return !StringUtils.isEmpty(key) && key.startsWith("/") ? key.substring(
1) : key;
} | 3.68 |
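Worked examples of the helper above, for illustration:
// maybeDeleteBeginningSlash("/bucket/dir/object") returns "bucket/dir/object"
// maybeDeleteBeginningSlash("dir/object")         returns "dir/object"
// maybeDeleteBeginningSlash("")                   returns "" (empty keys are returned unchanged)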
morf_TestingDataSourceModule_configure | /**
* @see com.google.inject.Module#configure(Binder)
*/
@Override
public void configure() {
bind(ConnectionResources.class).toInstance(new ConnectionResourcesBean(Resources.getResource("morf.properties")));
} | 3.68 |
hadoop_RouterFedBalance_setMap | /**
* Max number of concurrent maps to use for copy.
* @param value the number of maps for the distcp job.
*/
public Builder setMap(int value) {
this.map = value;
return this;
} | 3.68 |
hadoop_ContentCounts_getFileCount | // Get the number of files.
public long getFileCount() {
return contents.get(Content.FILE);
} | 3.68 |
hadoop_TextView_echo | /**
* Print strings escaping html.
* @param args the strings to print
*/
public void echo(Object... args) {
PrintWriter out = writer();
for (Object s : args) {
String escapedString = StringEscapeUtils.escapeEcmaScript(
StringEscapeUtils.escapeHtml4(s.toString()));
out.print(escapedString);
}
} | 3.68 |
hbase_MetricsConnection_updateTableMetric | /** Report table rpc context to metrics system. */
private void updateTableMetric(String methodName, TableName tableName, CallStats stats,
Throwable e) {
if (tableMetricsEnabled) {
if (methodName != null) {
String table = tableName != null && StringUtils.isNotEmpty(tableName.getNameAsString())
? tableName.getNameAsString()
: "unknown";
String metricKey = methodName + "_" + table;
// update table rpc context to metrics system,
// includes rpc call duration, rpc call request/response size(bytes).
updateRpcGeneric(metricKey, stats);
if (e != null) {
// rpc failure call counter with table name.
getMetric(FAILURE_CNT_BASE + metricKey, rpcCounters, counterFactory).inc();
}
}
}
} | 3.68 |
pulsar_AdminResource_domain | /**
* Get the domain of the topic (whether it's persistent or non-persistent).
*/
protected String domain() {
if (uri.getPath().startsWith("persistent/")) {
return "persistent";
} else if (uri.getPath().startsWith("non-persistent/")) {
return "non-persistent";
} else {
throw new RestException(Status.INTERNAL_SERVER_ERROR, "domain() invoked from wrong resource");
}
} | 3.68 |
framework_FieldGroup_configureField | /**
* Configures a field with the settings set for this FieldBinder.
* <p>
* By default this updates the buffered, read only and enabled state of the
* field. Also adds validators when applicable. Fields with read only data
* source are always configured as read only.
*
* @param field
* The field to update
*/
protected void configureField(Field<?> field) {
field.setBuffered(isBuffered());
field.setEnabled(isEnabled());
if (field.getPropertyDataSource().isReadOnly()) {
field.setReadOnly(true);
} else {
field.setReadOnly(isReadOnly());
}
} | 3.68 |
flink_AsyncWaitOperator_tryOnce | /** Increments number of attempts and fire the attempt. */
private void tryOnce(RetryableResultHandlerDelegator resultHandlerDelegator) throws Exception {
// increment current attempt number
resultHandlerDelegator.currentAttempts++;
// fire a new attempt
userFunction.asyncInvoke(
resultHandlerDelegator.resultHandler.inputRecord.getValue(),
resultHandlerDelegator);
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithUnionStatements | /**
* Tests the generation of a select statement with multiple union statements.
*/
@Test
public void testSelectWithUnionStatements() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(OTHER_TABLE))
.union(new SelectStatement(new FieldReference(STRING_FIELD)).from(new TableReference(TEST_TABLE)))
.unionAll(new SelectStatement(new FieldReference(STRING_FIELD)).from(new TableReference(ALTERNATE_TABLE)))
.orderBy(new FieldReference(STRING_FIELD));
String result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedSelectWithUnion(), result);
} | 3.68 |
framework_Upload_getSource | /**
* Gets the Upload where the event occurred.
*
* @return the Source of the event.
*/
@Override
public Upload getSource() {
return (Upload) super.getSource();
} | 3.68 |
hudi_OptionsResolver_isNonBlockingConcurrencyControl | /**
* Returns whether this is non-blocking concurrency control.
*/
public static boolean isNonBlockingConcurrencyControl(Configuration config) {
return WriteConcurrencyMode.isNonBlockingConcurrencyControl(config.getString(HoodieWriteConfig.WRITE_CONCURRENCY_MODE.key(), HoodieWriteConfig.WRITE_CONCURRENCY_MODE.defaultValue()));
} | 3.68 |
hbase_ReplicationSourceWALReader_isReaderRunning | /** Returns whether the reader thread is running */
public boolean isReaderRunning() {
return isReaderRunning && !isInterrupted();
} | 3.68 |
flink_SlotSharingGroup_setTaskOffHeapMemoryMB | /** Set the task off-heap memory for this SlotSharingGroup in MB. */
public Builder setTaskOffHeapMemoryMB(int taskOffHeapMemoryMB) {
this.taskOffHeapMemory = MemorySize.ofMebiBytes(taskOffHeapMemoryMB);
return this;
} | 3.68 |
flink_BinaryHashTable_getNumWriteBehindBuffers | /**
* Determines the number of buffers to be used for asynchronous write behind. It is currently
* computed as the logarithm of the number of buffers to the base 4, rounded up, minus 2. The
* upper limit for the number of write behind buffers is however set to six.
*
* @param numBuffers The number of available buffers.
* @return The number
*/
@VisibleForTesting
static int getNumWriteBehindBuffers(int numBuffers) {
int numIOBufs = (int) (Math.log(numBuffers) / Math.log(4) - 1.5);
return numIOBufs > 6 ? 6 : numIOBufs;
} | 3.68 |
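Hand-computed values of the formula above, for illustration:
// numBuffers = 64      -> log4(64)   = 3  -> (int) (3 - 1.5) = 1
// numBuffers = 256     -> log4(256)  = 4  -> (int) (4 - 1.5) = 2
// numBuffers = 1024    -> log4(1024) = 5  -> (int) (5 - 1.5) = 3
// numBuffers = 1048576 -> log4       = 10 -> 8.5 truncates to 8, capped at the maximum of 6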
hbase_HFileBlock_getBlockForCaching | /**
* Creates a new HFileBlock. Checksums have already been validated, so the byte buffer passed
* into the constructor of this newly created block does not have checksum data even though the
* header minor version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a 0 value
* in bytesPerChecksum. This method copies the on-disk or uncompressed data to build the
* HFileBlock which is used only while writing blocks and caching.
* <p>
* TODO: Should there be an option where a cache can ask that hbase preserve block checksums for
* checking after a block comes out of the cache? Otherwise, cache is responsible for blocks
* being wholesome (ECC memory or if file-backed, it does checksumming).
*/
HFileBlock getBlockForCaching(CacheConfig cacheConf) {
HFileContext newContext = new HFileContextBuilder().withBlockSize(fileContext.getBlocksize())
.withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached data
.withCompression(fileContext.getCompression())
.withDataBlockEncoding(fileContext.getDataBlockEncoding())
.withHBaseCheckSum(fileContext.isUseHBaseChecksum())
.withCompressTags(fileContext.isCompressTags())
.withIncludesMvcc(fileContext.isIncludesMvcc())
.withIncludesTags(fileContext.isIncludesTags())
.withColumnFamily(fileContext.getColumnFamily()).withTableName(fileContext.getTableName())
.build();
// Build the HFileBlock.
HFileBlockBuilder builder = new HFileBlockBuilder();
ByteBuff buff;
if (cacheConf.shouldCacheCompressed(blockType.getCategory())) {
buff = cloneOnDiskBufferWithHeader();
} else {
buff = cloneUncompressedBufferWithHeader();
}
return builder.withBlockType(blockType)
.withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader())
.withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader())
.withPrevBlockOffset(prevOffset).withByteBuff(buff).withFillHeader(FILL_HEADER)
.withOffset(startOffset).withNextBlockOnDiskSize(UNSET)
.withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length)
.withHFileContext(newContext).withByteBuffAllocator(cacheConf.getByteBuffAllocator())
.withShared(!buff.hasArray()).build();
} | 3.68 |
hadoop_ContainerServiceRecordProcessor_createTXTInfo | /**
* Create a container TXT record descriptor.
* @param serviceRecord the service record.
* @throws Exception if the descriptor creation yields an issue.
*/
protected void createTXTInfo(ServiceRecord serviceRecord) throws Exception {
TXTContainerRecordDescriptor txtInfo =
new TXTContainerRecordDescriptor(getPath(), serviceRecord);
registerRecordDescriptor(Type.TXT, txtInfo);
} | 3.68 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_isBucketClusteringSortEnabled | /**
* Whether to generate regular sort clustering plans for buckets that are not involved in merge or split.
*
* @return true to generate regular sort clustering plans for buckets that are not involved in merge or split, false otherwise.
*/
protected boolean isBucketClusteringSortEnabled() {
return true;
} | 3.68 |
framework_VaadinServiceClassLoaderUtil_findDefaultClassLoader | /**
* Called by {@link VaadinService#setDefaultClassLoader()} to acquire
* appropriate class loader to load application's classes (e.g. UI). Calls
* should be guarded by try/catch block to catch SecurityException and log
* appropriate message. The code for this method is modeled after
* recommendations laid out by JEE 5 specification sections EE.6.2.4.7 and
* EE.8.2.5
*
* @return Instance of {@link ClassLoader} that should be used by this
* instance of {@link VaadinService}
* @throws SecurityException
* if current security policy doesn't allow acquiring current
* thread's context class loader
*/
public static ClassLoader findDefaultClassLoader()
throws SecurityException {
return AccessController.doPrivileged(
new VaadinServiceClassLoaderUtil.GetClassLoaderPrivilegedAction());
} | 3.68 |
flink_ExecNodeBase_inputsContainSingleton | /** Whether singleton distribution is required. */
protected boolean inputsContainSingleton() {
return getInputProperties().stream()
.anyMatch(
p ->
p.getRequiredDistribution().getType()
== InputProperty.DistributionType.SINGLETON);
} | 3.68 |
flink_DateTimeUtils_parseTimestampTz | /**
* Parse date time string to timestamp based on the given time zone string and format. Returns
* null if parsing failed.
*
* @param dateStr the date time string
* @param tzStr the time zone id string
*/
private static long parseTimestampTz(String dateStr, String tzStr) throws ParseException {
TimeZone tz = TIMEZONE_CACHE.get(tzStr);
return parseTimestampMillis(dateStr, DateTimeUtils.TIMESTAMP_FORMAT_STRING, tz);
} | 3.68 |
framework_DDEventHandleStrategy_updateDragImage | /**
* Updates drag image DOM element. This method updates drag image position
* and adds additional styles. Default implementation hides drag element to
* be able to get target element by the point (see
* {@link #getTargetElement(NativePreviewEvent, DDManagerMediator)}. Method
* {@link #restoreDragImage(String, DDManagerMediator, NativePreviewEvent)}
* is used later on to restore the drag element in its state before
* temporary update. Returns "display" CSS style property of the original
* drag image. This value will be passed to the
* {@link #restoreDragImage(String, DDManagerMediator, NativePreviewEvent)}
* method.
*
* @param event
* GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
* @return "display" CSS style property of drag image element to restore it
* later on
*/
public String updateDragImage(NativePreviewEvent event,
DDManagerMediator mediator) {
VDragAndDropManager manager = mediator.getManager();
manager.updateDragImagePosition(event.getNativeEvent(),
manager.getDragElement());
String display = null;
if (manager.getDragElement() != null) {
// to detect the "real" target, hide dragelement temporary and
// use elementFromPoint
display = manager.getDragElement().getStyle().getDisplay();
manager.getDragElement().getStyle().setDisplay(Display.NONE);
}
return display;
} | 3.68 |
flink_RocksDBOptionsFactory_createWriteOptions | /**
* This method should set the additional options on top of the current options object. The
* current options object may contain pre-defined options based on flags that have been
* configured on the state backend.
*
* <p>It is important to set the options on the current object and return the result from the
* setter methods, otherwise the pre-defined options may get lost.
*
* @param currentOptions The options object with the pre-defined options.
* @param handlesToClose The collection to register newly created {@link
* org.rocksdb.RocksObject}s.
* @return The options object on which the additional options are set.
*/
default WriteOptions createWriteOptions(
WriteOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
return currentOptions;
} | 3.68 |
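A hedged sketch of an implementation following the advice above; setDisableWAL is a standard org.rocksdb.WriteOptions setter and is used here purely as an example:
@Override
public WriteOptions createWriteOptions(
        WriteOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
    // Mutate and return the passed-in object so the pre-defined options are preserved.
    // No new RocksObject is created here, so nothing needs to be added to handlesToClose.
    return currentOptions.setDisableWAL(true);
}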
morf_ConnectionResourcesBean_setDatabaseName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setDatabaseName(java.lang.String)
*/
@Override
public void setDatabaseName(String databaseName) {
this.databaseName = databaseName;
} | 3.68 |
flink_RpcEndpoint_onStop | /**
* User overridable callback which is called from {@link #internalCallOnStop()}.
*
* <p>This method is called when the RpcEndpoint is being shut down. The method is guaranteed to
* be executed in the main thread context and can be used to clean up internal state.
*
* <p>IMPORTANT: This method should never be called directly by the user.
*
* @return Future which is completed once all post stop actions are completed. If an error
* occurs this future is completed exceptionally
*/
protected CompletableFuture<Void> onStop() {
return CompletableFuture.completedFuture(null);
} | 3.68 |
hadoop_AbstractS3ACommitter_preCommitJob | /**
* Subclass-specific pre-Job-commit actions.
* The staging committers all load the pending files to verify that
* they can be loaded.
* The Magic committer does not, because the overhead of reading files
* from S3 makes it too expensive.
* @param commitContext commit context
* @param pending the pending operations
* @throws IOException any failure
*/
@VisibleForTesting
public void preCommitJob(CommitContext commitContext,
ActiveCommit pending) throws IOException {
} | 3.68 |
framework_DropTargetExtensionConnector_addDropListeners | /**
* Adds dragenter, dragover, dragleave and drop event listeners to the given
* DOM element.
*
* @param element
* DOM element to attach event listeners to.
*/
private void addDropListeners(Element element) {
EventTarget target = element.cast();
target.addEventListener(Event.DRAGENTER, dragEnterListener);
target.addEventListener(Event.DRAGOVER, dragOverListener);
target.addEventListener(Event.DRAGLEAVE, dragLeaveListener);
target.addEventListener(Event.DROP, dropListener);
} | 3.68 |
flink_HiveParserRowResolver_get | /**
* Gets the ColumnInfo for a tab_alias.col_alias type of column reference. If the tab_alias is
* not provided, as can be the case with a non-aliased column, this function looks up the column
* in all the table aliases in this row resolver and returns the match. It also throws an
* exception if the column is found in multiple table aliases. If no match is found, a null
* value is returned. This allows us to interpret both "select t.c1" type of references and
* "select c1" kind of references. The latter kind are what we call non-aliased column references
* in the query.
*
* @param tabAlias The table alias to match (this is null if the column reference is non
* aliased)
* @param colAlias The column name that is being searched for
* @return ColumnInfo
* @throws SemanticException
*/
public ColumnInfo get(String tabAlias, String colAlias) throws SemanticException {
ColumnInfo ret = null;
if (!isExprResolver && isAmbiguousReference(tabAlias, colAlias)) {
String colNamePrefix = tabAlias != null ? tabAlias + "." : "";
String fullQualifiedName = colNamePrefix + colAlias;
throw new SemanticException("Ambiguous column reference: " + fullQualifiedName);
}
if (tabAlias != null) {
tabAlias = tabAlias.toLowerCase();
HashMap<String, ColumnInfo> fMap = rslvMap.get(tabAlias);
if (fMap == null) {
return null;
}
ret = fMap.get(colAlias);
} else {
boolean found = false;
String foundTbl = null;
for (Map.Entry<String, LinkedHashMap<String, ColumnInfo>> rslvEntry :
rslvMap.entrySet()) {
String rslvKey = rslvEntry.getKey();
LinkedHashMap<String, ColumnInfo> cmap = rslvEntry.getValue();
for (Map.Entry<String, ColumnInfo> cmapEnt : cmap.entrySet()) {
if (colAlias.equalsIgnoreCase(cmapEnt.getKey())) {
// We can have an unaliased and one aliased mapping to a Column.
if (found && foundTbl != null && rslvKey != null) {
throw new SemanticException(
"Column "
+ colAlias
+ " Found in more than One Tables/Subqueries");
}
found = true;
foundTbl = rslvKey == null ? foundTbl : rslvKey;
ret = cmapEnt.getValue();
}
}
}
}
return ret;
} | 3.68 |
hadoop_ClientRegistryBinder_lookupExternalRestAPI | /**
* Look up an external REST API
* @param user user which will be qualified as per {@link #qualifyUser(String)}
* @param serviceClass service class
* @param instance instance name
* @param api API
* @return the API, or an exception is raised.
* @throws IOException
*/
public String lookupExternalRestAPI(String user,
String serviceClass,
String instance,
String api)
throws IOException {
String qualified = qualifyUser(user);
String path = servicePath(qualified, serviceClass, instance);
String restAPI = resolveExternalRestAPI(api, path);
if (restAPI == null) {
throw new PathNotFoundException(path + " API " + api);
}
return restAPI;
} | 3.68 |
hadoop_DefaultStringifier_storeArray | /**
* Stores the array of items in the configuration with the given keyName.
*
* @param <K> the class of the item
* @param conf the configuration to use
* @param items the objects to be stored
* @param keyName the name of the key to use
* @throws IndexOutOfBoundsException if the items array is empty
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> void storeArray(Configuration conf, K[] items,
String keyName) throws IOException {
if (items.length == 0) {
throw new IndexOutOfBoundsException();
}
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
GenericsUtil.getClass(items[0]));
try {
StringBuilder builder = new StringBuilder();
for (K item : items) {
builder.append(stringifier.toString(item)).append(SEPARATOR);
}
conf.set(keyName, builder.toString());
}
finally {
stringifier.close();
}
} | 3.68 |
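A hedged usage sketch of storeArray; the key name and values are illustrative, and it assumes a Serialization is registered for Text (Hadoop's default configuration registers WritableSerialization):
Configuration conf = new Configuration();
Text[] items = { new Text("a"), new Text("b") };
DefaultStringifier.storeArray(conf, items, "example.items");   // hypothetical key name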
hadoop_BalanceJob_nextProcedure | /**
* Append a procedure to the tail.
*/
public Builder nextProcedure(T procedure) {
int size = procedures.size();
if (size > 0) {
procedures.get(size - 1).setNextProcedure(procedure.name());
}
procedure.setNextProcedure(NEXT_PROCEDURE_NONE);
procedures.add(procedure);
return this;
} | 3.68 |
flink_KeyedStream_upperBoundExclusive | /** Set the upper bound to be exclusive. */
@PublicEvolving
public IntervalJoined<IN1, IN2, KEY> upperBoundExclusive() {
this.upperBoundInclusive = false;
return this;
} | 3.68 |
hibernate-validator_SizeValidatorForArraysOfInt_isValid | /**
* Checks the number of entries in an array.
*
* @param array The array to validate.
* @param constraintValidatorContext context in which the constraint is evaluated.
*
* @return Returns {@code true} if the array is {@code null} or the number of entries in
* {@code array} is between the specified {@code min} and {@code max} values (inclusive),
* {@code false} otherwise.
*/
@Override
public boolean isValid(int[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return true;
}
return array.length >= min && array.length <= max;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_compareNamespaceAndNode | /**
* Compare the first namespace in the given memory segment with the second namespace in the
* given node.
*
* @param namespaceSegment memory segment storing the first namespace.
* @param namespaceOffset offset of the first namespace in memory segment.
* @param namespaceLen length of the first namespace.
* @param targetNode the node storing the second namespace.
* @return Returns a negative integer, zero, or a positive integer as the first key is less
* than, equal to, or greater than the second.
*/
private int compareNamespaceAndNode(
MemorySegment namespaceSegment,
int namespaceOffset,
int namespaceLen,
long targetNode) {
Node nodeStorage = getNodeSegmentAndOffset(targetNode);
MemorySegment targetSegment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
int level = SkipListUtils.getLevel(targetSegment, offsetInSegment);
int targetKeyOffset = offsetInSegment + SkipListUtils.getKeyDataOffset(level);
return SkipListKeyComparator.compareNamespaceAndNode(
namespaceSegment, namespaceOffset, namespaceLen, targetSegment, targetKeyOffset);
} | 3.68 |
AreaShop_Utils_getOnlinePlayers | /**
* Gets the online players.
* Provides backwards compatibility for 1.7 and earlier, where getOnlinePlayers returns an array.
* @return Online players
*/
@SuppressWarnings("unchecked")
public static Collection<? extends Player> getOnlinePlayers() {
try {
Method onlinePlayerMethod = Server.class.getMethod("getOnlinePlayers");
if(onlinePlayerMethod.getReturnType().equals(Collection.class)) {
return ((Collection<? extends Player>)onlinePlayerMethod.invoke(Bukkit.getServer()));
} else {
return Arrays.asList((Player[])onlinePlayerMethod.invoke(Bukkit.getServer()));
}
} catch(Exception ex) {
AreaShop.debug("getOnlinePlayers error: " + ex.getMessage());
}
return new HashSet<>();
} | 3.68 |
hadoop_FlowActivityRowKey_parseRowKey | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey Byte representation of row key.
* @return A <cite>FlowActivityRowKey</cite> object.
*/
public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
return new FlowActivityRowKeyConverter().decode(rowKey);
} | 3.68 |
flink_ManagedTableListener_isManagedTable | /** Check a resolved catalog table is Flink's managed table or not. */
public static boolean isManagedTable(@Nullable Catalog catalog, CatalogBaseTable table) {
if (catalog == null || !catalog.supportsManagedTable()) {
// catalog not support managed table
return false;
}
if (table.getTableKind() != CatalogBaseTable.TableKind.TABLE
|| !(table instanceof CatalogTable)) {
// view is not managed table
return false;
}
Map<String, String> options;
try {
options = table.getOptions();
} catch (TableException ignore) {
// exclude abnormal tables, such as InlineCatalogTable that does not have the options
return false;
}
// check legacy connector, here we need to check the factory, other properties are dummy
if (TableFactoryUtil.isLegacyConnectorOptions(
catalog,
new Configuration(),
true,
ObjectIdentifier.of("dummy_catalog", "dummy_database", "dummy_table"),
(CatalogTable) table,
true)) {
// legacy connector is not managed table
return false;
}
if (!StringUtils.isNullOrWhitespaceOnly(options.get(FactoryUtil.CONNECTOR.key()))) {
// with connector is not managed table
return false;
}
if (table instanceof ResolvedCatalogBaseTable) {
table = ((ResolvedCatalogBaseTable<?>) table).getOrigin();
}
// ConnectorCatalogTable is not managed table
return !(table instanceof ConnectorCatalogTable);
} | 3.68 |
hadoop_LightWeightLinkedSet_removeElem | /**
* Remove the element corresponding to the key, given key.hashCode() == index.
*
* @return Return the entry with the element if exists. Otherwise return null.
*/
@Override
protected DoubleLinkedElement<T> removeElem(final T key) {
DoubleLinkedElement<T> found = (DoubleLinkedElement<T>) (super
.removeElem(key));
if (found == null) {
return null;
}
// update linked list
if (found.after != null) {
found.after.before = found.before;
}
if (found.before != null) {
found.before.after = found.after;
}
if (head == found) {
head = head.after;
}
if (tail == found) {
tail = tail.before;
}
// Update bookmark, if necessary.
if (found == this.bookmark.next) {
this.bookmark.next = found.after;
}
return found;
} | 3.68 |
hbase_CatalogReplicaLoadBalanceSimpleSelector_onError | /**
* When a client runs into RegionNotServingException, it will call this method to update
* Selector's internal state.
* @param loc the location which causes exception.
*/
@Override
public void onError(HRegionLocation loc) {
ConcurrentNavigableMap<byte[], StaleLocationCacheEntry> tableCache = computeIfAbsent(staleCache,
loc.getRegion().getTable(), () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR));
byte[] startKey = loc.getRegion().getStartKey();
tableCache.putIfAbsent(startKey, new StaleLocationCacheEntry(loc.getRegion().getEndKey()));
LOG.debug("Add entry to stale cache for table {} with startKey {}, {}",
loc.getRegion().getTable(), startKey, loc.getRegion().getEndKey());
} | 3.68 |
hadoop_MagicCommitTracker_upload | /**
* PUT an object.
* @param request the request
* @param inputStream input stream of data to be uploaded
* @throws IOException on problems
*/
@Retries.RetryTranslated
private void upload(PutObjectRequest request, InputStream inputStream) throws IOException {
trackDurationOfInvocation(trackerStatistics, COMMITTER_MAGIC_MARKER_PUT.getSymbol(),
() -> writer.putObject(request, PutObjectOptions.keepingDirs(),
new S3ADataBlocks.BlockUploadData(inputStream), false, null));
} | 3.68 |
hbase_ScanDeleteTracker_isDeleted | /**
* Check if the specified Cell buffer has been deleted by a previously seen delete.
* @param cell - current cell to check if deleted by a previously seen delete
*/
@Override
public DeleteResult isDeleted(Cell cell) {
long timestamp = cell.getTimestamp();
if (hasFamilyStamp && timestamp <= familyStamp) {
return DeleteResult.FAMILY_DELETED;
}
if (familyVersionStamps.contains(Long.valueOf(timestamp))) {
return DeleteResult.FAMILY_VERSION_DELETED;
}
if (deleteCell != null) {
int ret = -(this.comparator.compareQualifiers(cell, deleteCell));
if (ret == 0) {
if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
return DeleteResult.COLUMN_DELETED;
}
// Delete (aka DeleteVersion)
// If the timestamp is the same, keep this one
if (timestamp == deleteTimestamp) {
return DeleteResult.VERSION_DELETED;
}
// use assert or not?
assert timestamp < deleteTimestamp;
// different timestamp, let's clear the buffer.
deleteCell = null;
} else if (ret < 0) {
// Next column case.
deleteCell = null;
} else {
throw new IllegalStateException("isDelete failed: deleteBuffer="
+ Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(),
deleteCell.getQualifierLength())
+ ", qualifier="
+ Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength())
+ ", timestamp=" + timestamp + ", comparison result: " + ret);
}
}
return DeleteResult.NOT_DELETED;
} | 3.68 |
hbase_Procedure_acquireLock | /**
* The user should override this method if they need a lock on an Entity. A lock can be anything,
* and it is up to the implementor. The Procedure Framework will call this method just before it
* invokes {@link #execute(Object)}. It calls {@link #releaseLock(Object)} after the call to
* execute.
* <p/>
* If you need to hold the lock for the life of the Procedure -- i.e. you do not want any other
* Procedure interfering while this Procedure is running, see {@link #holdLock(Object)}.
* <p/>
* Example: in our Master we can execute request in parallel for different tables. We can create
* t1 and create t2 and these creates can be executed at the same time. Anything else on t1/t2 is
* queued waiting that specific table create to happen.
* <p/>
* There are 3 LockState:
* <ul>
* <li>LOCK_ACQUIRED should be returned when the proc has the lock and the proc is ready to
* execute.</li>
* <li>LOCK_YIELD_WAIT should be returned when the proc does not have the lock and the framework
* should take care of re-adding the procedure back to the runnable set for retry</li>
* <li>LOCK_EVENT_WAIT should be returned when the proc does not have the lock and someone will
* take care of re-adding the procedure back to the runnable set when the lock is available.</li>
* </ul>
* @return the lock state as described above.
*/
protected LockState acquireLock(TEnvironment env) {
return LockState.LOCK_ACQUIRED;
} | 3.68 |
hbase_WindowMovingAverage_getStatisticsAtIndex | /**
* Get statistics at index.
* @param index index of bar
*/
protected long getStatisticsAtIndex(int index) {
if (index < 0 || index >= getNumberOfStatistics()) {
// This case should not happen, but a prudent check.
throw new IndexOutOfBoundsException();
}
return lastN[index];
} | 3.68 |
cron-utils_SecondsDescriptor_describe | /**
* Provide a human readable description for On instance.
*
* @param on - On
* @return human readable description - String
*/
protected String describe(final On on, final boolean and) {
if (and) {
return nominalValue(on.getTime());
}
return String.format("%s %s ", bundle.getString("at"), nominalValue(on.getTime())) + "%s";
} | 3.68 |
hudi_HoodieAvroUtils_avroToBytes | /**
* Convert a given avro record to bytes.
*/
public static byte[] avroToBytes(GenericRecord record) {
return indexedRecordToBytes(record);
} | 3.68 |
AreaShop_GeneralRegion_getOwner | /**
* Get the player that is currently the owner of this region (either bought or rented it).
* @return The UUID of the owner of this region
*/
public UUID getOwner() {
if(this instanceof RentRegion) {
return ((RentRegion)this).getRenter();
} else {
return ((BuyRegion)this).getBuyer();
}
} | 3.68 |
flink_Rowtime_watermarksPeriodicAscending | /**
* Sets a built-in watermark strategy for ascending rowtime attributes.
*
* <p>Emits a watermark of the maximum observed timestamp so far minus 1. Rows that have a
* timestamp equal to the max timestamp are not late.
*/
public Rowtime watermarksPeriodicAscending() {
internalProperties.putString(
ROWTIME_WATERMARKS_TYPE, ROWTIME_WATERMARKS_TYPE_VALUE_PERIODIC_ASCENDING);
return this;
} | 3.68 |
hbase_ReplicationSink_replicateEntries | /**
* Replicate this array of entries directly into the local cluster using the native client. Only
* operates against raw protobuf type saving on a conversion from pb to pojo.
* @param entries WAL entries to be replicated.
* @param cells cell scanner for iteration.
* @param replicationClusterId Id which will uniquely identify source cluster FS client
* configurations in the replication configuration directory
* @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
* directory
* @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
* @throws IOException If failed to replicate the data
*/
public void replicateEntries(List<WALEntry> entries, final CellScanner cells,
String replicationClusterId, String sourceBaseNamespaceDirPath,
String sourceHFileArchiveDirPath) throws IOException {
if (entries.isEmpty()) {
return;
}
// Very simple optimization where we batch sequences of rows going
// to the same table.
try {
long totalReplicated = 0;
// Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
// invocation of this method per table and cluster id.
Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
Map<List<String>, Map<String, List<Pair<byte[], List<String>>>>> bulkLoadsPerClusters = null;
Pair<List<Mutation>, List<WALEntry>> mutationsToWalEntriesPairs =
new Pair<>(new ArrayList<>(), new ArrayList<>());
for (WALEntry entry : entries) {
TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
if (this.walEntrySinkFilter != null) {
if (this.walEntrySinkFilter.filter(table, entry.getKey().getWriteTime())) {
// Skip Cells in CellScanner associated with this entry.
int count = entry.getAssociatedCellCount();
for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
this.metrics.incrementFailedBatches();
throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
}
}
continue;
}
}
Cell previousCell = null;
Mutation mutation = null;
int count = entry.getAssociatedCellCount();
for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
this.metrics.incrementFailedBatches();
throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
}
Cell cell = cells.current();
// Handle bulk load hfiles replication
if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell);
if (bld.getReplicate()) {
if (bulkLoadsPerClusters == null) {
bulkLoadsPerClusters = new HashMap<>();
}
// Map of table name Vs list of pair of family and list of
// hfile paths from its namespace
Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap =
bulkLoadsPerClusters.computeIfAbsent(bld.getClusterIdsList(), k -> new HashMap<>());
buildBulkLoadHFileMap(bulkLoadHFileMap, table, bld);
}
} else if (CellUtil.matchingQualifier(cell, WALEdit.REPLICATION_MARKER)) {
Mutation put = processReplicationMarkerEntry(cell);
if (put == null) {
continue;
}
table = REPLICATION_SINK_TRACKER_TABLE_NAME;
List<UUID> clusterIds = new ArrayList<>();
for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
clusterIds.add(toUUID(clusterId));
}
put.setClusterIds(clusterIds);
addToHashMultiMap(rowMap, table, clusterIds, put);
} else {
// Handle wal replication
if (isNewRowOrType(previousCell, cell)) {
// Create new mutation
mutation = CellUtil.isDelete(cell)
? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
: new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
clusterIds.add(toUUID(clusterId));
}
mutation.setClusterIds(clusterIds);
mutation.setAttribute(ReplicationUtils.REPLICATION_ATTR_NAME,
HConstants.EMPTY_BYTE_ARRAY);
if (rsServerHost != null) {
rsServerHost.preReplicationSinkBatchMutate(entry, mutation);
mutationsToWalEntriesPairs.getFirst().add(mutation);
mutationsToWalEntriesPairs.getSecond().add(entry);
}
addToHashMultiMap(rowMap, table, clusterIds, mutation);
}
if (CellUtil.isDelete(cell)) {
((Delete) mutation).add(cell);
} else {
((Put) mutation).add(cell);
}
previousCell = cell;
}
}
totalReplicated++;
}
// TODO Replicating mutations and bulk loaded data can be made parallel
if (!rowMap.isEmpty()) {
LOG.debug("Started replicating mutations.");
for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
batch(entry.getKey(), entry.getValue().values(), rowSizeWarnThreshold);
}
LOG.debug("Finished replicating mutations.");
}
if (rsServerHost != null) {
List<Mutation> mutations = mutationsToWalEntriesPairs.getFirst();
List<WALEntry> walEntries = mutationsToWalEntriesPairs.getSecond();
for (int i = 0; i < mutations.size(); i++) {
rsServerHost.postReplicationSinkBatchMutate(walEntries.get(i), mutations.get(i));
}
}
if (bulkLoadsPerClusters != null) {
for (Entry<List<String>,
Map<String, List<Pair<byte[], List<String>>>>> entry : bulkLoadsPerClusters.entrySet()) {
Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = entry.getValue();
if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
LOG.debug("Replicating {} bulk loaded data", entry.getKey().toString());
Configuration providerConf = this.provider.getConf(this.conf, replicationClusterId);
try (HFileReplicator hFileReplicator = new HFileReplicator(providerConf,
sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf,
getConnection(), entry.getKey())) {
hFileReplicator.replicate();
LOG.debug("Finished replicating {} bulk loaded data", entry.getKey().toString());
}
}
}
}
int size = entries.size();
this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
this.totalReplicatedEdits.addAndGet(totalReplicated);
} catch (IOException ex) {
LOG.error("Unable to accept edit because:", ex);
this.metrics.incrementFailedBatches();
throw ex;
}
} | 3.68 |
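The batching above relies on two helpers that are not part of this snippet: isNewRowOrType, which decides when to start a new Mutation, and addToHashMultiMap, which files each Mutation under its table and cluster-id list. A minimal sketch of such a grouping helper is shown below; it is an assumption for illustration, not the actual HBase implementation.

  // Hypothetical grouping helper: nested computeIfAbsent calls so each
  // (table, clusterIds) pair accumulates its own batch of rows.
  private static <K1, K2, V> void addToHashMultiMap(Map<K1, Map<K2, List<V>>> map,
      K1 outerKey, K2 innerKey, V value) {
    map.computeIfAbsent(outerKey, k -> new HashMap<>())
        .computeIfAbsent(innerKey, k -> new ArrayList<>())
        .add(value);
  }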
flink_AbstractKeyedStateBackend_getKeyGroupRange | /** @see KeyedStateBackend */
@Override
public KeyGroupRange getKeyGroupRange() {
return keyGroupRange;
} | 3.68 |
rocketmq-connect_RetryUtil_asyncExecuteWithRetry | /**
 * Asynchronously execute the given callable with retry support.
 *
 * @param callable task to execute
 * @param retryTimes maximum number of retry attempts
 * @param sleepTimeInMilliSecond pause between attempts, in milliseconds
 * @param exponential whether the pause grows exponentially between attempts
 * @param timeoutMs timeout applied to the asynchronous execution, in milliseconds
 * @param executor thread pool used to run the task
 * @param <T> result type of the callable
 * @return the result produced by the callable
 * @throws Exception if execution still fails after the configured retries
*/
public static <T> T asyncExecuteWithRetry(Callable<T> callable,
int retryTimes,
long sleepTimeInMilliSecond,
boolean exponential,
long timeoutMs,
ThreadPoolExecutor executor) throws Exception {
Retry retry = new AsyncRetry(timeoutMs, executor);
return retry.doRetry(callable, retryTimes, sleepTimeInMilliSecond, exponential, null);
} | 3.68 |
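A hedged caller sketch, assuming the signature above; the pool size, retry counts and the Callable itself are illustrative only (fully qualified names stand in for imports).

  // Illustrative caller; the Callable is supplied by the user and may fail transiently.
  static String fetchWithRetry(java.util.concurrent.Callable<String> loadRemoteConfig) throws Exception {
    java.util.concurrent.ThreadPoolExecutor pool =
        (java.util.concurrent.ThreadPoolExecutor) java.util.concurrent.Executors.newFixedThreadPool(4);
    try {
      return RetryUtil.asyncExecuteWithRetry(
          loadRemoteConfig, // task that may fail transiently
          3,                // at most 3 retries
          1_000L,           // 1s base pause between attempts
          true,             // back off exponentially (1s, 2s, 4s, ...)
          30_000L,          // timeout handed to the async retry
          pool);
    } finally {
      pool.shutdown();
    }
  }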
flink_Pattern_or | /**
* Adds a condition that has to be satisfied by an event in order to be considered a match. If
* another condition has already been set, the new one is going to be combined with the previous
* with a logical {@code OR}. In other case, this is going to be the only condition.
*
* @param condition The condition as an {@link IterativeCondition}.
* @return The pattern with the new condition is set.
*/
public Pattern<T, F> or(IterativeCondition<F> condition) {
Preconditions.checkNotNull(condition, "The condition cannot be null.");
ClosureCleaner.clean(condition, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
if (this.condition == null) {
this.condition = condition;
} else {
this.condition = new RichOrCondition<>(this.condition, condition);
}
return this;
} | 3.68 |
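A short usage sketch; "Event", getSeverity() and getType() are assumed domain types, not part of Flink.

  // Matches events that either have severity above 5 or are typed CRITICAL.
  Pattern<Event, Event> alerts = Pattern.<Event>begin("alert")
      .where(new SimpleCondition<Event>() {
        @Override
        public boolean filter(Event e) {
          return e.getSeverity() > 5;
        }
      })
      .or(new SimpleCondition<Event>() {
        @Override
        public boolean filter(Event e) {
          return "CRITICAL".equals(e.getType());
        }
      });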
hadoop_ZStandardCompressor_setInputFromSavedData | // Copy enough data from userBuf to uncompressedDirectBuf.
private void setInputFromSavedData() {
int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
uncompressedDirectBuf.put(userBuf, userBufOff, len);
userBufLen -= len;
userBufOff += len;
uncompressedDirectBufLen = uncompressedDirectBuf.position();
} | 3.68 |
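The same copy-what-fits idiom, shown standalone as a hedged sketch; it does not touch the real compressor state, it only makes the bookkeeping explicit.

  // Copies only as much of the user buffer as fits in the direct buffer and
  // tells the caller how far to advance userBufOff / shrink userBufLen.
  static int copyWhatFits(byte[] userBuf, int userBufOff, int userBufLen,
      java.nio.ByteBuffer uncompressedDirectBuf) {
    int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
    uncompressedDirectBuf.put(userBuf, userBufOff, len);
    return len;
  }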
dubbo_AppResponse_setAttachments | /**
 * Replace the current attachments with a copy of the given map; a {@code null} map clears them.
 *
 * @param map key-value pairs to install as the new attachments, may be {@code null}
*/
public void setAttachments(Map<String, String> map) {
this.attachments = map == null ? new HashMap<>() : new HashMap<>(map);
} | 3.68 |
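A hedged caller sketch; the keys and values are invented, and the AppResponse("ok") constructor call assumes Dubbo's value-carrying constructor.

  AppResponse response = new AppResponse("ok");
  java.util.Map<String, String> attachments = new java.util.HashMap<>();
  attachments.put("traceId", "abc-123");
  attachments.put("region", "eu-west-1");
  response.setAttachments(attachments); // installs a defensive copy of the map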
morf_SqlDialect_getName | /**
* @see org.alfasoftware.morf.metadata.Table#getName()
*/
@Override
public String getName() {
return tableName;
} | 3.68 |
hbase_ShutdownHook_install | /**
* Install a shutdown hook that calls stop on the passed Stoppable and then thread joins against
* the passed <code>threadToJoin</code>. When this thread completes, it then runs the hdfs thread
* (This install removes the hdfs shutdown hook keeping a handle on it to run it after
* <code>threadToJoin</code> has stopped).
* <p>
* To suppress all shutdown hook handling -- both the running of the regionserver hook and of the
* hdfs hook code -- set {@link ShutdownHook#RUN_SHUTDOWN_HOOK} in {@link Configuration} to
* <code>false</code>. This configuration value is checked when the hook code runs.
   * @param conf Configuration checked for {@link ShutdownHook#RUN_SHUTDOWN_HOOK} when the hook
   *          runs.
   * @param fs Instance of Filesystem used by the RegionServer
* @param stop Installed shutdown hook will call stop against this passed
* <code>Stoppable</code> instance.
* @param threadToJoin After calling stop on <code>stop</code> will then join this thread.
*/
public static void install(final Configuration conf, final FileSystem fs, final Stoppable stop,
final Thread threadToJoin) {
Runnable fsShutdownHook = suppressHdfsShutdownHook(fs);
Thread t = new ShutdownHookThread(conf, stop, threadToJoin, fsShutdownHook);
ShutdownHookManager.affixShutdownHook(t, 0);
LOG.debug("Installed shutdown hook thread: " + t.getName());
} | 3.68 |
pulsar_PulsarAdminImpl_nonPersistentTopics | /**
 * @return the nonPersistentTopics management object
* @deprecated Since 2.0. See {@link #topics()}
*/
@Deprecated
public NonPersistentTopics nonPersistentTopics() {
return nonPersistentTopics;
} | 3.68 |
hadoop_AzureBlobFileSystem_setAcl | /**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*
* @param path Path to modify
* @param aclSpec List of AclEntry describing modifications, must include
* entries for user, group, and others for compatibility with
* permission bits.
* @throws IOException if an ACL could not be modified
*/
@Override
public void setAcl(final Path path, final List<AclEntry> aclSpec)
throws IOException {
LOG.debug("AzureBlobFileSystem.setAcl path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId,
fileSystemId, FSOperationType.SET_ACL, true, tracingHeaderFormat,
listener);
if (!getIsNamespaceEnabled(tracingContext)) {
throw new UnsupportedOperationException(
"setAcl is only supported by storage accounts with the hierarchical "
+ "namespace enabled.");
}
if (aclSpec == null || aclSpec.size() == 0) {
throw new IllegalArgumentException("The aclSpec argument is invalid.");
}
Path qualifiedPath = makeQualified(path);
try {
abfsStore.setAcl(qualifiedPath, aclSpec, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
}
} | 3.68 |
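An illustrative call, assuming an ABFS filesystem ("fs") with the hierarchical namespace enabled; the path and permissions are made up, and imports are omitted. As the javadoc requires, the spec covers user, group and other.

  List<AclEntry> aclSpec = Arrays.asList(
      new AclEntry.Builder().setType(AclEntryType.USER).setPermission(FsAction.ALL).build(),
      new AclEntry.Builder().setType(AclEntryType.GROUP).setPermission(FsAction.READ_EXECUTE).build(),
      new AclEntry.Builder().setType(AclEntryType.OTHER).setPermission(FsAction.NONE).build());
  fs.setAcl(new Path("/data/reports"), aclSpec);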
hbase_Server_isStopping | /** Returns true if the server is stopping */
// Note: This method is not part of the Stoppable Interface.
default boolean isStopping() {
return false;
} | 3.68 |
rocketmq-connect_AbstractPositionManagementService_set | /**
 * Send a position change notification for the given partition.
 *
 * @param partition the record partition whose position changed
 * @param position the new offset to record for that partition
*/
protected synchronized void set(PositionChange change, ExtendRecordPartition partition, RecordOffset position) {
String namespace = partition.getNamespace();
// When serializing the key, we add in the namespace information so the key is [namespace, real key]
byte[] key = keyConverter.fromConnectData(namespace, null, Arrays.asList(change.name(), namespace, partition != null ? partition.getPartition() : new HashMap<>()));
ByteBuffer keyBuffer = (key != null) ? ByteBuffer.wrap(key) : null;
byte[] value = valueConverter.fromConnectData(namespace, null, position != null ? position.getOffset() : new HashMap<>());
ByteBuffer valueBuffer = (value != null) ? ByteBuffer.wrap(value) : null;
notify(keyBuffer, valueBuffer);
} | 3.68 |
hadoop_TimelineEntity_getOtherInfoJAXB | // Required by JAXB
@Private
@XmlElement(name = "otherinfo")
public HashMap<String, Object> getOtherInfoJAXB() {
return otherInfo;
} | 3.68 |
hudi_TableHeader_get | /**
 * Look up a field name by its index.
*/
public String get(int index) {
return fieldNames.get(index);
} | 3.68 |
rocketmq-connect_ChangeCaseConfig_to | /**
 * Target case format.
 *
 * @return the case format that values are converted to
*/
public CaseFormat to() {
return this.to;
} | 3.68 |
hbase_HFileBlock_addMetaData | /**
* Adds metadata at current position (position is moved forward). Does not flip or reset.
* @return The passed <code>destination</code> with metadata added.
*/
private ByteBuffer addMetaData(final ByteBuffer destination, boolean includeNextBlockMetadata) {
destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
destination.putLong(this.offset);
if (includeNextBlockMetadata) {
destination.putInt(this.nextBlockOnDiskSize);
}
return destination;
} | 3.68 |
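A hedged sketch of the reverse operation, only to make the byte layout explicit; this reader is not part of HBase.

  // Field order mirrors addMetaData: 1-byte checksum flag, 8-byte offset,
  // then an optional 4-byte next-block-on-disk size.
  static void readMetaData(java.nio.ByteBuffer source, boolean includeNextBlockMetadata) {
    boolean usesHBaseChecksum = source.get() == (byte) 1;
    long offset = source.getLong();
    int nextBlockOnDiskSize = includeNextBlockMetadata ? source.getInt() : -1;
    System.out.printf("checksum=%b offset=%d nextBlockOnDiskSize=%d%n",
        usesHBaseChecksum, offset, nextBlockOnDiskSize);
  }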
hadoop_AMRunner_startAMFromRumenTrace | /**
* Parse workload from a rumen trace file.
*/
private void startAMFromRumenTrace(String inputTrace, long baselineTimeMS)
throws IOException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "file:///");
File fin = new File(inputTrace);
try (JobTraceReader reader = new JobTraceReader(
new Path(fin.getAbsolutePath()), conf)) {
LoggedJob job = reader.getNext();
while (job != null) {
try {
AMDefinitionRumen amDef =
AMDefinitionFactory.createFromRumenTrace(job, baselineTimeMS,
slsRunner);
startAMs(amDef);
} catch (Exception e) {
LOG.error("Failed to create an AM", e);
}
job = reader.getNext();
}
}
} | 3.68 |
framework_AbsoluteLayout_setLeftUnits | /**
* Sets the unit for the 'left' attribute.
*
* @param leftUnits
* See {@link Sizeable} UNIT_SYMBOLS for a description of the
* available units.
*/
public void setLeftUnits(Unit leftUnits) {
this.leftUnits = leftUnits;
markAsDirty();
} | 3.68 |
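A hedged Vaadin usage sketch; the component and coordinates are invented.

  AbsoluteLayout layout = new AbsoluteLayout();
  Label label = new Label("Hello");
  layout.addComponent(label, "left: 50px; top: 20px;");
  AbsoluteLayout.ComponentPosition pos = layout.getPosition(label);
  pos.setLeftUnits(Unit.PERCENTAGE); // reinterpret the stored left value as a percentage
  pos.setLeftValue(10.0f);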