name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hudi_Option_toJavaOptional | /**
* Convert to java Optional.
*/
public Optional<T> toJavaOptional() {
return Optional.ofNullable(val);
} | 3.68 |
hadoop_InterruptEscalator_isSignalAlreadyReceived | /**
* Flag set if a signal has been received.
* @return true if there has been one interrupt already.
*/
public boolean isSignalAlreadyReceived() {
return signalAlreadyReceived.get();
} | 3.68 |
hbase_ProcedureCoordinator_startProcedure | /**
* Kick off the named procedure. Currently only one procedure with the same type and name is
* allowed to run at a time.
* @param procName name of the procedure to start
* @param procArgs arguments for the procedure
* @param expectedMembers expected members to start
* @return handle to the running procedure, if it was started correctly, <tt>null</tt> otherwise.
* Null could be due to submitting a procedure multiple times (or one with the same name),
* or runtime exception. Check the procedure's monitor that holds a reference to the
* exception that caused the failure.
*/
public Procedure startProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs,
List<String> expectedMembers) {
Procedure proc = createProcedure(fed, procName, procArgs, expectedMembers);
if (!this.submitProcedure(proc)) {
LOG.error("Failed to submit procedure '" + procName + "'");
return null;
}
return proc;
} | 3.68 |
morf_TableReference_field | /**
* @param fieldName the name of the field
* @return reference to a field on this table.
*/
public FieldReference field(String fieldName) {
return FieldReference.field(this, fieldName).build();
} | 3.68 |
flink_FileSystemSafetyNet_closeSafetyNetAndGuardedResourcesForThread | /**
* Closes the safety net for a thread. This closes all remaining unclosed streams that were
* opened by safety-net-guarded file systems. After this method was called, no streams can be
* opened any more from any FileSystem instance that was obtained while the thread was guarded
* by the safety net.
*
* <p>This method should be called at the very end of a guarded thread.
*/
@Internal
public static void closeSafetyNetAndGuardedResourcesForThread() {
SafetyNetCloseableRegistry registry = REGISTRIES.get();
if (null != registry) {
REGISTRIES.remove();
IOUtils.closeQuietly(registry);
}
} | 3.68 |
zilla_HttpServerFactory_encodeLiteral | // TODO dynamic table, Huffman, never indexed
private void encodeLiteral(
HpackLiteralHeaderFieldFW.Builder builder,
HpackContext hpackContext,
DirectBuffer nameBuffer,
DirectBuffer valueBuffer)
{
builder.type(WITHOUT_INDEXING);
final int nameIndex = hpackContext.index(nameBuffer);
if (nameIndex != -1)
{
builder.name(nameIndex);
}
else
{
builder.name(nameBuffer, 0, nameBuffer.capacity());
}
builder.value(valueBuffer, 0, valueBuffer.capacity());
} | 3.68 |
hbase_HBaseTestingUtility_expireRegionServerSession | /**
* Expire a region server's session
* @param index which RS
*/
public void expireRegionServerSession(int index) throws Exception {
HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
expireSession(rs.getZooKeeper(), false);
decrementMinRegionServerCount();
} | 3.68 |
morf_SelectStatement_getSetOperators | /**
* @return the list of set operators to be applied on this select statement.
*/
public List<SetOperator> getSetOperators() {
return setOperators;
} | 3.68 |
framework_SimpleTree_addDoubleClickHandler | /**
* {@inheritDoc} Events are not fired when double clicking child widgets.
*/
@Override
public HandlerRegistration addDoubleClickHandler(
DoubleClickHandler handler) {
if (textDoubleClickHandlerManager == null) {
textDoubleClickHandlerManager = new HandlerManager(this);
addDomHandler(event -> {
if (event.getNativeEvent().getEventTarget().cast() == text) {
textDoubleClickHandlerManager.fireEvent(event);
}
}, DoubleClickEvent.getType());
}
return textDoubleClickHandlerManager
.addHandler(DoubleClickEvent.getType(), handler);
} | 3.68 |
flink_HiveParserBaseSemanticAnalyzer_convert | /* This method returns the flipped big-endian representation of value */
public static ImmutableBitSet convert(int value, int length) {
BitSet bits = new BitSet();
for (int index = length - 1; index >= 0; index--) {
if (value % 2 != 0) {
bits.set(index);
}
value = value >>> 1;
}
// We flip the bits because Calcite considers that '1'
// means that the column participates in the GroupBy
// and '0' does not, as opposed to grouping_id.
bits.flip(0, length);
return ImmutableBitSet.fromBitSet(bits);
} | 3.68 |
pulsar_SaslRoleToken_isExpired | /**
* Returns if the token has expired.
*
* @return if the token has expired.
*/
public boolean isExpired() {
return getExpires() != -1 && System.currentTimeMillis() > getExpires();
} | 3.68 |
morf_ConcatenatedField_getConcatenationFields | /**
* Get the fields to be concatenated
*
* @return the fields to be concatenated
*/
public List<AliasedField> getConcatenationFields() {
return fields;
} | 3.68 |
hadoop_RegistryPathUtils_encodeYarnID | /**
* Perform whatever transforms are needed to get a YARN ID into
* a DNS-compatible name
* @param yarnId ID as string of YARN application, instance or container
* @return a string suitable for use in registry paths.
*/
public static String encodeYarnID(String yarnId) {
return yarnId.replace("container", "ctr").replace("_", "-");
} | 3.68 |
flink_CheckpointProperties_forSavepoint | /**
* Creates the checkpoint properties for a (manually triggered) savepoint.
*
* <p>Savepoints are not queued due to time trigger limits. They have to be garbage collected
* manually.
*
* @return Checkpoint properties for a (manually triggered) savepoint.
*/
public static CheckpointProperties forSavepoint(
boolean forced, SavepointFormatType formatType) {
return new CheckpointProperties(
forced,
SavepointType.savepoint(formatType),
false,
false,
false,
false,
false,
false);
} | 3.68 |
hudi_BufferedRandomAccessFile_fillBuffer | /**
* read ahead file contents to buffer.
* @return number of bytes filled
* @throws IOException
*/
private int fillBuffer() throws IOException {
int cnt = 0;
int bytesToRead = this.capacity;
// blocking read, until buffer is filled or EOF reached
while (bytesToRead > 0) {
int n = super.read(this.dataBuffer.array(), cnt, bytesToRead);
if (n < 0) {
break;
}
cnt += n;
bytesToRead -= n;
}
this.isEOF = (cnt < this.dataBuffer.array().length);
this.diskPosition += cnt;
return cnt;
} | 3.68 |
hadoop_MkdirOperation_execute | /**
*
* Make the given path and all non-existent parents into
* directories.
* @return true if a directory was created or already existed
* @throws FileAlreadyExistsException there is a file at the path specified
* @throws IOException other IO problems
*/
@Override
@Retries.RetryTranslated
public Boolean execute() throws IOException {
LOG.debug("Making directory: {}", dir);
if (dir.isRoot()) {
// fast exit for root.
return true;
}
// get the file status of the path.
// this is done even for a magic path, to avoid always issuing PUT
// requests. Doing that without a check would seem to be an
// optimization, but it is not because
// 1. PUT is slower than HEAD
// 2. Write capacity is less than read capacity on a shard
// 3. It adds needless entries in versioned buckets, slowing
// down subsequent operations.
FileStatus fileStatus = getPathStatusExpectingDir(dir);
if (fileStatus != null) {
if (fileStatus.isDirectory()) {
return true;
} else {
throw new FileAlreadyExistsException("Path is a file: " + dir);
}
}
// file status was null
// is the path magic?
// If so, we declare success without looking any further
if (isMagicPath) {
// Create the marker file immediately,
// and don't delete markers
callbacks.createFakeDirectory(dir, true);
return true;
}
// Walk path to root, ensuring closest ancestor is a directory, not file
Path fPart = dir.getParent();
try {
while (fPart != null && !fPart.isRoot()) {
fileStatus = getPathStatusExpectingDir(fPart);
if (fileStatus == null) {
// nothing at this path, so validate the parent
fPart = fPart.getParent();
continue;
}
if (fileStatus.isDirectory()) {
// the parent dir exists. All is good.
break;
}
// there's a file at the parent entry
throw new FileAlreadyExistsException(String.format(
"Can't make directory for path '%s' since it is a file.",
fPart));
}
} catch (AccessDeniedException e) {
LOG.info("mkdirs({}}: Access denied when looking"
+ " for parent directory {}; skipping checks",
dir, fPart);
LOG.debug("{}", e, e);
} | 3.68 |
hadoop_BlockMovementAttemptFinished_getStatus | /**
* @return block movement status code.
*/
public BlockMovementStatus getStatus() {
return status;
} | 3.68 |
hbase_BoundedRecoveredHFilesOutputSink_writeRemainingEntryBuffers | /**
* Write out the remaining RegionEntryBuffers and close the writers.
* @return true when there is no error.
*/
private boolean writeRemainingEntryBuffers() throws IOException {
for (EntryBuffers.RegionEntryBuffer buffer : entryBuffers.buffers.values()) {
closeCompletionService.submit(() -> {
append(buffer);
return null;
});
}
boolean progressFailed = false;
try {
for (int i = 0, n = entryBuffers.buffers.size(); i < n; i++) {
Future<Void> future = closeCompletionService.take();
future.get();
if (!progressFailed && reporter != null && !reporter.progress()) {
progressFailed = true;
}
}
} catch (InterruptedException e) {
IOException iie = new InterruptedIOException();
iie.initCause(e);
throw iie;
} catch (ExecutionException e) {
throw new IOException(e.getCause());
} finally {
closeThreadPool.shutdownNow();
}
return !progressFailed;
} | 3.68 |
hbase_RecoverableZooKeeper_connect | /**
* Creates a new connection to ZooKeeper, pulling settings and ensemble config from the specified
* configuration object using methods from {@link ZKConfig}. Sets the connection status monitoring
* watcher to the specified watcher.
* @param conf configuration to pull ensemble and other settings from
* @param watcher watcher to monitor connection changes
* @param ensemble ZooKeeper servers quorum string
* @param identifier value used to identify this client instance.
* @return connection to zookeeper
* @throws IOException if unable to connect to zk or config problem
*/
public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher,
final String identifier) throws IOException {
if (ensemble == null) {
throw new IOException("Unable to determine ZooKeeper ensemble");
}
int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
if (LOG.isTraceEnabled()) {
LOG.trace("{} opening connection to ZooKeeper ensemble={}", identifier, ensemble);
}
int retry = conf.getInt("zookeeper.recovery.retry", 3);
int retryIntervalMillis = conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000);
int multiMaxSize = conf.getInt("zookeeper.multi.max.size", 1024 * 1024);
return new RecoverableZooKeeper(ensemble, timeout, watcher, retry, retryIntervalMillis,
maxSleepTime, identifier, multiMaxSize);
} | 3.68 |
hbase_User_getToken | /**
* Returns the Token of the specified kind associated with this user, or null if the Token is not
* present.
* @param kind the kind of token
* @param service service on which the token is supposed to be used
* @return the token of the specified kind.
*/
public Token<?> getToken(String kind, String service) throws IOException {
for (Token<?> token : ugi.getTokens()) {
if (
token.getKind().toString().equals(kind)
&& (service != null && token.getService().toString().equals(service))
) {
return token;
}
}
return null;
} | 3.68 |
flink_DataSet_reduceGroup | /**
* Applies a GroupReduce transformation on a non-grouped {@link DataSet}.
*
* <p>The transformation calls a {@link
* org.apache.flink.api.common.functions.RichGroupReduceFunction} once with the full DataSet.
* The GroupReduceFunction can iterate over all elements of the DataSet and emit any number of
* output elements including none.
*
* @param reducer The GroupReduceFunction that is applied on the DataSet.
* @return A GroupReduceOperator that represents the reduced DataSet.
* @see org.apache.flink.api.common.functions.RichGroupReduceFunction
* @see org.apache.flink.api.java.operators.GroupReduceOperator
* @see DataSet
*/
public <R> GroupReduceOperator<T, R> reduceGroup(GroupReduceFunction<T, R> reducer) {
if (reducer == null) {
throw new NullPointerException("GroupReduce function must not be null.");
}
String callLocation = Utils.getCallLocationName();
TypeInformation<R> resultType =
TypeExtractor.getGroupReduceReturnTypes(reducer, getType(), callLocation, true);
return new GroupReduceOperator<>(this, resultType, clean(reducer), callLocation);
} | 3.68 |
flink_TaskSlot_markInactive | /**
* Mark the slot as inactive/allocated. A slot can only be marked as inactive/allocated if it's
* in state allocated or active.
*
* @return True if the new state of the slot is allocated; otherwise false
*/
public boolean markInactive() {
if (TaskSlotState.ACTIVE == state || TaskSlotState.ALLOCATED == state) {
state = TaskSlotState.ALLOCATED;
return true;
} else {
return false;
}
} | 3.68 |
hadoop_StoreContext_getBucketLocation | /**
* Get the location of the bucket.
* @return the bucket location.
* @throws IOException failure.
*/
public String getBucketLocation() throws IOException {
return contextAccessors.getBucketLocation();
} | 3.68 |
streampipes_OpcUaTypes_getType | /**
* Maps OPC UA data types to internal StreamPipes data types
*
* @param o data type id as UInteger
* @return StreamPipes internal data type
*/
public static Datatypes getType(UInteger o) {
if (UInteger.valueOf(4).equals(o)
| UInteger.valueOf(5).equals(o)
| UInteger.valueOf(6).equals(o)
| UInteger.valueOf(7).equals(o)
| UInteger.valueOf(8).equals(o)
| UInteger.valueOf(9).equals(o)
| UInteger.valueOf(27).equals(o)) {
return Datatypes.Integer;
} else if (UInteger.valueOf(8).equals(o)) {
return Datatypes.Long;
} else if (UInteger.valueOf(11).equals(o)) {
return Datatypes.Double;
} else if (UInteger.valueOf(10).equals(o) | UInteger.valueOf(26).equals(o) | UInteger.valueOf(50).equals(o)) {
return Datatypes.Float;
} else if (UInteger.valueOf(1).equals(o)) {
return Datatypes.Boolean;
} else if (UInteger.valueOf(12).equals(o)) {
return Datatypes.String;
}
return Datatypes.String;
} | 3.68 |
hbase_TableDescriptorBuilder_getDurability | /**
* Returns the durability setting for the table.
* @return durability setting for the table.
*/
@Override
public Durability getDurability() {
return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY);
} | 3.68 |
hbase_FilterBase_getNextCellHint | /**
* Filters that are not sure which key must be next seeked to, can inherit this implementation
* that, by default, returns a null Cell. {@inheritDoc}
*/
@Override
public Cell getNextCellHint(Cell currentCell) throws IOException {
return null;
} | 3.68 |
graphhopper_MaxAxleLoad_create | /**
* Currently allows storing 0.5 to max=0.5*2⁷ tons and infinity. If a value is between the maximum and infinity,
* it is assumed to take the maximum value. To save bits it might make more sense to store only a few values, as
* was done with the MappedDecimalEncodedValue, but the handling (or rounding) of unknown values is unclear.
*/
public static DecimalEncodedValue create() {
return new DecimalEncodedValueImpl(KEY, 7, 0, 0.5, false, false, true);
} | 3.68 |
hadoop_BoundedResourcePool_numCreated | /**
* Number of items created so far. Mostly for testing purposes.
* @return the count.
*/
public int numCreated() {
synchronized (createdItems) {
return createdItems.size();
}
} | 3.68 |
hbase_HRegionFileSystem_commitDaughterRegion | /**
* Commit a daughter region, moving it from the split temporary directory to the proper location
* in the filesystem.
* @param regionInfo daughter {@link org.apache.hadoop.hbase.client.RegionInfo}
*/
public Path commitDaughterRegion(final RegionInfo regionInfo, List<Path> allRegionFiles,
MasterProcedureEnv env) throws IOException {
Path regionDir = this.getSplitsDir(regionInfo);
if (fs.exists(regionDir)) {
// Write HRI to a file in case we need to recover hbase:meta
Path regionInfoFile = new Path(regionDir, REGION_INFO_FILE);
byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
env.getMasterConfiguration(), fs, getTableDir(), regionInfo, false);
insertRegionFilesIntoStoreTracker(allRegionFiles, env, regionFs);
}
return regionDir;
} | 3.68 |
hadoop_JsonSerialization_fromInstance | /**
* clone by converting to JSON and back again.
* This is much less efficient than any Java clone process.
* @param instance instance to duplicate
* @return a new instance
* @throws IOException IO problems.
*/
public T fromInstance(T instance) throws IOException {
return fromJson(toJson(instance));
} | 3.68 |
flink_TestcontainersSettings_logger | /**
* Sets the {@code logger} and returns a reference to this Builder enabling method
* chaining.
*
* @param logger The {@code logger} to set.
* @return A reference to this Builder.
*/
public Builder logger(Logger logger) {
this.logger = logger;
return this;
} | 3.68 |
framework_ApplicationConnection_start | /**
* Starts this application. Don't call this method directly - it's called by
* {@link ApplicationConfiguration#startNextApplication()}, which should be
* called once this application has started (first response received) or
* failed to start. This ensures that the applications are started in order,
* to avoid session-id problems.
*
*/
public void start() {
String jsonText = configuration.getUIDL();
if (jsonText == null) {
// initial UIDL not in DOM, request from server
getMessageSender().resynchronize();
} else {
// initial UIDL provided in DOM, continue as if returned by request
// Hack to avoid logging an error in endRequest()
getMessageSender().startRequest();
getMessageHandler()
.handleMessage(MessageHandler.parseJson(jsonText));
}
// Tooltip can't be created earlier because the
// necessary fields are not setup to add it in the
// correct place in the DOM
if (!tooltipInitialized) {
tooltipInitialized = true;
ApplicationConfiguration.runWhenDependenciesLoaded(
() -> getVTooltip().initializeAssistiveTooltips());
}
} | 3.68 |
framework_HierarchicalDataCommunicator_doCollapse | /**
* Collapses the given item and removes its sub-hierarchy. Calling this
* method will have no effect if the row is already collapsed. The index is
* provided by the client-side or calculated from a full data request.
* {@code syncAndRefresh} indicates whether the changes should be
* synchronised to the client and the data provider be notified.
*
* @param item
* the item to collapse
* @param index
* the index of the item
* @param syncAndRefresh
* {@code true} if the changes should be synchronised to the
* client and the data provider should be notified of the
* changes, {@code false} otherwise.
*/
private void doCollapse(T item, Integer index, boolean syncAndRefresh) {
Range removedRows = mapper.collapse(item, index);
if (syncAndRefresh) {
if (!reset && !removedRows.isEmpty()) {
getClientRpc().removeRows(removedRows.getStart(),
removedRows.length());
}
refresh(item);
}
} | 3.68 |
pulsar_OwnedBundle_isActive | /**
* Access method to the namespace state to check whether the namespace is active or not.
*
* @return boolean value indicate that the namespace is active or not.
*/
public boolean isActive() {
return IS_ACTIVE_UPDATER.get(this) == TRUE;
} | 3.68 |
dubbo_RpcServiceContext_getInvoker | /**
* @deprecated Replace to getUrl()
*/
@Override
@Deprecated
public Invoker<?> getInvoker() {
return invoker;
} | 3.68 |
flink_MemorySegmentFactory_allocateOffHeapUnsafeMemory | /**
* Allocates an off-heap unsafe memory and creates a new memory segment to represent that
* memory.
*
* <p>Creation of this segment schedules its memory freeing operation when its java wrapping
* object is about to be garbage collected, similar to {@link
* java.nio.DirectByteBuffer#DirectByteBuffer(int)}. The difference is that this memory
* allocation is out of option -XX:MaxDirectMemorySize limitation.
*
* @param size The size of the off-heap unsafe memory segment to allocate.
* @param owner The owner to associate with the off-heap unsafe memory segment.
* @param customCleanupAction A custom action to run upon calling GC cleaner.
* @return A new memory segment, backed by off-heap unsafe memory.
*/
public static MemorySegment allocateOffHeapUnsafeMemory(
int size, Object owner, Runnable customCleanupAction) {
long address = MemoryUtils.allocateUnsafe(size);
ByteBuffer offHeapBuffer = MemoryUtils.wrapUnsafeMemoryWithByteBuffer(address, size);
Runnable cleaner = MemoryUtils.createMemoryCleaner(address, customCleanupAction);
return new MemorySegment(offHeapBuffer, owner, false, cleaner);
} | 3.68 |
hbase_KeyValue_compareWithoutRow | /**
* Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This
* method is used both in the normal comparator and the "same-prefix" comparator. Note that we
* are assuming that row portions of both KVs have already been parsed and found identical, and
* we don't validate that assumption here. the length of the common prefix of the two key-values
* being compared, including row length and row
*/
private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength,
byte[] right, int roffset, int rlength, short rowlength) {
/***
* KeyValue Format and commonLength:
* |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|....
* ------------------|-------commonLength--------|--------------
*/
int commonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rowlength;
// commonLength + TIMESTAMP_TYPE_SIZE
int commonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + commonLength;
// ColumnFamily + Qualifier length.
int lcolumnlength = llength - commonLengthWithTSAndType;
int rcolumnlength = rlength - commonLengthWithTSAndType;
byte ltype = left[loffset + (llength - 1)];
byte rtype = right[roffset + (rlength - 1)];
// If the column is not specified, the "minimum" key type appears the
// latest in the sorted order, regardless of the timestamp. This is used
// for specifying the last key/value in a given row, because there is no
// "lexicographically last column" (it would be infinitely long). The
// "maximum" key type does not need this behavior.
if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) {
// left is "bigger", i.e. it appears later in the sorted order
return 1;
}
if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) {
return -1;
}
int lfamilyoffset = commonLength + loffset;
int rfamilyoffset = commonLength + roffset;
// Column family length.
int lfamilylength = left[lfamilyoffset - 1];
int rfamilylength = right[rfamilyoffset - 1];
// If left family size is not equal to right family size, we need not
// compare the qualifiers.
boolean sameFamilySize = (lfamilylength == rfamilylength);
int common = 0;
if (commonPrefix > 0) {
common = Math.max(0, commonPrefix - commonLength);
if (!sameFamilySize) {
// Common should not be larger than Math.min(lfamilylength,
// rfamilylength).
common = Math.min(common, Math.min(lfamilylength, rfamilylength));
} else {
common = Math.min(common, Math.min(lcolumnlength, rcolumnlength));
}
}
if (!sameFamilySize) {
// comparing column family is enough.
return Bytes.compareTo(left, lfamilyoffset + common, lfamilylength - common, right,
rfamilyoffset + common, rfamilylength - common);
}
// Compare family & qualifier together.
final int comparison = Bytes.compareTo(left, lfamilyoffset + common, lcolumnlength - common,
right, rfamilyoffset + common, rcolumnlength - common);
if (comparison != 0) {
return comparison;
}
////
// Next compare timestamps.
long ltimestamp = Bytes.toLong(left, loffset + (llength - TIMESTAMP_TYPE_SIZE));
long rtimestamp = Bytes.toLong(right, roffset + (rlength - TIMESTAMP_TYPE_SIZE));
int compare = compareTimestamps(ltimestamp, rtimestamp);
if (compare != 0) {
return compare;
}
// Compare types. Let the delete types sort ahead of puts; i.e. types
// of higher numbers sort before those of lesser numbers. Maximum (255)
// appears ahead of everything, and minimum (0) appears after
// everything.
return (0xff & rtype) - (0xff & ltype);
} | 3.68 |
hadoop_SchedulingResponse_getSchedulingRequest | /**
* Get Scheduling Request.
* @return Scheduling Request.
*/
public SchedulingRequest getSchedulingRequest() {
return this.schedulingRequest;
} | 3.68 |
morf_AbstractSqlDialectTest_testAverage | /**
* Tests an average statement
*/
@Test
public void testAverage() {
final TableReference tableOne = tableRef("TableOne");
SelectStatement testStatement = select(average(field("name")), averageDistinct(field("name"))).from(tableOne);
assertEquals("SELECT AVG(name), AVG(DISTINCT name) FROM " + tableName("TableOne"), testDialect.convertStatementToSQL(testStatement));
} | 3.68 |
druid_PropertiesUtils_loadProperties | /**
* Load properties from the given file into Properties.
*/
public static Properties loadProperties(String file) {
Properties properties = new Properties();
if (file == null) {
return properties;
}
InputStream is = null;
try {
LOG.debug("Trying to load " + file + " from FileSystem.");
is = new FileInputStream(file);
} catch (FileNotFoundException e) {
LOG.debug("Trying to load " + file + " from Classpath.");
try {
is = PropertiesUtils.class.getResourceAsStream(file);
} catch (Exception ex) {
LOG.warn("Can not load resource " + file, ex);
}
}
if (is != null) {
try {
properties.load(is);
} catch (Exception e) {
LOG.error("Exception occurred while loading " + file, e);
} finally {
try {
is.close();
} catch (Exception e) {
LOG.debug("Can not close Inputstream.", e);
}
}
} else {
LOG.warn("File " + file + " can't be loaded!");
}
return properties;
} | 3.68 |
hadoop_HSAuditLogger_add | /**
* Appends the key-val pair to the passed builder in the following format
* <pair-delim>key=value
*/
static void add(Keys key, String value, StringBuilder b) {
b.append(AuditConstants.PAIR_SEPARATOR).append(key.name())
.append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_remoteBytesRead | /**
* Total bytes read remotely after nothing was read from readAhead buffer.
*
* @param bytes the bytes to be incremented.
*/
@Override
public void remoteBytesRead(long bytes) {
ioStatisticsStore.incrementCounter(StreamStatisticNames.REMOTE_BYTES_READ, bytes);
} | 3.68 |
hmily_HmilyLockRetryHandler_sleep | /**
* Sleep.
* @param e the e
* @throws LockWaitTimeoutException the lock wait timeout exception
*/
public void sleep(final Exception e) {
if (--lockRetryTimes < 0) {
log.error("Global lock wait timeout");
throw new LockWaitTimeoutException("Global lock wait timeout", e);
}
try {
Thread.sleep(lockRetryInterval);
} catch (InterruptedException ignore) {
}
} | 3.68 |
hbase_ProcedureStoreTracker_setDeletedIfModified | /**
* Set the given bit for the procId to delete if it was modified before.
* <p/>
* This method is used to test whether a procedure wal file can be safely deleted, as if all the
* procedures in the given procedure wal file has been modified in the new procedure wal files,
* then we can delete it.
*/
public void setDeletedIfModified(long... procId) {
BitSetNode node = null;
for (int i = 0; i < procId.length; ++i) {
node = lookupClosestNode(node, procId[i]);
if (node != null && node.isModified(procId[i])) {
node.delete(procId[i]);
}
}
} | 3.68 |
flink_Schema_column | /**
* Declares a physical column that is appended to this schema.
*
* <p>See {@link #column(String, AbstractDataType)} for a detailed explanation.
*
* <p>This method uses a type string that can be easily persisted in a durable catalog.
*
* @param columnName column name
* @param serializableTypeString data type of the column as a serializable string
* @see LogicalType#asSerializableString()
*/
public Builder column(String columnName, String serializableTypeString) {
return column(columnName, DataTypes.of(serializableTypeString));
} | 3.68 |
hadoop_MutableGaugeFloat_toString | /**
* @return the value of the metric
*/
public String toString() {
return value.toString();
} | 3.68 |
hbase_WALSplitUtil_getCompletedRecoveredEditsFilePath | /**
* Get the completed recovered edits file path, renaming it to be by last edit in the file from
* its first edit. Then we could use the name to skip recovered edits when doing
* HRegion#replayRecoveredEditsIfAny(Map, CancelableProgressable, MonitoredTask).
* @return dstPath take file's last edit log seq num as the name
*/
static Path getCompletedRecoveredEditsFilePath(Path srcPath, long maximumEditWALSeqNum) {
String fileName = formatRecoveredEditsFileName(maximumEditWALSeqNum);
return new Path(srcPath.getParent(), fileName);
} | 3.68 |
hbase_ProcedureExecutor_setFailureResultForNonce | /**
* If the procedure failed before it was submitted, we may want to give back the same error to the
* requests with the same nonceKey.
* @param nonceKey A unique identifier for this operation from the client or process
* @param procName name of the procedure, used to inform the user
* @param procOwner name of the owner of the procedure, used to inform the user
* @param exception the failure to report to the user
*/
public void setFailureResultForNonce(NonceKey nonceKey, String procName, User procOwner,
IOException exception) {
if (nonceKey == null) {
return;
}
Long procId = nonceKeysToProcIdsMap.get(nonceKey);
if (procId == null || completed.containsKey(procId)) {
return;
}
completed.computeIfAbsent(procId, (key) -> {
Procedure<TEnvironment> proc =
new FailedProcedure<>(procId.longValue(), procName, procOwner, nonceKey, exception);
return new CompletedProcedureRetainer<>(proc);
});
} | 3.68 |
pulsar_FunctionMetaDataManager_getFunctionMetaData | /**
* Get the function metadata for a function.
* @param tenant the tenant the function belongs to
* @param namespace the namespace the function belongs to
* @param functionName the function name
* @return FunctionMetaData that contains the function metadata
*/
public synchronized FunctionMetaData getFunctionMetaData(String tenant, String namespace, String functionName) {
return this.functionMetaDataMap.get(tenant).get(namespace).get(functionName);
} | 3.68 |
framework_AbsoluteLayout_setBottom | /**
* Sets the 'bottom' attribute; distance from the bottom of the
* component to the bottom edge of the layout.
*
* @param bottomValue
* The value of the 'bottom' attribute
* @param bottomUnits
* The unit of the 'bottom' attribute. See UNIT_SYMBOLS for a
* description of the available units.
*/
public void setBottom(Float bottomValue, Unit bottomUnits) {
this.bottomValue = bottomValue;
this.bottomUnits = bottomUnits;
markAsDirty();
} | 3.68 |
framework_SharedUtil_dashSeparatedToCamelCase | /**
* Converts a dash ("-") separated string into camelCase.
* <p>
* Examples:
* <p>
* {@literal foo} becomes {@literal foo} {@literal foo-bar} becomes
* {@literal fooBar} {@literal foo--bar} becomes {@literal fooBar}
*
* @since 7.5
* @param dashSeparated
* The dash separated string to convert
* @return a camelCase version of the input string
*/
public static String dashSeparatedToCamelCase(String dashSeparated) {
if (dashSeparated == null) {
return null;
}
String[] parts = dashSeparated.split("-");
for (int i = 1; i < parts.length; i++) {
parts[i] = capitalize(parts[i]);
}
return join(parts, "");
} | 3.68 |
shardingsphere-elasticjob_JobFacade_registerJobBegin | /**
* Register job begin.
*
* @param shardingContexts sharding contexts
*/
public void registerJobBegin(final ShardingContexts shardingContexts) {
executionService.registerJobBegin(shardingContexts);
} | 3.68 |
streampipes_BoilerpipeHTMLParser_toTextDocument | /**
* Returns a {@link TextDocument} containing the extracted {@link TextBlock} s. NOTE: Only call
* this after {@link #parse(org.xml.sax.InputSource)}.
*
* @return The {@link TextDocument}
*/
public TextDocument toTextDocument() {
return contentHandler.toTextDocument();
} | 3.68 |
flink_ScriptProcessBuilder_addJobConfToEnvironment | /**
* addJobConfToEnvironment is mostly shamelessly copied from hadoop streaming. Added additional
* check on environment variable length
*/
void addJobConfToEnvironment(Configuration conf, Map<String, String> env) {
for (Map.Entry<String, String> en : conf) {
String name = en.getKey();
if (!blackListed(conf, name)) {
// String value = (String)en.getValue(); // does not apply variable
// expansion
String value = conf.get(name); // does variable expansion
name = safeEnvVarName(name);
boolean truncate =
conf.getBoolean(HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV.toString(), false);
value = safeEnvVarValue(value, name, truncate);
env.put(name, value);
}
}
} | 3.68 |
hudi_SerializationUtils_serialize | /**
* <p>
* Serializes an {@code Object} to a byte array for storage/serialization.
* </p>
*
* @param obj the object to serialize to bytes
* @return a byte[] with the converted Serializable
* @throws IOException if the serialization fails
*/
public static byte[] serialize(final Object obj) throws IOException {
return SERIALIZER_REF.get().serialize(obj);
} | 3.68 |
hadoop_Event_getReplication | /**
* Replication is zero if the CreateEvent iNodeType is directory or symlink.
*/
public int getReplication() {
return replication;
} | 3.68 |
pulsar_ClientConfiguration_getListenerThreads | /**
* @return the number of threads to use for message listeners
*/
public int getListenerThreads() {
return confData.getNumListenerThreads();
} | 3.68 |
flink_TableFactoryUtil_findAndCreateTableSink | /**
* Creates a {@link TableSink} from a {@link CatalogTable}.
*
* <p>It considers {@link Catalog#getFactory()} if provided.
*/
@SuppressWarnings("unchecked")
public static <T> TableSink<T> findAndCreateTableSink(
@Nullable Catalog catalog,
ObjectIdentifier objectIdentifier,
CatalogTable catalogTable,
ReadableConfig configuration,
boolean isStreamingMode,
boolean isTemporary) {
TableSinkFactory.Context context =
new TableSinkFactoryContextImpl(
objectIdentifier,
catalogTable,
configuration,
!isStreamingMode,
isTemporary);
if (catalog == null) {
return findAndCreateTableSink(context);
} else {
return createTableSinkForCatalogTable(catalog, context)
.orElseGet(() -> findAndCreateTableSink(context));
}
} | 3.68 |
hbase_CellUtil_getCellKeyAsString | /**
* Return the Key portion of the passed <code>cell</code> as a String.
* @param cell the cell to convert
* @param rowConverter used to convert the row of the cell to a string
* @return The Key portion of the passed <code>cell</code> as a String.
*/
public static String getCellKeyAsString(Cell cell, Function<Cell, String> rowConverter) {
StringBuilder sb = new StringBuilder(rowConverter.apply(cell));
sb.append('/');
sb.append(cell.getFamilyLength() == 0
? ""
: Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength()));
// KeyValue only added ':' if family is non-null. Do same.
if (cell.getFamilyLength() > 0) sb.append(':');
sb.append(cell.getQualifierLength() == 0
? ""
: Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength()));
sb.append('/');
sb.append(KeyValue.humanReadableTimestamp(cell.getTimestamp()));
sb.append('/');
sb.append(KeyValue.Type.codeToType(cell.getTypeByte()));
if (!(cell instanceof KeyValue.KeyOnlyKeyValue)) {
sb.append("/vlen=");
sb.append(cell.getValueLength());
}
sb.append("/seqid=");
sb.append(cell.getSequenceId());
return sb.toString();
} | 3.68 |
hadoop_AbfsOutputStream_hasActiveBlockDataToUpload | /**
* Is there an active block and is there any data in it to upload?
*
* @return true if there is some data to upload in an active block else false.
*/
private boolean hasActiveBlockDataToUpload() {
return hasActiveBlock() && getActiveBlock().hasData();
} | 3.68 |
hadoop_IncrementalBlockReportManager_put | /** Put the block to this IBR. */
void put(ReceivedDeletedBlockInfo rdbi) {
blocks.put(rdbi.getBlock(), rdbi);
increaseBlocksCounter(rdbi);
} | 3.68 |
morf_UpdateStatementBuilder_getHints | /**
* @return all hints in the order they were declared.
*/
public List<Hint> getHints() {
return hints;
} | 3.68 |
flink_FileInputFormat_setFilePaths | /**
* Sets multiple paths of files to be read.
*
* @param filePaths The paths of the files to read.
*/
public void setFilePaths(Path... filePaths) {
if (!supportsMultiPaths() && filePaths.length > 1) {
throw new UnsupportedOperationException(
"Multiple paths are not supported by this FileInputFormat.");
}
if (filePaths.length < 1) {
throw new IllegalArgumentException("At least one file path must be specified.");
}
if (filePaths.length == 1) {
// set for backwards compatibility
this.filePath = filePaths[0];
} else {
// clear file path in case it had been set before
this.filePath = null;
}
this.filePaths = filePaths;
} | 3.68 |
flink_WindowedStream_reduce | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given reducer.
*
* @param reduceFunction The reduce function that is used for incremental aggregation.
* @param function The window function.
* @param resultType Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
*/
@Internal
public <R> SingleOutputStreamOperator<R> reduce(
ReduceFunction<T> reduceFunction,
ProcessWindowFunction<T, R, K, W> function,
TypeInformation<R> resultType) {
// clean the closures
function = input.getExecutionEnvironment().clean(function);
reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);
final String opName = builder.generateOperatorName();
final String opDescription = builder.generateOperatorDescription(reduceFunction, function);
OneInputStreamOperator<T, R> operator = builder.reduce(reduceFunction, function);
return input.transform(opName, resultType, operator).setDescription(opDescription);
} | 3.68 |
hbase_ScanQueryMatcher_currentRow | /** Returns a cell representing the current row */
public Cell currentRow() {
return currentRow;
} | 3.68 |
hbase_MergeTableRegionsProcedure_checkRegionsToMerge | /**
* @throws MergeRegionException If unable to merge regions for whatever reasons.
*/
private static void checkRegionsToMerge(MasterProcedureEnv env, final RegionInfo[] regions,
final boolean force) throws MergeRegionException {
long count = Arrays.stream(regions).distinct().count();
if (regions.length != count) {
throw new MergeRegionException("Duplicate regions specified; cannot merge a region to "
+ "itself. Passed in " + regions.length + " but only " + count + " unique.");
}
if (count < 2) {
throw new MergeRegionException("Need two Regions at least to run a Merge");
}
RegionInfo previous = null;
for (RegionInfo ri : regions) {
if (previous != null) {
if (!previous.getTable().equals(ri.getTable())) {
String msg = "Can't merge regions from different tables: " + previous + ", " + ri;
LOG.warn(msg);
throw new MergeRegionException(msg);
}
if (!force && !ri.isAdjacent(previous) && !ri.isOverlap(previous)) {
String msg = "Unable to merge non-adjacent or non-overlapping regions '"
+ previous.getShortNameToLog() + "', '" + ri.getShortNameToLog() + "' when force=false";
LOG.warn(msg);
throw new MergeRegionException(msg);
}
}
if (ri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
throw new MergeRegionException("Can't merge non-default replicas; " + ri);
}
try {
checkOnline(env, ri);
} catch (DoNotRetryRegionException dnrre) {
throw new MergeRegionException(dnrre);
}
previous = ri;
}
} | 3.68 |
flink_FunctionDefinition_getRequirements | /** Returns the set of requirements this definition demands. */
default Set<FunctionRequirement> getRequirements() {
return Collections.emptySet();
} | 3.68 |
hadoop_AllocateResponse_amCommand | /**
* Set the <code>amCommand</code> of the response.
* @see AllocateResponse#setAMCommand(AMCommand)
* @param amCommand <code>amCommand</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder amCommand(AMCommand amCommand) {
allocateResponse.setAMCommand(amCommand);
return this;
} | 3.68 |
hadoop_WeakReferenceThreadMap_currentThreadId | /**
* Get the current thread ID.
* @return thread ID.
*/
public long currentThreadId() {
return Thread.currentThread().getId();
} | 3.68 |
flink_Tuple15_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> copy() {
return new Tuple15<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14);
} | 3.68 |
flink_DeclarativeAggregateFunction_mergeOperand | /**
* Merge input of {@link #mergeExpressions()}; the inputs are AGG buffers generated by the user
* definition.
*/
public final UnresolvedReferenceExpression mergeOperand(
UnresolvedReferenceExpression aggBuffer) {
String name = String.valueOf(Arrays.asList(aggBufferAttributes()).indexOf(aggBuffer));
validateOperandName(name);
return unresolvedRef(name);
} | 3.68 |
hudi_InstantStateHandler_refresh | /**
* Refresh the checkpoint messages cached. Will be called when coordinator start/commit/abort instant.
*
* @return Whether refreshing is successful.
*/
public boolean refresh(String instantStatePath) {
try {
cachedInstantStates.put(instantStatePath, scanInstantState(new Path(instantStatePath)));
requestCount.set(0);
} catch (Exception e) {
LOG.error("Failed to load instant states, path: " + instantStatePath, e);
return false;
}
return true;
} | 3.68 |
hbase_HRegionFileSystem_checkRegionInfoOnFilesystem | /**
* Write out an info file under the stored region directory. Useful when recovering mangled regions. If
* the regionInfo already exists on-disk, then we fast exit.
*/
void checkRegionInfoOnFilesystem() throws IOException {
// Compose the content of the file so we can compare to length in filesystem. If not same,
// rewrite it (it may have been written in the old format using Writables instead of pb). The
// pb version is much shorter -- we write now w/o the toString version -- so checking length
// only should be sufficient. I don't want to read the file every time to check if it pb
// serialized.
byte[] content = getRegionInfoFileContent(regionInfoForFs);
// Verify if the region directory exists before opening a region. We need to do this since if
// the region directory doesn't exist we will re-create the region directory and a new HRI
// when HRegion.openHRegion() is called.
try {
FileStatus status = fs.getFileStatus(getRegionDir());
} catch (FileNotFoundException e) {
LOG.warn(getRegionDir() + " doesn't exist for region: " + regionInfoForFs.getEncodedName()
+ " on table " + regionInfo.getTable());
}
try {
Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
FileStatus status = fs.getFileStatus(regionInfoFile);
if (status != null && status.getLen() == content.length) {
// Then assume the content good and move on.
// NOTE: the length alone is not sufficient to verify that the content matches.
return;
}
LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
if (!fs.delete(regionInfoFile, false)) {
throw new IOException("Unable to remove existing " + regionInfoFile);
}
} catch (FileNotFoundException e) {
LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName()
+ " on table " + regionInfo.getTable());
}
// Write HRI to a file in case we need to recover hbase:meta
writeRegionInfoOnFilesystem(content, true);
} | 3.68 |
hbase_AbstractFSWAL_getWALArchivePath | /*
* only public so WALSplitter can use.
* @return archived location of a WAL file with the given path p
*/
public static Path getWALArchivePath(Path archiveDir, Path p) {
return new Path(archiveDir, p.getName());
} | 3.68 |
framework_VVideo_setPoster | /**
* Sets the poster URL.
*
* @param poster
* the poster image URL
*/
public void setPoster(String poster) {
video.setPoster(poster);
} | 3.68 |
graphhopper_VectorTile_getLayersOrBuilder | /**
* <code>repeated .vector_tile.Tile.Layer layers = 3;</code>
*/
public vector_tile.VectorTile.Tile.LayerOrBuilder getLayersOrBuilder(
int index) {
if (layersBuilder_ == null) {
return layers_.get(index); } else {
return layersBuilder_.getMessageOrBuilder(index);
}
} | 3.68 |
hadoop_TextOutputFormat_writeObject | /**
* Write the object to the byte stream, handling Text as a special
* case.
* @param o the object to print
* @throws IOException if the write throws, we pass it on
*/
private void writeObject(Object o) throws IOException {
if (o instanceof Text) {
Text to = (Text) o;
out.write(to.getBytes(), 0, to.getLength());
} else {
out.write(o.toString().getBytes(StandardCharsets.UTF_8));
}
} | 3.68 |
hadoop_HsController_countersPage | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#countersPage()
*/
@Override
public Class<? extends View> countersPage() {
return HsCountersPage.class;
} | 3.68 |
framework_VAbstractSplitPanel_setStylenames | /** For internal use only. May be removed or replaced in the future. */
public void setStylenames() {
final String splitterClass = CLASSNAME
+ (orientation == Orientation.HORIZONTAL ? "-hsplitter"
: "-vsplitter");
final String firstContainerClass = CLASSNAME + "-first-container";
final String secondContainerClass = CLASSNAME + "-second-container";
final String lockedSuffix = locked ? "-locked" : "";
splitter.setClassName(splitterClass + lockedSuffix);
firstContainer.setClassName(firstContainerClass);
secondContainer.setClassName(secondContainerClass);
for (String styleName : componentStyleNames) {
splitter.addClassName(
splitterClass + "-" + styleName + lockedSuffix);
firstContainer.addClassName(firstContainerClass + "-" + styleName);
secondContainer
.addClassName(secondContainerClass + "-" + styleName);
}
} | 3.68 |
hudi_MarkerHandler_doesMarkerDirExist | /**
* @param markerDir marker directory path
* @return {@code true} if the marker directory exists; {@code false} otherwise.
*/
public boolean doesMarkerDirExist(String markerDir) {
MarkerDirState markerDirState = getMarkerDirState(markerDir);
return markerDirState.exists();
} | 3.68 |
hadoop_Validate_checkValuesEqual | /**
* Validates that the given two values are equal.
* @param value1 the first value to check.
* @param value1Name the name of the first argument.
* @param value2 the second value to check.
* @param value2Name the name of the second argument.
*/
public static void checkValuesEqual(
long value1,
String value1Name,
long value2,
String value2Name) {
checkArgument(
value1 == value2,
"'%s' (%s) must equal '%s' (%s).",
value1Name,
value1,
value2Name,
value2);
} | 3.68 |
hbase_TableRegionModel_setId | /**
* @param id the region's encoded id
*/
public void setId(long id) {
this.id = id;
} | 3.68 |
dubbo_URLParam_addParametersIfAbsent | /**
* Add absent parameters to a new URLParam.
*
* @param parameters parameters in key-value pairs
* @return A new URLParam
*/
public URLParam addParametersIfAbsent(Map<String, String> parameters) {
if (CollectionUtils.isEmptyMap(parameters)) {
return this;
}
return doAddParameters(parameters, true);
} | 3.68 |
framework_Slot_setCaptionResizeListener | /**
* Sets the caption resize listener for this slot.
*
* @param captionResizeListener
* the listener to set, or {@code null} to remove a previously
* set listener
*/
public void setCaptionResizeListener(
ElementResizeListener captionResizeListener) {
detachListeners();
this.captionResizeListener = captionResizeListener;
attachListeners();
} | 3.68 |
dubbo_ServiceDiscoveryRegistry_getServiceDiscovery | /**
* Get the instance {@link ServiceDiscovery} from the registry {@link URL} using
* {@link ServiceDiscoveryFactory} SPI
*
* @param registryURL the {@link URL} to connect the registry
* @return the {@link ServiceDiscovery} instance for the given registry URL
*/
private ServiceDiscovery getServiceDiscovery(URL registryURL) {
ServiceDiscoveryFactory factory = getExtension(registryURL);
return factory.getServiceDiscovery(registryURL);
} | 3.68 |
morf_AbstractSqlDialectTest_testSqlDateConversion | /**
* Tests SQL date conversion to string via databaseSafeStringtoRecordValue
*
* @throws SQLException If a SQL exception is thrown.
*/
@Test
public void testSqlDateConversion() throws SQLException {
ResultSet rs = mock(ResultSet.class);
LocalDate localDate1 = new LocalDate(2010, 1, 1);
LocalDate localDate2 = new LocalDate(2010, 12, 21);
LocalDate localDate3 = new LocalDate(100, 1, 1);
LocalDate localDate4 = new LocalDate(9999, 12, 31);
java.sql.Date date1 = new java.sql.Date(localDate1.toDate().getTime());
java.sql.Date date2 = new java.sql.Date(localDate2.toDate().getTime());
java.sql.Date date3 = new java.sql.Date(localDate3.toDate().getTime());
java.sql.Date date4 = new java.sql.Date(localDate4.toDate().getTime());
when(rs.getDate(1)).thenReturn(date1);
when(rs.getDate(2)).thenReturn(date2);
when(rs.getDate(3)).thenReturn(date3);
when(rs.getDate(4)).thenReturn(date4);
Record record = testDialect.resultSetToRecord(rs, ImmutableList.of(
column("Date1", DataType.DATE),
column("Date2", DataType.DATE),
column("Date3", DataType.DATE),
column("Date4", DataType.DATE)
));
assertEquals(localDate1, record.getLocalDate("Date1"));
assertEquals(localDate2, record.getLocalDate("Date2"));
assertEquals(localDate3, record.getLocalDate("Date3"));
assertEquals(localDate4, record.getLocalDate("Date4"));
assertEquals(date1, record.getDate("Date1"));
assertEquals(date2, record.getDate("Date2"));
assertEquals(date3, record.getDate("Date3"));
assertEquals(date4, record.getDate("Date4"));
} | 3.68 |
druid_ZookeeperNodeRegister_destroy | /**
* @see #deregister()
*/
public void destroy() {
deregister();
} | 3.68 |
hbase_BucketCache_freeSpace | /**
* Free the space if the used size reaches acceptableSize() or one size block couldn't be
* allocated. When freeing the space, we use the LRU algorithm and ensure there must be some
* blocks evicted
* @param why Why we are being called
*/
void freeSpace(final String why) {
// Ensure only one freeSpace progress at a time
if (!freeSpaceLock.tryLock()) {
return;
}
try {
freeInProgress = true;
long bytesToFreeWithoutExtra = 0;
// Calculate free byte for each bucketSizeinfo
StringBuilder msgBuffer = LOG.isDebugEnabled() ? new StringBuilder() : null;
BucketAllocator.IndexStatistics[] stats = bucketAllocator.getIndexStatistics();
long[] bytesToFreeForBucket = new long[stats.length];
for (int i = 0; i < stats.length; i++) {
bytesToFreeForBucket[i] = 0;
long freeGoal = (long) Math.floor(stats[i].totalCount() * (1 - minFactor));
freeGoal = Math.max(freeGoal, 1);
if (stats[i].freeCount() < freeGoal) {
bytesToFreeForBucket[i] = stats[i].itemSize() * (freeGoal - stats[i].freeCount());
bytesToFreeWithoutExtra += bytesToFreeForBucket[i];
if (msgBuffer != null) {
msgBuffer.append("Free for bucketSize(" + stats[i].itemSize() + ")="
+ StringUtils.byteDesc(bytesToFreeForBucket[i]) + ", ");
}
}
}
if (msgBuffer != null) {
msgBuffer.append("Free for total=" + StringUtils.byteDesc(bytesToFreeWithoutExtra) + ", ");
}
if (bytesToFreeWithoutExtra <= 0) {
return;
}
long currentSize = bucketAllocator.getUsedSize();
long totalSize = bucketAllocator.getTotalSize();
if (LOG.isDebugEnabled() && msgBuffer != null) {
LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString()
+ " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize="
+ StringUtils.byteDesc(realCacheSize.sum()) + ", total="
+ StringUtils.byteDesc(totalSize));
}
long bytesToFreeWithExtra =
(long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor));
// Instantiate priority buckets
BucketEntryGroup bucketSingle =
new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(singleFactor));
BucketEntryGroup bucketMulti =
new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(multiFactor));
BucketEntryGroup bucketMemory =
new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(memoryFactor));
// Scan entire map putting bucket entry into appropriate bucket entry
// group
for (Map.Entry<BlockCacheKey, BucketEntry> bucketEntryWithKey : backingMap.entrySet()) {
switch (bucketEntryWithKey.getValue().getPriority()) {
case SINGLE: {
bucketSingle.add(bucketEntryWithKey);
break;
}
case MULTI: {
bucketMulti.add(bucketEntryWithKey);
break;
}
case MEMORY: {
bucketMemory.add(bucketEntryWithKey);
break;
}
}
}
PriorityQueue<BucketEntryGroup> bucketQueue =
new PriorityQueue<>(3, Comparator.comparingLong(BucketEntryGroup::overflow));
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
int remainingBuckets = bucketQueue.size();
long bytesFreed = 0;
BucketEntryGroup bucketGroup;
while ((bucketGroup = bucketQueue.poll()) != null) {
long overflow = bucketGroup.overflow();
if (overflow > 0) {
long bucketBytesToFree =
Math.min(overflow, (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets);
bytesFreed += bucketGroup.free(bucketBytesToFree);
}
remainingBuckets--;
}
// Check and free if there are buckets that still need freeing of space
if (bucketSizesAboveThresholdCount(minFactor) > 0) {
bucketQueue.clear();
remainingBuckets = 3;
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
while ((bucketGroup = bucketQueue.poll()) != null) {
long bucketBytesToFree = (bytesToFreeWithExtra - bytesFreed) / remainingBuckets;
bytesFreed += bucketGroup.free(bucketBytesToFree);
remainingBuckets--;
}
}
// Even after the above free we might still need freeing because of the
// De-fragmentation of the buckets (also called Slab Calcification problem), i.e
// there might be some buckets where the occupancy is very sparse and thus are not
// yielding free space for the other bucket sizes. The fix for this is to evict some
// of the buckets; we do this by evicting the buckets that are least filled
freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * bucketSizesAboveThresholdCount(1.0f));
if (LOG.isDebugEnabled()) {
long single = bucketSingle.totalSize();
long multi = bucketMulti.totalSize();
long memory = bucketMemory.totalSize();
if (LOG.isDebugEnabled()) {
LOG.debug("Bucket cache free space completed; " + "freed="
+ StringUtils.byteDesc(bytesFreed) + ", " + "total=" + StringUtils.byteDesc(totalSize)
+ ", " + "single=" + StringUtils.byteDesc(single) + ", " + "multi="
+ StringUtils.byteDesc(multi) + ", " + "memory=" + StringUtils.byteDesc(memory));
}
}
} catch (Throwable t) {
LOG.warn("Failed freeing space", t);
} finally {
cacheStats.evict();
freeInProgress = false;
freeSpaceLock.unlock();
}
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_getActiveAndEnabledSC | /**
* Return the set of sub-clusters that are both active and allowed by our
* policy (weight > 0).
*
* @return a set of active and enabled {@link SubClusterId}s
*/
private Set<SubClusterId> getActiveAndEnabledSC() {
return activeAndEnabledSC;
} | 3.68 |
flink_FlinkHints_getTableName | /** Returns the qualified name of a table scan, otherwise returns empty. */
public static Optional<String> getTableName(RelOptTable table) {
if (table == null) {
return Optional.empty();
}
String tableName;
if (table instanceof FlinkPreparingTableBase) {
tableName = StringUtils.join(((FlinkPreparingTableBase) table).getNames(), '.');
} else {
throw new TableException(
String.format(
"Could not get the table name with the unknown table class `%s`",
table.getClass().getCanonicalName()));
}
return Optional.of(tableName);
} | 3.68 |
dubbo_AbstractAnnotationBeanPostProcessor_needsRefreshInjectionMetadata | // Use custom check method to compatible with Spring 4.x
private boolean needsRefreshInjectionMetadata(AnnotatedInjectionMetadata metadata, Class<?> clazz) {
return (metadata == null || metadata.needsRefresh(clazz));
} | 3.68 |
hbase_MasterObserver_preSnapshot | /**
* Called before a new snapshot is taken. Called as part of snapshot RPC call.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor for the snapshot
* @param tableDescriptor the TableDescriptor of the table to snapshot
*/
default void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException {
} | 3.68 |
flink_Hardware_getSizeOfPhysicalMemoryForFreeBSD | /**
* Returns the size of the physical memory in bytes on FreeBSD.
*
* @return the size of the physical memory in bytes or {@code -1}, if the size could not be
* determined
*/
private static long getSizeOfPhysicalMemoryForFreeBSD() {
BufferedReader bi = null;
try {
Process proc = Runtime.getRuntime().exec("sysctl hw.physmem");
bi =
new BufferedReader(
new InputStreamReader(proc.getInputStream(), StandardCharsets.UTF_8));
String line;
while ((line = bi.readLine()) != null) {
if (line.startsWith("hw.physmem")) {
long memsize = Long.parseLong(line.split(":")[1].trim());
bi.close();
proc.destroy();
return memsize;
}
}
LOG.error(
"Cannot determine the size of the physical memory for FreeBSD host "
+ "(using 'sysctl hw.physmem').");
return -1;
} catch (Throwable t) {
LOG.error(
"Cannot determine the size of the physical memory for FreeBSD host "
+ "(using 'sysctl hw.physmem')",
t);
return -1;
} finally {
if (bi != null) {
try {
bi.close();
} catch (IOException ignored) {
}
}
}
} | 3.68 |
framework_GridElement_getRows | /**
* Gets all the data rows in the grid.
* <p>
* Returns an iterable which will lazily scroll rows into view and lazily
* load data as needed.
*
* @return an iterable of all the data rows in the grid.
*/
public Iterable<GridRowElement> getRows() {
return () -> new Iterator<GridElement.GridRowElement>() {
int nextIndex = 0;
@Override
public GridRowElement next() {
return getRow(nextIndex++);
}
@Override
public boolean hasNext() {
try {
getRow(nextIndex);
return true;
} catch (Exception e) {
return false;
}
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove not supported");
}
};
} | 3.68 |
graphhopper_IntsRef_compareTo | /**
* Signed int order comparison
*/
@Override
public int compareTo(IntsRef other) {
if (this == other) return 0;
final int[] aInts = this.ints;
int aUpto = this.offset;
final int[] bInts = other.ints;
int bUpto = other.offset;
final int aStop = aUpto + Math.min(this.length, other.length);
while (aUpto < aStop) {
int aInt = aInts[aUpto++];
int bInt = bInts[bUpto++];
if (aInt > bInt) {
return 1;
} else if (aInt < bInt) {
return -1;
}
}
// One is a prefix of the other, or, they are equal:
return this.length - other.length;
} | 3.68 |
flink_DataType_getFields | /**
* Returns an ordered list of fields starting from the provided {@link DataType}.
*
* <p>Note: This method returns an empty list for every {@link DataType} that is not a composite
* type.
*/
public static List<DataTypes.Field> getFields(DataType dataType) {
final List<String> names = getFieldNames(dataType);
final List<DataType> dataTypes = getFieldDataTypes(dataType);
return IntStream.range(0, names.size())
.mapToObj(i -> DataTypes.FIELD(names.get(i), dataTypes.get(i)))
.collect(Collectors.toList());
} | 3.68 |
hbase_RSGroupInfo_containsServer | /** Returns true if a server with hostPort is found */
public boolean containsServer(Address hostPort) {
return servers.contains(hostPort);
} | 3.68 |
hudi_HoodieIndexUtils_getLatestBaseFilesForAllPartitions | /**
* Fetches Pair of partition path and {@link HoodieBaseFile}s for interested partitions.
*
* @param partitions list of partitions of interest
* @param context instance of {@link HoodieEngineContext} to use
* @param hoodieTable instance of {@link HoodieTable} of interest
* @return the list of Pairs of partition path and fileId
*/
public static List<Pair<String, HoodieBaseFile>> getLatestBaseFilesForAllPartitions(final List<String> partitions,
final HoodieEngineContext context,
final HoodieTable hoodieTable) {
context.setJobStatus(HoodieIndexUtils.class.getSimpleName(), "Load latest base files from all partitions: " + hoodieTable.getConfig().getTableName());
return context.flatMap(partitions, partitionPath -> {
List<Pair<String, HoodieBaseFile>> filteredFiles =
getLatestBaseFilesForPartition(partitionPath, hoodieTable).stream()
.map(baseFile -> Pair.of(partitionPath, baseFile))
.collect(toList());
return filteredFiles.stream();
}, Math.max(partitions.size(), 1));
} | 3.68 |
cron-utils_CronParserField_isOptional | /**
* Returns optional tag.
*
* @return optional tag
*/
public final boolean isOptional() {
return optional;
} | 3.68 |
hbase_EventHandler_getEventType | /**
* Return the event type
* @return The event type.
*/
public EventType getEventType() {
return this.eventType;
} | 3.68 |