name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_Event_getFileSize | /**
* The size of the truncated file in bytes.
*/
public long getFileSize() {
return fileSize;
} | 3.68 |
flink_OrcNoHiveColumnarRowInputFormat_createPartitionedFormat | /**
 * Creates a partitioned {@link OrcColumnarRowInputFormat} whose partition columns can be
 * generated from the split.
*/
public static <SplitT extends FileSourceSplit>
OrcColumnarRowInputFormat<?, SplitT> createPartitionedFormat(
Configuration hadoopConfig,
RowType tableType,
List<String> partitionKeys,
PartitionFieldExtractor<SplitT> extractor,
int[] selectedFields,
List<OrcFilters.Predicate> conjunctPredicates,
int batchSize,
Function<RowType, TypeInformation<RowData>> rowTypeInfoFactory) {
// TODO FLINK-25113 all this partition keys code should be pruned from the orc format,
// because now FileSystemTableSource uses FileInfoExtractorBulkFormat for reading partition
// keys.
String[] tableFieldNames = tableType.getFieldNames().toArray(new String[0]);
LogicalType[] tableFieldTypes = tableType.getChildren().toArray(new LogicalType[0]);
List<String> orcFieldNames = getNonPartNames(tableFieldNames, partitionKeys);
int[] orcSelectedFields =
getSelectedOrcFields(tableFieldNames, selectedFields, orcFieldNames);
ColumnBatchFactory<VectorizedRowBatch, SplitT> batchGenerator =
(SplitT split, VectorizedRowBatch rowBatch) -> {
// create and initialize the row batch
ColumnVector[] vectors = new ColumnVector[selectedFields.length];
for (int i = 0; i < vectors.length; i++) {
String name = tableFieldNames[selectedFields[i]];
LogicalType type = tableFieldTypes[selectedFields[i]];
vectors[i] =
partitionKeys.contains(name)
? createFlinkVectorFromConstant(
type,
extractor.extract(split, name, type),
batchSize)
: createFlinkVector(
rowBatch.cols[orcFieldNames.indexOf(name)]);
}
return new VectorizedColumnBatch(vectors);
};
return new OrcColumnarRowInputFormat<>(
new OrcNoHiveShim(),
hadoopConfig,
convertToOrcTypeWithPart(tableFieldNames, tableFieldTypes, partitionKeys),
orcSelectedFields,
conjunctPredicates,
batchSize,
batchGenerator,
rowTypeInfoFactory.apply(
new RowType(
Arrays.stream(selectedFields)
.mapToObj(i -> tableType.getFields().get(i))
.collect(Collectors.toList()))));
} | 3.68 |
hbase_KeyValue_isValidType | /**
 * Check whether the byte <code>b</code> is a valid type code.
 * @param b byte to check
 * @return true if the byte maps to a known type, false otherwise
*/
static boolean isValidType(byte b) {
return codeArray[b & 0xff] != null;
} | 3.68 |
hudi_AbstractTableFileSystemView_getVisibleCommitsAndCompactionTimeline | /**
 * Return only the commits and compaction timeline used for building file-groups.
*
* @return {@code HoodieTimeline}
*/
public HoodieTimeline getVisibleCommitsAndCompactionTimeline() {
return visibleCommitsAndCompactionTimeline;
} | 3.68 |
pulsar_DispatchRateLimiter_tryDispatchPermit | /**
 * Acquires message and byte permits from the rate limiter and returns whether both were acquired.
 *
 * @param msgPermits the number of message permits to acquire
 * @param bytePermits the number of byte permits to acquire
 * @return true if both the message and byte permits were acquired (or not required)
*/
public boolean tryDispatchPermit(long msgPermits, long bytePermits) {
boolean acquiredMsgPermit = msgPermits <= 0 || dispatchRateLimiterOnMessage == null
|| dispatchRateLimiterOnMessage.tryAcquire(msgPermits);
boolean acquiredBytePermit = bytePermits <= 0 || dispatchRateLimiterOnByte == null
|| dispatchRateLimiterOnByte.tryAcquire(bytePermits);
return acquiredMsgPermit && acquiredBytePermit;
} | 3.68 |
flink_OneShotLatch_trigger | /** Fires the latch. Code that is blocked on {@link #await()} will now return. */
public void trigger() {
synchronized (lock) {
triggered = true;
lock.notifyAll();
}
} | 3.68 |
framework_Table_setColumnCollapsed | /**
* Sets whether the specified column is collapsed or not.
 *
 * @param propertyId
 *            the propertyID identifying the column.
 * @param collapsed
 *            true to collapse the column, false to show it.
* @throws IllegalStateException
* if column collapsing is not allowed
* @throws IllegalArgumentException
* if the property id does not exist
*/
public void setColumnCollapsed(Object propertyId, boolean collapsed)
throws IllegalStateException {
if (!isColumnCollapsingAllowed()) {
throw new IllegalStateException("Column collapsing not allowed!");
}
if (collapsed && noncollapsibleColumns.contains(propertyId)) {
throw new IllegalStateException("The column is noncollapsible!");
}
if (!getContainerPropertyIds().contains(propertyId)
&& !columnGenerators.containsKey(propertyId)) {
throw new IllegalArgumentException("Property '" + propertyId
+ "' was not found in the container");
}
if (collapsed) {
if (collapsedColumns.add(propertyId)) {
fireColumnCollapseEvent(propertyId);
}
} else {
if (collapsedColumns.remove(propertyId)) {
fireColumnCollapseEvent(propertyId);
}
}
// Assures the visual refresh
refreshRowCache();
} | 3.68 |
hbase_MetaTableAccessor_putToMetaTable | /**
* Put the passed <code>p</code> to the <code>hbase:meta</code> table.
* @param connection connection we're using
* @param p Put to add to hbase:meta
*/
private static void putToMetaTable(Connection connection, Put p) throws IOException {
try (Table table = getMetaHTable(connection)) {
put(table, p);
}
} | 3.68 |
flink_FailureHandlingResult_isGlobalFailure | /**
* Checks if this failure was a global failure, i.e., coming from a "safety net" failover that
* involved all tasks and should reset also components like the coordinators.
*/
public boolean isGlobalFailure() {
return globalFailure;
} | 3.68 |
flink_SavepointReader_window | /**
* Read window state from an operator in a {@code Savepoint}. This method supports reading from
* any type of window.
*
* @param windowSerializer The serializer used for the window type.
* @return A {@link WindowSavepointReader}.
*/
public <W extends Window> WindowSavepointReader<W> window(TypeSerializer<W> windowSerializer) {
Preconditions.checkNotNull(windowSerializer, "The window serializer must not be null");
return new WindowSavepointReader<>(env, metadata, stateBackend, windowSerializer);
} | 3.68 |
flink_BinarySegmentUtils_readArrayData | /** Gets an instance of {@link ArrayData} from underlying {@link MemorySegment}. */
public static ArrayData readArrayData(
MemorySegment[] segments, int baseOffset, long offsetAndSize) {
final int size = ((int) offsetAndSize);
int offset = (int) (offsetAndSize >> 32);
BinaryArrayData array = new BinaryArrayData();
array.pointTo(segments, offset + baseOffset, size);
return array;
} | 3.68 |
framework_ConnectorTracker_cleanStreamVariables | /**
* Removes StreamVariables that belong to connectors that are no longer
* attached to the session.
*/
private void cleanStreamVariables() {
if (pidToNameToStreamVariable != null) {
ConnectorTracker connectorTracker = uI.getConnectorTracker();
Iterator<String> iterator = pidToNameToStreamVariable.keySet()
.iterator();
while (iterator.hasNext()) {
String connectorId = iterator.next();
if (connectorTracker.getConnector(connectorId) == null) {
// Owner is no longer attached to the session
Map<String, StreamVariable> removed = pidToNameToStreamVariable
.get(connectorId);
for (String key : removed.keySet()) {
streamVariableToSeckey.remove(removed.get(key));
}
iterator.remove();
}
}
}
} | 3.68 |
framework_AbstractSelect_getContainerDataSource | /**
* Gets the viewing data-source container.
*
* @see Container.Viewer#getContainerDataSource()
*/
@Override
public Container getContainerDataSource() {
return items;
} | 3.68 |
flink_ThreadBase_isRunning | /**
* Checks whether this thread is still alive.
*
* @return true, if the thread is alive, false otherwise.
*/
protected boolean isRunning() {
return this.alive;
} | 3.68 |
hbase_Delete_add | /**
* Add an existing delete marker to this Delete object.
* @param cell An existing cell of type "delete".
* @return this for invocation chaining
*/
@Override
public Delete add(Cell cell) throws IOException {
super.add(cell);
return this;
} | 3.68 |
hbase_BlockIOUtils_preadWithExtra | /**
* Read from an input stream at least <code>necessaryLen</code> and if possible,
* <code>extraLen</code> also if available. Analogous to
* {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses positional read and
* specifies a number of "extra" bytes that would be desirable but not absolutely necessary to
* read. If the input stream supports ByteBufferPositionedReadable, it reads to the byte buffer
* directly, and does not allocate a temporary byte array.
* @param buff ByteBuff to read into.
* @param dis the input stream to read from
* @param position the position within the stream from which to start reading
* @param necessaryLen the number of bytes that are absolutely necessary to read
* @param extraLen the number of extra bytes that would be nice to read
* @param readAllBytes whether we must read the necessaryLen and extraLen
* @return true if and only if extraLen is > 0 and reading those extra bytes was successful
* @throws IOException if failed to read the necessary bytes
*/
public static boolean preadWithExtra(ByteBuff buff, FSDataInputStream dis, long position,
int necessaryLen, int extraLen, boolean readAllBytes) throws IOException {
boolean preadbytebuffer = dis.hasCapability("in:preadbytebuffer");
if (preadbytebuffer) {
return preadWithExtraDirectly(buff, dis, position, necessaryLen, extraLen, readAllBytes);
} else {
return preadWithExtraOnHeap(buff, dis, position, necessaryLen, extraLen, readAllBytes);
}
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_isLegalColumnFamilyName | /**
* Check if the column family name is legal.
* @param b Family name.
* @return <code>b</code>
 * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. not
 * 'printable' or contains a ':' (null passes are allowed because
 * <code>b</code> can be null when deserializing). Cannot start
 * with a '.' either. Also, a family name cannot be an empty value
 * or equal "recovered.edits".
*/
public static byte[] isLegalColumnFamilyName(final byte[] b) {
if (b == null) {
return null;
}
Preconditions.checkArgument(b.length != 0, "Column Family name can not be empty");
if (b[0] == '.') {
throw new IllegalArgumentException(
"Column Family names cannot start with a " + "period: " + Bytes.toString(b));
}
for (int i = 0; i < b.length; i++) {
if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
throw new IllegalArgumentException("Illegal character <" + b[i]
+ ">. Column Family names cannot contain control characters or colons: "
+ Bytes.toString(b));
}
}
byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
if (Bytes.equals(recoveredEdit, b)) {
throw new IllegalArgumentException(
"Column Family name cannot be: " + HConstants.RECOVERED_EDITS_DIR);
}
return b;
} | 3.68 |
morf_Criterion_deepCopy | /**
* @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation)
*/
@Override
public Builder<Criterion> deepCopy(DeepCopyTransformation transformer) {
return TempTransitionalBuilderWrapper.wrapper(new Criterion(this,transformer));
} | 3.68 |
pulsar_EntryImpl_getDataAndRelease | // Only for test
@Override
public byte[] getDataAndRelease() {
byte[] array = getData();
release();
return array;
} | 3.68 |
flink_CheckpointedInputGate_getAlignmentDurationNanos | /**
* Gets the time that the latest alignment took, in nanoseconds. If there is currently an
* alignment in progress, it will return the time spent in the current alignment so far.
*
* @return The duration in nanoseconds
*/
@VisibleForTesting
long getAlignmentDurationNanos() {
return barrierHandler.getAlignmentDurationNanos();
} | 3.68 |
morf_GraphBasedUpgradeNode_equals | /**
* Only the name property is considered while checking equality of this class.
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
GraphBasedUpgradeNode other = (GraphBasedUpgradeNode) obj;
if (name == null) {
if (other.name != null) return false;
} else if (!name.equals(other.name)) return false;
return true;
} | 3.68 |
flink_JoinInputSideSpec_getUniqueKeyType | /**
 * Returns the {@link TypeInformation} of the unique key. Returns null if the input doesn't
 * have a unique key.
*/
@Nullable
public InternalTypeInfo<RowData> getUniqueKeyType() {
return uniqueKeyType;
} | 3.68 |
hadoop_GPGPolicyFacade_confCacheEqual | /**
* @param queue the queue to check the cached policy configuration for
* @param conf the new policy configuration
* @return whether or not the conf is equal to the cached conf
*/
private boolean confCacheEqual(String queue,
SubClusterPolicyConfiguration conf) {
SubClusterPolicyConfiguration cachedConf = policyConfMap.get(queue);
if (conf == null && cachedConf == null) {
return true;
} else if (conf != null && cachedConf != null) {
if (conf.equals(cachedConf)) {
return true;
}
}
return false;
} | 3.68 |
flink_SerializedCheckpointData_getNumIds | /**
* Gets the number of IDs in the checkpoint.
*
* @return The number of IDs in the checkpoint.
*/
public int getNumIds() {
return numIds;
} | 3.68 |
hadoop_Probe_init | /**
 * Perform any pre-launch initialization.
*/
public void init() throws IOException {
} | 3.68 |
hadoop_ContainerReInitEvent_getResourceSet | /**
* Get the ResourceSet.
* @return ResourceSet.
*/
public ResourceSet getResourceSet() {
return resourceSet;
} | 3.68 |
flink_SqlFunctionUtils_ceil | /** SQL <code>CEIL</code> operator applied to long values. */
public static long ceil(long b0, long b1) {
return floor(b0 + b1 - 1, b1);
} | 3.68 |
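The one-liner above relies on a floor-to-multiple helper; the following self-contained sketch reimplements that helper for illustration (it is not Flink's actual floor) and shows the ceil-to-multiple arithmetic on a few values.
public class CeilToMultipleDemo {
    // Illustrative floor-to-multiple helper (assumes b1 > 0); not Flink's implementation.
    static long floor(long b0, long b1) {
        long r = b0 % b1;
        return r < 0 ? b0 - r - b1 : b0 - r;
    }

    // Same trick as the snippet above: ceiling to a multiple of b1 via floor(b0 + b1 - 1, b1).
    static long ceil(long b0, long b1) {
        return floor(b0 + b1 - 1, b1);
    }

    public static void main(String[] args) {
        System.out.println(ceil(7, 3));  // 9, the smallest multiple of 3 that is >= 7
        System.out.println(ceil(9, 3));  // 9, already a multiple
        System.out.println(ceil(-7, 3)); // -6
    }
}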
flink_SpanningWrapper_transferFrom | /** Copies the data and transfers the "ownership" (i.e. clears the passed wrapper). */
void transferFrom(NonSpanningWrapper partial, int nextRecordLength) throws IOException {
updateLength(nextRecordLength);
accumulatedRecordBytes =
isAboveSpillingThreshold() ? spill(partial) : partial.copyContentTo(buffer);
partial.clear();
} | 3.68 |
hadoop_PeriodicService_setIntervalMs | /**
* Set the interval for the periodic service.
*
* @param interval Interval in milliseconds.
*/
protected void setIntervalMs(long interval) {
if (getServiceState() == STATE.STARTED) {
throw new ServiceStateException("Periodic service already started");
} else {
this.intervalMs = interval;
}
} | 3.68 |
flink_LogicalTypeUtils_toInternalConversionClass | /**
* Returns the conversion class for the given {@link LogicalType} that is used by the table
* runtime as internal data structure.
*
* @see RowData
*/
public static Class<?> toInternalConversionClass(LogicalType type) {
// ordered by type root definition
switch (type.getTypeRoot()) {
case CHAR:
case VARCHAR:
return StringData.class;
case BOOLEAN:
return Boolean.class;
case BINARY:
case VARBINARY:
return byte[].class;
case DECIMAL:
return DecimalData.class;
case TINYINT:
return Byte.class;
case SMALLINT:
return Short.class;
case INTEGER:
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
return Integer.class;
case BIGINT:
case INTERVAL_DAY_TIME:
return Long.class;
case FLOAT:
return Float.class;
case DOUBLE:
return Double.class;
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return TimestampData.class;
case TIMESTAMP_WITH_TIME_ZONE:
throw new UnsupportedOperationException("Unsupported type: " + type);
case ARRAY:
return ArrayData.class;
case MULTISET:
case MAP:
return MapData.class;
case ROW:
case STRUCTURED_TYPE:
return RowData.class;
case DISTINCT_TYPE:
return toInternalConversionClass(((DistinctType) type).getSourceType());
case RAW:
return RawValueData.class;
case NULL:
return Object.class;
case SYMBOL:
case UNRESOLVED:
default:
throw new IllegalArgumentException("Illegal type: " + type);
}
} | 3.68 |
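A minimal usage sketch of the mapping above, assuming flink-table-common is on the classpath; the package paths follow the usual Flink layout and may differ across versions.
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.logical.utils.LogicalTypeUtils;

public class InternalConversionClassDemo {
    public static void main(String[] args) {
        // VARCHAR maps to StringData, INT to java.lang.Integer, as in the switch above.
        System.out.println(LogicalTypeUtils.toInternalConversionClass(new VarCharType(10)));
        System.out.println(LogicalTypeUtils.toInternalConversionClass(new IntType()));
    }
}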
Activiti_ProcessEngines_getProcessEngineInfos | /** Get initialization results. */
public static List<ProcessEngineInfo> getProcessEngineInfos() {
return processEngineInfos;
} | 3.68 |
pulsar_MessageIdAdv_getBatchSize | /**
* Get the batch size.
*
* @return 0 if the message is not in a batch
*/
default int getBatchSize() {
return 0;
} | 3.68 |
hudi_MarkerDirState_syncMarkersFromFileSystem | /**
* Syncs all markers maintained in the underlying files under the marker directory in the file system.
*/
private void syncMarkersFromFileSystem() {
Map<String, Set<String>> fileMarkersSetMap = MarkerUtils.readTimelineServerBasedMarkersFromFileSystem(
markerDirPath, fileSystem, hoodieEngineContext, parallelism);
for (String markersFilePathStr : fileMarkersSetMap.keySet()) {
Set<String> fileMarkers = fileMarkersSetMap.get(markersFilePathStr);
if (!fileMarkers.isEmpty()) {
int index = parseMarkerFileIndex(markersFilePathStr);
if (index >= 0) {
fileMarkersMap.put(index, new StringBuilder(StringUtils.join(",", fileMarkers)));
allMarkers.addAll(fileMarkers);
}
}
}
try {
if (MarkerUtils.doesMarkerTypeFileExist(fileSystem, markerDirPath)) {
isMarkerTypeWritten = true;
}
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
} | 3.68 |
flink_StateSerializerProvider_getPreviousSerializerSnapshot | /**
* Gets the previous serializer snapshot.
*
* @return The previous serializer snapshot, or null if registered serializer was for a new
* state, not a restored one.
*/
@Nullable
public final TypeSerializerSnapshot<T> getPreviousSerializerSnapshot() {
return previousSerializerSnapshot;
} | 3.68 |
hadoop_BlockManager_requestCaching | /**
* Requests that the given block should be copied to the cache. Optional operation.
*
* @param data the {@code BufferData} instance to optionally cache.
*/
public void requestCaching(BufferData data) {
// Do nothing because we do not support caching.
} | 3.68 |
pulsar_RawBatchMessageContainerImpl_toByteBuf | /**
* Serializes the batched messages and return the ByteBuf.
* It sets the CompressionType and Encryption Keys from the batched messages.
* If successful, it calls `clear()` at the end to release buffers from this container.
*
* The returned byte buffer follows this format:
* [IdSize][Id][metadataAndPayloadSize][metadataAndPayload].
* This format is the same as RawMessage.serialize()'s format
 * as the compacted messages are deserialized as RawMessage in the broker.
*
* It throws the following runtime exceptions from encryption:
* IllegalStateException if cryptoKeyReader is not set for encrypted messages.
* IllegalArgumentException if encryption key init fails.
* RuntimeException if message encryption fails.
*
* @return a ByteBuf instance
*/
public ByteBuf toByteBuf() {
if (numMessagesInBatch > 1) {
messageMetadata.setNumMessagesInBatch(numMessagesInBatch);
messageMetadata.setSequenceId(lowestSequenceId);
messageMetadata.setHighestSequenceId(highestSequenceId);
}
MessageImpl lastMessage = messages.get(messages.size() - 1);
MessageIdImpl lastMessageId = (MessageIdImpl) lastMessage.getMessageId();
MessageMetadata lastMessageMetadata = lastMessage.getMessageBuilder();
this.compressionType = lastMessageMetadata.getCompression();
this.compressor = CompressionCodecProvider.getCompressionCodec(lastMessageMetadata.getCompression());
if (!lastMessage.getEncryptionCtx().isEmpty()) {
EncryptionContext encryptionContext = (EncryptionContext) lastMessage.getEncryptionCtx().get();
if (cryptoKeyReader == null) {
IllegalStateException ex =
new IllegalStateException("Messages are encrypted but no cryptoKeyReader is provided.");
discard(ex);
throw ex;
}
encryptionKeys = encryptionContext.getKeys().keySet();
if (msgCrypto == null) {
msgCrypto =
new MessageCryptoBc(String.format(
"[%s] [%s]", topicName, "RawBatchMessageContainer"), true);
try {
msgCrypto.addPublicKeyCipher(encryptionKeys, cryptoKeyReader);
} catch (PulsarClientException.CryptoException e) {
discard(e);
throw new IllegalArgumentException("Failed to set encryption keys", e);
}
}
}
ByteBuf encryptedPayload = encrypt(getCompressedBatchMetadataAndPayload());
updateAndReserveBatchAllocatedSize(encryptedPayload.capacity());
ByteBuf metadataAndPayload = Commands.serializeMetadataAndPayload(Commands.ChecksumType.Crc32c,
messageMetadata, encryptedPayload);
MessageIdData idData = new MessageIdData();
idData.setLedgerId(lastMessageId.getLedgerId());
idData.setEntryId(lastMessageId.getEntryId());
idData.setPartition(lastMessageId.getPartitionIndex());
// Format: [IdSize][Id][metadataAndPayloadSize][metadataAndPayload]
// Following RawMessage.serialize() format as the compacted messages will be parsed as RawMessage in broker
int idSize = idData.getSerializedSize();
int headerSize = 4 /* IdSize */ + idSize + 4 /* metadataAndPayloadSize */;
int totalSize = headerSize + metadataAndPayload.readableBytes();
ByteBuf buf = PulsarByteBufAllocator.DEFAULT.buffer(totalSize);
buf.writeInt(idSize);
idData.writeTo(buf);
buf.writeInt(metadataAndPayload.readableBytes());
buf.writeBytes(metadataAndPayload);
encryptedPayload.release();
clear();
return buf;
} | 3.68 |
flink_SegmentsUtil_getFloat | /**
* get float from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static float getFloat(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getFloat(offset);
} else {
return getFloatMultiSegments(segments, offset);
}
} | 3.68 |
hadoop_BCFile_getMetaBlock | /**
* Stream access to a Meta Block.
*
* @param name
* meta block name
* @return BlockReader input stream for reading the meta block.
* @throws IOException
* @throws MetaBlockDoesNotExist
* The Meta Block with the given name does not exist.
*/
public BlockReader getMetaBlock(String name) throws IOException,
MetaBlockDoesNotExist {
MetaIndexEntry imeBCIndex = metaIndex.getMetaByName(name);
if (imeBCIndex == null) {
throw new MetaBlockDoesNotExist("name=" + name);
}
BlockRegion region = imeBCIndex.getRegion();
return createReader(imeBCIndex.getCompressionAlgorithm(), region);
} | 3.68 |
framework_DefaultDeploymentConfiguration_isSendUrlsAsParameters | /**
* {@inheritDoc}
* <p>
* The default value is <code>true</code>.
*/
@Override
public boolean isSendUrlsAsParameters() {
return sendUrlsAsParameters;
} | 3.68 |
hbase_SimpleServerRpcConnection_incRpcCount | /* Increment the outstanding RPC count */
protected void incRpcCount() {
rpcCount.increment();
} | 3.68 |
flink_PekkoRpcActor_lookupRpcMethod | /**
* Look up the rpc method on the given {@link RpcEndpoint} instance.
*
* @param methodName Name of the method
* @param parameterTypes Parameter types of the method
* @return Method of the rpc endpoint
* @throws NoSuchMethodException Thrown if the method with the given name and parameter types
* cannot be found at the rpc endpoint
*/
private Method lookupRpcMethod(final String methodName, final Class<?>[] parameterTypes)
throws NoSuchMethodException {
return rpcEndpoint.getClass().getMethod(methodName, parameterTypes);
} | 3.68 |
hadoop_AbstractS3ACommitter_pendingsetCommitted | /**
* Callback when a pendingset has been committed,
* including any source statistics.
* @param sourceStatistics any source statistics
*/
public void pendingsetCommitted(final IOStatistics sourceStatistics) {
ioStatistics.aggregate(sourceStatistics);
} | 3.68 |
framework_FieldGroup_getInvalidFields | /**
* Returns a map containing the fields which failed validation and the
* exceptions the corresponding validators threw.
*
* @since 7.4
* @return a map with all the invalid value exceptions. Can be empty but
* not null
*/
public Map<Field<?>, InvalidValueException> getInvalidFields() {
if (getCause() instanceof FieldGroupInvalidValueException) {
return ((FieldGroupInvalidValueException) getCause())
.getInvalidFields();
}
return new HashMap<Field<?>, InvalidValueException>();
} | 3.68 |
hbase_NullComparator_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof NullComparator)) {
return false;
}
return super.areSerializedFieldsEqual(other);
} | 3.68 |
flink_FileSystemTableFactory_formatFactoryExists | /**
* Returns true if the format factory can be found using the given factory base class and
* identifier.
*/
private boolean formatFactoryExists(Context context, Class<?> factoryClass) {
Configuration options = Configuration.fromMap(context.getCatalogTable().getOptions());
String identifier = options.get(FactoryUtil.FORMAT);
if (identifier == null) {
throw new ValidationException(
String.format(
"Table options do not contain an option key '%s' for discovering a format.",
FactoryUtil.FORMAT.key()));
}
final List<Factory> factories = new LinkedList<>();
ServiceLoader.load(Factory.class, context.getClassLoader())
.iterator()
.forEachRemaining(factories::add);
final List<Factory> foundFactories =
factories.stream()
.filter(f -> factoryClass.isAssignableFrom(f.getClass()))
.collect(Collectors.toList());
final List<Factory> matchingFactories =
foundFactories.stream()
.filter(f -> f.factoryIdentifier().equals(identifier))
.collect(Collectors.toList());
return !matchingFactories.isEmpty();
} | 3.68 |
dubbo_HealthStatusManager_setStatus | /**
* Updates the status of the server.
*
* @param service the name of some aspect of the server that is associated with a health status.
* This name can have no relation with the gRPC services that the server is
* running with. It can also be an empty String {@code ""} per the gRPC
* specification.
* @param status is one of the values {@link HealthCheckResponse.ServingStatus#SERVING}, {@link
* HealthCheckResponse.ServingStatus#NOT_SERVING} and {@link
* HealthCheckResponse.ServingStatus#UNKNOWN}.
*/
public void setStatus(String service, HealthCheckResponse.ServingStatus status) {
healthService.setStatus(service, status);
} | 3.68 |
hadoop_FindOptions_getErr | /**
* Returns the error stream to be used.
*
* @return error stream to be used
*/
public PrintStream getErr() {
return this.err;
} | 3.68 |
framework_HierarchyPanel_showServerDebugInfo | /**
* Outputs debug information on the server - usually in the console of an
* IDE, with a clickable reference to the relevant code location.
*
* @since 7.1
* @param connector
* show debug info for this connector
*/
static void showServerDebugInfo(ServerConnector connector) {
if (connector != null) {
connector.getConnection().getUIConnector()
.showServerDebugInfo(connector);
}
} | 3.68 |
flink_FlinkTestcontainersConfigurator_configure | /** Configures and creates {@link FlinkContainers}. */
public FlinkContainers configure() {
// Create temporary directory for building Flink image
final Path imageBuildingTempDir;
try {
imageBuildingTempDir = Files.createTempDirectory("flink-image-build");
} catch (IOException e) {
throw new RuntimeException("Failed to create temporary directory", e);
}
// Build JobManager
final GenericContainer<?> jobManager = configureJobManagerContainer(imageBuildingTempDir);
// Build TaskManagers
final List<GenericContainer<?>> taskManagers =
configureTaskManagerContainers(imageBuildingTempDir);
// Setup Zookeeper HA
GenericContainer<?> zookeeper = null;
// Mount HA storage to JobManager
if (flinkContainersSettings.isZookeeperHA()) {
zookeeper = configureZookeeperContainer();
createTempDirAndMountToContainer(
"flink-recovery", flinkContainersSettings.getHaStoragePath(), jobManager);
}
// Mount checkpoint storage to JobManager
createTempDirAndMountToContainer(
"flink-checkpoint", flinkContainersSettings.getCheckpointPath(), jobManager);
return new FlinkContainers(
jobManager, taskManagers, zookeeper, flinkContainersSettings.getFlinkConfig());
} | 3.68 |
hadoop_WeightedPolicyInfo_getRouterPolicyWeights | /**
* Getter of the router weights.
*
* @return the router weights.
*/
public Map<SubClusterIdInfo, Float> getRouterPolicyWeights() {
return routerPolicyWeights;
} | 3.68 |
flink_StreamingRuntimeContext_getInputSplitProvider | /**
* Returns the input split provider associated with the operator.
*
* @return The input split provider.
*/
public InputSplitProvider getInputSplitProvider() {
return taskEnvironment.getInputSplitProvider();
} | 3.68 |
hadoop_SFTPConnectionPool_returnToPool | /** Add the channel into pool.
* @param channel
*/
synchronized void returnToPool(ChannelSftp channel) {
ConnectionInfo info = con2infoMap.get(channel);
HashSet<ChannelSftp> cons = idleConnections.get(info);
if (cons == null) {
cons = new HashSet<>();
idleConnections.put(info, cons);
}
cons.add(channel);
} | 3.68 |
flink_SinkTestSuiteBase_testStartFromSavepoint | /**
* Test connector sink restart from a completed savepoint with the same parallelism.
*
 * <p>This test will create a sink in the external system, generate a collection of test data,
 * and first write half of it to the sink with a Flink job running at parallelism 2. It then
 * stops the job and restarts the same job from the completed savepoint. Once the job is
 * running again, the other half is written to the sink and the result is compared.
 *
 * <p>In order to pass this test, the number of records produced by Flink needs to equal the
 * number of generated test records, and the records in the sink are compared to the test data
 * according to the configured semantic. There is no requirement on record order.
*/
@TestTemplate
@DisplayName("Test sink restarting from a savepoint")
public void testStartFromSavepoint(
TestEnvironment testEnv,
DataStreamSinkExternalContext<T> externalContext,
CheckpointingMode semantic)
throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 2, 2);
} | 3.68 |
hbase_NullComparator_parseFrom | /**
* Parse the serialized representation of {@link NullComparator}
* @param pbBytes A pb serialized {@link NullComparator} instance
* @return An instance of {@link NullComparator} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static NullComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
try {
// Just parse. Don't use what we parse since on end we are returning new NullComparator.
ComparatorProtos.NullComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new NullComparator();
} | 3.68 |
hudi_HoodieTableMetaClient_getRawFs | /**
* Return raw file-system.
*
* @return fs
*/
public FileSystem getRawFs() {
return getFs().getFileSystem();
} | 3.68 |
hbase_LockAndQueue_trySharedLock | /** Returns whether we have successfully acquired the shared lock. */
public boolean trySharedLock(Procedure<?> proc) {
if (hasExclusiveLock() && !hasLockAccess(proc)) {
return false;
}
// If no one holds the xlock, then we are free to hold the sharedLock
// If the parent proc or we have already held the xlock, then we return true here as
 // xlock is more powerful than a shared lock.
sharedLock++;
return true;
} | 3.68 |
rocketmq-connect_TransformChain_retryWithToleranceOperator | /**
* set retryWithToleranceOperator
*/
public void retryWithToleranceOperator(RetryWithToleranceOperator retryWithToleranceOperator) {
this.retryWithToleranceOperator = retryWithToleranceOperator;
} | 3.68 |
morf_TableOutputter_getStandardFormat | /**
* @return the format to use for normal cells
* @throws WriteException if the format could not be created
*/
private WritableCellFormat getStandardFormat() throws WriteException {
WritableCellFormat standardFormat = new WritableCellFormat(getStandardFont());
standardFormat.setVerticalAlignment(VerticalAlignment.TOP);
return standardFormat;
} | 3.68 |
framework_AbstractComponentConnector_setWidgetStyleName | /**
* This is used to add / remove state related style names from the widget.
* <p>
* Override this method for example if the style name given here should be
* updated in another widget in addition to the one returned by the
* {@link #getWidget()}.
* </p>
*
* @param styleName
* the style name to be added or removed
* @param add
* <code>true</code> to add the given style, <code>false</code>
* to remove it
*/
protected void setWidgetStyleName(String styleName, boolean add) {
getWidget().setStyleName(styleName, add);
} | 3.68 |
hadoop_InMemoryConfigurationStore_checkVersion | /**
 * Configuration mutations are not logged (i.e. not persisted) and therefore not
 * versioned. Hence, the version is always compatible, since everything is kept
 * in memory.
*/
@Override
public void checkVersion() {
// Does nothing. (Version is always compatible since it's in memory)
} | 3.68 |
pulsar_BrokerMonitor_updateBrokers | // Inform the user of any broker gains and losses and put watches on newly acquired brokers.
private synchronized void updateBrokers(final String path) {
final Set<String> newBrokers = new HashSet<>();
try {
newBrokers.addAll(zkClient.getChildren(path, this));
} catch (Exception ex) {
throw new RuntimeException(ex);
}
for (String oldBroker : brokers) {
if (!newBrokers.contains(oldBroker)) {
log.info("Lost broker: " + oldBroker);
synchronized (loadData) {
// Stop including lost broker in global stats.
loadData.remove(oldBroker);
}
}
}
for (String newBroker : newBrokers) {
if (!brokers.contains(newBroker)) {
log.info("Gained broker: " + newBroker);
final BrokerDataWatcher brokerDataWatcher = new BrokerDataWatcher(zkClient);
brokerDataWatcher.printData(path + "/" + newBroker);
}
}
this.brokers = newBrokers;
} | 3.68 |
hadoop_HdfsDataInputStream_getVisibleLength | /**
* Get the visible length of the file. It will include the length of the last
* block even if that is in UnderConstruction state.
*
* @return The visible length of the file.
*/
public long getVisibleLength() {
return getDFSInputStream().getFileLength();
} | 3.68 |
druid_StringUtils_subStringToInteger | /**
 * Example: subStringToInteger("12345", "1", "4") = 23
 *
 * @param src the source string
 * @param start the delimiter after which extraction starts
 * @param to the delimiter at which extraction ends
 * @return the extracted substring parsed as an Integer
*/
public static Integer subStringToInteger(String src, String start, String to) {
return stringToInteger(subString(src, start, to));
} | 3.68 |
dubbo_CollectionUtils_equals | /**
* Compares the specified collection with another, the main implementation references
* {@link AbstractSet}
*
* @param one {@link Collection}
* @param another {@link Collection}
* @return if equals, return <code>true</code>, or <code>false</code>
* @since 2.7.6
*/
public static boolean equals(Collection<?> one, Collection<?> another) {
if (one == another) {
return true;
}
if (isEmpty(one) && isEmpty(another)) {
return true;
}
if (size(one) != size(another)) {
return false;
}
try {
return one.containsAll(another);
} catch (ClassCastException | NullPointerException unused) {
return false;
}
} | 3.68 |
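A small usage sketch of the equality semantics above, assuming the dubbo-common artifact provides org.apache.dubbo.common.utils.CollectionUtils.
import java.util.Arrays;
import java.util.List;
import org.apache.dubbo.common.utils.CollectionUtils;

public class CollectionEqualsDemo {
    public static void main(String[] args) {
        List<String> a = Arrays.asList("x", "y", "z");
        List<String> b = Arrays.asList("z", "y", "x");
        // Equality is size plus containsAll (as in AbstractSet), so element order is ignored.
        System.out.println(CollectionUtils.equals(a, b));                       // true
        System.out.println(CollectionUtils.equals(a, Arrays.asList("x", "y"))); // false, sizes differ
    }
}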
benchmark_ListPartition_partitionList | /**
 * Partition a list into the specified number of sublists.
*
* @param originList
* @param size
* @param <T>
* @return the partitioned list
*/
public static <T> List<List<T>> partitionList(List<T> originList, int size) {
List<List<T>> resultList = new ArrayList<>();
if (null == originList || 0 == originList.size() || size <= 0) {
return resultList;
}
if (originList.size() <= size) {
for (T item : originList) {
List<T> resultItemList = new ArrayList<>();
resultItemList.add(item);
resultList.add(resultItemList);
}
for (int i = 0; i < (size - originList.size()); i++) {
resultList.add(new ArrayList<>());
}
return resultList;
}
for (int i = 0; i < size; i++) {
resultList.add(new ArrayList<>());
}
int count = 0;
for (T item : originList) {
int index = count % size;
resultList.get(index).add(item);
count++;
}
return resultList;
} | 3.68 |
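A self-contained sketch of the round-robin distribution implemented above, handy for seeing the expected bucket shapes; the class and method names here are illustrative and do not reference the benchmark module.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ListPartitionDemo {
    // Round-robin split of a list into `size` buckets, mirroring the main loop above.
    static <T> List<List<T>> partition(List<T> origin, int size) {
        List<List<T>> result = new ArrayList<>();
        for (int i = 0; i < size; i++) {
            result.add(new ArrayList<>());
        }
        int count = 0;
        for (T item : origin) {
            result.get(count % size).add(item);
            count++;
        }
        return result;
    }

    public static void main(String[] args) {
        // [1..7] into 3 buckets -> [[1, 4, 7], [2, 5], [3, 6]]
        System.out.println(partition(Arrays.asList(1, 2, 3, 4, 5, 6, 7), 3));
    }
}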
hadoop_ResourceBundles_getValue | /**
* Get a resource given bundle name and key
* @param <T> type of the resource
* @param bundleName name of the resource bundle
* @param key to lookup the resource
* @param suffix for the key to lookup
* @param defaultValue of the resource
* @return the resource or the defaultValue
* @throws ClassCastException if the resource found doesn't match T
*/
@SuppressWarnings("unchecked")
public static synchronized <T> T getValue(String bundleName, String key,
String suffix, T defaultValue) {
T value;
try {
ResourceBundle bundle = getBundle(bundleName);
value = (T) bundle.getObject(getLookupKey(key, suffix));
}
catch (Exception e) {
return defaultValue;
}
return value;
} | 3.68 |
hbase_SplitLogManagerCoordination_getMaster | /** Returns the master value */
public MasterServices getMaster() {
return master;
} | 3.68 |
AreaShop_RegionAccessSet_getPlayerUniqueIds | /**
* Get the players that have been added by uuid.
* @return Set with players that have been added by uuid
*/
public Set<UUID> getPlayerUniqueIds() {
return playerUniqueIds;
} | 3.68 |
flink_SqlCreateTableConverter_convertCreateTableAS | /** Convert the {@link SqlCreateTableAs} node. */
Operation convertCreateTableAS(
FlinkPlannerImpl flinkPlanner, SqlCreateTableAs sqlCreateTableAs) {
UnresolvedIdentifier unresolvedIdentifier =
UnresolvedIdentifier.of(sqlCreateTableAs.fullTableName());
ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
PlannerQueryOperation query =
(PlannerQueryOperation)
SqlNodeToOperationConversion.convert(
flinkPlanner, catalogManager, sqlCreateTableAs.getAsQuery())
.orElseThrow(
() ->
new TableException(
"CTAS unsupported node type "
+ sqlCreateTableAs
.getAsQuery()
.getClass()
.getSimpleName()));
CatalogTable catalogTable = createCatalogTable(sqlCreateTableAs);
CreateTableOperation createTableOperation =
new CreateTableOperation(
identifier,
CatalogTable.of(
Schema.newBuilder()
.fromResolvedSchema(query.getResolvedSchema())
.build(),
catalogTable.getComment(),
catalogTable.getPartitionKeys(),
catalogTable.getOptions()),
sqlCreateTableAs.isIfNotExists(),
sqlCreateTableAs.isTemporary());
return new CreateTableASOperation(
createTableOperation, Collections.emptyMap(), query, false);
} | 3.68 |
hbase_TableDescriptorBuilder_setFlushPolicyClassName | /**
 * This sets the class associated with the flush policy, which determines the stores that
 * need to be flushed when flushing a region. The class used by default is defined in
* org.apache.hadoop.hbase.regionserver.FlushPolicy.
* @param clazz the class name
* @return the modifyable TD
*/
public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
return setValue(FLUSH_POLICY_KEY, clazz);
} | 3.68 |
flink_FlinkPreparingTableBase_getDistribution | /**
* Returns a description of the physical distribution of the rows in this table.
*
* @see org.apache.calcite.rel.metadata.RelMetadataQuery#distribution
*/
public RelDistribution getDistribution() {
return null;
} | 3.68 |
hibernate-validator_MethodConfigurationRule_isStrictSubType | /**
* Whether {@code otherClazz} is a strict subtype of {@code clazz} or not.
* @param clazz the super type to check against
* @param otherClazz the subtype to check
*
* @return {@code true} if {@code otherClazz} is a strict subtype of {@code clazz}, {@code false} otherwise
*/
protected boolean isStrictSubType(Class<?> clazz, Class<?> otherClazz) {
return clazz.isAssignableFrom( otherClazz ) && !clazz.equals( otherClazz );
} | 3.68 |
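Since the method above is protected inside the rule class, here is a self-contained sketch of the same check for quick experimentation; the class name is made up for the example.
public class StrictSubTypeDemo {
    // Same check as above: assignable from the other class, but not the identical class.
    static boolean isStrictSubType(Class<?> clazz, Class<?> otherClazz) {
        return clazz.isAssignableFrom(otherClazz) && !clazz.equals(otherClazz);
    }

    public static void main(String[] args) {
        System.out.println(isStrictSubType(Number.class, Integer.class)); // true
        System.out.println(isStrictSubType(Number.class, Number.class));  // false, same class
        System.out.println(isStrictSubType(Integer.class, Number.class)); // false, wrong direction
    }
}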
flink_CopyOnWriteStateMapSnapshot_getSnapshotVersion | /**
* Returns the internal version of the {@link CopyOnWriteStateMap} when this snapshot was
* created. This value must be used to tell the {@link CopyOnWriteStateMap} when to release this
* snapshot.
*/
int getSnapshotVersion() {
return snapshotVersion;
} | 3.68 |
flink_LogicalTypeCasts_supportsReinterpretCast | /**
* Returns whether the source type can be reinterpreted as the target type.
*
* <p>Reinterpret casts correspond to the SQL reinterpret_cast and represent the logic behind a
* {@code REINTERPRET_CAST(sourceType AS targetType)} operation.
*/
public static boolean supportsReinterpretCast(LogicalType sourceType, LogicalType targetType) {
if (sourceType.getTypeRoot() == targetType.getTypeRoot()) {
return true;
}
switch (sourceType.getTypeRoot()) {
case INTEGER:
switch (targetType.getTypeRoot()) {
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
return true;
default:
return false;
}
case BIGINT:
switch (targetType.getTypeRoot()) {
case TIMESTAMP_WITHOUT_TIME_ZONE:
case INTERVAL_DAY_TIME:
return true;
default:
return false;
}
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
switch (targetType.getTypeRoot()) {
case INTEGER:
case BIGINT:
return true;
default:
return false;
}
case TIMESTAMP_WITHOUT_TIME_ZONE:
case INTERVAL_DAY_TIME:
return targetType.getTypeRoot() == BIGINT;
default:
return false;
}
} | 3.68 |
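A usage sketch of the reinterpret-cast check above, assuming flink-table-common is on the classpath; package paths follow the usual Flink layout and may need adjusting for other versions.
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.DateType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.utils.LogicalTypeCasts;

public class ReinterpretCastCheck {
    public static void main(String[] args) {
        // INT and DATE share the same internal representation, so reinterpretation is allowed.
        System.out.println(LogicalTypeCasts.supportsReinterpretCast(new IntType(), new DateType()));   // true
        // INT -> BIGINT needs a regular widening cast, not a reinterpretation.
        System.out.println(LogicalTypeCasts.supportsReinterpretCast(new IntType(), new BigIntType())); // false
    }
}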
hmily_ThreadLocalHmilyContext_get | /**
* get value.
*
* @return TccTransactionContext
*/
public HmilyTransactionContext get() {
return CURRENT_LOCAL.get();
} | 3.68 |
dubbo_ExecutorUtil_setThreadName | /**
* append thread name with url address
*
* @return new url with updated thread name
*/
public static URL setThreadName(URL url, String defaultName) {
String name = url.getParameter(THREAD_NAME_KEY, defaultName);
name = name + "-" + url.getAddress();
url = url.addParameter(THREAD_NAME_KEY, name);
return url;
} | 3.68 |
framework_Table_setPageLength | /**
* Sets the page length.
*
* <p>
* Setting page length 0 disables paging. The page length defaults to 15.
* </p>
*
* <p>
 * If Table has height set ({@link #setHeight(float, Unit)}) the client
 * side may automatically update the page length to the correct value.
* </p>
*
* @param pageLength
* the length of one page.
*/
public void setPageLength(int pageLength) {
if (pageLength >= 0 && this.pageLength != pageLength) {
this.pageLength = pageLength;
// Assures the visual refresh
refreshRowCache();
}
} | 3.68 |
querydsl_GenericExporter_setPackageSuffix | /**
* Set the package suffix
*
* @param suffix
*/
public void setPackageSuffix(String suffix) {
codegenModule.bind(CodegenModule.PACKAGE_SUFFIX, suffix);
} | 3.68 |
hadoop_AWSRequestAnalyzer_reading | /**
* A read request.
* @param verb verb
* @param key object/prefix, etc.
* @param size nullable size
* @return request info
*/
private RequestInfo reading(final String verb,
final String key, final Number size) {
return request(verb, false, key, size);
} | 3.68 |
hbase_NamedQueueRecorder_addRecord | /**
 * Add various NamedQueue records to the ringbuffer. Based on the type of the event (e.g. slowLog),
 * the consumer of the disruptor ringbuffer will have specific logic. This method is the producer for
 * the disruptor ringbuffer, which is initialized in the NamedQueueRecorder constructor.
* @param namedQueuePayload namedQueue payload sent by client of ring buffer service
*/
public void addRecord(NamedQueuePayload namedQueuePayload) {
RingBuffer<RingBufferEnvelope> ringBuffer = this.disruptor.getRingBuffer();
long seqId = ringBuffer.next();
try {
ringBuffer.get(seqId).load(namedQueuePayload);
} finally {
ringBuffer.publish(seqId);
}
} | 3.68 |
hbase_CompactionConfiguration_setMinFilesToCompact | /**
* Set lower bound on number of files to be included in minor compactions
* @param threshold value to set to
*/
public void setMinFilesToCompact(int threshold) {
minFilesToCompact = threshold;
} | 3.68 |
flink_HiveParserUtils_getWritableObjectInspector | /** Convert exprNodeDesc array to ObjectInspector array. */
public static ArrayList<ObjectInspector> getWritableObjectInspector(
ArrayList<ExprNodeDesc> exprs) {
ArrayList<ObjectInspector> result = new ArrayList<>();
for (ExprNodeDesc expr : exprs) {
result.add(expr.getWritableObjectInspector());
}
return result;
} | 3.68 |
flink_PojoFieldUtils_writeField | /**
* Writes a field to the given {@link DataOutputView}.
*
* <p>This write method avoids Java serialization, by writing only the classname of the field's
* declaring class and the field name. The written field can be read using {@link
* #readField(DataInputView, ClassLoader)}.
*
* @param out the output view to write to.
* @param field the field to write.
*/
static void writeField(DataOutputView out, Field field) throws IOException {
Class<?> declaringClass = field.getDeclaringClass();
out.writeUTF(declaringClass.getName());
out.writeUTF(field.getName());
} | 3.68 |
morf_DatabaseSchemaManager_ensureTablesExist | /**
* Ensure that every table in the schema is present in the DB.
*/
private Collection<String> ensureTablesExist(Schema schema, TruncationBehavior truncationBehavior, ProducerCache producerCache) {
Collection<String> sql = Lists.newLinkedList();
for (Table requiredTable : schema.tables()) {
sql.addAll(ensureTableExists(requiredTable, truncationBehavior, producerCache));
}
return sql;
} | 3.68 |
flink_RocksDBStateBackend_getNumberOfTransferThreads | /** Gets the number of threads used to transfer files while snapshotting/restoring. */
public int getNumberOfTransferThreads() {
return rocksDBStateBackend.getNumberOfTransferThreads();
} | 3.68 |
hbase_Bytes_toInt | /**
* Converts a byte array to an int value
* @param bytes byte array
* @param offset offset into array
* @param length length of int (has to be {@link #SIZEOF_INT})
* @return the int value
* @throws IllegalArgumentException if length is not {@link #SIZEOF_INT} or if there's not enough
* room in the array at the offset indicated.
*/
public static int toInt(byte[] bytes, int offset, final int length) {
if (length != SIZEOF_INT || offset + length > bytes.length) {
throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_INT);
}
return ConverterHolder.BEST_CONVERTER.toInt(bytes, offset, length);
} | 3.68 |
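A short round-trip sketch for the decoder above, assuming the hbase-common artifact provides org.apache.hadoop.hbase.util.Bytes.
import org.apache.hadoop.hbase.util.Bytes;

public class BytesToIntDemo {
    public static void main(String[] args) {
        byte[] encoded = Bytes.toBytes(42);                      // 4-byte big-endian encoding
        int decoded = Bytes.toInt(encoded, 0, Bytes.SIZEOF_INT);
        System.out.println(decoded);                             // 42
        // A length other than SIZEOF_INT, or an offset that runs past the end of the array,
        // triggers the IllegalArgumentException described above.
    }
}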
hbase_SnapshotManager_isRestoringTable | /**
* Verify if the restore of the specified table is in progress.
* @param tableName table under restore
* @return <tt>true</tt> if there is a restore in progress of the specified table.
*/
private synchronized boolean isRestoringTable(final TableName tableName) {
Long procId = this.restoreTableToProcIdMap.get(tableName);
if (procId == null) {
return false;
}
ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
if (procExec.isRunning() && !procExec.isFinished(procId)) {
return true;
} else {
this.restoreTableToProcIdMap.remove(tableName);
return false;
}
} | 3.68 |
flink_YarnClusterDescriptor_getStagingDir | /**
* Returns the configured remote target home directory if set, otherwise returns the default
* home directory.
*
* @param defaultFileSystem default file system used
* @return the remote target home directory
*/
@VisibleForTesting
Path getStagingDir(FileSystem defaultFileSystem) throws IOException {
final String configuredStagingDir =
flinkConfiguration.getString(YarnConfigOptions.STAGING_DIRECTORY);
if (configuredStagingDir == null) {
return defaultFileSystem.getHomeDirectory();
}
FileSystem stagingDirFs =
new Path(configuredStagingDir).getFileSystem(defaultFileSystem.getConf());
return stagingDirFs.makeQualified(new Path(configuredStagingDir));
} | 3.68 |
flink_HiveParserQBSubQuery_analyzeConjunct | /*
* 1. The only correlation operator we check for is EQUAL; because that is
 * the one for which we can do an algebraic transformation.
* 2. For expressions that are not an EQUAL predicate, we treat them as conjuncts
* having only 1 side. These should only contain references to the SubQuery
* table sources.
* 3. For expressions that are an EQUAL predicate; we analyze each side and let the
* left and right exprs in the Conjunct object.
*
* @return Conjunct contains details on the left and right side of the conjunct expression.
*/
HiveParserQBSubQuery.Conjunct analyzeConjunct(HiveParserASTNode conjunct)
throws SemanticException {
int type = conjunct.getType();
if (type == HiveASTParser.EQUAL) {
HiveParserASTNode left = (HiveParserASTNode) conjunct.getChild(0);
HiveParserASTNode right = (HiveParserASTNode) conjunct.getChild(1);
ObjectPair<HiveParserQBSubQuery.ExprType, ColumnInfo> leftInfo = analyzeExpr(left);
ObjectPair<HiveParserQBSubQuery.ExprType, ColumnInfo> rightInfo =
analyzeExpr(right);
return new HiveParserQBSubQuery.Conjunct(
left,
right,
leftInfo.getFirst(),
rightInfo.getFirst(),
leftInfo.getSecond(),
rightInfo.getSecond());
} else {
ObjectPair<HiveParserQBSubQuery.ExprType, ColumnInfo> sqExprInfo =
analyzeExpr(conjunct);
return new HiveParserQBSubQuery.Conjunct(
conjunct,
null,
sqExprInfo.getFirst(),
null,
sqExprInfo.getSecond(),
sqExprInfo.getSecond());
}
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableNumEntriesImmMemTables | /** Returns total number of entries in the unflushed immutable memtables. */
public void enableNumEntriesImmMemTables() {
this.properties.add(RocksDBProperty.NumEntriesImmMemTables.getRocksDBProperty());
} | 3.68 |
flink_SqlLikeChainChecker_indexMiddle | /**
* Matches the middle of each string to its pattern.
*
* @return Returns absolute offset of the match.
*/
private static int indexMiddle(
BinaryStringData pattern, MemorySegment[] segments, int start, int len) {
return SegmentsUtil.find(
segments,
start,
len,
pattern.getSegments(),
pattern.getOffset(),
pattern.getSizeInBytes());
} | 3.68 |
framework_AbstractSelect_unselect | /**
* Unselects an item.
*
* @param itemId
* the identifier of the Item to be unselected.
* @see #getNullSelectionItemId()
* @see #setNullSelectionItemId(Object)
*
*/
public void unselect(Object itemId) {
if (isSelected(itemId)) {
if (isMultiSelect()) {
final Set<Object> s = new HashSet<Object>((Set<?>) getValue());
s.remove(itemId);
setValue(s);
} else {
setValue(null);
}
}
} | 3.68 |
hadoop_ServiceLauncher_main | /**
* This is the JVM entry point for the service launcher.
*
* Converts the arguments to a list, then invokes {@link #serviceMain(List)}
* @param args command line arguments.
*/
public static void main(String[] args) {
serviceMain(Arrays.asList(args));
} | 3.68 |
hbase_AbstractFSWAL_getLogFileSize | // public only until class moves to o.a.h.h.wal
/** Returns the size of log files in use */
public long getLogFileSize() {
return this.totalLogSize.get();
} | 3.68 |
graphhopper_BBox_toGeoJson | /**
 * @return list containing this bounding box. Attention: GeoJSON is lon,lat! With 3D it gets even
 * worse: lon,lat,ele
*/
public List<Double> toGeoJson() {
List<Double> list = new ArrayList<>(4);
list.add(Helper.round6(minLon));
list.add(Helper.round6(minLat));
// hmh
if (elevation)
list.add(Helper.round2(minEle));
list.add(Helper.round6(maxLon));
list.add(Helper.round6(maxLat));
if (elevation)
list.add(Helper.round2(maxEle));
return list;
} | 3.68 |
flink_SqlPartitionUtils_getPartitionKVs | /**
* Get static partition key value pair as strings.
*
* <p>For character literals we return the unquoted and unescaped values. For other types we use
* {@link SqlLiteral#toString()} to get the string format of the value literal.
*
* @return the mapping of column names to values of partition specifications, returns an empty
* map if there is no partition specifications.
*/
public static LinkedHashMap<String, String> getPartitionKVs(SqlNodeList partitionSpec) {
if (partitionSpec == null) {
return null;
}
LinkedHashMap<String, String> ret = new LinkedHashMap<>();
if (partitionSpec.size() == 0) {
return ret;
}
for (SqlNode node : partitionSpec.getList()) {
SqlProperty sqlProperty = (SqlProperty) node;
Comparable<?> comparable = SqlLiteral.value(sqlProperty.getValue());
String value =
comparable instanceof NlsString
? ((NlsString) comparable).getValue()
: comparable.toString();
ret.put(sqlProperty.getKey().getSimple(), value);
}
return ret;
} | 3.68 |
flink_TableSource_getProducedDataType | /**
* Returns the {@link DataType} for the produced data of the {@link TableSource}.
*
* @return The data type of the returned {@code DataStream}.
*/
default DataType getProducedDataType() {
final TypeInformation<T> legacyType = getReturnType();
if (legacyType == null) {
throw new TableException("Table source does not implement a produced data type.");
}
return fromLegacyInfoToDataType(legacyType).notNull();
} | 3.68 |
framework_HasValue_getOptionalValue | /**
* Returns the current value of this object, wrapped in an {@code Optional}.
* <p>
* The {@code Optional} will be empty if the value is {@code null} or
* {@code isEmpty()} returns {@code true}.
*
* @return the current value, wrapped in an {@code Optional}
*/
public default Optional<V> getOptionalValue() {
return isEmpty() ? Optional.empty() : Optional.ofNullable(getValue());
} | 3.68 |
framework_VMenuBar_onKeyDown | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.KeyDownHandler#onKeyDown(com.google.gwt
* .event.dom.client.KeyDownEvent)
*/
@Override
public void onKeyDown(KeyDownEvent event) {
// A bug fix for #14041
// getKeyCode and getCharCode return different values for different
// browsers
int keyCode = event.getNativeEvent().getKeyCode();
if (keyCode == 0) {
keyCode = event.getNativeEvent().getCharCode();
}
if (handleNavigation(keyCode,
event.isControlKeyDown() || event.isMetaKeyDown(),
event.isShiftKeyDown())) {
event.preventDefault();
}
} | 3.68 |
flink_TypeExtractionUtils_isClassType | /**
* Checks if a type can be converted to a Class. This is true for ParameterizedType and Class.
*/
public static boolean isClassType(Type t) {
return t instanceof Class<?> || t instanceof ParameterizedType;
} | 3.68 |