name | code_snippet | score |
---|---|---|
hudi_IMetaStoreClientUtil_getMSC | /**
* Returns the Hive metastore client with given Hive conf.
*/
public static IMetaStoreClient getMSC(HiveConf hiveConf) throws HiveException, MetaException {
IMetaStoreClient metaStoreClient;
try {
metaStoreClient = ((Hive) Hive.class.getMethod("getWithoutRegisterFns", HiveConf.class).invoke(null, hiveConf)).getMSC();
} catch (NoSuchMethodException | IllegalAccessException | IllegalArgumentException
| InvocationTargetException ex) {
metaStoreClient = Hive.get(hiveConf).getMSC();
}
return metaStoreClient;
} | 3.68 |
framework_DropTargetExtensionConnector_isDropAllowedByCriteriaScript | /**
 * Checks if a criteria script exists and, if so, executes it. This method
 * is protected so that subclasses such as GridDropTargetConnector can override
 * it to add additional script parameters.
*
* @param event
* browser event (dragEnter, dragOver, drop) that should be
* evaluated by the criteria script
* @return {@code true} if no script was given or if the script returned
* true, {@code false} otherwise.
*/
protected boolean isDropAllowedByCriteriaScript(NativeEvent event) {
final String criteriaScript = getState().criteriaScript;
if (criteriaScript == null) {
return true;
}
return executeScript(event, criteriaScript);
} | 3.68 |
flink_CliFrontendParser_printHelp | /** Prints the help for the client. */
public static void printHelp(Collection<CustomCommandLine> customCommandLines) {
System.out.println("./flink <ACTION> [OPTIONS] [ARGUMENTS]");
System.out.println();
System.out.println("The following actions are available:");
printHelpForRun(customCommandLines);
printHelpForRunApplication(customCommandLines);
printHelpForInfo();
printHelpForList(customCommandLines);
printHelpForStop(customCommandLines);
printHelpForCancel(customCommandLines);
printHelpForSavepoint(customCommandLines);
System.out.println();
} | 3.68 |
flink_DefaultConfigurableOptionsFactory_checkArgumentValid | /**
 * Helper method to check whether the given (key, value) pair from the configuration is valid.
*
* @param option The configuration key which is configurable in {@link
* RocksDBConfigurableOptions}.
* @param value The value within given configuration.
*/
private static void checkArgumentValid(ConfigOption<?> option, Object value) {
final String key = option.key();
if (POSITIVE_INT_CONFIG_SET.contains(option)) {
Preconditions.checkArgument(
(Integer) value > 0,
"Configured value for key: " + key + " must be larger than 0.");
} else if (SIZE_CONFIG_SET.contains(option)) {
Preconditions.checkArgument(
((MemorySize) value).getBytes() > 0,
"Configured size for key" + key + " must be larger than 0.");
} else if (LOG_MAX_FILE_SIZE.equals(option)) {
Preconditions.checkArgument(
((MemorySize) value).getBytes() >= 0,
"Configured size for key " + key + " must be larger than or equal to 0.");
} else if (LOG_DIR.equals(option)) {
Preconditions.checkArgument(
new File((String) value).isAbsolute(),
"Configured path for key " + key + " is not absolute.");
}
} | 3.68 |
hadoop_StartupProgress_getStatus | /**
* Returns the current run status of the specified phase.
*
 * @param phase the phase to get the status of
 * @return the run status of the phase
*/
public Status getStatus(Phase phase) {
PhaseTracking tracking = phases.get(phase);
if (tracking.beginTime == Long.MIN_VALUE) {
return Status.PENDING;
} else if (tracking.endTime == Long.MIN_VALUE) {
return Status.RUNNING;
} else {
return Status.COMPLETE;
}
} | 3.68 |
framework_UIDL_getVariableNames | /**
* Gets the names of variables available.
*
* @return the names of available variables
*/
public Set<String> getVariableNames() {
if (!hasVariables()) {
return new HashSet<>();
} else {
Set<String> keySet = var().getKeySet();
return keySet;
}
} | 3.68 |
framework_Table_getPreviousWidth | /**
* Get the width in pixels of the column before the resize event.
*
* @return Width in pixels
*/
public int getPreviousWidth() {
return previousWidth;
} | 3.68 |
flink_FlinkSqlNameMatcher_field | /**
* Compared to the original method we adjust the nullability of the nested column based on the
* nullability of the enclosing type.
*
 * <p>If the field's type is NOT NULL, but the enclosing ROW is nullable, we can still produce
 * nulls.
*/
@Override
public RelDataTypeField field(RelDataType rowType, String fieldName) {
RelDataTypeField field = baseMatcher.field(rowType, fieldName);
if (field != null && rowType.isNullable() && !field.getType().isNullable()) {
RelDataType typeWithNullability =
typeFactory.createTypeWithNullability(field.getType(), true);
return new RelDataTypeFieldImpl(field.getName(), field.getIndex(), typeWithNullability);
}
return field;
} | 3.68 |
hadoop_AbstractDelegationTokenBinding_convertTokenIdentifier | /**
* Verify that a token identifier is of a specific class.
 * This will reject subclasses (i.e. it is stricter than
 * {@code instanceof}), then cast it to that type.
 * @param <T> type of S3A delegation token identifier.
* @param identifier identifier to validate
* @param expectedClass class of the expected token identifier.
* @return token identifier.
* @throws DelegationTokenIOException If the wrong class was found.
*/
protected <T extends AbstractS3ATokenIdentifier> T convertTokenIdentifier(
final AbstractS3ATokenIdentifier identifier,
final Class<T> expectedClass) throws DelegationTokenIOException {
if (!identifier.getClass().equals(expectedClass)) {
throw new DelegationTokenIOException(
DelegationTokenIOException.TOKEN_WRONG_CLASS
+ "; expected a token identifier of type "
+ expectedClass
+ " but got "
+ identifier.getClass()
+ " and kind " + identifier.getKind());
}
return (T) identifier;
} | 3.68 |
hbase_SecurityInfo_addInfo | /**
* Adds a security configuration for a new service name. Note that this will have no effect if the
* service name was already registered.
*/
public static void addInfo(String serviceName, SecurityInfo securityInfo) {
infos.putIfAbsent(serviceName, securityInfo);
} | 3.68 |
hbase_LruCachedBlock_getCachedTime | /** Returns the time we were cached at, in nanoseconds. */
public long getCachedTime() {
return this.cachedTime;
} | 3.68 |
streampipes_DataStreamBuilder_create | /**
* Creates a new data stream using the builder pattern.
*
* @param id A unique identifier of the new element, e.g., com.mycompany.stream.mynewdatastream
* @return a new instance of {@link DataStreamBuilder}
*/
public static DataStreamBuilder create(String id) {
return new DataStreamBuilder(id);
} | 3.68 |
framework_VDragAndDropWrapper_initDragStartMode | /** For internal use only. May be removed or replaced in the future. */
public void initDragStartMode() {
Element div = getElement();
if (dragStartMode == HTML5) {
if (dragStartElement == null) {
dragStartElement = getDragStartElement();
dragStartElement.setPropertyBoolean(DRAGGABLE, true);
getLogger().info("draggable = "
+ dragStartElement.getPropertyBoolean(DRAGGABLE));
hookHtml5DragStart(dragStartElement);
getLogger().info("drag start listeners hooked.");
}
} else {
dragStartElement = null;
if (div.hasAttribute(DRAGGABLE)) {
div.removeAttribute(DRAGGABLE);
}
}
} | 3.68 |
querydsl_SimpleExpression_as | /**
* Create an alias for the expression
*
* @return alias expression
*/
@Override
public SimpleExpression<T> as(String alias) {
return as(ExpressionUtils.path(getType(), alias));
} | 3.68 |
hmily_FileUtils_readYAML | /**
* Read yaml string.
*
* @param yamlFile the yaml file
* @return the string
*/
@SneakyThrows
public static String readYAML(final String yamlFile) {
return Files.readAllLines(Paths.get(ClassLoader.getSystemResource(yamlFile).toURI()))
.stream().filter(each -> !each.startsWith("#")).map(each -> each + System.lineSeparator()).collect(Collectors.joining());
} | 3.68 |
framework_TreeGrid_collapse | /**
* Collapse the given items.
* <p>
* For items that are already collapsed, does nothing.
*
* @param items
* the collection of items to collapse
*/
public void collapse(Collection<T> items) {
HierarchicalDataCommunicator<T> communicator = getDataCommunicator();
items.forEach(item -> {
if (communicator.isExpanded(item)) {
communicator.collapse(item);
fireCollapseEvent(item, false);
}
});
} | 3.68 |
framework_VFilterSelect_setPromptingOn | /**
* Turns prompting on. When prompting is turned on a command prompt is shown
* in the text box if nothing has been entered.
*/
public void setPromptingOn() {
debug("VFS: setPromptingOn()");
if (!prompting) {
prompting = true;
addStyleDependentName(CLASSNAME_PROMPT);
}
setTextboxText(inputPrompt);
} | 3.68 |
hbase_SnapshotInfo_getMissingLogsCount | /** Returns the number of missing log files */
public int getMissingLogsCount() {
return logsMissing.get();
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_currentUploadBytesPerSecond | /**
* Record the current bytes-per-second upload rate seen.
* @param bytesPerSecond The bytes per second.
*/
public synchronized void currentUploadBytesPerSecond(long bytesPerSecond) {
if (bytesPerSecond > currentMaximumUploadBytesPerSecond) {
currentMaximumUploadBytesPerSecond = bytesPerSecond;
maximumUploadBytesPerSecond.set(bytesPerSecond);
}
} | 3.68 |
framework_AbstractDateField_getZoneId | /**
* Returns the {@link ZoneId}, which is used when {@code z} is included
* inside the {@link #setDateFormat(String)}.
*
* @return the zoneId
* @since 8.2
*/
public ZoneId getZoneId() {
return zoneId;
} | 3.68 |
AreaShop_FileManager_checkRents | /**
 * Unrent regions that have no time left; the number of regions to check per tick is set in the config.
*/
public void checkRents() {
Do.forAll(
plugin.getConfig().getInt("expiration.regionsPerTick"),
getRents(),
RentRegion::checkExpiration
);
} | 3.68 |
framework_SelectorPredicate_unquote | /**
* Removes the surrounding quotes from a string if it is quoted.
*
* @param str
* the possibly quoted string
* @return an unquoted version of str
*/
private static String unquote(String str) {
if ((str.startsWith("\"") && str.endsWith("\""))
|| (str.startsWith("'") && str.endsWith("'"))) {
return str.substring(1, str.length() - 1);
}
return str;
} | 3.68 |
flink_MutableByteArrayInputStream_setBuffer | /**
* Set buffer that can be read via the InputStream interface and reset the input stream. This
* has the same effect as creating a new ByteArrayInputStream with a new buffer.
*
* @param buf the new buffer to read.
*/
public void setBuffer(byte[] buf) {
this.buf = buf;
this.pos = 0;
this.count = buf.length;
} | 3.68 |
hadoop_TypedBytesInput_readByte | /**
* Reads the byte following a <code>Type.BYTE</code> code.
* @return the obtained byte
* @throws IOException
*/
public byte readByte() throws IOException {
return in.readByte();
} | 3.68 |
hadoop_BaseRecord_getPrimaryKey | /**
* Join the primary keys into one single primary key.
*
* @return A string that is guaranteed to be unique amongst all records of
* this type.
*/
public String getPrimaryKey() {
return generateMashupKey(getPrimaryKeys());
} | 3.68 |
hadoop_TupleWritable_setWritten | /**
* Record that the tuple contains an element at the position provided.
*/
void setWritten(int i) {
written.set(i);
} | 3.68 |
hbase_HFileBlock_ensureBlockReady | /**
* Transitions the block writer from the "writing" state to the "block ready" state. Does
* nothing if a block is already finished.
*/
void ensureBlockReady() throws IOException {
Preconditions.checkState(state != State.INIT, "Unexpected state: " + state);
if (state == State.BLOCK_READY) {
return;
}
// This will set state to BLOCK_READY.
finishBlock();
} | 3.68 |
framework_Notification_setPosition | /**
* Sets the position of the notification message.
*
* @param position
* The desired notification position, not {@code null}
*/
public void setPosition(Position position) {
if (position == null) {
throw new IllegalArgumentException("Position can not be null");
}
getState().position = position;
} | 3.68 |
flink_MutableHashTable_setBucket | // update current bucket status.
private void setBucket(
MemorySegment bucket,
MemorySegment[] overflowSegments,
HashPartition<BT, PT> partition,
int bucketInSegmentOffset) {
this.bucketSegment = bucket;
this.overflowSegments = overflowSegments;
this.partition = partition;
this.bucketInSegmentOffset = bucketInSegmentOffset;
this.countInSegment = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
this.numInSegment = 0;
// reset probedSet with probedFlags offset in this bucket.
this.probedSet.setMemorySegment(
bucketSegment, this.bucketInSegmentOffset + HEADER_PROBED_FLAGS_OFFSET);
} | 3.68 |
hbase_TableRegionModel_setLocation | /**
* @param location the name and port of the region server hosting the region
*/
public void setLocation(String location) {
this.location = location;
} | 3.68 |
morf_SqlScriptExecutor_withConnection | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.QueryBuilder#withConnection(java.sql.Connection)
*/
@Override
public QueryBuilder withConnection(Connection connection) {
this.connection = connection;
return this;
} | 3.68 |
flink_PartitionedFile_getIndexEntry | /**
* Gets the index entry of the target region and subpartition either from the index data cache
* or the index data file.
*/
void getIndexEntry(FileChannel indexFile, ByteBuffer target, int region, int subpartition)
throws IOException {
checkArgument(target.capacity() == INDEX_ENTRY_SIZE, "Illegal target buffer size.");
target.clear();
long indexEntryOffset = getIndexEntryOffset(region, subpartition);
if (indexEntryCache != null) {
for (int i = 0; i < INDEX_ENTRY_SIZE; ++i) {
target.put(indexEntryCache.get((int) indexEntryOffset + i));
}
} else {
synchronized (indexFilePath) {
indexFile.position(indexEntryOffset);
BufferReaderWriterUtil.readByteBufferFully(indexFile, target);
}
}
target.flip();
} | 3.68 |
hbase_TableDescriptorBuilder_getMaxFileSize | /**
 * Returns the maximum size up to which a region can grow, after which a region split is
* triggered. The region size is represented by the size of the biggest store file in that
* region.
* @return max hregion size for table, -1 if not set.
* @see #setMaxFileSize(long)
*/
@Override
public long getMaxFileSize() {
return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
} | 3.68 |
flink_ConfigurationUtils_parseTempDirectories | /**
* Extracts the task manager directories for temporary files as defined by {@link
* org.apache.flink.configuration.CoreOptions#TMP_DIRS}.
*
* @param configuration configuration object
* @return array of configured directories (in order)
*/
@Nonnull
public static String[] parseTempDirectories(Configuration configuration) {
return splitPaths(configuration.getString(CoreOptions.TMP_DIRS));
} | 3.68 |
graphhopper_VectorTile_getNameBytes | /**
* <code>required string name = 1;</code>
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
} | 3.68 |
framework_AtmospherePushConnection_disconnect | /*
* (non-Javadoc)
*
* @see com.vaadin.client.communication.PushConenction#disconnect()
*/
@Override
public void disconnect(Command command) {
assert command != null;
switch (state) {
case CONNECT_PENDING:
// Make the connection callback initiate the disconnection again
state = State.DISCONNECT_PENDING;
pendingDisconnectCommand = command;
break;
case CONNECTED:
// Normal disconnect
getLogger().info("Closing push connection");
doDisconnect(uri);
state = State.DISCONNECTED;
command.execute();
break;
case DISCONNECT_PENDING:
case DISCONNECTED:
throw new IllegalStateException(
"Can not disconnect more than once");
}
} | 3.68 |
hadoop_CacheStats_roundDown | /**
 * Round a number down to a multiple of the operating system page size.
*/
public long roundDown(long count) {
return count & (~(osPageSize - 1));
} | 3.68 |
hudi_KafkaAvroSchemaDeserializer_deserialize | /**
* We need to inject sourceSchema instead of reader schema during deserialization or later stages of the pipeline.
*
* @param includeSchemaAndVersion
* @param topic
* @param isKey
* @param payload
* @param readerSchema
* @return
* @throws SerializationException
*/
@Override
protected Object deserialize(
boolean includeSchemaAndVersion,
String topic,
Boolean isKey,
byte[] payload,
Schema readerSchema)
throws SerializationException {
return super.deserialize(includeSchemaAndVersion, topic, isKey, payload, sourceSchema);
} | 3.68 |
morf_CaseInsensitiveString_of | /**
* Returns the string, wrapped as case insensitive.
*
* @param string The string.
 * @return The wrapped instance.
*/
static CaseInsensitiveString of(String string) {
// Fast case - return the existing interned instance
CaseInsensitiveString result = cache.get(string);
if (result == null) {
synchronized (SYNC) {
String asUpperCase = string.toUpperCase();
result = cache.get(asUpperCase);
boolean isUpperCase = asUpperCase.equals(string);
if (result == null) {
result = new CaseInsensitiveString(string, asUpperCase.hashCode());
if (log.isDebugEnabled()) log.debug("New interned case insensitive string: " + result);
if (isUpperCase) {
internOnly(string, result);
} else {
internNormalAndUpperCase(string, asUpperCase, result);
}
} else if (!isUpperCase && !cache.containsKey(string)) {
internOnly(string, result);
}
}
}
return result;
} | 3.68 |
hbase_MetricsREST_incrementSucessfulIncrementRequests | /**
* @param inc How much to add to sucessfulIncrementCount.
*/
public synchronized void incrementSucessfulIncrementRequests(final int inc) {
source.incrementSucessfulIncrementRequests(inc);
} | 3.68 |
pulsar_LoadSimulationController_find | // Attempt to find a topic on the clients.
private int find(final String topic) throws Exception {
int clientWithTopic = -1;
for (int i = 0; i < clients.length; ++i) {
outputStreams[i].write(LoadSimulationClient.FIND_COMMAND);
outputStreams[i].writeUTF(topic);
}
for (int i = 0; i < clients.length; ++i) {
if (inputStreams[i].readBoolean()) {
clientWithTopic = i;
}
}
return clientWithTopic;
} | 3.68 |
framework_GridLayout_getMargin | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Layout.MarginHandler#getMargin()
*/
@Override
@SuppressWarnings("deprecation")
public MarginInfo getMargin() {
return new MarginInfo(getState(false).marginsBitmask);
} | 3.68 |
flink_FutureUtils_thenAcceptAsyncIfNotDone | /**
* This function takes a {@link CompletableFuture} and a consumer to accept the result of this
* future. If the input future is already done, this function returns {@link
* CompletableFuture#thenAccept(Consumer)}. Otherwise, the return value is {@link
* CompletableFuture#thenAcceptAsync(Consumer, Executor)} with the given executor.
*
* @param completableFuture the completable future for which we want to call #thenAccept.
* @param executor the executor to run the thenAccept function if the future is not yet done.
* @param consumer the consumer function to call when the future is completed.
* @param <IN> type of the input future.
* @return the new completion stage.
*/
public static <IN> CompletableFuture<Void> thenAcceptAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
Consumer<? super IN> consumer) {
return completableFuture.isDone()
? completableFuture.thenAccept(consumer)
: completableFuture.thenAcceptAsync(consumer, executor);
} | 3.68 |
framework_VTabsheet_setHiddenOnServer | /**
* Set tab hidden state on server (as opposed to simply hidden because
* it's scrolled out of view).
*
* @param hiddenOnServer
* {@code true} if hidden on server, {@code false} otherwise
*/
public void setHiddenOnServer(boolean hiddenOnServer) {
this.hiddenOnServer = hiddenOnServer;
Roles.getTabRole().setAriaHiddenState(getElement(), hiddenOnServer);
} | 3.68 |
framework_AbstractRemoteDataSource_resetDataAndSize | /**
* Updates the size, discarding all cached data. This method is used when
* the size of the container is changed without any information about the
* structure of the change. In this case, all cached data is discarded to
* avoid cache offset issues.
* <p>
* If you have information about the structure of the change, use
* {@link #insertRowData(int, int)} or {@link #removeRowData(int, int)} to
* indicate where the inserted or removed rows are located.
*
* @param newSize
* the new size of the container
*/
protected void resetDataAndSize(int newSize) {
size = newSize;
indexToRowMap.clear();
keyToIndexMap.clear();
cached = Range.withLength(0, 0);
getHandlers().forEach(dch -> dch.resetDataAndSize(newSize));
} | 3.68 |
hadoop_EncryptionSecrets_hasEncryptionKey | /**
* Does this instance have an encryption key?
* @return true if there's an encryption key.
*/
public boolean hasEncryptionKey() {
return StringUtils.isNotEmpty(encryptionKey);
} | 3.68 |
flink_MemorySegment_getCharLittleEndian | /**
* Reads a character value (16 bit, 2 bytes) from the given position, in little-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getChar(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getChar(int)} is the
* preferable choice.
*
* @param index The position from which the value will be read.
* @return The character value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public char getCharLittleEndian(int index) {
if (LITTLE_ENDIAN) {
return getChar(index);
} else {
return Character.reverseBytes(getChar(index));
}
} | 3.68 |
hibernate-validator_AbstractStaxBuilder_accept | /**
* Checks if the given {@link XMLEvent} is a {@link StartElement} and if the
* corresponding xml tag can be processed based on a tag name.
*
* @param xmlEvent an event to check
*
* @return {@code true} if corresponding event can be processed by current builder,
* {@code false} otherwise
*/
protected boolean accept(XMLEvent xmlEvent) {
return xmlEvent.isStartElement() && xmlEvent.asStartElement().getName().getLocalPart().equals( getAcceptableQName() );
} | 3.68 |
hbase_MemStoreFlusher_isMaximumWait | /** Returns True if we have been delayed > <code>maximumWait</code> milliseconds. */
public boolean isMaximumWait(final long maximumWait) {
return (EnvironmentEdgeManager.currentTime() - this.createTime) > maximumWait;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_doPhysicalRemoveAndGetValue | /**
* Removes the node physically, and return the newest-version value pointer. Space used by key
* and value will be freed here, but the space of newest-version value will not be freed, and
* the caller should be responsible for the free of space.
*
* @param node node to remove.
* @param prevNode previous node at the level 0.
* @param nextNode next node at the level 0.
* @return newest-version value pointer.
*/
private long doPhysicalRemoveAndGetValue(long node, long prevNode, long nextNode) {
// free space used by key and level index
long valuePointer = deleteNodeMeta(node, prevNode, nextNode);
// free space used by values except for the newest-version
long nextValuePointer = SkipListUtils.helpGetNextValuePointer(valuePointer, spaceAllocator);
SkipListUtils.removeAllValues(nextValuePointer, spaceAllocator);
return valuePointer;
} | 3.68 |
flink_StreamExecutionEnvironment_socketTextStream | /**
* Creates a new data stream that contains the strings received infinitely from a socket.
 * Received strings are decoded by the system's default character set, using "\n" as the delimiter.
* The reader is terminated immediately when the socket is down.
*
* @param hostname The host name which a server socket binds
* @param port The port number which a server socket binds. A port number of 0 means that the
* port number is automatically allocated.
* @return A data stream containing the strings received from the socket
*/
@PublicEvolving
public DataStreamSource<String> socketTextStream(String hostname, int port) {
return socketTextStream(hostname, port, "\n");
} | 3.68 |
flink_Time_getUnit | /**
* Gets the time unit for this policy's time interval.
*
* @return The time unit for this policy's time interval.
*/
public TimeUnit getUnit() {
return unit;
} | 3.68 |
dubbo_ApplicationModel_getConfigManager | /**
 * @deprecated Replaced by {@link ApplicationModel#getApplicationConfigManager()}
*/
@Deprecated
public static ConfigManager getConfigManager() {
return defaultModel().getApplicationConfigManager();
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_getActionHttpGetRequest | /**
 * Getter for the mean value of the time taken to complete an HTTP GET
* request by AbfsInputStream.
* @return mean value.
*/
@VisibleForTesting
public double getActionHttpGetRequest() {
return ioStatisticsStore.meanStatistics().
get(ACTION_HTTP_GET_REQUEST + SUFFIX_MEAN).mean();
} | 3.68 |
pulsar_ReplicatedSubscriptionsController_completed | /**
* From Topic.PublishContext.
*/
@Override
public void completed(Exception e, long ledgerId, long entryId) {
// Nothing to do in case of publish errors since the retry logic is applied upstream after a snapshot is not
// closed
if (log.isDebugEnabled()) {
log.debug("[{}] Published marker at {}:{}. Exception: {}", topic.getName(), ledgerId, entryId, e);
}
this.positionOfLastLocalMarker = new PositionImpl(ledgerId, entryId);
} | 3.68 |
flink_MetricFetcherImpl_getMetricStore | /**
* Returns the MetricStore containing all stored metrics.
*
 * @return MetricStore containing all stored metrics.
*/
@Override
public MetricStore getMetricStore() {
return metrics;
} | 3.68 |
flink_ExecutionConfig_enableClosureCleaner | /**
* Enables the ClosureCleaner. This analyzes user code functions and sets fields to null that
* are not used. This will in most cases make closures or anonymous inner classes serializable
 * that were not serializable due to some Scala or Java implementation artifact. User code must
* be serializable because it needs to be sent to worker nodes.
*/
public ExecutionConfig enableClosureCleaner() {
return setClosureCleanerLevel(ClosureCleanerLevel.RECURSIVE);
} | 3.68 |
framework_VComboBox_updateSelectionFromServer | /**
* Perform selection (if appropriate) based on a reply from the server.
* When this method is called, the suggestions have been reset if new
* ones (different from the previous list) were received from the
* server.
*
* @param selectedKey
* new selected key or null if none given by the server
* @param selectedCaption
* new selected item caption if sent by the server or null -
* this is used when the selected item is not on the current
* page
* @param selectedIconUri
 *            new selected item icon if sent by the server or {@code
* null} to clear
*/
public void updateSelectionFromServer(String selectedKey,
String selectedCaption, String selectedIconUri) {
boolean oldSuggestionTextMatchTheOldSelection = currentSuggestion != null
&& currentSuggestion.getReplacementString()
.equals(tb.getText());
serverSelectedKey = selectedKey;
performSelection(selectedKey, oldSuggestionTextMatchTheOldSelection,
!isWaitingForFilteringResponse() || popupOpenerClicked);
// currentSuggestion should be set to match the value of the
// ComboBox
resetCurrentSuggestionBasedOnServerResponse(selectedKey,
selectedCaption, selectedIconUri);
cancelPendingPostFiltering();
setSelectedCaption(selectedCaption);
setSelectedItemIcon(selectedIconUri);
} | 3.68 |
dubbo_ClassUtils_getMethodNames | /**
* get method name array.
*
* @return method name array.
*/
public static String[] getMethodNames(Class<?> tClass) {
if (tClass == Object.class) {
return OBJECT_METHODS;
}
Method[] methods = tClass.getMethods();
List<String> mns = new ArrayList<>(); // method names.
boolean hasMethod = hasMethods(methods);
if (hasMethod) {
for (Method m : methods) {
// ignore Object's method.
if (m.getDeclaringClass() == Object.class) {
continue;
}
String mn = m.getName();
mns.add(mn);
}
}
return mns.toArray(new String[0]);
} | 3.68 |
Activiti_ProcessEngines_init | /**
* Initializes all process engines that can be found on the classpath for resources <code>activiti.cfg.xml</code> (plain Activiti style configuration) and for resources
* <code>activiti-context.xml</code> (Spring style configuration).
*/
public synchronized static void init() {
if (!isInitialized()) {
if (processEngines == null) {
// Create new map to store process-engines if current map is
// null
processEngines = new HashMap<String, ProcessEngine>();
}
ClassLoader classLoader = ReflectUtil.getClassLoader();
Enumeration<URL> resources = null;
try {
resources = classLoader.getResources("activiti.cfg.xml");
} catch (IOException e) {
throw new ActivitiIllegalArgumentException("problem retrieving activiti.cfg.xml resources on the classpath: " + System.getProperty("java.class.path"), e);
}
// Remove duplicated configuration URL's using set. Some
// classloaders may return identical URL's twice, causing duplicate
// startups
Set<URL> configUrls = new HashSet<URL>();
while (resources.hasMoreElements()) {
configUrls.add(resources.nextElement());
}
for (Iterator<URL> iterator = configUrls.iterator(); iterator.hasNext();) {
URL resource = iterator.next();
log.info("Initializing process engine using configuration '{}'", resource.toString());
initProcessEngineFromResource(resource);
}
try {
resources = classLoader.getResources("activiti-context.xml");
} catch (IOException e) {
throw new ActivitiIllegalArgumentException("problem retrieving activiti-context.xml resources on the classpath: " + System.getProperty("java.class.path"), e);
}
while (resources.hasMoreElements()) {
URL resource = resources.nextElement();
log.info("Initializing process engine using Spring configuration '{}'", resource.toString());
initProcessEngineFromSpringResource(resource);
}
setInitialized(true);
} else {
log.info("Process engines already initialized");
}
} | 3.68 |
pulsar_PerformanceBaseArguments_validate | /**
* Validate the CLI arguments. Default implementation provides validation for the common arguments.
* Each subclass should call super.validate() and provide validation code specific to the sub-command.
* @throws Exception
*/
public void validate() throws Exception {
if (confFile != null && !confFile.isBlank()) {
File configFile = new File(confFile);
if (!configFile.exists()) {
throw new Exception("config file '" + confFile + "', does not exist");
}
if (configFile.isDirectory()) {
throw new Exception("config file '" + confFile + "', is a directory");
}
}
} | 3.68 |
morf_ViewBean_getDependencies | /**
* @see org.alfasoftware.morf.metadata.View#getDependencies()
*/
@Override
public String[] getDependencies() {
return Arrays.copyOf(dependencies, dependencies.length);
} | 3.68 |
hbase_HBaseTestingUtility_setDFSCluster | /**
* Set the MiniDFSCluster
* @param cluster cluster to use
 * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before it
* is set.
* @throws IllegalStateException if the passed cluster is up when it is required to be down
* @throws IOException if the FileSystem could not be set from the passed dfs cluster
*/
public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
throws IllegalStateException, IOException {
if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
}
this.dfsCluster = cluster;
this.setFs();
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_seekForwards | /**
* Record a forward seek, adding a seek operation, a forward
* seek operation, and any bytes skipped.
*
* @param skipped number of bytes skipped by reading from the stream.
* If the seek was implemented by a close + reopen, set this to zero.
*/
@Override
public void seekForwards(long skipped) {
seekOps.incrementAndGet();
ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS);
ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED, skipped);
} | 3.68 |
hadoop_Validate_checkGreaterOrEqual | /**
* Validates that the first value is greater than or equal to the second value.
* @param value1 the first value to check.
* @param value1Name the name of the first argument.
* @param value2 the second value to check.
* @param value2Name the name of the second argument.
*/
public static void checkGreaterOrEqual(
long value1,
String value1Name,
long value2,
String value2Name) {
checkArgument(
value1 >= value2,
"'%s' (%s) must be greater than or equal to '%s' (%s).",
value1Name,
value1,
value2Name,
value2);
} | 3.68 |
hudi_HoodieExampleDataGenerator_generateInsertsStream | /**
* Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys.
*/
public Stream<HoodieRecord<T>> generateInsertsStream(String commitTime, Integer n) {
int currSize = getNumExistingKeys();
return IntStream.range(0, n).boxed().map(i -> {
String partitionPath = partitionPaths[RAND.nextInt(partitionPaths.length)];
HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath);
KeyPartition kp = new KeyPartition();
kp.key = key;
kp.partitionPath = partitionPath;
existingKeys.put(currSize + i, kp);
numExistingKeys++;
return new HoodieAvroRecord<>(key, generateRandomValue(key, commitTime));
});
} | 3.68 |
hudi_CleanPlanner_isFileSliceExistInSavepointedFiles | /**
 * Verify whether the file slice exists in savepointedFiles, checking both the base file and the log files.
*/
private boolean isFileSliceExistInSavepointedFiles(FileSlice fs, List<String> savepointedFiles) {
if (fs.getBaseFile().isPresent() && savepointedFiles.contains(fs.getBaseFile().get().getFileName())) {
return true;
}
for (HoodieLogFile hoodieLogFile : fs.getLogFiles().collect(Collectors.toList())) {
if (savepointedFiles.contains(hoodieLogFile.getFileName())) {
return true;
}
}
return false;
} | 3.68 |
hudi_SourceFormatAdapter_transformJsonToGenericRdd | /**
 * Transforms an input RDD of JSON strings to generic records, with support for adding error events to the error table.
* @param inputBatch
* @return
*/
private JavaRDD<GenericRecord> transformJsonToGenericRdd(InputBatch<JavaRDD<String>> inputBatch) {
MercifulJsonConverter.clearCache(inputBatch.getSchemaProvider().getSourceSchema().getFullName());
AvroConvertor convertor = new AvroConvertor(inputBatch.getSchemaProvider().getSourceSchema(), isFieldNameSanitizingEnabled(), getInvalidCharMask());
return inputBatch.getBatch().map(rdd -> {
if (errorTableWriter.isPresent()) {
JavaRDD<Either<GenericRecord,String>> javaRDD = rdd.map(convertor::fromJsonWithError);
errorTableWriter.get().addErrorEvents(javaRDD.filter(x -> x.isRight()).map(x ->
new ErrorEvent<>(x.right().get(), ErrorEvent.ErrorReason.JSON_AVRO_DESERIALIZATION_FAILURE)));
return javaRDD.filter(x -> x.isLeft()).map(x -> x.left().get());
} else {
return rdd.map(convertor::fromJson);
}
}).orElse(null);
} | 3.68 |
dubbo_FrameworkModelCleaner_destroyProtocols | /**
* Destroy all the protocols.
*/
private void destroyProtocols(FrameworkModel frameworkModel) {
if (protocolDestroyed.compareAndSet(false, true)) {
ExtensionLoader<Protocol> loader = frameworkModel.getExtensionLoader(Protocol.class);
for (String protocolName : loader.getLoadedExtensions()) {
try {
Protocol protocol = loader.getLoadedExtension(protocolName);
if (protocol != null) {
protocol.destroy();
}
} catch (Throwable t) {
logger.warn(CONFIG_UNDEFINED_PROTOCOL, "", "", t.getMessage(), t);
}
}
}
} | 3.68 |
querydsl_AbstractMySQLQuery_intoDumpfile | /**
* SELECT ... INTO DUMPFILE writes a single row to a file without any formatting.
*
* @param file file to write to
* @return the current object
*/
public C intoDumpfile(File file) {
return addFlag(Position.END, "\ninto dumpfile '" + file.getPath() + "'");
} | 3.68 |
morf_OracleDialect_columnComment | /**
 * Build the comment that allows the metadata reader to determine the correct lower case table names and types.
*/
private String columnComment(Column column, String tableName) {
StringBuilder comment = new StringBuilder ("COMMENT ON COLUMN " + schemaNamePrefix() + tableName + "." + column.getName() + " IS '"+REAL_NAME_COMMENT_LABEL+":[" + column.getName() + "]/TYPE:[" + column.getType().toString() + "]");
if (column.isAutoNumbered()) {
int autoNumberStart = column.getAutoNumberStart() == -1 ? 1 : column.getAutoNumberStart();
comment.append("/AUTONUMSTART:[").append(autoNumberStart).append("]");
}
comment.append("'");
return comment.toString();
} | 3.68 |
flink_MiniCluster_createRemoteRpcService | /**
* Factory method to instantiate the remote RPC service.
*
* @param configuration Flink configuration.
* @param externalAddress The external address to access the RPC service.
* @param externalPortRange The external port range to access the RPC service.
* @param bindAddress The address to bind the RPC service to.
 * @param rpcSystem the RPC system used to create the remote RPC service.
* @return The instantiated RPC service
*/
protected RpcService createRemoteRpcService(
Configuration configuration,
String externalAddress,
String externalPortRange,
String bindAddress,
RpcSystem rpcSystem)
throws Exception {
return rpcSystem
.remoteServiceBuilder(configuration, externalAddress, externalPortRange)
.withBindAddress(bindAddress)
.withExecutorConfiguration(RpcUtils.getTestForkJoinExecutorConfiguration())
.createAndStart();
} | 3.68 |
hadoop_ECBlock_isParity | /**
*
 * @return true if it's a parity block, otherwise false
*/
public boolean isParity() {
return isParity;
} | 3.68 |
morf_NamedParameterPreparedStatement_setLong | /**
* Sets the value of a named long parameter.
*
* @param parameter the parameter metadata.
* @param value the parameter value.
* @return this, for method chaining
* @exception SQLException if an error occurs when setting the parameter
*/
public NamedParameterPreparedStatement setLong(SqlParameter parameter, final long value) throws SQLException {
forEachOccurrenceOfParameter(parameter, new Operation() {
@Override
public void apply(int parameterIndex) throws SQLException {
statement.setLong(parameterIndex, value);
}
});
return this;
} | 3.68 |
framework_PointerEvent_getTiltX | /**
* Gets the angle between the Y-Z plane and the plane containing both the
* transducer and the Y axis. A positive tilt is to the right.
*
* @return the tilt along the X axis as degrees in the range of [-90, 90],
* or 0 if the device does not support tilt
*/
public final double getTiltX() {
return getTiltX(getNativeEvent());
} | 3.68 |
hudi_DFSPathSelector_getNextFilePathsAndMaxModificationTime | /**
* Get the list of files changed since last checkpoint.
*
* @param lastCheckpointStr the last checkpoint time string, empty if first run
* @param sourceLimit max bytes to read each time
* @return the list of files concatenated and their latest modified time
*/
@Deprecated
public Pair<Option<String>, String> getNextFilePathsAndMaxModificationTime(Option<String> lastCheckpointStr,
long sourceLimit) {
try {
// obtain all eligible files under root folder.
log.info("Root path => " + getStringWithAltKeys(props, DFSPathSelectorConfig.ROOT_INPUT_PATH)
+ " source limit => " + sourceLimit);
long lastCheckpointTime = lastCheckpointStr.map(Long::parseLong).orElse(Long.MIN_VALUE);
List<FileStatus> eligibleFiles = listEligibleFiles(
fs, new Path(getStringWithAltKeys(props, DFSPathSelectorConfig.ROOT_INPUT_PATH)), lastCheckpointTime);
// sort them by modification time.
eligibleFiles.sort(Comparator.comparingLong(FileStatus::getModificationTime));
// Filter based on checkpoint & input size, if needed
long currentBytes = 0;
long newCheckpointTime = lastCheckpointTime;
List<FileStatus> filteredFiles = new ArrayList<>();
for (FileStatus f : eligibleFiles) {
if (currentBytes + f.getLen() >= sourceLimit && f.getModificationTime() > newCheckpointTime) {
// we have enough data, we are done
// Also, we've read up to a file with a newer modification time
// so that some files with the same modification time won't be skipped in next read
break;
}
newCheckpointTime = f.getModificationTime();
currentBytes += f.getLen();
filteredFiles.add(f);
}
// no data to read
if (filteredFiles.isEmpty()) {
return new ImmutablePair<>(Option.empty(), String.valueOf(newCheckpointTime));
}
// read the files out.
String pathStr = filteredFiles.stream().map(f -> f.getPath().toString()).collect(Collectors.joining(","));
return new ImmutablePair<>(Option.ofNullable(pathStr), String.valueOf(newCheckpointTime));
} catch (IOException ioe) {
throw new HoodieIOException("Unable to read from source from checkpoint: " + lastCheckpointStr, ioe);
}
} | 3.68 |
querydsl_QBean_as | /**
* Create an alias for the expression
*
* @return this as alias
*/
public Expression<T> as(String alias) {
return as(ExpressionUtils.path(getType(), alias));
} | 3.68 |
hadoop_ScriptBasedNodeLabelsProvider_cleanUp | /**
* Method used to terminate the Node Labels Fetch script.
*/
@Override
public void cleanUp() {
if (runner != null) {
runner.cleanUp();
}
} | 3.68 |
flink_MemoryManager_releaseAll | /**
* Releases all memory segments for the given owner.
*
* @param owner The owner memory segments are to be released.
*/
public void releaseAll(Object owner) {
if (owner == null) {
return;
}
Preconditions.checkState(!isShutDown, "Memory manager has been shut down.");
// get all segments
Set<MemorySegment> segments = allocatedSegments.remove(owner);
// all segments may have been freed previously individually
if (segments == null || segments.isEmpty()) {
return;
}
// free each segment
for (MemorySegment segment : segments) {
segment.free();
}
segments.clear();
} | 3.68 |
flink_StateDescriptor_isQueryable | /**
* Returns whether the state created from this descriptor is queryable.
*
* @return <code>true</code> if state is queryable, <code>false</code> otherwise.
* @deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed
* in a future Flink major version.
*/
@Deprecated
public boolean isQueryable() {
return queryableStateName != null;
} | 3.68 |
flink_MiniCluster_closeAsync | /**
* Shuts down the mini cluster, failing all currently executing jobs. The mini cluster can be
* started again by calling the {@link #start()} method again.
*
* <p>This method shuts down all started services and components, even if an exception occurs in
* the process of shutting down some component.
*
* @return Future which is completed once the MiniCluster has been completely shut down
*/
@Override
public CompletableFuture<Void> closeAsync() {
return closeInternal(true);
} | 3.68 |
flink_Tuple15_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
* @param f13 The value for field 13
* @param f14 The value for field 14
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
} | 3.68 |
morf_TableLoader_sqlInsertOrMergeLoad | /**
* Insert the data using an SQL insert.
*
* @param table The table we are inserting into.
* @param records The data to insert.
* @param connection The connection.
*/
private void sqlInsertOrMergeLoad(Table table, Iterable<Record> records, Connection connection) {
try {
if (merge && !table.primaryKey().isEmpty()) { // if the table has no primary key we don't have a merge ON criterion
SelectStatement selectStatement = SelectStatement.select()
.fields(table.columns().stream().filter(c -> !c.isAutoNumbered()).map(c -> parameter(c)).collect(toList()))
.build();
MergeStatement mergeStatement = MergeStatement.merge()
.from(selectStatement)
.into(tableRef(table.getName()))
.tableUniqueKey(table.primaryKey().stream().map(c -> parameter(c)).collect(toList()))
.build();
String mergeSQL = sqlDialect.convertStatementToSQL(mergeStatement);
sqlExecutor.executeStatementBatch(mergeSQL, SqlParameter.parametersFromColumns(table.columns()), records, connection, explicitCommit, batchSize);
} else {
InsertStatement insertStatement = InsertStatement.insert().into(tableRef(table.getName())).build();
String insertSQL = sqlDialect.convertStatementToSQL(insertStatement, SchemaUtils.schema(table));
sqlExecutor.executeStatementBatch(insertSQL, SqlParameter.parametersFromColumns(table.columns()), records, connection, explicitCommit, batchSize);
}
} catch (Exception exceptionOnBatch) {
throw new RuntimeException(String.format("Failure in batch insert for table [%s]", table.getName()), exceptionOnBatch);
}
} | 3.68 |
querydsl_PathBuilder_getMap | /**
* Create a new Map typed path
*
* @param <K>
* @param <V>
* @param <E>
* @param property property name
* @param key key type
* @param value value type
 * @param queryType value expression type
* @return property path
*/
public <K, V, E extends SimpleExpression<V>> MapPath<K, V, E> getMap(String property, Class<K> key, Class<V> value, Class<? super E> queryType) {
validate(property, Map.class);
return super.createMap(property, key, value, queryType);
} | 3.68 |
flink_SqlJsonUtils_createObjectNode | /** Returns a new {@link ObjectNode}. */
public static ObjectNode createObjectNode() {
return MAPPER.createObjectNode();
} | 3.68 |
flink_ParquetColumnarRowSplitReader_clipParquetSchema | /** Clips `parquetSchema` according to `fieldNames`. */
private static MessageType clipParquetSchema(
GroupType parquetSchema, String[] fieldNames, boolean caseSensitive) {
Type[] types = new Type[fieldNames.length];
if (caseSensitive) {
for (int i = 0; i < fieldNames.length; ++i) {
String fieldName = fieldNames[i];
if (parquetSchema.getFieldIndex(fieldName) < 0) {
throw new IllegalArgumentException(fieldName + " does not exist");
}
types[i] = parquetSchema.getType(fieldName);
}
} else {
Map<String, Type> caseInsensitiveFieldMap = new HashMap<>();
for (Type type : parquetSchema.getFields()) {
caseInsensitiveFieldMap.compute(
type.getName().toLowerCase(Locale.ROOT),
(key, previousType) -> {
if (previousType != null) {
throw new FlinkRuntimeException(
"Parquet with case insensitive mode should have no duplicate key: "
+ key);
}
return type;
});
}
for (int i = 0; i < fieldNames.length; ++i) {
Type type = caseInsensitiveFieldMap.get(fieldNames[i].toLowerCase(Locale.ROOT));
if (type == null) {
throw new IllegalArgumentException(fieldNames[i] + " does not exist");
}
// TODO clip for array,map,row types.
types[i] = type;
}
}
return Types.buildMessage().addFields(types).named("flink-parquet");
} | 3.68 |
dubbo_PathURLAddress_getIp | /**
* Fetch IP address for this URL.
* <p>
 * Please note that IP should be used instead of Host when comparing with a socket's address or searching in a map
 * which uses the address as its key.
*
* @return ip in string format
*/
public String getIp() {
if (ip == null) {
ip = NetUtils.getIpByHost(getHost());
}
return ip;
} | 3.68 |
dubbo_InternalServiceConfigBuilder_getRelatedOrDefaultProtocol | /**
 * Get another configured protocol from the environment in priority order. If nothing is found, use the default dubbo protocol.
*
* @return
*/
private String getRelatedOrDefaultProtocol() {
String protocol = "";
// <dubbo:consumer/>
List<ModuleModel> moduleModels = applicationModel.getPubModuleModels();
protocol = moduleModels.stream()
.map(ModuleModel::getConfigManager)
.map(ModuleConfigManager::getConsumers)
.filter(CollectionUtils::isNotEmpty)
.flatMap(Collection::stream)
.map(ConsumerConfig::getProtocol)
.filter(StringUtils::isNotEmpty)
.filter(p -> !UNACCEPTABLE_PROTOCOL.contains(p))
.findFirst()
.orElse("");
// <dubbo:provider/>
if (StringUtils.isEmpty(protocol)) {
Stream<ProviderConfig> providerConfigStream = moduleModels.stream()
.map(ModuleModel::getConfigManager)
.map(ModuleConfigManager::getProviders)
.filter(CollectionUtils::isNotEmpty)
.flatMap(Collection::stream);
protocol = providerConfigStream
.filter((providerConfig) -> providerConfig.getProtocol() != null
|| CollectionUtils.isNotEmpty(providerConfig.getProtocols()))
.map(providerConfig -> {
if (providerConfig.getProtocol() != null
&& StringUtils.isNotEmpty(
providerConfig.getProtocol().getName())) {
return providerConfig.getProtocol().getName();
} else {
return providerConfig.getProtocols().stream()
.map(ProtocolConfig::getName)
.filter(StringUtils::isNotEmpty)
.findFirst()
.orElse("");
}
})
.filter(StringUtils::isNotEmpty)
.filter(p -> !UNACCEPTABLE_PROTOCOL.contains(p))
.findFirst()
.orElse("");
}
// <dubbo:protocol/>
if (StringUtils.isEmpty(protocol)) {
Collection<ProtocolConfig> protocols =
applicationModel.getApplicationConfigManager().getProtocols();
if (CollectionUtils.isNotEmpty(protocols)) {
protocol = protocols.stream()
.map(ProtocolConfig::getName)
.filter(StringUtils::isNotEmpty)
.filter(p -> !UNACCEPTABLE_PROTOCOL.contains(p))
.findFirst()
.orElse("");
}
}
// <dubbo:application/>
if (StringUtils.isEmpty(protocol)) {
protocol = getApplicationConfig().getProtocol();
if (StringUtils.isEmpty(protocol)) {
Map<String, String> params = getApplicationConfig().getParameters();
if (CollectionUtils.isNotEmptyMap(params)) {
protocol = params.get(APPLICATION_PROTOCOL_KEY);
}
}
}
return StringUtils.isNotEmpty(protocol) && !UNACCEPTABLE_PROTOCOL.contains(protocol)
? protocol
: DUBBO_PROTOCOL;
} | 3.68 |
framework_JsonPaintTarget_startTag | /**
* Prints the element start tag.
*
* <pre>
* Todo:
* Checking of input values
*
* </pre>
*
* @param tagName
* the name of the start tag.
* @param isChildNode
* {@code true} if child node, {@code false} otherwise
* @throws PaintException
* if the paint operation failed.
*
*/
public void startTag(String tagName, boolean isChildNode)
throws PaintException {
// In case of null data output nothing:
if (tagName == null) {
throw new NullPointerException();
}
// Ensures that the target is open
if (closed) {
throw new PaintException(
"Attempted to write to a closed PaintTarget.");
}
if (tag != null) {
openJsonTags.push(tag);
}
// Checks tagName and attributes here
mOpenTags.push(tagName);
tag = new JsonTag(tagName);
customLayoutArgumentsOpen = false;
} | 3.68 |
flink_BinarySegmentUtils_setShort | /**
 * Sets a short value in the segments at the given offset.
 *
 * @param segments target segments.
 * @param offset value offset.
 * @param value the short value to set.
*/
public static void setShort(MemorySegment[] segments, int offset, short value) {
if (inFirstSegment(segments, offset, 2)) {
segments[0].putShort(offset, value);
} else {
setShortMultiSegments(segments, offset, value);
}
} | 3.68 |
hbase_MetaTableAccessor_getTableStates | /**
* Fetch table states from META table
* @param conn connection to use
* @return map {tableName -> state}
*/
public static Map<TableName, TableState> getTableStates(Connection conn) throws IOException {
final Map<TableName, TableState> states = new LinkedHashMap<>();
ClientMetaTableAccessor.Visitor collector = r -> {
TableState state = CatalogFamilyFormat.getTableState(r);
if (state != null) {
states.put(state.getTableName(), state);
}
return true;
};
fullScanTables(conn, collector);
return states;
} | 3.68 |
pulsar_AuthenticationProviderSasl_authenticateHttpRequest | /**
 * Sets the response according to the passed-in request,
 * and returns whether we should proceed with the following chain.doFilter or not.
*/
@Override
public boolean authenticateHttpRequest(HttpServletRequest request, HttpServletResponse response) throws Exception {
AuthenticationState state = getAuthState(request);
String saslAuthRoleToken = authRoleFromHttpRequest(request);
// role token exist
if (saslAuthRoleToken != null) {
// role token expired, send role token expired to client.
if (saslAuthRoleToken.equalsIgnoreCase(SASL_AUTH_ROLE_TOKEN_EXPIRED)) {
setResponseHeaderState(response, SASL_AUTH_ROLE_TOKEN_EXPIRED);
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Role token expired");
if (log.isDebugEnabled()) {
log.debug("[{}] Server side role token expired: {}", request.getRequestURI(), saslAuthRoleToken);
}
return false;
}
// role token OK to use,
// if request is ask for role token verify, send auth complete to client
// if request is a real request with valid role token, pass this request down.
if (request.getHeader(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_COMPLETE)) {
request.setAttribute(AuthenticatedRoleAttributeName, saslAuthRoleToken);
request.setAttribute(AuthenticatedDataAttributeName,
new AuthenticationDataHttps(request));
if (log.isDebugEnabled()) {
log.debug("[{}] Server side role token OK to go on: {}", request.getRequestURI(),
saslAuthRoleToken);
}
return true;
} else {
checkState(request.getHeader(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_SERVER_CHECK_TOKEN));
setResponseHeaderState(response, SASL_STATE_COMPLETE);
response.setHeader(SASL_STATE_SERVER, request.getHeader(SASL_STATE_SERVER));
response.setStatus(HttpServletResponse.SC_OK);
if (log.isDebugEnabled()) {
log.debug("[{}] Server side role token verified success: {}", request.getRequestURI(),
saslAuthRoleToken);
}
return false;
}
} else {
// no role token, do sasl auth
// need new authState
if (state == null || request.getHeader(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_CLIENT_INIT)) {
state = newAuthState(null, null, null);
authStates.put(state.getStateId(), state);
}
checkState(request.getHeader(SASL_AUTH_TOKEN) != null,
"Header token should exist if no role token.");
// do the sasl auth
AuthData clientData = AuthData.of(Base64.getDecoder().decode(
request.getHeader(SASL_AUTH_TOKEN)));
AuthData brokerData = state.authenticate(clientData);
// authentication has completed, it has get the auth role.
if (state.isComplete()) {
if (log.isDebugEnabled()) {
log.debug("[{}] SASL server authentication complete, send OK to client.", request.getRequestURI());
}
String authRole = state.getAuthRole();
String authToken = createAuthRoleToken(authRole, String.valueOf(state.getStateId()));
response.setHeader(SASL_AUTH_ROLE_TOKEN, authToken);
// auth request complete, return OK, wait for a new real request to come.
response.setHeader(SASL_STATE_SERVER, String.valueOf(state.getStateId()));
setResponseHeaderState(response, SASL_STATE_COMPLETE);
response.setStatus(HttpServletResponse.SC_OK);
// auth completed, no need to keep authState
authStates.invalidate(state.getStateId());
return false;
} else {
// auth not complete
if (log.isDebugEnabled()) {
log.debug("[{}] SASL server authentication not complete, send {} back to client.",
request.getRequestURI(), HttpServletResponse.SC_UNAUTHORIZED);
}
setResponseHeaderState(response, SASL_STATE_NEGOTIATE);
response.setHeader(SASL_STATE_SERVER, String.valueOf(state.getStateId()));
response.setHeader(SASL_AUTH_TOKEN, Base64.getEncoder().encodeToString(brokerData.getBytes()));
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "SASL Authentication not complete.");
return false;
}
}
} | 3.68 |
hbase_RowCounter_createSubmittableJob | /** Returns the JobConf */
public JobConf createSubmittableJob(String[] args) throws IOException {
JobConf c = new JobConf(getConf(), getClass());
c.setJobName(NAME);
// Columns are space delimited
StringBuilder sb = new StringBuilder();
final int columnoffset = 2;
for (int i = columnoffset; i < args.length; i++) {
if (i > columnoffset) {
sb.append(" ");
}
sb.append(args[i]);
}
// Second argument is the table name.
TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), RowCounterMapper.class,
ImmutableBytesWritable.class, Result.class, c);
c.setNumReduceTasks(0);
// First arg is the output directory.
FileOutputFormat.setOutputPath(c, new Path(args[0]));
return c;
} | 3.68 |
flink_RocksDBResourceContainer_createBaseCommonDBOptions | /** Create a {@link DBOptions} for RocksDB, including some common settings. */
DBOptions createBaseCommonDBOptions() {
return new DBOptions().setUseFsync(false).setStatsDumpPeriodSec(0);
} | 3.68 |
hibernate-validator_NotEmptyValidatorForCollection_isValid | /**
* Checks the collection is not {@code null} and not empty.
*
* @param collection the collection to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the collection is not {@code null} and the collection is not empty
*/
@Override
public boolean isValid(Collection collection, ConstraintValidatorContext constraintValidatorContext) {
if ( collection == null ) {
return false;
}
return collection.size() > 0;
} | 3.68 |
morf_AbstractSqlDialectTest_testDeleteWithTableInDifferentSchema | /**
* Tests that a delete statement is prefixed with the schema name if the schema is specified.
*/
@Test
public void testDeleteWithTableInDifferentSchema() {
DeleteStatement stmt = new DeleteStatement(new TableReference("MYSCHEMA", TEST_TABLE));
String expectedSql = "DELETE FROM " + differentSchemaTableName(TEST_TABLE);
assertEquals("Simple delete", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
morf_DataSetProducerAdapter_getSchema | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#getSchema()
*/
@Override
public Schema getSchema() {
return delegate.getSchema();
} | 3.68 |
morf_SqlDialect_preInsertWithPresetAutonumStatements | /**
* Creates SQL to execute prior to bulk-inserting to a table.
*
* @param table {@link Table} to be inserted to.
* @param insertingUnderAutonumLimit Determines whether we are inserting under an auto-numbering limit.
* @return SQL statements to be executed prior to insert.
*/
@SuppressWarnings("unused")
public Collection<String> preInsertWithPresetAutonumStatements(Table table, boolean insertingUnderAutonumLimit) {
return ImmutableList.of();
} | 3.68 |
hbase_FirstKeyOnlyFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.FirstKeyOnlyFilter.Builder builder = FilterProtos.FirstKeyOnlyFilter.newBuilder();
return builder.build().toByteArray();
} | 3.68 |
flink_Topology_getPipelinedRegionOfVertex | /**
* The pipelined region for a specified vertex.
*
* @param vertexId the vertex id identifying the vertex for which the pipelined region should be
* returned
* @return the pipelined region of the vertex
* @throws IllegalArgumentException if there is no vertex in this topology with the specified
* vertex id
*/
default PR getPipelinedRegionOfVertex(VID vertexId) {
throw new UnsupportedOperationException();
} | 3.68 |