name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
framework_TextArea_isWordwrap | /**
* Tests if the text area is in word-wrap mode.
*
* @return <code>true</code> if the component is in word-wrap mode,
* <code>false</code> if not.
*/
public boolean isWordwrap() {
return getState(false).wordwrap;
} | 3.68 |
flink_SavepointWriter_withOperator | /**
* Adds a new operator to the savepoint.
*
* @param identifier The identifier of the operator.
* @param transformation The operator to be included.
* @return The modified savepoint.
*/
public <T> SavepointWriter withOperator(
OperatorIdentifier identifier, StateBootstrapTransformation<T> transformation) {
metadata.addOperator(identifier, transformation);
return this;
} | 3.68 |
graphhopper_ValueExpressionVisitor_isValidIdentifier | // allow only methods and other identifiers (constants and encoded values)
boolean isValidIdentifier(String identifier) {
if (variableValidator.isValid(identifier)) {
if (!Character.isUpperCase(identifier.charAt(0)))
result.guessedVariables.add(identifier);
return true;
}
return false;
} | 3.68 |
hudi_S3EventsSource_fetchNextBatch | /**
* Fetches next events from the queue.
*
* @param lastCkptStr The last checkpoint instant string, empty if first run.
* @param sourceLimit Limit on the size of data to fetch. For {@link S3EventsSource},
* {@link S3SourceConfig#S3_SOURCE_QUEUE_MAX_MESSAGES_PER_BATCH} is used.
* @return A pair of dataset of event records and the next checkpoint instant string
*/
@Override
public Pair<Option<Dataset<Row>>, String> fetchNextBatch(Option<String> lastCkptStr, long sourceLimit) {
Pair<List<String>, String> selectPathsWithLatestSqsMessage =
pathSelector.getNextEventsFromQueue(sqs, lastCkptStr, processedMessages);
if (selectPathsWithLatestSqsMessage.getLeft().isEmpty()) {
return Pair.of(Option.empty(), selectPathsWithLatestSqsMessage.getRight());
} else {
Dataset<String> eventRecords = sparkSession.createDataset(selectPathsWithLatestSqsMessage.getLeft(), Encoders.STRING());
StructType sourceSchema = UtilHelpers.getSourceSchema(schemaProvider);
if (sourceSchema != null) {
return Pair.of(
Option.of(sparkSession.read().schema(sourceSchema).json(eventRecords)),
selectPathsWithLatestSqsMessage.getRight());
} else {
return Pair.of(
Option.of(sparkSession.read().json(eventRecords)),
selectPathsWithLatestSqsMessage.getRight());
}
}
} | 3.68 |
dubbo_StandardMetadataServiceURLBuilder_getMetadataServiceURLsParams | /**
* Get the multiple {@link URL urls'} parameters of {@link MetadataService MetadataService's} Metadata
*
* @param serviceInstance the instance of {@link ServiceInstance}
* @return non-null {@link Map}, the key is {@link URL#getProtocol() the protocol of URL}, the value is
*/
private Map<String, String> getMetadataServiceURLsParams(ServiceInstance serviceInstance) {
Map<String, String> metadata = serviceInstance.getMetadata();
String param = metadata.get(METADATA_SERVICE_URL_PARAMS_PROPERTY_NAME);
return isBlank(param) ? emptyMap() : (Map) JsonUtils.toJavaObject(param, Map.class);
} | 3.68 |
flink_DecimalData_fromUnscaledLong | /**
* Creates an instance of {@link DecimalData} from an unscaled long value and the given
* precision and scale.
*/
public static DecimalData fromUnscaledLong(long unscaledLong, int precision, int scale) {
checkArgument(precision > 0 && precision <= MAX_LONG_DIGITS);
return new DecimalData(precision, scale, unscaledLong, null);
} | 3.68 |
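A quick worked example of the factory above (a hedged sketch; `toBigDecimal()` is assumed to be available on `DecimalData`): the represented value equals the unscaled long scaled down by 10^scale.

```java
// Hedged usage sketch: 314159 with precision 6 and scale 5 represents 3.14159,
// i.e. value = unscaledLong * 10^(-scale).
DecimalData d = DecimalData.fromUnscaledLong(314159L, 6, 5);
System.out.println(d.toBigDecimal()); // 3.14159
```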
flink_KvStateSerializer_serializeMap | /**
* Serializes all values of the Iterable with the given serializer.
*
* @param entries Key-value pairs to serialize
* @param keySerializer Serializer for UK
* @param valueSerializer Serializer for UV
* @param <UK> Type of the keys
* @param <UV> Type of the values
 * @return Serialized values, or <code>null</code> if <code>entries</code> is <code>null</code>
* @throws IOException On failure during serialization
*/
public static <UK, UV> byte[] serializeMap(
Iterable<Map.Entry<UK, UV>> entries,
TypeSerializer<UK> keySerializer,
TypeSerializer<UV> valueSerializer)
throws IOException {
if (entries != null) {
// Serialize
DataOutputSerializer dos = new DataOutputSerializer(32);
for (Map.Entry<UK, UV> entry : entries) {
keySerializer.serialize(entry.getKey(), dos);
if (entry.getValue() == null) {
dos.writeBoolean(true);
} else {
dos.writeBoolean(false);
valueSerializer.serialize(entry.getValue(), dos);
}
}
return dos.getCopyOfBuffer();
} else {
return null;
}
} | 3.68 |
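A hedged usage fragment for the method above (imports omitted; `LongSerializer.INSTANCE` and `StringSerializer.INSTANCE` are assumed to be the base serializer singletons shipped with Flink): each entry is written as the key, a boolean null flag, and then the value when present.

```java
// Hedged usage sketch: serialize a Long -> String map, including a null value.
Map<Long, String> state = new HashMap<>();
state.put(1L, "one");
state.put(2L, null); // nulls are allowed; they are encoded via the boolean flag
byte[] bytes = KvStateSerializer.serializeMap(
        state.entrySet(), LongSerializer.INSTANCE, StringSerializer.INSTANCE);
```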
flink_StreamExecutionEnvironment_fromCollection | /**
* Creates a data stream from the given iterator.
*
* <p>Because the iterator will remain unmodified until the actual execution happens, the type
* of data returned by the iterator must be given explicitly in the form of the type
* information. This method is useful for cases where the type is generic. In that case, the
* type class (as given in {@link #fromCollection(java.util.Iterator, Class)} does not supply
* all type information.
*
* <p>Note that this operation will result in a non-parallel data stream source, i.e., a data
* stream source with parallelism one.
*
* @param data The iterator of elements to create the data stream from
* @param typeInfo The TypeInformation for the produced data stream
* @param <OUT> The type of the returned data stream
* @return The data stream representing the elements in the iterator
*/
public <OUT> DataStreamSource<OUT> fromCollection(
Iterator<OUT> data, TypeInformation<OUT> typeInfo) {
Preconditions.checkNotNull(data, "The iterator must not be null");
SourceFunction<OUT> function = new FromIteratorFunction<>(data);
return addSource(function, "Collection Source", typeInfo, Boundedness.BOUNDED);
} | 3.68 |
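A hedged usage fragment (imports omitted; class names as commonly found in Flink, so treat them as assumptions): because the iterator is stored until execution, it should be serializable, which is why a sequence iterator is used here instead of a plain collection iterator.

```java
// Hedged usage sketch: a bounded, non-parallel source from a serializable iterator,
// with the element type supplied explicitly as TypeInformation.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStreamSource<Long> source =
        env.fromCollection(new NumberSequenceIterator(1L, 10L), TypeInformation.of(Long.class));
source.print();
```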
hbase_BlockCache_shouldCacheFile | /**
* Checks whether blocks for the passed file should be cached or not. This method may not be
* overridden by all implementing classes. In such cases, the returned Optional will be empty. For
* subclasses implementing this logic, the returned Optional would contain the boolean value
* reflecting if the passed file should indeed be cached.
* @param fileName to check if it should be cached.
* @return empty optional if this method is not supported, otherwise the returned optional
* contains the boolean value informing if the file should be cached.
*/
default Optional<Boolean> shouldCacheFile(String fileName) {
return Optional.empty();
} | 3.68 |
hbase_FanOutOneBlockAsyncDFSOutput_close | /**
* End the current block and complete file at namenode. You should call
* {@link #recoverAndClose(CancelableProgressable)} if this method throws an exception.
*/
@Override
public void close() throws IOException {
endBlock();
state = State.CLOSED;
closeDataNodeChannelsAndAwait();
block.setNumBytes(ackedBlockLength);
completeFile(client, namenode, src, clientName, block, fileId);
} | 3.68 |
flink_JobVertex_getNumberOfInputs | /**
* Returns the number of inputs.
*
* @return The number of inputs.
*/
public int getNumberOfInputs() {
return this.inputs.size();
} | 3.68 |
hadoop_OBSWriteOperationHelper_writeSuccessful | /**
* Callback on a successful write.
*
* @param destKey object key
*/
void writeSuccessful(final String destKey) {
LOG.debug("Finished write to {}", destKey);
} | 3.68 |
flink_SorterInputGateway_finishReading | /** Signals the end of input. Will flush all buffers and notify later stages. */
public void finishReading() {
if (currentBuffer != null && !currentBuffer.getBuffer().isEmpty()) {
this.dispatcher.send(SortStage.SORT, currentBuffer);
}
// add the sentinel to notify the receivers that the work is done
// send the EOF marker
final CircularElement<E> EOF_MARKER = CircularElement.endMarker();
this.dispatcher.send(SortStage.SORT, EOF_MARKER);
LOG.debug("Reading thread done.");
} | 3.68 |
hadoop_OBSFileSystem_canonicalizeUri | /**
* Canonicalize the given URI.
*
* @param rawUri the URI to be canonicalized
* @return the canonicalized URI
*/
@Override
protected URI canonicalizeUri(final URI rawUri) {
return OBSLoginHelper.canonicalizeUri(rawUri, getDefaultPort());
} | 3.68 |
flink_FunctionContext_getUserCodeClassLoader | /**
 * Gets the {@link ClassLoader} to load classes that are not in the system's classpath, but are part
 * of the JAR file of a user job.
*/
public ClassLoader getUserCodeClassLoader() {
if (context == null && userClassLoader == null) {
throw new TableException(
"Calls to FunctionContext.getUserCodeClassLoader are not available "
+ "at the current location.");
} else if (context == null) {
return userClassLoader;
}
return context.getUserCodeClassLoader();
} | 3.68 |
hudi_Pair_of | /**
* <p>
* Obtains an immutable pair of from two objects inferring the generic types.
* </p>
*
* <p>
* This factory allows the pair to be created using inference to obtain the generic types.
* </p>
*
* @param <L> the left element type
* @param <R> the right element type
* @param left the left element, may be null
* @param right the right element, may be null
* @return a pair formed from the two parameters, not null
*/
public static <L, R> Pair<L, R> of(final L left, final R right) {
return new ImmutablePair<>(left, right);
} | 3.68 |
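A short usage fragment for the factory above (the import path for Hudi's `Pair` is omitted); `getLeft()`/`getRight()` are the same accessors used by the S3EventsSource snippet earlier in this table.

```java
// Generic types are inferred from the arguments; the pair itself is immutable.
Pair<String, Integer> entry = Pair.of("retries", 3);
String key = entry.getLeft();     // "retries"
Integer value = entry.getRight(); // 3
```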
hbase_HFileArchiveTableMonitor_addTable | /**
 * Add the named table to those being archived. Attempts to register the table.
* @param table name of the table to be registered
*/
public synchronized void addTable(String table) {
if (this.shouldArchiveTable(table)) {
LOG.debug("Already archiving table: " + table + ", ignoring it");
return;
}
archivedTables.add(table);
} | 3.68 |
AreaShop_FileManager_addRegion | /**
* Add a region to the list and mark it as to-be-saved.
 * @param region The region to add
 * @return the {@link AddingRegionEvent}; it is cancelled when adding was denied by an event listener
*/
public AddingRegionEvent addRegion(GeneralRegion region) {
AddingRegionEvent event = addRegionNoSave(region);
if (event.isCancelled()) {
return event;
}
region.saveRequired();
markGroupsAutoDirty();
return event;
} | 3.68 |
morf_GraphBasedUpgradeNode_getSequence | /**
* @return sequence number
*/
public long getSequence() {
return sequence;
} | 3.68 |
flink_MemCheckpointStreamFactory_closeAndGetBytes | /**
* Closes the stream and returns the byte array containing the stream's data.
*
* @return The byte array containing the stream's data.
 * @throws IOException Thrown if the size of the data exceeds the maximal size
*/
public byte[] closeAndGetBytes() throws IOException {
if (closed.compareAndSet(false, true)) {
checkSize(os.size(), maxSize);
byte[] bytes = os.toByteArray();
closeInternal();
return bytes;
} else {
throw new IOException("stream has already been closed");
}
} | 3.68 |
framework_VaadinService_getSystemMessagesProvider | /**
* Gets the system messages provider currently defined for this service.
* <p>
* By default, the {@link DefaultSystemMessagesProvider} which always
* provides the built-in default {@link SystemMessages} is used.
* </p>
*
* @see #setSystemMessagesProvider(SystemMessagesProvider)
* @see SystemMessagesProvider
* @see SystemMessages
*
* @return the system messages provider; not <code>null</code>
*/
public SystemMessagesProvider getSystemMessagesProvider() {
return systemMessagesProvider;
} | 3.68 |
framework_VCalendarPanel_contains | /**
* Checks if subElement is inside the widget DOM hierarchy.
*
 * @param w the widget whose DOM hierarchy to search
 * @param subElement the DOM element to look for
* @return true if {@code w} is a parent of subElement, false otherwise.
*/
private boolean contains(Widget w, Element subElement) {
if (w == null || w.getElement() == null) {
return false;
}
return w.getElement().isOrHasChild(subElement);
} | 3.68 |
hbase_HRegionServer_convertThrowableToIOE | /**
 * @param msg Message to put in the new IOE if the passed <code>t</code> is not an IOE
 * @return <code>t</code> as an IOE, wrapping it if it isn't one already.
*/
private IOException convertThrowableToIOE(final Throwable t, final String msg) {
return (t instanceof IOException ? (IOException) t
: msg == null || msg.length() == 0 ? new IOException(t)
: new IOException(msg, t));
} | 3.68 |
pulsar_AbstractDispatcherMultipleConsumers_getNextConsumerFromSameOrLowerLevel | /**
 * Finds the index of the next available consumer, in round-robin order, on the same priority level as the
 * consumer at currentRoundRobinIndex. If no consumer on that level is available, it returns the first
 * available consumer on a lower priority level, or -1 if no available consumer exists in the list.
 *
 * @param currentRoundRobinIndex index of the consumer to start the round-robin scan from
 * @return index of the selected consumer, or -1 if none is available
*/
private int getNextConsumerFromSameOrLowerLevel(int currentRoundRobinIndex) {
Consumer currentRRConsumer = consumerList.get(currentRoundRobinIndex);
if (isConsumerAvailable(currentRRConsumer)) {
return currentRoundRobinIndex;
}
// scan the consumerList, if consumer in currentRoundRobinIndex is unavailable
int targetPriority = currentRRConsumer.getPriorityLevel();
int scanIndex = currentRoundRobinIndex + 1;
int endPriorityLevelIndex = currentRoundRobinIndex;
do {
Consumer scanConsumer = scanIndex < consumerList.size() ? consumerList.get(scanIndex)
: null /* reached to last consumer of list */;
// if reached to last consumer of list then check from beginning to currentRRIndex of the list
if (scanConsumer == null || scanConsumer.getPriorityLevel() != targetPriority) {
endPriorityLevelIndex = scanIndex; // last consumer on this level
scanIndex = getFirstConsumerIndexOfPriority(targetPriority);
} else {
if (isConsumerAvailable(scanConsumer)) {
return scanIndex;
}
scanIndex++;
}
} while (scanIndex != currentRoundRobinIndex);
// it means: didn't find consumer in the same priority-level so, check available consumer lower than this level
for (int i = endPriorityLevelIndex; i < consumerList.size(); i++) {
if (isConsumerAvailable(consumerList.get(i))) {
return i;
}
}
return -1;
} | 3.68 |
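The selection rule above can be illustrated with a standalone sketch over plain arrays (an illustration only, not Pulsar's actual classes): consumers are grouped by ascending priority level; pick the next available one on the current level round-robin, otherwise the first available one on any lower-priority (later) level.

```java
// Minimal sketch: priorityLevels is grouped and sorted ascending; available marks live consumers.
static int nextFromSameOrLowerLevel(int[] priorityLevels, boolean[] available, int current) {
    int level = priorityLevels[current];
    int n = priorityLevels.length;
    // find the bounds of the current priority level
    int levelStart = current;
    while (levelStart > 0 && priorityLevels[levelStart - 1] == level) levelStart--;
    int levelEnd = current;
    while (levelEnd + 1 < n && priorityLevels[levelEnd + 1] == level) levelEnd++;
    // round-robin within the current level, starting at `current`
    int levelSize = levelEnd - levelStart + 1;
    for (int step = 0; step < levelSize; step++) {
        int i = levelStart + (current - levelStart + step) % levelSize;
        if (available[i]) return i;
    }
    // fall back to the first available consumer on a lower-priority level
    for (int i = levelEnd + 1; i < n; i++) {
        if (available[i]) return i;
    }
    return -1;
}
```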
hbase_ClientZKSyncer_setDataForClientZkUntilSuccess | /**
 * Set data for client ZK and retry until it succeeds. Be very careful to avoid an infinite loop when
 * modifying this method.
* @param node the znode to set on client ZK
* @param data the data to set to client ZK
* @throws InterruptedException if the thread is interrupted during process
*/
private void setDataForClientZkUntilSuccess(String node, byte[] data)
throws InterruptedException {
boolean create = false;
while (!server.isStopped()) {
try {
LOG.debug("Set data for remote " + node + ", client zk wather: " + clientZkWatcher);
if (create) {
ZKUtil.createNodeIfNotExistsNoWatch(clientZkWatcher, node, data, CreateMode.PERSISTENT);
} else {
ZKUtil.setData(clientZkWatcher, node, data);
}
break;
} catch (KeeperException e) {
LOG.debug("Failed to set data for {} to client ZK, will retry later", node, e);
if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
reconnectAfterExpiration();
}
if (e.code() == KeeperException.Code.NONODE) {
create = true;
}
if (e.code() == KeeperException.Code.NODEEXISTS) {
create = false;
}
}
Threads.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
}
} | 3.68 |
flink_RocksDBResourceContainer_internalGetOption | /**
* Get a value for option from pre-defined option and configurable option settings. The priority
* relationship is as below.
*
* <p>Configured value > pre-defined value > default value.
*
* @param option the wanted option
* @param <T> the value type
* @return the final value for the option according to the priority above.
*/
@Nullable
private <T> T internalGetOption(ConfigOption<T> option) {
return configuration
.getOptional(option)
.orElseGet(() -> predefinedOptions.getValue(option));
} | 3.68 |
pulsar_GrowableArrayBlockingQueue_terminate | /**
 * Make the queue stop accepting new items. If new data still tries to enter the queue, it will be handled
 * by {@code itemAfterTerminatedHandler}.
*/
public void terminate(@Nullable Consumer<T> itemAfterTerminatedHandler) {
// Wait for any in-flight enqueue to finish; after that, the terminate operation is complete.
long stamp = tailLock.writeLock();
try {
terminated = true;
if (itemAfterTerminatedHandler != null) {
this.itemAfterTerminatedHandler = itemAfterTerminatedHandler;
}
} finally {
tailLock.unlockWrite(stamp);
}
} | 3.68 |
hadoop_Validate_checkNotNegative | /**
* Validates that the given integer argument is not negative.
* @param value the argument value to validate
* @param argName the name of the argument being validated.
*/
public static void checkNotNegative(long value, String argName) {
checkArgument(value >= 0, "'%s' must not be negative.", argName);
} | 3.68 |
graphhopper_CHStorage_toNodePointer | /**
* To use the node getters/setters you need to convert node IDs to a nodePointer first
*/
public long toNodePointer(int node) {
assert node >= 0 && node < nodeCount : "node not in bounds: [0, " + nodeCount + "[";
return (long) node * nodeCHEntryBytes;
} | 3.68 |
hbase_MiniZooKeeperCluster_killCurrentActiveZooKeeperServer | /**
 * @return the client port of a backup ZK server that can take over after killing the
 *         current active one; returns -1 if there are no backups.
* @throws IOException if waiting for the shutdown of a server fails
*/
public int killCurrentActiveZooKeeperServer() throws IOException, InterruptedException {
if (!started || activeZKServerIndex < 0) {
return -1;
}
// Shutdown the current active one
NIOServerCnxnFactory standaloneServerFactory =
standaloneServerFactoryList.get(activeZKServerIndex);
int clientPort = clientPortList.get(activeZKServerIndex);
standaloneServerFactory.shutdown();
if (!waitForServerDown(clientPort, connectionTimeout)) {
throw new IOException("Waiting for shutdown of standalone server");
}
zooKeeperServers.get(activeZKServerIndex).getZKDatabase().close();
// remove the current active zk server
standaloneServerFactoryList.remove(activeZKServerIndex);
clientPortList.remove(activeZKServerIndex);
zooKeeperServers.remove(activeZKServerIndex);
LOG.info("Kill the current active ZK servers in the cluster on client port: {}", clientPort);
if (standaloneServerFactoryList.isEmpty()) {
// there is no backup servers;
return -1;
}
clientPort = clientPortList.get(activeZKServerIndex);
LOG.info("Activate a backup zk server in the cluster on client port: {}", clientPort);
// return the next back zk server's port
return clientPort;
} | 3.68 |
flink_BinaryStringData_trim | /**
* Returns a string whose value is this string, with any leading and trailing whitespace
* removed.
*
* @return A string whose value is this string, with any leading and trailing white space
* removed, or this string if it has no leading or trailing white space.
*/
public BinaryStringData trim() {
ensureMaterialized();
if (inFirstSegment()) {
int s = 0;
int e = this.binarySection.sizeInBytes - 1;
// skip all of the space (0x20) in the left side
while (s < this.binarySection.sizeInBytes && getByteOneSegment(s) == 0x20) {
s++;
}
// skip all of the space (0x20) in the right side
while (e >= s && getByteOneSegment(e) == 0x20) {
e--;
}
if (s > e) {
// empty string
return EMPTY_UTF8;
} else {
return copyBinaryStringInOneSeg(s, e - s + 1);
}
} else {
return trimMultiSegs();
}
} | 3.68 |
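The single-segment fast path above amounts to trimming ASCII space bytes (0x20) from both ends of a byte slice; a standalone sketch of that idea (an illustration, not Flink's memory-segment code) is shown below.

```java
// Minimal sketch: trim 0x20 bytes from both ends of a UTF-8 byte array.
static byte[] trimAsciiSpaces(byte[] bytes) {
    int s = 0;
    int e = bytes.length - 1;
    while (s <= e && bytes[s] == 0x20) s++; // skip leading spaces
    while (e >= s && bytes[e] == 0x20) e--; // skip trailing spaces
    return java.util.Arrays.copyOfRange(bytes, s, e + 1); // empty array if all spaces
}
```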
framework_ListenerMethod_receiveEvent | /**
* Receives one event from the <code>EventRouter</code> and calls the
* trigger method if it matches with the criteria defined for the listener.
 * Only events of the specified event class or one of its subclasses
 * result in the trigger method being called.
*
* @param event
 *            the fired event. Unless the trigger method's argument list and
 *            the index of the argument to be replaced are specified, this
 *            event will not be passed to the trigger method.
*/
public void receiveEvent(EventObject event) {
// Only send events supported by the method
if (eventType.isAssignableFrom(event.getClass())) {
try {
if (eventArgumentIndex >= 0) {
if (eventArgumentIndex == 0 && arguments.length == 1) {
method.invoke(target, event);
} else {
final Object[] arg = new Object[arguments.length];
System.arraycopy(arguments, 0, arg, 0, arg.length);
arg[eventArgumentIndex] = event;
method.invoke(target, arg);
}
} else {
method.invoke(target, arguments);
}
} catch (final IllegalAccessException e) {
// This should never happen
throw new RuntimeException("Internal error - please report", e);
} catch (final InvocationTargetException e) {
// An exception was thrown by the invocation target. Throw it
// forwards.
throw new MethodException(
"Invocation of method " + method.getName() + " in "
+ target.getClass().getName() + " failed.",
e.getTargetException());
}
}
} | 3.68 |
framework_Escalator_measureAndSetWidthIfNeeded | /**
* Checks if the column needs measuring, and then measures it.
* <p>
* Called by {@link Escalator#onLoad()}.
*/
public boolean measureAndSetWidthIfNeeded() {
assert isAttached() : "Column.measureAndSetWidthIfNeeded() was called even though Escalator was not attached!";
if (measuringRequested) {
measuringRequested = false;
setWidth(definedWidth);
return true;
}
return false;
} | 3.68 |
hbase_ProcedurePrepareLatch_createBlockingLatch | /**
* Creates a latch which blocks.
*/
public static ProcedurePrepareLatch createBlockingLatch() {
return new CompatibilityLatch();
} | 3.68 |
graphhopper_VectorTile_setValues | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public Builder setValues(
int index, vector_tile.VectorTile.Tile.Value.Builder builderForValue) {
if (valuesBuilder_ == null) {
ensureValuesIsMutable();
values_.set(index, builderForValue.build());
onChanged();
} else {
valuesBuilder_.setMessage(index, builderForValue.build());
}
return this;
} | 3.68 |
flink_AbstractKeyedStateBackend_getCurrentKey | /** @see KeyedStateBackend */
@Override
public K getCurrentKey() {
return this.keyContext.getCurrentKey();
} | 3.68 |
framework_VTree_updateDropHandler | /** For internal use only. May be removed or replaced in the future. */
public void updateDropHandler(UIDL childUidl) {
if (dropHandler == null) {
dropHandler = new VAbstractDropHandler() {
@Override
public void dragEnter(VDragEvent drag) {
}
@Override
protected void dragAccepted(final VDragEvent drag) {
}
@Override
public void dragOver(final VDragEvent currentDrag) {
final Object oldIdOver = currentDrag.getDropDetails()
.get("itemIdOver");
final VerticalDropLocation oldDetail = (VerticalDropLocation) currentDrag
.getDropDetails().get("detail");
updateTreeRelatedDragData(currentDrag);
final VerticalDropLocation detail = (VerticalDropLocation) currentDrag
.getDropDetails().get("detail");
boolean nodeHasChanged = (currentMouseOverKey != null
&& currentMouseOverKey != oldIdOver)
|| (currentMouseOverKey == null
&& oldIdOver != null);
boolean detailHasChanded = (detail != null
&& detail != oldDetail)
|| (detail == null && oldDetail != null);
if (nodeHasChanged || detailHasChanded) {
final String newKey = currentMouseOverKey;
TreeNode treeNode = keyToNode.get(oldIdOver);
if (treeNode != null) {
// clear old styles
treeNode.emphasis(null);
}
if (newKey != null) {
validate(new VAcceptCallback() {
@Override
public void accepted(VDragEvent event) {
VerticalDropLocation curDetail = (VerticalDropLocation) event
.getDropDetails().get("detail");
if (curDetail == detail && newKey
.equals(currentMouseOverKey)) {
getNodeByKey(newKey).emphasis(detail);
}
/*
* Else drag is already on a different
* node-detail pair, new criteria check is
* going on
*/
}
}, currentDrag);
}
}
}
@Override
public void dragLeave(VDragEvent drag) {
cleanUp();
}
private void cleanUp() {
if (currentMouseOverKey != null) {
getNodeByKey(currentMouseOverKey).emphasis(null);
currentMouseOverKey = null;
}
}
@Override
public boolean drop(VDragEvent drag) {
cleanUp();
return super.drop(drag);
}
@Override
public ComponentConnector getConnector() {
return ConnectorMap.get(client).getConnector(VTree.this);
}
@Override
public ApplicationConnection getApplicationConnection() {
return client;
}
};
}
dropHandler.updateAcceptRules(childUidl);
} | 3.68 |
flink_ResettableExternalBuffer_upperBound | // Find the index of the first element which is strictly greater than `goal` in `list`.
// `list` must be sorted.
// If every element in `list` is not larger than `goal`, return `list.size()`.
private int upperBound(int goal, List<Integer> list) {
if (list.size() == 0) {
return 0;
}
if (list.get(list.size() - 1) <= goal) {
return list.size();
}
// Binary search
int head = 0;
int tail = list.size() - 1;
int mid;
while (head < tail) {
mid = (head + tail) / 2;
if (list.get(mid) <= goal) {
head = mid + 1;
} else {
tail = mid;
}
}
return head;
} | 3.68 |
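An equivalent half-open-interval version with a tiny demo makes the contract concrete (a standalone sketch, not Flink code):

```java
import java.util.Arrays;
import java.util.List;

public class UpperBoundDemo {
    // Index of the first element strictly greater than goal in a sorted list,
    // or list.size() if no element is greater (same contract as above).
    static int upperBound(int goal, List<Integer> list) {
        int head = 0, tail = list.size();
        while (head < tail) {
            int mid = (head + tail) >>> 1;
            if (list.get(mid) <= goal) {
                head = mid + 1;
            } else {
                tail = mid;
            }
        }
        return head;
    }

    public static void main(String[] args) {
        List<Integer> sorted = Arrays.asList(1, 3, 5, 7, 9);
        System.out.println(upperBound(4, sorted)); // 2: first element > 4 is 5
        System.out.println(upperBound(9, sorted)); // 5: nothing greater, so list.size()
    }
}
```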
flink_HadoopConfigLoader_loadHadoopConfigFromFlink | // add additional config entries from the Flink config to the Hadoop config
private org.apache.hadoop.conf.Configuration loadHadoopConfigFromFlink() {
org.apache.hadoop.conf.Configuration hadoopConfig =
new org.apache.hadoop.conf.Configuration();
for (String key : flinkConfig.keySet()) {
for (String prefix : flinkConfigPrefixes) {
if (key.startsWith(prefix)) {
String newKey = hadoopConfigPrefix + key.substring(prefix.length());
String newValue = fixHadoopConfig(key, flinkConfig.getString(key, null));
hadoopConfig.set(newKey, newValue);
LOG.debug(
"Adding Flink config entry for {} as {} to Hadoop config", key, newKey);
}
}
}
return hadoopConfig;
} | 3.68 |
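The key-rewriting rule above can be shown with a small standalone sketch (the prefixes and keys below are hypothetical, chosen only for illustration): a Flink key that starts with a recognized prefix is copied into the Hadoop config under the Hadoop prefix plus the remainder of the key.

```java
import java.util.HashMap;
import java.util.Map;

public class PrefixRewriteDemo {
    public static void main(String[] args) {
        String[] flinkConfigPrefixes = {"s3."};             // assumed prefix
        String hadoopConfigPrefix = "fs.s3a.";              // assumed target prefix
        Map<String, String> flinkConfig = new HashMap<>();
        flinkConfig.put("s3.access-key", "my-access-key");  // hypothetical entry

        Map<String, String> hadoopConfig = new HashMap<>();
        for (Map.Entry<String, String> e : flinkConfig.entrySet()) {
            for (String prefix : flinkConfigPrefixes) {
                if (e.getKey().startsWith(prefix)) {
                    String newKey = hadoopConfigPrefix + e.getKey().substring(prefix.length());
                    hadoopConfig.put(newKey, e.getValue());
                }
            }
        }
        System.out.println(hadoopConfig); // {fs.s3a.access-key=my-access-key}
    }
}
```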
framework_ConverterUtil_convertToModel | /**
* Convert the given value from the presentation (UI) type to model (data
* source) type.
*
* @param presentationValue
* the presentation value to convert
* @param modelType
* the type of the model
* @param converter
* the converter to use
* @param locale
* the locale to use for conversion
* @param <PRESENTATIONTYPE>
* the presentation type
* @param <MODELTYPE>
* the model type
*
* @return the converted value, compatible with the model type, or the
* original value if its type is compatible and no converter is set.
* @throws Converter.ConversionException
* if there was a problem converting the value
*/
public static <MODELTYPE, PRESENTATIONTYPE> MODELTYPE convertToModel(
PRESENTATIONTYPE presentationValue, Class<MODELTYPE> modelType,
Converter<PRESENTATIONTYPE, MODELTYPE> converter, Locale locale)
throws Converter.ConversionException {
if (converter != null) {
/*
* If there is a converter, always use it. It must convert or throw
* an exception.
*/
MODELTYPE model = converter.convertToModel(presentationValue,
modelType, locale);
if (model != null && !modelType.isInstance(model)) {
throw new Converter.ConversionException(
"Converter returned an object of type "
+ model.getClass().getName()
+ " when expecting " + modelType.getName());
}
return model;
}
if (presentationValue == null) {
// Null should always be passed through the converter but if there
// is no converter we can safely return null
return null;
}
if (modelType == null) {
// No model type, return original value
return (MODELTYPE) presentationValue;
} else if (modelType.isAssignableFrom(presentationValue.getClass())) {
// presentation type directly compatible with model type
return modelType.cast(presentationValue);
} else {
throw new Converter.ConversionException(
"Unable to convert value of type "
+ presentationValue.getClass().getName()
+ " to model type " + modelType
+ ". No converter is set and the types are not compatible.");
}
} | 3.68 |
flink_StreamProjection_projectTuple3 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2> SingleOutputStreamOperator<Tuple3<T0, T1, T2>> projectTuple3() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<IN, Tuple3<T0, T1, T2>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
hbase_SimpleRpcServerResponder_processResponse | /**
* Process the response for this call. You need to have the lock on
* {@link org.apache.hadoop.hbase.ipc.SimpleServerRpcConnection#responseWriteLock}
 * @return true if we processed the call fully, false otherwise.
*/
private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp)
throws IOException {
boolean error = true;
BufferChain buf = resp.getResponse();
if (conn.useWrap) {
buf = wrapWithSasl(conn.saslServer, buf);
}
try {
// Send as much data as we can in the non-blocking fashion
long numBytes = this.simpleRpcServer.channelWrite(conn.channel, buf);
if (numBytes < 0) {
throw new HBaseIOException("Error writing on the socket " + conn);
}
error = false;
} finally {
if (error) {
SimpleRpcServer.LOG.debug(conn + ": output error -- closing");
// We will be closing this connection itself. Mark this call as done so that all the
// buffer(s) it got from pool can get released
resp.done();
this.simpleRpcServer.closeConnection(conn);
}
}
if (!buf.hasRemaining()) {
resp.done();
return true;
} else {
// set the serve time when the response has to be sent later
conn.lastSentTime = EnvironmentEdgeManager.currentTime();
return false; // Socket can't take more, we will have to come back.
}
} | 3.68 |
framework_DragSourceExtensionConnector_onDragEnd | /**
* Event handler for the {@code dragend} event. Called when {@code dragend}
* event occurs.
*
* @param event
* browser event to be handled
*/
protected void onDragEnd(Event event) {
NativeEvent nativeEvent = (NativeEvent) event;
// for android chrome we use the polyfill, in case browser fires a
// native dragend event after the polyfill dragend, we need to ignore
// that one
if (isAndoidChrome() && isNativeDragEvent((nativeEvent))) {
event.preventDefault();
event.stopPropagation();
return;
}
// Remove dragged element indicator style
removeDraggedStyle(nativeEvent);
// Initiate server start dragend event when there is a DragEndListener
// attached on the server side
if (hasEventListener(DragSourceState.EVENT_DRAGEND)) {
String dropEffect = getDropEffect(nativeEvent.getDataTransfer());
assert dropEffect != null : "Drop effect should never be null";
sendDragEndEventToServer(nativeEvent,
DropEffect.valueOf(dropEffect.toUpperCase(Locale.ROOT)));
}
} | 3.68 |
zxing_ViewfinderView_drawResultBitmap | /**
* Draw a bitmap with the result points highlighted instead of the live scanning display.
*
* @param barcode An image of the decoded barcode.
*/
public void drawResultBitmap(Bitmap barcode) {
resultBitmap = barcode;
invalidate();
} | 3.68 |
hadoop_MutableQuantiles_getInterval | /**
* Get the rollover interval (in seconds) of the estimator.
*
* @return intervalSecs of the estimator.
*/
public synchronized int getInterval() {
return intervalSecs;
} | 3.68 |
hbase_LockProcedure_beforeReplay | /**
 * On recovery, re-execute from the start to acquire the locks. The state needs to be explicitly set to RUNNABLE
 * because the procedure might have been in WAITING_TIMEOUT state when the crash happened, in which
 * case it would be sent back to the timeout queue on recovery, which we don't want since we want to
 * re-acquire the locks.
*/
@Override
protected void beforeReplay(MasterProcedureEnv env) {
setState(ProcedureProtos.ProcedureState.RUNNABLE);
} | 3.68 |
hadoop_CipherSuite_getName | /**
* @return name of cipher suite, as in {@link javax.crypto.Cipher}
*/
public String getName() {
return name;
} | 3.68 |
flink_RocksDBNativeMetricOptions_isStatisticsEnabled | /** @return true if RocksDB statistics metrics are enabled, false otherwise. */
public boolean isStatisticsEnabled() {
return !monitorTickerTypes.isEmpty();
} | 3.68 |
flink_FunctionIdentifier_normalizeObjectIdentifier | /** Normalize an object identifier by only normalizing the function name. */
public static ObjectIdentifier normalizeObjectIdentifier(ObjectIdentifier oi) {
return ObjectIdentifier.of(
oi.getCatalogName(), oi.getDatabaseName(), normalizeName(oi.getObjectName()));
} | 3.68 |
hbase_Response_getHeaders | /** Returns the HTTP response headers */
public Header[] getHeaders() {
return headers;
} | 3.68 |
flink_HsMemoryDataManager_close | /**
 * Close this {@link HsMemoryDataManager}. After this, no more data can be appended to memory and all buffers
 * taken by this class will be recycled.
*/
public void close() {
spillAndReleaseAllData();
spiller.close();
poolSizeChecker.shutdown();
} | 3.68 |
hbase_ReplicationSourceManager_getSources | /**
* Get a list of all the normal sources of this rs
* @return list of all normal sources
*/
public List<ReplicationSourceInterface> getSources() {
return new ArrayList<>(this.sources.values());
} | 3.68 |
hudi_HoodieHiveUtils_getNthParent | /**
 * Gets the n'th parent of the Path. Assumes the path has at least n components.
 *
 * @param path the starting path
 * @param n the number of levels to walk up
 * @return the n'th parent of {@code path}
*/
public static Path getNthParent(Path path, int n) {
Path parent = path;
for (int i = 0; i < n; i++) {
parent = parent.getParent();
}
return parent;
} | 3.68 |
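A tiny self-contained illustration of the same walk (only hadoop-common's `Path` is needed; the path string is hypothetical):

```java
import org.apache.hadoop.fs.Path;

public class NthParentDemo {
    public static void main(String[] args) {
        Path p = new Path("/warehouse/db/table/partition=1");
        // Equivalent of getNthParent(p, 2): call Path#getParent twice.
        Path parent = p;
        for (int i = 0; i < 2; i++) {
            parent = parent.getParent();
        }
        System.out.println(parent); // /warehouse/db
    }
}
```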
framework_VTabsheet_setEnabledOnServer | /**
* Set tab enabled state on server (there is no client-side disabling,
* but the naming convention matches
* {@link #setHiddenOnServer(boolean)}).
*
* @param enabled
* {@code true} if enabled on server, {@code false} otherwise
*/
public void setEnabledOnServer(boolean enabled) {
enabledOnServer = enabled;
Roles.getTabRole().setAriaDisabledState(getElement(), !enabled);
setStyleName(td, TD_DISABLED_CLASSNAME, !enabled);
if (!enabled) {
FOCUS_IMPL.setTabIndex(td, -1);
}
} | 3.68 |
framework_AbstractSplitPanel_removeSplitPositionChangeListener | /**
* Removes a {@link SplitPositionChangeListener}.
*
* @since 7.5.0
* @param listener
* SplitPositionChangeListener to be removed.
*/
@Deprecated
public void removeSplitPositionChangeListener(
SplitPositionChangeListener listener) {
removeListener(SplitPositionChangeEvent.class, listener);
} | 3.68 |
pulsar_SimpleLoadManagerImpl_updateBrokerToNamespaceToBundle | // Update the brokerToNamespaceToBundleRange map with the current preallocated and assigned bundle data.
private synchronized void updateBrokerToNamespaceToBundle() {
resourceUnitRankings.forEach((resourceUnit, ranking) -> {
final String broker = resourceUnit.getResourceId();
final Set<String> loadedBundles = ranking.getLoadedBundles();
final Set<String> preallocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles();
final ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> namespaceToBundleRange =
brokerToNamespaceToBundleRange
.computeIfAbsent(broker.replace("http://", ""),
k -> ConcurrentOpenHashMap.<String,
ConcurrentOpenHashSet<String>>newBuilder()
.build());
namespaceToBundleRange.clear();
LoadManagerShared.fillNamespaceToBundlesMap(loadedBundles, namespaceToBundleRange);
LoadManagerShared.fillNamespaceToBundlesMap(preallocatedBundles, namespaceToBundleRange);
});
} | 3.68 |
graphhopper_TarjanSCC_findComponents | /**
* Runs Tarjan's algorithm using an explicit stack.
*
* @param excludeSingleNodeComponents if set to true components that only contain a single node will not be
* returned when calling {@link #findComponents} or {@link #findComponentsRecursive()},
* which can be useful to save some memory.
*/
public static ConnectedComponents findComponents(Graph graph, EdgeFilter edgeFilter, boolean excludeSingleNodeComponents) {
return new TarjanSCC(graph, edgeFilter, excludeSingleNodeComponents).findComponents();
} | 3.68 |
flink_ResourceReconcileResult_needReconcile | /**
* Returns whether the cluster resource need reconcile.
*
* @return True if the cluster resource need reconcile, otherwise false.
*/
public boolean needReconcile() {
return pendingTaskManagersToRelease.size() > 0
|| taskManagersToRelease.size() > 0
|| pendingTaskManagersToAllocate.size() > 0;
} | 3.68 |
framework_AutoScroller_reboundScrollArea | /**
 * If the scroll area has been offset by the pointer starting out there,
 * move it back a bit.
*/
private void reboundScrollArea(double timeDiff) {
if (!scrollAreaShouldRebound) {
return;
}
int reboundPx = (int) Math
.ceil(SCROLL_AREA_REBOUND_PX_PER_MS * timeDiff);
if (startBound < finalStartBound) {
startBound += reboundPx;
startBound = Math.min(startBound, finalStartBound);
updateScrollSpeed(scrollingAxisPageCoordinate);
} else if (endBound > finalEndBound) {
endBound -= reboundPx;
endBound = Math.max(endBound, finalEndBound);
updateScrollSpeed(scrollingAxisPageCoordinate);
}
} | 3.68 |
graphhopper_KVStorage_add | /**
 * This method writes the specified key-value entries into the storage. Please note that null keys or null
 * values are rejected. The Class of a value can only be: byte[], String, int, long, float or double
 * (or more precisely, their wrapper equivalents). For all other types an exception is thrown. The first call of add
 * assigns a Class to every key and future calls of add will throw an exception if this Class differs.
 *
 * @return entryPointer with which you can later fetch the entries via the get or getAll method
*/
public long add(final List<KeyValue> entries) {
if (entries == null) throw new IllegalArgumentException("specified List must not be null");
if (entries.isEmpty()) return EMPTY_POINTER;
else if (entries.size() > 200)
throw new IllegalArgumentException("Cannot store more than 200 entries per entry");
// This is a very important "compression" mechanism because one OSM way is split into multiple edges and so we
// can often re-use the serialized key-value pairs of the previous edge.
if (isEquals(entries, lastEntries)) return lastEntryPointer;
// If the Class of a value is unknown it should already fail here, before we modify internal data. (see #2597#discussion_r896469840)
for (KeyValue kv : entries)
if (keyToIndex.get(kv.key) != null)
getBytesForValue(indexToClass.get(keyToIndex.get(kv.key)), kv.value);
lastEntries = entries;
lastEntryPointer = bytePointer;
vals.ensureCapacity(bytePointer + 1);
vals.setByte(bytePointer, (byte) entries.size());
bytePointer = setKVList(bytePointer, entries);
if (bytePointer < 0)
throw new IllegalStateException("Negative bytePointer in KVStorage");
return lastEntryPointer;
} | 3.68 |
flink_TimeIntervalJoin_removeExpiredRows | /**
 * Remove the expired rows. Register a new timer if the cache still holds valid rows after the
 * cleanup.
*
* @param collector the collector to emit results
* @param expirationTime the expiration time for this cache
* @param rowCache the row cache
* @param timerState timer state for the opposite stream
* @param ctx the context to register the cleanup timer
* @param removeLeft whether to remove the left rows
*/
private void removeExpiredRows(
Collector<RowData> collector,
long expirationTime,
MapState<Long, List<Tuple2<RowData, Boolean>>> rowCache,
ValueState<Long> timerState,
OnTimerContext ctx,
boolean removeLeft)
throws Exception {
Iterator<Map.Entry<Long, List<Tuple2<RowData, Boolean>>>> iterator = rowCache.iterator();
long earliestTimestamp = -1L;
// We remove all expired keys and do not leave the loop early.
// Hence, we do a full pass over the state.
while (iterator.hasNext()) {
Map.Entry<Long, List<Tuple2<RowData, Boolean>>> entry = iterator.next();
Long rowTime = entry.getKey();
if (rowTime <= expirationTime) {
if (removeLeft && joinType.isLeftOuter()) {
List<Tuple2<RowData, Boolean>> rows = entry.getValue();
rows.forEach(
(Tuple2<RowData, Boolean> tuple) -> {
if (!tuple.f1) {
// Emit a null padding result if the row has never been
// successfully joined.
collector.collect(paddingUtil.padLeft(tuple.f0));
}
});
} else if (!removeLeft && joinType.isRightOuter()) {
List<Tuple2<RowData, Boolean>> rows = entry.getValue();
rows.forEach(
(Tuple2<RowData, Boolean> tuple) -> {
if (!tuple.f1) {
// Emit a null padding result if the row has never been
// successfully joined.
collector.collect(paddingUtil.padRight(tuple.f0));
}
});
}
iterator.remove();
} else {
// We find the earliest timestamp that is still valid.
if (rowTime < earliestTimestamp || earliestTimestamp < 0) {
earliestTimestamp = rowTime;
}
}
}
if (earliestTimestamp > 0) {
// There are rows left in the cache. Register a timer to expire them later.
registerCleanUpTimer(ctx, earliestTimestamp, removeLeft);
} else {
// No rows left in the cache. Clear the states and the timerState will be 0.
timerState.clear();
rowCache.clear();
}
} | 3.68 |
framework_BootstrapHandler_getUriResolver | /**
* Gets the URI resolver to use for bootstrap resources.
*
* @return the URI resolver
* @since 8.1
*/
public BootstrapUriResolver getUriResolver() {
if (uriResolver == null) {
uriResolver = new BootstrapUriResolver(this);
}
return uriResolver;
} | 3.68 |
framework_VTabsheet_getRightGap | /**
* Returns the gap between the rightmost visible tab and the tab container
* edge. If the tabs have been right-aligned by styling (e.g. Valo style
* {@code right-aligned-tabs}) there should be no gap at all.
*
* @return the right gap (in pixels), or zero if no gap
*/
private int getRightGap() {
int lastVisibleIndex = tb.getLastVisibleTab();
Element tabContainer = tb.getElement().getParentElement();
int gap;
if (lastVisibleIndex < 0) {
// no tabs visible, return the whole available width
gap = getOffsetWidth() - scroller.getOffsetWidth();
} else {
Tab lastVisibleTab = tb.getTab(lastVisibleIndex);
gap = tabContainer.getAbsoluteRight()
- lastVisibleTab.getAbsoluteLeft()
- lastVisibleTab.getOffsetWidth()
- scroller.getOffsetWidth();
}
return gap > 0 ? gap : 0;
} | 3.68 |
hbase_StoreFileTrackerFactory_createForMigration | /**
* Create store file tracker to be used as source or destination for
* {@link MigrationStoreFileTracker}.
*/
static StoreFileTrackerBase createForMigration(Configuration conf, String configName,
boolean isPrimaryReplica, StoreContext ctx) {
Class<? extends StoreFileTrackerBase> tracker =
getStoreFileTrackerClassForMigration(conf, configName);
// prevent nest of MigrationStoreFileTracker, it will cause infinite recursion.
if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) {
throw new IllegalArgumentException("Should not specify " + configName + " as "
+ Trackers.MIGRATION + " because it can not be nested");
}
LOG.debug("instantiating StoreFileTracker impl {} as {}", tracker.getName(), configName);
return ReflectionUtils.newInstance(tracker, conf, isPrimaryReplica, ctx);
} | 3.68 |
hbase_HRegion_getRegionServerServices | /**
* Returns Instance of {@link RegionServerServices} used by this HRegion. Can be null.
*/
RegionServerServices getRegionServerServices() {
return this.rsServices;
} | 3.68 |
framework_FileDropTarget_disposeStreamVariable | /**
 * Calling this method has no effect. DD files are received only once
 * anyway.
*/
@Override
public void disposeStreamVariable() {
} | 3.68 |
framework_Table_disableContentRefreshing | /**
 * Go to a mode where content updates are not done. This is because we want to
 * bypass expensive content updates for some reason (like when we know we may have
 * other content changes on the way).
*
* @return true if content refresh flag was enabled prior this call
*/
protected boolean disableContentRefreshing() {
boolean wasDisabled = isContentRefreshesEnabled;
isContentRefreshesEnabled = false;
return wasDisabled;
} | 3.68 |
hbase_InclusiveCombinedBlockCache_cacheBlock | /**
* @param cacheKey The block's cache key.
* @param buf The block contents wrapped in a ByteBuffer.
* @param inMemory Whether block should be treated as in-memory. This parameter is only useful for
* the L1 lru cache.
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
// This is the inclusive part of the combined block cache.
// Every block is placed into both block caches.
l1Cache.cacheBlock(cacheKey, buf, inMemory);
// This assumes that insertion into the L2 block cache is either async or very fast.
l2Cache.cacheBlock(cacheKey, buf, inMemory);
} | 3.68 |
morf_AbstractSqlDialectTest_testUseImplicitJoinOrderOnSubquery | /**
* Check that we don't allow the use of the join order hint on a subquery.
*/
@Test(expected = IllegalArgumentException.class)
public void testUseImplicitJoinOrderOnSubquery() {
testDialect.convertStatementToSQL(
select().from(select().from("Foo").useImplicitJoinOrder())
);
} | 3.68 |
hadoop_TFile_upperBound | /**
* @param key
* input key.
* @return the ID of the first block that contains key > input key. Or -1
* if no such block exists.
*/
public int upperBound(RawComparable key) {
if (comparator == null) {
throw new RuntimeException("Cannot search in unsorted TFile");
}
if (firstKey == null) {
return -1; // not found
}
int ret = Utils.upperBound(index, key, comparator);
if (ret == index.size()) {
return -1;
}
return ret;
} | 3.68 |
hibernate-validator_ConstraintViolationImpl_createHashCode | /**
* @see #equals(Object) on which fields are taken into account
*/
private int createHashCode() {
int result = interpolatedMessage != null ? interpolatedMessage.hashCode() : 0;
result = 31 * result + ( propertyPath != null ? propertyPath.hashCode() : 0 );
result = 31 * result + System.identityHashCode( rootBean );
result = 31 * result + System.identityHashCode( leafBeanInstance );
result = 31 * result + System.identityHashCode( value );
result = 31 * result + ( constraintDescriptor != null ? constraintDescriptor.hashCode() : 0 );
result = 31 * result + ( messageTemplate != null ? messageTemplate.hashCode() : 0 );
return result;
} | 3.68 |
framework_VCalendarPanel_updateTimes | /**
 * Updates the time selectors to correspond to the current value.
*/
public void updateTimes() {
if (value == null) {
value = new Date();
}
if (getDateTimeService().isTwelveHourClock()) {
int h = value.getHours();
ampm.setSelectedIndex(h < 12 ? 0 : 1);
h -= ampm.getSelectedIndex() * 12;
hours.setSelectedIndex(h);
} else {
hours.setSelectedIndex(value.getHours());
}
if (getResolution().getCalendarField() >= Resolution.MINUTE
.getCalendarField()) {
mins.setSelectedIndex(value.getMinutes());
}
if (getResolution().getCalendarField() >= Resolution.SECOND
.getCalendarField()) {
sec.setSelectedIndex(value.getSeconds());
}
if (getDateTimeService().isTwelveHourClock()) {
ampm.setSelectedIndex(value.getHours() < 12 ? 0 : 1);
}
hours.setEnabled(isEnabled());
if (mins != null) {
mins.setEnabled(isEnabled());
}
if (sec != null) {
sec.setEnabled(isEnabled());
}
if (ampm != null) {
ampm.setEnabled(isEnabled());
}
} | 3.68 |
hudi_HeartbeatUtils_abortIfHeartbeatExpired | /**
* Check if the heartbeat corresponding to instantTime has expired. If yes, abort by throwing an exception.
* @param instantTime
* @param table
* @param heartbeatClient
* @param config
*/
public static void abortIfHeartbeatExpired(String instantTime, HoodieTable table,
HoodieHeartbeatClient heartbeatClient, HoodieWriteConfig config) {
ValidationUtils.checkArgument(heartbeatClient != null);
try {
if (config.getFailedWritesCleanPolicy().isLazy() && heartbeatClient.isHeartbeatExpired(instantTime)) {
throw new HoodieException("Heartbeat for instant " + instantTime + " has expired, last heartbeat "
+ getLastHeartbeatTime(table.getMetaClient().getFs(), config.getBasePath(), instantTime));
}
} catch (IOException io) {
throw new HoodieException("Unable to read heartbeat", io);
}
} | 3.68 |
pulsar_NarUnpacker_makeFile | /**
* Creates the specified file, whose contents will come from the <tt>InputStream</tt>.
*
* @param inputStream
* the contents of the file to create.
* @param file
* the file to create.
* @throws IOException
* if the file could not be created.
*/
private static void makeFile(final InputStream inputStream, final File file) throws IOException {
try (final InputStream in = inputStream; final FileOutputStream fos = new FileOutputStream(file)) {
byte[] bytes = new byte[65536];
int numRead;
while ((numRead = in.read(bytes)) != -1) {
fos.write(bytes, 0, numRead);
}
}
} | 3.68 |
pulsar_SchemaUtils_serializeSchemaProperties | /**
* Serialize schema properties.
*
* @param properties schema properties
* @return the serialized schema properties
*/
public static String serializeSchemaProperties(Map<String, String> properties) {
GsonBuilder gsonBuilder = new GsonBuilder()
.registerTypeHierarchyAdapter(Map.class, SCHEMA_PROPERTIES_SERIALIZER);
return gsonBuilder.create().toJson(properties);
} | 3.68 |
framework_GridDragStartEvent_getDraggedItems | /**
* Get the dragged row items.
* <p>
* The ordering of the list is the following: first the item that the drag
* started from, optionally followed by all the other selected rows in
* first-to-last order on the client side.
*
* @return an unmodifiable list of items that are being dragged.
*/
public List<T> getDraggedItems() {
return Collections.unmodifiableList(draggedItems);
} | 3.68 |
framework_View_enter | /**
* Called before the view is shown on screen.
* <p>
* The event object contains information about parameters used when showing
* the view, in addition to references to the old view and the new view.
* <p>
* Override this method to perform initialization of your view.
* <p>
* By default does nothing.
*
* @param event
* an event object containing information about the parameters
* given by the user and references to the old view (if any)
*/
public default void enter(ViewChangeEvent event) {
} | 3.68 |
flink_Rowtime_timestampsFromField | /**
* Sets a built-in timestamp extractor that converts an existing {@link Long} or {@link
* Types#SQL_TIMESTAMP} field into the rowtime attribute.
*
* @param fieldName The field to convert into a rowtime attribute.
*/
public Rowtime timestampsFromField(String fieldName) {
internalProperties.putString(
ROWTIME_TIMESTAMPS_TYPE, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD);
internalProperties.putString(ROWTIME_TIMESTAMPS_FROM, fieldName);
return this;
} | 3.68 |
flink_JoinRecordStateViews_create | /** Creates a {@link JoinRecordStateView} depends on {@link JoinInputSideSpec}. */
public static JoinRecordStateView create(
RuntimeContext ctx,
String stateName,
JoinInputSideSpec inputSideSpec,
InternalTypeInfo<RowData> recordType,
long retentionTime) {
StateTtlConfig ttlConfig = createTtlConfig(retentionTime);
if (inputSideSpec.hasUniqueKey()) {
if (inputSideSpec.joinKeyContainsUniqueKey()) {
return new JoinKeyContainsUniqueKey(ctx, stateName, recordType, ttlConfig);
} else {
return new InputSideHasUniqueKey(
ctx,
stateName,
recordType,
inputSideSpec.getUniqueKeyType(),
inputSideSpec.getUniqueKeySelector(),
ttlConfig);
}
} else {
return new InputSideHasNoUniqueKey(ctx, stateName, recordType, ttlConfig);
}
} | 3.68 |
framework_OnStateChangeMethod_getProperties | /**
* Gets the list of state property names to listen for.
*
* @return the list of state property names to listen for
*/
public List<String> getProperties() {
return properties;
} | 3.68 |
hbase_SlowLogPersistentService_addAllLogsToSysTable | /**
* Poll from queueForSysTable and insert 100 records in hbase:slowlog table in single batch
*/
public void addAllLogsToSysTable(Connection connection) {
if (queueForSysTable == null) {
LOG.trace("hbase.regionserver.slowlog.systable.enabled is turned off. Exiting.");
return;
}
if (LOCK.isLocked()) {
return;
}
LOCK.lock();
try {
List<TooSlowLog.SlowLogPayload> slowLogPayloads = new ArrayList<>();
int i = 0;
while (!queueForSysTable.isEmpty()) {
slowLogPayloads.add(queueForSysTable.poll());
i++;
if (i == SYSTABLE_PUT_BATCH_SIZE) {
SlowLogTableAccessor.addSlowLogRecords(slowLogPayloads, connection);
slowLogPayloads.clear();
i = 0;
}
}
if (slowLogPayloads.size() > 0) {
SlowLogTableAccessor.addSlowLogRecords(slowLogPayloads, connection);
}
} finally {
LOCK.unlock();
}
} | 3.68 |
hudi_CleanPlanner_getPartitionPathsToClean | /**
* Returns list of partitions where clean operations needs to be performed.
*
* @param earliestRetainedInstant New instant to be retained after this cleanup operation
* @return list of partitions to scan for cleaning
* @throws IOException when underlying file-system throws this exception
*/
public List<String> getPartitionPathsToClean(Option<HoodieInstant> earliestRetainedInstant) throws IOException {
switch (config.getCleanerPolicy()) {
case KEEP_LATEST_COMMITS:
case KEEP_LATEST_BY_HOURS:
return getPartitionPathsForCleanByCommits(earliestRetainedInstant);
case KEEP_LATEST_FILE_VERSIONS:
return getPartitionPathsForFullCleaning();
default:
throw new IllegalStateException("Unknown Cleaner Policy");
}
} | 3.68 |
flink_PipelinedSubpartition_getNextBuffer | /** for testing only. */
@VisibleForTesting
BufferConsumerWithPartialRecordLength getNextBuffer() {
return buffers.poll();
} | 3.68 |
hbase_OrderedInt32_encodeInt | /**
* Write instance {@code val} into buffer {@code dst}.
* @param dst the {@link PositionedByteRange} to write to
* @param val the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeInt(PositionedByteRange dst, int val) {
return OrderedBytes.encodeInt32(dst, val, order);
} | 3.68 |
flink_HivePartitionUtils_parsePartitionValues | /** Parse partition string specs into object values. */
public static Map<String, Object> parsePartitionValues(
Map<String, String> partitionSpecs,
String[] fieldNames,
DataType[] fieldTypes,
String defaultPartitionName,
HiveShim shim) {
checkArgument(fieldNames.length == fieldTypes.length);
List<String> fieldNameList = Arrays.asList(fieldNames);
Map<String, Object> partitionColValues = new HashMap<>();
for (Map.Entry<String, String> spec : partitionSpecs.entrySet()) {
String partitionKey = spec.getKey();
String valueString = spec.getValue();
int index = fieldNameList.indexOf(partitionKey);
if (index < 0) {
throw new IllegalStateException(
String.format(
"Partition spec %s and column names %s doesn't match",
partitionSpecs, fieldNameList));
}
LogicalType partitionType = fieldTypes[index].getLogicalType();
final Object value =
restorePartitionValueFromType(
shim, valueString, partitionType, defaultPartitionName);
partitionColValues.put(partitionKey, value);
}
return partitionColValues;
} | 3.68 |
hadoop_ServiceLauncher_parseCommandArgs | /**
* Parse the command arguments, extracting the service class as the last
* element of the list (after extracting all the rest).
*
* The field {@link #commandOptions} field must already have been set.
* @param conf configuration to use
* @param args command line argument list
* @return the remaining arguments
* @throws ServiceLaunchException if processing of arguments failed
*/
protected List<String> parseCommandArgs(Configuration conf,
List<String> args) {
Preconditions.checkNotNull(commandOptions,
"Command options have not been created");
StringBuilder argString = new StringBuilder(args.size() * 32);
for (String arg : args) {
argString.append("\"").append(arg).append("\" ");
}
LOG.debug("Command line: {}", argString);
try {
String[] argArray = args.toArray(new String[args.size()]);
// parse this the standard way. This will
// update the configuration in the parser, and potentially
// patch the user credentials
GenericOptionsParser parser = createGenericOptionsParser(conf, argArray);
if (!parser.isParseSuccessful()) {
throw new ServiceLaunchException(EXIT_COMMAND_ARGUMENT_ERROR,
E_PARSE_FAILED + " %s", argString);
}
CommandLine line = parser.getCommandLine();
List<String> remainingArgs = Arrays.asList(parser.getRemainingArgs());
LOG.debug("Remaining arguments {}", remainingArgs);
// Scan the list of configuration files
// and bail out if they don't exist
if (line.hasOption(ARG_CONF)) {
String[] filenames = line.getOptionValues(ARG_CONF);
verifyConfigurationFilesExist(filenames);
// Add URLs of files as list of URLs to load
for (String filename : filenames) {
File file = new File(filename);
LOG.debug("Configuration files {}", file);
confResourceUrls.add(file.toURI().toURL());
}
}
if (line.hasOption(ARG_CONFCLASS)) {
// new resources to instantiate as configurations
List<String> classnameList = Arrays.asList(
line.getOptionValues(ARG_CONFCLASS));
LOG.debug("Configuration classes {}", classnameList);
confClassnames.addAll(classnameList);
}
// return the remainder
return remainingArgs;
} catch (IOException e) {
// parsing problem: convert to a command argument error with
// the original text
throw new ServiceLaunchException(EXIT_COMMAND_ARGUMENT_ERROR, e);
} catch (RuntimeException e) {
// lower level issue such as XML parse failure
throw new ServiceLaunchException(EXIT_COMMAND_ARGUMENT_ERROR, e,
E_PARSE_FAILED + " %s : %s", argString, e);
}
} | 3.68 |
flink_RocksDBResourceContainer_resolveFileLocation | /**
* Verify log file location.
*
* @param logFilePath Path to log file
* @return File or null if not a valid log file
*/
private File resolveFileLocation(String logFilePath) {
File logFile = new File(logFilePath);
return (logFile.exists() && logFile.canRead()) ? logFile : null;
} | 3.68 |
zxing_MathUtils_distance | /**
* @param aX point A x coordinate
* @param aY point A y coordinate
* @param bX point B x coordinate
* @param bY point B y coordinate
* @return Euclidean distance between points A and B
*/
public static float distance(int aX, int aY, int bX, int bY) {
double xDiff = aX - bX;
double yDiff = aY - bY;
return (float) Math.sqrt(xDiff * xDiff + yDiff * yDiff);
} | 3.68 |
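A quick worked example of the formula (a standalone sketch repeating the computation rather than calling the zxing class): the 3-4-5 right triangle.

```java
public class DistanceDemo {
    public static void main(String[] args) {
        // distance(0, 0, 3, 4): xDiff = -3, yDiff = -4, sqrt(9 + 16) = 5.0
        double xDiff = 0 - 3;
        double yDiff = 0 - 4;
        System.out.println((float) Math.sqrt(xDiff * xDiff + yDiff * yDiff)); // 5.0
    }
}
```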
framework_VGridLayout_updateHeight | /** For internal use only. May be removed or replaced in the future. */
public void updateHeight() {
// Detect minimum heights & calculate spans
detectRowHeights();
// Expand
expandRows();
// Position
layoutCellsVertically();
} | 3.68 |
hbase_AccessChecker_requirePermission | /**
* Authorizes that the current user has any of the given permissions for the given table, column
* family and column qualifier.
* @param user Active user to which authorization checks should be applied
* @param request Request type
* @param tableName Table requested
* @param family Column family requested
* @param qualifier Column qualifier requested
* @param filterUser User name to be filtered from permission as requested
* @param permissions Actions being requested
* @throws IOException if obtaining the current user fails
* @throws AccessDeniedException if user has no authorization
*/
public void requirePermission(User user, String request, TableName tableName, byte[] family,
byte[] qualifier, String filterUser, Action... permissions) throws IOException {
AuthResult result = null;
for (Action permission : permissions) {
if (authManager.authorizeUserTable(user, tableName, family, qualifier, permission)) {
result = AuthResult.allow(request, "Table permission granted", user, permission, tableName,
family, qualifier);
break;
} else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName,
family, qualifier);
}
}
result.getParams().addExtraParam("filterUser", filterUser);
logResult(result);
if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
}
} | 3.68 |
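A hedged sketch of how a caller (for example a coprocessor hook) might invoke this check; the request label, table name and user variables are hypothetical, and Action here is HBase's Permission.Action:

// Throws AccessDeniedException unless the user holds ADMIN or CREATE on the table.
accessChecker.requirePermission(activeUser, "createTable",
    TableName.valueOf("ns:orders"),   // hypothetical table
    null, null,                       // no specific family/qualifier
    null,                             // no filter user
    Permission.Action.ADMIN, Permission.Action.CREATE);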
hadoop_FileUnderConstructionFeature_updateLengthOfLastBlock | /**
* Update the length for the last block
*
* @param lastBlockLength
* The length of the last block reported from client
* @throws IOException
*/
void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
throws IOException {
BlockInfo lastBlock = f.getLastBlock();
assert (lastBlock != null) : "The last block for path "
+ f.getFullPathName() + " is null when updating its length";
assert !lastBlock.isComplete()
: "The last block for path " + f.getFullPathName()
+ " is not under-construction when updating its length";
lastBlock.setNumBytes(lastBlockLength);
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getNonCombinablePathIndices | /**
* Gets all the path indices that should not be combined.
*/
public Set<Integer> getNonCombinablePathIndices(JobConf job, Path[] paths, int numThreads)
throws ExecutionException, InterruptedException {
LOG.info("Total number of paths: " + paths.length + ", launching " + numThreads
+ " threads to check non-combinable ones.");
int numPathPerThread = (int) Math.ceil((double) paths.length / numThreads);
ExecutorService executor = Executors.newFixedThreadPool(numThreads);
List<Future<Set<Integer>>> futureList = new ArrayList<>(numThreads);
try {
for (int i = 0; i < numThreads; i++) {
int start = i * numPathPerThread;
int length = i != numThreads - 1 ? numPathPerThread : paths.length - start;
futureList.add(executor.submit(new CheckNonCombinablePathCallable(paths, start, length, job)));
}
Set<Integer> nonCombinablePathIndices = new HashSet<>();
for (Future<Set<Integer>> future : futureList) {
nonCombinablePathIndices.addAll(future.get());
}
return nonCombinablePathIndices;
} finally {
executor.shutdownNow();
}
} | 3.68 |
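The chunking arithmetic above splits the path array as evenly as possible, with the last thread taking whatever remains. A small worked example, independent of the Hive/Hudi classes:

int numPaths = 10, numThreads = 4;
int numPathPerThread = (int) Math.ceil((double) numPaths / numThreads); // 3
for (int i = 0; i < numThreads; i++) {
  int start = i * numPathPerThread;
  int length = i != numThreads - 1 ? numPathPerThread : numPaths - start;
  System.out.println("thread " + i + ": start=" + start + ", length=" + length);
}
// thread 0: start=0, length=3
// thread 1: start=3, length=3
// thread 2: start=6, length=3
// thread 3: start=9, length=1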
framework_VAbsoluteLayout_setWidgetPosition | /**
 * Sets the position of the widget in the layout. The position is a CSS
 * property string using properties such as top, left, right and bottom.
*
* @param child
* The child widget to set the position for
* @param position
* The position string
*/
public void setWidgetPosition(Widget child, String position) {
AbsoluteWrapper wrapper = getChildWrapper(child);
if (wrapper != null) {
wrapper.setPosition(position);
}
} | 3.68 |
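A hedged usage sketch: the position argument is a CSS-style string, so code in the connector layer might pin a child widget like this (the layout and widget instances are assumed to exist):

// Pins the child 10px from the top and 20px from the left of the layout.
layout.setWidgetPosition(childWidget, "top:10.0px;left:20.0px;");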
morf_OracleDialect_getFromDummyTable | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getFromDummyTable()
*/
@Override
protected String getFromDummyTable() {
return " FROM dual";
} | 3.68 |
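A hypothetical use inside the dialect, showing why the leading space matters: the dummy FROM clause is appended to a table-less SELECT, yielding "SELECT 1 FROM dual" on Oracle.

String sql = "SELECT 1" + getFromDummyTable(); // "SELECT 1 FROM dual"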
hudi_LegacyArchivedMetaEntryReader_loadInstants | /**
 * This is the method to read selected instants. Do NOT use this directly; use one of the helper methods above.
 * If loadInstantDetails is set to true, this also updates the 'readCommits' map with commit details.
 * If filter is specified, only the filtered instants are loaded.
 * If commitsFilter is specified, only the filtered records are loaded.
*/
private ClosableIterator<ActiveAction> loadInstants(HoodieArchivedTimeline.TimeRangeFilter filter) {
try {
// List all files
FileStatus[] fsStatuses = metaClient.getFs().globStatus(
new Path(metaClient.getArchivePath() + "/.commits_.archive*"));
// Sort files by version suffix in reverse (implies reverse chronological order)
Arrays.sort(fsStatuses, new ArchiveLogVersionComparator());
ClosableIterator<HoodieRecord<IndexedRecord>> itr = getRecordIterator(fsStatuses);
return new ClosableIterator<ActiveAction>() {
private ActiveAction activeAction;
private Pair<HoodieInstant, Option<byte[]>> nextInstantAndDetail;
@Override
public void close() {
itr.close();
}
@Override
public boolean hasNext() {
List<Pair<HoodieInstant, Option<byte[]>>> instantAndDetails = new ArrayList<>();
String lastInstantTime = null;
if (nextInstantAndDetail != null) {
instantAndDetails.add(nextInstantAndDetail);
lastInstantTime = nextInstantAndDetail.getKey().getTimestamp();
nextInstantAndDetail = null;
}
while (itr.hasNext()) {
HoodieRecord<IndexedRecord> record = itr.next();
Pair<HoodieInstant, Option<byte[]>> instantAndDetail = readInstant((GenericRecord) record.getData());
String instantTime = instantAndDetail.getKey().getTimestamp();
if (filter == null || filter.isInRange(instantTime)) {
if (lastInstantTime == null) {
instantAndDetails.add(instantAndDetail);
lastInstantTime = instantTime;
} else if (lastInstantTime.equals(instantTime)) {
instantAndDetails.add(instantAndDetail);
} else {
nextInstantAndDetail = instantAndDetail;
break;
}
}
}
if (!instantAndDetails.isEmpty()) {
this.activeAction = ActiveActionWithDetails.fromInstantAndDetails(instantAndDetails);
return true;
}
return false;
}
@Override
public ActiveAction next() {
return this.activeAction;
}
};
} catch (IOException e) {
throw new HoodieIOException(
"Could not load archived commit timeline from path " + metaClient.getArchivePath(), e);
}
} | 3.68 |
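A hedged sketch of consuming the returned iterator from inside the reader; it assumes Hudi's ClosableIterator extends AutoCloseable, so try-with-resources releases the underlying archive readers:

try (ClosableIterator<ActiveAction> actions = loadInstants(null)) { // null = no time-range filter
  while (actions.hasNext()) {
    ActiveAction action = actions.next();
    // each ActiveAction groups every archived record sharing one instant time
  }
}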
hadoop_RouterRMAdminService_createRequestInterceptorChain | /**
 * This method creates and returns a reference to the first interceptor in the
 * chain of request interceptor instances.
 *
 * @return the reference to the first interceptor in the chain
*/
@VisibleForTesting
protected RMAdminRequestInterceptor createRequestInterceptorChain() {
Configuration conf = getConfig();
return RouterServerUtil.createRequestInterceptorChain(conf,
YarnConfiguration.ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE,
YarnConfiguration.DEFAULT_ROUTER_RMADMIN_INTERCEPTOR_CLASS,
RMAdminRequestInterceptor.class);
} | 3.68 |
flink_DataStreamUtils_collect | /**
* Triggers the distributed execution of the streaming dataflow and returns an iterator over the
* elements of the given DataStream.
*
* <p>The DataStream application is executed in the regular distributed manner on the target
* environment, and the events from the stream are polled back to this application process and
* thread through Flink's REST API.
*
* @deprecated Please use {@link DataStream#executeAndCollect()}.
*/
@Deprecated
public static <OUT> Iterator<OUT> collect(DataStream<OUT> stream, String executionJobName) {
try {
return stream.executeAndCollect(executionJobName);
} catch (Exception e) {
// this "wrap as unchecked" step is here only to preserve the exception signature
// backwards compatible.
throw new RuntimeException("Failed to execute data stream", e);
}
} | 3.68 |
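Per the deprecation note, new code should call executeAndCollect directly. A minimal sketch; the job name and elements are illustrative:

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.CloseableIterator;

public class CollectDemo {
  public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> stream = env.fromElements("a", "b", "c");
    // executeAndCollect runs the job and streams results back through the REST API.
    try (CloseableIterator<String> it = stream.executeAndCollect("collect-demo")) {
      it.forEachRemaining(System.out::println);
    }
  }
}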
flink_PojoSerializer_findField | /**
* Finds and returns the order (0-based) of a POJO field. Returns -1 if the field does not exist
* for this POJO.
*/
private int findField(String fieldName) {
int foundIndex = 0;
for (Field field : fields) {
if (field != null && fieldName.equals(field.getName())) {
return foundIndex;
}
foundIndex++;
}
return -1;
} | 3.68 |
framework_VComboBox_getPreferredHeight | /*
* Gets the preferred height of the menu including pageItemsCount items.
*/
String getPreferredHeight(int pageItemsCount) {
if (!currentSuggestions.isEmpty()) {
final int pixels = getPreferredHeight()
/ currentSuggestions.size() * pageItemsCount;
return pixels + "px";
}
return "";
} | 3.68 |
framework_AbstractValidator_toResult | /**
 * A helper method for creating a {@code ValidationResult} from a value and a
 * validity flag. If the flag is true, returns {@code ValidationResult.ok()},
 * otherwise yields {@code ValidationResult.error()} bearing the error message
 * returned by {@link #getMessage(T)}.
* <p>
* For instance, the following {@code apply} method only accepts even
* numbers:
*
* <pre>
* @Override
 * public ValidationResult apply(Integer value, ValueContext context) {
* return toResult(value, value % 2 == 0);
* }
* </pre>
*
* @param value
* the validated value
* @param isValid
* whether the value is valid or not
* @return the validation result
*/
protected ValidationResult toResult(T value, boolean isValid) {
return isValid ? ValidationResult.ok()
: ValidationResult.error(getMessage(value));
} | 3.68 |
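A fuller sketch of the pattern from the Javadoc above, written as a complete validator; it assumes Vaadin 8's package layout, the AbstractValidator(String) constructor and the two-argument apply(value, context) signature:

import com.vaadin.data.ValidationResult;
import com.vaadin.data.ValueContext;
import com.vaadin.data.validator.AbstractValidator;

// Accepts only even integers; toResult picks ok() or error(getMessage(value)).
public class EvenNumberValidator extends AbstractValidator<Integer> {

    public EvenNumberValidator() {
        super("The value must be an even number");
    }

    @Override
    public ValidationResult apply(Integer value, ValueContext context) {
        return toResult(value, value != null && value % 2 == 0);
    }
}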