name | code_snippet | score |
---|---|---|
hudi_HoodieTableMetaClient_scanHoodieInstantsFromFileSystem | /**
* Helper method to scan all hoodie-instant metafiles and construct HoodieInstant objects.
*
* @param timelinePath MetaPath where instant files are stored
* @param includedExtensions Included hoodie extensions
* @param applyLayoutVersionFilters Depending on Timeline layout version, if there are multiple states for the same
* action instant, only include the highest state
* @return List of Hoodie Instants generated
* @throws IOException in case of failure
*/
public List<HoodieInstant> scanHoodieInstantsFromFileSystem(Path timelinePath, Set<String> includedExtensions,
boolean applyLayoutVersionFilters) throws IOException {
Stream<HoodieInstant> instantStream = Arrays.stream(
HoodieTableMetaClient
.scanFiles(getFs(), timelinePath, path -> {
// Include only the meta files with extensions that need to be included
String extension = HoodieInstant.getTimelineFileExtension(path.getName());
return includedExtensions.contains(extension);
})).map(HoodieInstant::new);
if (applyLayoutVersionFilters) {
instantStream = TimelineLayout.getLayout(getTimelineLayoutVersion()).filterHoodieInstants(instantStream);
}
return instantStream.sorted().collect(Collectors.toList());
} | 3.68 |
morf_AbstractSelectStatementBuilder_orderBy | /**
* Specifies the fields by which to order the result set. For use in builder code.
* See {@link #orderBy(AliasedFieldBuilder...)} for the DSL version.
*
* @param orderFields the fields to order by
* @return this, for method chaining.
*/
public T orderBy(Iterable<? extends AliasedFieldBuilder> orderFields) {
if (orderFields == null) {
throw new IllegalArgumentException("Fields were null in order by clause");
}
if(AliasedField.immutableDslEnabled()) {
Iterables.addAll(orderBys, SqlInternalUtils.transformOrderByToAscending(Builder.Helper.buildAll(orderFields)));
} else {
// Add the list
Iterables.addAll(orderBys, Builder.Helper.buildAll(orderFields));
// Default fields to ascending if no direction has been specified
SqlInternalUtils.defaultOrderByToAscending(orderBys);
}
return castToChild(this);
} | 3.68 |
hbase_QuotaObserverChore_processTablesWithQuotas | /**
* Processes each {@code TableName} which has a quota defined and moves it in or out of violation
* based on the space use.
* @param tablesWithTableQuotas The HBase tables which have quotas defined
*/
void processTablesWithQuotas(final Set<TableName> tablesWithTableQuotas) throws IOException {
long numTablesInViolation = 0L;
for (TableName table : tablesWithTableQuotas) {
final SpaceQuota spaceQuota = tableSnapshotStore.getSpaceQuota(table);
if (spaceQuota == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Unexpectedly did not find a space quota for " + table
+ ", maybe it was recently deleted.");
}
continue;
}
final SpaceQuotaSnapshot currentSnapshot = tableSnapshotStore.getCurrentState(table);
final SpaceQuotaSnapshot targetSnapshot =
tableSnapshotStore.getTargetState(table, spaceQuota);
if (LOG.isTraceEnabled()) {
LOG.trace("Processing " + table + " with current=" + currentSnapshot + ", target="
+ targetSnapshot);
}
updateTableQuota(table, currentSnapshot, targetSnapshot);
if (targetSnapshot.getQuotaStatus().isInViolation()) {
numTablesInViolation++;
}
}
// Report the number of tables in violation
if (metrics != null) {
metrics.setNumTableInSpaceQuotaViolation(numTablesInViolation);
}
} | 3.68 |
flink_PrioritizedDeque_addPriorityElement | /**
* Adds a priority element to this deque, such that it will be polled after all existing
* priority elements but before any non-priority element.
*
* @param element the element to add
*/
public void addPriorityElement(T element) {
// priority elements are rather rare and short-lived, so most of the time there are none
if (numPriorityElements == 0) {
deque.addFirst(element);
} else if (numPriorityElements == deque.size()) {
// no non-priority elements
deque.add(element);
} else {
// temporarily remove all existing priority elements
final ArrayDeque<T> priorPriority = new ArrayDeque<>(numPriorityElements);
for (int index = 0; index < numPriorityElements; index++) {
priorPriority.addFirst(deque.poll());
}
deque.addFirst(element);
// re-add them before the newly added element
for (final T priorityEvent : priorPriority) {
deque.addFirst(priorityEvent);
}
}
numPriorityElements++;
} | 3.68 |
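The ordering contract documented above — a new priority element is polled after all existing priority elements but before any non-priority element — can be seen with a minimal standalone sketch. The `PrioritizedDemo` class below is hypothetical: it only mirrors the insertion logic of the snippet using a plain `java.util.ArrayDeque` and is not Flink's actual class.

```java
import java.util.ArrayDeque;

// Hypothetical illustration of the insertion order used by addPriorityElement.
public class PrioritizedDemo {
    private final ArrayDeque<String> deque = new ArrayDeque<>();
    private int numPriorityElements;

    void addPriorityElement(String element) {
        if (numPriorityElements == 0) {
            deque.addFirst(element);            // becomes the new head
        } else if (numPriorityElements == deque.size()) {
            deque.add(element);                 // only priority elements so far
        } else {
            // temporarily drain the existing priority elements, then re-add them
            ArrayDeque<String> prior = new ArrayDeque<>(numPriorityElements);
            for (int i = 0; i < numPriorityElements; i++) {
                prior.addFirst(deque.poll());
            }
            deque.addFirst(element);
            for (String p : prior) {
                deque.addFirst(p);
            }
        }
        numPriorityElements++;
    }

    void add(String element) {
        deque.add(element);                     // non-priority element goes to the tail
    }

    public static void main(String[] args) {
        PrioritizedDemo d = new PrioritizedDemo();
        d.add("n1");
        d.addPriorityElement("p1");
        d.addPriorityElement("p2");
        // Polling order is p1, p2, n1: p2 comes after the existing priority
        // element p1 but before the non-priority element n1.
        System.out.println(d.deque); // [p1, p2, n1]
    }
}
```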
framework_VScrollTable_selectLastRenderedRowInViewPort | /**
* Selects the last row visible in the table
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param focusOnly
* Should the focus only be moved to the last row
*/
public void selectLastRenderedRowInViewPort(boolean focusOnly) {
int index = firstRowInViewPort + getFullyVisibleRowCount();
VScrollTableRow lastRowInViewport = scrollBody.getRowByRowIndex(index);
if (lastRowInViewport == null) {
// this should not happen in normal situations (white space at the
// end of viewport). Select the last rendered as a fallback.
lastRowInViewport = scrollBody
.getRowByRowIndex(scrollBody.getLastRendered());
if (lastRowInViewport == null) {
return; // empty table
}
}
setRowFocus(lastRowInViewport);
if (!focusOnly) {
selectFocusedRow(false, multiselectPending);
sendSelectedRows();
}
} | 3.68 |
flink_KubernetesUtils_getLeaderInformationFromConfigMap | /**
* Get the {@link LeaderInformation} from ConfigMap.
*
* @param configMap ConfigMap contains the leader information
* @return Parsed leader information. It could be {@link LeaderInformation#empty()} if there is
* no corresponding data in the ConfigMap.
*/
public static LeaderInformation getLeaderInformationFromConfigMap(
KubernetesConfigMap configMap) {
final String leaderAddress = configMap.getData().get(LEADER_ADDRESS_KEY);
final String sessionIDStr = configMap.getData().get(LEADER_SESSION_ID_KEY);
final UUID sessionID = sessionIDStr == null ? null : UUID.fromString(sessionIDStr);
if (leaderAddress == null && sessionIDStr == null) {
return LeaderInformation.empty();
}
return LeaderInformation.known(sessionID, leaderAddress);
} | 3.68 |
hadoop_BaseService_getPrefix | /**
* Returns the service prefix.
*
* @return the service prefix.
*/
protected String getPrefix() {
return prefix;
} | 3.68 |
morf_SchemaChangeSequence_changeIndex | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#changeIndex(java.lang.String, org.alfasoftware.morf.metadata.Index, org.alfasoftware.morf.metadata.Index)
*/
@Override
public void changeIndex(String tableName, Index fromIndex, Index toIndex) {
ChangeIndex changeIndex = new ChangeIndex(tableName, fromIndex, toIndex);
visitor.visit(changeIndex);
schemaAndDataChangeVisitor.visit(changeIndex);
} | 3.68 |
hadoop_StripedBlockChecksumReconstructor_getBufferArray | /**
* Gets an array corresponding the buffer.
* @param buffer the input buffer.
* @return the array with content of the buffer.
*/
private static byte[] getBufferArray(ByteBuffer buffer) {
byte[] buff = new byte[buffer.remaining()];
if (buffer.hasArray()) {
buff = buffer.array();
} else {
buffer.slice().get(buff);
}
return buff;
} | 3.68 |
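For context on the buffer handling above, here is a small self-contained sketch (plain JDK, no Hadoop types; the class name is made up) showing how `remaining()` and `slice().get(...)` copy only the unread portion of a `ByteBuffer` without disturbing its position.

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

public class BufferCopyDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.wrap(new byte[] {1, 2, 3, 4, 5});
        buffer.get(); // consume one byte; position moves to 1
        byte[] copy = new byte[buffer.remaining()];
        // slice() views only the bytes between position and limit,
        // so the original buffer's position is left untouched
        buffer.slice().get(copy);
        System.out.println(Arrays.toString(copy)); // [2, 3, 4, 5]
    }
}
```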
framework_DownloadStream_setStream | /**
* Sets the stream.
*
* @param stream
* The stream to set
*/
public void setStream(InputStream stream) {
this.stream = stream;
} | 3.68 |
pulsar_BacklogQuotaManager_dropBacklogForTimeLimit | /**
* Drop the backlog on the topic.
*
* @param persistentTopic
* The topic from which backlog should be dropped
* @param quota
* Backlog quota set for the topic
*/
private void dropBacklogForTimeLimit(PersistentTopic persistentTopic, BacklogQuota quota,
boolean preciseTimeBasedBacklogQuotaCheck) {
// If precise time-based backlog quota check is enabled, expire messages based on the time quota
if (preciseTimeBasedBacklogQuotaCheck) {
// Set the reduction factor to 90%. The aim is to drop down the backlog to 90% of the quota limit.
double reductionFactor = 0.9;
int target = (int) (reductionFactor * quota.getLimitTime());
if (log.isDebugEnabled()) {
log.debug("[{}] target backlog expire time is [{}]", persistentTopic.getName(), target);
}
persistentTopic.getSubscriptions().forEach((__, subscription) ->
subscription.getExpiryMonitor().expireMessages(target)
);
} else {
// If precise time-based backlog quota check is disabled, try to remove whole ledgers from the cursor's backlog
Long currentMillis = ((ManagedLedgerImpl) persistentTopic.getManagedLedger()).getClock().millis();
ManagedLedgerImpl mLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger();
try {
for (; ; ) {
ManagedCursor slowestConsumer = mLedger.getSlowestConsumer();
Position oldestPosition = slowestConsumer.getMarkDeletedPosition();
if (log.isDebugEnabled()) {
log.debug("[{}] slowest consumer mark delete position is [{}], read position is [{}]",
slowestConsumer.getName(), oldestPosition, slowestConsumer.getReadPosition());
}
ManagedLedgerInfo.LedgerInfo ledgerInfo = mLedger.getLedgerInfo(oldestPosition.getLedgerId()).get();
if (ledgerInfo == null) {
PositionImpl nextPosition =
PositionImpl.get(mLedger.getNextValidLedger(oldestPosition.getLedgerId()), -1);
slowestConsumer.markDelete(nextPosition);
continue;
}
// Timestamp only > 0 if ledger has been closed
if (ledgerInfo.getTimestamp() > 0
&& currentMillis - ledgerInfo.getTimestamp() > quota.getLimitTime() * 1000) {
// skip whole ledger for the slowest cursor
PositionImpl nextPosition =
PositionImpl.get(mLedger.getNextValidLedger(ledgerInfo.getLedgerId()), -1);
if (!nextPosition.equals(oldestPosition)) {
slowestConsumer.markDelete(nextPosition);
continue;
}
}
break;
}
} catch (Exception e) {
log.error("[{}] Error resetting cursor for slowest consumer [{}]", persistentTopic.getName(),
mLedger.getSlowestConsumer().getName(), e);
}
}
} | 3.68 |
flink_WatermarkOutputMultiplexer_registerNewOutput | /**
* Registers a new multiplexed output, which creates internal states for that output and returns
* an output ID that can be used to get a deferred or immediate {@link WatermarkOutput} for that
* output.
*/
public void registerNewOutput(String id, WatermarkUpdateListener onWatermarkUpdate) {
final PartialWatermark outputState = new PartialWatermark(onWatermarkUpdate);
final PartialWatermark previouslyRegistered =
watermarkPerOutputId.putIfAbsent(id, outputState);
checkState(previouslyRegistered == null, "Already contains an output for ID %s", id);
combinedWatermarkStatus.add(outputState);
} | 3.68 |
framework_BrowserInfo_getBrowserVersion | /**
* Gets the complete browser version in form of a string. The version is
* given by the browser through the user agent string and usually consists
* of dot-separated numbers. Note that the string may contain characters
* other than dots and digits.
*
* @return the complete browser version or {@code null} if unknown
* @since 8.4
*/
public String getBrowserVersion() {
return browserDetails.getBrowserVersion();
} | 3.68 |
hbase_TableRegionModel_getName | /** Returns the region name */
@XmlAttribute
public String getName() {
byte[] tableNameAsBytes = Bytes.toBytes(this.table);
TableName tableName = TableName.valueOf(tableNameAsBytes);
byte[] nameAsBytes =
RegionInfo.createRegionName(tableName, this.startKey, this.id, !tableName.isSystemTable());
return Bytes.toString(nameAsBytes);
} | 3.68 |
hadoop_Chain_createReduceContext | /**
* Create a reduce context that is based on ChainMapContext and the given
* record writer
*/
private <KEYIN, VALUEIN, KEYOUT, VALUEOUT>
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createReduceContext(
RecordWriter<KEYOUT, VALUEOUT> rw,
ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context,
Configuration conf) {
ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext =
new ChainReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
context, rw, conf);
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context reducerContext =
new WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
.getReducerContext(reduceContext);
return reducerContext;
} | 3.68 |
querydsl_GeometryExpression_envelope | /**
* The minimum bounding box for this Geometry, returned as a Geometry. The
* polygon is defined by the corner points of the bounding box [(MINX, MINY), (MAXX, MINY), (MAXX, MAXY),
* (MINX, MAXY), (MINX, MINY)]. Minimums for Z and M may be added. The simplest representation of an
* Envelope is as two direct positions, one containing all the minimums, and another all the maximums. In some
* cases, this coordinate will be outside the range of validity for the Spatial Reference System.
*
* @return envelope
*/
public GeometryExpression<Geometry> envelope() {
if (envelope == null) {
envelope = GeometryExpressions.geometryOperation(SpatialOps.ENVELOPE, mixin);
}
return envelope;
} | 3.68 |
hadoop_BoundedAppender_append | /**
* Append a {@link CharSequence} considering {@link #limit}, truncating
* from the head of {@code csq} or {@link #messages} when necessary.
*
* @param csq the {@link CharSequence} to append
* @return this
*/
public BoundedAppender append(final CharSequence csq) {
appendAndCount(csq);
checkAndCut();
return this;
} | 3.68 |
flink_ExecutionConfig_getNumberOfExecutionRetries | /**
* Gets the number of times the system will try to re-execute failed tasks. A value of {@code
* -1} indicates that the system default value (as defined in the configuration) should be used.
*
* @return The number of times the system will try to re-execute failed tasks.
* @deprecated Should no longer be used because it is subsumed by RestartStrategyConfiguration
*/
@Deprecated
public int getNumberOfExecutionRetries() {
return configuration.get(EXECUTION_RETRIES);
} | 3.68 |
framework_Table_getCauses | /**
* Returns the cause(s) for this exception.
*
* @return the exception(s) which caused this exception
*/
public Throwable[] getCauses() {
return causes;
} | 3.68 |
hbase_AsyncAdmin_getMaster | /** Returns current master server name wrapped by {@link CompletableFuture} */
default CompletableFuture<ServerName> getMaster() {
return getClusterMetrics(EnumSet.of(Option.MASTER)).thenApply(ClusterMetrics::getMasterName);
} | 3.68 |
flink_KubernetesUtils_parsePort | /**
* Parse a valid port for the config option. A fixed port is expected; a range of ports is not
* supported.
*
* @param flinkConfig flink config
* @param port port config option
* @return valid port
*/
public static Integer parsePort(Configuration flinkConfig, ConfigOption<String> port) {
checkNotNull(flinkConfig.get(port), port.key() + " should not be null.");
try {
return Integer.parseInt(flinkConfig.get(port));
} catch (NumberFormatException ex) {
throw new FlinkRuntimeException(
port.key()
+ " should be specified to a fixed port. Do not support a range of ports.",
ex);
}
} | 3.68 |
pulsar_ResourceGroupService_getRgQuotaByteCount | // Visibility for testing.
protected static double getRgQuotaByteCount (String rgName, String monClassName) {
return rgCalculatedQuotaBytes.labels(rgName, monClassName).get();
} | 3.68 |
flink_SessionWithGapOnTime_as | /**
* Assigns an alias for this window that the following {@code groupBy()} and {@code select()}
* clause can refer to. {@code select()} statement can access window properties such as window
* start or end time.
*
* @param alias alias for this window
* @return this window
*/
public SessionWithGapOnTimeWithAlias as(Expression alias) {
return new SessionWithGapOnTimeWithAlias(alias, timeField, gap);
} | 3.68 |
framework_ContainerHierarchicalWrapper_getChildren | /*
* Gets the IDs of the children of the specified Item. Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public Collection<?> getChildren(Object itemId) {
// If the wrapped container implements the method directly, use it
if (hierarchical) {
return ((Container.Hierarchical) container).getChildren(itemId);
}
final Collection<?> c = children.get(itemId);
if (c == null) {
return null;
}
return Collections.unmodifiableCollection(c);
} | 3.68 |
hudi_MergeOnReadInputFormat_builder | /**
* Returns the builder for {@link MergeOnReadInputFormat}.
*/
public static Builder builder() {
return new Builder();
} | 3.68 |
framework_FileDropTarget_createUrl | /**
* Creates an upload URL for the given file and file ID.
*
* @param file
* File to be uploaded.
* @param id
* Generated ID for the file.
* @return Upload URL for uploading the file to the server.
*/
private String createUrl(Html5File file, String id) {
return getStreamVariableTargetUrl("rec-" + id,
new FileReceiver(id, file));
} | 3.68 |
morf_AbstractSqlDialectTest_testClobFieldLiteralWithLongfield | /**
* Test that getSqlFrom(ClobFieldLiteral) returns the correct SQL.
*/
@Test
public void testClobFieldLiteralWithLongfield() {
String result = testDialect.getSqlFrom(new ClobFieldLiteral(LONG_FIELD_STRING));
assertEquals(expectedClobLiteralCast(), result);
} | 3.68 |
hmily_DateUtils_getDateYYYY | /**
* Gets date yyyy.
*
* @return the date yyyy
*/
public static Date getDateYYYY() {
LocalDateTime localDateTime = parseLocalDateTime(getCurrentDateTime());
ZoneId zone = ZoneId.systemDefault();
Instant instant = localDateTime.atZone(zone).toInstant();
return Date.from(instant);
} | 3.68 |
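The snippet relies on hmily-specific helpers (`parseLocalDateTime`, `getCurrentDateTime`) to obtain the `LocalDateTime`; the conversion chain itself (LocalDateTime → ZonedDateTime → Instant → legacy `Date`) is plain `java.time`, as this minimal sketch assumes:

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.Date;

public class DateConversionDemo {
    public static void main(String[] args) {
        LocalDateTime localDateTime = LocalDateTime.now();
        ZoneId zone = ZoneId.systemDefault();
        // Attach the system zone, then collapse to an instant on the timeline
        Instant instant = localDateTime.atZone(zone).toInstant();
        Date legacyDate = Date.from(instant); // same moment, expressed as java.util.Date
        System.out.println(legacyDate);
    }
}
```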
framework_LegacyCommunicationManager_encodeState | /**
* @deprecated As of 7.1. See #11411.
*/
@Deprecated
public static JsonObject encodeState(ClientConnector connector,
SharedState state) {
UI uI = connector.getUI();
ConnectorTracker connectorTracker = uI.getConnectorTracker();
Class<? extends SharedState> stateType = connector.getStateType();
JsonValue diffState = connectorTracker.getDiffState(connector);
if (diffState == null) {
// Use an empty state object as reference for full
// repaints
diffState = REFERENCE_DIFF_STATES.get(stateType);
if (diffState == null) {
diffState = createReferenceDiffStateState(stateType);
REFERENCE_DIFF_STATES.put(stateType, diffState);
}
}
EncodeResult encodeResult = JsonCodec.encode(state, diffState,
stateType, uI.getConnectorTracker());
connectorTracker.setDiffState(connector,
(JsonObject) encodeResult.getEncodedValue());
return (JsonObject) encodeResult.getDiff();
} | 3.68 |
hadoop_AzureNativeFileSystemStore_changePermissionStatus | /**
* Changes the permission status on the given key.
*/
@Override
public void changePermissionStatus(String key, PermissionStatus newPermission)
throws AzureException {
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
storePermissionStatus(blob, newPermission);
blob.uploadMetadata(getInstrumentedContext());
} catch (Exception e) {
throw new AzureException(e);
}
} | 3.68 |
hbase_Query_getLoadColumnFamiliesOnDemandValue | /**
* Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null.
*/
public Boolean getLoadColumnFamiliesOnDemandValue() {
return this.loadColumnFamiliesOnDemand;
} | 3.68 |
hadoop_JobMetaData_setRecurrenceId | /**
* Set {@link RecurrenceId}.
*
* @param recurrenceIdConfig the {@link RecurrenceId}.
* @return the reference to current {@link JobMetaData}.
*/
public final JobMetaData setRecurrenceId(
final RecurrenceId recurrenceIdConfig) {
this.recurrenceId = recurrenceIdConfig;
return this;
} | 3.68 |
hbase_ServerRpcController_getFailedOn | /**
* Returns any exception thrown during service method invocation, or {@code null} if no exception
* was thrown. This can be used by clients to receive exceptions generated by RPC calls, even when
* {@link RpcCallback}s are used and no
* {@link org.apache.hbase.thirdparty.com.google.protobuf.ServiceException} is declared.
*/
public IOException getFailedOn() {
return serviceException;
} | 3.68 |
hbase_CoprocessorHost_load | /**
* Load a coprocessor implementation into the host
* @param path path to implementation jar
* @param className the main class name
* @param priority chaining priority
* @param conf configuration for coprocessor
* @param includedClassPrefixes class name prefixes to include
* @throws java.io.IOException Exception
*/
public E load(Path path, String className, int priority, Configuration conf,
String[] includedClassPrefixes) throws IOException {
Class<?> implClass;
LOG.debug("Loading coprocessor class " + className + " with path " + path + " and priority "
+ priority);
boolean skipLoadDuplicateCoprocessor = conf.getBoolean(SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR,
DEFAULT_SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR);
if (skipLoadDuplicateCoprocessor && findCoprocessor(className) != null) {
// If already loaded will just continue
LOG.warn("Attempted duplicate loading of {}; skipped", className);
return null;
}
ClassLoader cl = null;
if (path == null) {
try {
implClass = getClass().getClassLoader().loadClass(className);
} catch (ClassNotFoundException e) {
throw new IOException("No jar path specified for " + className);
}
} else {
cl =
CoprocessorClassLoader.getClassLoader(path, getClass().getClassLoader(), pathPrefix, conf);
try {
implClass = ((CoprocessorClassLoader) cl).loadClass(className, includedClassPrefixes);
} catch (ClassNotFoundException e) {
throw new IOException("Cannot load external coprocessor class " + className, e);
}
}
// load custom code for coprocessor
Thread currentThread = Thread.currentThread();
ClassLoader hostClassLoader = currentThread.getContextClassLoader();
try {
// switch temporarily to the thread classloader for custom CP
currentThread.setContextClassLoader(cl);
E cpInstance = checkAndLoadInstance(implClass, priority, conf);
return cpInstance;
} finally {
// restore the fresh (host) classloader
currentThread.setContextClassLoader(hostClassLoader);
}
} | 3.68 |
pulsar_ModularLoadManagerImpl_needBrokerDataUpdate | // Determine if the broker data requires an update by delegating to the update condition.
private boolean needBrokerDataUpdate() {
final long updateMaxIntervalMillis = TimeUnit.MINUTES
.toMillis(conf.getLoadBalancerReportUpdateMaxIntervalMinutes());
long timeSinceLastReportWrittenToStore = System.currentTimeMillis() - localData.getLastUpdate();
if (timeSinceLastReportWrittenToStore > updateMaxIntervalMillis) {
log.info("Writing local data to metadata store because time since last"
+ " update exceeded threshold of {} minutes",
conf.getLoadBalancerReportUpdateMaxIntervalMinutes());
// Always update after surpassing the maximum interval.
return true;
}
final double maxChange = Math
.max(100.0 * (Math.abs(lastData.getMaxResourceUsage() - localData.getMaxResourceUsage())),
Math.max(percentChange(lastData.getMsgRateIn() + lastData.getMsgRateOut(),
localData.getMsgRateIn() + localData.getMsgRateOut()),
Math.max(
percentChange(lastData.getMsgThroughputIn() + lastData.getMsgThroughputOut(),
localData.getMsgThroughputIn() + localData.getMsgThroughputOut()),
percentChange(lastData.getNumBundles(), localData.getNumBundles()))));
if (maxChange > conf.getLoadBalancerReportUpdateThresholdPercentage()) {
log.info("Writing local data to metadata store because maximum change {}% exceeded threshold {}%; "
+ "time since last report written is {} seconds", maxChange,
conf.getLoadBalancerReportUpdateThresholdPercentage(),
timeSinceLastReportWrittenToStore / 1000.0);
return true;
}
return false;
} | 3.68 |
flink_TypeExtractionUtils_getSingleAbstractMethod | /**
* Extracts a Single Abstract Method (SAM) as defined in Java Specification (4.3.2. The Class
* Object, 9.8 Functional Interfaces, 9.4.3 Interface Method Body) from given class.
*
* @param baseClass a class that is a FunctionalInterface to retrieve a SAM from
* @throws InvalidTypesException if the given class does not implement FunctionalInterface
* @return single abstract method of the given class
*/
public static Method getSingleAbstractMethod(Class<?> baseClass) {
if (!baseClass.isInterface()) {
throw new InvalidTypesException(
"Given class: " + baseClass + "is not a FunctionalInterface.");
}
Method sam = null;
for (Method method : baseClass.getMethods()) {
if (Modifier.isAbstract(method.getModifiers())) {
if (sam == null) {
sam = method;
} else {
throw new InvalidTypesException(
"Given class: "
+ baseClass
+ " is not a FunctionalInterface. It has more than one abstract method.");
}
}
}
if (sam == null) {
throw new InvalidTypesException(
"Given class: "
+ baseClass
+ " is not a FunctionalInterface. It does not have any abstract methods.");
}
return sam;
} | 3.68 |
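As a hedged illustration of the reflection used above: for a standard functional interface such as `java.util.function.Function`, scanning `getMethods()` for abstract methods finds only `apply`, since `andThen`/`compose` are default methods and `identity` is static. The demo class below uses only JDK reflection and is not part of Flink.

```java
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.function.Function;

public class SamLookupDemo {
    public static void main(String[] args) {
        Method sam = null;
        for (Method method : Function.class.getMethods()) {
            // default and static interface methods are not abstract and are skipped here
            if (Modifier.isAbstract(method.getModifiers())) {
                sam = method;
            }
        }
        // Expected to print the single abstract method: Function.apply(Object)
        System.out.println(sam);
    }
}
```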
hadoop_AMRMProxyApplicationContextImpl_setAMRMToken | /**
* Sets the application's AMRMToken.
*
* @param amrmToken the new amrmToken from RM
* @return whether the saved token is updated to a different value
*/
public synchronized boolean setAMRMToken(
Token<AMRMTokenIdentifier> amrmToken) {
Token<AMRMTokenIdentifier> oldValue = this.amrmToken;
this.amrmToken = amrmToken;
return !this.amrmToken.equals(oldValue);
} | 3.68 |
hbase_Constraints_serializeConfiguration | /**
* Write the configuration to a String
* @param conf to write
* @return String representation of that configuration
*/
private static String serializeConfiguration(Configuration conf) throws IOException {
// write the configuration out to the data stream
ByteArrayOutputStream bos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bos);
conf.writeXml(dos);
dos.flush();
byte[] data = bos.toByteArray();
return Bytes.toString(data);
} | 3.68 |
morf_SpreadsheetDataSetProducer_close | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.dataset.DataSetProducer#close()
*/
@Override
public void close() {
// Nothing to do
} | 3.68 |
hadoop_VersionedWritable_write | // javadoc from Writable
@Override
public void write(DataOutput out) throws IOException {
out.writeByte(getVersion()); // store version
} | 3.68 |
hudi_HoodieAdbJdbcClient_getPartitionEvents | /**
* TODO align with {@link HoodieSyncClient#getPartitionEvents}
*/
public List<PartitionEvent> getPartitionEvents(Map<List<String>, String> tablePartitions, List<String> partitionStoragePartitions) {
Map<String, String> paths = new HashMap<>();
for (Map.Entry<List<String>, String> entry : tablePartitions.entrySet()) {
List<String> partitionValues = entry.getKey();
String fullTablePartitionPath = entry.getValue();
paths.put(String.join(", ", partitionValues), fullTablePartitionPath);
}
List<PartitionEvent> events = new ArrayList<>();
for (String storagePartition : partitionStoragePartitions) {
Path storagePartitionPath = FSUtils.getPartitionPath(config.getString(META_SYNC_BASE_PATH), storagePartition);
String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
// Check if the partition values or if hdfs path is the same
List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
if (config.getBoolean(ADB_SYNC_USE_HIVE_STYLE_PARTITIONING)) {
String partition = String.join("/", storagePartitionValues);
storagePartitionPath = FSUtils.getPartitionPath(config.getString(META_SYNC_BASE_PATH), partition);
fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
}
if (!storagePartitionValues.isEmpty()) {
String storageValue = String.join(", ", storagePartitionValues);
if (!paths.containsKey(storageValue)) {
events.add(PartitionEvent.newPartitionAddEvent(storagePartition));
} else if (!paths.get(storageValue).equals(fullStoragePartitionPath)) {
events.add(PartitionEvent.newPartitionUpdateEvent(storagePartition));
}
}
}
return events;
} | 3.68 |
hbase_BackupAdminImpl_deleteBackup | /**
* Delete single backup and all related backups <br>
* Algorithm:<br>
* Backup type: FULL or INCREMENTAL <br>
* Is this last backup session for table T: YES or NO <br>
* For every table T from table list 'tables':<br>
* if(FULL, YES) deletes only physical data (PD) <br>
* if(FULL, NO), deletes PD, scans all newer backups and removes T from backupInfo,<br>
* until we either reach the most recent backup for T in the system or FULL backup<br>
* which includes T<br>
* if(INCREMENTAL, YES) deletes only physical data (PD)<br>
* if(INCREMENTAL, NO) deletes physical data
* and for table T scans all backup images between last<br>
* FULL backup, which is older than the backup being deleted and the next FULL backup (if exists)
* <br>
* or last one for a particular table T and removes T from list of backup tables.
* @param backupId backup id
* @param sysTable backup system table
* @return total number of deleted backup images
* @throws IOException if deleting the backup fails
*/
private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
BackupInfo backupInfo = sysTable.readBackupInfo(backupId);
int totalDeleted = 0;
if (backupInfo != null) {
LOG.info("Deleting backup " + backupInfo.getBackupId() + " ...");
// Step 1: clean up data for backup session (idempotent)
BackupUtils.cleanupBackupData(backupInfo, conn.getConfiguration());
// List of tables in this backup;
List<TableName> tables = backupInfo.getTableNames();
long startTime = backupInfo.getStartTs();
for (TableName tn : tables) {
boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime);
if (isLastBackupSession) {
continue;
}
// else
List<BackupInfo> affectedBackups = getAffectedBackupSessions(backupInfo, tn, sysTable);
for (BackupInfo info : affectedBackups) {
if (info.equals(backupInfo)) {
continue;
}
removeTableFromBackupImage(info, tn, sysTable);
}
}
Map<byte[], String> map = sysTable.readBulkLoadedFiles(backupId);
FileSystem fs = FileSystem.get(conn.getConfiguration());
boolean success = true;
int numDeleted = 0;
for (String f : map.values()) {
Path p = new Path(f);
try {
LOG.debug("Delete backup info " + p + " for " + backupInfo.getBackupId());
if (!fs.delete(p)) {
if (fs.exists(p)) {
LOG.warn(f + " was not deleted");
success = false;
}
} else {
numDeleted++;
}
} catch (IOException ioe) {
LOG.warn(f + " was not deleted", ioe);
success = false;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(numDeleted + " bulk loaded files out of " + map.size() + " were deleted");
}
if (success) {
sysTable.deleteBulkLoadedRows(new ArrayList<>(map.keySet()));
}
sysTable.deleteBackupInfo(backupInfo.getBackupId());
LOG.info("Delete backup " + backupInfo.getBackupId() + " completed.");
totalDeleted++;
} else {
LOG.warn("Delete backup failed: no information found for backupID=" + backupId);
}
return totalDeleted;
} | 3.68 |
flink_CliView_resetAllParts | /** Must be called when values in one or more parts have changed. */
protected void resetAllParts() {
titleLine = null;
headerLines = null;
mainHeaderLines = null;
mainLines = null;
footerLines = null;
totalMainWidth = 0;
} | 3.68 |
AreaShop_GeneralRegion_configurableMessage | /**
* Method to send a message to a CommandSender, using chatprefix if it is a player.
* Automatically includes the region in the message, enabling the use of all variables.
* @param target The CommandSender you want to send the message to (e.g. a player)
* @param key The key to get the translation
* @param prefix Specify if the message should have a prefix
* @param params The parameters to inject into the message string
*/
public void configurableMessage(Object target, String key, boolean prefix, Object... params) {
Object[] newParams = new Object[params.length + 1];
newParams[0] = this;
System.arraycopy(params, 0, newParams, 1, params.length);
Message.fromKey(key).prefix(prefix).replacements(newParams).send(target);
} | 3.68 |
flink_FileSource_forBulkFileFormat | /**
* Builds a new {@code FileSource} using a {@link BulkFormat} to read batches of records from
* files.
*
* <p>Examples for bulk readers are compressed and vectorized formats such as ORC or Parquet.
*/
public static <T> FileSourceBuilder<T> forBulkFileFormat(
final BulkFormat<T, FileSourceSplit> bulkFormat, final Path... paths) {
checkNotNull(bulkFormat, "reader");
checkNotNull(paths, "paths");
checkArgument(paths.length > 0, "paths must not be empty");
return new FileSourceBuilder<>(paths, bulkFormat);
} | 3.68 |
framework_VScrollTable_updateDragMode | /** For internal use only. May be removed or replaced in the future. */
public void updateDragMode(UIDL uidl) {
dragmode = uidl.hasAttribute("dragmode")
? uidl.getIntAttribute("dragmode")
: 0;
if (BrowserInfo.get().isIE()) {
if (dragmode > 0) {
getElement().setPropertyJSO("onselectstart",
getPreventTextSelectionIEHack());
} else {
getElement().setPropertyJSO("onselectstart", null);
}
}
} | 3.68 |
hudi_TableChanges_updateColumnType | /**
* Update a column in the schema to a new type.
* Only primitive type updates are supported.
* Only updates that widen types are allowed.
*
* @param name name of the column to update
* @param newType new type for the column
* @return this
* @throws IllegalArgumentException
*/
public ColumnUpdateChange updateColumnType(String name, Type newType) {
checkColModifyIsLegal(name);
if (newType.isNestedType()) {
throw new IllegalArgumentException(String.format("only primitive type updates are supported, but found nested column: %s", name));
}
Types.Field field = internalSchema.findField(name);
if (field == null) {
throw new IllegalArgumentException(String.format("cannot update a missing column: %s", name));
}
if (!SchemaChangeUtils.isTypeUpdateAllow(field.type(), newType)) {
throw new IllegalArgumentException(String.format("cannot update origin type: %s to an incompatible type: %s", field.type(), newType));
}
if (field.type().equals(newType)) {
// do nothing
return this;
}
// save update info
Types.Field update = updates.get(field.fieldId());
if (update == null) {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), field.isOptional(), field.name(), newType, field.doc()));
} else {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), update.isOptional(), update.name(), newType, update.doc()));
}
return this;
} | 3.68 |
streampipes_DefaultExtractor_getInstance | /**
* Returns the singleton instance for {@link DefaultExtractor}.
*/
public static DefaultExtractor getInstance() {
return INSTANCE;
} | 3.68 |
flink_FlinkCalciteSqlValidator_getExplicitTableOperands | /**
* Returns all {@link SqlKind#EXPLICIT_TABLE} operands within TVF operands. A list entry is
* {@code null} if the operand is not an {@link SqlKind#EXPLICIT_TABLE}.
*/
private static List<SqlIdentifier> getExplicitTableOperands(SqlNode node) {
if (!(node instanceof SqlBasicCall)) {
return null;
}
final SqlBasicCall call = (SqlBasicCall) node;
if (!(call.getOperator() instanceof SqlFunction)) {
return null;
}
final SqlFunction function = (SqlFunction) call.getOperator();
if (!isTableFunction(function)) {
return null;
}
return call.getOperandList().stream()
.map(
op -> {
if (op.getKind() == SqlKind.EXPLICIT_TABLE) {
final SqlBasicCall opCall = (SqlBasicCall) op;
if (opCall.operandCount() == 1
&& opCall.operand(0) instanceof SqlIdentifier) {
return (SqlIdentifier) opCall.operand(0);
}
}
return null;
})
.collect(Collectors.toList());
} | 3.68 |
framework_FilesystemContainer_getItemIds | /*
* Gets the IDs of Items in the filesystem. Don't add a JavaDoc comment
* here, we use the default documentation from implemented interface.
*/
@Override
public Collection<File> getItemIds() {
if (recursive) {
final Collection<File> col = new ArrayList<File>();
for (File root : roots) {
addItemIds(col, root);
}
return Collections.unmodifiableCollection(col);
} else {
File[] f;
if (roots.length == 1) {
if (filter != null) {
f = roots[0].listFiles(filter);
} else {
f = roots[0].listFiles();
}
} else {
f = roots;
}
if (f == null) {
return Collections
.unmodifiableCollection(new LinkedList<File>());
}
final List<File> l = Arrays.asList(f);
Collections.sort(l);
return Collections.unmodifiableCollection(l);
}
} | 3.68 |
flink_PekkoUtils_getConfig | /**
* Creates a pekko config with the provided configuration values. If the listening address is
* specified, then the actor system will listen on the respective address.
*
* @param configuration instance containing the user provided configuration values
* @param externalAddress optional tuple of external address and port to be reachable at. If
* null is given, then a Pekko config for local actor system will be returned
* @param bindAddress optional tuple of bind address and port to be used locally. If null is
* given, wildcard IP address and the external port will be used. Takes effect only if
* externalAddress is not null.
* @param executorConfig config defining the used executor by the default dispatcher
* @return Pekko config
*/
public static Config getConfig(
Configuration configuration,
@Nullable HostAndPort externalAddress,
@Nullable HostAndPort bindAddress,
Config executorConfig) {
final Config defaultConfig =
PekkoUtils.getBasicConfig(configuration).withFallback(executorConfig);
if (externalAddress != null) {
if (bindAddress != null) {
final Config remoteConfig =
PekkoUtils.getRemoteConfig(
configuration,
bindAddress.getHost(),
bindAddress.getPort(),
externalAddress.getHost(),
externalAddress.getPort());
return remoteConfig.withFallback(defaultConfig);
} else {
final Config remoteConfig =
PekkoUtils.getRemoteConfig(
configuration,
NetUtils.getWildcardIPAddress(),
externalAddress.getPort(),
externalAddress.getHost(),
externalAddress.getPort());
return remoteConfig.withFallback(defaultConfig);
}
}
return defaultConfig;
} | 3.68 |
hbase_ExtendedCell_getSerializedSize | /** Returns Serialized size (defaults to include tag length). */
@Override
default int getSerializedSize() {
return getSerializedSize(true);
} | 3.68 |
hadoop_FSSchedulerConfigurationStore_confirmMutation | /**
* @param pendingMutation the log mutation to apply
* @param isValid if true, finalize temp configuration file
* if false, remove temp configuration file and rollback
* @throws Exception if writing the temp configuration file fails
*/
@Override
public void confirmMutation(LogMutation pendingMutation,
boolean isValid) throws Exception {
if (pendingMutation == null || tempConfigPath == null) {
LOG.warn("pendingMutation or tempConfigPath is null, do nothing");
return;
}
if (isValid) {
finalizeFileSystemFile();
long configVersion = getConfigVersion() + 1L;
writeConfigVersion(configVersion);
} else {
schedConf = oldConf;
removeTmpConfigFile();
}
tempConfigPath = null;
} | 3.68 |
framework_TextArea_getRows | /**
* Gets the number of rows in the text area.
*
* @return number of explicitly set rows.
*/
public int getRows() {
return getState(false).rows;
} | 3.68 |
flink_CrossOperator_projectTuple12 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>
ProjectCross<I1, I2, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>
projectTuple12() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> tType =
new TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(
fTypes);
return new ProjectCross<
I1, I2, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hadoop_DefaultNoHARMFailoverProxyProvider_init | /**
* Initialize internal data structures, invoked right after instantiation.
*
* @param conf Configuration to use
* @param proxy The {@link RMProxy} instance to use
* @param protocol The communication protocol to use
*/
@Override
public void init(Configuration conf, RMProxy<T> proxy,
Class<T> protocol) {
this.protocol = protocol;
try {
YarnConfiguration yarnConf = new YarnConfiguration(conf);
InetSocketAddress rmAddress =
proxy.getRMAddress(yarnConf, protocol);
LOG.info("Connecting to ResourceManager at {}", rmAddress);
this.proxy = proxy.getProxy(yarnConf, protocol, rmAddress);
} catch (IOException ioe) {
LOG.error("Unable to create proxy to the ResourceManager ", ioe);
}
} | 3.68 |
hbase_ModifyPeerProcedure_reopenRegions | // will be overridden in tests to simulate errors
protected void reopenRegions(MasterProcedureEnv env) throws IOException {
ReplicationPeerConfig peerConfig = getNewPeerConfig();
ReplicationPeerConfig oldPeerConfig = getOldPeerConfig();
TableStateManager tsm = env.getMasterServices().getTableStateManager();
for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
if (!td.hasGlobalReplicationScope()) {
continue;
}
TableName tn = td.getTableName();
if (!peerConfig.needToReplicate(tn)) {
continue;
}
if (oldPeerConfig != null && oldPeerConfig.isSerial() && oldPeerConfig.needToReplicate(tn)) {
continue;
}
if (needReopen(tsm, tn)) {
addChildProcedure(new ReopenTableRegionsProcedure(tn));
}
}
} | 3.68 |
MagicPlugin_EntityController_handlePlayerDeath | /**
* This death handler fires right away to close the wand inventory before other plugins
* see the drops.
*/
public void handlePlayerDeath(Player player, com.elmakers.mine.bukkit.magic.Mage mage, List<ItemStack> drops, boolean isKeepInventory) {
Wand wand = mage.getActiveWand();
// First, deactivate the active wand.
// If it had a spell inventory open, restore the survival inventory
// If keepInventory is not set, add the survival inventory to drops
if (wand != null) {
// Retrieve stored inventory before deactivating the wand
if (mage.hasStoredInventory()) {
controller.info("** Wand inventory was open, clearing drops: " + drops.size(), 15);
// Remove the wand inventory from drops
drops.clear();
// Deactivate the wand.
wand.deactivate();
// Add restored inventory back to drops
if (!isKeepInventory) {
ItemStack[] stored = player.getInventory().getContents();
for (ItemStack stack : stored) {
if (stack != null) {
// Since armor is not stored in the wand inventory it will be removed from drops
// and added back in, hopefully that causes no issues
drops.add(stack);
}
}
}
controller.info("** Restored inventory added to drops: " + drops.size(), 15);
} else {
wand.deactivate();
}
}
if (isKeepInventory) {
controller.info("** Keep inventory is set,", 15);
return;
}
// The Equip action and other temporary item-giving spells will have given items to the respawn inventory
// on death. Let's take those items out and add them to drops
int dropSize = drops.size();
mage.addRespawnInventories(drops);
mage.restoreRespawnInventories();
dropSize = drops.size() - dropSize;
controller.info("** Dropping " + dropSize + " items that were given on death, drops now: " + drops.size(), 15);
// Now check for undroppable items.
// Remove them from the inventory and drops list, and store them to give back on respawn
// It should be OK if some plugin wants to come in after this and turn keep inventory back on,
// it'll keep the inventory without any of the "keep" items (since we removed them), and hopefully
// Things will merge back together properly in the end.
PlayerInventory inventory = player.getInventory();
ItemStack[] contents = inventory.getContents();
for (int index = 0; index < contents.length; index++) {
ItemStack itemStack = contents[index];
if (itemStack == null || itemStack.getType() == Material.AIR) continue;
// Remove temporary items from inventory and drops
if (CompatibilityLib.getItemUtils().isTemporary(itemStack)) {
ItemStack replacement = CompatibilityLib.getItemUtils().getReplacement(itemStack);
if (!CompatibilityLib.getItemUtils().isEmpty(replacement)) {
drops.add(replacement);
}
drops.remove(itemStack);
controller.info("** Removing temporary item from drops: " + TextUtils.nameItem(itemStack) + " (replaced with " + TextUtils.nameItem(replacement) + ") drops now: " + drops.size(), 15);
contents[index] = null;
continue;
}
// Save "keep" items to return on respawn
boolean keepItem = CompatibilityLib.getNBTUtils().getBoolean(itemStack, "keep", false);
if (!keepItem && keepWandsOnDeath && Wand.isWand(itemStack)) keepItem = true;
if (keepItem) {
mage.addToRespawnInventory(index, itemStack);
contents[index] = null;
drops.remove(itemStack);
controller.info("** Removing keep item from drops: " + TextUtils.nameItem(itemStack) + ChatColor.RESET + ", drops now: " + drops.size(), 15);
} else if (Wand.isSkill(itemStack)) {
drops.remove(itemStack);
contents[index] = null;
controller.info("** Removing skill item from drops: " + TextUtils.nameItem(itemStack) + ChatColor.RESET + ", drops now: " + drops.size(), 15);
}
}
inventory.setContents(contents);
controller.info("** Done processing death with drops remaining: " + drops.size(), 15);
} | 3.68 |
zxing_GeoParsedResult_getLatitude | /**
* @return latitude in degrees
*/
public double getLatitude() {
return latitude;
} | 3.68 |
framework_DragSourceExtension_registerDragSourceRpc | /**
* Registers the server side RPC methods invoked from client side on
* <code>dragstart</code> and <code>dragend</code> events.
* <p>
* Override this method if you have a custom RPC interface for transmitting
* those events with more data. If you just need to do additional things before
* firing the events, then you should override {@link #onDragStart()} and
* {@link #onDragEnd(DropEffect)} instead.
*/
protected void registerDragSourceRpc() {
registerRpc(new DragSourceRpc() {
@Override
public void dragStart() {
onDragStart();
}
@Override
public void dragEnd(DropEffect dropEffect) {
onDragEnd(dropEffect);
}
});
} | 3.68 |
flink_ArrowFieldWriter_write | /** Writes the specified ordinal of the specified row. */
public void write(IN row, int ordinal) {
doWrite(row, ordinal);
count += 1;
} | 3.68 |
flink_TableFactoryService_find | /**
* Finds a table factory of the given class, property map, and classloader.
*
* @param factoryClass desired factory class
* @param propertyMap properties that describe the factory configuration
* @param classLoader classloader for service loading
* @param <T> factory class type
* @return the matching factory
*/
public static <T extends TableFactory> T find(
Class<T> factoryClass, Map<String, String> propertyMap, ClassLoader classLoader) {
Preconditions.checkNotNull(classLoader);
return findSingleInternal(factoryClass, propertyMap, Optional.of(classLoader));
} | 3.68 |
flink_StopWithSavepoint_onSavepointFailure | /**
* Restarts the checkpoint scheduler and, if only the savepoint failed without a task failure /
* job termination, transitions back to {@link Executing}.
*
* <p>This method must assume that {@link #onFailure}/{@link #onGloballyTerminalState} MAY
* already be waiting for the savepoint operation to complete, itching to trigger a state
* transition (hence the {@link #hasPendingStateTransition} check).
*
* <p>If the above is violated (e.g., by always transitioning into another state), then
* depending on other implementation details something very bad will happen, like the scheduler
* crashing the JVM because it attempted multiple state transitions OR effectively dropping the
* onFailure/onGloballyTerminalState call OR we trigger state transitions while we are already
* in another state.
*
* <p>For maintainability reasons this method should not mutate any state that affects state
* transitions in other methods.
*/
private void onSavepointFailure(Throwable cause) {
// revert side-effect of Executing#stopWithSavepoint
checkpointScheduling.startCheckpointScheduler();
// a task failed concurrently; defer the error handling to onFailure()
// otherwise we will attempt 2 state transitions, which is forbidden
if (!hasPendingStateTransition) {
operationFailureCause = cause;
context.goToExecuting(
getExecutionGraph(),
getExecutionGraphHandler(),
getOperatorCoordinatorHandler(),
getFailures());
}
} | 3.68 |
hadoop_NamenodeStatusReport_getBlockPoolId | /**
* Get the block pool identifier.
*
* @return The block pool identifier.
*/
public String getBlockPoolId() {
return this.blockPoolId;
} | 3.68 |
hbase_ValueFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ValueFilter)) {
return false;
}
return super.areSerializedFieldsEqual(o);
} | 3.68 |
querydsl_AbstractSQLClause_endContext | /**
* Called to end a SQL listener context
*
* @param context the listener context to end
*/
protected void endContext(SQLListenerContextImpl context) {
listeners.end(context);
this.context = null;
} | 3.68 |
hudi_BaseHoodieLogRecordReader_getProgress | /**
* Return progress of scanning as a float between 0.0 to 1.0.
*/
public float getProgress() {
return progress;
} | 3.68 |
hudi_ParquetUtils_readRangeFromParquetMetadata | /**
* Parse min/max statistics stored in parquet footers for all columns.
*/
@SuppressWarnings("rawtype")
public List<HoodieColumnRangeMetadata<Comparable>> readRangeFromParquetMetadata(
@Nonnull Configuration conf,
@Nonnull Path parquetFilePath,
@Nonnull List<String> cols
) {
ParquetMetadata metadata = readMetadata(conf, parquetFilePath);
// NOTE: This collector has to have fully specialized generic type params since
// Java 1.8 struggles to infer them
Collector<HoodieColumnRangeMetadata<Comparable>, ?, Map<String, List<HoodieColumnRangeMetadata<Comparable>>>> groupingByCollector =
Collectors.groupingBy(HoodieColumnRangeMetadata::getColumnName);
// Collect stats from all individual Parquet blocks
Map<String, List<HoodieColumnRangeMetadata<Comparable>>> columnToStatsListMap =
(Map<String, List<HoodieColumnRangeMetadata<Comparable>>>) metadata.getBlocks().stream().sequential()
.flatMap(blockMetaData ->
blockMetaData.getColumns().stream()
.filter(f -> cols.contains(f.getPath().toDotString()))
.map(columnChunkMetaData -> {
Statistics stats = columnChunkMetaData.getStatistics();
return HoodieColumnRangeMetadata.<Comparable>create(
parquetFilePath.getName(),
columnChunkMetaData.getPath().toDotString(),
convertToNativeJavaType(
columnChunkMetaData.getPrimitiveType(),
stats.genericGetMin()),
convertToNativeJavaType(
columnChunkMetaData.getPrimitiveType(),
stats.genericGetMax()),
// NOTE: In case when column contains only nulls Parquet won't be creating
// stats for it instead returning stubbed (empty) object. In that case
// we have to equate number of nulls to the value count ourselves
stats.isEmpty() ? columnChunkMetaData.getValueCount() : stats.getNumNulls(),
columnChunkMetaData.getValueCount(),
columnChunkMetaData.getTotalSize(),
columnChunkMetaData.getTotalUncompressedSize());
})
)
.collect(groupingByCollector);
// Combine those into file-level statistics
// NOTE: Inlining this var makes javac (1.8) upset (due to its inability to infer
// expression type correctly)
Stream<HoodieColumnRangeMetadata<Comparable>> stream = columnToStatsListMap.values()
.stream()
.map(this::getColumnRangeInFile);
return stream.collect(Collectors.toList());
} | 3.68 |
hadoop_CommitUtilsWithMR_getAppAttemptId | /**
* Get the Application Attempt ID for this job.
* @param context the context to look in
* @return the Application Attempt ID for a given job, or 0
*/
public static int getAppAttemptId(JobContext context) {
return context.getConfiguration().getInt(
MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
} | 3.68 |
hudi_RequestHandler_syncIfLocalViewBehind | /**
* Syncs data-set view if local view is behind.
*/
private boolean syncIfLocalViewBehind(Context ctx) {
String basePath = ctx.queryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM);
SyncableFileSystemView view = viewManager.getFileSystemView(basePath);
synchronized (view) {
if (isLocalViewBehind(ctx)) {
String lastKnownInstantFromClient = ctx.queryParamAsClass(
RemoteHoodieTableFileSystemView.LAST_INSTANT_TS, String.class)
.getOrDefault(HoodieTimeline.INVALID_INSTANT_TS);
HoodieTimeline localTimeline = viewManager.getFileSystemView(basePath).getTimeline();
LOG.info("Syncing view as client passed last known instant " + lastKnownInstantFromClient
+ " as last known instant but server has the following last instant on timeline :"
+ localTimeline.lastInstant());
view.sync();
return true;
}
}
return false;
} | 3.68 |
hadoop_InterruptEscalator_register | /**
* Register an interrupt handler.
* @param signalName signal name
* @throws IllegalArgumentException if the registration failed
*/
public synchronized void register(String signalName) {
IrqHandler handler = new IrqHandler(signalName, this);
handler.bind();
interruptHandlers.add(handler);
} | 3.68 |
morf_HumanReadableStatementHelper_generateBinaryOperatorString | /**
* Generates a string describing a binary criterion operator.
*
* @param criterion the item to describe.
* @param operator the string operator to separate the first and second parameter, not including spaces.
* @return the string.
*/
private static String generateBinaryOperatorString(final Criterion criterion, final String operator) {
final String left = paren(generateFieldSymbolString(criterion.getField()), criterion.getField());
final String right = generateCriterionValueString(criterion.getValue());
return String.format("%s %s %s", left, operator, right);
} | 3.68 |
MagicPlugin_Currency_getMinValue | /**
* Get the minimum value for this currency.
* Player balances will be capped to this value.
* Only has an effect if @hasMinValue returns true.
*
* @return The minimum for player balances of this currency, typically 0 if set
*/
default double getMinValue() { return 0; } | 3.68 |
hbase_Mutation_heapSize | /** Returns Calculate what Mutation adds to class heap size. */
@Override
public long heapSize() {
long heapsize = MUTATION_OVERHEAD;
// Adding row
heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length);
// Adding map overhead
heapsize += ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY);
for (Map.Entry<byte[], List<Cell>> entry : getFamilyCellMap().entrySet()) {
// Adding key overhead
heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length);
// This part is kind of tricky since the JVM can reuse references if you
// store the same value, but have a good match with SizeOf at the moment
// Adding value overhead
heapsize += ClassSize.align(ClassSize.ARRAYLIST);
int size = entry.getValue().size();
heapsize += ClassSize.align(ClassSize.ARRAY + size * ClassSize.REFERENCE);
for (Cell cell : entry.getValue()) {
heapsize += cell.heapSize();
}
}
heapsize += getAttributeSize();
heapsize += extraHeapSize();
return ClassSize.align(heapsize);
} | 3.68 |
flink_ParameterTool_getNumberOfParameters | /** Returns number of parameters in {@link ParameterTool}. */
@Override
public int getNumberOfParameters() {
return data.size();
} | 3.68 |
morf_SqlDialect_getSqlForCount | /**
* Converts the count function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForCount(Function function) {
return "COUNT(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
hbase_SimpleRpcServer_addConnection | /**
* Updating the readSelector while it's being used is not thread-safe, so the connection must
* be queued. The reader will drain the queue and update its readSelector before performing
* the next select
*/
public void addConnection(SimpleServerRpcConnection conn) throws IOException {
pendingConnections.add(conn);
readSelector.wakeup();
} | 3.68 |
framework_VComboBox_updatePopupPositionOnScroll | /**
* Make the popup follow the position of the ComboBox when the page is
* scrolled.
*/
private void updatePopupPositionOnScroll() {
if (!scrollPending) {
AnimationScheduler.get().requestAnimationFrame(timestamp -> {
if (isShowing()) {
leftPosition = getDesiredLeftPosition();
topPosition = getDesiredTopPosition();
setPopupPosition(leftPosition, topPosition);
}
scrollPending = false;
});
scrollPending = true;
}
} | 3.68 |
flink_StreamTask_processInput | /**
* This method implements the default action of the task (e.g. processing one event from the
* input). Implementations should (in general) be non-blocking.
*
* @param controller controller object for collaborative interaction between the action and the
* stream task.
* @throws Exception on any problems in the action.
*/
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
DataInputStatus status = inputProcessor.processInput();
switch (status) {
case MORE_AVAILABLE:
if (taskIsAvailable()) {
return;
}
break;
case NOTHING_AVAILABLE:
break;
case END_OF_RECOVERY:
throw new IllegalStateException("We should not receive this event here.");
case STOPPED:
endData(StopMode.NO_DRAIN);
return;
case END_OF_DATA:
endData(StopMode.DRAIN);
notifyEndOfData();
return;
case END_OF_INPUT:
// Suspend the mailbox processor, it would be resumed in afterInvoke and finished
// after all records processed by the downstream tasks. We also suspend the default
// actions to avoid repeat executing the empty default operation (namely process
// records).
controller.suspendDefaultAction();
mailboxProcessor.suspend();
return;
}
TaskIOMetricGroup ioMetrics = getEnvironment().getMetricGroup().getIOMetricGroup();
PeriodTimer timer;
CompletableFuture<?> resumeFuture;
if (!recordWriter.isAvailable()) {
timer = new GaugePeriodTimer(ioMetrics.getSoftBackPressuredTimePerSecond());
resumeFuture = recordWriter.getAvailableFuture();
} else if (!inputProcessor.isAvailable()) {
timer = new GaugePeriodTimer(ioMetrics.getIdleTimeMsPerSecond());
resumeFuture = inputProcessor.getAvailableFuture();
} else if (changelogWriterAvailabilityProvider != null
&& !changelogWriterAvailabilityProvider.isAvailable()) {
// waiting for changelog availability is reported as busy
timer = new GaugePeriodTimer(ioMetrics.getChangelogBusyTimeMsPerSecond());
resumeFuture = changelogWriterAvailabilityProvider.getAvailableFuture();
} else {
// data availability has changed in the meantime; retry immediately
return;
}
assertNoException(
resumeFuture.thenRun(
new ResumeWrapper(controller.suspendDefaultAction(timer), timer)));
} | 3.68 |
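The tail of this method picks the first unavailable resource, times how long the task waits on it, and resumes the default action once it becomes available. The sketch below shows that decision logic in isolation with plain JDK types; Resource, pauseUntilAvailable and the counter names are assumptions for illustration, not Flink API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;

// Standalone sketch: attribute the stall to back pressure or idleness depending on
// which resource is unavailable, and resume when its availability future completes.
public class AvailabilitySketch {
  interface Resource {
    boolean isAvailable();
    CompletableFuture<?> getAvailableFuture();
  }

  static void pauseUntilAvailable(Resource output, Resource input, Runnable resume,
                                  AtomicLong backPressuredNanos, AtomicLong idleNanos) {
    final AtomicLong counter;
    final CompletableFuture<?> future;
    if (!output.isAvailable()) {       // downstream cannot take more records: back pressure
      counter = backPressuredNanos;
      future = output.getAvailableFuture();
    } else if (!input.isAvailable()) { // nothing to read yet: idle time
      counter = idleNanos;
      future = input.getAvailableFuture();
    } else {
      resume.run();                    // availability changed in the meantime; retry immediately
      return;
    }
    final long start = System.nanoTime();
    future.thenRun(() -> {
      counter.addAndGet(System.nanoTime() - start);
      resume.run();
    });
  }
}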
hbase_HFileOutputFormat2_configurePartitioner | /**
* Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
   * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
*/
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints,
boolean writeMultipleTables) throws IOException {
Configuration conf = job.getConfiguration();
// create the partitions file
FileSystem fs = FileSystem.get(conf);
String hbaseTmpFsDir =
conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
fs.makeQualified(partitionsPath);
writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables);
fs.deleteOnExit(partitionsPath);
// configure job to use it
job.setPartitionerClass(TotalOrderPartitioner.class);
TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
} | 3.68 |
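A hedged sketch of one way a caller might assemble the splitPoints argument, assuming the HBase client classes are on the classpath; the hard-coded keys and the helper name are illustrative. TotalOrderPartitioner expects the points sorted and de-duplicated, which the TreeSet guarantees.

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

// Builds a sorted, de-duplicated list of split points from region start keys.
public class SplitPointsSketch {
  static List<ImmutableBytesWritable> toSplitPoints(String... startKeys) {
    TreeSet<ImmutableBytesWritable> sorted = new TreeSet<>();
    for (String key : startKeys) {
      if (!key.isEmpty()) { // the first region's empty start key is not a split point
        sorted.add(new ImmutableBytesWritable(Bytes.toBytes(key)));
      }
    }
    return new ArrayList<>(sorted);
  }

  public static void main(String[] args) {
    System.out.println(toSplitPoints("", "row-1000", "row-2000").size()); // 2
  }
}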
MagicPlugin_Mage_getProjectileWand | // Gets the main hand wand if it is a bow or crossbow, otherwise gets the offhand wand
public Wand getProjectileWand() {
Wand wand = getActiveWand();
if (wand != null) {
Material wandIcon = wand.getIcon().getMaterial();
if (!DefaultMaterials.isBow(wandIcon)) {
wand = null;
}
}
if (wand == null) {
wand = getOffhandWand();
if (wand != null) {
Material wandIcon = wand.getIcon().getMaterial();
if (!DefaultMaterials.isBow(wandIcon)) {
wand = null;
}
}
}
return wand;
} | 3.68 |
flink_FutureCompletingBlockingQueue_isEmpty | /** Checks whether the queue is empty. */
public boolean isEmpty() {
lock.lock();
try {
return queue.isEmpty();
} finally {
lock.unlock();
}
} | 3.68 |
flink_CheckpointFailureManager_handleJobLevelCheckpointException | /**
* Handle job level checkpoint exception with a handler callback.
*
* @param exception the checkpoint exception.
     * @param checkpointId the failed checkpoint id, used to count the number of continuous
     *     failures based on the checkpoint id sequence. In the trigger phase we may not have a
     *     checkpoint id yet if the failure happens before the id is generated; in that case the
     *     negative of the latest generated checkpoint id is passed as a special flag.
*/
void handleJobLevelCheckpointException(
CheckpointProperties checkpointProperties,
CheckpointException exception,
long checkpointId) {
if (!checkpointProperties.isSavepoint()) {
checkFailureAgainstCounter(exception, checkpointId, failureCallback::failJob);
}
} | 3.68 |
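A simplified standalone sketch of the counting policy described in the Javadoc: savepoint failures never count, a success resets the streak, and the job-failing callback fires once the tolerable number is exceeded. The class, method and threshold names are assumptions; the real manager also keys the count on the checkpoint id sequence, which this sketch omits.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

// Counts consecutive non-savepoint checkpoint failures against a fixed threshold.
public class FailureCounterSketch {
  private final int tolerableFailures;
  private final AtomicInteger consecutiveFailures = new AtomicInteger();

  FailureCounterSketch(int tolerableFailures) {
    this.tolerableFailures = tolerableFailures;
  }

  void onCheckpointSuccess() {
    consecutiveFailures.set(0); // a completed checkpoint resets the streak
  }

  void onCheckpointFailure(boolean isSavepoint, Consumer<String> failJob) {
    if (isSavepoint) {
      return; // savepoint failures do not count toward the limit
    }
    if (consecutiveFailures.incrementAndGet() > tolerableFailures) {
      failJob.accept("Exceeded checkpoint tolerable failure threshold.");
    }
  }
}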
framework_VAbstractSplitPanel_showDraggingCurtain | /**
     * Used in Firefox to avoid losing mouse capture when the pointer is moved
     * over an iframe.
*/
private void showDraggingCurtain() {
if (!isDraggingCurtainRequired()) {
return;
}
if (draggingCurtain == null) {
draggingCurtain = DOM.createDiv();
draggingCurtain.getStyle().setPosition(Position.ABSOLUTE);
draggingCurtain.getStyle().setTop(0, Unit.PX);
draggingCurtain.getStyle().setLeft(0, Unit.PX);
draggingCurtain.getStyle().setWidth(100, Unit.PCT);
draggingCurtain.getStyle().setHeight(100, Unit.PCT);
draggingCurtain.getStyle().setZIndex(Overlay.Z_INDEX);
DOM.appendChild(wrapper, draggingCurtain);
}
} | 3.68 |
framework_UIConnector_removeStylesheet | /**
* Internal helper for removing any stylesheet with the given URL
*
* @since 7.3
* @param url
* the url to match with existing stylesheets
*/
private void removeStylesheet(String url) {
NodeList<Element> linkTags = getHead()
.getElementsByTagName(LinkElement.TAG);
for (int i = 0; i < linkTags.getLength(); i++) {
LinkElement link = LinkElement.as(linkTags.getItem(i));
if (!"stylesheet".equals(link.getRel())) {
continue;
}
if (!"text/css".equals(link.getType())) {
continue;
}
if (url.equals(link.getHref())) {
getHead().removeChild(link);
}
}
} | 3.68 |
querydsl_AliasFactory_getCurrent | /**
* Get the current thread bound expression without resetting it
*
* @param <A>
* @return expression
*/
@SuppressWarnings("unchecked")
@Nullable
public <A extends Expression<?>> A getCurrent() {
return (A) current.get();
} | 3.68 |
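A minimal sketch of a thread-bound "current value" holder of the kind backing this factory, assuming a plain ThreadLocal underneath (the class and the reset variant are invented names for illustration). The point of interest is that getCurrent() only peeks: clearing the slot has to be a separate operation.

// Thread-bound current-value holder: reading does not reset the slot.
public class CurrentHolderSketch<T> {
  private final ThreadLocal<T> current = new ThreadLocal<>();

  public void set(T value) {
    current.set(value);
  }

  public T getCurrent() {
    return current.get(); // peek without resetting
  }

  public T getCurrentAndReset() { // read-then-clear variant
    T value = current.get();
    current.remove();
    return value;
  }
}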
hbase_AccessControlUtil_toUserPermission | /**
   * Convert a protobuf UserTablePermissions to a ListMultimap<Username, UserPermission>
* @param proto the proto UsersAndPermissions
* @return a ListMultimap with user and its permissions
*/
public static ListMultimap<String, UserPermission>
toUserPermission(AccessControlProtos.UsersAndPermissions proto) {
ListMultimap<String, UserPermission> userPermission = ArrayListMultimap.create();
AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
userPerm = proto.getUserPermissions(i);
String username = userPerm.getUser().toStringUtf8();
for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
userPermission.put(username,
new UserPermission(username, toPermission(userPerm.getPermissions(j))));
}
}
return userPermission;
} | 3.68 |
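The result shape is "user -> list of permissions", which a plain Guava multimap reproduces directly. The sketch below uses the unshaded com.google.common imports and string values purely for illustration; it is not the HBase conversion itself.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;

// Grouping (user, permission) pairs the same way the conversion above does.
public class PermissionMultimapSketch {
  public static void main(String[] args) {
    ListMultimap<String, String> byUser = ArrayListMultimap.create();
    byUser.put("alice", "READ");
    byUser.put("alice", "WRITE");
    byUser.put("bob", "READ");
    System.out.println(byUser.get("alice")); // [READ, WRITE]
  }
}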
morf_HumanReadableStatementHelper_generateLiteral | /**
* Generates a string with a literal value. If the value is numeric then it is unquoted, otherwise it
* is surrounded by single quote characters.
*
* @param value the value to process.
* @return the string form, created by the value's {@link Object#toString} method with quotes if necessary.
*/
private static String generateLiteral(final Object value) {
if (value == null) {
return "null";
} else if (value instanceof Number) {
return value.toString();
} else {
return "'" + value + "'";
}
} | 3.68 |
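Since the method is private to HumanReadableStatementHelper, the sketch below re-implements the same quoting rule in a standalone class just to show the outputs for the three cases the Javadoc describes.

// Standalone re-implementation of the quoting rule, for illustration only.
public class LiteralSketch {
  static String generateLiteral(Object value) {
    if (value == null) {
      return "null";
    } else if (value instanceof Number) {
      return value.toString();
    } else {
      return "'" + value + "'";
    }
  }

  public static void main(String[] args) {
    System.out.println(generateLiteral(null));   // null
    System.out.println(generateLiteral(42));     // 42
    System.out.println(generateLiteral("text")); // 'text'
  }
}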
hadoop_AbfsInputStreamStatisticsImpl_remoteReadOperation | /**
* {@inheritDoc}
*
* Increment the counter when a remote read operation occurs.
*/
@Override
public void remoteReadOperation() {
ioStatisticsStore.incrementCounter(StreamStatisticNames.REMOTE_READ_OP);
} | 3.68 |
framework_ApplicationRunnerServlet_getApplicationRunnerURIs | /**
* Parses application runner URIs.
*
* If request URL is e.g.
* http://localhost:8080/vaadin/run/com.vaadin.demo.Calc then
* <ul>
* <li>context=vaadin</li>
* <li>Runner servlet=run</li>
* <li>Vaadin application=com.vaadin.demo.Calc</li>
* </ul>
*
* @param request
* @return string array containing widgetset URI, application URI and
* context, runner, application classname
*/
private static URIS getApplicationRunnerURIs(HttpServletRequest request) {
final String[] urlParts = request.getRequestURI().split("\\/");
// String runner = null;
URIS uris = new URIS();
String applicationClassname = null;
String contextPath = request.getContextPath();
if (urlParts[1].equals(contextPath.replaceAll("\\/", ""))) {
// class name comes after web context and runner application
// runner = urlParts[2];
if (urlParts.length == 3) {
throw new ApplicationRunnerRedirectException(
findLastModifiedApplication());
} else {
applicationClassname = urlParts[3];
}
// uris.applicationURI = "/" + context + "/" + runner + "/"
// + applicationClassname;
// uris.context = context;
// uris.runner = runner;
uris.applicationClassname = applicationClassname;
} else {
// no context
// runner = urlParts[1];
if (urlParts.length == 2) {
throw new ApplicationRunnerRedirectException(
findLastModifiedApplication());
} else {
applicationClassname = urlParts[2];
}
// uris.applicationURI = "/" + runner + "/" + applicationClassname;
// uris.context = context;
// uris.runner = runner;
uris.applicationClassname = applicationClassname;
}
return uris;
} | 3.68 |
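A quick standalone check of the path-splitting assumption in the Javadoc example: splitting "/vaadin/run/com.vaadin.demo.Calc" on '/' yields an empty first element, so with a context path present the class name sits at index 3, matching urlParts[3] above. The class name here is invented for the sketch.

// Verifies where the application class name lands after splitting the request URI.
public class UriPartsSketch {
  public static void main(String[] args) {
    String[] parts = "/vaadin/run/com.vaadin.demo.Calc".split("\\/");
    System.out.println(parts.length); // 4
    System.out.println(parts[3]);     // com.vaadin.demo.Calc
  }
}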
flink_AbstractParameterTool_getDouble | /**
     * Returns the Double value for the given key. If the key does not exist it will return the
* default value given. The method fails if the value is not a Double.
*/
public double getDouble(String key, double defaultValue) {
addToDefaults(key, Double.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Double.valueOf(value);
}
} | 3.68 |
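A short usage sketch of the default-value overload through Flink's ParameterTool (which extends the abstract class above); the argument names --rate and --timeout are made up for the example.

import org.apache.flink.api.java.utils.ParameterTool;

// --rate is supplied on the command line, --timeout is not.
public class GetDoubleExample {
  public static void main(String[] args) {
    ParameterTool params = ParameterTool.fromArgs(new String[] {"--rate", "1.5"});
    double rate = params.getDouble("rate", 0.0);        // 1.5, parsed from the argument
    double timeout = params.getDouble("timeout", 30.0); // 30.0, the registered default
    System.out.println(rate + " " + timeout);
  }
}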
hbase_Address_getHostname | /**
* @deprecated Use {@link #getHostName()} instead
*/
@Deprecated
public String getHostname() {
return this.hostAndPort.getHost();
} | 3.68 |
hadoop_OBSFileSystem_createNonRecursive | /**
* Open an FSDataOutputStream at the indicated Path with write-progress
* reporting. Same as create(), except fails if parent directory doesn't
* already exist.
*
* @param path the file path to create
* @param permission file permission
* @param flags {@link CreateFlag}s to use for this stream
* @param bufferSize the size of the buffer to be used
* @param replication required block replication for the file
* @param blkSize block size
* @param progress the progress reporter
* @throws IOException IO failure
*/
@Override
public FSDataOutputStream createNonRecursive(
final Path path,
final FsPermission permission,
final EnumSet<CreateFlag> flags,
final int bufferSize,
final short replication,
final long blkSize,
final Progressable progress)
throws IOException {
Path parent = path.getParent();
if (parent != null && !getFileStatus(parent).isDirectory()) {
// expect this to raise an exception if there is no parent
throw new FileAlreadyExistsException("Not a directory: " + parent);
}
return create(
path,
permission,
flags.contains(CreateFlag.OVERWRITE),
bufferSize,
replication,
blkSize,
progress);
} | 3.68 |
framework_VAbsoluteLayout_updateStyleNames | /**
* Updates the style names using the primary style name as prefix.
*/
protected void updateStyleNames() {
setStyleName(
VAbsoluteLayout.this.getStylePrimaryName() + "-wrapper");
if (extraStyleNames != null) {
for (String stylename : extraStyleNames) {
addStyleDependentName(stylename);
}
}
} | 3.68 |
framework_ShortcutAction_setModifiers | /**
* When setting modifiers, make sure that modifiers is a valid array AND
* that it's sorted.
*
* @param modifiers
* the modifier keys for this shortcut
*/
private void setModifiers(int... modifiers) {
if (modifiers == null) {
this.modifiers = new int[0];
} else {
this.modifiers = modifiers;
}
Arrays.sort(this.modifiers);
} | 3.68 |
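Why the sort matters: a sorted copy makes comparisons independent of the order in which the caller listed the modifier keys, since Arrays.equals is positional. The key codes below are placeholders, not real shortcut constants.

import java.util.Arrays;

// Sorting both arrays makes the comparison order-independent.
public class ModifierSortSketch {
  public static void main(String[] args) {
    int[] a = {17, 18}; // e.g. CTRL then ALT, as hypothetical key codes
    int[] b = {18, 17}; // same keys, different order
    Arrays.sort(a);
    Arrays.sort(b);
    System.out.println(Arrays.equals(a, b)); // true once both are sorted
  }
}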
dubbo_ClassGenerator_toClass | /**
     * @param neighbor A class belonging to the same package as the generated
     *                 class. It is used to load the class.
*/
public Class<?> toClass(Class<?> neighbor) {
return toClass(neighbor, mClassLoader, getClass().getProtectionDomain());
} | 3.68 |
hadoop_HdfsFileStatus_locations | /**
* Set the block locations for this entity (default = null).
* @param locations HDFS locations
* (see {@link HdfsLocatedFileStatus#makeQualifiedLocated(URI, Path)})
* @return This Builder instance
*/
public Builder locations(LocatedBlocks locations) {
this.locations = locations;
return this;
} | 3.68 |
framework_DateCell_getOverlappingEvents | /**
     * Returns all overlapping DayEvent indexes in the group, including the
     * target.
     *
     * @param targetIndex
     *            index of the DayEvent in the current DateCell widget
     * @return group that contains all overlapping DayEvent indexes
*/
public DateCellGroup getOverlappingEvents(int targetIndex) {
DateCellGroup g = new DateCellGroup(targetIndex);
int count = getWidgetCount();
DateCellDayEvent target = (DateCellDayEvent) getWidget(targetIndex);
WeekGridMinuteTimeRange targetRange = new WeekGridMinuteTimeRange(
target.getCalendarEvent().getStartTime(),
target.getCalendarEvent().getEndTime());
Date groupStart = targetRange.getStart();
Date groupEnd = targetRange.getEnd();
for (int i = 0; i < count; i++) {
if (targetIndex == i) {
continue;
}
DateCellDayEvent d = (DateCellDayEvent) getWidget(i);
WeekGridMinuteTimeRange nextRange = new WeekGridMinuteTimeRange(
d.getCalendarEvent().getStartTime(),
d.getCalendarEvent().getEndTime());
if (WeekGridMinuteTimeRange.doesOverlap(targetRange, nextRange)) {
g.add(i);
                // Expand the group range to the earliest start and latest end seen so far
                if (nextRange.getStart().before(groupStart)) {
                    groupStart = nextRange.getStart();
                }
                if (nextRange.getEnd().after(groupEnd)) {
                    groupEnd = nextRange.getEnd();
                }
}
}
g.setDateRange(new WeekGridMinuteTimeRange(groupStart, groupEnd));
return g;
} | 3.68 |
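A standalone sketch of the interval test that drives the grouping above, using plain longs instead of Date and assuming closed-open semantics (WeekGridMinuteTimeRange's exact boundary handling may differ): two ranges overlap when each one starts before the other ends.

// Closed-open interval overlap check.
public class OverlapSketch {
  static boolean overlaps(long start1, long end1, long start2, long end2) {
    return start1 < end2 && start2 < end1;
  }

  public static void main(String[] args) {
    System.out.println(overlaps(9, 11, 10, 12)); // true: 10:00-12:00 overlaps 9:00-11:00
    System.out.println(overlaps(9, 10, 10, 12)); // false: back-to-back events do not overlap
  }
}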
hudi_TimelineDiffHelper_getPendingCompactionTransitions | /**
   * Gets the pending compaction transitions between the old and new timelines.
*/
private static List<Pair<HoodieInstant, HoodieInstant>> getPendingCompactionTransitions(HoodieTimeline oldTimeline,
HoodieTimeline newTimeline) {
Set<HoodieInstant> newTimelineInstants = newTimeline.getInstantsAsStream().collect(Collectors.toSet());
return oldTimeline.filterPendingCompactionTimeline().getInstantsAsStream().map(instant -> {
if (newTimelineInstants.contains(instant)) {
return Pair.of(instant, instant);
} else {
HoodieInstant compacted =
new HoodieInstant(State.COMPLETED, HoodieTimeline.COMMIT_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(compacted)) {
return Pair.of(instant, compacted);
}
HoodieInstant inflightCompacted =
new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(inflightCompacted)) {
return Pair.of(instant, inflightCompacted);
}
return Pair.<HoodieInstant, HoodieInstant>of(instant, null);
}
}).collect(Collectors.toList());
} | 3.68 |
querydsl_SQLExpressions_regrCount | /**
* REGR_COUNT returns an integer that is the number of non-null number pairs used to fit the regression line.
*
* @param arg1 first arg
* @param arg2 second arg
* @return regr_count(arg1, arg2)
*/
public static WindowOver<Double> regrCount(Expression<? extends Number> arg1, Expression<? extends Number> arg2) {
return new WindowOver<Double>(Double.class, SQLOps.REGR_COUNT, arg1, arg2);
} | 3.68 |
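A hedged usage sketch showing regr_count as a window function partitioned by a column; the paths are built ad hoc here for illustration, whereas real code would use generated query types, and the column names are assumptions.

import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.NumberPath;
import com.querydsl.sql.SQLExpressions;

// Builds regr_count(sales, spend) over (partition by region_id); the expression would
// normally go straight into a query's select clause.
public class RegrCountSketch {
  public static void main(String[] args) {
    NumberPath<Double> sales = Expressions.numberPath(Double.class, "sales");
    NumberPath<Double> spend = Expressions.numberPath(Double.class, "spend");
    NumberPath<Long> regionId = Expressions.numberPath(Long.class, "region_id");

    System.out.println(SQLExpressions.regrCount(sales, spend).over().partitionBy(regionId));
  }
}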