name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_PlacementConstraints_delayedOr | /**
* Creates a composite constraint that includes a list of timed placement
 * constraints. The scheduler should first try to satisfy the first timed
* child constraint within the specified time window. If this is not possible,
* it should attempt to satisfy the second, and so on.
*
* @param children the timed children constraints
* @return the resulting composite constraint
*/
public static DelayedOr delayedOr(TimedPlacementConstraint... children) {
return new DelayedOr(children);
} | 3.68 |
hadoop_WasbRemoteCallHelper_makeRemoteRequest | /**
* Helper method to make remote HTTP Get request.
*
* @param urls - Service urls to be used, if one fails try another.
* @param path - URL endpoint for the resource.
* @param queryParams - list of query parameters
* @param httpMethod - http Method to be used.
* @return Http Response body returned as a string. The caller
* is expected to semantically understand the response.
 * @throws IOException when there is an error in executing the remote http request.
*/
public String makeRemoteRequest(String[] urls, String path,
List<NameValuePair> queryParams, String httpMethod) throws IOException {
return retryableRequest(urls, path, queryParams, httpMethod);
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_updateFromWriteStatuses | /**
* Update from {@code HoodieCommitMetadata}.
*
 * @param commitMetadata {@code HoodieCommitMetadata}
 * @param writeStatus write statuses of the records written as part of the commit
 * @param instantTime Timestamp at which the commit was performed
*/
@Override
public void updateFromWriteStatuses(HoodieCommitMetadata commitMetadata, HoodieData<WriteStatus> writeStatus, String instantTime) {
processAndCommit(instantTime, () -> {
Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionToRecordMap =
HoodieTableMetadataUtil.convertMetadataToRecords(engineContext, commitMetadata, instantTime, getRecordsGenerationParams());
// Updates for record index are created by parsing the WriteStatus which is a hudi-client object. Hence, we cannot yet move this code
// to the HoodieTableMetadataUtil class in hudi-common.
HoodieData<HoodieRecord> updatesFromWriteStatuses = getRecordIndexUpdates(writeStatus);
HoodieData<HoodieRecord> additionalUpdates = getRecordIndexAdditionalUpdates(updatesFromWriteStatuses, commitMetadata);
partitionToRecordMap.put(RECORD_INDEX, updatesFromWriteStatuses.union(additionalUpdates));
updateFunctionalIndexIfPresent(commitMetadata, instantTime, partitionToRecordMap);
return partitionToRecordMap;
});
closeInternal();
} | 3.68 |
hbase_RemoteProcedureException_unwrapRemoteIOException | // NOTE: Does not throw DoNotRetryIOE because it does not
// have access (DNRIOE is in the client module). Use
// MasterProcedureUtil.unwrapRemoteIOException if need to
// throw DNRIOE.
public IOException unwrapRemoteIOException() {
final Exception cause = unwrapRemoteException();
if (cause instanceof IOException) {
return (IOException) cause;
}
return new IOException(cause);
} | 3.68 |
pulsar_ManagedLedgerConfig_getMinimumBacklogEntriesForCaching | /**
 * Minimum backlog that should exist to leverage caching for backlog reads.
 *
 * @return the minimum number of backlog entries required to enable caching for backlog reads
*/
public int getMinimumBacklogEntriesForCaching() {
return minimumBacklogEntriesForCaching;
} | 3.68 |
flink_JobVertex_getPreferredResources | /**
* Gets the preferred resource for the task.
*
* @return The preferred resource for the task.
*/
public ResourceSpec getPreferredResources() {
return preferredResources;
} | 3.68 |
framework_MouseEvents_isShiftKey | /**
* Checks if the Shift key was down when the mouse event took place.
*
* @return true if Shift was pressed when the event occurred, false
* otherwise
*/
public boolean isShiftKey() {
return details.isShiftKey();
} | 3.68 |
hbase_IPCUtil_write | /**
* Write out header, param, and cell block if there is one.
* @param dos Stream to write into
* @param header to write
* @param param to write
* @param cellBlock to write
* @return Total number of bytes written.
* @throws IOException if write action fails
*/
public static int write(final OutputStream dos, final Message header, final Message param,
final ByteBuf cellBlock) throws IOException {
    // Must calculate total size and write that first so the other side can read it all in one
// swoop. This is dictated by how the server is currently written. Server needs to change
// if we are to be able to write without the length prefixing.
int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(header, param);
if (cellBlock != null) {
totalSize += cellBlock.readableBytes();
}
return write(dos, header, param, cellBlock, totalSize);
} | 3.68 |
hbase_ByteBufferUtils_skip | /**
* Increment position in buffer.
* @param buffer In this buffer.
* @param length By that many bytes.
*/
public static void skip(ByteBuffer buffer, int length) {
buffer.position(buffer.position() + length);
} | 3.68 |
flink_PartitionTable_startTrackingPartitions | /** Starts the tracking of the given partition for the given key. */
public void startTrackingPartitions(K key, Collection<ResultPartitionID> newPartitionIds) {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(newPartitionIds);
if (newPartitionIds.isEmpty()) {
return;
}
trackedPartitionsPerKey.compute(
key,
(ignored, partitionIds) -> {
if (partitionIds == null) {
partitionIds = CollectionUtil.newHashSetWithExpectedSize(8);
}
partitionIds.addAll(newPartitionIds);
return partitionIds;
});
} | 3.68 |
hudi_HoodieTableMetadataUtil_collectColumnRangeMetadata | /**
 * Collects {@link HoodieColumnRangeMetadata} for the provided collection of records, as if the
 * provided records had been persisted within the given {@code filePath}.
 *
 * @param records target records to compute column range metadata for
 * @param targetFields columns (fields) to be collected
 * @param filePath file path value required for {@link HoodieColumnRangeMetadata}
 * @param recordSchema schema of the provided records
 *
 * @return map of {@link HoodieColumnRangeMetadata} for each of the provided target fields for
 * the collection of provided records
*/
public static Map<String, HoodieColumnRangeMetadata<Comparable>> collectColumnRangeMetadata(
List<HoodieRecord> records, List<Schema.Field> targetFields, String filePath, Schema recordSchema) {
// Helper class to calculate column stats
class ColumnStats {
Object minValue;
Object maxValue;
long nullCount;
long valueCount;
}
HashMap<String, ColumnStats> allColumnStats = new HashMap<>();
    // Collect stats for all columns by iterating through the records and accumulating
    // the corresponding stats
records.forEach((record) -> {
      // For each column (field) to be indexed, update the corresponding column stats
// with the values from this record
targetFields.forEach(field -> {
ColumnStats colStats = allColumnStats.computeIfAbsent(field.name(), (ignored) -> new ColumnStats());
Schema fieldSchema = getNestedFieldSchemaFromWriteSchema(recordSchema, field.name());
Object fieldValue;
if (record.getRecordType() == HoodieRecordType.AVRO) {
fieldValue = HoodieAvroUtils.getRecordColumnValues(record, new String[]{field.name()}, recordSchema, false)[0];
} else if (record.getRecordType() == HoodieRecordType.SPARK) {
fieldValue = record.getColumnValues(recordSchema, new String[]{field.name()}, false)[0];
} else {
throw new HoodieException(String.format("Unknown record type: %s", record.getRecordType()));
}
colStats.valueCount++;
if (fieldValue != null && canCompare(fieldSchema)) {
// Set the min value of the field
if (colStats.minValue == null
|| ConvertingGenericData.INSTANCE.compare(fieldValue, colStats.minValue, fieldSchema) < 0) {
colStats.minValue = fieldValue;
}
// Set the max value of the field
if (colStats.maxValue == null || ConvertingGenericData.INSTANCE.compare(fieldValue, colStats.maxValue, fieldSchema) > 0) {
colStats.maxValue = fieldValue;
}
} else {
colStats.nullCount++;
}
});
});
Collector<HoodieColumnRangeMetadata<Comparable>, ?, Map<String, HoodieColumnRangeMetadata<Comparable>>> collector =
Collectors.toMap(colRangeMetadata -> colRangeMetadata.getColumnName(), Function.identity());
return (Map<String, HoodieColumnRangeMetadata<Comparable>>) targetFields.stream()
.map(field -> {
ColumnStats colStats = allColumnStats.get(field.name());
return HoodieColumnRangeMetadata.<Comparable>create(
filePath,
field.name(),
colStats == null ? null : coerceToComparable(field.schema(), colStats.minValue),
colStats == null ? null : coerceToComparable(field.schema(), colStats.maxValue),
colStats == null ? 0 : colStats.nullCount,
colStats == null ? 0 : colStats.valueCount,
// NOTE: Size and compressed size statistics are set to 0 to make sure we're not
// mixing up those provided by Parquet with the ones from other encodings,
// since those are not directly comparable
0,
0
);
})
.collect(collector);
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredTextParameterWithLink | /**
* Defines a text-based configuration parameter provided by pipeline developers at pipeline authoring time. The
* value range of the parameter is restricted to the value specification of a selected input event property.
*
* @param label The {@link org.apache.streampipes.sdk.helpers.Label}
* that describes why this parameter is needed in a
* user-friendly manner.
 * @param linkedMappingPropertyInternalName The internalId of the
* {@link org.apache.streampipes.model.staticproperty.MappingProperty}
* @return this
*/
public K requiredTextParameterWithLink(Label label, String
linkedMappingPropertyInternalName) {
FreeTextStaticProperty fsp = prepareFreeTextStaticProperty(label, XSD.STRING.toString());
fsp.setMapsTo(linkedMappingPropertyInternalName);
this.staticProperties.add(fsp);
return me();
} | 3.68 |
hbase_HFileReaderImpl_checkKeyValueLen | /**
* Check key and value lengths are wholesome.
*/
protected final void checkKeyValueLen() {
if (checkKeyLen(this.currKeyLen) || checkLen(this.currValueLen)) {
throw new IllegalStateException("Invalid currKeyLen " + this.currKeyLen
+ " or currValueLen " + this.currValueLen + ". Block offset: " + this.curBlock.getOffset()
+ ", block length: " + this.blockBuffer.limit() + ", position: "
+ this.blockBuffer.position() + " (without header)." + ", path=" + reader.getPath());
}
} | 3.68 |
hbase_PrivateCellUtil_createFirstOnRow | /**
* Create a Cell that is smaller than all other possible Cells for the given Cell's row.
* @return First possible Cell on passed Cell's row.
*/
public static Cell createFirstOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return new FirstOnRowByteBufferExtendedCell(
((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength());
}
return new FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} | 3.68 |
hadoop_Chunk_readLength | /**
 * Reads the length of the next chunk.
*
* @throws java.io.IOException
* when no more data is available.
*/
private void readLength() throws IOException {
remain = Utils.readVInt(in);
if (remain >= 0) {
lastChunk = true;
} else {
remain = -remain;
}
} | 3.68 |
hadoop_SaveSuccessFileStage_executeStage | /**
* Execute.
* @param successData success data to save
* @return path saved to.
* @throws IOException failure
*/
@Override
protected Path executeStage(final ManifestSuccessData successData)
throws IOException {
// Save the marker
Path successFile = getStageConfig().getJobSuccessMarkerPath();
Path successTempFile = new Path(getJobAttemptDir(), SUCCESS_MARKER + TMP_SUFFIX);
    LOG.debug("{}: Saving _SUCCESS file to {} via {}", getName(),
        successFile,
        successTempFile);
save(successData, successTempFile, successFile);
return successFile;
} | 3.68 |
hbase_HFileBlock_getMetaData | /**
* For use by bucketcache. This exposes internals.
*/
public ByteBuffer getMetaData(ByteBuffer bb) {
bb = addMetaData(bb, true);
bb.flip();
return bb;
} | 3.68 |
hadoop_S3ADtFetcher_addDelegationTokens | /**
 * Returns a Token object via FileSystem; throws if the token cannot be fetched.
* @param conf - a Configuration object used with FileSystem.get()
* @param creds - a Credentials object to which token(s) will be added
* @param renewer - the renewer to send with the token request
* @param url - the URL to which the request is sent
 * @return a Token for the filesystem; an exception is thrown if the fetch fails.
*/
public Token<?> addDelegationTokens(Configuration conf,
Credentials creds,
String renewer,
String url) throws Exception {
if (!url.startsWith(getServiceName().toString())) {
url = getServiceName().toString() + "://" + url;
}
FileSystem fs = FileSystem.get(URI.create(url), conf);
Token<?> token = fs.getDelegationToken(renewer);
if (token == null) {
throw new DelegationTokenIOException(FETCH_FAILED + ": " + url);
}
creds.addToken(token.getService(), token);
return token;
} | 3.68 |
flink_CastRuleProvider_cast | /**
* Create a {@link CastExecutor} and execute the cast on the provided {@code value}. Fails with
 * {@link NullPointerException} if the rule cannot be resolved, or with an exception from
* the {@link CastExecutor} itself if the rule can fail.
*/
@SuppressWarnings("unchecked")
public static @Nullable Object cast(
CastRule.Context context,
LogicalType inputLogicalType,
LogicalType targetLogicalType,
Object value) {
CastExecutor<Object, Object> castExecutor =
(CastExecutor<Object, Object>)
CastRuleProvider.create(context, inputLogicalType, targetLogicalType);
if (castExecutor == null) {
throw new NullPointerException(
"Unsupported casting from " + inputLogicalType + " to " + targetLogicalType);
}
return castExecutor.cast(value);
} | 3.68 |
flink_EnvironmentInformation_getJvmVersion | /**
* Gets the version of the JVM in the form "VM_Name - Vendor - Spec/Version".
*
* @return The JVM version.
*/
public static String getJvmVersion() {
try {
final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
return bean.getVmName()
+ " - "
+ bean.getVmVendor()
+ " - "
+ bean.getSpecVersion()
+ '/'
+ bean.getVmVersion();
} catch (Throwable t) {
return UNKNOWN;
}
} | 3.68 |
framework_CssLayout_iterator | /**
 * Gets the component container iterator for going through all the components
* in the container.
*
* @return the Iterator of the components inside the container.
*/
@Override
public Iterator<Component> iterator() {
return Collections.unmodifiableCollection(components).iterator();
} | 3.68 |
hudi_SyncUtilHelpers_runHoodieMetaSync | /**
* Create an instance of an implementation of {@link HoodieSyncTool} that will sync all the relevant meta information
* with an external metastore such as Hive etc. to ensure Hoodie tables can be queried or read via external systems.
*
* @param syncToolClassName Class name of the {@link HoodieSyncTool} implementation.
* @param props property map.
* @param hadoopConfig Hadoop confs.
* @param fs Filesystem used.
* @param targetBasePath The target base path that contains the hoodie table.
* @param baseFileFormat The file format used by the hoodie table (defaults to PARQUET).
*/
public static void runHoodieMetaSync(String syncToolClassName,
TypedProperties props,
Configuration hadoopConfig,
FileSystem fs,
String targetBasePath,
String baseFileFormat) {
if (targetBasePath == null) {
throw new IllegalArgumentException("Target base path must not be null");
}
// Get or create a lock for the specific table
Lock tableLock = TABLE_LOCKS.computeIfAbsent(targetBasePath, k -> new ReentrantLock());
tableLock.lock();
try {
try (HoodieSyncTool syncTool = instantiateMetaSyncTool(syncToolClassName, props, hadoopConfig, fs, targetBasePath, baseFileFormat)) {
syncTool.syncHoodieTable();
} catch (Throwable e) {
throw new HoodieMetaSyncException("Could not sync using the meta sync class " + syncToolClassName, e);
}
} finally {
tableLock.unlock();
}
} | 3.68 |
open-banking-gateway_BaseDatasafeDbStorageService_remove | /**
* Delete object within Datasafe storage.
* @param absoluteLocation Absolute path of the object to remove. I.e. {@code db://storage/deadbeef}
*/
@Override
@Transactional
public void remove(AbsoluteLocation absoluteLocation) {
handlers.get(deduceTable(absoluteLocation)).delete(deduceId(absoluteLocation));
} | 3.68 |
hibernate-validator_NotEmptyValidatorForArraysOfChar_isValid | /**
* Checks the array is not {@code null} and not empty.
*
* @param array the array to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the array is not {@code null} and the array is not empty
*/
@Override
public boolean isValid(char[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return false;
}
return array.length > 0;
} | 3.68 |
flink_DefaultJobGraphStore_verifyIsRunning | /** Verifies that the state is running. */
private void verifyIsRunning() {
checkState(running, "Not running. Forgot to call start()?");
} | 3.68 |
hudi_CollectionUtils_toStream | /**
 * Collects the provided {@link Iterator} into a {@link Stream}
*/
public static <T> Stream<T> toStream(Iterator<T> iterator) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED),
false
);
} | 3.68 |
flink_OptimizerNode_addBroadcastConnection | /**
* Adds the broadcast connection identified by the given {@code name} to this node.
*
 * @param name The name identifying the broadcast connection.
 * @param broadcastConnection The connection to add.
*/
public void addBroadcastConnection(String name, DagConnection broadcastConnection) {
this.broadcastConnectionNames.add(name);
this.broadcastConnections.add(broadcastConnection);
} | 3.68 |
pulsar_GracefulExecutorServicesShutdown_handle | /**
* Starts the handler for polling frequently for the completed termination of enlisted executors.
*
* If the termination times out or the future is cancelled, all active executors will be forcefully
* terminated by calling {@link ExecutorService#shutdownNow()}.
*
* @return a future which completes when all executors have terminated
*/
public CompletableFuture<Void> handle() {
// if termination timeout isn't provided, calculate a termination timeout based on the shutdown timeout
if (terminationTimeout == null) {
terminationTimeout = Duration.ofNanos((long) (timeout.toNanos() * DEFAULT_TERMINATION_TIMEOUT_RATIO));
}
return new GracefulExecutorServicesTerminationHandler(timeout, terminationTimeout,
executorServices).getFuture();
} | 3.68 |
hadoop_Cluster_getLogParams | /**
* Get log parameters for the specified jobID or taskAttemptID
* @param jobID the job id.
* @param taskAttemptID the task attempt id. Optional.
* @return the LogParams
* @throws IOException
* @throws InterruptedException
*/
public LogParams getLogParams(JobID jobID, TaskAttemptID taskAttemptID)
throws IOException, InterruptedException {
return client.getLogFileParams(jobID, taskAttemptID);
} | 3.68 |
morf_SqlScriptExecutorProvider_beforeExecute | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#beforeExecute(java.lang.String)
*/
@Override
public void beforeExecute(String sql) {
// Defaults to no-op
} | 3.68 |
hbase_MetricsSink_setAgeOfLastAppliedOp | /**
* Set the age of the last applied operation
* @param timestamp The timestamp of the last operation applied.
* @return the age that was set
*/
public long setAgeOfLastAppliedOp(long timestamp) {
long age = 0;
if (lastTimestampForAge != timestamp) {
lastTimestampForAge = timestamp;
age = EnvironmentEdgeManager.currentTime() - lastTimestampForAge;
}
mss.setLastAppliedOpAge(age);
return age;
} | 3.68 |
hadoop_FileIoProvider_getFileInputStream | /**
* Create a FileInputStream using
* {@link FileInputStream#FileInputStream(File)}.
*
* Wraps the created input stream to intercept read calls
* before delegating to the wrapped stream.
*
* @param volume target volume. null if unavailable.
* @param f File object.
* @return FileInputStream to the given file.
* @throws FileNotFoundException
*/
public FileInputStream getFileInputStream(
@Nullable FsVolumeSpi volume, File f) throws FileNotFoundException {
final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
FileInputStream fis = null;
try {
faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
fis = new WrappedFileInputStream(volume, f);
profilingEventHook.afterMetadataOp(volume, OPEN, begin);
return fis;
} catch(Exception e) {
IOUtils.closeStream(fis);
onFailure(volume, begin);
throw e;
}
} | 3.68 |
hadoop_OBSWriteOperationHelper_newObjectMetadata | /**
* Create a new object metadata instance. Any standard metadata headers are
* added here, for example: encryption.
*
* @param length size, if known. Use -1 for not known
* @return a new metadata instance
*/
public ObjectMetadata newObjectMetadata(final long length) {
return OBSObjectBucketUtils.newObjectMetadata(length);
} | 3.68 |
pulsar_PulsarShell_computeDefaultPulsarShellRootDirectory | /**
* Compute the default Pulsar shell root directory.
 * If the system property "user.home" returns an invalid value, the current directory is used instead.
 * @return the default Pulsar shell root directory
*/
private static String computeDefaultPulsarShellRootDirectory() {
final String userHome = System.getProperty("user.home");
if (!StringUtils.isBlank(userHome) && !"?".equals(userHome)) {
return userHome;
}
return System.getProperty("user.dir");
} | 3.68 |
hadoop_TimelineEvents_addEvent | /**
* Add a single event to the existing event list
*
* @param event
* a single event
*/
public void addEvent(TimelineEvent event) {
events.add(event);
} | 3.68 |
hbase_HBaseTestingUtility_getMetaTableRows | /**
* Returns all rows from the hbase:meta table for a given user table
* @throws IOException When reading the rows fails.
*/
public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
// TODO: Redo using MetaTableAccessor.
Table t = getConnection().getTable(TableName.META_TABLE_NAME);
List<byte[]> rows = new ArrayList<>();
ResultScanner s = t.getScanner(new Scan());
for (Result result : s) {
RegionInfo info = CatalogFamilyFormat.getRegionInfo(result);
if (info == null) {
LOG.error("No region info for row " + Bytes.toString(result.getRow()));
// TODO figure out what to do for this new hosed case.
continue;
}
if (info.getTable().equals(tableName)) {
LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info);
rows.add(result.getRow());
}
}
s.close();
t.close();
return rows;
} | 3.68 |
graphhopper_DistanceCalcEuclidean_calcNormalizedDist | /**
 * Calculates the normalized distance, i.e. the squared Euclidean distance
*/
@Override
public double calcNormalizedDist(double fromY, double fromX, double toY, double toX) {
double dX = fromX - toX;
double dY = fromY - toY;
return dX * dX + dY * dY;
} | 3.68 |
hadoop_ReplicaInfo_getVolume | /**
* @return the volume where this replica is located on disk
*/
public FsVolumeSpi getVolume() {
return volume;
} | 3.68 |
hbase_TableRecordReaderImpl_setEndRow | /**
* @param endRow the last row in the split
*/
public void setEndRow(final byte[] endRow) {
this.endRow = endRow;
} | 3.68 |
morf_UpdateStatement_deepCopy | /**
* @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation)
*/
@Override
public UpdateStatementBuilder deepCopy(DeepCopyTransformation transformer) {
return new UpdateStatementBuilder(this, transformer);
} | 3.68 |
morf_SqlDialect_appendWhere | /**
 * Appends the WHERE clause to the result.
*
* @param result where clause will be appended here
* @param stmt statement with where clause
* @param <T> The type of AbstractSelectStatement
*/
protected <T extends AbstractSelectStatement<T>> void appendWhere(StringBuilder result, AbstractSelectStatement<T> stmt) {
if (stmt.getWhereCriterion() != null) {
result.append(" WHERE ");
result.append(getSqlFrom(stmt.getWhereCriterion()));
}
} | 3.68 |
flink_Tuple5_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple5)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple5 tuple = (Tuple5) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
return true;
} | 3.68 |
hbase_HFileBlock_isSharedMem | /**
 * Will be overridden by {@link SharedMemHFileBlock} or {@link ExclusiveMemHFileBlock}. Returns true
* by default.
*/
public boolean isSharedMem() {
return true;
} | 3.68 |
morf_DatabaseMetaDataProvider_getView | /**
* @see org.alfasoftware.morf.metadata.Schema#getView(java.lang.String)
*/
@Override
public View getView(String viewName) {
return viewCache.getUnchecked(named(viewName));
} | 3.68 |
hbase_MetricsSink_getAgeOfLastAppliedOp | /**
* Get the Age of Last Applied Op
*/
public long getAgeOfLastAppliedOp() {
return mss.getLastAppliedOpAge();
} | 3.68 |
framework_Page_getPendingDependencies | /**
* Returns all pending dependencies.
* <p>
* For internal use only, calling this method will clear the pending
* dependencies.
*
* @return the pending dependencies to the current page
* @since 8.1
*/
public Collection<Dependency> getPendingDependencies() {
List<Dependency> copy = new ArrayList<>();
if (pendingDependencies != null) {
copy.addAll(pendingDependencies);
}
pendingDependencies = null;
return copy;
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableBackgroundErrors | /** Enables the metric reporting the accumulated number of background errors. */
public void enableBackgroundErrors() {
this.properties.add(RocksDBProperty.BackgroundErrors.getRocksDBProperty());
} | 3.68 |
hbase_InclusiveStopFilter_parseFrom | /**
* Parse a serialized representation of {@link InclusiveStopFilter}
* @param pbBytes A pb serialized {@link InclusiveStopFilter} instance
* @return An instance of {@link InclusiveStopFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static InclusiveStopFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
FilterProtos.InclusiveStopFilter proto;
try {
proto = FilterProtos.InclusiveStopFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new InclusiveStopFilter(
proto.hasStopRowKey() ? proto.getStopRowKey().toByteArray() : null);
} | 3.68 |
framework_FocusableScrollPanel_setScrollPosition | /**
* Sets the vertical scroll position.
*
* @param position
* the new vertical scroll position, in pixels
*/
public void setScrollPosition(int position) {
if (BrowserInfo.get().isAndroidWithBrokenScrollTop()
&& BrowserInfo.get().requiresTouchScrollDelegate()) {
List<com.google.gwt.dom.client.Element> elements = TouchScrollDelegate
.getElements(getElement());
for (com.google.gwt.dom.client.Element el : elements) {
final Style style = el.getStyle();
style.setProperty("webkitTransform",
"translate3d(0px," + -position + "px,0px)");
}
getElement().setPropertyInt("_vScrollTop", position);
} else {
getElement().setScrollTop(position);
}
} | 3.68 |
querydsl_SQLExpressions_date | /**
* Convert timestamp to date
*
* @param type type
* @param dateTime timestamp
* @return date
*/
public static <D extends Comparable> DateExpression<D> date(Class<D> type, DateTimeExpression<?> dateTime) {
return Expressions.dateOperation(type, Ops.DateTimeOps.DATE, dateTime);
} | 3.68 |
hbase_QuotaSettingsFactory_unthrottleRegionServerByThrottleType | /**
* Remove the throttling for the specified region server by throttle type.
* @param regionServer the region Server
* @param type the type of throttling
* @return the quota settings
*/
public static QuotaSettings unthrottleRegionServerByThrottleType(final String regionServer,
final ThrottleType type) {
return throttle(null, null, null, regionServer, type, 0, null, QuotaScope.MACHINE);
} | 3.68 |
hibernate-validator_AnnotationApiHelper_getMirrorForType | /**
* Returns a TypeMirror for the given class.
*
* @param clazz The class of interest.
*
* @return A TypeMirror for the given class.
*/
public TypeMirror getMirrorForType(Class<?> clazz) {
if ( clazz.isArray() ) {
return typeUtils.getArrayType( getMirrorForNonArrayType( clazz.getComponentType() ) );
}
else {
return getMirrorForNonArrayType( clazz );
}
} | 3.68 |
hadoop_OBSCommonUtils_stringify | /**
* String information about a summary entry for debug messages.
*
* @param summary summary object
* @return string value
*/
static String stringify(final ObsObject summary) {
return summary.getObjectKey() + " size=" + summary.getMetadata()
.getContentLength();
} | 3.68 |
pulsar_Dispatcher_trackDelayedDelivery | /**
* Check with dispatcher if the message should be added to the delayed delivery tracker.
* Return true if the message should be delayed and ignored at this point.
*/
default boolean trackDelayedDelivery(long ledgerId, long entryId, MessageMetadata msgMetadata) {
return false;
} | 3.68 |
flink_JsonPlanEdge_fromExecEdge | /** Build {@link JsonPlanEdge} from an {@link ExecEdge}. */
static JsonPlanEdge fromExecEdge(ExecEdge execEdge) {
return new JsonPlanEdge(
execEdge.getSource().getId(),
execEdge.getTarget().getId(),
execEdge.getShuffle(),
execEdge.getExchangeMode());
} | 3.68 |
hadoop_WriteOperationHelper_writeSuccessful | /**
* Callback on a successful write.
* @param length length of the write
*/
public void writeSuccessful(long length) {
} | 3.68 |
hadoop_Sets_union | /**
* Returns the union of two sets as an unmodifiable set.
* The returned set contains all elements that are contained in either
* backing set.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets
* based on different equivalence relations (as {@link HashSet},
* {@link TreeSet}, and the {@link Map#keySet} of an
* {@code IdentityHashMap} all are).
*
* @param set1 set1.
* @param set2 set2.
* @param <E> Generics Type E.
 * @return an unmodifiable set containing the union of the two sets.
*/
public static <E> Set<E> union(
final Set<E> set1, final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> newSet = new HashSet<>(set1);
newSet.addAll(set2);
return Collections.unmodifiableSet(newSet);
} | 3.68 |
flink_SchedulerBase_getCombinedExecutionTerminationFuture | /**
* Returns a {@code CompletableFuture} collecting the termination states of all {@link Execution
* Executions} of the underlying {@link ExecutionGraph}.
*
* @return a {@code CompletableFuture} that completes after all underlying {@code Executions}
* have been terminated.
*/
private CompletableFuture<Collection<ExecutionState>> getCombinedExecutionTerminationFuture() {
return FutureUtils.combineAll(
StreamSupport.stream(executionGraph.getAllExecutionVertices().spliterator(), false)
.map(ExecutionVertex::getCurrentExecutionAttempt)
.map(Execution::getTerminalStateFuture)
.collect(Collectors.toList()));
} | 3.68 |
querydsl_AntMetaDataExporter_setCustomTypes | /**
* Sets a list of custom types
* @param strings a list of custom types
* @deprecated Use addCustomType instead
*/
public void setCustomTypes(String[] strings) {
this.customTypes.clear();
for (String string : strings) {
CustomType customType = new CustomType();
customType.setClassName(string);
this.customTypes.add(customType);
}
} | 3.68 |
hbase_ExploringCompactionPolicy_selectCompactFiles | /**
 * Select at least one file in the candidates list to compact, by choosing files from the
 * head until the accumulated length exceeds the max compaction size. This method is a
 * supplement to the selectSimpleCompaction() method, aiming to make sure at least one file can
 * be selected to compact, for compactions like L0 files, which need to compact all files as
 * soon as possible.
*/
public List<HStoreFile> selectCompactFiles(final List<HStoreFile> candidates, int maxFiles,
boolean isOffpeak) {
long selectedSize = 0L;
for (int end = 0; end < Math.min(candidates.size(), maxFiles); end++) {
selectedSize += candidates.get(end).getReader().length();
if (selectedSize >= comConf.getMaxCompactSize(isOffpeak)) {
return candidates.subList(0, end + 1);
}
}
return candidates;
} | 3.68 |
streampipes_EventSchemaUtils_getTimestampProperty | /**
* Returns the timestamp property of an event schema as an {@code Optional}.
*
 * <p> The method checks each property to determine whether it is of type {@code EventPropertyPrimitive} and whether its domain
 * properties contain the URI http://schema.org/DateTime </p>
*
* @param eventSchema the event schema for which the timestamp property is to be returned
* @return an {@code Optional} containing the timestamp property, or an empty {@code Optional} if
* no such property was found
*/
public static Optional<EventPropertyPrimitive> getTimestampProperty(EventSchema eventSchema) {
return getTimstampProperty(eventSchema.getEventProperties());
} | 3.68 |
hbase_LruBlockCache_getBlock | /**
* Get the buffer of the block with the specified name.
* @param cacheKey block's cache key
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block (used to avoid
* double counting cache misses when doing double-check locking)
* @param updateCacheMetrics Whether to update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
// Note: 'map' must be a ConcurrentHashMap or the supplier may be invoked more than once.
LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> {
// It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside
// this block. because if retain outside the map#computeIfPresent, the evictBlock may remove
// the block and release, then we're retaining a block with refCnt=0 which is disallowed.
// see HBASE-22422.
val.getBuffer().retain();
return val;
});
if (cb == null) {
if (!repeat && updateCacheMetrics) {
stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
// If there is another block cache then try and read there.
// However if this is a retry ( second time in double checked locking )
// And it's already a miss then the l2 will also be a miss.
if (victimHandler != null && !repeat) {
// The handler will increase result's refCnt for RPC, so need no extra retain.
Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
// Promote this to L1.
if (result != null) {
if (caching) {
cacheBlock(cacheKey, result, /* inMemory = */ false);
}
}
return result;
}
return null;
}
if (updateCacheMetrics) {
stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
cb.access(count.incrementAndGet());
return cb.getBuffer();
} | 3.68 |
framework_WebBrowser_getAddress | /**
* Gets the IP-address of the web browser. If the application is running
* inside a portlet, this method will return null.
*
* @return IP-address in 1.12.123.123 -format
*/
public String getAddress() {
return address;
} | 3.68 |
flink_FileInputFormat_getSplitStart | /**
* Gets the start of the current split.
*
* @return The start of the split.
*/
public long getSplitStart() {
return splitStart;
} | 3.68 |
hbase_KeyLocker_acquireLocks | /**
* Acquire locks for a set of keys. The keys will be sorted internally to avoid possible deadlock.
* @throws ClassCastException if the given {@code keys} contains elements that are not mutually
* comparable
*/
public Map<K, Lock> acquireLocks(Set<? extends K> keys) {
Object[] keyArray = keys.toArray();
Arrays.sort(keyArray);
lockPool.purge();
Map<K, Lock> locks = new LinkedHashMap<>(keyArray.length);
for (Object o : keyArray) {
@SuppressWarnings("unchecked")
K key = (K) o;
ReentrantLock lock = lockPool.get(key);
locks.put(key, lock);
}
for (Lock lock : locks.values()) {
lock.lock();
}
return locks;
} | 3.68 |
flink_ModifyKindSet_minus | /**
* Returns a new set of ModifyKind which is this set minus the other set, i.e. {@code this.kinds
 * - that.kinds}. For example:
 * [I,U,D] minus [I] = [U,D];
 * [I,U] minus [U,D] = [I];
 * [I,U,D] minus [I,U,D] = [].
*/
public ModifyKindSet minus(ModifyKindSet other) {
Set<ModifyKind> result = EnumSet.noneOf(ModifyKind.class);
result.addAll(this.kinds);
result.removeAll(other.kinds);
return new ModifyKindSet(result);
} | 3.68 |
hbase_HBaseSaslRpcClient_readNextRpcPacket | // unwrap messages with Crypto AES
private void readNextRpcPacket() throws IOException {
LOG.debug("reading next wrapped RPC packet");
DataInputStream dis = new DataInputStream(in);
int rpcLen = dis.readInt();
byte[] rpcBuf = new byte[rpcLen];
dis.readFully(rpcBuf);
// unwrap with Crypto AES
rpcBuf = cryptoAES.unwrap(rpcBuf, 0, rpcBuf.length);
if (LOG.isDebugEnabled()) {
LOG.debug("unwrapping token of length:" + rpcBuf.length);
}
unwrappedRpcBuffer = ByteBuffer.wrap(rpcBuf);
} | 3.68 |
framework_BindingValidationStatus_createUnresolvedStatus | /**
* Convenience method for creating a {@link Status#UNRESOLVED} validation
* status for the given binding.
*
* @param source
* the source binding
* @return unresolved validation status
* @param <TARGET>
* the target data type of the binding for which the validation
* status was reset
*/
public static <TARGET> BindingValidationStatus<TARGET> createUnresolvedStatus(
Binding<?, TARGET> source) {
return new BindingValidationStatus<TARGET>(null, source);
} | 3.68 |
framework_Range_startsBefore | /**
* Checks whether this range starts before the start of another range.
*
* @param other
* the other range to compare against
* @return <code>true</code> if this range starts before the
* <code>other</code>
*/
public boolean startsBefore(final Range other) {
return getStart() < other.getStart();
} | 3.68 |
hadoop_S3ADtFetcher_getServiceName | /**
 * Returns the service name for S3A, which is also a valid URL prefix.
*/
public Text getServiceName() {
return new Text(SERVICE_NAME);
} | 3.68 |
graphhopper_PbfDecoder_signalUpdate | /**
* Any thread can call this method when they wish to signal another thread that an update has
* occurred.
*/
private void signalUpdate() {
dataWaitCondition.signal();
} | 3.68 |
hadoop_OBSFileSystem_isEnableMultiObjectDelete | /**
* Return a flag that indicates if multi-object delete is enabled.
*
* @return the flag
*/
boolean isEnableMultiObjectDelete() {
return enableMultiObjectDelete;
} | 3.68 |
hadoop_AllocateRequest_trackingUrl | /**
* Set the <code>trackingUrl</code> of the request.
* @see AllocateRequest#setTrackingUrl(String)
* @param trackingUrl new tracking url
* @return {@link AllocateRequestBuilder}
*/
@Public
@Unstable
public AllocateRequestBuilder trackingUrl(String trackingUrl) {
allocateRequest.setTrackingUrl(trackingUrl);
return this;
} | 3.68 |
flink_ResourceProfile_allFieldsNoLessThan | /**
* Check whether all fields of this resource profile are no less than the given resource
* profile.
*
 * <p>It is not the same as the total resource comparison. It returns true iff each resource
 * field (cpu, task heap memory, managed memory, etc.) is no less than the respective field of
* the given profile.
*
* <p>For example, assume that this profile has 1 core, 50 managed memory and 100 heap memory.
*
* <ol>
* <li>The comparison will return false if the other profile has 2 core, 10 managed memory and
* 1000 heap memory.
* <li>The comparison will return true if the other profile has 1 core, 50 managed memory and
* 150 heap memory.
* </ol>
*
* @param other the other resource profile
* @return true if all fields of this are no less than the other's, otherwise false
*/
public boolean allFieldsNoLessThan(final ResourceProfile other) {
checkNotNull(other, "Cannot compare null resources");
if (this.equals(ANY)) {
return true;
}
if (this.equals(other)) {
return true;
}
if (this.equals(UNKNOWN)) {
return false;
}
if (other.equals(UNKNOWN)) {
return true;
}
if (cpuCores.getValue().compareTo(other.cpuCores.getValue()) >= 0
&& taskHeapMemory.compareTo(other.taskHeapMemory) >= 0
&& taskOffHeapMemory.compareTo(other.taskOffHeapMemory) >= 0
&& managedMemory.compareTo(other.managedMemory) >= 0
&& networkMemory.compareTo(other.networkMemory) >= 0) {
for (Map.Entry<String, ExternalResource> resource :
other.extendedResources.entrySet()) {
if (!extendedResources.containsKey(resource.getKey())
|| extendedResources
.get(resource.getKey())
.getValue()
.compareTo(resource.getValue().getValue())
< 0) {
return false;
}
}
return true;
}
return false;
} | 3.68 |
hbase_KeyValue_compareFlatKey | /**
* Compares left to right assuming that left,loffset,llength and right,roffset,rlength are full
 * KVs laid out in flat byte[]s.
* @param left the left kv serialized byte[] to be compared with
* @param loffset the offset in the left byte[]
* @param llength the length in the left byte[]
* @param right the right kv serialized byte[] to be compared with
* @param roffset the offset in the right byte[]
* @param rlength the length in the right byte[]
* @return 0 if equal, <0 if left smaller, >0 if right smaller
*/
public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, int roffset,
int rlength) {
// Compare row
short lrowlength = Bytes.toShort(left, loffset);
short rrowlength = Bytes.toShort(right, roffset);
int compare = compareRows(left, loffset + Bytes.SIZEOF_SHORT, lrowlength, right,
roffset + Bytes.SIZEOF_SHORT, rrowlength);
if (compare != 0) {
return compare;
}
// Compare the rest of the two KVs without making any assumptions about
// the common prefix. This function will not compare rows anyway, so we
// don't need to tell it that the common prefix includes the row.
return compareWithoutRow(0, left, loffset, llength, right, roffset, rlength, rrowlength);
} | 3.68 |
flink_DataOutputSerializer_getByteArray | /** @deprecated Replaced by {@link #getSharedBuffer()} for a better, safer name. */
@Deprecated
public byte[] getByteArray() {
return getSharedBuffer();
} | 3.68 |
framework_CustomizedSystemMessages_setAuthenticationErrorCaption | /**
 * Sets the caption of the notification. Set to null for no caption. If both
 * the caption and the message are null, the notification is disabled.
*
* @param authenticationErrorCaption
* the caption
*/
public void setAuthenticationErrorCaption(
String authenticationErrorCaption) {
this.authenticationErrorCaption = authenticationErrorCaption;
} | 3.68 |
morf_DatabaseMetaDataProvider_getRealName | /**
* The user-friendly camel-case name of the object,
* often derived by looking at the comment of that object,
* or in schema descriptions.
*
* @return user-friendly camel-case name
*/
public String getRealName() {
return realName;
} | 3.68 |
hbase_ByteBufferUtils_toShort | /**
* Reads a short value at the given buffer's offset.
* @param buffer input byte buffer to read
* @param offset input offset where short is
* @return short value at offset
*/
public static short toShort(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.toShort(buffer, offset);
} | 3.68 |
framework_TooltipOnRequiredIndicator_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Show tooltip for caption and required indicator";
} | 3.68 |
framework_ComponentStateUtil_removeRegisteredEventListener | /**
* Removes an event listener id.
*
* @param state
* shared state
* @param eventIdentifier
* The event identifier to remove
* @deprecated Use a {@link Registration} object returned by
* {@link #addRegisteredEventListener(SharedState, String)} to
* remove a listener
*/
@Deprecated
public static final void removeRegisteredEventListener(SharedState state,
String eventIdentifier) {
if (state.registeredEventListeners == null) {
return;
}
state.registeredEventListeners.remove(eventIdentifier);
if (state.registeredEventListeners.size() == 0) {
state.registeredEventListeners = null;
}
} | 3.68 |
querydsl_AbstractHibernateQuery_clone | /**
* Clone the state of this query to a new instance
*
 * @return cloned query
*/
@Override
public Q clone() {
return this.clone(this.session);
} | 3.68 |
morf_AbstractSqlDialectTest_testDialectHasNoBespokeTests | /**
* Test that ensures that no dialect specific tests exist. I.e. If there is an SQL dialect test
 * then it must be defined here so that all descendant dialects are obliged to support it.
*
* <p>This test exists to make sure we test all required SQL constructs on all supported platforms.
* Previously there were lots of features only tested on MySql or Oracle.</p>
*
* <p>The desired format for new tests that have different results for different platforms is
* to define a (possibly abstract) "expectedOutcome" method in this class such as {@link #expectedCreateTableStatements()}.</p>
*/
@Test
public void testDialectHasNoBespokeTests() {
for (Method method : getClass().getDeclaredMethods()) {
if (method.getName().startsWith("test") || method.getAnnotation(Test.class) != null) {
fail("Descendents of " + AbstractSqlDialectTest.class.getSimpleName() + " must not define tests directly");
}
}
} | 3.68 |
druid_MySqlStatementParser_parseDeclareCondition | /**
* zhujun [[email protected]]
* 2016-04-17
 * Defines a condition.
*/
public MySqlDeclareConditionStatement parseDeclareCondition() {
MySqlDeclareConditionStatement stmt = new MySqlDeclareConditionStatement();
accept(Token.DECLARE);
stmt.setConditionName(exprParser.name().toString());
accept(Token.CONDITION);
accept(Token.FOR);
String tokenName = lexer.stringVal();
ConditionValue condition = new ConditionValue();
if (tokenName.equalsIgnoreCase("SQLSTATE")) { //for SQLSTATE (SQLSTATE '10001')
condition.setType(ConditionType.SQLSTATE);
lexer.nextToken();
condition.setValue(exprParser.name().toString());
} else if (lexer.token() == Token.LITERAL_INT) {
condition.setType(ConditionType.MYSQL_ERROR_CODE);
condition.setValue(lexer.integerValue().toString());
lexer.nextToken();
} else {
      throw new ParserException("declare condition grammar error. " + lexer.info());
}
stmt.setConditionValue(condition);
accept(Token.SEMI);
return stmt;
} | 3.68 |
shardingsphere-elasticjob_ScheduleJobBootstrap_schedule | /**
* Schedule job.
*/
public void schedule() {
Preconditions.checkArgument(!Strings.isNullOrEmpty(jobScheduler.getJobConfig().getCron()), "Cron can not be empty.");
jobScheduler.getJobScheduleController().scheduleJob(jobScheduler.getJobConfig().getCron(), jobScheduler.getJobConfig().getTimeZone());
} | 3.68 |
dubbo_RegistryDirectory_getUrlInvokerMap | /**
* Haomin: added for test purpose
*/
public Map<URL, Invoker<T>> getUrlInvokerMap() {
return urlInvokerMap;
} | 3.68 |
morf_ColumnBean_getDefaultValue | /**
* @see org.alfasoftware.morf.metadata.Column#getDefaultValue()
*/
@Override
public String getDefaultValue() {
return StringUtils.defaultString(defaultValue);
} | 3.68 |
framework_AbstractBeanContainer_createBeanPropertyResolver | /**
* Create an item identifier resolver using a named bean property.
*
* @param propertyId
* property identifier, which must map to a getter in BEANTYPE
* @return created resolver
*/
protected BeanIdResolver<IDTYPE, BEANTYPE> createBeanPropertyResolver(
Object propertyId) {
return new PropertyBasedBeanIdResolver(propertyId);
} | 3.68 |
flink_GenericWriteAheadSink_cleanRestoredHandles | /**
* Called at {@link #open()} to clean-up the pending handle list. It iterates over all restored
* pending handles, checks which ones are already committed to the outside storage system and
* removes them from the list.
*/
private void cleanRestoredHandles() throws Exception {
synchronized (pendingCheckpoints) {
Iterator<PendingCheckpoint> pendingCheckpointIt = pendingCheckpoints.iterator();
while (pendingCheckpointIt.hasNext()) {
PendingCheckpoint pendingCheckpoint = pendingCheckpointIt.next();
if (committer.isCheckpointCommitted(
pendingCheckpoint.subtaskId, pendingCheckpoint.checkpointId)) {
pendingCheckpoint.stateHandle.discardState();
pendingCheckpointIt.remove();
}
}
}
} | 3.68 |
hadoop_YarnClient_createYarnClient | /**
* Create a new instance of YarnClient.
*/
@Public
public static YarnClient createYarnClient() {
YarnClient client = new YarnClientImpl();
return client;
} | 3.68 |
hadoop_CosNFileSystem_setWorkingDirectory | /**
* Set the working directory to the given directory.
*/
@Override
public void setWorkingDirectory(Path newDir) {
workingDir = newDir;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectMod | /**
* Tests select statement with mod function.
*/
@Test
public void testSelectMod() {
SelectStatement stmt = new SelectStatement(mod(new FieldReference(INT_FIELD), new FieldLiteral(5))).from(new TableReference(TEST_TABLE));
String expectedSql = expectedSelectModSQL();
assertEquals("Select scripts are not the same", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
hadoop_OBSDataBlocks_hasData | /**
* Predicate to check if there is data in the block.
*
* @return true if there is
*/
boolean hasData() {
return dataSize() > 0;
} | 3.68 |
framework_VTree_onBlur | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.BlurHandler#onBlur(com.google.gwt.event
* .dom.client.BlurEvent)
*/
@Override
public void onBlur(BlurEvent event) {
treeHasFocus = false;
if (focusedNode != null) {
focusedNode.setFocused(false);
}
} | 3.68 |
hbase_Bytes_padTail | /**
* Make a new byte array from a subset of bytes at the tail of another, zero padded as desired.
* @param a array
* @param length new array size
* @return Value in <code>a</code> plus <code>length</code> appended 0 bytes
*/
public static byte[] padTail(final byte[] a, final int length) {
byte[] padding = new byte[length];
for (int i = 0; i < length; i++) {
padding[i] = 0;
}
return add(a, padding);
} | 3.68 |
hudi_BaseHoodieTableServiceClient_rollbackFailedBootstrap | /**
* Main API to rollback failed bootstrap.
*/
public void rollbackFailedBootstrap() {
LOG.info("Rolling back pending bootstrap if present");
HoodieTable table = createTable(config, hadoopConf);
HoodieTimeline inflightTimeline = table.getMetaClient().getCommitsTimeline().filterPendingExcludingMajorAndMinorCompaction();
Option<String> instant = Option.fromJavaOptional(
inflightTimeline.getReverseOrderedInstants().map(HoodieInstant::getTimestamp).findFirst());
if (instant.isPresent() && HoodieTimeline.compareTimestamps(instant.get(), HoodieTimeline.LESSER_THAN_OR_EQUALS,
HoodieTimeline.FULL_BOOTSTRAP_INSTANT_TS)) {
LOG.info("Found pending bootstrap instants. Rolling them back");
table.rollbackBootstrap(context, createNewInstantTime());
LOG.info("Finished rolling back pending bootstrap");
}
// if bootstrap failed, lets delete metadata and restart from scratch
HoodieTableMetadataUtil.deleteMetadataTable(config.getBasePath(), context);
} | 3.68 |
hbase_HttpServer_addPrivilegedServlet | /**
* Adds a servlet in the server that only administrators can access. This method differs from
* {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those authenticated user
* who are identified as administrators can interact with the servlet added by this method.
*/
public void addPrivilegedServlet(String pathSpec, ServletHolder holder) {
addServletWithAuth(pathSpec, holder, true);
} | 3.68 |
hudi_ActiveAction_getPendingAction | /**
* A COMPACTION action eventually becomes COMMIT when completed.
*/
public String getPendingAction() {
return getPendingInstant().getAction();
} | 3.68 |
flink_BulkIterationNode_setPartialSolution | /**
* Sets the partialSolution for this BulkIterationNode.
*
* @param partialSolution The partialSolution to set.
*/
public void setPartialSolution(BulkPartialSolutionNode partialSolution) {
this.partialSolution = partialSolution;
} | 3.68 |
hadoop_OBSCommonUtils_patchSecurityCredentialProviders | /**
* Patch the security credential provider information in {@link
* #CREDENTIAL_PROVIDER_PATH} with the providers listed in {@link
* OBSConstants#OBS_SECURITY_CREDENTIAL_PROVIDER_PATH}.
*
* <p>This allows different buckets to use different credential files.
*
* @param conf configuration to patch
*/
static void patchSecurityCredentialProviders(final Configuration conf) {
Collection<String> customCredentials =
conf.getStringCollection(
OBSConstants.OBS_SECURITY_CREDENTIAL_PROVIDER_PATH);
Collection<String> hadoopCredentials = conf.getStringCollection(
CREDENTIAL_PROVIDER_PATH);
if (!customCredentials.isEmpty()) {
List<String> all = Lists.newArrayList(customCredentials);
all.addAll(hadoopCredentials);
String joined = StringUtils.join(all, ',');
LOG.debug("Setting {} to {}", CREDENTIAL_PROVIDER_PATH, joined);
conf.set(CREDENTIAL_PROVIDER_PATH, joined, "patch of "
+ OBSConstants.OBS_SECURITY_CREDENTIAL_PROVIDER_PATH);
}
} | 3.68 |