name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_PrettyPrinter_humanReadableSizeToBytes | /**
* Convert a human readable size to bytes. Examples of human readable sizes are: 50 GB, 20 MB,
* 1 KB, 25000 B, etc. The units of size can be specified in uppercase or lowercase. Also, if a
* single number is specified without any size unit, it is assumed to be in bytes.
* @param humanReadableSize human readable size
* @return value in bytes
*/
private static long humanReadableSizeToBytes(final String humanReadableSize)
throws HBaseException {
if (humanReadableSize == null) {
return -1;
}
try {
return Long.parseLong(humanReadableSize);
} catch (NumberFormatException ex) {
LOG.debug("Given size value is not a number, parsing for human readable format");
}
String tb = null;
String gb = null;
String mb = null;
String kb = null;
String b = null;
String expectedSize = null;
long size = 0;
Matcher matcher = PrettyPrinter.SIZE_PATTERN.matcher(humanReadableSize);
if (matcher.matches()) {
expectedSize = matcher.group(2);
tb = matcher.group(4);
gb = matcher.group(6);
mb = matcher.group(8);
kb = matcher.group(10);
b = matcher.group(12);
}
size += tb != null ? Long.parseLong(tb) * HConstants.TB_IN_BYTES : 0;
size += gb != null ? Long.parseLong(gb) * HConstants.GB_IN_BYTES : 0;
size += mb != null ? Long.parseLong(mb) * HConstants.MB_IN_BYTES : 0;
size += kb != null ? Long.parseLong(kb) * HConstants.KB_IN_BYTES : 0;
size += b != null ? Long.parseLong(b) : 0;
if (expectedSize != null && Long.parseLong(expectedSize) != size) {
throw new HBaseException(
"Malformed size string: values in byte and human readable" + "format do not match");
}
return size;
} | 3.68 |
hbase_TableDescriptorBuilder_setRegionReplication | /**
* Sets the number of replicas per region.
* @param regionReplication the replication factor per region
* @return the modifyable TD
*/
public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
} | 3.68 |
flink_SinkTransformationTranslator_adjustTransformations | /**
* Since users may set a specific parallelism on sub topologies, we have to pay attention to
* the priority of parallelism at different levels, i.e. sub-topology customized
* parallelism > sinkTransformation customized parallelism > environment customized
* parallelism. In order to satisfy this rule and keep these customized parallelism values,
* the environment parallelism is set to {@link ExecutionConfig#PARALLELISM_DEFAULT}
* before adjusting transformations. Sub-transformations constructed after that will have
* either the default value or a customized value. In this way, any customized value can be
* distinguished from the default value and, for any sub-transformation with the default
* parallelism value, we can then let it inherit the parallelism value from the
* enclosing sinkTransformation. After the adjustment of transformations is complete, the
* environment parallelism is restored to its original value, keeping the
* customized parallelism value at the environment level.
*/
private <I, R> R adjustTransformations(
DataStream<I> inputStream,
Function<DataStream<I>, R> action,
boolean isExpandedTopology,
boolean supportsConcurrentExecutionAttempts) {
// Reset the environment parallelism temporarily before adjusting transformations,
// we can therefore be aware of any customized parallelism of the sub topology
// set by users during the adjustment.
executionEnvironment.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT);
int numTransformsBefore = executionEnvironment.getTransformations().size();
R result = action.apply(inputStream);
List<Transformation<?>> transformations = executionEnvironment.getTransformations();
List<Transformation<?>> expandedTransformations =
transformations.subList(numTransformsBefore, transformations.size());
final CustomSinkOperatorUidHashes operatorsUidHashes =
transformation.getSinkOperatorsUidHashes();
for (Transformation<?> subTransformation : expandedTransformations) {
String subUid = subTransformation.getUid();
if (isExpandedTopology && subUid != null && !subUid.isEmpty()) {
checkState(
transformation.getUid() != null && !transformation.getUid().isEmpty(),
"Sink "
+ transformation.getName()
+ " requires to set a uid since its customized topology"
+ " has set uid for some operators.");
}
// Set the operator uid hashes to support stateful upgrades without prior uids
setOperatorUidHashIfPossible(
subTransformation, WRITER_NAME, operatorsUidHashes.getWriterUidHash());
setOperatorUidHashIfPossible(
subTransformation,
COMMITTER_NAME,
operatorsUidHashes.getCommitterUidHash());
setOperatorUidHashIfPossible(
subTransformation,
StandardSinkTopologies.GLOBAL_COMMITTER_TRANSFORMATION_NAME,
operatorsUidHashes.getGlobalCommitterUidHash());
concatUid(
subTransformation,
Transformation::getUid,
Transformation::setUid,
subTransformation.getName());
concatProperty(
subTransformation,
Transformation::getCoLocationGroupKey,
Transformation::setCoLocationGroupKey);
concatProperty(subTransformation, Transformation::getName, Transformation::setName);
concatProperty(
subTransformation,
Transformation::getDescription,
Transformation::setDescription);
// handle coLocationGroupKey.
String coLocationGroupKey = transformation.getCoLocationGroupKey();
if (coLocationGroupKey != null
&& subTransformation.getCoLocationGroupKey() == null) {
subTransformation.setCoLocationGroupKey(coLocationGroupKey);
}
Optional<SlotSharingGroup> ssg = transformation.getSlotSharingGroup();
if (ssg.isPresent() && !subTransformation.getSlotSharingGroup().isPresent()) {
subTransformation.setSlotSharingGroup(ssg.get());
}
// remember that the environment parallelism has been set to be default
// at the beginning. SubTransformations, whose parallelism has been
// customized, will skip this part. The customized parallelism value set by user
// will therefore be kept.
if (subTransformation.getParallelism() == ExecutionConfig.PARALLELISM_DEFAULT) {
// In this case, the subTransformation does not contain any customized
// parallelism value and will therefore inherit the parallelism value
// from the sinkTransformation.
subTransformation.setParallelism(transformation.getParallelism());
}
if (subTransformation.getMaxParallelism() < 0
&& transformation.getMaxParallelism() > 0) {
subTransformation.setMaxParallelism(transformation.getMaxParallelism());
}
if (subTransformation instanceof PhysicalTransformation) {
PhysicalTransformation<?> physicalSubTransformation =
(PhysicalTransformation<?>) subTransformation;
if (transformation.getChainingStrategy() != null) {
physicalSubTransformation.setChainingStrategy(
transformation.getChainingStrategy());
}
// overrides the supportsConcurrentExecutionAttempts of transformation because
// it's not allowed to specify fine-grained concurrent execution attempts yet
physicalSubTransformation.setSupportsConcurrentExecutionAttempts(
supportsConcurrentExecutionAttempts);
}
}
// Restore the previous parallelism of the environment before adjusting transformations
if (environmentParallelism.isPresent()) {
executionEnvironment.getConfig().setParallelism(environmentParallelism.get());
} else {
executionEnvironment.getConfig().resetParallelism();
}
return result;
} | 3.68 |
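A minimal sketch of just the parallelism-priority rule described in the comment above (sub topology > sink transformation > environment). This is not Flink's API, only an illustration of the decision logic, with PARALLELISM_DEFAULT standing in for "not customized by the user".

```java
public class ParallelismPrioritySketch {
    static final int PARALLELISM_DEFAULT = -1; // mirrors ExecutionConfig.PARALLELISM_DEFAULT

    // Returns the parallelism a sub-transformation should end up with.
    static int resolve(int subTopologyParallelism, int sinkParallelism, int environmentParallelism) {
        if (subTopologyParallelism != PARALLELISM_DEFAULT) {
            return subTopologyParallelism;          // customized on the sub topology wins
        }
        if (sinkParallelism != PARALLELISM_DEFAULT) {
            return sinkParallelism;                 // otherwise inherit from the sink transformation
        }
        return environmentParallelism;              // otherwise fall back to the environment
    }

    public static void main(String[] args) {
        System.out.println(resolve(PARALLELISM_DEFAULT, 4, 8)); // 4: inherited from the sink
        System.out.println(resolve(2, 4, 8));                   // 2: sub-topology setting wins
    }
}
```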
flink_AbstractStreamOperator_getInternalTimerService | /**
* Returns an {@link InternalTimerService} that can be used to query current processing time and
* event time and to set timers. An operator can have several timer services, where each has its
* own namespace serializer. Timer services are differentiated by the string key that is given
* when requesting them; if you call this method with the same key multiple times you will get
* the same timer service instance in subsequent requests.
*
* <p>Timers are always scoped to a key, the currently active key of a keyed stream operation.
* When a timer fires, this key will also be set as the currently active key.
*
* <p>Each timer has attached metadata, the namespace. Different timer services can have a
* different namespace type. If you don't need namespace differentiation you can use {@link
* VoidNamespaceSerializer} as the namespace serializer.
*
* @param name The name of the requested timer service. If no service exists under the given
* name a new one will be created and returned.
* @param namespaceSerializer {@code TypeSerializer} for the timer namespace.
* @param triggerable The {@link Triggerable} that should be invoked when timers fire
* @param <N> The type of the timer namespace.
*/
public <K, N> InternalTimerService<N> getInternalTimerService(
String name, TypeSerializer<N> namespaceSerializer, Triggerable<K, N> triggerable) {
if (timeServiceManager == null) {
throw new RuntimeException("The timer service has not been initialized.");
}
@SuppressWarnings("unchecked")
InternalTimeServiceManager<K> keyedTimeServiceHandler =
(InternalTimeServiceManager<K>) timeServiceManager;
KeyedStateBackend<K> keyedStateBackend = getKeyedStateBackend();
checkState(keyedStateBackend != null, "Timers can only be used on keyed operators.");
return keyedTimeServiceHandler.getInternalTimerService(
name, keyedStateBackend.getKeySerializer(), namespaceSerializer, triggerable);
} | 3.68 |
dubbo_AbstractAnnotationBeanPostProcessor_findAnnotatedMethodMetadata | /**
* Finds {@link InjectionMetadata.InjectedElement} Metadata from annotated methods
*
* @param beanClass The {@link Class} of Bean
* @return non-null {@link List}
*/
private List<AbstractAnnotationBeanPostProcessor.AnnotatedMethodElement> findAnnotatedMethodMetadata(
final Class<?> beanClass) {
final List<AbstractAnnotationBeanPostProcessor.AnnotatedMethodElement> elements = new LinkedList<>();
ReflectionUtils.doWithMethods(beanClass, method -> {
Method bridgedMethod = findBridgedMethod(method);
if (!isVisibilityBridgeMethodPair(method, bridgedMethod)) {
return;
}
if (method.getAnnotation(Bean.class) != null) {
// DO NOT inject to Java-config class's @Bean method
return;
}
for (Class<? extends Annotation> annotationType : getAnnotationTypes()) {
AnnotationAttributes attributes = AnnotationUtils.getAnnotationAttributes(
bridgedMethod, annotationType, getEnvironment(), true, true);
if (attributes != null && method.equals(ClassUtils.getMostSpecificMethod(method, beanClass))) {
if (Modifier.isStatic(method.getModifiers())) {
throw new IllegalStateException("When using @" + annotationType.getName()
+ " to inject interface proxy, it is not supported on static methods: " + method);
}
if (method.getParameterTypes().length != 1) {
throw new IllegalStateException("When using @" + annotationType.getName()
+ " to inject interface proxy, the method must have only one parameter: " + method);
}
PropertyDescriptor pd = BeanUtils.findPropertyForMethod(bridgedMethod, beanClass);
elements.add(new AnnotatedMethodElement(method, pd, attributes));
}
}
});
return elements;
} | 3.68 |
hadoop_RouterClientRMService_init | /**
* Initializes the wrapper with the specified parameters.
*
* @param interceptor the first interceptor in the pipeline
*/
public synchronized void init(ClientRequestInterceptor interceptor) {
this.rootInterceptor = interceptor;
} | 3.68 |
framework_TreeGrid_fireCollapseEvent | /**
* Emit a collapse event.
*
* @param item
* the item that was collapsed
* @param userOriginated
* whether the collapse was triggered by a user interaction or
* the server
*/
private void fireCollapseEvent(T item, boolean userOriginated) {
fireEvent(new CollapseEvent<>(this, item, userOriginated));
} | 3.68 |
flink_Configuration_getBytes | /**
* Returns the value associated with the given key as a byte array.
*
* @param key The key pointing to the associated value.
* @param defaultValue The default value which is returned in case there is no value associated
* with the given key.
* @return the (default) value associated with the given key.
*/
public byte[] getBytes(String key, byte[] defaultValue) {
return getRawValue(key)
.map(
o -> {
if (o.getClass().equals(byte[].class)) {
return (byte[]) o;
} else {
throw new IllegalArgumentException(
String.format(
"Configuration cannot evaluate value %s as a byte[] value",
o));
}
})
.orElse(defaultValue);
} | 3.68 |
framework_FilesystemContainer_getContainerProperty | /**
* Gets the specified property of the specified file Item. The available
* file properties are "Name", "Icon", "Size" and "Last Modified". If propertyId is
* not one of those, <code>null</code> is returned.
*
* @param itemId
* the ID of the file whose property is requested.
* @param propertyId
* the property's ID.
* @return the requested property's value, or <code>null</code>
*/
@Override
public Property getContainerProperty(Object itemId, Object propertyId) {
if (!(itemId instanceof File)) {
return null;
}
if (propertyId.equals(PROPERTY_NAME)) {
return new MethodProperty<Object>(getType(propertyId),
new FileItem((File) itemId), FILEITEM_NAME, null);
}
if (propertyId.equals(PROPERTY_ICON)) {
return new MethodProperty<Object>(getType(propertyId),
new FileItem((File) itemId), FILEITEM_ICON, null);
}
if (propertyId.equals(PROPERTY_SIZE)) {
return new MethodProperty<Object>(getType(propertyId),
new FileItem((File) itemId), FILEITEM_SIZE, null);
}
if (propertyId.equals(PROPERTY_LASTMODIFIED)) {
return new MethodProperty<Object>(getType(propertyId),
new FileItem((File) itemId), FILEITEM_LASTMODIFIED, null);
}
return null;
} | 3.68 |
hbase_SnapshotManager_restoreOrCloneSnapshot | /**
* Restore or Clone the specified snapshot
* @param nonceKey unique identifier to prevent duplicated RPC
*/
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey,
final boolean restoreAcl, String customSFT) throws IOException {
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
// check if the snapshot exists
if (!fs.exists(snapshotDir)) {
LOG.error("A Snapshot named '" + reqSnapshot.getName() + "' does not exist.");
throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(reqSnapshot));
}
// Get snapshot info from file system. The reqSnapshot is a "fake" snapshotInfo with
// just the snapshot "name" and table name to restore. It does not contain the "real" snapshot
// information.
SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest =
SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot);
TableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
TableName tableName = TableName.valueOf(reqSnapshot.getTable());
// sanity check the new table descriptor
TableDescriptorChecker.sanityCheck(master.getConfiguration(), snapshotTableDesc);
// stop tracking "abandoned" handlers
cleanupSentinels();
// Verify snapshot validity
SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
// Execute the restore/clone operation
long procId;
if (master.getTableDescriptors().exists(tableName)) {
procId =
restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl);
} else {
procId = cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey,
restoreAcl, customSFT);
}
return procId;
} | 3.68 |
morf_DataValueLookup_getLocalDate | /**
* Gets the value as a Joda {@link LocalDate}. Will attempt conversion where possible
* and throw a suitable conversion exception if the conversion fails.
* May return {@code null} if the value is not set or is explicitly set
* to {@code null}.
*
* @param name The column name.
* @return The value.
*/
public default org.joda.time.LocalDate getLocalDate(String name) {
String value = getValue(name);
return value == null ? null : org.joda.time.LocalDate.parse(value, DataValueLookupHelper.FROM_YYYY_MM_DD);
} | 3.68 |
dubbo_FrameworkExecutorRepository_nextScheduledExecutor | /**
* Returns a scheduler from the scheduler list; call this method whenever you need a scheduler for a cron job.
* If your cron job cannot tolerate the possible schedule delay caused by sharing the same scheduler, please consider defining a dedicated one.
*
* @return ScheduledExecutorService
*/
public ScheduledExecutorService nextScheduledExecutor() {
return scheduledExecutors.pollItem();
} | 3.68 |
pulsar_LookupProxyHandler_getBrokerServiceUrl | /**
* Get the default broker service URL, or discover an available broker.
**/
private String getBrokerServiceUrl(long clientRequestId) {
if (StringUtils.isNotBlank(brokerServiceURL)) {
return brokerServiceURL;
}
ServiceLookupData availableBroker;
try {
availableBroker = discoveryProvider.nextBroker();
} catch (Exception e) {
log.warn("[{}] Failed to get next active broker {}", clientAddress, e.getMessage(), e);
writeAndFlush(Commands.newError(
clientRequestId, ServerError.ServiceNotReady, e.getMessage()
));
return null;
}
return this.connectWithTLS ? availableBroker.getPulsarServiceUrlTls() : availableBroker.getPulsarServiceUrl();
} | 3.68 |
hbase_SaslServerAuthenticationProvider_init | /**
* Allows implementations to initialize themselves, prior to creating a server.
*/
default void init(Configuration conf) throws IOException {
} | 3.68 |
hudi_FormatUtils_getRowKind | /**
* Returns the RowKind of the given record, never null.
* Returns RowKind.INSERT when the given field value is not found.
*/
private static RowKind getRowKind(IndexedRecord record, int index) {
Object val = record.get(index);
if (val == null) {
return RowKind.INSERT;
}
final HoodieOperation operation = HoodieOperation.fromName(val.toString());
if (HoodieOperation.isInsert(operation)) {
return RowKind.INSERT;
} else if (HoodieOperation.isUpdateBefore(operation)) {
return RowKind.UPDATE_BEFORE;
} else if (HoodieOperation.isUpdateAfter(operation)) {
return RowKind.UPDATE_AFTER;
} else if (HoodieOperation.isDelete(operation)) {
return RowKind.DELETE;
} else {
throw new AssertionError();
}
} | 3.68 |
flink_SimpleCounter_inc | /**
* Increment the current count by the given value.
*
* @param n value to increment the current count by
*/
@Override
public void inc(long n) {
count += n;
} | 3.68 |
streampipes_SwingingDoorTrendingFilter_forward | /**
* Outputs the most recently filtered characteristic event to the collector.
*
* @param collector the event collector
*/
public void forward(SpOutputCollector collector) {
collector.collect(lastStoredEvent);
} | 3.68 |
framework_DropEvent_getDropEffect | /**
* Get the desired dropEffect for the drop event.
* <p>
* <em>NOTE: Currently you cannot trust this to work on all browsers!
* https://github.com/vaadin/framework/issues/9247 For Chrome & IE11 it is
* never set and always returns {@link DropEffect#NONE} even though the drop
* succeeded!</em>
*
* @return the drop effect
*/
public DropEffect getDropEffect() {
return dropEffect;
} | 3.68 |
morf_SqlDialect_innerJoinKeyword | /**
* @param stmt The statement.
* @return The keyword to use for an inner join on the specified statement. This only differs
* in response to hints.
*/
protected String innerJoinKeyword(@SuppressWarnings("unused") AbstractSelectStatement<?> stmt) {
return "INNER JOIN";
} | 3.68 |
flink_RestfulGateway_triggerSavepoint | /**
* Triggers a savepoint with the given savepoint directory as a target, returning a future that
* completes when the operation is started.
*
* @param operationKey the key of the operation, for deduplication purposes
* @param targetDirectory Target directory for the savepoint.
* @param formatType Binary format of the savepoint.
* @param savepointMode context of the savepoint operation
* @param timeout Timeout for the asynchronous operation
* @return Future which is completed once the operation is triggered successfully
*/
default CompletableFuture<Acknowledge> triggerSavepoint(
AsynchronousJobOperationKey operationKey,
String targetDirectory,
SavepointFormatType formatType,
TriggerSavepointMode savepointMode,
@RpcTimeout Time timeout) {
throw new UnsupportedOperationException();
} | 3.68 |
hadoop_Event_getAcls | /**
* The full set of ACLs currently associated with this file or directory.
* May be null if all ACLs were removed.
*/
public List<AclEntry> getAcls() {
return acls;
} | 3.68 |
hbase_AverageIntervalRateLimiter_setNextRefillTime | // This method is strictly for testing purposes only
@Override
public void setNextRefillTime(long nextRefillTime) {
this.nextRefillTime = nextRefillTime;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithMultipleTableAlias | /**
* Tests a select with table aliases.
*/
@Test
public void testSelectWithMultipleTableAlias() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(DATE_FIELD).as("aliasDate"))
.from(new TableReference(TEST_TABLE).as("T"))
.innerJoin(new TableReference(ALTERNATE_TABLE).as("A"), eq(new FieldReference(new TableReference("T"), STRING_FIELD), new FieldReference(new TableReference("A"), STRING_FIELD)));
String expectedSql = "SELECT stringField, intField, dateField AS aliasDate FROM " + tableName(TEST_TABLE) + " T INNER JOIN " + tableName(ALTERNATE_TABLE) + " A ON (T.stringField = A.stringField)";
assertEquals("Select scripts are not the same", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_SliceAssigners_withOffset | /** Creates a new {@link CumulativeSliceAssigner} with a new specified offset. */
public CumulativeSliceAssigner withOffset(Duration offset) {
return new CumulativeSliceAssigner(
rowtimeIndex, shiftTimeZone, maxSize, step, offset.toMillis());
} | 3.68 |
rocketmq-connect_WorkerSourceTask_recordSent | /**
* Called after a record was successfully sent; commits the task record.
*
* @param preTransformRecord the connect record before transformation
* @param sourceMessage the message that was sent
* @param result the send result returned by the producer
*/
private void recordSent(
ConnectRecord preTransformRecord,
Message sourceMessage,
SendResult result) {
commitTaskRecord(preTransformRecord, result);
} | 3.68 |
framework_Page_getBrowserWindowWidth | /**
* Gets the last known width of the browser window in which this UI resides.
*
* @return the browser window width in pixels
*/
public int getBrowserWindowWidth() {
return browserWindowWidth;
} | 3.68 |
hadoop_PreemptionCandidatesSelector_sortContainers | /**
* Compare by reversed priority order first, and then reversed containerId
* order.
*
* @param containers list of containers to sort for.
*/
@VisibleForTesting
static void sortContainers(List<RMContainer> containers) {
Collections.sort(containers, new Comparator<RMContainer>() {
@Override
public int compare(RMContainer a, RMContainer b) {
int schedKeyComp = b.getAllocatedSchedulerKey()
.compareTo(a.getAllocatedSchedulerKey());
if (schedKeyComp != 0) {
return schedKeyComp;
}
return b.getContainerId().compareTo(a.getContainerId());
}
});
} | 3.68 |
hbase_KeyValue_write | /**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
* @param kv the KeyValue on which write is being requested
* @param out OutputStream to write keyValue to
* @return Length written on stream
* @throws IOException if any IO error happen
* @see #create(DataInput) for the inverse function
*/
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writables write used to serialize KVs. Need to figure out a way to make it
// work for all implementations.
int length = kv.getLength();
out.writeInt(length);
out.write(kv.getBuffer(), kv.getOffset(), length);
return (long) length + Bytes.SIZEOF_INT;
} | 3.68 |
pulsar_NamespacesBase_internalGetReplicatorDispatchRate | /**
* Base method for getReplicatorDispatchRate v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalGetReplicatorDispatchRate(AsyncResponse asyncResponse) {
validateNamespacePolicyOperationAsync(namespaceName, PolicyName.REPLICATION_RATE, PolicyOperation.READ)
.thenCompose(__ -> namespaceResources().getPoliciesAsync(namespaceName))
.thenApply(policiesOpt -> {
if (!policiesOpt.isPresent()) {
throw new RestException(Response.Status.NOT_FOUND, "Namespace policies does not exist");
}
String clusterName = pulsar().getConfiguration().getClusterName();
return policiesOpt.get().replicatorDispatchRate.get(clusterName);
}).thenAccept(asyncResponse::resume)
.exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to get replicator dispatch-rate configured for the namespace {}",
clientAppId(), namespaceName, ex);
return null;
});
} | 3.68 |
hudi_CkpMetadata_commitInstant | /**
* Add a checkpoint commit message.
*
* @param instant The committed instant
*/
public void commitInstant(String instant) {
Path path = fullPath(CkpMessage.getFileName(instant, CkpMessage.State.COMPLETED));
try {
fs.createNewFile(path);
} catch (IOException e) {
throw new HoodieException("Exception while adding checkpoint commit metadata for instant: " + instant, e);
}
} | 3.68 |
pulsar_NonPersistentSubscription_doUnsubscribe | /**
* Handles the unsubscribe command from the client API. Checks with the dispatcher whether this
* consumer can proceed with the unsubscribe.
*
* @param consumer consumer object that is initiating the unsubscribe operation
* @return CompletableFuture indicating the completion of the unsubscribe operation
*/
@Override
public CompletableFuture<Void> doUnsubscribe(Consumer consumer) {
CompletableFuture<Void> future = new CompletableFuture<>();
try {
if (dispatcher.canUnsubscribe(consumer)) {
consumer.close();
return delete();
}
future.completeExceptionally(
new ServerMetadataException("Unconnected or shared consumer attempting to unsubscribe"));
} catch (BrokerServiceException e) {
log.warn("Error removing consumer {}", consumer);
future.completeExceptionally(e);
}
return future;
} | 3.68 |
morf_CompositeSchema_isEmptyDatabase | /**
* @see org.alfasoftware.morf.metadata.Schema#isEmptyDatabase()
*/
@Override
public boolean isEmptyDatabase() {
for (Schema schema : delegates)
if (!schema.isEmptyDatabase())
return false;
return true;
} | 3.68 |
flink_CoreOptions_fileSystemConnectionLimitTimeout | /**
* If any connection limit is configured, this option can be optionally set to define after
* which time (in milliseconds) stream opening fails with a timeout exception if no stream
* connection becomes available. The timeout is unlimited by default.
*/
public static ConfigOption<Long> fileSystemConnectionLimitTimeout(String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.timeout").longType().defaultValue(0L);
} | 3.68 |
flink_CrossOperator_projectTuple8 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7>
ProjectCross<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType =
new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes);
return new ProjectCross<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hbase_HRegionServer_walRollRequestFinished | /**
* For testing
* @return whether all wal roll request finished for this regionserver
*/
@InterfaceAudience.Private
public boolean walRollRequestFinished() {
return this.walRoller.walRollFinished();
} | 3.68 |
flink_RemoteInputChannel_onSenderBacklog | /**
* Receives the backlog from the producer's buffer response. If the number of available buffers
* is less than backlog + initialCredit, it will request floating buffers from the buffer
* manager, and then notify unannounced credits to the producer.
*
* @param backlog The number of unsent buffers in the producer's sub partition.
*/
public void onSenderBacklog(int backlog) throws IOException {
notifyBufferAvailable(bufferManager.requestFloatingBuffers(backlog + initialCredit));
} | 3.68 |
framework_Notification_getNotification | /**
* Gets the Notification.
*
* @return The Notification
*/
public Notification getNotification() {
return (Notification) getSource();
} | 3.68 |
framework_GridLayout_getArea | /**
* Gets the area that is out of bounds.
*
* @return the area that is out of bounds.
*/
public Area getArea() {
return areaOutOfBounds;
} | 3.68 |
hbase_QuotaSettings_buildFromProto | /**
* Converts the protocol buffer request into a QuotaSetting POJO. Arbitrarily enforces that the
* request only contain one "limit", despite the message allowing multiple. The public API does
* not allow such use of the message.
* @param request The protocol buffer request.
* @return A {@link QuotaSettings} POJO.
*/
@InterfaceAudience.Private
public static QuotaSettings buildFromProto(SetQuotaRequest request) {
String username = null;
if (request.hasUserName()) {
username = request.getUserName();
}
TableName tableName = null;
if (request.hasTableName()) {
tableName = ProtobufUtil.toTableName(request.getTableName());
}
String namespace = null;
if (request.hasNamespace()) {
namespace = request.getNamespace();
}
String regionServer = null;
if (request.hasRegionServer()) {
regionServer = request.getRegionServer();
}
if (request.hasBypassGlobals()) {
// Make sure we don't have either of the two below limits also included
if (request.hasSpaceLimit() || request.hasThrottle()) {
throw new IllegalStateException(
"SetQuotaRequest has multiple limits: " + TextFormat.shortDebugString(request));
}
return new QuotaGlobalsSettingsBypass(username, tableName, namespace, regionServer,
request.getBypassGlobals());
} else if (request.hasSpaceLimit()) {
// Make sure we don't have the below limit as well
if (request.hasThrottle()) {
throw new IllegalStateException(
"SetQuotaRequests has multiple limits: " + TextFormat.shortDebugString(request));
}
// Sanity check on the pb received.
if (!request.getSpaceLimit().hasQuota()) {
throw new IllegalArgumentException("SpaceLimitRequest is missing the expected SpaceQuota.");
}
return QuotaSettingsFactory.fromSpace(tableName, namespace,
request.getSpaceLimit().getQuota());
} else if (request.hasThrottle()) {
return new ThrottleSettings(username, tableName, namespace, regionServer,
request.getThrottle());
} else {
throw new IllegalStateException("Unhandled SetRequestRequest state");
}
} | 3.68 |
hibernate-validator_ConstraintViolationImpl_getMessageParameters | /**
* @return the message parameters added using {@link HibernateConstraintValidatorContext#addMessageParameter(String, Object)}
*/
public Map<String, Object> getMessageParameters() {
return messageParameters;
} | 3.68 |
pulsar_TopicEventsDispatcher_notifyOnCompletion | /**
* Dispatches SUCCESS/FAILURE notification to all currently added listeners on completion of the future.
* @param future
* @param topic
* @param event
* @param <T>
* @return future of a new completion stage
*/
public <T> CompletableFuture<T> notifyOnCompletion(CompletableFuture<T> future,
String topic,
TopicEventsListener.TopicEvent event) {
return future.whenComplete((r, ex) -> notify(topic,
event,
ex == null ? TopicEventsListener.EventStage.SUCCESS : TopicEventsListener.EventStage.FAILURE,
ex));
} | 3.68 |
hbase_BulkLoadHFilesTool_createExecutorService | // Initialize a thread pool
private ExecutorService createExecutorService() {
ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
new LinkedBlockingQueue<>(),
new ThreadFactoryBuilder().setNameFormat("BulkLoadHFilesTool-%1$d").setDaemon(true).build());
pool.allowCoreThreadTimeOut(true);
return pool;
} | 3.68 |
flink_TemplateUtils_extractProcedureLocalFunctionTemplates | /** Retrieve local templates from procedure method. */
static Set<FunctionTemplate> extractProcedureLocalFunctionTemplates(
DataTypeFactory typeFactory, Method method) {
return asFunctionTemplatesForProcedure(
typeFactory, collectAnnotationsOfMethod(ProcedureHint.class, method));
} | 3.68 |
flink_OpaqueMemoryResource_close | /** Releases this resource. This method is idempotent. */
@Override
public void close() throws Exception {
if (closed.compareAndSet(false, true)) {
disposer.run();
}
} | 3.68 |
framework_VNativeSelect_getSelect | /**
* @return the root select widget
*/
public ListBox getSelect() {
return getOptionsContainer();
} | 3.68 |
morf_MorfModule_configure | /**
* @see com.google.inject.AbstractModule#configure()
*/
@Override
protected void configure() {
Multibinder.newSetBinder(binder(), UpgradeScriptAddition.class);
Multibinder<TableContribution> tableMultibinder = Multibinder.newSetBinder(binder(), TableContribution.class);
tableMultibinder.addBinding().to(DatabaseUpgradeTableContribution.class);
} | 3.68 |
hadoop_RLESparseResourceAllocation_shift | /**
* This method shifts all the timestamp of the {@link Resource} entries by the
* specified "delta".
*
* @param delta the time by which to shift the {@link Resource} allocations
*/
public void shift(long delta) {
writeLock.lock();
try {
TreeMap<Long, Resource> newCum = new TreeMap<>();
long start;
for (Map.Entry<Long, Resource> entry : cumulativeCapacity.entrySet()) {
if (delta > 0) {
start = (entry.getKey() == Long.MAX_VALUE) ? Long.MAX_VALUE
: entry.getKey() + delta;
} else {
start = (entry.getKey() == Long.MIN_VALUE) ? Long.MIN_VALUE
: entry.getKey() + delta;
}
newCum.put(start, entry.getValue());
}
cumulativeCapacity = newCum;
} finally {
writeLock.unlock();
}
} | 3.68 |
hbase_Table_getScanner | /**
* Gets a scanner on the current table for the given family and qualifier.
* @param family The column family to scan.
* @param qualifier The column qualifier to scan.
* @return A scanner.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
hbase_ResponseConverter_buildActionResult | /**
* Wrap a client result in an action result.
* @return an action result builder
*/
public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) {
ResultOrException.Builder builder = ResultOrException.newBuilder();
if (r != null) builder.setResult(r);
return builder;
} | 3.68 |
hudi_TableChange_checkColModifyIsLegal | // Modifying hudi meta columns is prohibited
protected void checkColModifyIsLegal(String colNeedToModify) {
if (HoodieRecord.HOODIE_META_COLUMNS.stream().anyMatch(f -> f.equalsIgnoreCase(colNeedToModify))) {
throw new IllegalArgumentException(String.format("cannot modify hudi meta col: %s", colNeedToModify));
}
} | 3.68 |
flink_TwoPhaseCommitSinkFunction_setTransactionTimeout | /**
* Sets the transaction timeout. Setting only the transaction timeout has no effect in itself.
*
* @param transactionTimeout The transaction timeout in ms.
* @see #ignoreFailuresAfterTransactionTimeout()
* @see #enableTransactionTimeoutWarnings(double)
*/
protected TwoPhaseCommitSinkFunction<IN, TXN, CONTEXT> setTransactionTimeout(
long transactionTimeout) {
checkArgument(transactionTimeout >= 0, "transactionTimeout must not be negative");
this.transactionTimeout = transactionTimeout;
return this;
} | 3.68 |
hadoop_NMTokenSecretManagerInRM_activateNextMasterKey | /**
* Activate the new master-key
*/
@Private
public void activateNextMasterKey() {
super.writeLock.lock();
try {
LOG.info("Activating next master key with id: "
+ this.nextMasterKey.getMasterKey().getKeyId());
this.currentMasterKey = this.nextMasterKey;
this.nextMasterKey = null;
clearApplicationNMTokenKeys();
} finally {
super.writeLock.unlock();
}
} | 3.68 |
flink_StringUtils_hexStringToByte | /**
* Given a hex string, this will return the byte array corresponding to the string.
*
* @param hex the hex String
* @return a byte array holding the binary representation of the given hex string. The size of
*     the byte array is therefore hex.length()/2
*/
public static byte[] hexStringToByte(final String hex) {
final byte[] bts = new byte[hex.length() / 2];
for (int i = 0; i < bts.length; i++) {
bts[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16);
}
return bts;
} | 3.68 |
hadoop_EntityGroupFSTimelineStoreMetrics_addSummaryLogReadTime | // Summary data related
public void addSummaryLogReadTime(long msec) {
summaryLogRead.add(msec);
} | 3.68 |
flink_Tuple18_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple18)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple18 tuple = (Tuple18) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
return true;
} | 3.68 |
morf_MySqlDialect_indexDropStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#indexDropStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Index)
*/
@Override
public Collection<String> indexDropStatements(Table table, Index indexToBeRemoved) {
StringBuilder statement = new StringBuilder();
statement.append("ALTER TABLE `")
.append(table.getName())
.append("` DROP INDEX `")
.append(indexToBeRemoved.getName())
.append("`");
return Arrays.asList(statement.toString());
} | 3.68 |
pulsar_ModularLoadManagerImpl_updateAllBrokerData | // As the leader broker, update the broker data map in loadData by querying metadata store for the broker data put
// there by each broker via updateLocalBrokerData.
private void updateAllBrokerData() {
final Set<String> activeBrokers = getAvailableBrokers();
final Map<String, BrokerData> brokerDataMap = loadData.getBrokerData();
for (String broker : activeBrokers) {
try {
String key = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, broker);
Optional<LocalBrokerData> localData = brokersData.readLock(key).get();
if (!localData.isPresent()) {
brokerDataMap.remove(broker);
log.info("[{}] Broker load report is not present", broker);
continue;
}
if (brokerDataMap.containsKey(broker)) {
// Replace previous local broker data.
brokerDataMap.get(broker).setLocalData(localData.get());
} else {
// Initialize BrokerData object for previously unseen
// brokers.
brokerDataMap.put(broker, new BrokerData(localData.get()));
}
} catch (Exception e) {
log.warn("Error reading broker data from cache for broker - [{}], [{}]", broker, e.getMessage());
}
}
// Remove obsolete brokers.
for (final String broker : brokerDataMap.keySet()) {
if (!activeBrokers.contains(broker)) {
brokerDataMap.remove(broker);
}
}
} | 3.68 |
pulsar_AbstractCmdConsume_interpretMessage | /**
* Interprets the message to create a string representation.
*
* @param message
* The message to interpret
* @param displayHex
* Whether to display BytesMessages in hexdump style, ignored for simple text messages
* @return String representation of the message
*/
protected String interpretMessage(Message<?> message, boolean displayHex) throws IOException {
StringBuilder sb = new StringBuilder();
String properties = Arrays.toString(message.getProperties().entrySet().toArray());
String data;
Object value = message.getValue();
if (value == null) {
data = "null";
} else if (value instanceof byte[]) {
byte[] msgData = (byte[]) value;
data = interpretByteArray(displayHex, msgData);
} else if (value instanceof GenericObject) {
Map<String, Object> asMap = genericObjectToMap((GenericObject) value, displayHex);
data = asMap.toString();
} else if (value instanceof ByteBuffer) {
data = new String(getBytes((ByteBuffer) value));
} else {
data = value.toString();
}
String key = null;
if (message.hasKey()) {
key = message.getKey();
}
sb.append("key:[").append(key).append("], ");
if (!properties.isEmpty()) {
sb.append("properties:").append(properties).append(", ");
}
sb.append("content:").append(data);
return sb.toString();
} | 3.68 |
framework_AbstractColorPicker_colorChanged | /**
* Fired when a color change event occurs.
*
* @param event
* The color change event
*/
protected void colorChanged(ColorChangeEvent event) {
setColor(event.getColor());
fireColorChanged();
} | 3.68 |
streampipes_ImageExtractor_process | /**
* Fetches the given {@link URL} using {@link HTMLFetcher} and processes the retrieved HTML using
* the specified {@link BoilerpipeExtractor}.
*
* @param url The URL to fetch.
* @param extractor The extractor used to process the retrieved document.
* @return A List of enclosed {@link Image}s
* @throws BoilerpipeProcessingException
*/
public List<Image> process(final URL url, final BoilerpipeExtractor extractor)
throws IOException, BoilerpipeProcessingException, SAXException {
final HTMLDocument htmlDoc = HTMLFetcher.fetch(url);
final TextDocument doc = new BoilerpipeSAXInput(htmlDoc.toInputSource()).getTextDocument();
extractor.process(doc);
final InputSource is = htmlDoc.toInputSource();
return process(doc, is);
} | 3.68 |
hudi_AvroInternalSchemaConverter_visitInternalRecordToBuildAvroRecord | /**
* Converts hudi RecordType to Avro RecordType.
* This is an auxiliary function used by visitInternalSchemaToBuildAvroSchema.
*/
private static Schema visitInternalRecordToBuildAvroRecord(Types.RecordType recordType, List<Schema> fieldSchemas, String recordNameFallback) {
List<Types.Field> fields = recordType.fields();
List<Schema.Field> avroFields = new ArrayList<>();
for (int i = 0; i < fields.size(); i++) {
Types.Field f = fields.get(i);
Schema.Field field = new Schema.Field(f.name(), fieldSchemas.get(i), f.doc(), f.isOptional() ? JsonProperties.NULL_VALUE : null);
avroFields.add(field);
}
String recordName = Option.ofNullable(recordType.name()).orElse(recordNameFallback);
return Schema.createRecord(recordName, null, null, false, avroFields);
} | 3.68 |
framework_AbstractMedia_isHtmlContentAllowed | /**
* @return true if the alternative text ({@link #setAltText(String)}) is to
* be rendered as HTML.
*/
public boolean isHtmlContentAllowed() {
return getState(false).htmlContentAllowed;
} | 3.68 |
flink_HiveTypeUtil_toFlinkType | /**
* Convert Hive data type to a Flink data type.
*
* @param hiveType a Hive data type
* @return the corresponding Flink data type
*/
public static DataType toFlinkType(TypeInfo hiveType) {
checkNotNull(hiveType, "hiveType cannot be null");
switch (hiveType.getCategory()) {
case PRIMITIVE:
return toFlinkPrimitiveType((PrimitiveTypeInfo) hiveType);
case LIST:
ListTypeInfo listTypeInfo = (ListTypeInfo) hiveType;
return DataTypes.ARRAY(toFlinkType(listTypeInfo.getListElementTypeInfo()));
case MAP:
MapTypeInfo mapTypeInfo = (MapTypeInfo) hiveType;
return DataTypes.MAP(
toFlinkType(mapTypeInfo.getMapKeyTypeInfo()),
toFlinkType(mapTypeInfo.getMapValueTypeInfo()));
case STRUCT:
StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType;
List<String> names = structTypeInfo.getAllStructFieldNames();
List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
DataTypes.Field[] fields = new DataTypes.Field[names.size()];
for (int i = 0; i < fields.length; i++) {
fields[i] = DataTypes.FIELD(names.get(i), toFlinkType(typeInfos.get(i)));
}
return DataTypes.ROW(fields);
default:
throw new UnsupportedOperationException(
String.format("Flink doesn't support Hive data type %s yet.", hiveType));
}
} | 3.68 |
hbase_HBaseSaslRpcClient_saslConnect | /**
* Do client side SASL authentication with server via the given InputStream and OutputStream
* @param inS InputStream to use
* @param outS OutputStream to use
* @return true if connection is set up, or false if needs to switch to simple Auth.
*/
public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException {
DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream(outS));
try {
byte[] saslToken = getInitialResponse();
if (saslToken != null) {
outStream.writeInt(saslToken.length);
outStream.write(saslToken, 0, saslToken.length);
outStream.flush();
if (LOG.isDebugEnabled()) {
LOG.debug("Have sent token of size " + saslToken.length + " from initSASLContext.");
}
}
if (!isComplete()) {
readStatus(inStream);
int len = inStream.readInt();
if (len == SaslUtil.SWITCH_TO_SIMPLE_AUTH) {
if (!fallbackAllowed) {
throw new IOException("Server asks us to fall back to SIMPLE auth, "
+ "but this client is configured to only allow secure connections.");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Server asks us to fall back to simple auth.");
}
dispose();
return false;
}
saslToken = new byte[len];
if (LOG.isDebugEnabled()) {
LOG.debug("Will read input token of size " + saslToken.length
+ " for processing by initSASLContext");
}
inStream.readFully(saslToken);
}
while (!isComplete()) {
saslToken = evaluateChallenge(saslToken);
if (saslToken != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Will send token of size " + saslToken.length + " from initSASLContext.");
}
outStream.writeInt(saslToken.length);
outStream.write(saslToken, 0, saslToken.length);
outStream.flush();
}
if (!isComplete()) {
readStatus(inStream);
saslToken = new byte[inStream.readInt()];
if (LOG.isDebugEnabled()) {
LOG.debug("Will read input token of size " + saslToken.length
+ " for processing by initSASLContext");
}
inStream.readFully(saslToken);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("SASL client context established. Negotiated QoP: "
+ saslClient.getNegotiatedProperty(Sasl.QOP));
}
// initial the inputStream, outputStream for both Sasl encryption
// and Crypto AES encryption if necessary
// if Crypto AES encryption enabled, the saslInputStream/saslOutputStream is
// only responsible for connection header negotiation,
// cryptoInputStream/cryptoOutputStream is responsible for rpc encryption with Crypto AES
saslInputStream = new SaslInputStream(inS, saslClient);
saslOutputStream = new SaslOutputStream(outS, saslClient);
if (initStreamForCrypto) {
cryptoInputStream = new WrappedInputStream(inS);
cryptoOutputStream = new WrappedOutputStream(outS);
}
return true;
} catch (IOException e) {
try {
saslClient.dispose();
} catch (SaslException ignored) {
// ignore further exceptions during cleanup
}
throw e;
}
} | 3.68 |
hbase_HMaster_isInitialized | /**
* Report whether this master has completed with its initialization and is ready. If ready, the
* master is also the active master. A standby master is never ready. This method is used for
* testing.
* @return true if master is ready to go, false if not.
*/
@Override
public boolean isInitialized() {
return initialized.isReady();
} | 3.68 |
MagicPlugin_BlinkSpell_delayTeleport | /**
* Delay the teleport by one tick, mainly for effects.
*/
protected void delayTeleport(final Entity entity, final Location location) {
registerMoved(entity);
Bukkit.getScheduler().scheduleSyncDelayedTask(controller.getPlugin(), new Runnable() {
@Override
public void run() {
entity.teleport(location);
registerForUndo();
playEffects("teleport");
}
}, 1);
} | 3.68 |
framework_InMemoryDataProviderHelpers_createValueProviderFilter | /**
* Creates a new predicate from the given predicate and value provider. This
* allows using a predicate of the value provider's return type with objects
* of the value provider's input type.
*
* @param valueProvider
* the value provider to use
* @param valueFilter
* the original predicate
* @return the created predicate
*/
public static <T, V> SerializablePredicate<T> createValueProviderFilter(
ValueProvider<T, V> valueProvider,
SerializablePredicate<V> valueFilter) {
return item -> valueFilter.test(valueProvider.apply(item));
} | 3.68 |
hadoop_LeveldbIterator_seekToLast | /**
* Repositions the iterator so it is at the end of the Database.
*/
public void seekToLast() throws DBException {
try {
iter.seekToLast();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.68 |
flink_MemorySegment_getAddress | /**
* Returns the memory address of off-heap memory segments.
*
* @return absolute memory address outside the heap
* @throws IllegalStateException if the memory segment does not represent off-heap memory
*/
public long getAddress() {
if (heapMemory == null) {
return address;
} else {
throw new IllegalStateException("Memory segment does not represent off heap memory");
}
} | 3.68 |
framework_VFilterSelect_inputFieldKeyDown | /**
* Triggered when a key is pressed in the text box
*
* @param event
* The KeyDownEvent
*/
private void inputFieldKeyDown(KeyDownEvent event) {
if (enableDebug) {
debug("VFS: inputFieldKeyDown(" + event.getNativeKeyCode() + ")");
}
switch (event.getNativeKeyCode()) {
case KeyCodes.KEY_DOWN:
case KeyCodes.KEY_UP:
case KeyCodes.KEY_PAGEDOWN:
case KeyCodes.KEY_PAGEUP:
// open popup as from gadget
filterOptions(-1, "");
lastFilter = "";
tb.selectAll();
break;
case KeyCodes.KEY_ENTER:
/*
* This only handles the case when new items is allowed, a text is
* entered, the popup opener button is clicked to close the popup
* and enter is then pressed (see #7560).
*/
if (!allowNewItem) {
return;
}
if (currentSuggestion != null && tb.getText()
.equals(currentSuggestion.getReplacementString())) {
// Retain behavior from #6686 by returning without stopping
// propagation if there's nothing to do
return;
}
suggestionPopup.menu.doSelectedItemAction();
event.stopPropagation();
break;
}
} | 3.68 |
zxing_BitArray_setBulk | /**
* Sets a block of 32 bits, starting at bit i.
*
* @param i first bit to set
* @param newBits the new value of the next 32 bits. Note again that the least-significant bit
* corresponds to bit i, the next-least-significant to i+1, and so on.
*/
public void setBulk(int i, int newBits) {
bits[i / 32] = newBits;
} | 3.68 |
flink_FileSourceSplit_fileSize | /** Returns the full file size in bytes, from {@link FileStatus#getLen()}. */
public long fileSize() {
return fileSize;
} | 3.68 |
morf_SchemaModificationAdapter_table | /**
* @see org.alfasoftware.morf.dataset.DataSetAdapter#table(org.alfasoftware.morf.metadata.Table, java.lang.Iterable)
*/
@Override
public void table(Table table, Iterable<Record> records) {
remainingTables.remove(table.getName().toUpperCase());
initialiseTableSchema(table);
super.table(table, records);
} | 3.68 |
pulsar_ManagedLedgerConfig_getThrottleMarkDelete | /**
* @return the throttling rate limit for mark-delete calls
*/
public double getThrottleMarkDelete() {
return throttleMarkDelete;
} | 3.68 |
zxing_MatrixUtil_makeTypeInfoBits | // Make a bit vector of type information. On success, store the result in "bits".
// Encode error correction level and mask pattern. See 8.9 of
// JISX0510:2004 (p.45) for details.
static void makeTypeInfoBits(ErrorCorrectionLevel ecLevel, int maskPattern, BitArray bits)
throws WriterException {
if (!QRCode.isValidMaskPattern(maskPattern)) {
throw new WriterException("Invalid mask pattern");
}
int typeInfo = (ecLevel.getBits() << 3) | maskPattern;
bits.appendBits(typeInfo, 5);
int bchCode = calculateBCHCode(typeInfo, TYPE_INFO_POLY);
bits.appendBits(bchCode, 10);
BitArray maskBits = new BitArray();
maskBits.appendBits(TYPE_INFO_MASK_PATTERN, 15);
bits.xor(maskBits);
if (bits.getSize() != 15) { // Just in case.
throw new WriterException("should not happen but we got: " + bits.getSize());
}
} | 3.68 |
hbase_RoundRobinTableInputFormat_unconfigure | /**
* @see #configure()
*/
void unconfigure() {
if (this.hbaseRegionsizecalculatorEnableOriginalValue == null) {
getConf().unset(HBASE_REGIONSIZECALCULATOR_ENABLE);
} else {
getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE,
this.hbaseRegionsizecalculatorEnableOriginalValue);
}
} | 3.68 |
hadoop_ValidateRenamedFilesStage_validateOneFile | /**
* Validate a file.
* @param entry entry to probe for
* @throws IOException IO problem.
* @throws OutputValidationException if the entry is not valid
*/
private void validateOneFile(FileEntry entry) throws IOException {
updateAuditContext(OP_STAGE_JOB_VALIDATE_OUTPUT);
// report progress back
progress();
// look validate the file.
// raising an FNFE if the file isn't there.
FileStatus destStatus;
final Path sourcePath = entry.getSourcePath();
Path destPath = entry.getDestPath();
try {
destStatus = getFileStatus(destPath);
// it must be a file
if (!destStatus.isFile()) {
throw new OutputValidationException(destPath,
"Expected a file renamed from " + sourcePath
+ "; found " + destStatus);
}
final long sourceSize = entry.getSize();
final long destSize = destStatus.getLen();
// etags, if the source had one.
final String sourceEtag = entry.getEtag();
if (getOperations().storePreservesEtagsThroughRenames(destStatus.getPath())
&& isNotBlank(sourceEtag)) {
final String destEtag = ManifestCommitterSupport.getEtag(destStatus);
if (!sourceEtag.equals(destEtag)) {
LOG.warn("Etag of dest file {}: {} does not match that of manifest entry {}",
destPath, destStatus, entry);
throw new OutputValidationException(destPath,
String.format("Expected the file"
+ " renamed from %s"
+ " with etag %s and length %s"
+ " but found a file with etag %s and length %d",
sourcePath,
sourceEtag,
sourceSize,
destEtag,
destSize));
}
}
// check the expected length after any etag validation
if (destSize != sourceSize) {
LOG.warn("Length of dest file {}: {} does not match that of manifest entry {}",
destPath, destStatus, entry);
throw new OutputValidationException(destPath,
String.format("Expected the file"
+ " renamed from %s"
+ " with length %d"
+ " but found a file of length %d",
sourcePath,
sourceSize,
destSize));
}
} catch (FileNotFoundException e) {
// file didn't exist
throw new OutputValidationException(destPath,
"Expected a file, but it was not found", e);
}
addFileCommitted(entry);
} | 3.68 |
hadoop_OBSInputStream_read | /**
* Read bytes starting from the specified position.
*
* @param position start read from this position
* @param buffer read buffer
* @param offset offset into buffer
* @param length number of bytes to read
* @return actual number of bytes read
* @throws IOException on any failure to read
*/
@Override
public int read(final long position, final byte[] buffer, final int offset,
final int length)
throws IOException {
int len = length;
checkNotClosed();
validatePositionedReadArgs(position, buffer, offset, len);
if (position < 0 || position >= contentLength) {
return -1;
}
if ((position + len) > contentLength) {
len = (int) (contentLength - position);
}
if (fs.isReadTransformEnabled()) {
return super.read(position, buffer, offset, len);
}
return randomReadWithNewInputStream(position, buffer, offset, len);
} | 3.68 |
AreaShop_CancellableRegionEvent_getReason | /**
* Get the reason why this event is cancelled.
* @return null if there is no reason or the event is not cancelled, otherwise a string
*/
public String getReason() {
return reason;
} | 3.68 |
hbase_ByteBufferIOEngine_sync | /**
* No operation for the sync in the memory IO engine
*/
@Override
public void sync() {
// Nothing to do.
} | 3.68 |
pulsar_ManagedCursor_seek | /**
* Move the cursor to a different read position.
*
* <p/>If the new position happens to be before the already mark deleted position, it will be set to the mark
* deleted position instead.
*
* @param newReadPosition
* the position where to move the cursor
*/
default void seek(Position newReadPosition) {
seek(newReadPosition, false);
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertFromSelectStatementWhereJoinOnInnerSelect | /**
* Tests an insert from a select which joins inner selects using a where clause. The fields for selection are not specified.
*/
@Test
public void testInsertFromSelectStatementWhereJoinOnInnerSelect() {
SelectStatement inner1 = select(field(INNER_FIELD_A).as(INNER_FIELD_A), field(INNER_FIELD_B).as(INNER_FIELD_B)).from(tableRef("Inner")).alias("InnerAlias");
SelectStatement outer = select().
from(inner1);
InsertStatement insert = insert().
into(tableRef("InsertAB")).
fields(field(INNER_FIELD_A), field(INNER_FIELD_B)).
from(outer);
String expectedSql =
"INSERT INTO " + tableName("InsertAB") + " (innerFieldA, innerFieldB) " +
"SELECT InnerAlias.innerFieldA, InnerAlias.innerFieldB " +
"FROM (SELECT innerFieldA AS innerFieldA, innerFieldB AS innerFieldB FROM " + tableName("Inner") + ") InnerAlias";
assertEquals("Select with join on where clause", ImmutableList.of(expectedSql), testDialect.convertStatementToSQL(insert, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE)));
} | 3.68 |
flink_UnsortedGrouping_sortGroup | /**
* Sorts elements within a group on a key extracted by the specified {@link
* org.apache.flink.api.java.functions.KeySelector} in the specified {@link Order}.
*
* <p>Chaining {@link #sortGroup(KeySelector, Order)} calls is not supported.
*
* @param keySelector The KeySelector with which the group is sorted.
* @param order The Order in which the extracted key is sorted.
* @return A SortedGrouping with specified order of group element.
* @see Order
*/
public <K> SortedGrouping<T> sortGroup(KeySelector<T, K> keySelector, Order order) {
if (!(this.getKeys() instanceof Keys.SelectorFunctionKeys)) {
throw new InvalidProgramException(
"KeySelector group-sorting keys can only be used with KeySelector grouping keys.");
}
TypeInformation<K> keyType =
TypeExtractor.getKeySelectorTypes(keySelector, this.inputDataSet.getType());
SortedGrouping<T> sg =
new SortedGrouping<T>(
this.inputDataSet,
this.keys,
new Keys.SelectorFunctionKeys<T, K>(
keySelector, this.inputDataSet.getType(), keyType),
order);
sg.customPartitioner = getCustomPartitioner();
return sg;
} | 3.68 |
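The guard above only accepts KeySelector group-sorting when the grouping keys are also KeySelector-based. The following is a minimal sketch against the legacy DataSet API that satisfies that constraint; the Tuple2 sample data and the first(1) step are arbitrary choices for illustration.
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

public class SortGroupExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<String, Integer>> data =
                env.fromElements(Tuple2.of("a", 3), Tuple2.of("a", 1), Tuple2.of("b", 2));
        data
                // group with a KeySelector, as required when also sorting the group with one
                .groupBy(new KeySelector<Tuple2<String, Integer>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Integer> value) {
                        return value.f0;
                    }
                })
                // sort each group by the second field
                .sortGroup(new KeySelector<Tuple2<String, Integer>, Integer>() {
                    @Override
                    public Integer getKey(Tuple2<String, Integer> value) {
                        return value.f1;
                    }
                }, Order.ASCENDING)
                .first(1) // smallest element per group
                .print();
    }
}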
flink_TaskStateSnapshot_isTaskDeployedAsFinished | /** Returns whether all the operators of the task are already finished on restoring. */
public boolean isTaskDeployedAsFinished() {
return isTaskDeployedAsFinished;
} | 3.68 |
hbase_HRegionFileSystem_getStoreFiles | /**
 * Returns the store files available for the family. This method performs the filtering based on
 * the valid store files.
 * @param familyName Column Family Name
 * @param validate   whether to skip store files that fail validation
 * @return a list of {@link StoreFileInfo} for the specified family, or null if the family
 *         directory is missing or empty
*/
public List<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
throws IOException {
Path familyDir = getStoreDir(familyName);
FileStatus[] files = CommonFSUtils.listStatus(this.fs, familyDir);
if (files == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("No StoreFiles for: " + familyDir);
}
return null;
}
ArrayList<StoreFileInfo> storeFiles = new ArrayList<>(files.length);
for (FileStatus status : files) {
if (validate && !StoreFileInfo.isValid(status)) {
// recovered.hfiles directory is expected inside CF path when hbase.wal.split.to.hfile to
// true, refer HBASE-23740
if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) {
LOG.warn("Invalid StoreFile: {}", status.getPath());
}
continue;
}
StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
regionInfoForFs, familyName, status.getPath());
storeFiles.add(info);
}
return storeFiles;
} | 3.68 |
framework_VPanel_setFocus | /**
* Sets the keyboard focus on the Panel.
*
* @param focus
* Should the panel have focus or not.
*/
public void setFocus(boolean focus) {
if (focus) {
getContainerElement().focus();
} else {
getContainerElement().blur();
}
} | 3.68 |
hudi_FailSafeConsistencyGuard_checkFilesVisibility | /**
* Helper to check for file visibility based on {@link org.apache.hudi.common.fs.ConsistencyGuard.FileVisibility} event.
*
* @param retryNum retry attempt count.
* @param dir directory of interest in which list of files are checked for visibility
* @param files List of files to check for visibility
* @param event {@link org.apache.hudi.common.fs.ConsistencyGuard.FileVisibility} event of interest.
   * @return {@code true} if the condition succeeded, else {@code false}.
*/
protected boolean checkFilesVisibility(int retryNum, Path dir, List<String> files, FileVisibility event) {
try {
LOG.info("Trying " + retryNum);
FileStatus[] entries = fs.listStatus(dir);
List<String> gotFiles = Arrays.stream(entries).map(e -> Path.getPathWithoutSchemeAndAuthority(e.getPath()))
.map(Path::toString).collect(Collectors.toList());
List<String> candidateFiles = new ArrayList<>(files);
boolean altered = candidateFiles.removeAll(gotFiles);
switch (event) {
case DISAPPEAR:
LOG.info("Following files are visible" + candidateFiles);
// If no candidate files gets removed, it means all of them have disappeared
return !altered;
case APPEAR:
default:
// if all files appear, the list is empty
return candidateFiles.isEmpty();
}
} catch (IOException ioe) {
LOG.warn("Got IOException waiting for file event. Have tried " + retryNum + " time(s)", ioe);
}
return false;
} | 3.68 |
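Each call above is one round of an eventual-consistency check: list the directory, remove whatever was actually listed from the candidate set, and decide whether the APPEAR or DISAPPEAR condition holds. Below is a simplified, hypothetical sketch of the same retry-until-visible pattern against the plain Hadoop FileSystem API; ListingVisibilityWaiter, its constructor parameters and the fixed sleep are invented for illustration and are not part of Hudi.
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical helper: retries a listing until every expected file name is visible. */
public class ListingVisibilityWaiter {

  private final FileSystem fs;
  private final int maxRetries;
  private final long sleepMillis;

  public ListingVisibilityWaiter(FileSystem fs, int maxRetries, long sleepMillis) {
    this.fs = fs;
    this.maxRetries = maxRetries;
    this.sleepMillis = sleepMillis;
  }

  /** Returns true once every name in {@code expected} appears in a listing of {@code dir}. */
  public boolean waitForAppear(Path dir, List<String> expected)
      throws IOException, InterruptedException {
    for (int attempt = 1; attempt <= maxRetries; attempt++) {
      FileStatus[] entries = fs.listStatus(dir);
      Set<String> seen = new HashSet<>();
      for (FileStatus status : entries) {
        seen.add(status.getPath().getName());
      }
      if (seen.containsAll(expected)) {
        return true; // all expected files are visible
      }
      Thread.sleep(sleepMillis); // back off before the next listing
    }
    return false; // gave up: the listing never showed all expected files
  }
}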
hmily_RepositoryPathUtils_buildRedisKeyPrefix | /**
* Build redis key prefix string.
*
* @param applicationName the application name
* @return the string
*/
public static String buildRedisKeyPrefix(final String applicationName) {
return String.format(CommonConstant.RECOVER_REDIS_KEY_PRE, applicationName);
} | 3.68 |
framework_AbsoluteLayoutRelativeSizeContent_createComparisonTableOnFixed | /**
* Creates an {@link AbsoluteLayout} of fixed size that contains a
* full-sized {@link Table} that has been forced to full size with css.
* Represents the workaround given for this ticket.
*
* @return the created layout
*/
private Component createComparisonTableOnFixed() {
AbsoluteLayout absoluteLayout = new AbsoluteLayout();
absoluteLayout.setWidth(200, Unit.PIXELS);
absoluteLayout.setHeight(200, Unit.PIXELS);
absoluteLayout.setCaption("comparison table in full size");
Table table = new Table();
table.setSizeFull();
table.setId("comparison-table");
absoluteLayout.addComponent(table, "top:0;bottom:0;left:0;right:0;");
return absoluteLayout;
} | 3.68 |
flink_MethodlessRouter_routes | /** Returns all routes in this router, an unmodifiable map of {@code PathPattern -> Target}. */
public Map<PathPattern, T> routes() {
return Collections.unmodifiableMap(routes);
} | 3.68 |
flink_LimitedConnectionsFileSystem_getLastCheckTimestampNanos | /** Gets the timestamp when the last inactivity evaluation was made. */
public long getLastCheckTimestampNanos() {
return lastCheckTimestampNanos;
} | 3.68 |
flink_TypeInference_outputTypeStrategy | /**
* Sets the strategy for inferring the final output data type of a function call.
*
* <p>Required.
*/
public Builder outputTypeStrategy(TypeStrategy outputTypeStrategy) {
this.outputTypeStrategy =
Preconditions.checkNotNull(
outputTypeStrategy, "Output type strategy must not be null.");
return this;
} | 3.68 |
flink_ColumnSummary_containsNonNull | /** True if this column contains any non-null values. */
public boolean containsNonNull() {
return getNonNullCount() > 0L;
} | 3.68 |
morf_SelectStatement_isForUpdate | /**
 * @return true if the statement should take a pessimistic lock on the tables.
*/
public boolean isForUpdate() {
return forUpdate;
} | 3.68 |
hadoop_StagingCommitter_initFileOutputCommitterOptions | /**
* Init the context config with everything needed for the file output
* committer. In particular, this code currently only works with
* commit algorithm 1.
* @param context context to configure.
*/
protected void initFileOutputCommitterOptions(JobContext context) {
context.getConfiguration()
.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 1);
} | 3.68 |
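The setting above pins the classic FileOutputCommitter to commit algorithm 1, which this committer relies on. A tiny sketch of writing and reading that option back through a plain Configuration follows, only to show the key involved; note the default constant's value differs across Hadoop releases.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;

public class CommitAlgorithmCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // force algorithm 1, as the staging committer does
    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 1);
    int version = conf.getInt(
        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
    System.out.println("file output committer algorithm version = " + version);
  }
}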
hbase_AsyncAdmin_listDeadServers | /**
* List all the dead region servers.
*/
default CompletableFuture<List<ServerName>> listDeadServers() {
return this.getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS))
.thenApply(ClusterMetrics::getDeadServerNames);
} | 3.68 |
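Because the admin API is asynchronous, callers compose on the returned CompletableFuture instead of blocking for a list. A minimal usage sketch follows, assuming a reachable cluster picked up from HBaseConfiguration.create(); error handling is omitted.
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeadServersExample {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin admin = conn.getAdmin();
      CompletableFuture<List<ServerName>> dead = admin.listDeadServers();
      // print each dead region server once the metrics call completes
      dead.thenAccept(servers -> servers.forEach(s -> System.out.println(s.getServerName())))
          .join();
    }
  }
}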
hbase_HFileBlockIndex_binarySearchNonRootIndex | /**
* Performs a binary search over a non-root level index block. Utilizes the secondary index,
 * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries.
 * @param key          the key we are searching for offsets to individual entries in the
 *                     blockIndex buffer
 * @param nonRootIndex the non-root index block buffer, starting with the secondary index.
 *                     The position is ignored.
* @return the index i in [0, numEntries - 1] such that keys[i] <= key < keys[i + 1], if keys is
* the array of all keys being searched, or -1 otherwise
*/
static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex,
CellComparator comparator) {
int numEntries = nonRootIndex.getIntAfterPosition(0);
int low = 0;
int high = numEntries - 1;
int mid = 0;
// Entries start after the number of entries and the secondary index.
// The secondary index takes numEntries + 1 ints.
int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
// If we imagine that keys[-1] = -Infinity and
// keys[numEntries] = Infinity, then we are maintaining an invariant that
// keys[low - 1] < key < keys[high + 1] while narrowing down the range.
ByteBufferKeyOnlyKeyValue nonRootIndexkeyOnlyKV = new ByteBufferKeyOnlyKeyValue();
ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<>();
while (low <= high) {
mid = low + ((high - low) >> 1);
// Midkey's offset relative to the end of secondary index
int midKeyRelOffset = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 1));
// The offset of the middle key in the blockIndex buffer
int midKeyOffset = entriesOffset // Skip secondary index
+ midKeyRelOffset // Skip all entries until mid
+ SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size
// We subtract the two consecutive secondary index elements, which
// gives us the size of the whole (offset, onDiskSize, key) tuple. We
// then need to subtract the overhead of offset and onDiskSize.
int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2))
- midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;
// we have to compare in this order, because the comparator order
// has special logic when the 'left side' is a special key.
// TODO make KeyOnlyKeyValue to be Buffer backed and avoid array() call. This has to be
// done after HBASE-12224 & HBASE-12282
// TODO avoid array call.
nonRootIndex.asSubByteBuffer(midKeyOffset, midLength, pair);
nonRootIndexkeyOnlyKV.setKey(pair.getFirst(), pair.getSecond(), midLength);
int cmp = PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, nonRootIndexkeyOnlyKV);
// key lives above the midpoint
if (cmp > 0) low = mid + 1; // Maintain the invariant that keys[low - 1] < key
// key lives below the midpoint
else if (cmp < 0) high = mid - 1; // Maintain the invariant that key < keys[high + 1]
else return mid; // exact match
}
// As per our invariant, keys[low - 1] < key < keys[high + 1], meaning
// that low - 1 < high + 1 and (low - high) <= 1. As per the loop break
// condition, low >= high + 1. Therefore, low = high + 1.
if (low != high + 1) {
throw new IllegalStateException(
"Binary search broken: low=" + low + " " + "instead of " + (high + 1));
}
// OK, our invariant says that keys[low - 1] < key < keys[low]. We need to
// return i such that keys[i] <= key < keys[i + 1]. Therefore i = low - 1.
int i = low - 1;
// Some extra validation on the result.
if (i < -1 || i >= numEntries) {
throw new IllegalStateException("Binary search broken: result is " + i
+ " but expected to be between -1 and (numEntries - 1) = " + (numEntries - 1));
}
return i;
} | 3.68 |
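The layout the search above relies on is: an entry count, then numEntries + 1 relative offsets (the secondary index), then the variable-length entries, so the length of entry i is offset[i + 1] - offset[i] (minus a small per-entry header in the real block). Below is a simplified, self-contained sketch of the same idea over plain String keys, with invented encode/search helpers and no per-entry header, to make the offset arithmetic easier to follow.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.List;

public class SecondaryIndexSearchSketch {

  /** Encodes sorted keys as: numEntries, (numEntries + 1) relative offsets, then the key bytes. */
  static ByteBuffer encode(List<String> sortedKeys) {
    int n = sortedKeys.size();
    byte[][] encoded = new byte[n][];
    int dataLen = 0;
    for (int i = 0; i < n; i++) {
      encoded[i] = sortedKeys.get(i).getBytes(StandardCharsets.UTF_8);
      dataLen += encoded[i].length;
    }
    ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES * (n + 2) + dataLen);
    buf.putInt(n);
    int rel = 0;
    for (int i = 0; i < n; i++) {
      buf.putInt(rel);
      rel += encoded[i].length;
    }
    buf.putInt(rel); // trailing offset so length of entry i = offset[i + 1] - offset[i]
    for (byte[] e : encoded) {
      buf.put(e);
    }
    return buf;
  }

  /** Returns i such that keys[i] <= key < keys[i + 1], or -1 if key sorts before every entry. */
  static int search(ByteBuffer buf, String key) {
    int n = buf.getInt(0);
    int entriesOffset = Integer.BYTES * (n + 2); // skip the count and the secondary index
    int low = 0;
    int high = n - 1;
    while (low <= high) {
      int mid = low + ((high - low) >> 1);
      int relOff = buf.getInt(Integer.BYTES * (mid + 1));
      int len = buf.getInt(Integer.BYTES * (mid + 2)) - relOff;
      byte[] raw = new byte[len];
      for (int i = 0; i < len; i++) {
        raw[i] = buf.get(entriesOffset + relOff + i);
      }
      int cmp = key.compareTo(new String(raw, StandardCharsets.UTF_8));
      if (cmp > 0) {
        low = mid + 1; // key lives above the midpoint
      } else if (cmp < 0) {
        high = mid - 1; // key lives below the midpoint
      } else {
        return mid; // exact match
      }
    }
    return low - 1; // keys[low - 1] < key < keys[low]
  }

  public static void main(String[] args) {
    ByteBuffer index = encode(List.of("apple", "mango", "peach"));
    System.out.println(search(index, "banana")); // 0: "apple" <= "banana" < "mango"
    System.out.println(search(index, "aaa")); // -1: sorts before every entry
  }
}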
dubbo_DubboMergingDigest_compress | /**
* Merges any pending inputs and compresses the data down to the public setting.
* Note that this typically loses a bit of precision and thus isn't a thing to
* be doing all the time. It is best done only when we want to show results to
* the outside world.
*/
@Override
public void compress() {
mergeNewValues(true, publicCompression);
} | 3.68 |
hbase_ConnectionUtils_retries2Attempts | /**
 * Return retries + 1. The returned value will be in the range [1, Integer.MAX_VALUE].
*/
static int retries2Attempts(int retries) {
return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1);
} | 3.68 |
flink_TypeInfoLogicalTypeConverter_fromTypeInfoToLogicalType | /**
 * This conversion loses some information (for example, a {@link PojoTypeInfo} is converted to a
 * {@link RowType}). Together with {@link TypeInfoLogicalTypeConverter#fromLogicalTypeToTypeInfo}
 * it does not allow lossless back-and-forth conversion.
*/
public static LogicalType fromTypeInfoToLogicalType(TypeInformation typeInfo) {
DataType dataType = TypeConversions.fromLegacyInfoToDataType(typeInfo);
return LogicalTypeDataTypeConverter.fromDataTypeToLogicalType(dataType);
} | 3.68 |