name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hibernate-validator_ValidatorFactoryBean_isNullable | // TODO to be removed once using CDI API 4.x
public boolean isNullable() {
return false;
} | 3.68 |
hadoop_ManifestCommitter_abortJob | /**
* Abort the job.
* Invokes
* {@link #executeCleanup(String, JobContext, ManifestCommitterConfig)}
* then saves the (ongoing) job report data if reporting is enabled.
* @param jobContext Context of the job whose output is being written.
* @param state final runstate of the job
* @throws IOException failure during cleanup; report failures are swallowed
*/
@Override
public void abortJob(final JobContext jobContext,
final JobStatus.State state)
throws IOException {
LOG.info("Aborting Job {} in state {}", jobContext.getJobID(), state);
ManifestCommitterConfig committerConfig = enterCommitter(false,
jobContext);
ManifestSuccessData report = getOrCreateSuccessData(
committerConfig);
IOException failure = null;
try {
executeCleanup(OP_STAGE_JOB_ABORT, jobContext, committerConfig);
} catch (IOException e) {
// failure.
failure = e;
}
report.setSuccess(false);
// job abort does not overwrite any existing report, so a job commit
// failure cause will be preserved.
maybeSaveSummary(activeStage, committerConfig, report, failure,
true, false);
// print job stats
LOG.info("Job Abort statistics {}",
ioStatisticsToPrettyString(iostatistics));
updateCommonContextOnCommitterExit();
} | 3.68 |
hbase_ReplicationSourceManager_init | /**
* Adds a normal source per registered peer cluster.
*/
void init() throws IOException {
for (String id : this.replicationPeers.getAllPeerIds()) {
addSource(id, true);
}
} | 3.68 |
flink_TwoPhaseCommitSinkFunction_enableTransactionTimeoutWarnings | /**
* Enables logging of warnings if a transaction's elapsed time reaches a specified ratio of the
* <code>transactionTimeout</code>. If <code>warningRatio</code> is 0, a warning will always be
* logged when committing the transaction.
*
* @param warningRatio A value in the range [0,1].
* @return this sink function, to allow method chaining
*/
protected TwoPhaseCommitSinkFunction<IN, TXN, CONTEXT> enableTransactionTimeoutWarnings(
double warningRatio) {
checkArgument(
warningRatio >= 0 && warningRatio <= 1, "warningRatio must be in range [0,1]");
this.transactionTimeoutWarningRatio = warningRatio;
return this;
} | 3.68 |
flink_RetryingRegistration_cancel | /** Cancels the registration procedure. */
public void cancel() {
canceled = true;
completionFuture.cancel(false);
} | 3.68 |
hmily_DisruptorProvider_onData | /**
* Push data to the disruptor queue.
*
* @param t the data to publish
*/
public void onData(final T t) {
long position = ringBuffer.next();
try {
DataEvent<T> de = ringBuffer.get(position);
de.setT(t);
ringBuffer.publish(position);
} catch (Exception ex) {
logger.error("push data error:", ex);
}
} | 3.68 |
dubbo_RpcServiceContext_getInvocation | /**
* @deprecated Use getMethodName(), getParameterTypes() and getArguments() instead
*/
@Override
@Deprecated
public Invocation getInvocation() {
return invocation;
} | 3.68 |
morf_ChangelogBuilder_withIncludeDataChanges | /**
* Set whether to include data changes in the log output. The default is to
* omit data changes.
*
* @param includeDataChanges Indicates whether data changes should be included or not.
* @return This builder for chaining
*/
public ChangelogBuilder withIncludeDataChanges(boolean includeDataChanges) {
this.includeDataChanges = includeDataChanges;
return this;
} | 3.68 |
flink_PekkoUtils_terminateActorSystem | /**
* Terminates the given {@link ActorSystem} and returns its termination future.
*
* @param actorSystem to terminate
* @return Termination future
*/
public static CompletableFuture<Void> terminateActorSystem(ActorSystem actorSystem) {
return ScalaFutureUtils.toJava(actorSystem.terminate())
.thenAccept(FunctionUtils.ignoreFn());
} | 3.68 |
dubbo_ConfigUtils_getProperties | /**
* Get dubbo properties.
* It is not recommended to use this method to modify dubbo properties.
*
* @return the loaded dubbo properties
*/
public static Properties getProperties(Set<ClassLoader> classLoaders) {
String path = System.getProperty(CommonConstants.DUBBO_PROPERTIES_KEY);
if (StringUtils.isEmpty(path)) {
path = System.getenv(CommonConstants.DUBBO_PROPERTIES_KEY);
if (StringUtils.isEmpty(path)) {
path = CommonConstants.DEFAULT_DUBBO_PROPERTIES;
}
}
return ConfigUtils.loadProperties(classLoaders, path, false, true);
} | 3.68 |
framework_FilesystemContainer_accept | /**
* Accepts only files with the configured extension, and all directories.
*
* @see java.io.FilenameFilter#accept(File, String)
*/
@Override
public boolean accept(File dir, String name) {
if (name.endsWith(filter)) {
return true;
}
return new File(dir, name).isDirectory();
} | 3.68 |
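Since the accept method above relies on the surrounding container's `filter` field, a standalone sketch of the same logic using the plain java.io.FilenameFilter interface may help; the ".txt" extension and the current directory are illustrative assumptions:

```java
import java.io.File;
import java.io.FilenameFilter;

public class ExtensionOrDirectoryFilter implements FilenameFilter {

    private final String extension; // e.g. ".txt" (illustrative)

    public ExtensionOrDirectoryFilter(String extension) {
        this.extension = extension;
    }

    @Override
    public boolean accept(File dir, String name) {
        // Accept files with the configured extension...
        if (name.endsWith(extension)) {
            return true;
        }
        // ...and always accept directories so traversal can continue.
        return new File(dir, name).isDirectory();
    }

    public static void main(String[] args) {
        File root = new File("."); // illustrative directory
        String[] matches = root.list(new ExtensionOrDirectoryFilter(".txt"));
        if (matches != null) {
            for (String match : matches) {
                System.out.println(match);
            }
        }
    }
}
```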
hudi_HoodieWrapperFileSystem_createImmutableFileInPath | /**
* Creates a new file with overwrite set to false. This ensures files are created
* only once and never rewritten. In addition, if the content is non-empty and
* {needCreateTempFile} is true, the content is first written to a temp file,
* which is then renamed to the target path once the write completes.
*
* @param fullPath File Path
* @param content Content to be stored
*/
public void createImmutableFileInPath(Path fullPath, Option<byte[]> content)
throws HoodieIOException {
FSDataOutputStream fsout = null;
Path tmpPath = null;
boolean needTempFile = needCreateTempFile();
try {
if (!content.isPresent()) {
fsout = fileSystem.create(fullPath, false);
}
if (content.isPresent() && needTempFile) {
Path parent = fullPath.getParent();
tmpPath = new Path(parent, fullPath.getName() + TMP_PATH_POSTFIX);
fsout = fileSystem.create(tmpPath, false);
fsout.write(content.get());
}
if (content.isPresent() && !needTempFile) {
fsout = fileSystem.create(fullPath, false);
fsout.write(content.get());
}
} catch (IOException e) {
String errorMsg = "Failed to create file " + (tmpPath != null ? tmpPath : fullPath);
throw new HoodieIOException(errorMsg, e);
} finally {
try {
if (null != fsout) {
fsout.close();
}
} catch (IOException e) {
String errorMsg = "Failed to close file " + (needTempFile ? tmpPath : fullPath);
throw new HoodieIOException(errorMsg, e);
}
boolean renameSuccess = false;
try {
if (null != tmpPath) {
renameSuccess = fileSystem.rename(tmpPath, fullPath);
}
} catch (IOException e) {
throw new HoodieIOException("Failed to rename " + tmpPath + " to the target " + fullPath, e);
} finally {
if (!renameSuccess && null != tmpPath) {
try {
fileSystem.delete(tmpPath, false);
LOG.warn("Fail to rename " + tmpPath + " to " + fullPath + ", target file exists: " + fileSystem.exists(fullPath));
} catch (IOException e) {
throw new HoodieIOException("Failed to delete tmp file " + tmpPath, e);
}
}
}
}
} | 3.68 |
hbase_SplitTableRegionProcedure_preSplitRegionBeforeMETA | /**
* Pre-split region actions before the Point-of-No-Return step
* @param env MasterProcedureEnv
**/
private void preSplitRegionBeforeMETA(final MasterProcedureEnv env)
throws IOException, InterruptedException {
final List<Mutation> metaEntries = new ArrayList<Mutation>();
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preSplitBeforeMETAAction(getSplitRow(), metaEntries, getUser());
try {
for (Mutation p : metaEntries) {
RegionInfo.parseRegionName(p.getRow());
}
} catch (IOException e) {
LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as "
+ "region name." + "Mutations from coprocessor should only for hbase:meta table.");
throw e;
}
}
} | 3.68 |
flink_TemplateUtils_extractProcedureGlobalFunctionTemplates | /** Retrieve global templates from procedure class. */
static Set<FunctionTemplate> extractProcedureGlobalFunctionTemplates(
DataTypeFactory typeFactory, Class<? extends Procedure> procedure) {
return asFunctionTemplatesForProcedure(
typeFactory, collectAnnotationsOfClass(ProcedureHint.class, procedure));
} | 3.68 |
hbase_HFileReaderImpl_checkKeyLen | /** Returns True if v <= 0 or v > current block buffer limit. */
protected final boolean checkKeyLen(final int v) {
return v <= 0 || v > this.blockBuffer.limit();
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_processGByExpr | /**
* Function to do groupby subexpression elimination. This is called by all the processors
* initially. As an example, consider the query select a+b, count(1) from T group by a+b; Then
* a+b is already precomputed in the group by operators key, so we substitute a+b in the select
* list with the internal column name of the a+b expression that appears in the in input row
* resolver.
*
* @param nd The node that is being inspected.
* @param procCtx The processor context.
* @return exprNodeColumnDesc.
*/
public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) throws SemanticException {
// We recursively create the exprNodeDesc. Base cases: when we encounter
// a column ref, we convert that into an exprNodeColumnDesc; when we encounter
// a constant, we convert that into an exprNodeConstantDesc. For others we
// just build the exprNodeFuncDesc with recursively built children.
HiveParserASTNode expr = (HiveParserASTNode) nd;
HiveParserTypeCheckCtx ctx = (HiveParserTypeCheckCtx) procCtx;
// bypass only if outerRR is not null. Otherwise we need to look for expressions in outerRR
// for subqueries, e.g. select min(b.value) from table b group by b.key
// having key in (select .. where a = min(b.value))
if (!ctx.isUseCaching() && ctx.getOuterRR() == null) {
return null;
}
HiveParserRowResolver input = ctx.getInputRR();
ExprNodeDesc desc = null;
if (input == null || !ctx.getAllowGBExprElimination()) {
return null;
}
// If the current subExpression is pre-calculated, as in Group-By etc.
ColumnInfo colInfo = input.getExpression(expr);
// try outer row resolver
HiveParserRowResolver outerRR = ctx.getOuterRR();
if (colInfo == null && outerRR != null) {
colInfo = outerRR.getExpression(expr);
}
if (colInfo != null) {
desc = new ExprNodeColumnDesc(colInfo);
HiveParserASTNode source = input.getExpressionSource(expr);
if (source != null) {
ctx.getUnparseTranslator().addCopyTranslation(expr, source);
}
return desc;
}
return desc;
} | 3.68 |
hbase_MasterObserver_preListNamespaces | /**
* Called before a listNamespaces request has been processed.
* @param ctx the environment to interact with the framework and master
* @param namespaces an empty list, can be filled with what to return if bypassing
* @throws IOException if something went wrong
*/
default void preListNamespaces(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<String> namespaces) throws IOException {
} | 3.68 |
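For illustration, a minimal sketch of a master coprocessor that fills the namespace list and bypasses the default listing, as the parameter description above suggests; the class name and the "default" namespace are hypothetical, and the sketch assumes standard HBase 2.x coprocessor wiring:

```java
import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Hypothetical observer that restricts listNamespaces to a fixed namespace. */
public class RestrictedNamespaceObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preListNamespaces(ObserverContext<MasterCoprocessorEnvironment> ctx,
      List<String> namespaces) throws IOException {
    // Fill the (initially empty) list with what should be returned...
    namespaces.add("default");
    // ...and bypass the default listing so only this list is used.
    ctx.bypass();
  }
}
```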
flink_ExecutionVertex_resetForNewExecution | /** Archives the current Execution and creates a new Execution for this vertex. */
public void resetForNewExecution() {
resetForNewExecutionInternal(System.currentTimeMillis());
} | 3.68 |
hbase_MultiVersionConcurrencyControl_complete | /**
* Mark the {@link WriteEntry} as complete and advance the read point as much as possible. Call
* this even if the write has FAILED (AFTER backing out the write transaction changes completely)
* so we can clean up the outstanding transaction. How much is the read point advanced? Let S be
* the set of all write numbers that are completed. Set the read point to the highest numbered
* write of S.
* @return true if e is visible to MVCC readers (that is, readpoint >= e.writeNumber)
*/
public boolean complete(WriteEntry writeEntry) {
synchronized (writeQueue) {
writeEntry.markCompleted();
long nextReadValue = NONE;
boolean ranOnce = false;
while (!writeQueue.isEmpty()) {
ranOnce = true;
WriteEntry queueFirst = writeQueue.getFirst();
if (nextReadValue > 0) {
if (nextReadValue + 1 != queueFirst.getWriteNumber()) {
throw new RuntimeException("Invariant in complete violated, nextReadValue="
+ nextReadValue + ", writeNumber=" + queueFirst.getWriteNumber());
}
}
if (queueFirst.isCompleted()) {
nextReadValue = queueFirst.getWriteNumber();
writeQueue.removeFirst();
queueFirst.runCompletionAction();
} else {
break;
}
}
if (!ranOnce) {
throw new RuntimeException("There is no first!");
}
if (nextReadValue > 0) {
synchronized (readWaiters) {
readPoint.set(nextReadValue);
readWaiters.notifyAll();
}
}
return readPoint.get() >= writeEntry.getWriteNumber();
}
} | 3.68 |
querydsl_AbstractMongodbQuery_fetchResults | /**
* Fetch results with the specified fields
*
* @param paths fields to return
* @return results
*/
public QueryResults<K> fetchResults(Path<?>... paths) {
queryMixin.setProjection(paths);
return fetchResults();
} | 3.68 |
hadoop_AllocateResponse_updateErrors | /**
* Set the <code>updateErrors</code> of the response.
* @see AllocateResponse#setUpdateErrors(List)
* @param updateErrors <code>updateErrors</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder updateErrors(
List<UpdateContainerError> updateErrors) {
allocateResponse.setUpdateErrors(updateErrors);
return this;
} | 3.68 |
hibernate-validator_ConstrainedExecutable_merge | /**
* Creates a new constrained executable object by merging this and the given
* other executable. Both executables must have the same location, i.e.
* represent the same executable on the same type.
*
* @param other The executable to merge.
*
* @return A merged executable.
*/
public ConstrainedExecutable merge(ConstrainedExecutable other) {
ConfigurationSource mergedSource = ConfigurationSource.max( source, other.source );
List<ConstrainedParameter> mergedParameterMetaData = newArrayList( parameterMetaData.size() );
int i = 0;
for ( ConstrainedParameter parameter : parameterMetaData ) {
mergedParameterMetaData.add( parameter.merge( other.getParameterMetaData( i ) ) );
i++;
}
Set<MetaConstraint<?>> mergedCrossParameterConstraints = newHashSet( crossParameterConstraints );
mergedCrossParameterConstraints.addAll( other.crossParameterConstraints );
Set<MetaConstraint<?>> mergedReturnValueConstraints = newHashSet( constraints );
mergedReturnValueConstraints.addAll( other.constraints );
Set<MetaConstraint<?>> mergedTypeArgumentConstraints = new HashSet<>( typeArgumentConstraints );
mergedTypeArgumentConstraints.addAll( other.typeArgumentConstraints );
CascadingMetaDataBuilder mergedCascadingMetaDataBuilder = cascadingMetaDataBuilder.merge( other.cascadingMetaDataBuilder );
return new ConstrainedExecutable(
mergedSource,
callable,
mergedParameterMetaData,
mergedCrossParameterConstraints,
mergedReturnValueConstraints,
mergedTypeArgumentConstraints,
mergedCascadingMetaDataBuilder
);
} | 3.68 |
morf_DataValueLookupBuilderImpl_initialiseArray | /**
* Creates the storage array.
*/
private Object[] initialiseArray(int numberOfColumns) {
return new Object[numberOfColumns];
} | 3.68 |
framework_CalendarDropHandler_getConnector | /*
* (non-Javadoc)
*
* @see
* com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#getConnector()
*/
@Override
public CalendarConnector getConnector() {
return calendarConnector;
} | 3.68 |
hadoop_SignerManager_initCustomSigners | /**
* Initialize custom signers and register them with the AWS SDK.
*
*/
public void initCustomSigners() {
String[] customSigners = ownerConf.getTrimmedStrings(CUSTOM_SIGNERS);
if (customSigners == null || customSigners.length == 0) {
// No custom signers specified, nothing to do.
LOG.debug("No custom signers specified");
return;
}
for (String customSigner : customSigners) {
String[] parts = customSigner.split(":");
if (!(parts.length == 1 || parts.length == 2 || parts.length == 3)) {
String message = "Invalid format (Expected name, name:SignerClass,"
+ " name:SignerClass:SignerInitializerClass)"
+ " for CustomSigner: [" + customSigner + "]";
LOG.error(message);
throw new IllegalArgumentException(message);
}
if (parts.length == 1) {
// Nothing to do. Trying to use a pre-defined Signer
} else {
// Register any custom Signer
maybeRegisterSigner(parts[0], parts[1], ownerConf);
// If an initializer is specified, take care of instantiating it and
// setting it up
if (parts.length == 3) {
Class<? extends AwsSignerInitializer> clazz = null;
try {
clazz = (Class<? extends AwsSignerInitializer>) ownerConf
.getClassByName(parts[2]);
} catch (ClassNotFoundException e) {
throw new RuntimeException(String.format(
"SignerInitializer class" + " [%s] not found for signer [%s]",
parts[2], parts[0]), e);
}
LOG.debug("Creating signer initializer: [{}] for signer: [{}]",
parts[2], parts[0]);
AwsSignerInitializer signerInitializer = ReflectionUtils
.newInstance(clazz, null);
initializers.add(signerInitializer);
signerInitializer
.registerStore(bucketName, ownerConf, delegationTokenProvider,
ownerUgi);
}
}
}
} | 3.68 |
flink_JobEdge_getSource | /**
* Returns the data set at the source of the edge. May be null, if the edge refers to the source
* via an ID and has not been connected.
*
* @return The data set at the source of the edge
*/
public IntermediateDataSet getSource() {
return source;
} | 3.68 |
framework_Window_getAssistiveRole | /**
* Gets the WAI-ARIA role of the window.
*
* This role defines how an assistive device handles a window. Available
* roles are alertdialog and dialog (@see
* <a href="http://www.w3.org/TR/2011/CR-wai-aria-20110118/roles">Roles
* Model</a>).
*
* @return WAI-ARIA role set for the window
*/
public WindowRole getAssistiveRole() {
return getState(false).role;
} | 3.68 |
framework_JSR356WebsocketInitializer_initAtmosphereForVaadinServlet | /**
* Initializes Atmosphere for use with the given Vaadin servlet
* <p>
* For JSR 356 websockets to work properly, the initialization must be done
* in the servlet context initialization phase.
*
* @param servletRegistration
* The servlet registration info for the servlet
* @param servletContext The servlet context in which the servlet is registered
*/
public static void initAtmosphereForVaadinServlet(
ServletRegistration servletRegistration,
ServletContext servletContext) {
String servletName = servletRegistration.getName();
String attributeName = getAttributeName(servletName);
if (servletContext.getAttribute(attributeName) != null) {
// Already initialized
getLogger().warning("Atmosphere already initialized");
return;
}
getLogger().finer("Creating AtmosphereFramework for " + servletName);
AtmosphereFramework framework = PushRequestHandler.initAtmosphere(
new FakeServletConfig(servletRegistration, servletContext));
servletContext.setAttribute(attributeName, framework);
getLogger().finer("Created AtmosphereFramework for " + servletName);
} | 3.68 |
hadoop_NMClientAsync_onContainerReInitialize | /**
* Callback for container re-initialization request.
*
* @param containerId the Id of the container to be Re-Initialized.
*/
public void onContainerReInitialize(ContainerId containerId) {} | 3.68 |
hmily_HmilyRepositoryFacade_findUndoByParticipantId | /**
* Find the participant undo list by participant id.
*
* @param participantId the participant id
* @return the list
*/
public List<HmilyParticipantUndo> findUndoByParticipantId(final Long participantId) {
return hmilyRepository.findHmilyParticipantUndoByParticipantId(participantId);
} | 3.68 |
flink_JobSubmissionResult_isJobExecutionResult | /**
* Checks if this JobSubmissionResult is also a JobExecutionResult. See {@code
* getJobExecutionResult} to retrieve the JobExecutionResult.
*
* @return True if this is a JobExecutionResult, false otherwise
*/
public boolean isJobExecutionResult() {
return false;
} | 3.68 |
querydsl_GeometryExpression_asBinary | /**
* Exports this geometric object to a specific Well-known Binary Representation of
* Geometry.
*
* @return binary representation
*/
public SimpleExpression<byte[]> asBinary() {
if (binary == null) {
binary = Expressions.operation(byte[].class, SpatialOps.AS_BINARY, mixin);
}
return binary;
} | 3.68 |
framework_VScrollTable_updateSelectionProperties | /** For internal use only. May be removed or replaced in the future. */
public void updateSelectionProperties(UIDL uidl,
AbstractComponentState state, boolean readOnly) {
setMultiSelectMode(uidl.hasAttribute("multiselectmode")
? uidl.getIntAttribute("multiselectmode")
: MULTISELECT_MODE_DEFAULT);
nullSelectionAllowed = uidl.hasAttribute("nsa")
? uidl.getBooleanAttribute("nsa")
: true;
if (uidl.hasAttribute("selectmode")) {
if (readOnly) {
selectMode = SelectMode.NONE;
} else if (uidl.getStringAttribute("selectmode").equals("multi")) {
selectMode = SelectMode.MULTI;
} else if (uidl.getStringAttribute("selectmode").equals("single")) {
selectMode = SelectMode.SINGLE;
} else {
selectMode = SelectMode.NONE;
}
if (uidl.hasAttribute("touchdetection")) {
multiSelectTouchDetectionEnabled = uidl
.getBooleanAttribute("touchdetection");
}
}
} | 3.68 |
hadoop_ParsedTaskAttempt_obtainHttpPort | /**
* @return http port if set. Returns null otherwise.
*/
public Integer obtainHttpPort() {
return httpPort;
} | 3.68 |
dubbo_ThreadlessExecutor_execute | /**
* If the calling thread is still waiting for a callback task, add the task into the blocking queue to wait to be scheduled.
* Otherwise, submit to the shared callback executor directly.
*
* @param runnable the task to execute
*/
@Override
public void execute(Runnable runnable) {
RunnableWrapper run = new RunnableWrapper(runnable);
queue.add(run);
if (waiter.get() != SHUTDOWN) {
LockSupport.unpark((Thread) waiter.get());
} else if (queue.remove(run)) {
throw new RejectedExecutionException();
}
} | 3.68 |
querydsl_BooleanBuilder_and | /**
* Create the intersection of this and the given predicate
*
* @param right right hand side of {@code and} operation
* @return the current object
*/
public BooleanBuilder and(@Nullable Predicate right) {
if (right != null) {
if (predicate == null) {
predicate = right;
} else {
predicate = ExpressionUtils.and(predicate, right);
}
}
return this;
} | 3.68 |
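A short, self-contained usage sketch of the builder; the string paths stand in for generated Q-type properties and are purely illustrative:

```java
import com.querydsl.core.BooleanBuilder;
import com.querydsl.core.types.Predicate;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringPath;

public class BooleanBuilderExample {
    public static void main(String[] args) {
        StringPath name = Expressions.stringPath("name");       // hypothetical path
        StringPath country = Expressions.stringPath("country"); // hypothetical path

        BooleanBuilder builder = new BooleanBuilder();
        builder.and(name.eq("alice"));   // first call: the predicate becomes the current value
        builder.and(country.eq("FI"));   // subsequent calls: predicates are AND-ed together
        builder.and(null);               // null is ignored, as in the method above

        Predicate where = builder.getValue();
        System.out.println(where);       // prints something like: name = alice && country = FI
    }
}
```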
framework_DDEventHandleStrategy_handleMouseMove | /**
* Called to handle {@link Event#ONMOUSEMOVE} event.
*
* @param target
* target element over which DnD event has happened
* @param event
* ONMOUSEMOVE GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
*/
protected void handleMouseMove(Element target, NativePreviewEvent event,
DDManagerMediator mediator) {
VDragAndDropManager manager = mediator.getManager();
if (manager.getCurrentDropHandler() != null) {
handleDragOver(target, mediator);
}
event.getNativeEvent().preventDefault();
} | 3.68 |
hbase_MasterServices_getSplitWALManager | /** Returns the SplitWALManager, or null if the current WAL splitting is zk-based */
default SplitWALManager getSplitWALManager() {
return null;
} | 3.68 |
hbase_HRegionFileSystem_removeStoreFiles | /**
* Closes and archives the specified store files from the specified family.
* @param familyName Family that contains the store files
* @param storeFiles set of store files to remove
* @throws IOException if the archiving fails
*/
public void removeStoreFiles(String familyName, Collection<HStoreFile> storeFiles)
throws IOException {
HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs, this.tableDir,
Bytes.toBytes(familyName), storeFiles);
} | 3.68 |
hbase_CacheConfig_getCacheCompactedBlocksOnWriteThreshold | /** Returns the total file size threshold, in bytes, for caching blocks while writing during compaction */
public long getCacheCompactedBlocksOnWriteThreshold() {
return this.cacheCompactedDataOnWriteThreshold;
} | 3.68 |
flink_MemorySegment_putCharBigEndian | /**
* Writes the given character (16 bit, 2 bytes) to the given position in big-endian byte order.
* This method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #putChar(int, char)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putChar(int, char)}
* is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The char value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putCharBigEndian(int index, char value) {
if (LITTLE_ENDIAN) {
putChar(index, Character.reverseBytes(value));
} else {
putChar(index, value);
}
} | 3.68 |
framework_VAbstractSplitPanel_setFirstWidget | /**
* For internal use only. May be removed or replaced in the future.
*
* @param w
* the widget to set to the first region or {@code null} to
* remove previously set widget
*/
public void setFirstWidget(Widget w) {
if (firstChild == w) {
return;
}
if (firstChild != null) {
firstChild.removeFromParent();
}
if (w != null) {
super.add(w, firstContainer);
}
firstChild = w;
} | 3.68 |
hadoop_TaggedInputSplit_getInputFormatClass | /**
* Retrieves the InputFormat class to use for this split.
*
* @return The InputFormat class to use
*/
public Class<? extends InputFormat> getInputFormatClass() {
return inputFormatClass;
} | 3.68 |
hbase_PrefixFilter_parseFrom | /**
* Parse a serialized representation of {@link PrefixFilter}
* @param pbBytes A pb serialized {@link PrefixFilter} instance
* @return An instance of {@link PrefixFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static PrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.PrefixFilter proto;
try {
proto = FilterProtos.PrefixFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new PrefixFilter(proto.hasPrefix() ? proto.getPrefix().toByteArray() : null);
} | 3.68 |
framework_Navigator_addViewChangeListener | /**
* Listen to changes of the active view.
* <p>
* Registered listeners are invoked in registration order before (
* {@link ViewChangeListener#beforeViewChange(ViewChangeEvent)
* beforeViewChange()}) and after (
* {@link ViewChangeListener#afterViewChange(ViewChangeEvent)
* afterViewChange()}) a view change occurs.
*
* @param listener
* Listener to invoke during a view change.
* @since 8.0
*/
public Registration addViewChangeListener(ViewChangeListener listener) {
listeners.add(listener);
return () -> listeners.remove(listener);
} | 3.68 |
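A small sketch of registering a listener and later removing it through the returned Registration handle; the navigator instance and the listener body are illustrative assumptions:

```java
import com.vaadin.navigator.Navigator;
import com.vaadin.navigator.ViewChangeListener;
import com.vaadin.shared.Registration;

public class NavigationListenerExample {

    /** Registers a listener on the given navigator and returns the handle used to remove it. */
    public static Registration register(Navigator navigator) {
        return navigator.addViewChangeListener(new ViewChangeListener() {
            @Override
            public boolean beforeViewChange(ViewChangeEvent event) {
                // Returning true lets the navigation proceed; false cancels it.
                return true;
            }

            @Override
            public void afterViewChange(ViewChangeEvent event) {
                // React to the completed view change, e.g. update a breadcrumb.
            }
        });
    }
}
```

Calling `remove()` on the returned Registration later detaches the listener again, which mirrors the lambda returned by the method above.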
hadoop_IOStatisticsBinding_trackDurationOfCallable | /**
* Given a callable/lambda expression,
* return a new one which wraps the inner and tracks
* the duration of the operation, including whether
* it passes/fails.
* @param factory factory of duration trackers
* @param statistic statistic key
* @param input input callable.
* @param <B> return type.
* @return a new callable which tracks duration and failure.
*/
public static <B> Callable<B> trackDurationOfCallable(
@Nullable DurationTrackerFactory factory,
String statistic,
Callable<B> input) {
return () -> {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
return input.call();
} catch (RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after any catch() call will have
// set the failed flag.
tracker.close();
}
};
} | 3.68 |
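A minimal usage sketch of the wrapper above; it assumes a null DurationTrackerFactory is acceptable, as the @Nullable annotation suggests, and the statistic name and import path shown are illustrative assumptions:

```java
import java.util.concurrent.Callable;

import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding;

public class TrackedCallableExample {
    public static void main(String[] args) throws Exception {
        // Wrap an arbitrary callable; the duration (and success/failure) of each
        // invocation is recorded by the tracker created for "op_example_read".
        Callable<String> tracked = IOStatisticsBinding.trackDurationOfCallable(
            null, "op_example_read", () -> "ok");

        System.out.println(tracked.call());
    }
}
```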
morf_DatabaseType_findByProductName | /**
* Returns the first available database type matching the JDBC product
* name.
*
* <p>Returns empty if no matching database type is found. If there are
* multiple matches for the product name, {@link IllegalArgumentException}
* will be thrown.</p>
*
* <p>No performance guarantees are made, but it will be <em>at best</em>
* <code>O(n),</code>where <code>n</code> is the number of registered database
* types.</p>
*
* @param product The JDBC database product name.
* @return The {@link DatabaseType}.
* @throws IllegalArgumentException If more than one matching database type is found.
*/
public static Optional<DatabaseType> findByProductName(final String product) {
List<DatabaseType> result = FluentIterable.from(registeredTypes.values()).filter(new Predicate<DatabaseType>() {
@Override
public boolean apply(DatabaseType input) {
return input.matchesProduct(product);
}
}).toList();
if (result.isEmpty()) return Optional.empty();
if (result.size() > 1) throw new IllegalArgumentException("Database product name [" + product + "] matches "
+ "more than one registered database type " + result);
return Optional.of(result.get(0));
} | 3.68 |
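A usage sketch, assuming the relevant database types are registered and that morf's DatabaseType lives at the usual org.alfasoftware.morf.jdbc package; the JDBC URL is illustrative:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Optional;

import org.alfasoftware.morf.jdbc.DatabaseType;

public class DatabaseTypeLookupExample {
    public static void main(String[] args) throws Exception {
        // Illustrative in-memory H2 URL; any JDBC driver on the classpath will do.
        try (Connection connection = DriverManager.getConnection("jdbc:h2:mem:example")) {
            String product = connection.getMetaData().getDatabaseProductName();
            Optional<DatabaseType> databaseType = DatabaseType.findByProductName(product);
            System.out.println(databaseType.isPresent()
                ? "Matched database type: " + databaseType.get()
                : "No registered database type for product: " + product);
        }
    }
}
```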
pulsar_OpAddEntry_handleAddFailure | /**
* Handles an add failure on the given ledger. It can be triggered when an add-entry fails or times out.
*
* @param lh the ledger handle on which the add failed
*/
void handleAddFailure(final LedgerHandle lh) {
// If we get a write error, we will try to create a new ledger and re-submit the pending writes. If the
// ledger creation fails (persistent bk failure, another instance owning the ML, ...), then the writes will
// be marked as failed.
ManagedLedgerImpl finalMl = this.ml;
finalMl.mbean.recordAddEntryError();
finalMl.getExecutor().execute(() -> {
// Force the creation of a new ledger. Doing it in a background thread to avoid acquiring ML lock
// from a BK callback.
finalMl.ledgerClosed(lh);
});
} | 3.68 |
flink_ArrayData_createElementGetter | /**
* Creates an accessor for getting elements in an internal array data structure at the given
* position.
*
* @param elementType the element type of the array
*/
static ElementGetter createElementGetter(LogicalType elementType) {
final ElementGetter elementGetter;
// ordered by type root definition
switch (elementType.getTypeRoot()) {
case CHAR:
case VARCHAR:
elementGetter = ArrayData::getString;
break;
case BOOLEAN:
elementGetter = ArrayData::getBoolean;
break;
case BINARY:
case VARBINARY:
elementGetter = ArrayData::getBinary;
break;
case DECIMAL:
final int decimalPrecision = getPrecision(elementType);
final int decimalScale = getScale(elementType);
elementGetter =
(array, pos) -> array.getDecimal(pos, decimalPrecision, decimalScale);
break;
case TINYINT:
elementGetter = ArrayData::getByte;
break;
case SMALLINT:
elementGetter = ArrayData::getShort;
break;
case INTEGER:
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
elementGetter = ArrayData::getInt;
break;
case BIGINT:
case INTERVAL_DAY_TIME:
elementGetter = ArrayData::getLong;
break;
case FLOAT:
elementGetter = ArrayData::getFloat;
break;
case DOUBLE:
elementGetter = ArrayData::getDouble;
break;
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
final int timestampPrecision = getPrecision(elementType);
elementGetter = (array, pos) -> array.getTimestamp(pos, timestampPrecision);
break;
case TIMESTAMP_WITH_TIME_ZONE:
throw new UnsupportedOperationException();
case ARRAY:
elementGetter = ArrayData::getArray;
break;
case MULTISET:
case MAP:
elementGetter = ArrayData::getMap;
break;
case ROW:
case STRUCTURED_TYPE:
final int rowFieldCount = getFieldCount(elementType);
elementGetter = (array, pos) -> array.getRow(pos, rowFieldCount);
break;
case DISTINCT_TYPE:
elementGetter = createElementGetter(((DistinctType) elementType).getSourceType());
break;
case RAW:
elementGetter = ArrayData::getRawValue;
break;
case NULL:
case SYMBOL:
case UNRESOLVED:
default:
throw new IllegalArgumentException();
}
if (!elementType.isNullable()) {
return elementGetter;
}
return (array, pos) -> {
if (array.isNullAt(pos)) {
return null;
}
return elementGetter.getElementOrNull(array, pos);
};
} | 3.68 |
hbase_BlockingRpcConnection_closeSocket | // just close socket input and output.
private void closeSocket() {
IOUtils.closeStream(out);
IOUtils.closeStream(in);
IOUtils.closeSocket(socket);
out = null;
in = null;
socket = null;
} | 3.68 |
hbase_MiniBatchOperationInProgress_size | /** Returns The number of operations(Mutations) involved in this batch. */
public int size() {
return this.lastIndexExclusive - this.firstIndex;
} | 3.68 |
framework_VFilterSelect_doSelectedItemAction | /**
* Send the current selection to the server. Triggered when a selection
* is made or on a blur event.
*/
public void doSelectedItemAction() {
debug("VFS.SM: doSelectedItemAction()");
// do not send a value change event if null was and stays selected
final String enteredItemValue = tb.getText();
if (nullSelectionAllowed && "".equals(enteredItemValue)
&& selectedOptionKey != null
&& !"".equals(selectedOptionKey)) {
if (nullSelectItem) {
reset();
return;
}
// null is not visible on pages != 0, and not visible when
// filtering: handle separately
client.updateVariable(paintableId, "filter", "", false);
client.updateVariable(paintableId, "page", 0, false);
client.updateVariable(paintableId, "selected", new String[] {},
immediate);
afterUpdateClientVariables();
suggestionPopup.hide();
return;
}
updateSelectionWhenReponseIsReceived = waitingForFilteringResponse;
if (!waitingForFilteringResponse) {
doPostFilterSelectedItemAction();
}
} | 3.68 |
pulsar_LoadSimulationController_trade | // Trade using the arguments parsed via JCommander and the topic name.
private synchronized void trade(final ShellArguments arguments, final String topic, final int client)
throws Exception {
// Decide which client to send to randomly to preserve statelessness of
// the controller.
outputStreams[client].write(LoadSimulationClient.TRADE_COMMAND);
writeProducerOptions(outputStreams[client], arguments, topic);
outputStreams[client].flush();
} | 3.68 |
flink_ExecutionConfig_disableForceAvro | /** Disables the Apache Avro serializer as the forced serializer for POJOs. */
public void disableForceAvro() {
setForceAvro(false);
} | 3.68 |
hbase_HMaster_getCompactionState | /**
* Get the compaction state of the table
* @param tableName The table name
* @return CompactionState Compaction state of the table
*/
public CompactionState getCompactionState(final TableName tableName) {
CompactionState compactionState = CompactionState.NONE;
try {
List<RegionInfo> regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
for (RegionInfo regionInfo : regions) {
ServerName serverName =
assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo);
if (serverName == null) {
continue;
}
ServerMetrics sl = serverManager.getLoad(serverName);
if (sl == null) {
continue;
}
RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
if (regionMetrics == null) {
LOG.warn("Can not get compaction details for the region: {} , it may be not online.",
regionInfo.getRegionNameAsString());
continue;
}
if (regionMetrics.getCompactionState() == CompactionState.MAJOR) {
if (compactionState == CompactionState.MINOR) {
compactionState = CompactionState.MAJOR_AND_MINOR;
} else {
compactionState = CompactionState.MAJOR;
}
} else if (regionMetrics.getCompactionState() == CompactionState.MINOR) {
if (compactionState == CompactionState.MAJOR) {
compactionState = CompactionState.MAJOR_AND_MINOR;
} else {
compactionState = CompactionState.MINOR;
}
}
}
} catch (Exception e) {
compactionState = null;
LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e);
}
return compactionState;
} | 3.68 |
hbase_BaseReplicationEndpoint_getWALEntryfilter | /** Returns a default set of filters */
@Override
public WALEntryFilter getWALEntryfilter() {
ArrayList<WALEntryFilter> filters = Lists.newArrayList();
WALEntryFilter scopeFilter = getScopeWALEntryFilter();
if (scopeFilter != null) {
filters.add(scopeFilter);
}
WALEntryFilter tableCfFilter = getNamespaceTableCfWALEntryFilter();
if (tableCfFilter != null) {
filters.add(tableCfFilter);
}
if (ctx != null && ctx.getPeerConfig() != null) {
String filterNameCSV =
ctx.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY);
if (filterNameCSV != null && !filterNameCSV.isEmpty()) {
String[] filterNames = filterNameCSV.split(",");
for (String filterName : filterNames) {
try {
Class<?> clazz = Class.forName(filterName);
filters.add((WALEntryFilter) clazz.getDeclaredConstructor().newInstance());
} catch (Exception e) {
LOG.error("Unable to create WALEntryFilter " + filterName, e);
}
}
}
}
return filters.isEmpty() ? null : new ChainWALEntryFilter(filters);
} | 3.68 |
flink_WindowsGrouping_reset | /** Reset for next group. */
public void reset() {
nextWindow = null;
watermark = Long.MIN_VALUE;
triggerWindowStartIndex = 0;
emptyWindowTriggered = true;
resetBuffer();
} | 3.68 |
framework_Table_getPageLength | /**
* Gets the page length.
*
* <p>
* Setting page length 0 disables paging.
* </p>
*
* @return the Length of one page.
*/
public int getPageLength() {
return pageLength;
} | 3.68 |
framework_VGridLayout_getRowHeights | /**
* Returns the row heights measured in pixels.
*
* @return the row heights in pixels
*/
protected int[] getRowHeights() {
return rowHeights;
} | 3.68 |
framework_VAccordion_getCaptionHeight | /**
* Returns the offset height of the caption node.
*
* @return the height in pixels
*/
public int getCaptionHeight() {
return captionNode.getOffsetHeight();
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_isShutdown | /**
* Judge whether the job is shutdown or not.
*
* @param jobName job name
* @return job is shutdown or not
*/
public boolean isShutdown(final String jobName) {
return !schedulerMap.containsKey(jobName) || !jobInstanceMap.containsKey(jobName);
} | 3.68 |
hbase_EndpointObserver_postEndpointInvocation | /**
* Called after an Endpoint service method is invoked. The response message can be altered using
* the builder.
* @param ctx the environment provided by the region server
* @param service the endpoint service
* @param methodName the invoked service method
* @param request Request message expected by given {@code Service}'s method (by the name
* {@code methodName}).
* @param responseBuilder Builder for final response to the client, with original response from
* Service's method merged into it.
*/
default void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder)
throws IOException {
} | 3.68 |
hbase_FavoredNodesPlan_getFavoredServerPosition | /**
* Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of
* size 3.
*/
public static Position getFavoredServerPosition(List<ServerName> favoredNodes,
ServerName server) {
if (
favoredNodes == null || server == null
|| favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM
) {
return null;
}
for (Position p : Position.values()) {
if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()), server)) {
return p;
}
}
return null;
} | 3.68 |
hadoop_IdentifierResolver_resolve | /**
* Resolves a given identifier. This method has to be called before calling
* any of the getters.
*/
public void resolve(String identifier) {
if (identifier.equalsIgnoreCase(RAW_BYTES_ID)) {
setInputWriterClass(RawBytesInputWriter.class);
setOutputReaderClass(RawBytesOutputReader.class);
setOutputKeyClass(BytesWritable.class);
setOutputValueClass(BytesWritable.class);
} else if (identifier.equalsIgnoreCase(TYPED_BYTES_ID)) {
setInputWriterClass(TypedBytesInputWriter.class);
setOutputReaderClass(TypedBytesOutputReader.class);
setOutputKeyClass(TypedBytesWritable.class);
setOutputValueClass(TypedBytesWritable.class);
} else if (identifier.equalsIgnoreCase(KEY_ONLY_TEXT_ID)) {
setInputWriterClass(KeyOnlyTextInputWriter.class);
setOutputReaderClass(KeyOnlyTextOutputReader.class);
setOutputKeyClass(Text.class);
setOutputValueClass(NullWritable.class);
} else { // assume TEXT_ID
setInputWriterClass(TextInputWriter.class);
setOutputReaderClass(TextOutputReader.class);
setOutputKeyClass(Text.class);
setOutputValueClass(Text.class);
}
} | 3.68 |
hibernate-validator_ValueExtractorManager_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
flink_ResultPartitionMetrics_refreshAndGetTotal | /**
* Iterates over all sub-partitions and collects the total number of queued buffers in a
* best-effort way.
*
* @return total number of queued buffers
*/
long refreshAndGetTotal() {
return partition.getNumberOfQueuedBuffers();
} | 3.68 |
flink_JoinedStreams_window | /** Specifies the window on which the join operation works. */
@PublicEvolving
public <W extends Window> WithWindow<T1, T2, KEY, W> window(
WindowAssigner<? super TaggedUnion<T1, T2>, W> assigner) {
return new WithWindow<>(
input1,
input2,
keySelector1,
keySelector2,
keyType,
assigner,
null,
null,
null);
} | 3.68 |
flink_ActorSystemBootstrapTools_startActorSystem | /**
* Starts an Actor System with given Pekko config.
*
* @param config Config of the started ActorSystem.
* @param actorSystemName Name of the started ActorSystem.
* @param logger The logger to output log information.
* @return The ActorSystem which has been started.
*/
private static ActorSystem startActorSystem(
Config config, String actorSystemName, Logger logger) {
logger.debug("Using pekko configuration\n {}", config);
ActorSystem actorSystem = PekkoUtils.createActorSystem(actorSystemName, config);
logger.info("Actor system started at {}", PekkoUtils.getAddress(actorSystem));
return actorSystem;
} | 3.68 |
hadoop_OBSBlockOutputStream_uploadCurrentBlock | /**
* Start an asynchronous upload of the current block.
*
* @throws IOException Problems opening the destination for upload or
* initializing the upload.
*/
private synchronized void uploadCurrentBlock() throws IOException {
Preconditions.checkState(hasActiveBlock(), "No active block");
LOG.debug("Writing block # {}", blockCount);
try {
if (multiPartUpload == null) {
LOG.debug("Initiating Multipart upload");
multiPartUpload = new MultiPartUpload();
}
multiPartUpload.uploadBlockAsync(getActiveBlock());
} catch (IOException e) {
hasException.set(true);
LOG.error("Upload current block on ({}/{}) failed.", fs.getBucket(),
key, e);
throw e;
} finally {
// set the block to null, so the next write will create a new block.
clearActiveBlock();
}
} | 3.68 |
zxing_FormatInformation_decodeFormatInformation | /**
* @param maskedFormatInfo1 format info indicator, with mask still applied
* @param maskedFormatInfo2 second copy of same info; both are checked at the same time
* to establish best match
* @return information about the format it specifies, or {@code null}
* if it doesn't seem to match any known pattern
*/
static FormatInformation decodeFormatInformation(int maskedFormatInfo1, int maskedFormatInfo2) {
FormatInformation formatInfo = doDecodeFormatInformation(maskedFormatInfo1, maskedFormatInfo2);
if (formatInfo != null) {
return formatInfo;
}
// Should return null, but, some QR codes apparently
// do not mask this info. Try again by actually masking the pattern
// first
return doDecodeFormatInformation(maskedFormatInfo1 ^ FORMAT_INFO_MASK_QR,
maskedFormatInfo2 ^ FORMAT_INFO_MASK_QR);
} | 3.68 |
flink_Hardware_getNumberCPUCores | /**
* Gets the number of CPU cores (hardware contexts) that the JVM has access to.
*
* @return The number of CPU cores.
*/
public static int getNumberCPUCores() {
return Runtime.getRuntime().availableProcessors();
} | 3.68 |
hadoop_SFTPConnectionPool_shutdown | /** Shutdown the connection pool and close all open connections. */
synchronized void shutdown() {
if (this.con2infoMap == null){
return; // already shutdown in case it is called
}
LOG.info("Inside shutdown, con2infoMap size=" + con2infoMap.size());
this.maxConnection = 0;
Set<ChannelSftp> cons = con2infoMap.keySet();
if (cons != null && cons.size() > 0) {
// make a copy since we need to modify the underlying Map
Set<ChannelSftp> copy = new HashSet<>(cons);
// Initiate disconnect from all outstanding connections
for (ChannelSftp con : copy) {
try {
disconnect(con);
} catch (IOException ioe) {
ConnectionInfo info = con2infoMap.get(con);
LOG.error(
"Error encountered while closing connection to " + info.getHost(),
ioe);
}
}
}
// make sure no further connections can be returned.
this.idleConnections = null;
this.con2infoMap = null;
} | 3.68 |
framework_AbstractComponent_isEnabled | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Component#isEnabled()
*/
@Override
public boolean isEnabled() {
return getState(false).enabled;
} | 3.68 |
flink_TableResultImpl_jobClient | /**
* Specifies job client which associates the submitted Flink job.
*
* @param jobClient a {@link JobClient} for the submitted Flink job.
*/
public Builder jobClient(JobClient jobClient) {
this.jobClient = jobClient;
return this;
} | 3.68 |
flink_HsBufferContext_release | /** Mark buffer status to release. */
public void release() {
if (isReleased()) {
return;
}
released = true;
// decrease ref count when buffer is released from memory.
buffer.recycleBuffer();
} | 3.68 |
hadoop_LocalCacheDirectoryManager_getCacheDirectoryRoot | /**
* Given a path to a directory within a local cache tree return the
* root of the cache directory.
*
* @param path the directory within a cache directory
* @return the local cache directory root or null if not found
*/
public static Path getCacheDirectoryRoot(Path path) {
while (path != null) {
String name = path.getName();
if (name.length() != 1) {
return path;
}
int dirnum = DIRECTORIES_PER_LEVEL;
try {
dirnum = Integer.parseInt(name, DIRECTORIES_PER_LEVEL);
} catch (NumberFormatException e) {
}
if (dirnum >= DIRECTORIES_PER_LEVEL) {
return path;
}
path = path.getParent();
}
return path;
} | 3.68 |
hadoop_ReduceTaskAttemptInfo_getShuffleRuntime | /**
* Get the runtime for the <b>shuffle</b> phase of the reduce task-attempt.
*
* @return the runtime for the <b>shuffle</b> phase of the reduce task-attempt
*/
public long getShuffleRuntime() {
return shuffleTime;
} | 3.68 |
flink_Tuple15_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>
Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14) {
return new Tuple15<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14);
} | 3.68 |
hbase_HFileBlock_getHFileContext | /**
* @return This HFileBlock's fileContext, which will be a derivative of the fileContext for the file
* from which this block's data was originally read.
*/
public HFileContext getHFileContext() {
return this.fileContext;
} | 3.68 |
flink_SplitAssignmentTracker_onCheckpoint | /**
* Behavior of SplitAssignmentTracker on checkpoint. The tracker will mark uncheckpointed
* assignments as checkpointed with the current checkpoint ID.
*
* @param checkpointId the id of the ongoing checkpoint
*/
public void onCheckpoint(long checkpointId) throws Exception {
// Include the uncheckpointed assignments to the snapshot.
assignmentsByCheckpointId.put(checkpointId, uncheckpointedAssignments);
uncheckpointedAssignments = new HashMap<>();
} | 3.68 |
streampipes_InfluxStore_close | /**
* Shuts down the connection to the InfluxDB server
*/
public void close() throws SpRuntimeException {
influxDb.flush();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new SpRuntimeException(e);
}
influxDb.close();
} | 3.68 |
hadoop_ActiveAuditManagerS3A_setUnbondedSpan | /**
* Set the unbonded span.
* @param unbondedSpan the new unbonded span
*/
private void setUnbondedSpan(final WrappingAuditSpan unbondedSpan) {
this.unbondedSpan = unbondedSpan;
} | 3.68 |
zxing_MatrixUtil_embedDarkDotAtLeftBottomCorner | // Embed the lonely dark dot at left bottom corner. JISX0510:2004 (p.46)
private static void embedDarkDotAtLeftBottomCorner(ByteMatrix matrix) throws WriterException {
if (matrix.get(8, matrix.getHeight() - 8) == 0) {
throw new WriterException();
}
matrix.set(8, matrix.getHeight() - 8, 1);
} | 3.68 |
framework_VTree_getNavigationEndKey | /**
* Get the key that moves the selection to the end of the table. By default
* this is the End key but by overriding this you can change the key to
* whatever you want.
*
* @return the key code used to move the selection to the end
*/
protected int getNavigationEndKey() {
return KeyCodes.KEY_END;
} | 3.68 |
hbase_HRegion_getRowLockInternal | // will be override in tests
protected RowLock getRowLockInternal(byte[] row, boolean readLock, RowLock prevRowLock)
throws IOException {
// create an object to use as a key in the row lock map
HashedBytes rowKey = new HashedBytes(row);
RowLockContext rowLockContext = null;
RowLockImpl result = null;
boolean success = false;
try {
// Keep trying until we have a lock or error out.
// TODO: do we need to add a time component here?
while (result == null) {
rowLockContext = computeIfAbsent(lockedRows, rowKey, () -> new RowLockContext(rowKey));
// Now try and get the lock.
// This can fail as
if (readLock) {
// For read lock, if the caller has locked the same row previously, it will not try
// to acquire the same read lock. It simply returns the previous row lock.
RowLockImpl prevRowLockImpl = (RowLockImpl) prevRowLock;
if (
(prevRowLockImpl != null)
&& (prevRowLockImpl.getLock() == rowLockContext.readWriteLock.readLock())
) {
success = true;
return prevRowLock;
}
result = rowLockContext.newReadLock();
} else {
result = rowLockContext.newWriteLock();
}
}
int timeout = rowLockWaitDuration;
boolean reachDeadlineFirst = false;
Optional<RpcCall> call = RpcServer.getCurrentCall();
if (call.isPresent()) {
long deadline = call.get().getDeadline();
if (deadline < Long.MAX_VALUE) {
int timeToDeadline = (int) (deadline - EnvironmentEdgeManager.currentTime());
if (timeToDeadline <= this.rowLockWaitDuration) {
reachDeadlineFirst = true;
timeout = timeToDeadline;
}
}
}
if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
String message = "Timed out waiting for lock for row: " + rowKey + " in region "
+ getRegionInfo().getEncodedName();
if (reachDeadlineFirst) {
throw new TimeoutIOException(message);
} else {
// If timeToDeadline is larger than rowLockWaitDuration, we can not drop the request.
throw new IOException(message);
}
}
rowLockContext.setThreadName(Thread.currentThread().getName());
success = true;
return result;
} catch (InterruptedException ie) {
if (LOG.isDebugEnabled()) {
LOG.debug("Thread interrupted waiting for lock on row: {}, in region {}", rowKey,
getRegionInfo().getRegionNameAsString());
}
throw throwOnInterrupt(ie);
} catch (Error error) {
// The maximum lock count for read lock is 64K (hardcoded), when this maximum count
// is reached, it will throw out an Error. This Error needs to be caught so it can
// go ahead to process the minibatch with lock acquired.
LOG.warn("Error to get row lock for {}, in region {}, cause: {}", Bytes.toStringBinary(row),
getRegionInfo().getRegionNameAsString(), error);
IOException ioe = new IOException(error);
throw ioe;
} finally {
// Clean up the counts just in case this was the thing keeping the context alive.
if (!success && rowLockContext != null) {
rowLockContext.cleanUp();
}
}
} | 3.68 |
hbase_ZKWatcher_connectionEvent | /**
* Called when there is a connection-related event via the Watcher callback.
* <p>
* If Disconnected or Expired, this should shutdown the cluster. But, since we send a
* KeeperException.SessionExpiredException along with the abort call, it's possible for the
* Abortable to catch it and try to create a new session with ZooKeeper. This is what the client
* does in HCM.
* <p>
* @param event the connection-related event
*/
private void connectionEvent(WatchedEvent event) {
switch (event.getState()) {
case SyncConnected:
this.identifier =
this.prefix + "-0x" + Long.toHexString(this.recoverableZooKeeper.getSessionId());
// Update our identifier. Otherwise ignore.
LOG.debug("{} connected", this.identifier);
break;
// Abort the server if Disconnected or Expired
case Disconnected:
LOG.debug(prefix("Received Disconnected from ZooKeeper, ignoring"));
break;
case Closed:
LOG.debug(prefix("ZooKeeper client closed, ignoring"));
break;
case Expired:
String msg = prefix(this.identifier + " received expired from " + "ZooKeeper, aborting");
// TODO: One thought is to add call to ZKListener so say,
// ZKNodeTracker can zero out its data values.
if (this.abortable != null) {
this.abortable.abort(msg, new KeeperException.SessionExpiredException());
}
break;
case ConnectedReadOnly:
case SaslAuthenticated:
case AuthFailed:
break;
default:
throw new IllegalStateException("Received event is not valid: " + event.getState());
}
} | 3.68 |
hadoop_AzureNativeFileSystemStore_createPermissionJsonSerializer | /**
* Creates a JSON serializer that can serialize a PermissionStatus object into
* the JSON string we want in the blob metadata.
*
* @return The JSON serializer.
*/
private static JSON createPermissionJsonSerializer() {
org.eclipse.jetty.util.log.Log.getProperties().setProperty("org.eclipse.jetty.util.log.announce", "false");
JSON serializer = new JSON();
serializer.addConvertor(PermissionStatus.class,
new PermissionStatusJsonSerializer());
return serializer;
} | 3.68 |
flink_SqlGatewayRestAPIVersion_getStableVersions | /**
* Returns the supported stable versions.
*
* @return the list of the stable versions.
*/
public static List<SqlGatewayRestAPIVersion> getStableVersions() {
return Arrays.stream(SqlGatewayRestAPIVersion.values())
.filter(SqlGatewayRestAPIVersion::isStableVersion)
.collect(Collectors.toList());
} | 3.68 |
hudi_HoodieRecordPayload_getOrderingValue | /**
* This method can be used to extract the ordering value of the payload for combining/merging,
* or 0 if no value is specified, which means natural order (arrival time is used).
*
* @return the ordering value
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
default Comparable<?> getOrderingValue() {
// default natural order
return 0;
} | 3.68 |
framework_GridConnector_getRowKey | /**
* Gets the row key for a row object.
*
* @param row
* the row object
* @return the key for the given row
*/
public String getRowKey(JsonObject row) {
final Object key = dataSource.getRowKey(row);
assert key instanceof String : "Internal key was not a String but a "
+ key.getClass().getSimpleName() + " (" + key + ")";
return (String) key;
} | 3.68 |
framework_VAbstractCalendarPanel_getDate | /**
* Returns the current date value.
*
* @return current date value
*/
public Date getDate() {
return value;
} | 3.68 |
flink_SubtaskStateStats_getAlignmentDuration | /**
* @return Duration of the stream alignment (for exactly-once only) or <code>-1</code> if the
* runtime did not report this.
*/
public long getAlignmentDuration() {
return alignmentDuration;
} | 3.68 |
framework_SQLContainer_removeAllContainerFilters | /**
* {@inheritDoc}
*/
@Override
public void removeAllContainerFilters() {
filters.clear();
refresh();
} | 3.68 |
flink_DataSet_max | /**
* Syntactic sugar for {@link #aggregate(Aggregations, int)} using {@link Aggregations#MAX} as
* the aggregation function.
*
* <p><strong>Note:</strong> This operation is not to be confused with {@link #maxBy(int...)},
* which selects one element with maximum value at the specified field positions.
*
* @param field The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the max'ed DataSet.
* @see #aggregate(Aggregations, int)
* @see #maxBy(int...)
*/
public AggregateOperator<T> max(int field) {
return aggregate(Aggregations.MAX, field);
} | 3.68 |
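A compact example of the aggregation with the (legacy) DataSet API, using illustrative tuple data:

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class DataSetMaxExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Tuple2<String, Integer>> scores = env.fromElements(
            Tuple2.of("a", 3),
            Tuple2.of("b", 7),
            Tuple2.of("c", 5));

        // max(1) aggregates field 1 across the whole DataSet; the value of field 0 in the
        // result is arbitrary, which is the difference from maxBy noted in the Javadoc above.
        scores.max(1).print(); // prints a single tuple whose second field is 7
    }
}
```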
hadoop_WeightedPolicyInfo_getHeadroomAlpha | /**
* Return the parameter headroomAlpha, used by policies that balance
* weight-based and load-based considerations in their decisions.
*
* For policies that use this parameter, values close to 1 indicate that most
* of the decision should be based on currently observed headroom from various
* sub-clusters; values close to zero indicate that the decision should be
* mostly based on weights and practically ignore current load.
*
* @return the value of headroomAlpha.
*/
public float getHeadroomAlpha() {
return headroomAlpha;
} | 3.68 |
flink_TableColumn_of | /** @deprecated Use {@link #computed(String, DataType, String)} instead. */
@Deprecated
public static TableColumn of(String name, DataType type, String expression) {
return computed(name, type, expression);
} | 3.68 |
flink_JobEdge_setShipStrategyName | /**
* Sets the name of the ship strategy for the represented input.
*
* @param shipStrategyName The name of the ship strategy.
*/
public void setShipStrategyName(String shipStrategyName) {
this.shipStrategyName = shipStrategyName;
} | 3.68 |
hudi_HoodiePipeline_option | /**
* Add a config option.
*/
public Builder option(ConfigOption<?> option, Object val) {
this.options.put(option.key(), val.toString());
return this;
} | 3.68 |
framework_VOverlay_getApplicationConnection | /**
* Get the {@link ApplicationConnection} that this overlay belongs to. If
* it's not set, {@link #getOwner()} is used to figure it out.
*
* @return the application connection this overlay belongs to, or {@code null} if it cannot be determined
*/
protected ApplicationConnection getApplicationConnection() {
if (ac != null) {
return ac;
} else if (getOwner() != null) {
ComponentConnector c = Util.findConnectorFor(getOwner());
if (c != null) {
ac = c.getConnection();
}
return ac;
} else {
return null;
}
} | 3.68 |
streampipes_PipelineManager_getPipeline | /**
* Returns the stored pipeline with the given pipeline id
*
* @param pipelineId id of pipeline
* @return the resulting pipeline with the given id
*/
public static Pipeline getPipeline(String pipelineId) {
return getPipelineStorage().getPipeline(pipelineId);
} | 3.68 |