name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_FileChangeWatcher_getState | /**
* Returns the current {@link FileChangeWatcher.State}.
* @return the current state.
*/
public synchronized State getState() {
return state;
} | 3.68 |
hadoop_ManifestCommitter_getOutputPath | /**
* Output path: destination directory of the job.
* @return the overall job destination directory.
*/
@Override
public Path getOutputPath() {
return getDestinationDir();
} | 3.68 |
MagicPlugin_Currency_hasMinValue | /**
* Check to see if this currency has a lower limit for player balances.
*
* @return true if this currency should be limited, typically to prevent negative balances.
*/
default boolean hasMinValue() { return false; } | 3.68 |
hbase_PrivateCellUtil_writeQualifier | /**
* Writes the qualifier from the given cell to the output stream.
* @param out The output stream to which the data is written
* @param cell The cell whose qualifier is written
* @param qlength the qualifier length
* @throws IOException if writing to the stream fails
*/
public static void writeQualifier(OutputStream out, Cell cell, int qlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out,
((ByteBufferExtendedCell) cell).getQualifierByteBuffer(),
((ByteBufferExtendedCell) cell).getQualifierPosition(), qlength);
} else {
out.write(cell.getQualifierArray(), cell.getQualifierOffset(), qlength);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testAddDecimalColumn | /**
* Test adding a floating point column.
*/
@Test
public void testAddDecimalColumn() {
testAlterTableColumn(AlterationType.ADD, column("floatField_new", DataType.DECIMAL, 6, 3).nullable(), expectedAlterTableAddDecimalColumnStatement());
} | 3.68 |
rocketmq-connect_CassandraSinkTask_start | /**
* Remember to always close the CqlSession, as described at
* https://docs.datastax.com/en/developer/java-driver/4.5/manual/core/
* @param props the task configuration properties
*/
@Override
public void start(KeyValue props) {
try {
ConfigUtil.load(props, this.config);
cqlSession = DBUtils.initCqlSession(config);
log.info("init data source success");
} catch (Exception e) {
log.error("Cannot start Cassandra Sink Task because of configuration error{}", e);
}
String mode = config.getMode();
if (mode.equals("bulk")) {
Updater updater = new Updater(config, cqlSession);
try {
updater.start();
tableQueue.add(updater);
} catch (Exception e) {
log.error("fail to start updater{}", e);
}
}
} | 3.68 |
hadoop_FieldSelectionHelper_extractFields | /**
* Extract the actual field numbers from the given field specs.
* If a field spec is in the form of "n-" (like 3-), then n will be the
* return value. Otherwise, -1 will be returned.
* @param fieldListSpec an array of field specs
* @param fieldList a list to which the field numbers extracted from the specs are added.
* @return number n if some field spec is in the form of "n-", -1 otherwise.
*/
private static int extractFields(String[] fieldListSpec,
List<Integer> fieldList) {
int allFieldsFrom = -1;
int i = 0;
int j = 0;
int pos = -1;
String fieldSpec = null;
for (i = 0; i < fieldListSpec.length; i++) {
fieldSpec = fieldListSpec[i];
if (fieldSpec.length() == 0) {
continue;
}
pos = fieldSpec.indexOf('-');
if (pos < 0) {
Integer fn = Integer.valueOf(fieldSpec);
fieldList.add(fn);
} else {
String start = fieldSpec.substring(0, pos);
String end = fieldSpec.substring(pos + 1);
if (start.length() == 0) {
start = "0";
}
if (end.length() == 0) {
allFieldsFrom = Integer.parseInt(start);
continue;
}
int startPos = Integer.parseInt(start);
int endPos = Integer.parseInt(end);
for (j = startPos; j <= endPos; j++) {
fieldList.add(j);
}
}
}
return allFieldsFrom;
} | 3.68 |
AreaShop_BuyRegion_getInactiveTimeUntilSell | /**
* Time until the region is automatically sold when the owner is offline.
* @return The number of milliseconds until the region is sold while the owner is offline
*/
public long getInactiveTimeUntilSell() {
return Utils.getDurationFromMinutesOrStringInput(getStringSetting("buy.inactiveTimeUntilSell"));
} | 3.68 |
framework_VAbstractCalendarPanel_isDateInsideRange | /**
* Checks inclusively whether a date is inside a range of dates or not.
*
* @param date the date to check
* @param minResolution the minimum resolution used when comparing against the range
* @return {@code true} if the date is inside the range, {@code false} otherwise
*/
private boolean isDateInsideRange(Date date, R minResolution) {
assert (date != null);
return isAcceptedByRangeEnd(date, minResolution)
&& isAcceptedByRangeStart(date, minResolution);
} | 3.68 |
hibernate-validator_NotEmptyValidatorForArraysOfByte_isValid | /**
* Checks the array is not {@code null} and not empty.
*
* @param array the array to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the array is not {@code null} and the array is not empty
*/
@Override
public boolean isValid(byte[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return false;
}
return array.length > 0;
} | 3.68 |
flink_FlinkHints_mergeTableOptions | /**
* Merges the dynamic table options from {@code hints} and static table options from table
* definition {@code props}.
*
* <p>The options in {@code hints} would override the ones in {@code props} if they have the
* same option key.
*
* @param hints Dynamic table options, usually from the OPTIONS hint
* @param props Static table options defined in DDL or connect API
* @return New options with merged dynamic table options, or the old {@code props} if there is
* no dynamic table options
*/
public static Map<String, String> mergeTableOptions(
Map<String, String> hints, Map<String, String> props) {
if (hints.size() == 0) {
return props;
}
Map<String, String> newProps = new HashMap<>();
newProps.putAll(props);
newProps.putAll(hints);
return Collections.unmodifiableMap(newProps);
} | 3.68 |
hbase_MobUtils_isCacheMobBlocks | /**
* Indicates whether the scan contains the information of caching blocks. The information is set
* in the attribute "hbase.mob.cache.blocks" of scan.
* @param scan The current scan.
* @return True when the Scan attribute specifies to cache the MOB blocks.
*/
public static boolean isCacheMobBlocks(Scan scan) {
byte[] cache = scan.getAttribute(MobConstants.MOB_CACHE_BLOCKS);
try {
return cache != null && Bytes.toBoolean(cache);
} catch (IllegalArgumentException e) {
return false;
}
} | 3.68 |
hbase_VersionInfo_getRevision | /**
* Get the subversion revision number for the root directory
* @return the revision number, eg. "451451"
*/
public static String getRevision() {
return Version.revision;
} | 3.68 |
hudi_SparkValidatorUtils_getRecordsFromCommittedFiles | /**
* Get records from partitions modified as a dataset.
* Note that this only works for COW tables.
*
* @param sqlContext Spark {@link SQLContext} instance.
* @param partitionsAffected A set of affected partitions.
* @param table {@link HoodieTable} instance.
* @param newStructTypeSchema The {@link StructType} schema from after state.
* @return The records in Dataframe from committed files.
*/
public static Dataset<Row> getRecordsFromCommittedFiles(SQLContext sqlContext,
Set<String> partitionsAffected,
HoodieTable table,
StructType newStructTypeSchema) {
List<String> committedFiles = partitionsAffected.stream()
.flatMap(partition -> table.getBaseFileOnlyView().getLatestBaseFiles(partition).map(BaseFile::getPath))
.collect(Collectors.toList());
if (committedFiles.isEmpty()) {
try {
return sqlContext.createDataFrame(
sqlContext.emptyDataFrame().rdd(),
AvroConversionUtils.convertAvroSchemaToStructType(
new TableSchemaResolver(table.getMetaClient()).getTableAvroSchema()));
} catch (Exception e) {
LOG.warn("Cannot get table schema from before state.", e);
LOG.warn("Use the schema from after state (current transaction) to create the empty Spark "
+ "dataframe: " + newStructTypeSchema);
return sqlContext.createDataFrame(
sqlContext.emptyDataFrame().rdd(), newStructTypeSchema);
}
}
return readRecordsForBaseFiles(sqlContext, committedFiles);
} | 3.68 |
framework_ContainerHierarchicalWrapper_removePropertySetChangeListener | /*
* Removes a Property set change listener from the object. Don't add a
* JavaDoc comment here, we use the default documentation from implemented
* interface.
*/
@Override
public void removePropertySetChangeListener(
Container.PropertySetChangeListener listener) {
if (container instanceof Container.PropertySetChangeNotifier) {
((Container.PropertySetChangeNotifier) container)
.removePropertySetChangeListener(
new PiggybackListener(listener));
}
} | 3.68 |
hadoop_ApplicationMaster_waitForCompletion | /**
* Wait until the application has finished and is ready for cleanup.
*/
private void waitForCompletion() throws InterruptedException {
synchronized (completionLock) {
while (!completed) {
completionLock.wait();
}
}
} | 3.68 |
flink_TableChange_getOldColumn | /** Returns the original {@link Column} instance. */
public Column getOldColumn() {
return oldColumn;
} | 3.68 |
hudi_HoodieSimpleIndex_tagLocationInternal | /**
* Tags records location for incoming records.
*
* @param inputRecords {@link HoodieData} of incoming records
* @param context instance of {@link HoodieEngineContext} to use
* @param hoodieTable instance of {@link HoodieTable} to use
* @return {@link HoodieData} of records with record locations set
*/
protected <R> HoodieData<HoodieRecord<R>> tagLocationInternal(
HoodieData<HoodieRecord<R>> inputRecords, HoodieEngineContext context,
HoodieTable hoodieTable) {
if (config.getSimpleIndexUseCaching()) {
inputRecords.persist(new HoodieConfig(config.getProps())
.getString(HoodieIndexConfig.SIMPLE_INDEX_INPUT_STORAGE_LEVEL_VALUE));
}
int inputParallelism = inputRecords.getNumPartitions();
int configuredSimpleIndexParallelism = config.getSimpleIndexParallelism();
// NOTE: Target parallelism could be overridden by the config
int targetParallelism =
configuredSimpleIndexParallelism > 0 ? configuredSimpleIndexParallelism : inputParallelism;
HoodiePairData<HoodieKey, HoodieRecord<R>> keyedInputRecords =
inputRecords.mapToPair(record -> new ImmutablePair<>(record.getKey(), record));
HoodiePairData<HoodieKey, HoodieRecordLocation> existingLocationsOnTable =
fetchRecordLocationsForAffectedPartitions(keyedInputRecords.keys(), context, hoodieTable,
targetParallelism);
HoodieData<HoodieRecord<R>> taggedRecords =
keyedInputRecords.leftOuterJoin(existingLocationsOnTable).map(entry -> {
final HoodieRecord<R> untaggedRecord = entry.getRight().getLeft();
final Option<HoodieRecordLocation> location = Option.ofNullable(entry.getRight().getRight().orElse(null));
return tagAsNewRecordIfNeeded(untaggedRecord, location);
});
if (config.getSimpleIndexUseCaching()) {
inputRecords.unpersist();
}
return taggedRecords;
} | 3.68 |
morf_UpgradeHelper_postSchemaUpgrade | /**
* postUpgrade - generates a collection of SQL statements to run after the upgrade.
* @param upgradeSchemas - source and target schemas for the upgrade.
* @param viewChanges - Changes to be made to views.
* @param viewChangesDeploymentHelper - Deployment helper for the view changes.
* @return - Collection of SQL Statements.
*/
static Collection<String> postSchemaUpgrade(UpgradeSchemas upgradeSchemas,
ViewChanges viewChanges,
ViewChangesDeploymentHelper viewChangesDeploymentHelper) {
ImmutableList.Builder<String> statements = ImmutableList.builder();
final boolean insertToDeployedViews = upgradeSchemas.getTargetSchema().tableExists(DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME);
for (View view : viewChanges.getViewsToDeploy()) {
statements.addAll(viewChangesDeploymentHelper.createView(view, insertToDeployedViews, upgradeSchemas));
}
return statements.build();
} | 3.68 |
hbase_PrivateCellUtil_getCellKeySerializedAsKeyValueKey | /**
* This method exists just to encapsulate how we serialize keys. To be replaced by a factory that
* we query to figure what the Cell implementation is and then, what serialization engine to use
* and further, how to serialize the key for inclusion in hfile index. TODO.
* @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed
* a null <code>cell</code>
*/
public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) {
if (cell == null) return null;
byte[] b = new byte[KeyValueUtil.keyLength(cell)];
KeyValueUtil.appendKeyTo(cell, b, 0);
return b;
} | 3.68 |
hbase_CompactingMemStore_startReplayingFromWAL | /**
* Informs the MemStore that the upcoming updates are going to be part of the replaying edits
* from the WAL
*/
@Override
public void startReplayingFromWAL() {
inWalReplay = true;
} | 3.68 |
flink_Configuration_addAll | /**
* Adds all entries from the given configuration into this configuration. The keys are prepended
* with the given prefix.
*
* @param other The configuration whose entries are added to this configuration.
* @param prefix The prefix to prepend.
*/
public void addAll(Configuration other, String prefix) {
final StringBuilder bld = new StringBuilder();
bld.append(prefix);
final int pl = bld.length();
synchronized (this.confData) {
synchronized (other.confData) {
for (Map.Entry<String, Object> entry : other.confData.entrySet()) {
bld.setLength(pl);
bld.append(entry.getKey());
this.confData.put(bld.toString(), entry.getValue());
}
}
}
} | 3.68 |
dubbo_HashedWheelTimer_expireTimeouts | /**
* Expire all {@link HashedWheelTimeout}s for the given {@code deadline}.
*/
void expireTimeouts(long deadline) {
HashedWheelTimeout timeout = head;
// process all timeouts
while (timeout != null) {
HashedWheelTimeout next = timeout.next;
if (timeout.remainingRounds <= 0) {
next = remove(timeout);
if (timeout.deadline <= deadline) {
timeout.expire();
} else {
// The timeout was placed into a wrong slot. This should never happen.
throw new IllegalStateException(String.format(
"timeout.deadline (%d) > deadline (%d)", timeout.deadline, deadline));
}
} else if (timeout.isCancelled()) {
next = remove(timeout);
} else {
timeout.remainingRounds--;
}
timeout = next;
}
} | 3.68 |
hadoop_NMTokenSecretManagerInRM_rollMasterKey | /**
* Creates a new master-key and sets it as the primary.
*/
@Private
public void rollMasterKey() {
super.writeLock.lock();
try {
LOG.info("Rolling master-key for nm-tokens");
if (this.currentMasterKey == null) { // Setting up for the first time.
this.currentMasterKey = createNewMasterKey();
} else {
this.nextMasterKey = createNewMasterKey();
LOG.info("Going to activate master-key with key-id "
+ this.nextMasterKey.getMasterKey().getKeyId() + " in "
+ this.activationDelay + "ms");
this.timer.schedule(new NextKeyActivator(), this.activationDelay);
}
} finally {
super.writeLock.unlock();
}
} | 3.68 |
hadoop_NodePlan_getNodeUUID | /**
* gets the Node UUID.
*
* @return Node UUID.
*/
public String getNodeUUID() {
return nodeUUID;
} | 3.68 |
hadoop_ReferenceCountMap_getEntries | /**
* Get entries in the reference Map.
*
* @return an immutable list of the entries in the reference map
*/
@VisibleForTesting
public ImmutableList<E> getEntries() {
return new ImmutableList.Builder<E>().addAll(referenceMap.keySet()).build();
} | 3.68 |
hibernate-validator_ConstraintAnnotationVisitor_visitVariableAsParameter | /**
* <p>
* Checks whether the given annotations are correctly specified at the given
* method parameter. The following checks are performed:
* </p>
* <ul>
* <li>
* Constraint annotation parameter values are meaningful and valid.
* </li>
* </ul>
*/
@Override
public Void visitVariableAsParameter(VariableElement annotatedField, List<AnnotationMirror> mirrors) {
checkConstraints( annotatedField, mirrors );
return null;
} | 3.68 |
hibernate-validator_ConstraintDescriptorImpl_determineConstraintType | /**
* Determines the type of this constraint. The following rules apply in
* descending order:
* <ul>
* <li>If {@code validationAppliesTo()} is set to either
* {@link ConstraintTarget#RETURN_VALUE} or
* {@link ConstraintTarget#PARAMETERS}, this value will be considered.</li>
* <li>Otherwise, if the constraint is either purely generic or purely
* cross-parameter as per its validators, that value will be considered.</li>
* <li>Otherwise, if the constraint is not on an executable, it is
* considered generic.</li>
* <li>Otherwise, the type will be determined based on exclusive existence
* of parameters and return value.</li>
* <li>If that also is not possible, determination fails (i.e. the user must
* specify the target explicitly).</li>
* </ul>
*
* @param constrainable The annotated member
* @param hasGenericValidators Whether the constraint has at least one generic validator or
* not
* @param hasCrossParameterValidator Whether the constraint has a cross-parameter validator
* @param externalConstraintType constraint type as derived from external context, e.g. for
* constraints declared in XML via {@code <return-value/>}
*
* @return The type of this constraint
*/
private ConstraintType determineConstraintType(Class<? extends Annotation> constraintAnnotationType,
Constrainable constrainable,
boolean hasGenericValidators,
boolean hasCrossParameterValidator,
ConstraintType externalConstraintType) {
ConstraintTarget constraintTarget = validationAppliesTo;
ConstraintType constraintType = null;
boolean isExecutable = constraintLocationKind.isExecutable();
//target explicitly set to RETURN_VALUE
if ( constraintTarget == ConstraintTarget.RETURN_VALUE ) {
if ( !isExecutable ) {
throw LOG.getParametersOrReturnValueConstraintTargetGivenAtNonExecutableException(
annotationDescriptor.getType(),
ConstraintTarget.RETURN_VALUE
);
}
constraintType = ConstraintType.GENERIC;
}
//target explicitly set to PARAMETERS
else if ( constraintTarget == ConstraintTarget.PARAMETERS ) {
if ( !isExecutable ) {
throw LOG.getParametersOrReturnValueConstraintTargetGivenAtNonExecutableException(
annotationDescriptor.getType(),
ConstraintTarget.PARAMETERS
);
}
constraintType = ConstraintType.CROSS_PARAMETER;
}
//target set by external context (e.g. <return-value> element in XML or returnValue() method in prog. API)
else if ( externalConstraintType != null ) {
constraintType = externalConstraintType;
}
//target set to IMPLICIT or not set at all
else {
//try to derive the type from the existing validators
if ( hasGenericValidators && !hasCrossParameterValidator ) {
constraintType = ConstraintType.GENERIC;
}
else if ( !hasGenericValidators && hasCrossParameterValidator ) {
constraintType = ConstraintType.CROSS_PARAMETER;
}
else if ( !isExecutable ) {
constraintType = ConstraintType.GENERIC;
}
else if ( constraintAnnotationType.isAnnotationPresent( SupportedValidationTarget.class ) ) {
SupportedValidationTarget supportedValidationTarget = constraintAnnotationType.getAnnotation( SupportedValidationTarget.class );
if ( supportedValidationTarget.value().length == 1 ) {
constraintType = supportedValidationTarget.value()[0] == ValidationTarget.ANNOTATED_ELEMENT ? ConstraintType.GENERIC : ConstraintType.CROSS_PARAMETER;
}
}
//try to derive from existence of parameters/return value
//hence look only if it is a callable
else if ( constrainable instanceof Callable ) {
boolean hasParameters = constrainable.as( Callable.class ).hasParameters();
boolean hasReturnValue = constrainable.as( Callable.class ).hasReturnValue();
if ( !hasParameters && hasReturnValue ) {
constraintType = ConstraintType.GENERIC;
}
else if ( hasParameters && !hasReturnValue ) {
constraintType = ConstraintType.CROSS_PARAMETER;
}
}
}
// Now we are out of luck
if ( constraintType == null ) {
throw LOG.getImplicitConstraintTargetInAmbiguousConfigurationException( annotationDescriptor.getType() );
}
if ( constraintType == ConstraintType.CROSS_PARAMETER ) {
validateCrossParameterConstraintType( constrainable, hasCrossParameterValidator );
}
return constraintType;
} | 3.68 |
framework_LayoutDependencyTree_getMeasureTargets | /**
* @return connectors that are waiting for measuring
* @deprecated As of 7.0.1, use {@link #getMeasureTargetsJsArray()} for
* improved performance.
*/
@Deprecated
public Collection<ComponentConnector> getMeasureTargets() {
JsArrayString targetIds = getMeasureTargetsJsArray();
int length = targetIds.length();
List<ComponentConnector> targets = new ArrayList<>(length);
ConnectorMap connectorMap = ConnectorMap.get(connection);
for (int i = 0; i < length; i++) {
targets.add((ComponentConnector) connectorMap
.getConnector(targetIds.get(i)));
}
return targets;
} | 3.68 |
hadoop_JWTRedirectAuthenticationHandler_validateExpiration | /**
* Validate that the expiration time of the JWT token has not been violated.
* If it has then throw an AuthenticationException. Override this method in
* subclasses in order to customize the expiration validation behavior.
*
* @param jwtToken the token that contains the expiration date to validate
* @return true if the token has not expired; false otherwise
*/
protected boolean validateExpiration(SignedJWT jwtToken) {
boolean valid = false;
try {
Date expires = jwtToken.getJWTClaimsSet().getExpirationTime();
if (expires == null || new Date().before(expires)) {
LOG.debug("JWT token expiration date has been "
+ "successfully validated");
valid = true;
} else {
LOG.warn("JWT expiration date validation failed.");
}
} catch (ParseException pe) {
LOG.warn("JWT expiration date validation failed.", pe);
}
return valid;
} | 3.68 |
MagicPlugin_DirectionUtils_goLeft | /**
* A helper function to change a given direction to the direction "to the left".
*
* <p>There's probably some better matrix-y, math-y way to do this.
* It'd be nice if this was in BlockFace.
* @param direction The current direction
* @return The direction to the left
*/
public static org.bukkit.block.BlockFace goLeft(org.bukkit.block.BlockFace direction) {
switch (direction) {
case EAST:
return org.bukkit.block.BlockFace.NORTH;
case NORTH:
return org.bukkit.block.BlockFace.WEST;
case WEST:
return org.bukkit.block.BlockFace.SOUTH;
case SOUTH:
return org.bukkit.block.BlockFace.EAST;
default:
return direction;
}
} | 3.68 |
hbase_ProcedureStoreTracker_toProto | /**
* Builds org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker
* protocol buffer from current state.
*/
public ProcedureProtos.ProcedureStoreTracker toProto() throws IOException {
ProcedureProtos.ProcedureStoreTracker.Builder builder =
ProcedureProtos.ProcedureStoreTracker.newBuilder();
for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
builder.addNode(entry.getValue().convert());
}
return builder.build();
} | 3.68 |
hudi_HoodieTable_getSliceView | /**
* Get the full view of the file system for this table.
*/
public SliceView getSliceView() {
return getViewManager().getFileSystemView(metaClient);
} | 3.68 |
framework_AbstractInMemoryContainer_getSortablePropertyIds | /**
* Returns the sortable property identifiers for the container. Can be used
* to implement {@link Sortable#getSortableContainerPropertyIds()}.
*/
protected Collection<?> getSortablePropertyIds() {
LinkedList<Object> sortables = new LinkedList<Object>();
for (Object propertyId : getContainerPropertyIds()) {
Class<?> propertyType = getType(propertyId);
if (Comparable.class.isAssignableFrom(propertyType)
|| propertyType.isPrimitive()) {
sortables.add(propertyId);
}
}
return sortables;
} | 3.68 |
flink_PythonOperatorUtils_setCurrentKeyForTimerService | /** Set the current key for the timer service. */
public static <K, N> void setCurrentKeyForTimerService(
InternalTimerService<N> internalTimerService, K currentKey) throws Exception {
if (internalTimerService instanceof BatchExecutionInternalTimeService) {
((BatchExecutionInternalTimeService<K, N>) internalTimerService)
.setCurrentKey(currentKey);
}
} | 3.68 |
hbase_RegionStates_hasRegionsInTransition | // ==========================================================================
// Region in transition helpers
// ==========================================================================
public boolean hasRegionsInTransition() {
return !regionInTransition.isEmpty();
} | 3.68 |
AreaShop_SignsFeature_addSign | /**
* Add a sign to this region.
* @param location The location of the sign
* @param signType The type of the sign (WALL_SIGN or SIGN_POST)
* @param facing The orientation of the sign
* @param profile The profile to use with this sign (null for default)
*/
public void addSign(Location location, Material signType, BlockFace facing, String profile) {
int i = 0;
while(getRegion().getConfig().isSet("general.signs." + i)) {
i++;
}
String signPath = "general.signs." + i + ".";
getRegion().setSetting(signPath + "location", Utils.locationToConfig(location));
getRegion().setSetting(signPath + "facing", facing != null ? facing.name() : null);
getRegion().setSetting(signPath + "signType", signType != null ? signType.name() : null);
if(profile != null && !profile.isEmpty()) {
getRegion().setSetting(signPath + "profile", profile);
}
// Add to the map
RegionSign sign = new RegionSign(this, i + "");
signs.put(sign.getStringLocation(), sign);
allSigns.put(sign.getStringLocation(), sign);
signsByChunk.computeIfAbsent(sign.getStringChunk(), key -> new ArrayList<>())
.add(sign);
} | 3.68 |
framework_DDUtil_getVerticalDropLocation | /**
* Get vertical drop location.
*
* @param element
* the drop target element
* @param offsetHeight
* the height of an element relative to the layout
* @param clientY
* the y-coordinate of the latest event that relates to this drag
* operation
* @param topBottomRatio
* the ratio that determines how big portion of the element on
* each end counts for indicating desire to drop above or below
* the element rather than on top of it
* @return the drop location
*/
public static VerticalDropLocation getVerticalDropLocation(Element element,
int offsetHeight, int clientY, double topBottomRatio) {
// Event coordinates are relative to the viewport, element absolute
// position is relative to the document. Make element position relative
// to viewport by adjusting for viewport scrolling. See #6021
int elementTop = element.getAbsoluteTop() - Window.getScrollTop();
int fromTop = clientY - elementTop;
float percentageFromTop = (fromTop / (float) offsetHeight);
if (percentageFromTop < topBottomRatio) {
return VerticalDropLocation.TOP;
} else if (percentageFromTop > 1 - topBottomRatio) {
return VerticalDropLocation.BOTTOM;
} else {
return VerticalDropLocation.MIDDLE;
}
} | 3.68 |
flink_NFACompiler_compileFactory | /**
* Compiles the given pattern into a {@link NFAFactory}. The NFA factory can be used to
* create multiple NFAs.
*/
void compileFactory() {
Pattern<T, ?> lastPattern = currentPattern;
checkPatternNameUniqueness();
checkPatternSkipStrategy();
// we're traversing the pattern from the end to the beginning --> the first state is the
// final state
State<T> sinkState = createEndingState();
// add all the normal states
sinkState = createMiddleStates(sinkState);
// add the beginning state
createStartState(sinkState);
// check the window times between events for pattern
checkPatternWindowTimes();
if (lastPattern.getQuantifier().getConsumingStrategy()
== Quantifier.ConsumingStrategy.NOT_FOLLOW
&& (!windowTimes.containsKey(lastPattern.getName())
|| windowTimes.get(lastPattern.getName()) <= 0)
&& getWindowTime() == 0) {
throw new MalformedPatternException(
"NotFollowedBy is not supported without windowTime as a last part of a Pattern!");
}
} | 3.68 |
flink_Configuration_removeConfig | /**
* Removes given config option from the configuration.
*
* @param configOption config option to remove
* @param <T> Type of the config option
* @return true if config has been removed, false otherwise
*/
public <T> boolean removeConfig(ConfigOption<T> configOption) {
synchronized (this.confData) {
final BiFunction<String, Boolean, Optional<Boolean>> applier =
(key, canBePrefixMap) -> {
if (canBePrefixMap && removePrefixMap(this.confData, key)
|| this.confData.remove(key) != null) {
return Optional.of(true);
}
return Optional.empty();
};
return applyWithOption(configOption, applier).orElse(false);
}
} | 3.68 |
hbase_SpaceQuotaSnapshot_isInViolation | /** Returns {@code true} if the quota is being violated, {@code false} otherwise. */
@Override
public boolean isInViolation() {
return inViolation;
} | 3.68 |
druid_SQLCreateTableStatement_isMUL | /**
* Only used for SHOW COLUMNS.
*/
public boolean isMUL(String columnName) {
for (SQLTableElement element : this.tableElementList) {
if (element instanceof MySqlUnique) {
MySqlUnique unique = (MySqlUnique) element;
SQLExpr column = unique.getColumns().get(0).getExpr();
if (column instanceof SQLIdentifierExpr
&& SQLUtils.nameEquals(columnName, ((SQLIdentifierExpr) column).getName())) {
return unique.getColumns().size() > 1;
} else if (column instanceof SQLMethodInvokeExpr
&& SQLUtils.nameEquals(((SQLMethodInvokeExpr) column).getMethodName(), columnName)) {
return true;
}
} else if (element instanceof MySqlKey) {
MySqlKey unique = (MySqlKey) element;
SQLExpr column = unique.getColumns().get(0).getExpr();
if (column instanceof SQLIdentifierExpr
&& SQLUtils.nameEquals(columnName, ((SQLIdentifierExpr) column).getName())) {
return true;
} else if (column instanceof SQLMethodInvokeExpr
&& SQLUtils.nameEquals(((SQLMethodInvokeExpr) column).getMethodName(), columnName)) {
return true;
}
}
}
return false;
} | 3.68 |
hbase_MobFileName_getDateFromName | /**
* Gets the date from a MobFileName.
* @param fileName file name.
* @return the date string embedded in the file name.
*/
public static String getDateFromName(final String fileName) {
return fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX);
} | 3.68 |
framework_DDEventHandleStrategy_handleDragLeave | /**
* Handles drag leave on old element.
*
* @param mediator
* VDragAndDropManager data accessor
* @param clearServerCallback
* {@code true} if server communication callback should be
* cleaned up, {@code false} otherwise
*/
protected void handleDragLeave(DDManagerMediator mediator,
boolean clearServerCallback) {
VDragAndDropManager manager = mediator.getManager();
if (manager.getCurrentDropHandler() != null) {
manager.getCurrentDropHandler().dragLeave(mediator.getDragEvent());
mediator.getDragEvent().getDropDetails().clear();
if (clearServerCallback) {
mediator.clearServerCallback();
}
}
} | 3.68 |
hadoop_TypedBytesOutput_writeBytes | /**
* Writes a bytes buffer as a typed bytes sequence.
*
* @param buffer the bytes buffer to be written
* @throws IOException
*/
public void writeBytes(Buffer buffer) throws IOException {
writeBytes(buffer.get(), Type.BYTES.code, buffer.getCount());
} | 3.68 |
framework_AbstractComponent_isConnectorEnabled | /*
* (non-Javadoc)
*
* @see com.vaadin.client.Connector#isConnectorEnabled()
*/
@Override
public boolean isConnectorEnabled() {
if (!isVisible()) {
return false;
} else if (!isEnabled()) {
return false;
} else if (!super.isConnectorEnabled()) {
return false;
}
return !(getParent() instanceof SelectiveRenderer)
|| ((SelectiveRenderer) getParent()).isRendered(this);
} | 3.68 |
framework_Table_isEditable | /**
* Is table editable.
*
* If the table is editable, an editor of type Field is created for each
* table cell. The assigned FieldFactory is used to create the instances.
*
* To provide custom editors for table cells, create a class implementing
* the FieldFactory interface, assign it to the table, and set the editable
* property to true.
*
* @return true if table is editable, false otherwise.
* @see Field
* @see FieldFactory
*
*/
public boolean isEditable() {
return editable;
} | 3.68 |
hadoop_Container_setVersion | /**
* Set the version of this container.
* @param version of this container.
*/
@Private
@Unstable
public void setVersion(int version) {
throw new UnsupportedOperationException();
} | 3.68 |
framework_RadioButtonGroup_setItemDescriptionGenerator | /**
* Sets the description generator that is used for generating descriptions
* for items. Description is shown as a tooltip when hovering on
* corresponding element. If the generator returns {@code null}, no tooltip
* is shown.
*
* @param descriptionGenerator
* the item description generator to set, not {@code null}
*
* @since 8.2
*/
public void setItemDescriptionGenerator(
DescriptionGenerator<T> descriptionGenerator) {
Objects.requireNonNull(descriptionGenerator);
if (this.descriptionGenerator != descriptionGenerator) {
this.descriptionGenerator = descriptionGenerator;
getDataProvider().refreshAll();
}
} | 3.68 |
framework_AbstractComponent_setLocale | /**
* Sets the locale of this component.
*
* <pre>
* // Component for which the locale is meaningful
* InlineDateField date = new InlineDateField("Datum");
*
* // German language specified with ISO 639-1 language
* // code and ISO 3166-1 alpha-2 country code.
* date.setLocale(new Locale("de", "DE"));
*
* date.setResolution(DateField.RESOLUTION_DAY);
* layout.addComponent(date);
* </pre>
*
*
* @param locale
* the locale to become this component's locale.
*/
public void setLocale(Locale locale) {
this.locale = locale;
if (locale != null && isAttached()) {
getUI().getLocaleService().addLocale(locale);
}
markAsDirty();
} | 3.68 |
hbase_HRegion_registerChildren | /**
* {@inheritDoc}
*/
@Override
public void registerChildren(ConfigurationManager manager) {
configurationManager = manager;
stores.values().forEach(manager::registerObserver);
} | 3.68 |
hadoop_FlowActivityRowKey_encode | /*
* (non-Javadoc)
*
* Encodes FlowActivityRowKey object into a byte array with each
* component/field in FlowActivityRowKey separated by Separator#QUALIFIERS.
* This leads to a flow activity table row key of the form
* clusterId!dayTimestamp!user!flowName. If dayTimestamp in passed
* FlowActivityRowKey object is null and clusterId is not null, then this
* returns a row key prefix as clusterId! and if userId in
* FlowActivityRowKey is null (and the fields preceding it i.e. clusterId
* and dayTimestamp are not null), this returns a row key prefix as
* clusterId!dayTimeStamp! dayTimestamp is inverted while encoding as it
* helps maintain a descending order for row keys in flow activity table.
*
* @see org.apache.hadoop.yarn.server.timelineservice.storage.common
* .KeyConverter#encode(java.lang.Object)
*/
@Override
public byte[] encode(FlowActivityRowKey rowKey) {
if (rowKey.getDayTimestamp() == null) {
return Separator.QUALIFIERS.join(Separator.encode(
rowKey.getClusterId(), Separator.SPACE, Separator.TAB,
Separator.QUALIFIERS), Separator.EMPTY_BYTES);
}
if (rowKey.getUserId() == null) {
return Separator.QUALIFIERS.join(Separator.encode(
rowKey.getClusterId(), Separator.SPACE, Separator.TAB,
Separator.QUALIFIERS), Bytes.toBytes(LongConverter
.invertLong(rowKey.getDayTimestamp())), Separator.EMPTY_BYTES);
}
return Separator.QUALIFIERS.join(Separator.encode(rowKey.getClusterId(),
Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Bytes
.toBytes(LongConverter.invertLong(rowKey.getDayTimestamp())),
Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
Separator.QUALIFIERS), Separator.encode(rowKey.getFlowName(),
Separator.SPACE, Separator.TAB, Separator.QUALIFIERS));
} | 3.68 |
framework_LegacyCommunicationManager_getStreamVariableTargetUrl | /**
* @deprecated As of 7.1. See #11411.
*/
@Deprecated
public String getStreamVariableTargetUrl(ClientConnector owner, String name,
StreamVariable value) {
/*
* We will use the same APP/* URI space as ApplicationResources but
* prefix url with UPLOAD
*
* e.g. APP/UPLOAD/[UIID]/[PID]/[NAME]/[SECKEY]
*
* SECKEY is created on each paint to make URL's unpredictable (to
* prevent CSRF attacks).
*
* NAME and PID from URI forms a key to fetch StreamVariable when
* handling post
*/
String paintableId = owner.getConnectorId();
UI ui = owner.getUI();
int uiId = ui.getUIId();
String key = uiId + "/" + paintableId + "/" + name;
ConnectorTracker connectorTracker = ui.getConnectorTracker();
connectorTracker.addStreamVariable(paintableId, name, value);
String seckey = connectorTracker.getSeckey(value);
return ApplicationConstants.APP_PROTOCOL_PREFIX
+ ServletPortletHelper.UPLOAD_URL_PREFIX + key + "/" + seckey;
} | 3.68 |
hbase_HRegionServer_skipReportingTransition | /**
* Helper method for use in tests. Skip the region transition report when there's no master around
* to receive it.
*/
private boolean skipReportingTransition(final RegionStateTransitionContext context) {
final TransitionCode code = context.getCode();
final long openSeqNum = context.getOpenSeqNum();
long masterSystemTime = context.getMasterSystemTime();
final RegionInfo[] hris = context.getHris();
if (code == TransitionCode.OPENED) {
Preconditions.checkArgument(hris != null && hris.length == 1);
if (hris[0].isMetaRegion()) {
LOG.warn(
"meta table location is stored in master local store, so we can not skip reporting");
return false;
} else {
try {
MetaTableAccessor.updateRegionLocation(asyncClusterConnection.toConnection(), hris[0],
serverName, openSeqNum, masterSystemTime);
} catch (IOException e) {
LOG.info("Failed to update meta", e);
return false;
}
}
}
return true;
} | 3.68 |
morf_SqlDialect_schemaNamePrefix | /**
* @param table The table for which the schema name will be retrieved
* @return Base implementation calls {@link #schemaNamePrefix()}.
*/
protected String schemaNamePrefix(@SuppressWarnings("unused") Table table) {
return schemaNamePrefix();
} | 3.68 |
hbase_ProcedureMember_receiveAbortProcedure | /**
* Send abort to the specified procedure
* @param procName name of the procedure to abort
* @param ee exception information about the abort
*/
public void receiveAbortProcedure(String procName, ForeignException ee) {
LOG.debug("Request received to abort procedure " + procName, ee);
// if we know about the procedure, notify it
Subprocedure sub = subprocs.get(procName);
if (sub == null) {
LOG.info(
"Received abort on procedure with no local subprocedure " + procName + ", ignoring it.",
ee);
return; // Procedure has already completed
}
String msg = "Propagating foreign exception to subprocedure " + sub.getName();
LOG.error(msg, ee);
sub.cancel(msg, ee);
} | 3.68 |
hbase_HRegionServer_getBlockCache | /**
* May be empty if this is a master which does not carry tables.
* @return The block cache instance used by the regionserver.
*/
@Override
public Optional<BlockCache> getBlockCache() {
return Optional.ofNullable(this.blockCache);
} | 3.68 |
querydsl_AbstractGeometryCollectionExpression_geometryN | /**
* Returns the Nth geometry in this GeometryCollection.
*
* @param n one based index
* @return matching geometry
*/
public GeometryExpression<Geometry> geometryN(Integer n) {
return GeometryExpressions.geometryOperation(SpatialOps.GEOMETRYN, mixin, ConstantImpl.create(n));
} | 3.68 |
framework_DesignFormatter_decodeFromTextNode | /**
* <p>
* Decodes HTML entities in a text from text node and replaces them with
* actual characters.
* </p>
*
* <p>
* Typically this method will be used by components to read back data (like
* option items in {@code AbstractSelect}) from HTML. Note that this method
* unencodes more characters than {@link #encodeForTextNode(String)} encodes
* </p>
*
* @since 7.6
* @param input the text to decode
* @return the decoded text with HTML entities replaced by actual characters
*/
public static String decodeFromTextNode(String input) {
return Parser.unescapeEntities(input, false);
} | 3.68 |
hudi_HoodieRowDataCreateHandle_createMarkerFile | /**
* Creates an empty marker file corresponding to storage writer path.
*
* @param partitionPath Partition path
* @param dataFileName Name of the data file for which the marker is created
*/
private void createMarkerFile(String partitionPath, String dataFileName) {
WriteMarkers writeMarkers = WriteMarkersFactory.get(writeConfig.getMarkersType(), table, instantTime);
writeMarkers.create(partitionPath, dataFileName, IOType.CREATE);
} | 3.68 |
hbase_SnapshotManifest_getSnapshotDir | /**
* Get the current snapshot working dir
*/
public Path getSnapshotDir() {
return this.workingDir;
} | 3.68 |
zxing_MaxiCodeReader_decode | /**
* Locates and decodes a MaxiCode in an image.
*
* @return a String representing the content encoded by the MaxiCode
* @throws NotFoundException if a MaxiCode cannot be found
* @throws FormatException if a MaxiCode cannot be decoded
* @throws ChecksumException if error correction fails
*/
@Override
public Result decode(BinaryBitmap image) throws NotFoundException, ChecksumException, FormatException {
return decode(image, null);
} | 3.68 |
morf_DeleteStatement_where | /**
* Specifies the where criteria
*
* <blockquote><pre>delete([table])
* .where([criteria]);</pre></blockquote>
*
* @param criterion the criteria to filter the results by
* @return a statement with the change applied.
*/
public DeleteStatement where(Criterion criterion) {
return copyOnWriteOrMutate(
b -> b.where(criterion),
() -> {
if (criterion == null)
throw new IllegalArgumentException("Criterion was null in where clause");
whereCriterion = criterion;
}
);
} | 3.68 |
dubbo_ScopeModel_initialize | /**
* NOTE:
* <ol>
* <li>The initialize method only be called in subclass.</li>
* <li>
* In subclass, the extensionDirector and beanFactory are available in initialize but not available in constructor.
* </li>
* </ol>
*/
protected void initialize() {
synchronized (instLock) {
this.extensionDirector =
new ExtensionDirector(parent != null ? parent.getExtensionDirector() : null, scope, this);
this.extensionDirector.addExtensionPostProcessor(new ScopeModelAwareExtensionProcessor(this));
this.beanFactory = new ScopeBeanFactory(parent != null ? parent.getBeanFactory() : null, extensionDirector);
// Add Framework's ClassLoader by default
ClassLoader dubboClassLoader = ScopeModel.class.getClassLoader();
if (dubboClassLoader != null) {
this.addClassLoader(dubboClassLoader);
}
}
} | 3.68 |
hadoop_FederationPolicyStoreInputValidator_checkQueue | /**
* Validate if the queue id is valid or not.
*
* @param queue the queue id of the policy to be verified
* @throws FederationStateStoreInvalidInputException if the queue id is
* invalid
*/
private static void checkQueue(String queue)
throws FederationStateStoreInvalidInputException {
if (queue == null || queue.isEmpty()) {
String message = "Missing Queue. Please try again by specifying a Queue.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.68 |
hbase_MobUtils_getMobRegionPath | /**
* Gets the region dir of the mob files under the specified root dir. It's
* {rootDir}/mobdir/data/{namespace}/{tableName}/{regionEncodedName}.
* @param rootDir The qualified path of HBase root directory.
* @param tableName The current table name.
* @return The region dir of the mob files.
*/
public static Path getMobRegionPath(Path rootDir, TableName tableName) {
Path tablePath = CommonFSUtils.getTableDir(getMobHome(rootDir), tableName);
RegionInfo regionInfo = getMobRegionInfo(tableName);
return new Path(tablePath, regionInfo.getEncodedName());
} | 3.68 |
hudi_MarkerDirState_flushMarkersToFile | /**
* Flushes markers to the underlying file.
*
* @param markerFileIndex file index to use.
*/
private void flushMarkersToFile(int markerFileIndex) {
LOG.debug("Write to " + markerDirPath + "/" + MARKERS_FILENAME_PREFIX + markerFileIndex);
HoodieTimer timer = HoodieTimer.start();
Path markersFilePath = new Path(markerDirPath, MARKERS_FILENAME_PREFIX + markerFileIndex);
FSDataOutputStream fsDataOutputStream = null;
BufferedWriter bufferedWriter = null;
try {
fsDataOutputStream = fileSystem.create(markersFilePath);
bufferedWriter = new BufferedWriter(new OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8));
bufferedWriter.write(fileMarkersMap.get(markerFileIndex).toString());
} catch (IOException e) {
throw new HoodieIOException("Failed to overwrite marker file " + markersFilePath, e);
} finally {
closeQuietly(bufferedWriter);
closeQuietly(fsDataOutputStream);
}
LOG.debug(markersFilePath.toString() + " written in " + timer.endTimer() + " ms");
} | 3.68 |
hadoop_BufferData_getBlockNumber | /**
* Gets the id of this block.
*
* @return the id of this block.
*/
public int getBlockNumber() {
return this.blockNumber;
} | 3.68 |
rocketmq-connect_ProcessingContext_attempt | /**
* @param attempt the number of attempts made to execute the current operation.
*/
public void attempt(int attempt) {
this.attempt = attempt;
} | 3.68 |
framework_BeanValidationBinder_setRequiredConfigurator | /**
* Sets a logic which allows configuring the required indicator via
* {@link HasValue#setRequiredIndicatorVisible(boolean)} based on property
* descriptor.
* <p>
* Required indicator configuration will not be used at all if
* {@code configurator} is null.
* <p>
* By default the {@link RequiredFieldConfigurator#DEFAULT} configurator is
* used.
*
* @param configurator
* required indicator configurator, may be {@code null}
*/
public void setRequiredConfigurator(
RequiredFieldConfigurator configurator) {
requiredConfigurator = configurator;
} | 3.68 |
framework_AbstractBeanContainer_removeValueChangeListener | /**
* Remove this container as a listener for the given property.
*
* @param item
* The {@link Item} that contains the property
* @param propertyId
* The id of the property
*/
private void removeValueChangeListener(Item item, Object propertyId) {
Property<?> property = item.getItemProperty(propertyId);
if (property instanceof ValueChangeNotifier) {
((ValueChangeNotifier) property).removeListener(this);
}
} | 3.68 |
framework_VTree_handleClickSelection | /**
* Handles mouse selection
*
* @param ctrl
* Was the ctrl-key pressed
* @param shift
* Was the shift-key pressed
* @return Returns true if event was handled, else false
*/
private boolean handleClickSelection(final boolean ctrl,
final boolean shift) {
// always when clicking an item, focus it
setFocusedNode(this, false);
if (!BrowserInfo.get().isOpera()) {
/*
* Ensure that the tree's focus element also gains focus
* (TreeNodes focus is faked using FocusElementPanel in browsers
* other than Opera).
*/
focus();
}
executeEventCommand(new ScheduledCommand() {
@Override
public void execute() {
if (multiSelectMode == MultiSelectMode.SIMPLE
|| !isMultiselect) {
toggleSelection();
lastSelection = TreeNode.this;
} else if (multiSelectMode == MultiSelectMode.DEFAULT) {
// Handle ctrl+click
if (isMultiselect && ctrl && !shift) {
toggleSelection();
lastSelection = TreeNode.this;
// Handle shift+click
} else if (isMultiselect && !ctrl && shift) {
deselectAll();
selectNodeRange(lastSelection.key, key);
sendSelectionToServer();
// Handle ctrl+shift click
} else if (isMultiselect && ctrl && shift) {
selectNodeRange(lastSelection.key, key);
// Handle click
} else {
// TODO should happen only if this alone not yet
// selected,
// now sending excess server calls
deselectAll();
toggleSelection();
lastSelection = TreeNode.this;
}
}
}
});
return true;
} | 3.68 |
framework_MarginInfo_hasTop | /**
* Checks if this MarginInfo object has the top edge margin enabled.
*
* @return true if top edge margin is enabled
*/
public boolean hasTop() {
return (bitMask & TOP) == TOP;
} | 3.68 |
querydsl_StringExpression_locate | /**
* Create a {@code locate(str, this, start)} expression
*
* <p>Get the position of the given String in this String, the first position is 1</p>
*
* @param str string
* @param start start
* @return locate(str, this, start)
*/
public NumberExpression<Integer> locate(String str, Expression<Integer> start) {
return Expressions.numberOperation(Integer.class, Ops.StringOps.LOCATE2, ConstantImpl.create(str), mixin, start);
} | 3.68 |
pulsar_MultiRolesTokenAuthorizationProvider_canLookupAsync | /**
* Check whether the specified role can perform a lookup for the specified topic.
* <p>
* For that the caller needs to have producer or consumer permission.
*
* @param topicName the topic to look up
* @param role the role requesting the lookup
* @param authenticationData authentication data of the caller
* @return a future completing with {@code true} if the lookup is allowed
* @throws Exception
*/
@Override
public CompletableFuture<Boolean> canLookupAsync(TopicName topicName, String role,
AuthenticationDataSource authenticationData) {
return authorize(role, authenticationData, r -> super.canLookupAsync(topicName, r, authenticationData));
} | 3.68 |
framework_TableQuery_getTableName | /**
* Returns the table name for the query without catalog and schema
* information.
*
* @return table name, not null
*/
public String getTableName() {
return tableName;
} | 3.68 |
flink_SegmentsUtil_getInt | /**
* get int from segments.
*
* @param segments target segments.
* @param offset value offset.
* @return the int value read at the given offset.
*/
public static int getInt(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getInt(offset);
} else {
return getIntMultiSegments(segments, offset);
}
} | 3.68 |
open-banking-gateway_HbciConsentInfo_isWrongScaChallenge | /**
* Whether the SCA challenge result that was sent to the ASPSP was wrong.
*/
public boolean isWrongScaChallenge(HbciContext ctx) {
return null != ctx.getWrongAuthCredentials() && ctx.getWrongAuthCredentials();
} | 3.68 |
hadoop_OBSDataBlocks_enterState | /**
* Atomically enter a state, verifying current state.
*
* @param current current state. null means "no check"
* @param next next state
* @throws IllegalStateException if the current state is not as expected
*/
protected final synchronized void enterState(final DestState current,
final DestState next)
throws IllegalStateException {
verifyState(current);
LOG.debug("{}: entering state {}", this, next);
state = next;
} | 3.68 |
framework_FieldGroup_firePostCommitEvent | /**
* Sends a postCommit event to all registered commit handlers
*
* @throws CommitException
* If the commit should be aborted
*/
private void firePostCommitEvent() throws CommitException {
CommitHandler[] handlers = commitHandlers
.toArray(new CommitHandler[commitHandlers.size()]);
for (CommitHandler handler : handlers) {
handler.postCommit(new CommitEvent(this));
}
} | 3.68 |
dubbo_RpcStatus_getFailedMaxElapsed | /**
* get failed max elapsed.
*
* @return failed max elapsed
*/
public long getFailedMaxElapsed() {
return failedMaxElapsed.get();
} | 3.68 |
pulsar_InMemoryDelayedDeliveryTracker_getScheduledMessages | /**
* Get a set of positions of messages whose delivery time has already been reached.
*/
@Override
public NavigableSet<PositionImpl> getScheduledMessages(int maxMessages) {
int n = maxMessages;
NavigableSet<PositionImpl> positions = new TreeSet<>();
long cutoffTime = getCutoffTime();
while (n > 0 && !priorityQueue.isEmpty()) {
long timestamp = priorityQueue.peekN1();
if (timestamp > cutoffTime) {
break;
}
long ledgerId = priorityQueue.peekN2();
long entryId = priorityQueue.peekN3();
positions.add(new PositionImpl(ledgerId, entryId));
priorityQueue.pop();
--n;
}
if (log.isDebugEnabled()) {
log.debug("[{}] Get scheduled messages - found {}", dispatcher.getName(), positions.size());
}
if (priorityQueue.isEmpty()) {
// Reset to initial state
highestDeliveryTimeTracked = 0;
messagesHaveFixedDelay = true;
}
updateTimer();
return positions;
} | 3.68 |
hudi_Types_isTighterThan | /**
* Returns whether this DecimalType is tighter than `other`. If yes, it means `this`
* can be cast into `other` safely without losing any precision or range.
*/
public boolean isTighterThan(PrimitiveType other) {
if (other instanceof DecimalType) {
DecimalType dt = (DecimalType) other;
return (precision - scale) <= (dt.precision - dt.scale) && scale <= dt.scale;
}
if (other instanceof IntType) {
return isTighterThan(get(10, 0));
}
return false;
} | 3.68 |
hudi_HoodieRecord_deflate | /**
* Release the actual payload, to ease memory pressure. To be called after the record has been written to storage.
* Once deflated, cannot be inflated.
*/
public void deflate() {
this.data = null;
} | 3.68 |
flink_FlinkContainers_withFlinkContainersSettings | /**
* Allows to optionally provide Flink containers settings. {@link FlinkContainersSettings}
* based on defaults will be used otherwise.
*
* @param flinkContainersSettings The Flink containers settings.
* @return A reference to this Builder.
*/
public Builder withFlinkContainersSettings(
FlinkContainersSettings flinkContainersSettings) {
this.flinkContainersSettings = flinkContainersSettings;
return this;
} | 3.68 |
flink_SerdeUtils_deserializeSplitAssignments | /**
* Deserialize the given bytes returned by {@link #serializeSplitAssignments(Map,
* SimpleVersionedSerializer)}.
*
* @param serialized the serialized bytes returned by {@link #serializeSplitAssignments(Map,
* SimpleVersionedSerializer)}.
* @param splitSerializer the split serializer for the splits.
* @param collectionSupplier the supplier for the {@link Collection} instance to hold the
* assigned splits for a subtask.
* @param <SplitT> the type of the splits.
* @param <C> the type of the collection to hold the assigned splits for a subtask.
* @return A mapping from subtask id to its assigned splits.
* @throws IOException when deserialization failed.
*/
public static <SplitT extends SourceSplit, C extends Collection<SplitT>>
Map<Integer, C> deserializeSplitAssignments(
byte[] serialized,
SimpleVersionedSerializer<SplitT> splitSerializer,
Function<Integer, C> collectionSupplier)
throws IOException {
try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
DataInputStream in = new DataInputStream(bais)) {
int numSubtasks = in.readInt();
Map<Integer, C> splitsAssignments = new HashMap<>(numSubtasks);
int serializerVersion = in.readInt();
for (int i = 0; i < numSubtasks; i++) {
int subtaskId = in.readInt();
int numAssignedSplits = in.readInt();
C assignedSplits = collectionSupplier.apply(numAssignedSplits);
for (int j = 0; j < numAssignedSplits; j++) {
int serializedSplitSize = in.readInt();
byte[] serializedSplit = new byte[serializedSplitSize];
in.readFully(serializedSplit);
SplitT split = splitSerializer.deserialize(serializerVersion, serializedSplit);
assignedSplits.add(split);
}
splitsAssignments.put(subtaskId, assignedSplits);
}
return splitsAssignments;
}
} | 3.68 |
hudi_SparkHoodieHBaseIndex_canIndexLogFiles | /**
* Mapping is available in HBase already.
*/
@Override
public boolean canIndexLogFiles() {
return true;
} | 3.68 |
flink_ZooKeeperStateHandleStore_getInstanceLockPath | /**
* Returns the path for the lock node relative to the given path.
*
* @param rootPath Root path under which the lock node shall be created
* @return Path for the lock node
*/
@VisibleForTesting
String getInstanceLockPath(String rootPath) {
return getRootLockPath(rootPath) + '/' + lockNode;
} | 3.68 |
dubbo_RpcServiceContext_getLocalPort | /**
* get local port.
*
* @return port
*/
@Override
public int getLocalPort() {
return localAddress == null ? 0 : localAddress.getPort();
} | 3.68 |
hbase_OrderedBytesBase_isNullable | // almost all OrderedBytes implementations are nullable.
@Override
public boolean isNullable() {
return true;
} | 3.68 |
framework_VAbstractCalendarPanel_onKeyDown | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.KeyDownHandler#onKeyDown(com.google.gwt
* .event.dom.client.KeyDownEvent)
*/
@Override
public void onKeyDown(KeyDownEvent event) {
handleKeyPress(event);
} | 3.68 |
hmily_LogUtil_error | /**
* Error.
*
* @param logger the logger
* @param supplier the supplier
*/
public static void error(final Logger logger, final Supplier<Object> supplier) {
if (logger.isErrorEnabled()) {
logger.error(Objects.toString(supplier.get()));
}
} | 3.68 |
flink_HiveParserCalcitePlanner_genSelectLogicalPlan | // NOTE: there can only be one select clause since we don't handle multi destination insert.
private RelNode genSelectLogicalPlan(
HiveParserQB qb,
RelNode srcRel,
RelNode starSrcRel,
Map<String, Integer> outerNameToPos,
HiveParserRowResolver outerRR)
throws SemanticException {
// 0. Generate a Select Node for Windowing
// Exclude the newly-generated select columns from */etc. resolution.
HashSet<ColumnInfo> excludedColumns = new HashSet<>();
RelNode selForWindow = genSelectForWindowing(qb, srcRel, excludedColumns);
srcRel = (selForWindow == null) ? srcRel : selForWindow;
ArrayList<ExprNodeDesc> exprNodeDescs = new ArrayList<>();
HiveParserASTNode trfm = null;
// 1. Get Select Expression List
HiveParserQBParseInfo qbp = qb.getParseInfo();
String selClauseName = qbp.getClauseNames().iterator().next();
HiveParserASTNode selExprList = qbp.getSelForClause(selClauseName);
// make sure if there is subquery it is top level expression
HiveParserSubQueryUtils.checkForTopLevelSubqueries(selExprList);
final boolean cubeRollupGrpSetPresent =
!qbp.getDestRollups().isEmpty()
|| !qbp.getDestGroupingSets().isEmpty()
|| !qbp.getDestCubes().isEmpty();
// 3. Query Hints
int posn = 0;
boolean hintPresent = selExprList.getChild(0).getType() == HiveASTParser.QUERY_HINT;
if (hintPresent) {
posn++;
}
// 4. Bailout if select involves Transform
boolean isInTransform =
selExprList.getChild(posn).getChild(0).getType() == HiveASTParser.TOK_TRANSFORM;
if (isInTransform) {
trfm = (HiveParserASTNode) selExprList.getChild(posn).getChild(0);
}
// 2.Row resolvers for input, output
HiveParserRowResolver outRR = new HiveParserRowResolver();
// SELECT * or SELECT TRANSFORM(*)
Integer pos = 0;
// TODO: will this also fix windowing? try
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel), starRR = inputRR;
inputRR.setCheckForAmbiguity(true);
if (starSrcRel != null) {
starRR = relToRowResolver.get(starSrcRel);
}
// 5. Check if select involves UDTF
String udtfTableAlias = null;
SqlOperator udtfOperator = null;
String genericUDTFName = null;
ArrayList<String> udtfColAliases = new ArrayList<>();
HiveParserASTNode expr = (HiveParserASTNode) selExprList.getChild(posn).getChild(0);
int exprType = expr.getType();
if (exprType == HiveASTParser.TOK_FUNCTION || exprType == HiveASTParser.TOK_FUNCTIONSTAR) {
String funcName =
HiveParserTypeCheckProcFactory.DefaultExprProcessor.getFunctionText(expr, true);
// we can't just try to get table function here because the operator table throws
// exception if it's not a table function
SqlOperator sqlOperator =
HiveParserUtils.getAnySqlOperator(funcName, frameworkConfig.getOperatorTable());
if (HiveParserUtils.isUDTF(sqlOperator)) {
LOG.debug("Found UDTF " + funcName);
udtfOperator = sqlOperator;
genericUDTFName = funcName;
if (!HiveParserUtils.isNative(sqlOperator)) {
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(
(HiveParserASTNode) expr.getChild(0));
}
if (exprType == HiveASTParser.TOK_FUNCTIONSTAR) {
semanticAnalyzer.genColListRegex(
".*",
null,
(HiveParserASTNode) expr.getChild(0),
exprNodeDescs,
null,
inputRR,
starRR,
pos,
outRR,
qb.getAliases(),
false);
}
}
}
if (udtfOperator != null) {
// Only support a single expression when it's a UDTF
if (selExprList.getChildCount() > 1) {
throw new SemanticException(
generateErrorMessage(
(HiveParserASTNode) selExprList.getChild(1),
ErrorMsg.UDTF_MULTIPLE_EXPR.getMsg()));
}
HiveParserASTNode selExpr = (HiveParserASTNode) selExprList.getChild(posn);
// Get the column / table aliases from the expression. Start from 1 as
// 0 is the TOK_FUNCTION
// column names also can be inferred from result of UDTF
for (int i = 1; i < selExpr.getChildCount(); i++) {
HiveParserASTNode selExprChild = (HiveParserASTNode) selExpr.getChild(i);
switch (selExprChild.getType()) {
case HiveASTParser.Identifier:
udtfColAliases.add(
unescapeIdentifier(selExprChild.getText().toLowerCase()));
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(selExprChild);
break;
case HiveASTParser.TOK_TABALIAS:
assert (selExprChild.getChildCount() == 1);
udtfTableAlias = unescapeIdentifier(selExprChild.getChild(0).getText());
qb.addAlias(udtfTableAlias);
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(
(HiveParserASTNode) selExprChild.getChild(0));
break;
default:
throw new SemanticException(
"Find invalid token type " + selExprChild.getType() + " in UDTF.");
}
}
LOG.debug("UDTF table alias is " + udtfTableAlias);
LOG.debug("UDTF col aliases are " + udtfColAliases);
}
// 6. Iterate over all expression (after SELECT)
HiveParserASTNode exprList;
if (isInTransform) {
exprList = (HiveParserASTNode) trfm.getChild(0);
} else if (udtfOperator != null) {
exprList = expr;
} else {
exprList = selExprList;
}
// For UDTF's, skip the function name to get the expressions
int startPos = udtfOperator != null ? posn + 1 : posn;
if (isInTransform) {
startPos = 0;
}
// track the col aliases provided by user
List<String> colAliases = new ArrayList<>();
for (int i = startPos; i < exprList.getChildCount(); ++i) {
colAliases.add(null);
// 6.1 child can be EXPR AS ALIAS, or EXPR.
HiveParserASTNode child = (HiveParserASTNode) exprList.getChild(i);
boolean hasAsClause = child.getChildCount() == 2 && !isInTransform;
boolean isWindowSpec =
child.getChildCount() == 3
&& child.getChild(2).getType() == HiveASTParser.TOK_WINDOWSPEC;
// 6.2 EXPR AS (ALIAS,...) parses, but is only allowed for UDTF's
// This check is not needed and invalid when there is a transform b/c the AST's are
// slightly different.
if (!isWindowSpec
&& !isInTransform
&& udtfOperator == null
&& child.getChildCount() > 2) {
throw new SemanticException(
generateErrorMessage(
(HiveParserASTNode) child.getChild(2),
ErrorMsg.INVALID_AS.getMsg()));
}
String tabAlias;
String colAlias;
if (isInTransform || udtfOperator != null) {
tabAlias = null;
colAlias = semanticAnalyzer.getAutogenColAliasPrfxLbl() + i;
expr = child;
} else {
// 6.3 Get rid of TOK_SELEXPR
expr = (HiveParserASTNode) child.getChild(0);
String[] colRef =
HiveParserUtils.getColAlias(
child,
semanticAnalyzer.getAutogenColAliasPrfxLbl(),
inputRR,
semanticAnalyzer.autogenColAliasPrfxIncludeFuncName(),
i);
tabAlias = colRef[0];
colAlias = colRef[1];
if (hasAsClause) {
colAliases.set(colAliases.size() - 1, colAlias);
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(
(HiveParserASTNode) child.getChild(1));
}
}
Map<HiveParserASTNode, RelNode> subQueryToRelNode = new HashMap<>();
boolean isSubQuery = genSubQueryRelNode(qb, expr, srcRel, false, subQueryToRelNode);
if (isSubQuery) {
ExprNodeDesc subQueryDesc =
semanticAnalyzer.genExprNodeDesc(
expr,
relToRowResolver.get(srcRel),
outerRR,
subQueryToRelNode,
false);
exprNodeDescs.add(subQueryDesc);
ColumnInfo colInfo =
new ColumnInfo(
getColumnInternalName(pos),
subQueryDesc.getWritableObjectInspector(),
tabAlias,
false);
if (!outRR.putWithCheck(tabAlias, colAlias, null, colInfo)) {
throw new SemanticException(
"Cannot add column to RR: "
+ tabAlias
+ "."
+ colAlias
+ " => "
+ colInfo
+ " due to duplication, see previous warnings");
}
} else {
// 6.4 Build ExprNode corresponding to columns
if (expr.getType() == HiveASTParser.TOK_ALLCOLREF) {
pos =
semanticAnalyzer.genColListRegex(
".*",
expr.getChildCount() == 0
? null
: HiveParserBaseSemanticAnalyzer.getUnescapedName(
(HiveParserASTNode) expr.getChild(0))
.toLowerCase(),
expr,
exprNodeDescs,
excludedColumns,
inputRR,
starRR,
pos,
outRR,
qb.getAliases(),
false /* don't require uniqueness */);
} else if (expr.getType() == HiveASTParser.TOK_TABLE_OR_COL
&& !hasAsClause
&& !inputRR.getIsExprResolver()
&& HiveParserUtils.isRegex(
unescapeIdentifier(expr.getChild(0).getText()),
semanticAnalyzer.getConf())) {
// In case the expression is a regex COL. This can only happen without AS clause
// We don't allow this for ExprResolver - the Group By case
pos =
semanticAnalyzer.genColListRegex(
unescapeIdentifier(expr.getChild(0).getText()),
null,
expr,
exprNodeDescs,
excludedColumns,
inputRR,
starRR,
pos,
outRR,
qb.getAliases(),
true);
} else if (expr.getType() == HiveASTParser.DOT
&& expr.getChild(0).getType() == HiveASTParser.TOK_TABLE_OR_COL
&& inputRR.hasTableAlias(
unescapeIdentifier(
expr.getChild(0).getChild(0).getText().toLowerCase()))
&& !hasAsClause
&& !inputRR.getIsExprResolver()
&& HiveParserUtils.isRegex(
unescapeIdentifier(expr.getChild(1).getText()),
semanticAnalyzer.getConf())) {
// In case the expression is TABLE.COL (col can be regex). This can only happen
// without AS clause
// We don't allow this for ExprResolver - the Group By case
pos =
semanticAnalyzer.genColListRegex(
unescapeIdentifier(expr.getChild(1).getText()),
unescapeIdentifier(
expr.getChild(0).getChild(0).getText().toLowerCase()),
expr,
exprNodeDescs,
excludedColumns,
inputRR,
starRR,
pos,
outRR,
qb.getAliases(),
false /* don't require uniqueness */);
} else if (HiveASTParseUtils.containsTokenOfType(expr, HiveASTParser.TOK_FUNCTIONDI)
&& !(srcRel instanceof Aggregate)) {
                // Likely a malformed query, e.g. select hash(distinct c1) from t1;
throw new SemanticException("Distinct without an aggregation.");
} else {
// Case when this is an expression
HiveParserTypeCheckCtx typeCheckCtx =
new HiveParserTypeCheckCtx(
inputRR, true, true, frameworkConfig, cluster);
// We allow stateful functions in the SELECT list (but nowhere else)
typeCheckCtx.setAllowStatefulFunctions(true);
if (!qbp.getDestToGroupBy().isEmpty()) {
// Special handling of grouping function
expr =
rewriteGroupingFunctionAST(
getGroupByForClause(qbp, selClauseName),
expr,
!cubeRollupGrpSetPresent);
}
ExprNodeDesc exprDesc =
semanticAnalyzer.genExprNodeDesc(expr, inputRR, typeCheckCtx);
String recommended = semanticAnalyzer.recommendName(exprDesc, colAlias);
if (recommended != null && outRR.get(null, recommended) == null) {
colAlias = recommended;
}
exprNodeDescs.add(exprDesc);
ColumnInfo colInfo =
new ColumnInfo(
getColumnInternalName(pos),
exprDesc.getWritableObjectInspector(),
tabAlias,
false);
colInfo.setSkewedCol(
exprDesc instanceof ExprNodeColumnDesc
&& ((ExprNodeColumnDesc) exprDesc).isSkewedCol());
outRR.put(tabAlias, colAlias, colInfo);
if (exprDesc instanceof ExprNodeColumnDesc) {
ExprNodeColumnDesc colExp = (ExprNodeColumnDesc) exprDesc;
String[] altMapping = inputRR.getAlternateMappings(colExp.getColumn());
if (altMapping != null) {
// TODO: this can overwrite the mapping. Should this be allowed?
outRR.put(altMapping[0], altMapping[1], colInfo);
}
}
pos++;
}
}
}
// 7. Convert Hive projections to Calcite
List<RexNode> calciteColLst = new ArrayList<>();
HiveParserRexNodeConverter rexNodeConverter =
new HiveParserRexNodeConverter(
cluster,
srcRel.getRowType(),
outerNameToPos,
buildHiveColNameToInputPosMap(exprNodeDescs, inputRR),
relToRowResolver.get(srcRel),
outerRR,
0,
false,
subqueryId,
funcConverter);
for (ExprNodeDesc colExpr : exprNodeDescs) {
RexNode calciteCol = rexNodeConverter.convert(colExpr);
calciteCol = convertNullLiteral(calciteCol).accept(funcConverter);
calciteColLst.add(calciteCol);
}
// 8. Build Calcite Rel
RelNode res;
if (isInTransform) {
HiveParserScriptTransformHelper transformHelper =
new HiveParserScriptTransformHelper(
cluster, relToRowResolver, relToHiveColNameCalcitePosMap, hiveConf);
res = transformHelper.genScriptPlan(trfm, qb, calciteColLst, srcRel);
} else if (udtfOperator != null) {
// The basic idea for CBO support of UDTF is to treat UDTF as a special project.
res =
genUDTFPlan(
udtfOperator,
genericUDTFName,
udtfTableAlias,
udtfColAliases,
qb,
calciteColLst,
outRR.getColumnInfos(),
srcRel,
true,
false);
} else {
// If it's a subquery and the project is identity, we skip creating this project.
// This is to handle an issue with calcite SubQueryRemoveRule. The rule checks col
// uniqueness by calling
// RelMetadataQuery::areColumnsUnique with an empty col set, which always returns null
// for a project
// and thus introduces unnecessary agg node.
if (HiveParserUtils.isIdentityProject(srcRel, calciteColLst, colAliases)
&& outerRR != null) {
res = srcRel;
} else {
res = genSelectRelNode(calciteColLst, outRR, srcRel);
}
}
// 9. Handle select distinct as GBY if there exist windowing functions
if (selForWindow != null
&& selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI) {
ImmutableBitSet groupSet =
ImmutableBitSet.range(res.getRowType().getFieldList().size());
res =
LogicalAggregate.create(
res,
ImmutableList.of(),
groupSet,
Collections.emptyList(),
Collections.emptyList());
HiveParserRowResolver groupByOutputRowResolver = new HiveParserRowResolver();
for (int i = 0; i < outRR.getColumnInfos().size(); i++) {
ColumnInfo colInfo = outRR.getColumnInfos().get(i);
ColumnInfo newColInfo =
new ColumnInfo(
colInfo.getInternalName(),
colInfo.getType(),
colInfo.getTabAlias(),
colInfo.getIsVirtualCol());
groupByOutputRowResolver.put(colInfo.getTabAlias(), colInfo.getAlias(), newColInfo);
}
relToHiveColNameCalcitePosMap.put(
res, buildHiveToCalciteColumnMap(groupByOutputRowResolver));
relToRowResolver.put(res, groupByOutputRowResolver);
}
inputRR.setCheckForAmbiguity(false);
if (selForWindow != null && res instanceof Project) {
// if exist windowing expression, trim the project node with window
res =
HiveParserProjectWindowTrimmer.trimProjectWindow(
(Project) res,
(Project) selForWindow,
relToRowResolver,
relToHiveColNameCalcitePosMap);
}
return res;
} | 3.68 |
hadoop_JobID_forName | /** Construct a JobID object from the given string.
   * @return constructed JobID object, or null if the given String is null
* @throws IllegalArgumentException if the given string is malformed
*/
public static JobID forName(String str) throws IllegalArgumentException {
return (JobID) org.apache.hadoop.mapreduce.JobID.forName(str);
} | 3.68 |
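A small usage sketch, assuming the usual textual job ID format job_<cluster-timestamp>_<sequence>; the ID value below is illustrative.

// Parses a textual job ID back into a JobID instance; an
// IllegalArgumentException is thrown if the string is malformed.
JobID id = JobID.forName("job_200707121733_0003");
System.out.println(id.getJtIdentifier() + " / " + id.getId());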
flink_StreamingRuntimeContext_isCheckpointingEnabled | /**
* Returns true if checkpointing is enabled for the running job.
*
* @return true if checkpointing is enabled.
*/
public boolean isCheckpointingEnabled() {
return streamConfig.isCheckpointingEnabled();
} | 3.68 |
hmily_HmilyApplicationContextAware_postProcessBeanFactory | /**
     * Fixes metric registration happening before initialization.
*/
@Override
public void postProcessBeanFactory(@NonNull final ConfigurableListableBeanFactory beanFactory) throws BeansException {
HmilyBootstrap.getInstance().start();
} | 3.68 |
morf_ConnectionResourcesBean_setInstanceName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setInstanceName(java.lang.String)
*/
@Override
public void setInstanceName(String instanceName) {
this.instanceName = instanceName;
} | 3.68 |
hbase_BackupManifest_getTableList | /**
   * Get the table list of this backup image.
   * @return the list of table names
*/
public List<TableName> getTableList() {
return backupImage.getTableNames();
} | 3.68 |
zxing_FinderPatternFinder_crossCheckDiagonal | /**
* After a vertical and horizontal scan finds a potential finder pattern, this method
* "cross-cross-cross-checks" by scanning down diagonally through the center of the possible
* finder pattern to see if the same proportion is detected.
*
* @param centerI row where a finder pattern was detected
* @param centerJ center of the section that appears to cross a finder pattern
   * @return true if proportions are within expected limits
*/
private boolean crossCheckDiagonal(int centerI, int centerJ) {
int[] stateCount = getCrossCheckStateCount();
// Start counting up, left from center finding black center mass
int i = 0;
while (centerI >= i && centerJ >= i && image.get(centerJ - i, centerI - i)) {
stateCount[2]++;
i++;
}
if (stateCount[2] == 0) {
return false;
}
// Continue up, left finding white space
while (centerI >= i && centerJ >= i && !image.get(centerJ - i, centerI - i)) {
stateCount[1]++;
i++;
}
if (stateCount[1] == 0) {
return false;
}
// Continue up, left finding black border
while (centerI >= i && centerJ >= i && image.get(centerJ - i, centerI - i)) {
stateCount[0]++;
i++;
}
if (stateCount[0] == 0) {
return false;
}
int maxI = image.getHeight();
int maxJ = image.getWidth();
// Now also count down, right from center
i = 1;
while (centerI + i < maxI && centerJ + i < maxJ && image.get(centerJ + i, centerI + i)) {
stateCount[2]++;
i++;
}
while (centerI + i < maxI && centerJ + i < maxJ && !image.get(centerJ + i, centerI + i)) {
stateCount[3]++;
i++;
}
if (stateCount[3] == 0) {
return false;
}
while (centerI + i < maxI && centerJ + i < maxJ && image.get(centerJ + i, centerI + i)) {
stateCount[4]++;
i++;
}
if (stateCount[4] == 0) {
return false;
}
return foundPatternDiagonal(stateCount);
} | 3.68 |
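The final proportion test is delegated to foundPatternDiagonal, which is not shown in this snippet. A minimal sketch of that kind of check, assuming the standard 1:1:3:1:1 finder-pattern module ratio (the method name and exact tolerance here are illustrative, not the actual zxing implementation):

// Sketch only: verifies the five diagonal run lengths roughly match the
// black/white/black/white/black 1:1:3:1:1 ratio of a QR finder pattern.
private static boolean roughlyFinderProportions(int[] stateCount) {
  int total = 0;
  for (int count : stateCount) {
    if (count == 0) {
      return false;
    }
    total += count;
  }
  if (total < 7) {
    return false;
  }
  float moduleSize = total / 7.0f;
  float maxVariance = moduleSize / 1.333f; // generous tolerance for the diagonal scan
  return Math.abs(moduleSize - stateCount[0]) < maxVariance
      && Math.abs(moduleSize - stateCount[1]) < maxVariance
      && Math.abs(3.0f * moduleSize - stateCount[2]) < maxVariance
      && Math.abs(moduleSize - stateCount[3]) < maxVariance
      && Math.abs(moduleSize - stateCount[4]) < maxVariance;
}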
hadoop_TFile_getEntryComparator | /**
   * Get a Comparator object to compare Entries. It is useful when you want to
   * store the entries in a collection (such as a PriorityQueue) and perform
   * sorting or comparisons among entries based on their keys without copying out
   * the key.
   *
   * @return an Entry Comparator.
*/
public Comparator<Scanner.Entry> getEntryComparator() {
if (!isSorted()) {
throw new RuntimeException(
"Entries are not comparable for unsorted TFiles");
}
return new Comparator<Scanner.Entry>() {
/**
* Provide a customized comparator for Entries. This is useful if we
* have a collection of Entry objects. However, if the Entry objects
* come from different TFiles, users must ensure that those TFiles share
* the same RawComparator.
*/
@Override
public int compare(Scanner.Entry o1, Scanner.Entry o2) {
return comparator.compare(o1.getKeyBuffer(), 0, o1.getKeyLength(), o2
.getKeyBuffer(), 0, o2.getKeyLength());
}
};
} | 3.68 |
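A minimal usage sketch of the PriorityQueue case mentioned above, assuming reader is an open TFile.Reader over a sorted TFile and entries holds Scanner.Entry objects obtained from its scanners; the variable names are illustrative.

// Orders entries by key without copying the key bytes out of each entry.
PriorityQueue<TFile.Reader.Scanner.Entry> heap =
    new PriorityQueue<>(16, reader.getEntryComparator());
heap.addAll(entries);
TFile.Reader.Scanner.Entry smallestKeyEntry = heap.poll();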