name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
framework_StaticSection_readDesign | /**
* Reads the declarative design from the given table section element.
*
* @since 7.5.0
* @param tableSectionElement
* Element to read design from
* @param designContext
* the design context
* @throws DesignException
* if the table section contains unexpected children
*/
public void readDesign(Element tableSectionElement,
DesignContext designContext) throws DesignException {
while (getRowCount() > 0) {
removeRow(0);
}
for (Element row : tableSectionElement.children()) {
if (!row.tagName().equals("tr")) {
throw new DesignException("Unexpected element in "
+ tableSectionElement.tagName() + ": " + row.tagName());
}
addRowAt(getRowCount()).readDesign(row, designContext);
}
} | 3.68 |
flink_DateTimeUtils_timestampCeil | /**
* Keep the algorithm consistent with Calcite DateTimeUtils.julianDateFloor, but here we take
* time zone into account.
*/
public static long timestampCeil(TimeUnitRange range, long ts, TimeZone tz) {
// shift by the zone offset so the ceiling arithmetic can assume UTC
long offset = tz.getOffset(ts);
long utcTs = ts + offset;
switch (range) {
case HOUR:
return ceil(utcTs, MILLIS_PER_HOUR) - offset;
case DAY:
return ceil(utcTs, MILLIS_PER_DAY) - offset;
case MILLENNIUM:
case CENTURY:
case DECADE:
case MONTH:
case YEAR:
case QUARTER:
case WEEK:
int days = (int) (utcTs / MILLIS_PER_DAY + EPOCH_JULIAN);
return julianDateFloor(range, days, false) * MILLIS_PER_DAY - offset;
default:
// for MINUTE, SECOND, etc.,
// it is more efficient to use plain arithmetic
throw new AssertionError(range);
}
} | 3.68 |
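A minimal standalone sketch of the day-ceiling arithmetic used above; the MILLIS_PER_DAY constant and the sample timestamp are assumptions for illustration, not the Flink internals.

```java
import java.util.TimeZone;

public class TimestampCeilSketch {
    static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000L; // assumed constant

    // Ceil an epoch-millis timestamp to the start of the next day in the given zone.
    static long ceilToDay(long ts, TimeZone tz) {
        long offset = tz.getOffset(ts);      // shift into a "UTC-like" local frame
        long utcTs = ts + offset;
        long ceiled = ((utcTs + MILLIS_PER_DAY - 1) / MILLIS_PER_DAY) * MILLIS_PER_DAY;
        return ceiled - offset;              // shift back to the original frame
    }

    public static void main(String[] args) {
        long ts = 1_696_000_000_000L;        // arbitrary sample timestamp (hypothetical)
        System.out.println(ceilToDay(ts, TimeZone.getTimeZone("UTC")));
    }
}
```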
hadoop_IOStatisticsContextIntegration_isIOStatisticsThreadLevelEnabled | /**
* Static probe to check whether thread-level IO statistics are enabled.
*
* @return true if thread-level IO statistics are enabled.
*/
public static boolean isIOStatisticsThreadLevelEnabled() {
return isThreadIOStatsEnabled;
} | 3.68 |
flink_CoreOptions_fileSystemConnectionLimit | /**
* The total number of input plus output connections that a file system for the given scheme may
* open. Unlimited by default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimit(String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.total").intType().defaultValue(-1);
} | 3.68 |
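For illustration, a hedged sketch of how a per-scheme option key like the one above can be composed and looked up; the simple Properties-based lookup is an assumption, not Flink's ConfigOptions API.

```java
import java.util.Properties;

public class FsLimitKeySketch {
    // Mirrors the key pattern "fs.<scheme>.limit.total" used above.
    static int connectionLimit(Properties conf, String scheme) {
        String key = "fs." + scheme + ".limit.total";
        return Integer.parseInt(conf.getProperty(key, "-1")); // -1 means unlimited
    }

    public static void main(String[] args) {
        Properties conf = new Properties();
        conf.setProperty("fs.s3.limit.total", "64");
        System.out.println(connectionLimit(conf, "s3"));   // 64
        System.out.println(connectionLimit(conf, "hdfs")); // -1 (unlimited)
    }
}
```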
hudi_HoodieFlinkCopyOnWriteTable_delete | /**
* Deletes a list of {@link HoodieKey}s from the Hoodie table at the supplied instantTime. The {@link HoodieKey}s will be
* de-duped and non-existent keys will be removed before deleting.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control over
* the underlying file.
*
* @param context HoodieEngineContext
* @param writeHandle The write handle
* @param instantTime Instant Time for the action
* @param keys {@link List} of {@link HoodieKey}s to be deleted
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> delete(
HoodieEngineContext context,
HoodieWriteHandle<?, ?, ?, ?> writeHandle,
String instantTime,
List<HoodieKey> keys) {
return new FlinkDeleteCommitActionExecutor<>(context, writeHandle, config, this, instantTime, keys).execute();
} | 3.68 |
flink_SinkTestSuiteBase_compareSinkMetrics | /** Compares the queried sink metric value with the expected record count. */
private boolean compareSinkMetrics(
MetricQuerier metricQuerier,
TestEnvironment testEnv,
DataStreamSinkExternalContext<T> context,
JobID jobId,
String sinkName,
String metricsName,
long expectedSize)
throws Exception {
double sumNumRecordsOut =
metricQuerier.getAggregatedMetricsByRestAPI(
testEnv.getRestEndpoint(),
jobId,
sinkName,
metricsName,
getSinkMetricFilter(context));
if (Precision.equals(expectedSize, sumNumRecordsOut)) {
return true;
} else {
LOG.info("expected:<{}> but was <{}>({})", expectedSize, sumNumRecordsOut, metricsName);
return false;
}
} | 3.68 |
flink_HiveDDLUtils_enableConstraint | // returns a constraint trait that requires ENABLE
public static byte enableConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
} | 3.68 |
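A small sketch of the bit-flag pattern above; the flag value is an assumption chosen for illustration, not Hive's actual constant.

```java
public class ConstraintTraitSketch {
    static final byte ENABLE = 1 << 2; // assumed flag value, for illustration only

    // Set the ENABLE bit on the trait, leaving other bits unchanged.
    static byte enable(byte trait) {
        return (byte) (trait | ENABLE);
    }

    static boolean isEnabled(byte trait) {
        return (trait & ENABLE) != 0;
    }

    public static void main(String[] args) {
        byte trait = 0;
        trait = enable(trait);
        System.out.println(isEnabled(trait)); // true
    }
}
```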
morf_ResolvedTables_getReadTables | /**
* @return Unmodifiable set of read tables
*/
public Set<String> getReadTables() {
return Collections.unmodifiableSet(readTables);
} | 3.68 |
hadoop_FileIoProvider_rename | /**
* Move the src file to the target using
* {@link Storage#rename(File, File)}.
*
* @param volume target volume. null if unavailable.
* @param src source path.
* @param target target path.
* @throws IOException
*/
public void rename(
@Nullable FsVolumeSpi volume, File src, File target)
throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, MOVE);
try {
faultInjectorEventHook.beforeMetadataOp(volume, MOVE);
Storage.rename(src, target);
profilingEventHook.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
flink_MessageAcknowledgingSourceBase_addId | /**
* Adds an ID to be stored with the current checkpoint. In order to achieve exactly-once
* guarantees, implementing classes should only emit records with IDs for which this method
* returns true.
*
* @param uid The ID to add.
* @return True if the id has not been processed previously.
*/
protected boolean addId(UId uid) {
idsForCurrentCheckpoint.add(uid);
return idsProcessedButNotAcknowledged.add(uid);
} | 3.68 |
hbase_KeyValue_getLength | /**
* Determines the total length of the KeyValue stored in the specified byte array and offset.
* Includes all headers.
* @param bytes byte array
* @param offset offset to start of the KeyValue
* @return length of entire KeyValue, in bytes
*/
private static int getLength(byte[] bytes, int offset) {
int klength = ROW_OFFSET + Bytes.toInt(bytes, offset);
int vlength = Bytes.toInt(bytes, offset + Bytes.SIZEOF_INT);
return klength + vlength;
} | 3.68 |
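A hedged sketch of the same length computation using ByteBuffer; the ROW_OFFSET value and big-endian encoding are assumptions mirroring the snippet, not HBase's actual constants.

```java
import java.nio.ByteBuffer;

public class KeyValueLengthSketch {
    static final int ROW_OFFSET = 2 * Integer.BYTES; // assumed: the two length ints themselves

    // Total length = header + key length + value length, read as big-endian ints.
    static int totalLength(byte[] bytes, int offset) {
        ByteBuffer buf = ByteBuffer.wrap(bytes, offset, 2 * Integer.BYTES);
        int keyLength = buf.getInt();
        int valueLength = buf.getInt();
        return ROW_OFFSET + keyLength + valueLength;
    }

    public static void main(String[] args) {
        ByteBuffer sample = ByteBuffer.allocate(16).putInt(3).putInt(5);
        System.out.println(totalLength(sample.array(), 0)); // 8 + 3 + 5 = 16
    }
}
```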
hmily_SubCoordinator_doCommit | /**
* Second phase of two-phase commit (2PC): commits each enlisted XA resource.
*/
private synchronized void doCommit() {
state = XaState.STATUS_COMMITTING;
int commitError = 0;
for (int i = 0; i < resources.size(); i++) {
HmilyXaResource xaResource = (HmilyXaResource) resources.elementAt(i);
try {
// onePhase = false, i.e. use the two-phase commit protocol.
xaResource.commit(false);
if (logger.isDebugEnabled()) {
logger.debug("xa commit{}", xaResource.getXid());
}
} catch (XAException e) {
logger.error("rollback error,{}:{}", xaResource.getXid(), HmilyXaException.getMessage(e));
commitError++;
}
}
if (commitError > 0) {
state = XaState.STATUS_UNKNOWN;
} else {
state = XaState.STATUS_COMMITTED;
}
afterCompletion();
} | 3.68 |
morf_AddTable_isApplied | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
for (String tableName : schema.tableNames()) {
if (tableName.equalsIgnoreCase(newTable.getName())) {
return true;
}
}
return false;
} | 3.68 |
AreaShop_FileManager_loadGroupsFile | /**
* Load the groups.yml file from disk
* @return true if succeeded, otherwise false
*/
public boolean loadGroupsFile() {
boolean result = true;
File groupFile = new File(groupsPath);
if(groupFile.exists() && groupFile.isFile()) {
try(
InputStreamReader reader = new InputStreamReader(new FileInputStream(groupFile), Charsets.UTF_8)
) {
groupsConfig = YamlConfiguration.loadConfiguration(reader);
} catch(IOException e) {
AreaShop.warn("Could not load groups.yml file: " + groupFile.getAbsolutePath());
}
}
if(groupsConfig == null) {
groupsConfig = new YamlConfiguration();
}
for(String groupName : groupsConfig.getKeys(false)) {
RegionGroup group = new RegionGroup(plugin, groupName);
groups.put(groupName, group);
}
return result;
} | 3.68 |
AreaShop_FileManager_getGroupSettings | /**
* Get the settings of a group.
* @param groupName Name of the group to get the settings from
* @return The settings of the group
*/
public ConfigurationSection getGroupSettings(String groupName) {
return groupsConfig.getConfigurationSection(groupName.toLowerCase());
} | 3.68 |
framework_Resolution_getResolutionsLowerThan | /**
* Returns the resolutions that are lower than the given resolution,
* starting from the given resolution. In other words, passing DAY to this
* method returns HOUR, MINUTE, SECOND.
*
* @param r
* The resolution to start from
* @return An iterable for the resolutions lower than r
*/
public static List<Resolution> getResolutionsLowerThan(Resolution r) {
List<Resolution> resolutions = new ArrayList<Resolution>();
Resolution[] values = Resolution.values();
for (int i = r.ordinal() - 1; i >= 0; i--) {
resolutions.add(values[i]);
}
return resolutions;
} | 3.68 |
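The same ordinal-walk pattern with a plain enum, as a self-contained sketch; the enum here is illustrative, not Vaadin's Resolution type.

```java
import java.util.ArrayList;
import java.util.List;

public class ResolutionWalkSketch {
    enum Res { SECOND, MINUTE, HOUR, DAY, MONTH, YEAR } // illustrative enum

    static List<Res> lowerThan(Res r) {
        List<Res> result = new ArrayList<>();
        Res[] values = Res.values();
        for (int i = r.ordinal() - 1; i >= 0; i--) {
            result.add(values[i]); // highest of the lower resolutions first
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(lowerThan(Res.DAY)); // [HOUR, MINUTE, SECOND]
    }
}
```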
hadoop_ManifestStoreOperations_bindToFileSystem | /**
* Bind to the filesystem.
* This is called by the manifest committer after the operations
* have been instantiated.
* @param fileSystem target FS
* @param path actual path under FS.
* @throws IOException if there are binding problems.
*/
public void bindToFileSystem(FileSystem fileSystem, Path path) throws IOException {
} | 3.68 |
framework_BeanFieldGroup_isBeanValidationImplementationAvailable | /**
* Checks whether a bean validation implementation (e.g. Hibernate Validator
* or Apache Bean Validation) is available.
*
* TODO move this method to some more generic location
*
* @return true if a JSR-303 bean validation implementation is available
*/
protected static boolean isBeanValidationImplementationAvailable() {
if (beanValidationImplementationAvailable != null) {
return beanValidationImplementationAvailable;
}
try {
Class<?> validationClass = Class
.forName("javax.validation.Validation");
Method buildFactoryMethod = validationClass
.getMethod("buildDefaultValidatorFactory");
Object factory = buildFactoryMethod.invoke(null);
beanValidationImplementationAvailable = (factory != null);
} catch (Exception e) {
// no bean validation implementation available
beanValidationImplementationAvailable = false;
}
return beanValidationImplementationAvailable;
} | 3.68 |
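A reduced sketch of the same reflective availability probe; checking only for the presence of the javax.validation.Validation class is a simplification of the snippet above.

```java
public class BeanValidationProbeSketch {
    private static Boolean available; // cached result, as in the snippet above

    static boolean isBeanValidationAvailable() {
        if (available != null) {
            return available;
        }
        try {
            // Present only if a bean validation API jar is on the classpath.
            Class.forName("javax.validation.Validation");
            available = true;
        } catch (ClassNotFoundException e) {
            available = false;
        }
        return available;
    }

    public static void main(String[] args) {
        System.out.println(isBeanValidationAvailable());
    }
}
```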
framework_StaticSection_setDescriptionContentMode | /**
* Sets the content mode for the tooltip.
*
* @see ContentMode
* @param descriptionContentMode
* the content mode for the tooltip
* @since 8.4
*/
public void setDescriptionContentMode(
ContentMode descriptionContentMode) {
cellState.descriptionContentMode = descriptionContentMode;
} | 3.68 |
flink_ResourceCounter_isEmpty | /**
* Checks whether the resource counter is empty.
*
* @return {@code true} if the counter does not contain any counts; otherwise {@code false}
*/
public boolean isEmpty() {
return resources.isEmpty();
} | 3.68 |
hadoop_FlowRunEntityReader_getTable | /**
* Uses the {@link FlowRunTableRW}.
*/
@Override
protected BaseTableRW<?> getTable() {
return FLOW_RUN_TABLE;
} | 3.68 |
hbase_RegionNormalizerWorkQueue_clear | /**
* Atomically removes all of the elements from this queue. The queue will be empty after this call
* returns.
*/
public void clear() {
lock.writeLock().lock();
try {
delegate.clear();
} finally {
lock.writeLock().unlock();
}
} | 3.68 |
hadoop_SaslParticipant_isNegotiatedQopPrivacy | /**
* After successful SASL negotiation, returns whether the negotiated quality of
* protection (QOP) is privacy ("auth-conf").
*
* @return true if the negotiated QOP is privacy
*/
public boolean isNegotiatedQopPrivacy() {
String qop = getNegotiatedQop();
return qop != null && "auth-conf".equalsIgnoreCase(qop);
} | 3.68 |
hadoop_Container_getVersion | /**
* Get the version of this container. The version will be incremented when
* a container is updated.
*
* @return version of this container.
*/
@Private
@Unstable
public int getVersion() {
return 0;
} | 3.68 |
hadoop_DelegatingSSLSocketFactory_initializeDefaultFactory | /**
* Initialize a singleton SSL socket factory.
*
* @param preferredMode applicable only if the instance is not initialized.
* @throws IOException if an error occurs.
*/
public static synchronized void initializeDefaultFactory(
SSLChannelMode preferredMode) throws IOException {
if (instance == null) {
instance = new DelegatingSSLSocketFactory(preferredMode);
}
} | 3.68 |
framework_VAbstractPopupCalendar_setTextFieldEnabled | /**
* Sets the state of the text field of this component. By default the text
* field is enabled. Disabling it causes only the button for date selection
* to be active, thus preventing the user from entering invalid dates. See
* <a href="http://dev.vaadin.com/ticket/6790">#6790</a>.
* <p>
* If the text field is enabled, it represents this widget within the
* browser's tabulator focus cycle. When the text field is disabled, that
* role is instead given to the date selection button. If the entire
* component is disabled, the focus cycle skips this widget altogether.
*
* @param textFieldEnabled
* {@code true} if the text field should be enabled,
* {@code false} if disabled
*/
public void setTextFieldEnabled(boolean textFieldEnabled) {
this.textFieldEnabled = textFieldEnabled;
updateTextFieldEnabled();
} | 3.68 |
shardingsphere-elasticjob_ExecutionService_getMisfiredJobItems | /**
* Get misfired job sharding items.
*
* @param items sharding items to be checked
* @return misfired job sharding items
*/
public List<Integer> getMisfiredJobItems(final Collection<Integer> items) {
List<Integer> result = new ArrayList<>(items.size());
for (int each : items) {
if (jobNodeStorage.isJobNodeExisted(ShardingNode.getMisfireNode(each))) {
result.add(each);
}
}
return result;
} | 3.68 |
hbase_FlushSnapshotSubprocedure_cleanup | /**
* Cancel threads if they haven't finished.
*/
@Override
public void cleanup(Exception e) {
LOG.info("Aborting all online FLUSH snapshot subprocedure task threads for '"
+ snapshot.getName() + "' due to error", e);
try {
taskManager.cancelTasks();
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
}
} | 3.68 |
hadoop_RouterFedBalance_build | /**
* Build the balance job.
*/
public BalanceJob build() throws IOException {
// Construct job context.
FedBalanceContext context;
Path dst = new Path(inputDst);
if (dst.toUri().getAuthority() == null) {
throw new IOException("The destination cluster must be specified.");
}
Path src = getSrcPath(inputSrc);
String mount = inputSrc;
context = new FedBalanceContext.Builder(src, dst, mount, getConf())
.setForceCloseOpenFiles(forceCloseOpen).setUseMountReadOnly(true)
.setMapNum(map).setBandwidthLimit(bandwidth).setTrash(trashOpt)
.setDelayDuration(delayDuration).setDiffThreshold(diffThreshold)
.build();
LOG.info(context.toString());
// Construct the balance job.
BalanceJob.Builder<BalanceProcedure> builder = new BalanceJob.Builder<>();
RouterDistCpProcedure dcp =
new RouterDistCpProcedure(DISTCP_PROCEDURE, null, delayDuration,
context);
builder.nextProcedure(dcp);
MountTableProcedure mtp =
new MountTableProcedure(MOUNT_TABLE_PROCEDURE, null, delayDuration,
inputSrc, dst.toUri().getPath(), dst.toUri().getAuthority(),
getConf());
builder.nextProcedure(mtp);
TrashProcedure tp =
new TrashProcedure(TRASH_PROCEDURE, null, delayDuration, context);
builder.nextProcedure(tp);
return builder.build();
} | 3.68 |
hadoop_ActiveAuditManagerS3A_getUnbondedSpan | /**
* Get the unbonded span. Until this manager
* is fully initialized it will return the no-op
* span.
* @return the unbonded span.
*/
private WrappingAuditSpan getUnbondedSpan() {
return unbondedSpan;
} | 3.68 |
hadoop_CommitContext_submit | /**
* Forward to the submitter, wrapping in task
* context setting, so as to ensure that all operations
* have job/task attributes.
* @param task task to execute
* @return the future.
*/
@Override
public Future<?> submit(Runnable task) {
return executor.submit(() -> {
auditContextUpdater.updateCurrentAuditContext();
try {
task.run();
} finally {
auditContextUpdater.resetCurrentAuditContext();
}
});
} | 3.68 |
framework_AbstractField_isDifferentValue | /**
* Called when a new value is set to determine whether the provided new
* value is considered to be a change compared to the current value. This is
* used to determine whether {@link #doSetValue(Object)} should be called
* and a value change event fired.
*
* @param newValue
* the new value candidate to check, may be <code>null</code>
*
* @return <code>true</code> if the provided value is considered to be
* different and a value change event should be fired;
* <code>false</code> if the values are considered to be the same
* and no value change should be fired
*/
protected boolean isDifferentValue(T newValue) {
return !Objects.equals(newValue, getValue());
} | 3.68 |
hbase_MoveWithAck_getServerNameForRegion | /**
* Get the server name recorded in hbase:meta as hosting the given region. This is hostname + port +
* startcode, comma-delimited. Can return null.
* @return regionServer hosting the given region
*/
static ServerName getServerNameForRegion(RegionInfo region, Admin admin, Connection conn)
throws IOException {
if (!admin.isTableEnabled(region.getTable())) {
return null;
}
HRegionLocation loc = conn.getRegionLocator(region.getTable())
.getRegionLocation(region.getStartKey(), region.getReplicaId(), true);
if (loc != null) {
return loc.getServerName();
} else {
return null;
}
} | 3.68 |
framework_TabSheet_getSelectedTab | /**
* Gets the selected tab content component.
*
* @return the selected tab contents
*/
public Component getSelectedTab() {
return selected;
} | 3.68 |
hudi_RunLengthDecoder_readIntLittleEndian | /**
* Reads the next 4 byte little endian int.
*/
private int readIntLittleEndian() throws IOException {
int ch4 = in.read();
int ch3 = in.read();
int ch2 = in.read();
int ch1 = in.read();
return ((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + ch4);
} | 3.68 |
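A hedged standalone sketch of the same little-endian decode against an in-memory stream; note that ch1 (the byte read last) is the most significant byte.

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class LittleEndianIntSketch {
    static int readIntLittleEndian(InputStream in) throws IOException {
        int ch4 = in.read(); // least significant byte comes first
        int ch3 = in.read();
        int ch2 = in.read();
        int ch1 = in.read();
        return (ch1 << 24) + (ch2 << 16) + (ch3 << 8) + ch4;
    }

    public static void main(String[] args) throws IOException {
        byte[] bytes = {0x78, 0x56, 0x34, 0x12}; // little-endian 0x12345678
        System.out.println(Integer.toHexString(
                readIntLittleEndian(new ByteArrayInputStream(bytes)))); // 12345678
    }
}
```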
hadoop_TaggedInputSplit_getInputSplit | /**
* Retrieves the original InputSplit.
*
* @return The InputSplit that was tagged
*/
public InputSplit getInputSplit() {
return inputSplit;
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredMultiValueSelection | /**
* Defines a configuration parameter that lets preprocessing developers
* select from a list of pre-defined configuration options, but multiple selections are allowed.
* The parameter will be rendered as a Checkbox group in the StreamPipes UI.
*
* @param label The {@link org.apache.streampipes.sdk.helpers.Label}
* that describes why this parameter is needed in a user-friendly manner.
* @param options A list of {@link org.apache.streampipes.model.staticproperty.Option} elements. Use
* {@link org.apache.streampipes.sdk.helpers.Options} to create option elements from string values.
* @return this
*/
public K requiredMultiValueSelection(Label label,
List<Option> options) {
AnyStaticProperty asp = new AnyStaticProperty(label.getInternalId(), label.getLabel(), label.getDescription());
asp.setOptions(options);
this.staticProperties.add(asp);
return me();
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableNumSnapshots | /** Enables the metric reporting the number of unreleased snapshots of the database. */
public void enableNumSnapshots() {
this.properties.add(RocksDBProperty.NumSnapshots.getRocksDBProperty());
} | 3.68 |
hudi_HoodieTableMetadataUtil_readRecordKeysFromBaseFiles | /**
* Reads the record keys from the base files and returns a {@link HoodieData} of {@link HoodieRecord} to be updated in the metadata table.
* Use {@link #readRecordKeysFromFileSlices(HoodieEngineContext, List, boolean, int, String, HoodieTableMetaClient, EngineType)} instead.
*/
@Deprecated
public static HoodieData<HoodieRecord> readRecordKeysFromBaseFiles(HoodieEngineContext engineContext,
List<Pair<String, HoodieBaseFile>> partitionBaseFilePairs,
boolean forDelete,
int recordIndexMaxParallelism,
String basePath,
SerializableConfiguration configuration,
String activeModule) {
if (partitionBaseFilePairs.isEmpty()) {
return engineContext.emptyHoodieData();
}
engineContext.setJobStatus(activeModule, "Record Index: reading record keys from " + partitionBaseFilePairs.size() + " base files");
final int parallelism = Math.min(partitionBaseFilePairs.size(), recordIndexMaxParallelism);
return engineContext.parallelize(partitionBaseFilePairs, parallelism).flatMap(partitionAndBaseFile -> {
final String partition = partitionAndBaseFile.getKey();
final HoodieBaseFile baseFile = partitionAndBaseFile.getValue();
final String filename = baseFile.getFileName();
Path dataFilePath = new Path(basePath, StringUtils.isNullOrEmpty(partition) ? filename : (partition + Path.SEPARATOR) + filename);
final String fileId = baseFile.getFileId();
final String instantTime = baseFile.getCommitTime();
HoodieFileReader reader = HoodieFileReaderFactory.getReaderFactory(HoodieRecord.HoodieRecordType.AVRO).getFileReader(configuration.get(), dataFilePath);
ClosableIterator<String> recordKeyIterator = reader.getRecordKeyIterator();
return new ClosableIterator<HoodieRecord>() {
@Override
public void close() {
recordKeyIterator.close();
}
@Override
public boolean hasNext() {
return recordKeyIterator.hasNext();
}
@Override
public HoodieRecord next() {
return forDelete
? HoodieMetadataPayload.createRecordIndexDelete(recordKeyIterator.next())
: HoodieMetadataPayload.createRecordIndexUpdate(recordKeyIterator.next(), partition, fileId, instantTime, 0);
}
};
});
} | 3.68 |
hbase_AbstractFSWALProvider_getNumLogFiles0 | /**
* Iff the given WALFactory is using the DefaultWALProvider for meta and/or non-meta, count the
* number of files (rolled and active). If either of them isn't, count 0 for that provider.
*/
@Override
protected long getNumLogFiles0() {
T log = this.wal;
return log == null ? 0 : log.getNumLogFiles();
} | 3.68 |
hbase_CompactSplit_requestSplit | /*
* The User parameter allows the split thread to assume the correct user identity
*/
private synchronized void requestSplit(final Region r, byte[] midKey, User user) {
if (midKey == null) {
LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString()
+ " not splittable because midkey=null");
return;
}
try {
this.splits.execute(new SplitRequest(r, midKey, this.server, user));
if (LOG.isDebugEnabled()) {
LOG.debug("Splitting " + r + ", " + this);
}
} catch (RejectedExecutionException ree) {
LOG.info("Could not execute split for " + r, ree);
}
} | 3.68 |
hbase_RootProcedureState_setRollback | /**
* Called by the ProcedureExecutor to mark rollback execution
*/
protected synchronized boolean setRollback() {
if (running == 0 && state == State.FAILED) {
state = State.ROLLINGBACK;
return true;
}
return false;
} | 3.68 |
shardingsphere-elasticjob_JobNodePath_getInstancesNodePath | /**
* Get instances node path.
*
* @return instances node path
*/
public String getInstancesNodePath() {
return String.format("/%s/%s", jobName, INSTANCES_NODE);
} | 3.68 |
dubbo_AnnotatedMethodParameterProcessor_buildDefaultValue | /**
* Build the default value
*
* @param parameterIndex the index of parameter
* @return the placeholder
*/
static String buildDefaultValue(int parameterIndex) {
return "{" + parameterIndex + "}";
} | 3.68 |
hadoop_RouterCacheAdmin_getRemoteMap | /**
* Returns a map with the CacheDirectiveInfo mapped to each location.
* @param path CacheDirectiveInfo to be mapped to the locations.
* @param locations the locations to map.
* @return map with CacheDirectiveInfo mapped to the locations.
*/
private Map<RemoteLocation, CacheDirectiveInfo> getRemoteMap(
CacheDirectiveInfo path, final List<RemoteLocation> locations) {
final Map<RemoteLocation, CacheDirectiveInfo> dstMap = new HashMap<>();
Iterator<RemoteLocation> iterator = locations.iterator();
while (iterator.hasNext()) {
dstMap.put(iterator.next(), path);
}
return dstMap;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedUpdateWithLiteralValues | /**
* @return Expected SQL for {@link #testUpdateWithLiteralValues()}
*/
protected String expectedUpdateWithLiteralValues() {
String value = varCharCast("'Value'");
return String.format(
"UPDATE %s SET stringField = %s%s, blobFieldOne = %s, blobFieldTwo = %s " +
"WHERE ((field1 = %s) AND (field2 = %s) AND (field3 = %s) AND (field4 = %s) AND (field5 = %s) AND (field6 = %s) AND (field7 = %s%s) AND (field8 = %s%s) " +
"AND (field9 = %s) AND (field10 = %s))",
tableName(TEST_TABLE),
stringLiteralPrefix(),
value,
expectedBlobLiteral(NEW_BLOB_VALUE_HEX),
expectedBlobLiteral(NEW_BLOB_VALUE_HEX),
expectedBooleanLiteral(true),
expectedBooleanLiteral(false),
expectedBooleanLiteral(true),
expectedBooleanLiteral(false),
expectedDateLiteral(),
expectedDateLiteral(),
stringLiteralPrefix(),
value,
stringLiteralPrefix(),
value,
expectedBlobLiteral(OLD_BLOB_VALUE_HEX),
expectedBlobLiteral(OLD_BLOB_VALUE_HEX)
);
} | 3.68 |
framework_VProgressBar_setStylePrimaryName | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.user.client.ui.UIObject#setStylePrimaryName(java.lang.
* String)
*/
@Override
public void setStylePrimaryName(String style) {
super.setStylePrimaryName(style);
indicator.setClassName(getStylePrimaryName() + "-indicator");
wrapper.setClassName(getStylePrimaryName() + "-wrapper");
} | 3.68 |
MagicPlugin_SpellResult_isStop | /**
* Determine if this result should stop processing or not.
*
* @return True if this result should stop processing, either
* temporarily (PENDING) or permanently (CANCELLED)
*/
public boolean isStop() {
return stop;
} | 3.68 |
flink_WatermarkOutputMultiplexer_getImmediateOutput | /**
* Returns an immediate {@link WatermarkOutput} for the given output ID.
*
* <p>See {@link WatermarkOutputMultiplexer} for a description of immediate and deferred
* outputs.
*/
public WatermarkOutput getImmediateOutput(String outputId) {
final PartialWatermark outputState = watermarkPerOutputId.get(outputId);
Preconditions.checkArgument(
outputState != null, "no output registered under id %s", outputId);
return new ImmediateOutput(outputState);
} | 3.68 |
pulsar_JavaInstanceRunnable_close | /**
* NOTE: this method is synchronized because it is potentially called from two different places:
* one inside the run/finally clause and one inside ThreadRuntime::stop.
*/
@Override
public synchronized void close() {
isInitialized = false;
if (stats != null) {
stats.close();
stats = null;
}
if (source != null) {
if (componentType == org.apache.pulsar.functions.proto.Function.FunctionDetails.ComponentType.SOURCE) {
Thread.currentThread().setContextClassLoader(componentClassLoader);
}
try {
source.close();
} catch (Throwable e) {
log.error("Failed to close source {}", instanceConfig.getFunctionDetails().getSource().getClassName(),
e);
} finally {
Thread.currentThread().setContextClassLoader(instanceClassLoader);
}
source = null;
}
if (sink != null) {
if (componentType == org.apache.pulsar.functions.proto.Function.FunctionDetails.ComponentType.SINK) {
Thread.currentThread().setContextClassLoader(componentClassLoader);
}
try {
sink.close();
} catch (Throwable e) {
log.error("Failed to close sink {}", instanceConfig.getFunctionDetails().getSource().getClassName(), e);
} finally {
Thread.currentThread().setContextClassLoader(instanceClassLoader);
}
sink = null;
}
if (null != javaInstance) {
try {
Thread.currentThread().setContextClassLoader(functionClassLoader);
javaInstance.close();
} finally {
Thread.currentThread().setContextClassLoader(instanceClassLoader);
javaInstance = null;
}
}
if (null != stateManager) {
stateManager.close();
}
if (null != stateStoreProvider) {
stateStoreProvider.close();
}
instanceCache = null;
if (logAppender != null) {
removeLogTopicAppender(LoggerContext.getContext());
removeLogTopicAppender(LoggerContext.getContext(false));
logAppender.stop();
logAppender = null;
}
} | 3.68 |
framework_RefreshRenderedCellsOnlyIfAttached_refreshRenderedCells | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Table#refreshRenderedCells()
*/
@Override
protected void refreshRenderedCells() {
boolean original = isRowCacheInvalidated();
super.refreshRenderedCells();
if (check) {
l1.setValue("original: " + original + ", now: "
+ isRowCacheInvalidated());
l2.setValue("should be: false & false");
}
} | 3.68 |
hbase_AccessController_checkCoveringPermission | /**
* Determine if cell ACLs covered by the operation grant access. This is expensive.
* @return false if cell ACLs failed to grant access, true otherwise
*/
private boolean checkCoveringPermission(User user, OpType request, RegionCoprocessorEnvironment e,
byte[] row, Map<byte[], ? extends Collection<?>> familyMap, long opTs, Action... actions)
throws IOException {
if (!cellFeaturesEnabled) {
return false;
}
long cellGrants = 0;
long latestCellTs = 0;
Get get = new Get(row);
// Only in case of Put/Delete op, consider TS within cell (if set for individual cells).
// When every cell, within a Mutation, can be linked with diff TS we can not rely on only one
// version. We have to get every cell version and check its TS against the TS asked for in
// Mutation and skip those Cells which is outside this Mutation TS.In case of Put, we have to
// consider only one such passing cell. In case of Delete we have to consider all the cell
// versions under this passing version. When Delete Mutation contains columns which are a
// version delete just consider only one version for those column cells.
boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE);
if (considerCellTs) {
get.readAllVersions();
} else {
get.readVersions(1);
}
boolean diffCellTsFromOpTs = false;
for (Map.Entry<byte[], ? extends Collection<?>> entry : familyMap.entrySet()) {
byte[] col = entry.getKey();
// TODO: HBASE-7114 could possibly unify the collection type in family
// maps so we would not need to do this
if (entry.getValue() instanceof Set) {
Set<byte[]> set = (Set<byte[]>) entry.getValue();
if (set == null || set.isEmpty()) {
get.addFamily(col);
} else {
for (byte[] qual : set) {
get.addColumn(col, qual);
}
}
} else if (entry.getValue() instanceof List) {
List<Cell> list = (List<Cell>) entry.getValue();
if (list == null || list.isEmpty()) {
get.addFamily(col);
} else {
// In case of family delete, a Cell will be added into the list with Qualifier as null.
for (Cell cell : list) {
if (
cell.getQualifierLength() == 0 && (cell.getTypeByte() == Type.DeleteFamily.getCode()
|| cell.getTypeByte() == Type.DeleteFamilyVersion.getCode())
) {
get.addFamily(col);
} else {
get.addColumn(col, CellUtil.cloneQualifier(cell));
}
if (considerCellTs) {
long cellTs = cell.getTimestamp();
latestCellTs = Math.max(latestCellTs, cellTs);
diffCellTsFromOpTs = diffCellTsFromOpTs || (opTs != cellTs);
}
}
}
} else if (entry.getValue() == null) {
get.addFamily(col);
} else {
throw new RuntimeException(
"Unhandled collection type " + entry.getValue().getClass().getName());
}
}
// We want to avoid looking into the future. So, if the cells of the
// operation specify a timestamp, or the operation itself specifies a
// timestamp, then we use the maximum ts found. Otherwise, we bound
// the Get to the current server time. We add 1 to the timerange since
// the upper bound of a timerange is exclusive yet we need to examine
// any cells found there inclusively.
long latestTs = Math.max(opTs, latestCellTs);
if (latestTs == 0 || latestTs == HConstants.LATEST_TIMESTAMP) {
latestTs = EnvironmentEdgeManager.currentTime();
}
get.setTimeRange(0, latestTs + 1);
// In case of Put operation we set to read all versions. This was done to consider the case
// where columns are added with TS other than the Mutation TS. But normally this wont be the
// case with Put. There no need to get all versions but get latest version only.
if (!diffCellTsFromOpTs && request == OpType.PUT) {
get.readVersions(1);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Scanning for cells with " + get);
}
// This Map is identical to familyMap. The key is a BR rather than byte[].
// It will be easy to do gets over this new Map as we can create get keys over the Cell cf by
// new SimpleByteRange(cell.familyArray, cell.familyOffset, cell.familyLen)
Map<ByteRange, List<Cell>> familyMap1 = new HashMap<>();
for (Entry<byte[], ? extends Collection<?>> entry : familyMap.entrySet()) {
if (entry.getValue() instanceof List) {
familyMap1.put(new SimpleMutableByteRange(entry.getKey()), (List<Cell>) entry.getValue());
}
}
RegionScanner scanner = getRegion(e).getScanner(new Scan(get));
List<Cell> cells = Lists.newArrayList();
Cell prevCell = null;
ByteRange curFam = new SimpleMutableByteRange();
boolean curColAllVersions = (request == OpType.DELETE);
long curColCheckTs = opTs;
boolean foundColumn = false;
try {
boolean more = false;
ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(1).build();
do {
cells.clear();
// scan with limit as 1 to hold down memory use on wide rows
more = scanner.next(cells, scannerContext);
for (Cell cell : cells) {
if (LOG.isTraceEnabled()) {
LOG.trace("Found cell " + cell);
}
boolean colChange = prevCell == null || !CellUtil.matchingColumn(prevCell, cell);
if (colChange) foundColumn = false;
prevCell = cell;
if (!curColAllVersions && foundColumn) {
continue;
}
if (colChange && considerCellTs) {
curFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
List<Cell> cols = familyMap1.get(curFam);
for (Cell col : cols) {
// null/empty qualifier is used to denote a Family delete. The TS and delete type
// associated with this is applicable for all columns within the family. That is
// why the below (col.getQualifierLength() == 0) check.
if (
(col.getQualifierLength() == 0 && request == OpType.DELETE)
|| CellUtil.matchingQualifier(cell, col)
) {
byte type = col.getTypeByte();
if (considerCellTs) {
curColCheckTs = col.getTimestamp();
}
// For a Delete op we pass allVersions as true. When a Delete Mutation contains
// a version delete for a column no need to check all the covering cells within
// that column. Check all versions when Type is DeleteColumn or DeleteFamily
// One version delete types are Delete/DeleteFamilyVersion
curColAllVersions = (KeyValue.Type.DeleteColumn.getCode() == type)
|| (KeyValue.Type.DeleteFamily.getCode() == type);
break;
}
}
}
if (cell.getTimestamp() > curColCheckTs) {
// Just ignore this cell. This is not a covering cell.
continue;
}
foundColumn = true;
for (Action action : actions) {
// Are there permissions for this user for the cell?
if (!getAuthManager().authorizeCell(user, getTableName(e), cell, action)) {
// We can stop if the cell ACL denies access
return false;
}
}
cellGrants++;
}
} while (more);
} catch (AccessDeniedException ex) {
throw ex;
} catch (IOException ex) {
LOG.error("Exception while getting cells to calculate covering permission", ex);
} finally {
scanner.close();
}
// We should not authorize unless we have found one or more cell ACLs that
// grant access. This code is used to check for additional permissions
// after no table or CF grants are found.
return cellGrants > 0;
} | 3.68 |
framework_VSlider_setConnection | /**
* Sets the current client-side communication engine.
*
* @param client
* the application connection that manages this component
* @deprecated the updated field is no longer used by the framework
*/
@Deprecated
public void setConnection(ApplicationConnection client) {
this.client = client;
} | 3.68 |
hadoop_FSDirAppendOp_verifyQuotaForUCBlock | /**
* Verify quota when using the preferred block size for UC block. This is
* usually used by append and truncate.
*
* @throws QuotaExceededException when violating the storage quota
* @return expected quota usage update. null means no change or no need to
* update quota usage later
*/
private static QuotaCounts verifyQuotaForUCBlock(FSNamesystem fsn,
INodeFile file, INodesInPath iip) throws QuotaExceededException {
FSDirectory fsd = fsn.getFSDirectory();
if (!fsn.isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
// Do not check quota if editlog is still being processed
return null;
}
if (file.getLastBlock() != null) {
final QuotaCounts delta = computeQuotaDeltaForUCBlock(fsn, file);
fsd.readLock();
try {
FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
return delta;
} finally {
fsd.readUnlock();
}
}
return null;
} | 3.68 |
hbase_Procedure_isSuccess | /** Returns true if the procedure is finished successfully. */
public synchronized boolean isSuccess() {
return state == ProcedureState.SUCCESS && !hasException();
} | 3.68 |
flink_TypeExtractor_createHadoopWritableTypeInfo | // visible for testing
public static <T> TypeInformation<T> createHadoopWritableTypeInfo(Class<T> clazz) {
checkNotNull(clazz);
Class<?> typeInfoClass;
try {
typeInfoClass =
Class.forName(
HADOOP_WRITABLE_TYPEINFO_CLASS,
false,
Thread.currentThread().getContextClassLoader());
} catch (ClassNotFoundException e) {
throw new RuntimeException(
"Could not load the TypeInformation for the class '"
+ HADOOP_WRITABLE_CLASS
+ "'. You may be missing the 'flink-hadoop-compatibility' dependency.");
}
try {
Constructor<?> constr = typeInfoClass.getConstructor(Class.class);
@SuppressWarnings("unchecked")
TypeInformation<T> typeInfo = (TypeInformation<T>) constr.newInstance(clazz);
return typeInfo;
} catch (NoSuchMethodException | IllegalAccessException | InstantiationException e) {
throw new RuntimeException(
"Incompatible versions of the Hadoop Compatibility classes found.");
} catch (InvocationTargetException e) {
throw new RuntimeException(
"Cannot create Hadoop WritableTypeInfo.", e.getTargetException());
}
} | 3.68 |
hudi_OptionsInference_setupSourceTasks | /**
* Sets up the default source task parallelism if it is not specified.
*
* @param conf The configuration
* @param envTasks The parallelism of the execution env
*/
public static void setupSourceTasks(Configuration conf, int envTasks) {
if (!conf.contains(FlinkOptions.READ_TASKS)) {
conf.setInteger(FlinkOptions.READ_TASKS, envTasks);
}
} | 3.68 |
framework_VAbstractCalendarPanel_doRenderCalendar | /**
* Performs the rendering required by the {@link #renderCalendar(boolean)}.
* Subclasses may override this method to provide a custom implementation
* avoiding {@link #renderCalendar(boolean)} override. The latter method
* contains a common logic which should not be overridden.
*
* @param updateDate
* The value false prevents setting the selected date of the
* calendar based on focusedDate. That can be used when only the
* resolution of the calendar is changed and no date has been
* selected.
*/
@SuppressWarnings("deprecation")
protected void doRenderCalendar(boolean updateDate) {
super.setStylePrimaryName(
getDateField().getStylePrimaryName() + "-calendarpanel");
if (focusedDate == null) {
Date date = getDate();
if (date == null) {
date = new Date();
}
// focusedDate must have zero hours, mins, secs, millisecs
focusedDate = new FocusedDate(date.getYear(), date.getMonth(),
date.getDate());
displayedMonth = new FocusedDate(date.getYear(), date.getMonth(),
1);
}
if (updateDate && !isDay(getResolution())
&& focusChangeListener != null) {
focusChangeListener.focusChanged(new Date(focusedDate.getTime()));
}
final boolean needsMonth = !isYear(getResolution());
boolean needsBody = isBelowMonth(resolution);
buildCalendarHeader(needsMonth, needsBody);
clearCalendarBody(!needsBody);
if (needsBody) {
buildCalendarBody();
}
} | 3.68 |
hbase_CommonFSUtils_isHDFS | /**
* Return true if this is a filesystem whose scheme is 'hdfs'.
* @throws IOException from underlying FileSystem
*/
public static boolean isHDFS(final Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
String scheme = fs.getUri().getScheme();
return scheme.equalsIgnoreCase("hdfs");
} | 3.68 |
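A minimal sketch of the same scheme check using only java.net.URI, without a Hadoop dependency.

```java
import java.net.URI;

public class SchemeCheckSketch {
    // True when the URI's scheme is 'hdfs', matching the check above.
    static boolean isHdfsUri(URI uri) {
        return "hdfs".equalsIgnoreCase(uri.getScheme());
    }

    public static void main(String[] args) {
        System.out.println(isHdfsUri(URI.create("hdfs://namenode:8020/data"))); // true
        System.out.println(isHdfsUri(URI.create("s3a://bucket/data")));          // false
    }
}
```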
hbase_ScannerContext_canEnforceTimeLimitFromScope | /** Returns true when the limit can be enforced from the scope of the checker */
boolean canEnforceTimeLimitFromScope(LimitScope checkerScope) {
return this.timeScope.canEnforceLimitFromScope(checkerScope);
} | 3.68 |
hbase_RegionServerAccounting_isAboveLowWaterMark | /**
* Return the FlushType if we're above the low watermark
* @return the FlushType
*/
public FlushType isAboveLowWaterMark() {
// for onheap memstore we check if the global memstore size and the
// global heap overhead is greater than the global memstore lower mark limit
if (memType == MemoryType.HEAP) {
if (getGlobalMemStoreHeapSize() >= globalMemStoreLimitLowMark) {
return FlushType.ABOVE_ONHEAP_LOWER_MARK;
}
} else {
if (getGlobalMemStoreOffHeapSize() >= globalMemStoreLimitLowMark) {
// Indicates that the offheap memstore's size is greater than the global memstore
// lower limit
return FlushType.ABOVE_OFFHEAP_LOWER_MARK;
} else if (getGlobalMemStoreHeapSize() >= globalOnHeapMemstoreLimitLowMark) {
// Indicates that the offheap memstore's heap overhead is greater than the global memstore
// onheap lower limit
return FlushType.ABOVE_ONHEAP_LOWER_MARK;
}
}
return FlushType.NORMAL;
} | 3.68 |
flink_BufferManager_requestFloatingBuffers | /**
* Requests floating buffers from the buffer pool based on the given required amount, and
* returns the actual requested amount. If the required amount is not fully satisfied, it will
* register as a listener.
*/
int requestFloatingBuffers(int numRequired) {
int numRequestedBuffers = 0;
synchronized (bufferQueue) {
// Similar to notifyBufferAvailable(), make sure that we never add a buffer after
// channel
// released all buffers via releaseAllResources().
if (inputChannel.isReleased()) {
return numRequestedBuffers;
}
numRequiredBuffers = numRequired;
numRequestedBuffers = tryRequestBuffers();
}
return numRequestedBuffers;
} | 3.68 |
hadoop_SelectBinding_executeSelect | /**
* Execute the select request.
* @param readContext read context
* @param objectAttributes object attributes from a HEAD request
* @param builderOptions the options which came in from the openFile builder.
* @param request the built up select request.
* @return a SelectInputStream
* @throws IOException failure
* @throws PathIOException source path is a directory.
*/
@Retries.RetryTranslated
private SelectInputStream executeSelect(
final S3AReadOpContext readContext,
final S3ObjectAttributes objectAttributes,
final Configuration builderOptions,
final SelectObjectContentRequest request) throws IOException {
Path path = readContext.getPath();
if (readContext.getDstFileStatus().isDirectory()) {
throw new PathIOException(path.toString(),
"Can't select " + path
+ " because it is a directory");
}
boolean sqlInErrors = builderOptions.getBoolean(SELECT_ERRORS_INCLUDE_SQL,
errorsIncludeSql);
String expression = request.expression();
final String errorText = sqlInErrors ? expression : "Select";
if (sqlInErrors) {
LOG.info("Issuing SQL request {}", expression);
}
SelectEventStreamPublisher selectPublisher = operations.select(path, request, errorText);
return new SelectInputStream(readContext,
objectAttributes, selectPublisher);
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setMobEnabled | /**
* Enables the mob for the family.
* @param isMobEnabled Whether to enable the mob for the family.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setMobEnabled(boolean isMobEnabled) {
return setValue(IS_MOB_BYTES, String.valueOf(isMobEnabled));
} | 3.68 |
hbase_ZKProcedureUtil_isInProcedurePath | /**
* Is this a procedure-related znode path? TODO: this is not strict; it can return true if the name
* just starts with the same prefix but is a different zdir.
* @return true if starts with baseZnode
*/
boolean isInProcedurePath(String path) {
return path.startsWith(baseZNode);
} | 3.68 |
dubbo_ConfigurableMetadataServiceExporter_getExportedURLs | // for unit test
public List<URL> getExportedURLs() {
return serviceConfig != null ? serviceConfig.getExportedUrls() : emptyList();
} | 3.68 |
pulsar_AuthenticationDataProvider_getTlsPrivateKeyFilePath | /**
*
* @return a private key file path
*/
default String getTlsPrivateKeyFilePath() {
return null;
} | 3.68 |
hadoop_DistributedCache_createAllSymlink | /**
* This method creates symlinks for all files in a given dir in another
* directory. Currently symlinks cannot be disabled. This is a NO-OP.
*
* @param conf the configuration
* @param jobCacheDir the target directory for creating symlinks
* @param workDir the directory in which the symlinks are created
* @throws IOException
* @deprecated Internal to MapReduce framework. Use DistributedCacheManager
* instead.
*/
@Deprecated
public static void createAllSymlink(
Configuration conf, File jobCacheDir, File workDir)
throws IOException{
// Do nothing
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_isNodeRemoved | /** Whether the node has been logically removed. */
private boolean isNodeRemoved(long node) {
return SkipListUtils.isNodeRemoved(node, spaceAllocator);
} | 3.68 |
framework_ComboBoxConnector_navigateItemAfterPageChange | /*
* This method navigates to the proper item in the combobox page. It
* should be executed after the setSuggestions() method, which is called from
* VFilterSelect.showSuggestions(); showSuggestions() builds the page
* content. Since setSuggestions() is called deferred,
* navigateItemAfterPageChange should also be called deferred.
* #11333
*/
private void navigateItemAfterPageChange() {
if (getWidget().selectPopupItemWhenResponseIsReceived == VFilterSelect.Select.LAST) {
getWidget().suggestionPopup.selectLastItem();
} else {
getWidget().suggestionPopup.selectFirstItem();
}
// If you're in between 2 requests both changing the page back and
// forth, you don't want this here, instead you need it before any
// other request.
// getWidget().selectPopupItemWhenResponseIsReceived =
// VFilterSelect.Select.NONE; // reset
} | 3.68 |
flink_RunLengthDecoder_readDictionaryIds | /**
* Decoding for dictionary ids. The IDs are populated into `values` and the nullability is
* populated into `nulls`.
*/
void readDictionaryIds(
int total,
WritableIntVector values,
WritableColumnVector nulls,
int rowId,
int level,
RunLengthDecoder data) {
int left = total;
while (left > 0) {
if (this.currentCount == 0) {
this.readNextGroup();
}
int n = Math.min(left, this.currentCount);
switch (mode) {
case RLE:
if (currentValue == level) {
data.readDictionaryIdData(n, values, rowId);
} else {
nulls.setNulls(rowId, n);
}
break;
case PACKED:
for (int i = 0; i < n; ++i) {
if (currentBuffer[currentBufferIdx++] == level) {
values.setInt(rowId + i, data.readInteger());
} else {
nulls.setNullAt(rowId + i);
}
}
break;
}
rowId += n;
left -= n;
currentCount -= n;
}
} | 3.68 |
hadoop_StripedReconstructor_initDecoderIfNecessary | // Initialize decoder
protected void initDecoderIfNecessary() {
if (decoder == null) {
decoder = CodecUtil.createRawDecoder(conf, ecPolicy.getCodecName(),
coderOptions);
}
} | 3.68 |
pulsar_WindowLifecycleListener_onActivation | /**
* Called on activation of the window due to the {@link TriggerPolicy}.
*
* @param events the list of current events in the window.
* @param newEvents the newly added events since last activation.
* @param expired the expired events since last activation.
* @param referenceTime the reference (event or processing) time that resulted in activation
*/
default void onActivation(List<T> events, List<T> newEvents, List<T> expired, Long
referenceTime) {
throw new UnsupportedOperationException("Not implemented");
} | 3.68 |
morf_ExceptSetOperator_deepCopy | /**
* @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation)
*/
@Override
public Builder<SetOperator> deepCopy(DeepCopyTransformation transformer) {
return TempTransitionalBuilderWrapper.<SetOperator>wrapper(new ExceptSetOperator(transformer.deepCopy(getSelectStatement())));
} | 3.68 |
flink_Tuple14_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13), where the individual fields are the value returned by calling {@link
* Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ","
+ StringUtils.arrayAwareToString(this.f11)
+ ","
+ StringUtils.arrayAwareToString(this.f12)
+ ","
+ StringUtils.arrayAwareToString(this.f13)
+ ")";
} | 3.68 |
querydsl_ExpressionUtils_allOf | /**
* Create the intersection of the given arguments
*
* @param exprs predicates
* @return intersection
*/
@Nullable
public static Predicate allOf(Predicate... exprs) {
Predicate rv = null;
for (Predicate b : exprs) {
if (b != null) {
rv = rv == null ? b : ExpressionUtils.and(rv,b);
}
}
return rv;
} | 3.68 |
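The same null-skipping conjunction pattern expressed with java.util.function.Predicate, as an illustrative sketch rather than Querydsl's API.

```java
import java.util.function.Predicate;

public class AllOfSketch {
    @SafeVarargs
    static <T> Predicate<T> allOf(Predicate<T>... preds) {
        Predicate<T> result = null;
        for (Predicate<T> p : preds) {
            if (p != null) {
                result = (result == null) ? p : result.and(p);
            }
        }
        return result; // null when no non-null predicates were given
    }

    public static void main(String[] args) {
        Predicate<Integer> positive = n -> n > 0;
        Predicate<Integer> even = n -> n % 2 == 0;
        Predicate<Integer> both = allOf(positive, null, even);
        System.out.println(both.test(4));  // true
        System.out.println(both.test(-2)); // false
    }
}
```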
hbase_HFileBlock_cacheNextBlockHeader | /**
* Save away the next blocks header in atomic reference.
* @see #getCachedHeader(long)
* @see PrefetchedHeader
*/
private void cacheNextBlockHeader(final long offset, ByteBuff onDiskBlock,
int onDiskSizeWithHeader, int headerLength) {
PrefetchedHeader ph = new PrefetchedHeader();
ph.offset = offset;
onDiskBlock.get(onDiskSizeWithHeader, ph.header, 0, headerLength);
this.prefetchedHeader.set(ph);
} | 3.68 |
hbase_BulkLoadHFilesTool_visitBulkHFiles | /**
* Iterate over the bulkDir hfiles. Skip reference, HFileLink, files starting with "_". Check and
* skip non-valid hfiles by default, or skip this validation by setting {@link #VALIDATE_HFILES}
* to false.
*/
private static <TFamily> void visitBulkHFiles(FileSystem fs, Path bulkDir,
BulkHFileVisitor<TFamily> visitor, boolean validateHFile) throws IOException {
FileStatus[] familyDirStatuses = fs.listStatus(bulkDir);
for (FileStatus familyStat : familyDirStatuses) {
if (!familyStat.isDirectory()) {
LOG.warn("Skipping non-directory " + familyStat.getPath());
continue;
}
Path familyDir = familyStat.getPath();
byte[] familyName = Bytes.toBytes(familyDir.getName());
// Skip invalid family
try {
ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(familyName);
} catch (IllegalArgumentException e) {
LOG.warn("Skipping invalid " + familyStat.getPath());
continue;
}
TFamily family = visitor.bulkFamily(familyName);
FileStatus[] hfileStatuses = fs.listStatus(familyDir);
for (FileStatus hfileStatus : hfileStatuses) {
if (!fs.isFile(hfileStatus.getPath())) {
LOG.warn("Skipping non-file " + hfileStatus);
continue;
}
Path hfile = hfileStatus.getPath();
// Skip "_", reference, HFileLink
String fileName = hfile.getName();
if (fileName.startsWith("_")) {
continue;
}
if (StoreFileInfo.isReference(fileName)) {
LOG.warn("Skipping reference " + fileName);
continue;
}
if (HFileLink.isHFileLink(fileName)) {
LOG.warn("Skipping HFileLink " + fileName);
continue;
}
// Validate HFile Format if needed
if (validateHFile) {
try {
if (!HFile.isHFileFormat(fs, hfile)) {
LOG.warn("the file " + hfile + " doesn't seems to be an hfile. skipping");
continue;
}
} catch (FileNotFoundException e) {
LOG.warn("the file " + hfile + " was removed");
continue;
}
}
visitor.bulkHFile(family, hfileStatus);
}
}
} | 3.68 |
hbase_ConnectionRegistryFactory_getRegistry | /** Returns The connection registry implementation to use. */
static ConnectionRegistry getRegistry(Configuration conf) {
Class<? extends ConnectionRegistry> clazz =
conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class,
ConnectionRegistry.class);
return ReflectionUtils.newInstance(clazz, conf);
} | 3.68 |
pulsar_AbstractMetadataStore_isValidPath | /**
* A valid path in the metadata store should
* 1. not be blank
* 2. start with '/'
* 3. not end with '/', except for the root path "/"
*/
static boolean isValidPath(String path) {
return StringUtils.equals(path, "/")
|| StringUtils.isNotBlank(path)
&& path.startsWith("/")
&& !path.endsWith("/");
} | 3.68 |
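A self-contained sketch of the same path-validity rules, using plain String checks instead of Commons Lang.

```java
public class PathValiditySketch {
    // Valid: the root "/", or a non-blank path that starts with '/' and does not end with '/'.
    static boolean isValidPath(String path) {
        if ("/".equals(path)) {
            return true;
        }
        return path != null && !path.trim().isEmpty()
                && path.startsWith("/")
                && !path.endsWith("/");
    }

    public static void main(String[] args) {
        System.out.println(isValidPath("/"));          // true
        System.out.println(isValidPath("/a/b"));       // true
        System.out.println(isValidPath("/a/b/"));      // false
        System.out.println(isValidPath("relative/x")); // false
    }
}
```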
zxing_QRCode_getMode | /**
* @return the mode. Not relevant if {@link com.google.zxing.EncodeHintType#QR_COMPACT} is selected.
*/
public Mode getMode() {
return mode;
} | 3.68 |
flink_MemoryStateBackend_getMaxStateSize | /**
* Gets the maximum size that an individual state can have, as configured in the constructor (by
* default {@value #DEFAULT_MAX_STATE_SIZE}).
*
* @return The maximum size that an individual state can have
*/
public int getMaxStateSize() {
return maxStateSize;
} | 3.68 |
framework_VScrollTable_getCellExtraWidth | /**
* Method to return the space used for cell paddings + border.
*/
private int getCellExtraWidth() {
if (cellExtraWidth < 0) {
detectExtrawidth();
}
return cellExtraWidth;
} | 3.68 |
framework_StringToBooleanConverter_convertToPresentation | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToPresentation(java.lang
* .Object, java.lang.Class, java.util.Locale)
*/
@Override
public String convertToPresentation(Boolean value,
Class<? extends String> targetType, Locale locale)
throws ConversionException {
if (value == null) {
return null;
}
if (value) {
return getTrueString(locale);
} else {
return getFalseString(locale);
}
} | 3.68 |
hbase_HRegion_completeMiniBatchOperations | /**
* This method completes mini-batch operations by calling postBatchMutate() CP hook (if
* required) and completing mvcc.
*/
public void completeMiniBatchOperations(
final MiniBatchOperationInProgress<Mutation> miniBatchOp, final WriteEntry writeEntry)
throws IOException {
if (writeEntry != null) {
region.mvcc.completeAndWait(writeEntry);
}
} | 3.68 |
framework_LayoutManager_setNeedsLayout | /**
* Marks that a ManagedLayout should be layouted in the next layout phase
* even if none of the elements managed by the layout have been resized.
* <p>
* This method should not be invoked during a layout phase since it only
* controls what will happen in the beginning of the next phase. If you want
* to explicitly cause some layout to be considered in an ongoing layout
* phase, you should use {@link #setNeedsMeasure(ComponentConnector)}
* instead.
*
* @param layout
* the managed layout that should be layouted
*/
public final void setNeedsLayout(ManagedLayout layout) {
setNeedsHorizontalLayout(layout);
setNeedsVerticalLayout(layout);
} | 3.68 |
morf_ParallelQueryHint_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return new HashCodeBuilder().append(degreeOfParallelism).build();
} | 3.68 |
flink_TestcontainersSettings_environmentVariable | /**
* Sets an environment variable and returns a reference to this Builder enabling method
* chaining.
*
* @param name The name of the environment variable.
* @param value The value of the environment variable.
* @return A reference to this Builder.
*/
public Builder environmentVariable(String name, String value) {
this.envVars.put(name, value);
return this;
} | 3.68 |
morf_TableDataHomology_getDifferences | /**
* @return The list of differences detected by the comparison.
*/
public List<String> getDifferences() {
return differences;
} | 3.68 |
hudi_HoodieStreamer_onInitializingWriteClient | /**
* Callback to initialize write client and start compaction service if required.
*
* @param writeClient HoodieWriteClient
* @return true once the write client and async table services are initialized
*/
protected Boolean onInitializingWriteClient(SparkRDDWriteClient writeClient) {
if (cfg.isAsyncCompactionEnabled()) {
if (asyncCompactService.isPresent()) {
// Update the write client used by Async Compactor.
asyncCompactService.get().updateWriteClient(writeClient);
} else {
asyncCompactService = Option.ofNullable(new SparkAsyncCompactService(hoodieSparkContext, writeClient));
// Enqueue existing pending compactions first
HoodieTableMetaClient meta =
HoodieTableMetaClient.builder().setConf(new Configuration(hoodieSparkContext.hadoopConfiguration())).setBasePath(cfg.targetBasePath).setLoadActiveTimelineOnLoad(true).build();
List<HoodieInstant> pending = CompactionUtils.getPendingCompactionInstantTimes(meta);
pending.forEach(hoodieInstant -> asyncCompactService.get().enqueuePendingAsyncServiceInstant(hoodieInstant));
asyncCompactService.get().start(error -> true);
try {
asyncCompactService.get().waitTillPendingAsyncServiceInstantsReducesTo(cfg.maxPendingCompactions);
if (asyncCompactService.get().hasError()) {
throw new HoodieException("Async compaction failed during write client initialization.");
}
} catch (InterruptedException ie) {
throw new HoodieException(ie);
}
}
}
// start async clustering if required
if (HoodieClusteringConfig.from(props).isAsyncClusteringEnabled()) {
if (asyncClusteringService.isPresent()) {
asyncClusteringService.get().updateWriteClient(writeClient);
} else {
asyncClusteringService = Option.ofNullable(new SparkAsyncClusteringService(hoodieSparkContext, writeClient));
HoodieTableMetaClient meta = HoodieTableMetaClient.builder()
.setConf(new Configuration(hoodieSparkContext.hadoopConfiguration()))
.setBasePath(cfg.targetBasePath)
.setLoadActiveTimelineOnLoad(true).build();
List<HoodieInstant> pending = ClusteringUtils.getPendingClusteringInstantTimes(meta);
LOG.info(format("Found %d pending clustering instants ", pending.size()));
pending.forEach(hoodieInstant -> asyncClusteringService.get().enqueuePendingAsyncServiceInstant(hoodieInstant));
asyncClusteringService.get().start(error -> true);
try {
asyncClusteringService.get().waitTillPendingAsyncServiceInstantsReducesTo(cfg.maxPendingClustering);
if (asyncClusteringService.get().hasError()) {
throw new HoodieException("Async clustering failed during write client initialization.");
}
} catch (InterruptedException e) {
throw new HoodieException(e);
}
}
}
return true;
} | 3.68 |
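A minimal sketch of how the clustering branch above is gated, using only the two calls visible in the snippet (HoodieClusteringConfig.from(props) and isAsyncClusteringEnabled()); the property key is an assumption taken from Hudi's clustering config and should be verified against the Hudi version in use:

import org.apache.hudi.common.config.TypedProperties;
import org.apache.hudi.config.HoodieClusteringConfig;

public class AsyncClusteringGateSketch {
    public static void main(String[] args) {
        TypedProperties props = new TypedProperties();
        // Assumed key for enabling async clustering; verify against the Hudi release in use.
        props.setProperty("hoodie.clustering.async.enabled", "true");
        // The same check the callback performs before starting the async clustering service.
        boolean asyncClustering = HoodieClusteringConfig.from(props).isAsyncClusteringEnabled();
        System.out.println("async clustering enabled: " + asyncClustering);
    }
}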
graphhopper_DirectionResolver_resolveDirections | /**
* @param node the node for which the incoming/outgoing edges should be determined
* @param location the location next to the road relative to which the 'left' and 'right' side edges should be determined
* @see DirectionResolver
*/
public DirectionResolverResult resolveDirections(int node, GHPoint location) {
AdjacentEdges adjacentEdges = calcAdjEdges(node);
if (adjacentEdges.numStandardEdges == 0) {
return DirectionResolverResult.impossible();
}
if (!adjacentEdges.hasInEdges() || !adjacentEdges.hasOutEdges()) {
return DirectionResolverResult.impossible();
}
if (adjacentEdges.nextPoints.isEmpty()) {
return DirectionResolverResult.impossible();
}
if (adjacentEdges.numZeroDistanceEdges > 0) {
// if we snap to a tower node that is adjacent to a barrier edge we apply no restrictions. this is the
// easiest thing to do, but maybe we need a more sophisticated handling of this case in the future.
return DirectionResolverResult.unrestricted();
}
Point snappedPoint = new Point(nodeAccess.getLat(node), nodeAccess.getLon(node));
if (adjacentEdges.nextPoints.contains(snappedPoint)) {
// this might happen if a pillar node of an adjacent edge has the same coordinates as the snapped point,
// but this should be prevented by the map import already
throw new IllegalStateException("Pillar node of adjacent edge matches snapped point, this should not happen");
}
// we can classify the different cases by the number of different next points!
if (adjacentEdges.nextPoints.size() == 1) {
Point neighbor = adjacentEdges.nextPoints.iterator().next();
List<Edge> inEdges = adjacentEdges.getInEdges(neighbor);
List<Edge> outEdges = adjacentEdges.getOutEdges(neighbor);
assert inEdges.size() > 0 && outEdges.size() > 0 : "if there is only one next point there has to be an in edge and an out edge connected with it";
// if there are multiple edges going to the (single) next point we cannot return a reasonable result and
// leave this point unrestricted
if (inEdges.size() > 1 || outEdges.size() > 1) {
return DirectionResolverResult.unrestricted();
}
// since there is only one next point we know this is the end of a dead end street so the right and left
// side are treated equally and for both cases we use the only possible edge ids.
return DirectionResolverResult.restricted(inEdges.get(0).edgeId, outEdges.get(0).edgeId, inEdges.get(0).edgeId, outEdges.get(0).edgeId);
} else if (adjacentEdges.nextPoints.size() == 2) {
Iterator<Point> iter = adjacentEdges.nextPoints.iterator();
Point p1 = iter.next();
Point p2 = iter.next();
List<Edge> in1 = adjacentEdges.getInEdges(p1);
List<Edge> in2 = adjacentEdges.getInEdges(p2);
List<Edge> out1 = adjacentEdges.getOutEdges(p1);
List<Edge> out2 = adjacentEdges.getOutEdges(p2);
if (in1.size() > 1 || in2.size() > 1 || out1.size() > 1 || out2.size() > 1) {
return DirectionResolverResult.unrestricted();
}
if (in1.size() + in2.size() == 0 || out1.size() + out2.size() == 0) {
throw new IllegalStateException("there has to be at least one in and one out edge when there are two next points");
}
if (in1.size() + out1.size() == 0 || in2.size() + out2.size() == 0) {
throw new IllegalStateException("there has to be at least one in or one out edge for each of the two next points");
}
Point locationPoint = new Point(location.lat, location.lon);
if (in1.isEmpty() || out2.isEmpty()) {
return resolveDirections(snappedPoint, locationPoint, in2.get(0), out1.get(0));
} else if (in2.isEmpty() || out1.isEmpty()) {
return resolveDirections(snappedPoint, locationPoint, in1.get(0), out2.get(0));
} else {
return resolveDirections(snappedPoint, locationPoint, in1.get(0), out2.get(0), in2.get(0).edgeId, out1.get(0).edgeId);
}
} else {
// we snapped to a junction, in this case we do not apply restrictions
// note: TOWER and PILLAR mostly occur when location is near the end of a dead end street or a sharp
// curve, like switchbacks in the mountains of Andorra
return DirectionResolverResult.unrestricted();
}
} | 3.68 |
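The case analysis above can be condensed into a small decision table. The following is an illustrative sketch of the control flow only, not part of the GraphHopper API; the counts are the ones collected from the snapped node's adjacent edges:

enum SnapCase { IMPOSSIBLE, UNRESTRICTED, DEAD_END, TWO_SIDED }

static SnapCase classify(int numStandardEdges, boolean hasInEdges, boolean hasOutEdges,
                         int numZeroDistanceEdges, int numDistinctNextPoints) {
    if (numStandardEdges == 0 || !hasInEdges || !hasOutEdges || numDistinctNextPoints == 0)
        return SnapCase.IMPOSSIBLE;       // no usable way in and out of the node
    if (numZeroDistanceEdges > 0)
        return SnapCase.UNRESTRICTED;     // snapped to a tower node adjacent to a barrier edge
    if (numDistinctNextPoints == 1)
        return SnapCase.DEAD_END;         // single neighbour point: end of a dead-end street
    if (numDistinctNextPoints == 2)
        return SnapCase.TWO_SIDED;        // ordinary road: left/right restrictions can apply
    return SnapCase.UNRESTRICTED;         // junction with three or more branches
}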
framework_Table_setCacheRate | /**
 * This method adjusts a possible caching mechanism of the table
 * implementation.
 *
 * <p>
 * The Table component may fetch and render some rows outside the visible
 * area. With complex tables (for example containing layouts and
 * components), the client side may become unresponsive. Setting the value
 * lower makes the UI more responsive; with higher values, scrolling on the
 * client will hit the server less frequently.
 *
 * <p>
 * The amount of cached rows will be cacheRate multiplied with pageLength
 * ({@link #setPageLength(int)}), both below and above the visible area.
 *
 * @param cacheRate
 *            a value over 0 (a low value gives the fastest rendering
 *            time). A higher value caches more rows on the server
 *            (smoother scrolling). The default value is 2.
*/
public void setCacheRate(double cacheRate) {
if (cacheRate < 0) {
throw new IllegalArgumentException(
"cacheRate cannot be less than zero");
}
if (this.cacheRate != cacheRate) {
this.cacheRate = cacheRate;
markAsDirty();
}
} | 3.68 |
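A short usage sketch (a fragment for UI initialization code; the container and column setup are omitted, and the caption is made up):

import com.vaadin.ui.Table;

Table table = new Table("Orders");
table.setPageLength(15);  // 15 rows rendered in the visible area
table.setCacheRate(0.5);  // cache roughly 0.5 * 15 rows above and below the viewport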
framework_Form_focus | /**
* Focuses the first field in the form.
*
* @see Component.Focusable#focus()
*/
@Override
public void focus() {
final Field<?> f = getFirstFocusableField();
if (f != null) {
f.focus();
}
} | 3.68 |
hudi_PreferWriterConflictResolutionStrategy_getCandidateInstants | /**
 * For table services such as replacecommit and compaction commits, this method also returns inflight ingestion commits.
*/
@Override
public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant,
Option<HoodieInstant> lastSuccessfulInstant) {
HoodieActiveTimeline activeTimeline = metaClient.reloadActiveTimeline();
if ((REPLACE_COMMIT_ACTION.equals(currentInstant.getAction())
&& ClusteringUtils.isClusteringCommit(metaClient, currentInstant))
|| COMPACTION_ACTION.equals(currentInstant.getAction())) {
return getCandidateInstantsForTableServicesCommits(activeTimeline, currentInstant);
} else {
return getCandidateInstantsForNonTableServicesCommits(activeTimeline, currentInstant);
}
} | 3.68 |
pulsar_AuthenticationProviderOpenID_validateIssuers | /**
* Validate the configured allow list of allowedIssuers. The allowedIssuers set must be nonempty in order for
* the plugin to authenticate any token. Thus, it fails initialization if the configuration is
* missing. Each issuer URL should use the HTTPS scheme. The plugin fails initialization if any
 * issuer URL is insecure, unless requireHttps is false.
* @param allowedIssuers - issuers to validate
* @param requireHttps - whether to require https for issuers.
* @param allowEmptyIssuers - whether to allow empty issuers. This setting only makes sense when kubernetes is used
* as a fallback issuer.
* @return the validated issuers
* @throws IllegalArgumentException if the allowedIssuers is empty, or contains insecure issuers when required
*/
private Set<String> validateIssuers(Set<String> allowedIssuers, boolean requireHttps, boolean allowEmptyIssuers) {
if (allowedIssuers == null || (allowedIssuers.isEmpty() && !allowEmptyIssuers)) {
throw new IllegalArgumentException("Missing configured value for: " + ALLOWED_TOKEN_ISSUERS);
}
for (String issuer : allowedIssuers) {
if (!issuer.toLowerCase().startsWith("https://")) {
log.warn("Allowed issuer is not using https scheme: {}", issuer);
if (requireHttps) {
throw new IllegalArgumentException("Issuer URL does not use https, but must: " + issuer);
}
}
}
return allowedIssuers;
} | 3.68 |
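An illustrative stand-alone sketch of the https rule the loop above enforces per issuer (the issuer URLs are hypothetical and this is not the provider's API, only the same check written in isolation):

import java.util.Set;

public class IssuerCheckSketch {
    static void checkIssuer(String issuer, boolean requireHttps) {
        if (!issuer.toLowerCase().startsWith("https://")) {
            if (requireHttps) {
                throw new IllegalArgumentException("Issuer URL does not use https, but must: " + issuer);
            }
            System.out.println("WARN: allowed issuer is not using https scheme: " + issuer);
        }
    }

    public static void main(String[] args) {
        // The second issuer is only tolerated because requireHttps is false here.
        Set.of("https://accounts.example.com", "http://dev-issuer.local")
                .forEach(issuer -> checkIssuer(issuer, false));
    }
}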
flink_JoinOperator_projectTuple22 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>
ProjectJoin<
I1,
I2,
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
projectTuple22() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
tType =
new TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(fTypes);
return new ProjectJoin<
I1,
I2,
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
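For context, a sketch of the public projection API that ends up in one of these projectTupleN(...) overloads (DataSet API; the sample data is made up, and the result here is a Tuple3 rather than a Tuple22):

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class ProjectJoinSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> users  = env.fromElements(Tuple2.of(1, "alice"), Tuple2.of(2, "bob"));
        DataSet<Tuple2<Integer, Double>> scores = env.fromElements(Tuple2.of(1, 0.9), Tuple2.of(2, 0.4));

        // projectFirst/projectSecond pick fields from either input; the matching
        // projectTupleN(...) overload builds the TupleTypeInfo of the projected result.
        users.join(scores)
             .where(0)
             .equalTo(0)
             .projectFirst(0, 1)
             .projectSecond(1)
             .print();
    }
}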
framework_Alignment_getBitMask | /**
 * Returns a bitmask representation of the alignment value. Used internally
 * by the terminal.
*
* @return the bitmask representation of the alignment value
*/
public int getBitMask() {
return bitMask;
} | 3.68 |
flink_DeweyNumber_addStage | /**
 * Creates a new dewey number from this one such that a 0 is appended as the new last digit.
*
* @return A new dewey number which contains this as a prefix and has 0 as last digit
*/
public DeweyNumber addStage() {
int[] newDeweyNumber = Arrays.copyOf(deweyNumber, deweyNumber.length + 1);
return new DeweyNumber(newDeweyNumber);
} | 3.68 |
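A short sketch of the effect (assuming the single-int constructor and the dot-separated toString() that DeweyNumber provides elsewhere in the class):

DeweyNumber v = new DeweyNumber(1);  // "1"
DeweyNumber staged = v.addStage();   // "1.0": Arrays.copyOf zero-fills the appended digit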
hadoop_WorkReport_getException | /**
* @return Exception thrown while processing work.
*/
public Exception getException() {
return exception;
} | 3.68 |
pulsar_ManagedCursorImpl_isBkErrorNotRecoverable | /**
 * Returns true for BookKeeper error codes that are considered unlikely to be recoverable.
*/
public static boolean isBkErrorNotRecoverable(int rc) {
switch (rc) {
case Code.NoSuchLedgerExistsException:
case Code.NoSuchLedgerExistsOnMetadataServerException:
case Code.ReadException:
case Code.LedgerRecoveryException:
case Code.NoSuchEntryException:
return true;
default:
return false;
}
} | 3.68 |
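A hedged usage sketch (the return code would normally arrive in a BookKeeper callback; the reaction shown is illustrative):

import org.apache.bookkeeper.client.BKException.Code;

int rc = Code.NoSuchEntryException; // e.g. a code received in a BookKeeper read callback
if (ManagedCursorImpl.isBkErrorNotRecoverable(rc)) {
    // do not retry: treat the read as permanently failed and surface the error
}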