name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_DataJoinJob_main | /**
 * @param args the command-line arguments
*/
public static void main(String[] args) {
boolean success;
if (args.length < 8 || args.length > 10) {
System.out.println("usage: DataJoinJob " + "inputdirs outputdir map_input_file_format "
+ "numofParts " + "mapper_class " + "reducer_class "
+ "map_output_value_class "
+ "output_value_class [maxNumOfValuesPerGroup [descriptionOfJob]]]");
System.exit(-1);
}
try {
JobConf job = DataJoinJob.createDataJoinJob(args);
success = DataJoinJob.runJob(job);
if (!success) {
System.out.println("Job failed");
}
} catch (IOException ioe) {
ioe.printStackTrace();
}
} | 3.68 |
hadoop_RoleModel_validate | /**
* Validation includes validating all statements.
*/
@Override
public void validate() {
requireNonNull(statement, "Statement");
checkState(VERSION.equals(version), "Invalid Version: %s", version);
statement.stream().forEach((a) -> a.validate());
} | 3.68 |
flink_BinaryRowDataSerializer_checkSkipReadForFixLengthPart | /**
 * We need to skip the bytes remaining in the current segment when they are not enough to hold
 * the binary row's fixed-length part. See {@link BinaryRowData}.
*/
public void checkSkipReadForFixLengthPart(AbstractPagedInputView source) throws IOException {
// skip the rest of the segment if there is not enough space.
// Note: Use currentSegmentLimit instead of segmentSize.
int available = source.getCurrentSegmentLimit() - source.getCurrentPositionInSegment();
if (available < getSerializedRowFixedPartLength()) {
source.advance();
}
} | 3.68 |
framework_GridRowDragger_getDropIndexCalculator | /**
* Gets the drop index calculator.
* <p>
* Default is {@code null} and the dropped items are placed on the drop
* location.
*
* @return the drop index calculator
*/
public DropIndexCalculator<T> getDropIndexCalculator() {
return dropTargetIndexCalculator;
} | 3.68 |
hudi_HoodieTable_getPendingCommitTimeline | /**
 * Get only the inflight (not yet completed) commit timeline.
*/
public HoodieTimeline getPendingCommitTimeline() {
return metaClient.getCommitsTimeline().filterPendingExcludingMajorAndMinorCompaction();
} | 3.68 |
framework_GridLayout_getColumns | /**
* Get the number of columns in the grid.
*
* @return the number of columns in the grid.
*/
public int getColumns() {
return getState(false).columns;
} | 3.68 |
hbase_ClusterMetricsBuilder_toOptions | /**
* Convert an enum set of ClusterMetrics.Option to a list of ClusterStatusProtos.Option
* @param options the ClusterMetrics options
* @return a list of ClusterStatusProtos.Option
*/
public static List<ClusterStatusProtos.Option> toOptions(EnumSet<ClusterMetrics.Option> options) {
return options.stream().map(ClusterMetricsBuilder::toOption).collect(Collectors.toList());
} | 3.68 |
flink_RocksDBNativeMetricMonitor_registerStatistics | /** Register gauges to pull native metrics for the database. */
private void registerStatistics() {
if (statistics != null) {
for (TickerType tickerType : options.getMonitorTickerTypes()) {
metricGroup.gauge(
String.format("rocksdb.%s", tickerType.name().toLowerCase()),
new RocksDBNativeStatisticsMetricView(tickerType));
}
}
} | 3.68 |
hbase_BalancerClusterState_getRackForRegion | /**
* Maps region index to rack index
*/
public int getRackForRegion(int region) {
return serverIndexToRackIndex[regionIndexToServerIndex[region]];
} | 3.68 |
flink_FileRegionWriteReadUtils_readFixedSizeRegionFromFile | /**
* Read {@link FixedSizeRegion} from {@link FileChannel}.
*
* <p>Note that this type of region's length is fixed.
*
* @param channel the channel to read.
* @param regionBuffer the buffer to read {@link FixedSizeRegion}'s header.
* @param fileOffset the file offset to start read.
* @return the {@link FixedSizeRegion} that read from this channel.
*/
public static FixedSizeRegion readFixedSizeRegionFromFile(
FileChannel channel, ByteBuffer regionBuffer, long fileOffset) throws IOException {
regionBuffer.clear();
BufferReaderWriterUtil.readByteBufferFully(channel, regionBuffer, fileOffset);
regionBuffer.flip();
int firstBufferIndex = regionBuffer.getInt();
int numBuffers = regionBuffer.getInt();
long firstBufferOffset = regionBuffer.getLong();
long lastBufferEndOffset = regionBuffer.getLong();
return new FixedSizeRegion(
firstBufferIndex, firstBufferOffset, lastBufferEndOffset, numBuffers);
} | 3.68 |
framework_VaadinSession_getUIProviders | /**
* Gets the UI providers configured for this session.
*
* @return an unmodifiable list of UI providers
*/
public List<UIProvider> getUIProviders() {
assert hasLock();
return Collections.unmodifiableList(uiProviders);
} | 3.68 |
hadoop_BlockManagerParameters_withBufferPoolSize | /**
* Sets the in-memory cache size as number of blocks.
*
* @param poolSize The buffer pool size as number of blocks.
* @return The builder.
*/
public BlockManagerParameters withBufferPoolSize(
final int poolSize) {
this.bufferPoolSize = poolSize;
return this;
} | 3.68 |
framework_FieldGroup_getPropertyType | /**
* Gets the type of the property with the given property id.
*
* @param propertyId
 *            The propertyId. Must be found in the item data source.
* @return The type of the property
*/
protected Class<?> getPropertyType(Object propertyId) throws BindException {
if (getItemDataSource() == null) {
throw new BindException("Property type for '" + propertyId
+ "' could not be determined. No item data source has been set.");
}
Property<?> p = getItemDataSource().getItemProperty(propertyId);
if (p == null) {
throw new BindException("Property type for '" + propertyId
+ "' could not be determined. No property with that id was found.");
}
return p.getType();
} | 3.68 |
hudi_BaseHoodieWriteClient_postWrite | /**
* Common method containing steps to be performed after write (upsert/insert/..) operations including auto-commit.
* @param result Commit Action Result
* @param instantTime Instant Time
* @param hoodieTable Hoodie Table
* @return Write Status
*/
public O postWrite(HoodieWriteMetadata<O> result, String instantTime, HoodieTable hoodieTable) {
if (result.getIndexLookupDuration().isPresent()) {
metrics.updateIndexMetrics(getOperationType().name(), result.getIndexUpdateDuration().get().toMillis());
}
if (result.isCommitted()) {
// Perform post commit operations.
if (result.getFinalizeDuration().isPresent()) {
metrics.updateFinalizeWriteMetrics(result.getFinalizeDuration().get().toMillis(),
result.getWriteStats().get().size());
}
postCommit(hoodieTable, result.getCommitMetadata().get(), instantTime, Option.empty());
mayBeCleanAndArchive(hoodieTable);
emitCommitMetrics(instantTime, result.getCommitMetadata().get(), hoodieTable.getMetaClient().getCommitActionType());
}
return result.getWriteStatuses();
} | 3.68 |
hadoop_LightWeightLinkedSet_getBookmark | /**
* Returns a new iterator starting at the bookmarked element.
*
* @return the iterator to the bookmarked element.
*/
public Iterator<T> getBookmark() {
LinkedSetIterator toRet = new LinkedSetIterator();
toRet.next = this.bookmark.next;
this.bookmark = toRet;
return toRet;
} | 3.68 |
dubbo_JValidatorNew_createMemberValue | // Copy from javassist.bytecode.annotation.Annotation.createMemberValue(ConstPool, CtClass);
private static MemberValue createMemberValue(ConstPool cp, CtClass type, Object value) throws NotFoundException {
MemberValue memberValue = javassist.bytecode.annotation.Annotation.createMemberValue(cp, type);
if (memberValue instanceof BooleanMemberValue) {
((BooleanMemberValue) memberValue).setValue((Boolean) value);
} else if (memberValue instanceof ByteMemberValue) {
((ByteMemberValue) memberValue).setValue((Byte) value);
} else if (memberValue instanceof CharMemberValue) {
((CharMemberValue) memberValue).setValue((Character) value);
} else if (memberValue instanceof ShortMemberValue) {
((ShortMemberValue) memberValue).setValue((Short) value);
} else if (memberValue instanceof IntegerMemberValue) {
((IntegerMemberValue) memberValue).setValue((Integer) value);
} else if (memberValue instanceof LongMemberValue) {
((LongMemberValue) memberValue).setValue((Long) value);
} else if (memberValue instanceof FloatMemberValue) {
((FloatMemberValue) memberValue).setValue((Float) value);
} else if (memberValue instanceof DoubleMemberValue) {
((DoubleMemberValue) memberValue).setValue((Double) value);
} else if (memberValue instanceof ClassMemberValue) {
((ClassMemberValue) memberValue).setValue(((Class<?>) value).getName());
} else if (memberValue instanceof StringMemberValue) {
((StringMemberValue) memberValue).setValue((String) value);
} else if (memberValue instanceof EnumMemberValue) {
((EnumMemberValue) memberValue).setValue(((Enum<?>) value).name());
}
/* else if (memberValue instanceof AnnotationMemberValue) */
else if (memberValue instanceof ArrayMemberValue) {
CtClass arrayType = type.getComponentType();
int len = Array.getLength(value);
MemberValue[] members = new MemberValue[len];
for (int i = 0; i < len; i++) {
members[i] = createMemberValue(cp, arrayType, Array.get(value, i));
}
((ArrayMemberValue) memberValue).setValue(members);
}
return memberValue;
} | 3.68 |
framework_AbstractComponent_getHeight | /*
* (non-Javadoc)
*
* @see com.vaadin.Sizeable#getHeight()
*/
@Override
public float getHeight() {
return height;
} | 3.68 |
pulsar_ResourceGroupService_getRgTenantUnRegistersCount | // Visibility for testing.
protected static double getRgTenantUnRegistersCount (String rgName) {
return rgTenantUnRegisters.labels(rgName).get();
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_getOneRandomServer | /**
* Gets a random server from the specified rack and skips anything specified.
 * @param rack          the rack from which a server is needed
* @param skipServerSet the server shouldn't belong to this set
*/
protected ServerName getOneRandomServer(String rack, Set<ServerName> skipServerSet) {
// Is the rack valid? Do we recognize it?
if (rack == null || getServersFromRack(rack) == null || getServersFromRack(rack).isEmpty()) {
return null;
}
// Lets use a set so we can eliminate duplicates
Set<StartcodeAgnosticServerName> serversToChooseFrom = Sets.newHashSet();
for (ServerName sn : getServersFromRack(rack)) {
serversToChooseFrom.add(StartcodeAgnosticServerName.valueOf(sn));
}
if (skipServerSet != null && skipServerSet.size() > 0) {
for (ServerName sn : skipServerSet) {
serversToChooseFrom.remove(StartcodeAgnosticServerName.valueOf(sn));
}
// Do we have any servers left to choose from?
if (serversToChooseFrom.isEmpty()) {
return null;
}
}
ServerName randomServer = null;
int randomIndex = ThreadLocalRandom.current().nextInt(serversToChooseFrom.size());
int j = 0;
for (StartcodeAgnosticServerName sn : serversToChooseFrom) {
if (j == randomIndex) {
randomServer = sn;
break;
}
j++;
}
if (randomServer != null) {
return ServerName.valueOf(randomServer.getAddress(), randomServer.getStartcode());
} else {
return null;
}
} | 3.68 |
framework_VColorPickerGradient_setBGColor | /**
* Sets the given css color as the background.
*
* @param bgColor
* the color to set
*/
public void setBGColor(String bgColor) {
if (bgColor == null) {
background.getElement().getStyle().clearBackgroundColor();
} else {
background.getElement().getStyle().setBackgroundColor(bgColor);
}
} | 3.68 |
framework_AbstractSelect_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addItemSetChangeListener(Container.ItemSetChangeListener)}
*/
@Override
@Deprecated
public void addListener(Container.ItemSetChangeListener listener) {
addItemSetChangeListener(listener);
} | 3.68 |
framework_Panel_changeVariables | /**
* Called when one or more variables handled by the implementing class are
* changed.
*
* @see com.vaadin.server.VariableOwner#changeVariables(Object, Map)
*/
@Override
public void changeVariables(Object source, Map<String, Object> variables) {
// Get new size
final Integer newWidth = (Integer) variables.get("width");
final Integer newHeight = (Integer) variables.get("height");
if (newWidth != null && newWidth.intValue() != getWidth()) {
setWidth(newWidth.intValue(), UNITS_PIXELS);
}
if (newHeight != null && newHeight.intValue() != getHeight()) {
setHeight(newHeight.intValue(), UNITS_PIXELS);
}
// Scrolling
final Integer newScrollX = (Integer) variables.get("scrollLeft");
final Integer newScrollY = (Integer) variables.get("scrollTop");
if (newScrollX != null && newScrollX.intValue() != getScrollLeft()) {
// set internally, not to fire request repaint
getState().scrollLeft = newScrollX.intValue();
}
if (newScrollY != null && newScrollY.intValue() != getScrollTop()) {
// set internally, not to fire request repaint
getState().scrollTop = newScrollY.intValue();
}
// Actions
if (actionManager != null) {
actionManager.handleActions(variables, this);
}
} | 3.68 |
framework_GridElement_getFooterCell | /**
* Gets footer cell element with given row and column index.
*
* @param rowIndex
* Row index
* @param colIndex
* Column index
* @return Footer cell element with given indices.
*/
public GridCellElement getFooterCell(int rowIndex, int colIndex) {
return getSubPart("#footer[" + rowIndex + "][" + colIndex + "]")
.wrap(GridCellElement.class);
} | 3.68 |
hadoop_BufferPuller_close | /**
* Closes the iterator so that the underlying streams can be closed.
*/
@Override
public void close() throws IOException {
if (closed) {
return;
}
if (null != nativeReader) {
nativeReader.close();
}
closed = true;
} | 3.68 |
hadoop_LightWeightLinkedSet_pollFirst | /**
* Remove and return first element on the linked list of all elements.
*
* @return first element
*/
public T pollFirst() {
if (head == null) {
return null;
}
T first = head.element;
this.remove(first);
return first;
} | 3.68 |
hbase_HFileArchiver_archiveStoreFile | /**
* Archive the store file
* @param fs the filesystem where the store files live
* @param regionInfo region hosting the store files
* @param conf {@link Configuration} to examine to determine the archive directory
* @param tableDir {@link Path} to where the table is being stored (for building the archive
* path)
* @param family the family hosting the store files
* @param storeFile file to be archived
* @throws IOException if the files could not be correctly disposed.
*/
public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInfo regionInfo,
Path tableDir, byte[] family, Path storeFile) throws IOException {
Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
// make sure we don't archive if we can't and that the archive dir exists
if (!fs.mkdirs(storeArchiveDir)) {
throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
+ Bytes.toString(family) + ", deleting compacted files instead.");
}
// do the actual archive
long start = EnvironmentEdgeManager.currentTime();
File file = new FileablePath(fs, storeFile);
if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
throw new IOException("Failed to archive/delete the file for region:"
+ regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) + " into "
+ storeArchiveDir + ". Something is probably awry on the filesystem.");
}
} | 3.68 |
hbase_SnapshotScannerHDFSAclHelper_getUsersWithTableReadAction | /**
* Return users with table read permission
* @param tableName the table
* @param includeNamespace true if include users with namespace read action
* @param includeGlobal true if include users with global read action
* @return users with table read permission
* @throws IOException if an error occurred
*/
Set<String> getUsersWithTableReadAction(TableName tableName, boolean includeNamespace,
boolean includeGlobal) throws IOException {
Set<String> users =
getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName));
if (includeNamespace) {
users
.addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal));
}
return users;
} | 3.68 |
dubbo_AccessLogData_setServiceName | /**
 * Sets the service name.
*
* @param serviceName
*/
public void setServiceName(String serviceName) {
set(SERVICE, serviceName);
} | 3.68 |
hbase_MultiTableInputFormatBase_getScans | /**
* Allows subclasses to get the list of {@link Scan} objects.
*/
protected List<Scan> getScans() {
return this.scans;
} | 3.68 |
framework_VAccordion_replaceWidget | /**
* Replaces the existing wrapped widget (if any) with a new widget.
*
* @param newWidget
* the new widget to wrap
*/
public void replaceWidget(Widget newWidget) {
if (widget != null) {
widgets.remove(widget);
if (open) {
remove(widget);
}
}
widget = newWidget;
widgets.add(newWidget);
if (open) {
add(widget, content);
}
} | 3.68 |
hbase_ChecksumType_codeToType | /**
 * Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
* @return Type associated with passed code.
*/
public static ChecksumType codeToType(final byte b) {
for (ChecksumType t : ChecksumType.values()) {
if (t.getCode() == b) {
return t;
}
}
throw new RuntimeException("Unknown checksum type code " + b);
} | 3.68 |
hadoop_ClientToAMTokenSecretManagerInRM_registerMasterKey | // Only for RM recovery
public synchronized SecretKey registerMasterKey(
ApplicationAttemptId applicationAttemptID, byte[] keyData) {
SecretKey key = createSecretKey(keyData);
registerApplication(applicationAttemptID, key);
return key;
} | 3.68 |
hadoop_CachingGetSpaceUsed_getDirPath | /**
* @return The directory path being monitored.
*/
public String getDirPath() {
return dirPath;
} | 3.68 |
flink_PojoFieldUtils_readField | /**
* Reads a field from the given {@link DataInputView}.
*
* <p>This read methods avoids Java serialization, by reading the classname of the field's
* declaring class and dynamically loading it. The field is also read by field name and obtained
* via reflection.
*
* @param in the input view to read from.
* @param userCodeClassLoader the user classloader.
* @return the read field.
*/
static Field readField(DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
Class<?> declaringClass = InstantiationUtil.resolveClassByName(in, userCodeClassLoader);
String fieldName = in.readUTF();
return getField(fieldName, declaringClass);
} | 3.68 |
flink_BinarySegmentUtils_copyToView | /**
* Copy bytes of segments to output view.
*
* <p>Note: It just copies the data in, not include the length.
*
* @param segments source segments
* @param offset offset for segments
* @param sizeInBytes size in bytes
* @param target target output view
*/
public static void copyToView(
MemorySegment[] segments, int offset, int sizeInBytes, DataOutputView target)
throws IOException {
for (MemorySegment sourceSegment : segments) {
int curSegRemain = sourceSegment.size() - offset;
if (curSegRemain > 0) {
int copySize = Math.min(curSegRemain, sizeInBytes);
byte[] bytes = allocateReuseBytes(copySize);
sourceSegment.get(offset, bytes, 0, copySize);
target.write(bytes, 0, copySize);
sizeInBytes -= copySize;
offset = 0;
} else {
offset -= sourceSegment.size();
}
if (sizeInBytes == 0) {
return;
}
}
if (sizeInBytes != 0) {
throw new RuntimeException(
"No copy finished, this should be a bug, "
+ "The remaining length is: "
+ sizeInBytes);
}
} | 3.68 |
hbase_BackupManifest_getAllDependentListByTable | /**
* Get the full dependent image list in the whole dependency scope for a specific table of this
* backup in time order from old to new.
* @param table table
* @return the full backup image list for a table in time order in the whole scope of the
* dependency of this image
*/
public ArrayList<BackupImage> getAllDependentListByTable(TableName table) {
ArrayList<BackupImage> tableImageList = new ArrayList<>();
ArrayList<BackupImage> imageList = getRestoreDependentList(false);
for (BackupImage image : imageList) {
if (image.hasTable(table)) {
tableImageList.add(image);
}
}
return tableImageList;
} | 3.68 |
hbase_CommonFSUtils_getRootDir | /**
* Get the path for the root data directory
* @param c configuration
* @return {@link Path} to hbase root directory from configuration as a qualified Path.
* @throws IOException e
*/
public static Path getRootDir(final Configuration c) throws IOException {
Path p = new Path(c.get(HConstants.HBASE_DIR));
FileSystem fs = p.getFileSystem(c);
return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
} | 3.68 |
graphhopper_VectorTile_setVersion | /**
* <pre>
* Any compliant implementation must first read the version
* number encoded in this message and choose the correct
* implementation for this version number before proceeding to
* decode other parts of this message.
* </pre>
*
* <code>required uint32 version = 15 [default = 1];</code>
*/
public Builder setVersion(int value) {
bitField0_ |= 0x00000001;
version_ = value;
onChanged();
return this;
} | 3.68 |
framework_VTabsheet_cancelNextBlurSchedule | /**
* Cancel the next scheduled execution. This method must be called only
     * from an event occurring before the onBlur event. This is the case with
     * IE, which doesn't trigger the focus event, so we're using this approach
     * to cancel the next blur event prior to its execution, calling the
     * method from the mouse down event.
*/
public void cancelNextBlurSchedule() {
// Make sure there's still no other command to be executed.
cancelLastBlurSchedule();
nextBlurScheduleCancelled = true;
} | 3.68 |
morf_DatabaseMetaDataProvider_createRealName | /**
* Creates {@link RealName}, which contractually remembers two versions of a
* database object name: the name as retrieved by the JDBC driver, and also
* the user-friendly camel-case name of that same object, often derived by
* looking at the comment of that object, or in schema descriptions.
*
* <p>
* Note: Any {@link RealName} is also {@link AName}, and thus can be used as a
* key in the lookup maps for convenience, just like any other {@link AName}.
*
* <p>
* However,
 * the distinction between {@link RealName} and {@link AName} is important.
* Strongly typed {@link RealName} is used in places where the two versions
* of a database object name are known, as opposed to {@link AName} being used
* in places where case insensitive map lookup keys are good enough. Method
* signatures for example use {@link RealName} if they need a specific version
* of the database object name, and use {@link AName} if they do not really
* care (or cannot be expected to care) about the true letter case.
*
* <p>
* Never create an instance of {@link RealName} without knowing true values of
* the two versions of a database object name.
*
* @param dbName the name as retrieved by the JDBC driver
* @param realName the user-friendly camel-case name of that same object,
* often derived by looking at the comment of that object,
* or in schema descriptions.
* @return {@link RealName} instance holding the two name versions.
* Can also be used as a key in the lookup maps, like {@link AName}.
*/
protected static RealName createRealName(String dbName, String realName) {
return new RealName(dbName, realName);
} | 3.68 |
flink_CheckpointConfig_disableCheckpointing | /** Disables checkpointing. */
public void disableCheckpointing() {
configuration.removeConfig(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL);
} | 3.68 |
framework_GridDropTargetConnector_isDroppingOnRowsPossible | /**
* Inspects whether the current drop would happen on the whole grid instead
     * of a specific row as the drop target. This is based on the drop mode in
     * use, whether dropping on sorted grid rows is allowed (determined on the
     * server side and automatically reflected in the drop mode), and whether
     * the grid is empty.
*
* @return {@code true} when the drop target is the whole grid, or
* {@code false} when it is one of the rows
*/
protected boolean isDroppingOnRowsPossible() {
if (getState().dropMode == DropMode.ON_GRID) {
return false;
}
if (getEscalator().getVisibleRowRange().isEmpty()) {
return false;
}
return true;
} | 3.68 |
flink_DynamicConfiguration_addAppConfigurationEntry | /** Add entries for the given application name. */
public void addAppConfigurationEntry(String name, AppConfigurationEntry... entry) {
final AppConfigurationEntry[] existing = dynamicEntries.get(name);
final AppConfigurationEntry[] updated;
if (existing == null) {
updated = Arrays.copyOf(entry, entry.length);
} else {
updated = merge(existing, entry);
}
dynamicEntries.put(name, updated);
} | 3.68 |
framework_StringToIntegerConverter_getModelType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getModelType()
*/
@Override
public Class<Integer> getModelType() {
return Integer.class;
} | 3.68 |
flink_StreamElement_isLatencyMarker | /**
* Checks whether this element is a latency marker.
*
* @return True, if this element is a latency marker, false otherwise.
*/
public final boolean isLatencyMarker() {
return getClass() == LatencyMarker.class;
} | 3.68 |
morf_SqlScriptExecutorProvider_create | /**
* @param dataSource The database connection source to use
* @param sqlDialect The dialect to use for the dataSource
* @return new instance of {@link SqlScriptExecutorProvider}
*/
public SqlScriptExecutorProvider create(final DataSource dataSource, final SqlDialect sqlDialect) {
return new SqlScriptExecutorProvider(dataSource, sqlDialect);
} | 3.68 |
hbase_TableMapReduceUtil_addDependencyJars | /**
* Add the jars containing the given classes to the job's configuration such that JobClient will
* ship them to the cluster and add them to the DistributedCache.
* @deprecated since 1.3.0 and will be removed in 3.0.0. Use {@link #addDependencyJars(Job)}
* instead.
* @see #addDependencyJars(Job)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-8386">HBASE-8386</a>
*/
@Deprecated
public static void addDependencyJars(Configuration conf, Class<?>... classes) throws IOException {
LOG.warn("The addDependencyJars(Configuration, Class<?>...) method has been deprecated since it"
+ " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) "
+ "instead. See HBASE-8386 for more details.");
addDependencyJarsForClasses(conf, classes);
} | 3.68 |
hadoop_StageConfig_getWriterQueueCapacity | /**
* Get writer queue capacity.
* @return the queue capacity
*/
public int getWriterQueueCapacity() {
return writerQueueCapacity;
} | 3.68 |
hbase_MasterObserver_preDisableTableAction | /**
   * Called prior to disabling a table. Called as part of the disable table procedure; it is
   * asynchronous to the disable table RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void preDisableTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
} | 3.68 |
hbase_MetricsMaster_setNumSpaceQuotas | /**
* Sets the number of space quotas defined.
* @see MetricsMasterQuotaSource#updateNumSpaceQuotas(long)
*/
public void setNumSpaceQuotas(final long numSpaceQuotas) {
masterQuotaSource.updateNumSpaceQuotas(numSpaceQuotas);
} | 3.68 |
morf_SqlUtils_nullLiteral | /**
* @return a {@link NullFieldLiteral}.
*/
public static FieldLiteral nullLiteral() {
return new NullFieldLiteral();
} | 3.68 |
flink_AbstractMetricGroup_getLogicalScope | /**
* Returns the logical scope of this group, for example {@code "taskmanager.job.task"}.
*
* @param filter character filter which is applied to the scope components
* @param delimiter delimiter to use for concatenating scope components
* @param reporterIndex index of the reporter
* @return logical scope
*/
String getLogicalScope(CharacterFilter filter, char delimiter, int reporterIndex) {
if (logicalScopeStrings.length == 0
|| (reporterIndex < 0 || reporterIndex >= logicalScopeStrings.length)) {
return createLogicalScope(filter, delimiter);
} else {
if (logicalScopeStrings[reporterIndex] == null) {
logicalScopeStrings[reporterIndex] = createLogicalScope(filter, delimiter);
}
return logicalScopeStrings[reporterIndex];
}
} | 3.68 |
hbase_MutableRegionInfo_getStartKey | /** Returns the startKey */
@Override
public byte[] getStartKey() {
return startKey;
} | 3.68 |
hadoop_StageConfig_build | /**
* The build command makes the config immutable.
* Idempotent.
* @return the now-frozen config
*/
public StageConfig build() {
frozen = true;
return this;
} | 3.68 |
hadoop_BCFile_getCompressionName | /**
* Get the name of the compression algorithm used to compress the block.
*
* @return name of the compression algorithm.
*/
public String getCompressionName() {
return rBlkState.getCompressionName();
} | 3.68 |
framework_StreamResource_setMIMEType | /**
* Sets the mime type of the resource.
*
* @param mimeType
* the MIME type to be set.
*/
public void setMIMEType(String mimeType) {
this.mimeType = mimeType;
} | 3.68 |
querydsl_JPAExpressions_selectZero | /**
* Create a new detached JPQLQuery instance with the projection zero
*
* @return select(0)
*/
public static JPQLQuery<Integer> selectZero() {
return select(Expressions.ZERO);
} | 3.68 |
hbase_BufferedMutator_getWriteBufferPeriodicFlushTimerTickMs | /**
 * Returns the current periodic flush timer tick interval in milliseconds.
* @return The number of milliseconds between each check if the timeout has been exceeded. This
* value only has a real meaning if the timeout has been set to > 0
*/
default long getWriteBufferPeriodicFlushTimerTickMs() {
throw new UnsupportedOperationException(
"The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented");
} | 3.68 |
pulsar_FileSystemManagedLedgerOffloader_offload | /*
 * ledgerMetadata is stored at index -1.
 */
@Override
public CompletableFuture<Void> offload(ReadHandle readHandle, UUID uuid, Map<String, String> extraMetadata) {
CompletableFuture<Void> promise = new CompletableFuture<>();
scheduler.chooseThread(readHandle.getId()).execute(
new LedgerReader(readHandle, uuid, extraMetadata, promise, storageBasePath, configuration,
assignmentScheduler, offloadPolicies.getManagedLedgerOffloadPrefetchRounds(),
this.offloaderStats));
return promise;
} | 3.68 |
hudi_HoodieTableMetaClient_getTempFolderPath | /**
* @return Temp Folder path
*/
public String getTempFolderPath() {
return basePath + Path.SEPARATOR + TEMPFOLDER_NAME;
} | 3.68 |
hadoop_RouterClientRMService_finalize | /**
* Shutdown the chain of interceptors when the object is destroyed.
*/
@Override
protected void finalize() {
rootInterceptor.shutdown();
} | 3.68 |
hudi_LSMTimelineWriter_updateManifest | /**
* Updates a manifest file.
*
* <p>4 steps:
* <ol>
* <li>read the latest manifest version file;</li>
* <li>read the latest manifest file for valid files;</li>
* <li>remove files to the existing file list from step2;</li>
* <li>add this new file to the existing file list from step2.</li>
* </ol>
*
* @param filesToRemove File names to remove
* @param fileToAdd New file name to add
*/
public void updateManifest(List<String> filesToRemove, String fileToAdd) throws IOException {
int latestVersion = LSMTimeline.latestSnapshotVersion(metaClient);
HoodieLSMTimelineManifest latestManifest = LSMTimeline.latestSnapshotManifest(metaClient, latestVersion);
HoodieLSMTimelineManifest newManifest = latestManifest.copy(filesToRemove);
newManifest.addFile(getFileEntry(fileToAdd));
createManifestFile(newManifest, latestVersion);
} | 3.68 |
pulsar_AuthorizationService_allowTopicOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
public Boolean allowTopicOperation(TopicName topicName,
TopicOperation operation,
String originalRole,
String role,
AuthenticationDataSource authData) throws Exception {
try {
return allowTopicOperationAsync(topicName, operation, originalRole, role, authData).get(
conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
morf_OracleDialect_dropStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#dropStatements(org.alfasoftware.morf.metadata.View)
*/
@Override
public Collection<String> dropStatements(View view) {
return Arrays.asList("BEGIN FOR i IN (SELECT null FROM all_views WHERE OWNER='" + getSchemaName().toUpperCase() + "' AND VIEW_NAME='" + view.getName().toUpperCase() + "') LOOP EXECUTE IMMEDIATE 'DROP VIEW " + schemaNamePrefix() + view.getName() + "'; END LOOP; END;");
} | 3.68 |
hudi_HoodieTableMetaClient_getBootstrapIndexByFileIdFolderNameFolderPath | /**
* @return Bootstrap Index By Hudi File Id Folder
*/
public String getBootstrapIndexByFileIdFolderNameFolderPath() {
return basePath + Path.SEPARATOR + BOOTSTRAP_INDEX_BY_FILE_ID_FOLDER_PATH;
} | 3.68 |
open-banking-gateway_FintechSecureStorage_psuAspspKeyToPrivate | /**
 * Sends the PSU/ASPSP key pair to the FinTech's private storage.
* @param authSession Authorization session for this PSU/Fintech user
* @param fintech FinTech to store to
* @param psuKey Key to store
* @param password FinTechs Datasafe/Keystore password
*/
@SneakyThrows
public void psuAspspKeyToPrivate(AuthSession authSession, Fintech fintech, PubAndPrivKey psuKey, Supplier<char[]> password) {
try (OutputStream os = datasafeServices.privateService().write(
WriteRequest.forDefaultPrivate(
fintech.getUserIdAuth(password),
new FintechPsuAspspTuple(authSession).toDatasafePathWithoutParent()))
) {
serde.writeKey(psuKey.getPublicKey(), psuKey.getPrivateKey(), os);
}
} | 3.68 |
morf_AbstractDatabaseType_extractPath | /**
* Extracts the path part of the URL from the stack returned by
* {@link #splitJdbcUrl(String)}. Assumes that the stack has already
* had the preceding components (e.g. protocol, host and port) popped.
*
* @param splitURL A stack containing the remaining portions of a JDBC URL.
* @return The path.
*/
protected final String extractPath(Stack<String> splitURL) {
StringBuilder path = new StringBuilder();
if (!splitURL.isEmpty()) {
splitURL.pop(); // Remove the delimiter
while (!splitURL.isEmpty()) {
path = path.append(splitURL.pop());
}
}
return path.toString();
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_getProgress | /**
* Return progress of scanning as a float between 0.0 to 1.0.
*/
public float getProgress() {
return progress;
} | 3.68 |
framework_VTextField_updateCursorPosition | /**
* Updates the cursor position variable if it has changed since the last
* update.
*
* @return true if the value was updated
*/
protected boolean updateCursorPosition() {
if (WidgetUtil.isAttachedAndDisplayed(this)) {
int cursorPos = prompting ? 0 : getCursorPos();
if (lastCursorPos != cursorPos) {
client.updateVariable(paintableId,
TextFieldConstants.VAR_CURSOR, cursorPos, false);
lastCursorPos = cursorPos;
return true;
}
}
return false;
} | 3.68 |
framework_Tree_setNullSelectionItemId | /**
* Tree does not support <code>setNullSelectionItemId</code>.
*
* @see AbstractSelect#setNullSelectionItemId(java.lang.Object)
*/
@Override
public void setNullSelectionItemId(Object nullSelectionItemId)
throws UnsupportedOperationException {
if (nullSelectionItemId != null) {
throw new UnsupportedOperationException();
}
} | 3.68 |
framework_Link_setTargetBorder | /**
* Sets the border of the target window.
*
* @param targetBorder
* the targetBorder to set.
*/
public void setTargetBorder(BorderStyle targetBorder) {
getState().targetBorder = targetBorder;
} | 3.68 |
framework_GridElement_getHeaderCellByCaption | /**
* Gets the header cell element with the given caption in the given header
* row. If there are multiple headers with the same name, the first one is
* returned.
*
* @param rowIndex
* The index of the header row
* @param caption
* The header caption
* @return The first header cell element with a given caption.
* @throws NoSuchElementException
* if there is no header row or no header cell with the given
* text.
*/
public GridCellElement getHeaderCellByCaption(int rowIndex,
String caption) {
List<GridCellElement> headerCells = getHeaderCells(rowIndex);
for (GridCellElement cell : headerCells) {
if (caption.equals(cell.getText())) {
return cell;
}
}
String errorMessage = String.format(
"The row with index %d does not have header with %s caption. ",
rowIndex, caption);
throw new NoSuchElementException(errorMessage);
} | 3.68 |
hadoop_LogParserUtil_stringToUnixTimestamp | /**
 * Converts a String date to a Unix timestamp. Note that we assume the time in the
 * logs has the same time zone as the machine that runs the
* {@link RmSingleLineParser}.
*
* @param date The String date.
* @return Unix time stamp.
* @throws ParseException if data conversion from String to unix timestamp
* fails.
*/
public long stringToUnixTimestamp(final String date) throws ParseException {
return format.parse(date).getTime();
} | 3.68 |
querydsl_MetaDataExporter_setNamingStrategy | /**
* Override the NamingStrategy (default: new DefaultNamingStrategy())
*
* @param namingStrategy naming strategy to override (default: new DefaultNamingStrategy())
*/
public void setNamingStrategy(NamingStrategy namingStrategy) {
module.bind(NamingStrategy.class, namingStrategy);
} | 3.68 |
querydsl_JTSGeometryExpression_disjoint | /**
* Returns 1 (TRUE) if this geometric object is “spatially disjoint” from anotherGeometry.
*
* @param geometry other geometry
* @return true, if disjoint
*/
public BooleanExpression disjoint(Expression<? extends Geometry> geometry) {
return Expressions.booleanOperation(SpatialOps.DISJOINT, mixin, geometry);
} | 3.68 |
hadoop_StagingCommitter_validateContext | /**
* Validate the task attempt context; makes sure
* that the task attempt ID data is valid.
* @param context task context
*/
private static void validateContext(TaskAttemptContext context) {
requireNonNull(context, "null context");
requireNonNull(context.getTaskAttemptID(),
"null task attempt ID");
requireNonNull(context.getTaskAttemptID().getTaskID(),
"null task ID");
requireNonNull(context.getTaskAttemptID().getJobID(),
"null job ID");
} | 3.68 |
framework_AbstractComponentConnector_cancelTouchTimer | /**
* If a long touch event timer is running, cancel it.
*
* @since 7.6
*/
private void cancelTouchTimer() {
WidgetUtil.setTextSelectionEnabled(getWidget().getElement(), true);
if (longTouchTimer != null) {
// Re-enable text selection
longTouchTimer.cancel();
}
} | 3.68 |
flink_TaskExecutorMemoryConfiguration_getManagedMemoryTotal | /** Returns the total amount of memory reserved for by the MemoryManager. */
public Long getManagedMemoryTotal() {
return managedMemoryTotal;
} | 3.68 |
dubbo_AbstractConfigManager_getConfigById | /**
* Get config by id
*
* @param configType
* @param id
* @return
*/
protected <C extends AbstractConfig> C getConfigById(String configType, String id) {
return (C) getConfigsMap(configType).get(id);
} | 3.68 |
framework_WeekGrid_setCalendar | /**
* @param calendar
* the calendar to set
*/
public void setCalendar(VCalendar calendar) {
this.calendar = calendar;
} | 3.68 |
hadoop_Check_ge0 | /**
 * Verifies that a long is greater than or equal to zero.
*
* @param value integer value.
* @param name the name to use in the exception message.
*
* @return the value.
*
 * @throws IllegalArgumentException if the long is less than zero.
*/
public static long ge0(long value, String name) {
if (value < 0) {
throw new IllegalArgumentException(MessageFormat.format(
"parameter [{0}] = [{1}] must be greater than or equals zero", name, value));
}
return value;
} | 3.68 |
hadoop_YarnClientUtils_generateToken | /**
* Generate SPNEGO challenge request token.
*
* @param server - hostname to contact
* @throws IOException thrown if doAs failed
* @throws InterruptedException thrown if doAs is interrupted
* @return SPNEGO token challenge
*/
public static String generateToken(String server) throws IOException,
InterruptedException {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
LOG.debug("The user credential is {}", currentUser);
String challenge = currentUser
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
GSSManager manager = GSSManager.getInstance();
// GSS name for server
GSSName serverName = manager.createName("HTTP@" + server,
GSSName.NT_HOSTBASED_SERVICE);
// Create a GSSContext for authentication with the service.
// We're passing client credentials as null since we want them to
// be read from the Subject.
// We're passing Oid as null to use the default.
GSSContext gssContext = manager.createContext(
serverName.canonicalize(null), null, null,
GSSContext.DEFAULT_LIFETIME);
gssContext.requestMutualAuth(true);
gssContext.requestCredDeleg(true);
// Establish context
byte[] inToken = new byte[0];
byte[] outToken = gssContext.initSecContext(inToken, 0,
inToken.length);
gssContext.dispose();
// Base64 encoded and stringified token for server
LOG.debug("Got valid challenge for host {}", serverName);
return new String(BASE_64_CODEC.encode(outToken),
StandardCharsets.US_ASCII);
} catch (GSSException e) {
LOG.error("Error: ", e);
throw new AuthenticationException(e);
}
}
});
return challenge;
} | 3.68 |
pulsar_ManagedLedgerConfig_getMetadataAckQuorumSize | /**
* @return the metadataAckQuorumSize
*/
public int getMetadataAckQuorumSize() {
return metadataAckQuorumSize;
} | 3.68 |
dubbo_ReflectUtils_findMethodByMethodSignature | /**
* Find method from method signature
*
* @param clazz Target class to find method
* @param methodName Method signature, e.g.: method1(int, String). It is allowed to provide method name only, e.g.: method2
* @return target method
* @throws NoSuchMethodException
* @throws ClassNotFoundException
 * @throws IllegalStateException when multiple matching methods are found (overloaded methods when parameter info is not provided)
* @deprecated Recommend {@link MethodUtils#findMethod(Class, String, Class[])}
*/
@Deprecated
public static Method findMethodByMethodSignature(Class<?> clazz, String methodName, String[] parameterTypes)
throws NoSuchMethodException, ClassNotFoundException {
Method method;
if (parameterTypes == null) {
List<Method> finded = new ArrayList<>();
for (Method m : clazz.getMethods()) {
if (m.getName().equals(methodName)) {
finded.add(m);
}
}
if (finded.isEmpty()) {
throw new NoSuchMethodException("No such method " + methodName + " in class " + clazz);
}
if (finded.size() > 1) {
String msg = String.format(
"Not unique method for method name(%s) in class(%s), find %d methods.",
methodName, clazz.getName(), finded.size());
throw new IllegalStateException(msg);
}
method = finded.get(0);
} else {
Class<?>[] types = new Class<?>[parameterTypes.length];
for (int i = 0; i < parameterTypes.length; i++) {
types[i] = ReflectUtils.name2class(parameterTypes[i]);
}
method = clazz.getMethod(methodName, types);
}
return method;
} | 3.68 |
framework_Notification_show | /**
 * Shows a notification message on the current page. The position and behavior
* of the message depends on the type, which is one of the basic types
* defined in {@link Notification}, for instance
* Notification.TYPE_WARNING_MESSAGE.
*
* The caption is rendered as plain text with HTML automatically escaped.
*
* @see #Notification(String, Type)
* @see #show(Page)
*
* @param caption
* The message
* @param description
* The message description
* @param type
* The message type
* @return The Notification
*/
public static Notification show(String caption, String description,
Type type) {
Notification notification = new Notification(caption, description,
type);
notification.extend(UI.getCurrent());
return notification;
} | 3.68 |
hadoop_LogParserUtil_parseLog | /**
* Parse the log file/directory.
*
* @param logFile the file/directory of the log.
* @throws SkylineStoreException if fails to addHistory to
* {@link SkylineStore}.
* @throws IOException if fails to parse the log.
* @throws ResourceEstimatorException if the {@link LogParser}
* is not initialized.
*/
public final void parseLog(final String logFile)
throws SkylineStoreException, IOException, ResourceEstimatorException {
if (logParser == null) {
throw new ResourceEstimatorException("The log parser is not initialized,"
+ " please try again after initializing.");
}
InputStream inputStream = null;
try {
inputStream = new FileInputStream(logFile);
logParser.parseStream(inputStream);
} finally {
if (inputStream != null) {
inputStream.close();
}
}
} | 3.68 |
hbase_IdentityTableMap_initJob | /**
* Use this before submitting a TableMap job. It will appropriately set up the JobConf.
* @param table table name
* @param columns columns to scan
* @param mapper mapper class
* @param job job configuration
*/
@SuppressWarnings("unchecked")
public static void initJob(String table, String columns, Class<? extends TableMap> mapper,
JobConf job) {
TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class,
Result.class, job);
} | 3.68 |
hudi_InLineFSUtils_getOuterFilePathFromInlinePath | /**
* InlineFS Path format:
* "inlinefs://path/to/outer/file/outer_file_scheme/?start_offset=start_offset>&length=<length>"
* <p>
* Outer File Path format:
* "outer_file_scheme://path/to/outer/file"
* <p>
* Example
* Input: "inlinefs://file1/s3a/?start_offset=20&length=40".
* Output: "s3a://file1"
*
* @param inlineFSPath InLineFS Path to get the outer file Path
* @return Outer file Path from the InLineFS Path
*/
public static Path getOuterFilePathFromInlinePath(Path inlineFSPath) {
assertInlineFSPath(inlineFSPath);
final String outerFileScheme = inlineFSPath.getParent().getName();
final Path basePath = inlineFSPath.getParent().getParent();
checkArgument(basePath.toString().contains(SCHEME_SEPARATOR),
"Invalid InLineFS path: " + inlineFSPath);
final String pathExceptScheme = basePath.toString().substring(basePath.toString().indexOf(SCHEME_SEPARATOR) + 1);
final String fullPath = outerFileScheme + SCHEME_SEPARATOR
+ (outerFileScheme.equals(LOCAL_FILESYSTEM_SCHEME) ? PATH_SEPARATOR : "")
+ pathExceptScheme;
return new Path(fullPath);
} | 3.68 |
hbase_HRegion_bulkLoadHFiles | /**
* Attempts to atomically load a group of hfiles. This is critical for loading rows with multiple
* column families atomically.
* @param familyPaths List of Pair<byte[] column family, String hfilePath>
* @param bulkLoadListener Internal hooks enabling massaging/preparation of a file about to be
* bulk loaded
* @param copyFile always copy hfiles if true
* @param clusterIds ids from clusters that had already handled the given bulkload event.
* @return Map from family to List of store file paths if successful, null if failed recoverably
* @throws IOException if failed unrecoverably.
*/
public Map<byte[], List<Path>> bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths,
boolean assignSeqId, BulkLoadListener bulkLoadListener, boolean copyFile,
List<String> clusterIds, boolean replicate) throws IOException {
long seqId = -1;
Map<byte[], List<Path>> storeFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
Map<String, Long> storeFilesSizes = new HashMap<>();
Preconditions.checkNotNull(familyPaths);
// we need writeLock for multi-family bulk load
startBulkRegionOperation(hasMultipleColumnFamilies(familyPaths));
boolean isSuccessful = false;
try {
this.writeRequestsCount.increment();
// There possibly was a split that happened between when the split keys
// were gathered and before the HRegion's write lock was taken. We need
// to validate the HFile region before attempting to bulk load all of them
IOException ioException = null;
List<Pair<byte[], String>> failures = new ArrayList<>();
for (Pair<byte[], String> p : familyPaths) {
byte[] familyName = p.getFirst();
String path = p.getSecond();
HStore store = getStore(familyName);
if (store == null) {
ioException = new org.apache.hadoop.hbase.DoNotRetryIOException(
"No such column family " + Bytes.toStringBinary(familyName));
} else {
try {
store.assertBulkLoadHFileOk(new Path(path));
} catch (WrongRegionException wre) {
// recoverable (file doesn't fit in region)
failures.add(p);
} catch (IOException ioe) {
// unrecoverable (hdfs problem)
ioException = ioe;
}
}
// validation failed because of some sort of IO problem.
if (ioException != null) {
LOG.error("There was IO error when checking if the bulk load is ok in region {}.", this,
ioException);
throw ioException;
}
}
// validation failed, bail out before doing anything permanent.
if (failures.size() != 0) {
StringBuilder list = new StringBuilder();
for (Pair<byte[], String> p : failures) {
list.append("\n").append(Bytes.toString(p.getFirst())).append(" : ")
.append(p.getSecond());
}
// problem when validating
LOG.warn("There was a recoverable bulk load failure likely due to a split. These (family,"
+ " HFile) pairs were not loaded: {}, in region {}", list.toString(), this);
return null;
}
// We need to assign a sequential ID that's in between two memstores in order to preserve
// the guarantee that all the edits lower than the highest sequential ID from all the
// HFiles are flushed on disk. See HBASE-10958. The sequence id returned when we flush is
// guaranteed to be one beyond the file made when we flushed (or if nothing to flush, it is
// a sequence id that we can be sure is beyond the last hfile written).
if (assignSeqId) {
FlushResult fs = flushcache(true, false, FlushLifeCycleTracker.DUMMY);
if (fs.isFlushSucceeded()) {
seqId = ((FlushResultImpl) fs).flushSequenceId;
} else if (fs.getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) {
seqId = ((FlushResultImpl) fs).flushSequenceId;
} else if (fs.getResult() == FlushResult.Result.CANNOT_FLUSH) {
// CANNOT_FLUSH may mean that a flush is already on-going
// we need to wait for that flush to complete
waitForFlushes();
} else {
throw new IOException("Could not bulk load with an assigned sequential ID because the "
+ "flush didn't run. Reason for not flushing: " + ((FlushResultImpl) fs).failureReason);
}
}
Map<byte[], List<Pair<Path, Path>>> familyWithFinalPath =
new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Pair<byte[], String> p : familyPaths) {
byte[] familyName = p.getFirst();
String path = p.getSecond();
HStore store = getStore(familyName);
if (!familyWithFinalPath.containsKey(familyName)) {
familyWithFinalPath.put(familyName, new ArrayList<>());
}
List<Pair<Path, Path>> lst = familyWithFinalPath.get(familyName);
String finalPath = path;
try {
boolean reqTmp = store.storeEngine.requireWritingToTmpDirFirst();
if (bulkLoadListener != null) {
finalPath = bulkLoadListener.prepareBulkLoad(familyName, path, copyFile,
reqTmp ? null : fs.getRegionDir().toString());
}
Pair<Path, Path> pair = null;
if (reqTmp || !StoreFileInfo.isHFile(finalPath)) {
pair = store.preBulkLoadHFile(finalPath, seqId);
} else {
Path livePath = new Path(finalPath);
pair = new Pair<>(livePath, livePath);
}
lst.add(pair);
} catch (IOException ioe) {
// A failure here can cause an atomicity violation that we currently
// cannot recover from since it is likely a failed HDFS operation.
LOG.error("There was a partial failure due to IO when attempting to" + " load "
+ Bytes.toString(p.getFirst()) + " : " + p.getSecond(), ioe);
if (bulkLoadListener != null) {
try {
bulkLoadListener.failedBulkLoad(familyName, finalPath);
} catch (Exception ex) {
LOG.error("Error while calling failedBulkLoad for family "
+ Bytes.toString(familyName) + " with path " + path, ex);
}
}
throw ioe;
}
}
if (this.getCoprocessorHost() != null) {
for (Map.Entry<byte[], List<Pair<Path, Path>>> entry : familyWithFinalPath.entrySet()) {
this.getCoprocessorHost().preCommitStoreFile(entry.getKey(), entry.getValue());
}
}
for (Map.Entry<byte[], List<Pair<Path, Path>>> entry : familyWithFinalPath.entrySet()) {
byte[] familyName = entry.getKey();
for (Pair<Path, Path> p : entry.getValue()) {
String path = p.getFirst().toString();
Path commitedStoreFile = p.getSecond();
HStore store = getStore(familyName);
try {
store.bulkLoadHFile(familyName, path, commitedStoreFile);
// Note the size of the store file
try {
FileSystem fs = commitedStoreFile.getFileSystem(baseConf);
storeFilesSizes.put(commitedStoreFile.getName(),
fs.getFileStatus(commitedStoreFile).getLen());
} catch (IOException e) {
LOG.warn("Failed to find the size of hfile " + commitedStoreFile, e);
storeFilesSizes.put(commitedStoreFile.getName(), 0L);
}
if (storeFiles.containsKey(familyName)) {
storeFiles.get(familyName).add(commitedStoreFile);
} else {
List<Path> storeFileNames = new ArrayList<>();
storeFileNames.add(commitedStoreFile);
storeFiles.put(familyName, storeFileNames);
}
if (bulkLoadListener != null) {
bulkLoadListener.doneBulkLoad(familyName, path);
}
} catch (IOException ioe) {
// A failure here can cause an atomicity violation that we currently
// cannot recover from since it is likely a failed HDFS operation.
// TODO Need a better story for reverting partial failures due to HDFS.
LOG.error("There was a partial failure due to IO when attempting to" + " load "
+ Bytes.toString(familyName) + " : " + p.getSecond(), ioe);
if (bulkLoadListener != null) {
try {
bulkLoadListener.failedBulkLoad(familyName, path);
} catch (Exception ex) {
LOG.error("Error while calling failedBulkLoad for family "
+ Bytes.toString(familyName) + " with path " + path, ex);
}
}
throw ioe;
}
}
}
isSuccessful = true;
if (conf.getBoolean(COMPACTION_AFTER_BULKLOAD_ENABLE, false)) {
// request compaction
familyWithFinalPath.keySet().forEach(family -> {
HStore store = getStore(family);
try {
if (this.rsServices != null && store.needsCompaction()) {
this.rsServices.getCompactionRequestor().requestSystemCompaction(this, store,
"bulkload hfiles request compaction", true);
LOG.info("Request compaction for region {} family {} after bulk load",
this.getRegionInfo().getEncodedName(), store.getColumnFamilyName());
}
} catch (IOException e) {
LOG.error("bulkload hfiles request compaction error ", e);
}
});
}
} finally {
if (wal != null && !storeFiles.isEmpty()) {
// Write a bulk load event for hfiles that are loaded
try {
WALProtos.BulkLoadDescriptor loadDescriptor =
ProtobufUtil.toBulkLoadDescriptor(this.getRegionInfo().getTable(),
UnsafeByteOperations.unsafeWrap(this.getRegionInfo().getEncodedNameAsBytes()),
storeFiles, storeFilesSizes, seqId, clusterIds, replicate);
WALUtil.writeBulkLoadMarkerAndSync(this.wal, this.getReplicationScope(), getRegionInfo(),
loadDescriptor, mvcc, regionReplicationSink.orElse(null));
} catch (IOException ioe) {
if (this.rsServices != null) {
// Have to abort region server because some hfiles has been loaded but we can't write
// the event into WAL
isSuccessful = false;
this.rsServices.abort("Failed to write bulk load event into WAL.", ioe);
}
}
}
closeBulkRegionOperation();
}
return isSuccessful ? storeFiles : null;
} | 3.68 |
pulsar_StateChangeListeners_notifyOnCompletion | /**
* Notify all currently added listeners on completion of the future.
*
* @return future of a new completion stage
*/
public <T> CompletableFuture<T> notifyOnCompletion(CompletableFuture<T> future,
String serviceUnit,
ServiceUnitStateData data) {
return future.whenComplete((r, ex) -> notify(serviceUnit, data, ex));
} | 3.68 |
hbase_ProcedureExecutor_removeResult | /**
* Mark the specified completed procedure, as ready to remove.
* @param procId the ID of the procedure to remove
*/
public void removeResult(long procId) {
CompletedProcedureRetainer<TEnvironment> retainer = completed.get(procId);
if (retainer == null) {
assert !procedures.containsKey(procId) : "pid=" + procId + " is still running";
LOG.debug("pid={} already removed by the cleaner.", procId);
return;
}
// The CompletedProcedureCleaner will take care of deletion, once the TTL is expired.
retainer.setClientAckTime(EnvironmentEdgeManager.currentTime());
} | 3.68 |
dubbo_JValidationNew_createValidator | /**
* Return new instance of {@link JValidator}
* @param url Valid URL instance
* @return Instance of JValidator
*/
@Override
protected Validator createValidator(URL url) {
return new JValidatorNew(url);
} | 3.68 |
framework_DefaultConnectionStateHandler_getDialogTextGaveUp | /**
* Gets the text to show in the reconnect dialog after giving up (reconnect
* limit reached).
*
* @param reconnectAttempt
* The number of the current reconnection attempt
* @return The text to show in the reconnect dialog after giving up
*/
protected String getDialogTextGaveUp(int reconnectAttempt) {
return getConfiguration().dialogTextGaveUp.replace("{0}",
reconnectAttempt + "");
} | 3.68 |
hbase_ResponseConverter_buildClearRegionBlockCacheResponse | /**
* Creates a protocol buffer ClearRegionBlockCacheResponse
* @return a ClearRegionBlockCacheResponse
*/
public static AdminProtos.ClearRegionBlockCacheResponse
buildClearRegionBlockCacheResponse(final HBaseProtos.CacheEvictionStats cacheEvictionStats) {
return AdminProtos.ClearRegionBlockCacheResponse.newBuilder().setStats(cacheEvictionStats)
.build();
} | 3.68 |
hbase_RawDouble_decodeDouble | /**
* Read a {@code double} value from the buffer {@code buff}.
*/
public double decodeDouble(byte[] buff, int offset) {
double val = Bytes.toDouble(buff, offset);
return val;
} | 3.68 |
hbase_NewVersionBehaviorTracker_prepare | /**
   * Reset the map if the column qualifier differs from the last Cell's. Save the cq
   * array/offset/length for the next Cell.
* @return If this put has duplicate ts with last cell, return the mvcc of last cell. Else return
* MAX_VALUE.
*/
protected long prepare(Cell cell) {
if (isColumnQualifierChanged(cell)) {
// The last cell is family-level delete and this is not, or the cq is changed,
// we should construct delColMap as a deep copy of delFamMap.
delColMap.clear();
for (Map.Entry<Long, DeleteVersionsNode> e : delFamMap.entrySet()) {
delColMap.put(e.getKey(), e.getValue().getDeepCopy());
}
countCurrentCol = 0;
} else if (
!PrivateCellUtil.isDelete(lastCqType) && lastCqType == cell.getTypeByte()
&& lastCqTs == cell.getTimestamp()
) {
// Put with duplicate timestamp, ignore.
return lastCqMvcc;
}
lastCqArray = cell.getQualifierArray();
lastCqOffset = cell.getQualifierOffset();
lastCqLength = cell.getQualifierLength();
lastCqTs = cell.getTimestamp();
lastCqMvcc = cell.getSequenceId();
lastCqType = cell.getTypeByte();
return Long.MAX_VALUE;
} | 3.68 |
hmily_JavaBeanBinder_getField | /**
* Gets field.
*
* @return the field
*/
public Field getField() {
return field;
} | 3.68 |
hbase_WALFactory_close | /**
   * Shutdown all WALs and clean up any underlying storage. Use only when you will not need to
   * replay any edits that have gone to any WALs from this factory.
*/
public void close() throws IOException {
List<IOException> ioes = new ArrayList<>();
// these fields could be null if the WALFactory is created only for being used in the
// getInstance method.
if (metaProvider != null) {
try {
metaProvider.close();
} catch (IOException e) {
ioes.add(e);
}
}
if (replicationProvider != null) {
try {
replicationProvider.close();
} catch (IOException e) {
ioes.add(e);
}
}
if (provider != null) {
try {
provider.close();
} catch (IOException e) {
ioes.add(e);
}
}
if (!ioes.isEmpty()) {
IOException ioe = new IOException("Failed to close WALFactory");
for (IOException e : ioes) {
ioe.addSuppressed(e);
}
throw ioe;
}
} | 3.68 |
hadoop_StageConfig_exitStage | /**
* Exit the stage; calls back to
* {@link #enterStageEventHandler} if non-null.
   * @param stage stage exited
*/
public void exitStage(String stage) {
if (enterStageEventHandler != null) {
enterStageEventHandler.exitStage(stage);
}
} | 3.68 |
dubbo_ServiceInvokeRestFilter_getAcceptMediaType | /**
* return first match , if any multiple content-type
*
* @param request
* @return
*/
public static MediaType getAcceptMediaType(RequestFacade request, Class<?> returnType) {
String accept = request.getHeader(RestHeaderEnum.ACCEPT.getHeader());
accept = Objects.isNull(accept) ? MediaType.ALL_VALUE.value : accept;
MediaType mediaType = MediaTypeUtil.convertMediaType(returnType, accept);
return mediaType;
} | 3.68 |