name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
framework_CustomLayoutDemo_loginClicked | /**
* Login button clicked. Hide login components and replace username
* component with "Welcome user Username" message.
*
*/
public void loginClicked() {
username.setVisible(false);
loginPwd.setVisible(false);
if (username.getValue().length() < 1) {
username.setValue("Anonymous");
}
mainLayout.replaceComponent(loginButton,
new Label("Welcome user <em>" + username.getValue() + "</em>",
ContentMode.HTML));
} | 3.68 |
framework_AbstractComponentTest_createCategory | /**
 * Creates a category named "category" inside the parent category
 * "parentCategory". Each category name must be globally unique.
 *
 * @param category the name of the category to create
 * @param parentCategory the parent category name, or null for a top-level category
 * @return the menu item created for the category, or the existing one if already present
*/
protected MenuItem createCategory(String category, String parentCategory) {
if (hasCategory(category)) {
return categoryToMenuItem.get(category);
}
MenuItem item;
if (parentCategory == null) {
item = mainMenu.addItem(category, null);
} else {
item = getCategoryMenuItem(parentCategory).addItem(category, null);
}
categoryToMenuItem.put(category, item);
menuItemToCategory.put(item, category);
return item;
} | 3.68 |
hbase_CanaryTool_write | /**
* Check writes for the canary table
*/
private Void write() {
Table table = null;
TableDescriptor tableDesc = null;
try {
table = connection.getTable(region.getTable());
tableDesc = table.getDescriptor();
byte[] rowToCheck = region.getStartKey();
if (rowToCheck.length == 0) {
rowToCheck = new byte[] { 0x0 };
}
int writeValueSize =
connection.getConfiguration().getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10);
for (ColumnFamilyDescriptor column : tableDesc.getColumnFamilies()) {
Put put = new Put(rowToCheck);
byte[] value = new byte[writeValueSize];
Bytes.random(value);
put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value);
LOG.debug("Writing to {} {} {} {}", tableDesc.getTableName(),
region.getRegionNameAsString(), column.getNameAsString(),
Bytes.toStringBinary(rowToCheck));
try {
long startTime = EnvironmentEdgeManager.currentTime();
table.put(put);
long time = EnvironmentEdgeManager.currentTime() - startTime;
this.readWriteLatency.add(time);
sink.publishWriteTiming(serverName, region, column, time);
} catch (Exception e) {
sink.publishWriteFailure(serverName, region, column, e);
}
}
table.close();
} catch (IOException e) {
sink.publishWriteFailure(serverName, region, e);
sink.updateWriteFailures(region.getRegionNameAsString(), serverName.getHostname());
}
return null;
} | 3.68 |
hbase_HBaseServerBase_putUpWebUI | /**
* Puts up the webui.
*/
private void putUpWebUI() throws IOException {
int port =
this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT);
String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0");
if (this instanceof HMaster) {
port = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT);
addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
}
// -1 is for disabling info server
if (port < 0) {
return;
}
if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
String msg = "Failed to start http info server. Address " + addr
+ " does not belong to this host. Correct configuration parameter: "
+ "hbase.regionserver.info.bindAddress";
LOG.error(msg);
throw new IOException(msg);
}
// check if auto port bind enabled
boolean auto = this.conf.getBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, false);
while (true) {
try {
this.infoServer = new InfoServer(getProcessName(), addr, port, false, this.conf);
infoServer.addPrivilegedServlet("dump", "/dump", getDumpServlet());
configureInfoServer(infoServer);
this.infoServer.start();
break;
} catch (BindException e) {
if (!auto) {
// auto bind disabled throw BindException
LOG.error("Failed binding http info server to port: " + port);
throw e;
}
// auto bind enabled, try to use another port
LOG.info("Failed binding http info server to port: " + port);
port++;
LOG.info("Retry starting http info server with port: " + port);
}
}
port = this.infoServer.getPort();
conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port);
int masterInfoPort =
conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT);
conf.setInt("hbase.master.info.port.orig", masterInfoPort);
conf.setInt(HConstants.MASTER_INFO_PORT, port);
} | 3.68 |
framework_BrowserWindowOpener_setWindowName | /**
* Sets the target window name that will be used. If a window has already
* been opened with the same name, the contents of that window will be
* replaced instead of opening a new window. If the name is
* <code>null</code> or <code>"_blank"</code>, a new window will always be
* opened.
*
* @param windowName
* the target name for the window
*/
public void setWindowName(String windowName) {
getState().target = windowName;
} | 3.68 |
hibernate-validator_ModUtil_calculateLuhnMod10Check | /**
* Calculate Luhn Modulo 10 checksum (Luhn algorithm implementation)
*
* @param digits The digits over which to calculate the checksum
*
* @return the result of the mod10 checksum calculation
*/
public static int calculateLuhnMod10Check(final List<Integer> digits) {
int sum = 0;
boolean even = true;
for ( int index = digits.size() - 1; index >= 0; index-- ) {
int digit = digits.get( index );
if ( even ) {
digit <<= 1;
}
if ( digit > 9 ) {
digit -= 9;
}
sum += digit;
even = !even;
}
return ( 10 - ( sum % 10 ) ) % 10;
} | 3.68 |
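A short worked example may help sanity-check the snippet above. This is an editor-added illustration, not part of the dataset, and it assumes the calculateLuhnMod10Check method from this row is available on the classpath.

import java.util.Arrays;
import java.util.List;

public class LuhnExample {
    public static void main(String[] args) {
        // Payload digits of the classic Luhn test number 79927398713, without its trailing check digit.
        List<Integer> digits = Arrays.asList(7, 9, 9, 2, 7, 3, 9, 8, 7, 1);
        // Doubling every second digit from the right gives a sum of 67, so the check digit is (10 - 67 % 10) % 10 = 3.
        int checkDigit = ModUtil.calculateLuhnMod10Check(digits);
        System.out.println(checkDigit); // prints 3
    }
}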
framework_BarInUIDL_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
addButton("Click Me", event -> addComponent(
new Label("Thank you for clicking | bar")));
} | 3.68 |
hbase_HBaseCommonTestingUtility_getConfiguration | /**
 * Returns this class's instance of {@link Configuration}.
* @return Instance of Configuration.
*/
public Configuration getConfiguration() {
return this.conf;
} | 3.68 |
framework_ObjectProperty_setValue | /**
* Sets the value of the property.
*
* Note that since Vaadin 7, no conversions are performed and the value must
* be of the correct type.
*
* @param newValue
 * the new value of the property.
 * @throws Property.ReadOnlyException
* if the object is in read-only mode
*/
@Override
public void setValue(T newValue) throws Property.ReadOnlyException {
// Checks the mode
if (isReadOnly()) {
throw new Property.ReadOnlyException();
}
this.value = newValue;
fireValueChange();
} | 3.68 |
flink_VoidNamespace_readResolve | // make sure that we preserve the singleton properly on serialization
private Object readResolve() throws ObjectStreamException {
return INSTANCE;
} | 3.68 |
rocketmq-connect_ExpressionBuilder_create | /**
* Create a new ExpressionBuilder using the default {@link IdentifierRules}.
*
* @return the expression builder
*/
public static ExpressionBuilder create() {
return new ExpressionBuilder();
} | 3.68 |
morf_AbstractSqlDialectTest_testPostInsertWithPresetAutonumStatementsInsertingUnderAutonumLimit | /**
 * Tests the SQL statements that are run after a data insert.
*/
@Test
public void testPostInsertWithPresetAutonumStatementsInsertingUnderAutonumLimit() {
testDialect.postInsertWithPresetAutonumStatements(metadata.getTable(TEST_TABLE), sqlScriptExecutor,connection,true);
testDialect.postInsertWithPresetAutonumStatements(metadata.getTable(AUTO_NUMBER_TABLE), sqlScriptExecutor,connection, true);
verifyPostInsertStatementsInsertingUnderAutonumLimit(sqlScriptExecutor,connection);
} | 3.68 |
hadoop_DiskBalancerDataNode_getDataNodeName | /**
* Get DataNode DNS name.
*
* @return name of the node
*/
public String getDataNodeName() {
return dataNodeName;
} | 3.68 |
hadoop_S3ARemoteInputStream_getPos | /**
* Gets the current position.
*
* @return the current position.
* @throws IOException if there is an IO error during this operation.
*/
public long getPos() throws IOException {
throwIfClosed();
return nextReadPos;
} | 3.68 |
dubbo_ModuleConfigManager_getDefaultConsumer | /**
* Only allows one default ConsumerConfig
*/
public Optional<ConsumerConfig> getDefaultConsumer() {
List<ConsumerConfig> consumerConfigs = getDefaultConfigs(getConfigsMap(getTagName(ConsumerConfig.class)));
if (CollectionUtils.isNotEmpty(consumerConfigs)) {
return Optional.of(consumerConfigs.get(0));
}
return Optional.empty();
} | 3.68 |
graphhopper_VectorTile_getKeysCount | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public int getKeysCount() {
return keys_.size();
} | 3.68 |
framework_VColorPicker_refreshColor | /**
* Update color icon to show the currently selected color.
*/
public void refreshColor() {
if (color != null) {
if (colorIcon == null) {
colorIcon = new HTML();
colorIcon.setStylePrimaryName("v-colorpicker-button-color");
wrapper.insertBefore(colorIcon.getElement(), captionElement);
}
// Set the color
colorIcon.getElement().getStyle().setProperty("background", color);
}
} | 3.68 |
flink_TimestampedValue_from | /**
* Creates a TimestampedValue from given {@link StreamRecord}.
*
* @param streamRecord The StreamRecord object from which TimestampedValue is to be created.
*/
public static <T> TimestampedValue<T> from(StreamRecord<T> streamRecord) {
if (streamRecord.hasTimestamp()) {
return new TimestampedValue<>(streamRecord.getValue(), streamRecord.getTimestamp());
} else {
return new TimestampedValue<>(streamRecord.getValue());
}
} | 3.68 |
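As an editor-added usage sketch (not part of the dataset, assuming Flink's StreamRecord constructors), the factory method above carries the timestamp over only when the record has one:

// Record created with an explicit timestamp: the TimestampedValue keeps it.
StreamRecord<String> withTs = new StreamRecord<>("a", 42L);
TimestampedValue<String> tv1 = TimestampedValue.from(withTs); // tv1 carries timestamp 42

// Record created without a timestamp: the resulting TimestampedValue has none either.
StreamRecord<String> noTs = new StreamRecord<>("b");
TimestampedValue<String> tv2 = TimestampedValue.from(noTs);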
hbase_HStoreFile_open | /**
* Opens reader on this store file. Called by Constructor.
* @see #closeStoreFile(boolean)
*/
private void open() throws IOException {
fileInfo.initHDFSBlocksDistribution();
long readahead = fileInfo.isNoReadahead() ? 0L : -1L;
ReaderContext context = fileInfo.createReaderContext(false, readahead, ReaderType.PREAD);
fileInfo.initHFileInfo(context);
StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf);
if (reader == null) {
reader = fileInfo.createReader(context, cacheConf);
fileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader());
}
this.initialReader = fileInfo.postStoreFileReaderOpen(context, cacheConf, reader);
if (InputStreamBlockDistribution.isEnabled(fileInfo.getConf())) {
boolean useHBaseChecksum = context.getInputStreamWrapper().shouldUseHBaseChecksum();
FSDataInputStream stream = context.getInputStreamWrapper().getStream(useHBaseChecksum);
this.initialReaderBlockDistribution = new InputStreamBlockDistribution(stream, fileInfo);
}
// Load up indices and fileinfo. This also loads Bloom filter type.
metadataMap = Collections.unmodifiableMap(initialReader.loadFileInfo());
// Read in our metadata.
byte[] b = metadataMap.get(MAX_SEQ_ID_KEY);
if (b != null) {
// By convention, if halfhfile, top half has a sequence number > bottom
 // half. That's why we add one below. It's done in case the two halves
// are ever merged back together --rare. Without it, on open of store,
// since store files are distinguished by sequence id, the one half would
// subsume the other.
this.sequenceid = Bytes.toLong(b);
if (fileInfo.isTopReference()) {
this.sequenceid += 1;
}
}
if (isBulkLoadResult()) {
// generate the sequenceId from the fileName
// fileName is of the form <randomName>_SeqId_<id-when-loaded>_
String fileName = this.getPath().getName();
// Use lastIndexOf() to get the last, most recent bulk load seqId.
int startPos = fileName.lastIndexOf("SeqId_");
if (startPos != -1) {
this.sequenceid =
Long.parseLong(fileName.substring(startPos + 6, fileName.indexOf('_', startPos + 6)));
// Handle reference files as done above.
if (fileInfo.isTopReference()) {
this.sequenceid += 1;
}
}
// SKIP_RESET_SEQ_ID only works in bulk loaded file.
// In mob compaction, the hfile where the cells contain the path of a new mob file is bulk
// loaded to hbase, these cells have the same seqIds with the old ones. We do not want
// to reset new seqIds for them since this might make a mess of the visibility of cells that
// have the same row key but different seqIds.
boolean skipResetSeqId = isSkipResetSeqId(metadataMap.get(SKIP_RESET_SEQ_ID));
if (skipResetSeqId) {
// increase the seqId when it is a bulk loaded file from mob compaction.
this.sequenceid += 1;
}
initialReader.setSkipResetSeqId(skipResetSeqId);
initialReader.setBulkLoaded(true);
}
initialReader.setSequenceID(this.sequenceid);
b = metadataMap.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
if (b != null) {
this.maxMemstoreTS = Bytes.toLong(b);
}
b = metadataMap.get(MAJOR_COMPACTION_KEY);
if (b != null) {
boolean mc = Bytes.toBoolean(b);
if (this.majorCompaction == null) {
this.majorCompaction = new AtomicBoolean(mc);
} else {
this.majorCompaction.set(mc);
}
} else {
 // Presume it is not major compacted if it doesn't explicitly say so
// HFileOutputFormat explicitly sets the major compacted key.
this.majorCompaction = new AtomicBoolean(false);
}
b = metadataMap.get(EXCLUDE_FROM_MINOR_COMPACTION_KEY);
this.excludeFromMinorCompaction = (b != null && Bytes.toBoolean(b));
BloomType hfileBloomType = initialReader.getBloomFilterType();
if (cfBloomType != BloomType.NONE) {
initialReader.loadBloomfilter(BlockType.GENERAL_BLOOM_META, metrics);
if (hfileBloomType != cfBloomType) {
LOG.debug("HFile Bloom filter type for " + initialReader.getHFileReader().getName() + ": "
+ hfileBloomType + ", but " + cfBloomType + " specified in column family "
+ "configuration");
}
} else if (hfileBloomType != BloomType.NONE) {
LOG.info(
"Bloom filter turned off by CF config for " + initialReader.getHFileReader().getName());
}
// load delete family bloom filter
initialReader.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META, metrics);
try {
byte[] data = metadataMap.get(TIMERANGE_KEY);
initialReader.timeRange =
data == null ? null : TimeRangeTracker.parseFrom(data).toTimeRange();
} catch (IllegalArgumentException e) {
LOG.error("Error reading timestamp range data from meta -- " + "proceeding without", e);
this.initialReader.timeRange = null;
}
try {
byte[] data = metadataMap.get(COMPACTION_EVENT_KEY);
this.compactedStoreFiles.addAll(ProtobufUtil.toCompactedStoreFiles(data));
} catch (IOException e) {
LOG.error("Error reading compacted storefiles from meta data", e);
}
// initialize so we can reuse them after reader closed.
firstKey = initialReader.getFirstKey();
lastKey = initialReader.getLastKey();
comparator = initialReader.getComparator();
} | 3.68 |
hbase_Constraints_has | /**
* Check to see if the Constraint is currently set.
* @param desc {@link TableDescriptor} to check
* @param clazz {@link Constraint} class to check for.
* @return <tt>true</tt> if the {@link Constraint} is present, even if it is disabled.
* <tt>false</tt> otherwise.
*/
public static boolean has(TableDescriptor desc, Class<? extends Constraint> clazz) {
return getKeyValueForClass(desc, clazz) != null;
} | 3.68 |
dubbo_RpcStatus_getSucceededElapsed | /**
* get succeeded elapsed.
*
* @return succeeded elapsed
*/
public long getSucceededElapsed() {
return getTotalElapsed() - getFailedElapsed();
} | 3.68 |
hbase_PrivateCellUtil_updateLatestStamp | /**
* Sets the given timestamp to the cell iff current timestamp is
* {@link HConstants#LATEST_TIMESTAMP}.
* @return True if cell timestamp is modified.
* @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static boolean updateLatestStamp(Cell cell, byte[] ts) throws IOException {
if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) {
setTimestamp(cell, ts);
return true;
}
return false;
} | 3.68 |
flink_RocksDBStateBackend_getPredefinedOptions | /**
* Gets the currently set predefined options for RocksDB. The default options (if nothing was
* set via {@link #setPredefinedOptions(PredefinedOptions)}) are {@link
* PredefinedOptions#DEFAULT}.
*
 * <p>If user-configured options within {@link RocksDBConfigurableOptions} are set (through
 * flink-conf.yaml) or a user-defined options factory is set (via {@link
* #setRocksDBOptions(RocksDBOptionsFactory)}), then the options from the factory are applied on
* top of the predefined and customized options.
*
* @return The currently set predefined options for RocksDB.
*/
@VisibleForTesting
public PredefinedOptions getPredefinedOptions() {
return rocksDBStateBackend.getPredefinedOptions();
} | 3.68 |
hbase_MobFileName_getRegionName | /**
* Gets region name
* @return name of a region, where this file was created during flush or compaction.
*/
public String getRegionName() {
return regionName;
} | 3.68 |
framework_AbstractSelect_setNullSelectionItemId | /**
* Sets the item id that represents null value of this select.
*
* <p>
* Data interface does not support nulls as item ids. Selecting the item
* identified by this id is the same as selecting no items at all. This
* setting only affects the single select mode.
* </p>
*
* @param nullSelectionItemId
* the nullSelectionItemId to set.
* @see #getNullSelectionItemId()
* @see #isSelected(Object)
* @see #select(Object)
*/
public void setNullSelectionItemId(Object nullSelectionItemId) {
if (nullSelectionItemId != null && isMultiSelect()) {
throw new IllegalStateException(
"Multiselect and NullSelectionItemId can not be set at the same time.");
}
this.nullSelectionItemId = nullSelectionItemId;
} | 3.68 |
dubbo_ExceptionFilter_setLogger | // For test purpose
public void setLogger(ErrorTypeAwareLogger logger) {
this.logger = logger;
} | 3.68 |
morf_SqlQueryDataSetProducer_open | /**
* Opens a connection and prepares it.
*/
@Override
public void open() {
try {
this.connection = dataSource.getConnection();
this.wasAutoCommit = connection.getAutoCommit();
// disable auto-commit on this connection for HSQLDB performance
wasAutoCommit = connection.getAutoCommit();
connection.setAutoCommit(false);
} catch (SQLException e) {
throw new RuntimeSqlException("Error opening connection", e);
}
} | 3.68 |
flink_S3TestCredentials_credentialsAvailable | /**
* Checks whether S3 test credentials are available in the environment variables of this JVM.
*/
private static boolean credentialsAvailable() {
return isNotEmpty(S3_TEST_BUCKET)
&& isNotEmpty(S3_TEST_ACCESS_KEY)
&& isNotEmpty(S3_TEST_SECRET_KEY);
} | 3.68 |
querydsl_QueryResults_isEmpty | /**
 * Return whether the current query window is empty
 *
 * @return true, if no results were found
*/
public boolean isEmpty() {
return results.isEmpty();
} | 3.68 |
zxing_PlanarYUVLuminanceSource_getThumbnailWidth | /**
* @return width of image from {@link #renderThumbnail()}
*/
public int getThumbnailWidth() {
return getWidth() / THUMBNAIL_SCALE_FACTOR;
} | 3.68 |
pulsar_ManagedLedgerConfig_setMaxSizePerLedgerMb | /**
* @param maxSizePerLedgerMb
* the maxSizePerLedgerMb to set
*/
public ManagedLedgerConfig setMaxSizePerLedgerMb(int maxSizePerLedgerMb) {
this.maxSizePerLedgerMb = maxSizePerLedgerMb;
return this;
} | 3.68 |
hadoop_AlwaysRestartPolicy_hasCompletedSuccessfully | /**
 * This is always false since these components never terminate.
 *
 * @param component the component to check
 * @return always false
*/
@Override public boolean hasCompletedSuccessfully(Component component) {
return false;
} | 3.68 |
hbase_ScannerModel_setCaching | /**
* @param caching the number of rows to fetch at once
*/
public void setCaching(int caching) {
this.caching = caching;
} | 3.68 |
hbase_WALSplitUtil_isSequenceIdFile | /**
* Is the given file a region open sequence id file.
*/
public static boolean isSequenceIdFile(final Path file) {
return file.getName().endsWith(SEQUENCE_ID_FILE_SUFFIX)
|| file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX);
} | 3.68 |
framework_WidgetMap_instantiate | /**
* Create a new instance of a connector based on its type.
*
* @param classType
* {@link ComponentConnector} class to instantiate
* @return new instance of the connector
*/
public ServerConnector instantiate(
Class<? extends ServerConnector> classType) {
return instmap.get(classType).get();
} | 3.68 |
rocketmq-connect_ProcessingContext_consumerRecord | /**
* @param consumedMessage the record
*/
public void consumerRecord(MessageExt consumedMessage) {
this.consumedMessage = consumedMessage;
reset();
} | 3.68 |
morf_ResultSetComparer_callbackValueMismatches | /**
* Fire callbacks for any mismatches on value columns.
*/
private int callbackValueMismatches(ResultSet left, ResultSet right, CompareCallback callBack, ResultSetMetaData metadataRight, List<Integer> valueCols, String[] keys, MismatchType mismatchType) throws SQLException {
int misMatchCount = 0;
for (int i : valueCols) {
Optional<ResultSetMismatch> mismatch = valueCheck(left, right, keys, i, metadataRight.getColumnType(i), mismatchType);
if (mismatch.isPresent()) {
callBack.mismatch(mismatch.get());
misMatchCount++;
}
}
return misMatchCount;
} | 3.68 |
hadoop_ContainerInfo_getAllocatedResources | /**
* Return a map of the allocated resources. The map key is the resource name,
* and the value is the resource value.
*
* @return the allocated resources map
*/
public Map<String, Long> getAllocatedResources() {
return Collections.unmodifiableMap(allocatedResources);
} | 3.68 |
hbase_HbckChore_getLastReport | /**
 * Returns the last published report produced by the last successful execution of this chore.
*/
public HbckReport getLastReport() {
return lastReport;
} | 3.68 |
hudi_SimpleExecutor_execute | /**
 * Consumes records from the input iterator directly, without any producers or an inner message queue.
*/
@Override
public E execute() {
try {
LOG.info("Starting consumer, consuming records from the records iterator directly");
while (itr.hasNext()) {
O payload = transformFunction.apply(itr.next());
consumer.consume(payload);
}
return consumer.finish();
} catch (Exception e) {
LOG.error("Failed consuming records", e);
throw new HoodieException(e);
}
} | 3.68 |
flink_FutureUtils_runIfNotDoneAndGet | /**
* Run the given {@code RunnableFuture} if it is not done, and then retrieves its result.
*
* @param future to run if not done and get
* @param <T> type of the result
* @return the result after running the future
* @throws ExecutionException if a problem occurred
* @throws InterruptedException if the current thread has been interrupted
*/
public static <T> T runIfNotDoneAndGet(RunnableFuture<T> future)
throws ExecutionException, InterruptedException {
if (null == future) {
return null;
}
if (!future.isDone()) {
future.run();
}
return future.get();
} | 3.68 |
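Editor-added usage sketch, not part of the dataset: wrapping a Callable in a java.util.concurrent.FutureTask and letting the helper above run it synchronously on the calling thread (assuming the FutureUtils class from this row is on the classpath).

import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.RunnableFuture;

public class RunIfNotDoneExample {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        RunnableFuture<Integer> task = new FutureTask<>(() -> 21 * 2);
        // The task is not done yet, so runIfNotDoneAndGet runs it here and then returns its result.
        Integer result = FutureUtils.runIfNotDoneAndGet(task);
        System.out.println(result); // prints 42
    }
}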
framework_VTransferable_getData | /**
* Returns previously saved data that is referred to by the given
* identifier.
*
* @param dataFlavor
* the identifier for the data object
* @return the data object, or {@code null} if not found
*
* @see #setData(String, Object)
*/
public Object getData(String dataFlavor) {
return variables.get(dataFlavor);
} | 3.68 |
hadoop_SnappyCompressor_finished | /**
* Returns true if the end of the compressed
* data output stream has been reached.
*
* @return <code>true</code> if the end of the compressed
* data output stream has been reached.
*/
@Override
public boolean finished() {
// Check if all uncompressed data has been consumed
return (finish && finished && compressedDirectBuf.remaining() == 0);
} | 3.68 |
hbase_MiniZooKeeperCluster_shutdown | /**
* @throws IOException if waiting for the shutdown of a server fails
*/
public void shutdown() throws IOException {
// shut down all the zk servers
for (int i = 0; i < standaloneServerFactoryList.size(); i++) {
NIOServerCnxnFactory standaloneServerFactory = standaloneServerFactoryList.get(i);
int clientPort = clientPortList.get(i);
standaloneServerFactory.shutdown();
if (!waitForServerDown(clientPort, connectionTimeout)) {
throw new IOException("Waiting for shutdown of standalone server at port=" + clientPort
+ ", timeout=" + this.connectionTimeout);
}
}
standaloneServerFactoryList.clear();
for (ZooKeeperServer zkServer : zooKeeperServers) {
// Explicitly close ZKDatabase since ZookeeperServer does not close them
zkServer.getZKDatabase().close();
}
zooKeeperServers.clear();
// clear everything
if (started) {
started = false;
activeZKServerIndex = 0;
clientPortList.clear();
LOG.info("Shutdown MiniZK cluster with all ZK servers");
}
} | 3.68 |
hadoop_Quota_setQuotaInternal | /**
* Set quota for the federation path.
* @param path Federation path.
* @param locations Locations of the Federation path.
* @param namespaceQuota Name space quota.
* @param storagespaceQuota Storage space quota.
* @param type StorageType that the space quota is intended to be set on.
* @throws IOException If the quota system is disabled.
*/
void setQuotaInternal(String path, List<RemoteLocation> locations,
long namespaceQuota, long storagespaceQuota, StorageType type)
throws IOException {
rpcServer.checkOperation(OperationCategory.WRITE);
// Set quota for current path and its children mount table path.
if (locations == null) {
locations = getQuotaRemoteLocations(path);
}
if (LOG.isDebugEnabled()) {
for (RemoteLocation loc : locations) {
LOG.debug("Set quota for path: nsId: {}, dest: {}.",
loc.getNameserviceId(), loc.getDest());
}
}
RemoteMethod method = new RemoteMethod("setQuota",
new Class<?>[] {String.class, long.class, long.class,
StorageType.class},
new RemoteParam(), namespaceQuota, storagespaceQuota, type);
rpcClient.invokeConcurrent(locations, method, false, false);
} | 3.68 |
dubbo_RpcStatus_getFailed | /**
* get failed.
*
* @return failed
*/
public int getFailed() {
return failed.get();
} | 3.68 |
hudi_SparkUtil_initLauncher | /**
* TODO: Need to fix a bunch of hardcoded stuff here eg: history server, spark distro.
*/
public static SparkLauncher initLauncher(String propertiesFile) throws URISyntaxException {
String currentJar = new File(SparkUtil.class.getProtectionDomain().getCodeSource().getLocation().toURI().getPath())
.getAbsolutePath();
Map<String, String> env = SparkEnvCommand.env;
SparkLauncher sparkLauncher =
new SparkLauncher(env).setAppResource(currentJar).setMainClass(SparkMain.class.getName());
if (!StringUtils.isNullOrEmpty(propertiesFile)) {
sparkLauncher.setPropertiesFile(propertiesFile);
}
File libDirectory = new File(new File(currentJar).getParent(), "lib");
if (libDirectory.exists()) {
// When directly using hudi-cli module, the jars under the lib directory
// generated by the compilation is required
Arrays.stream(libDirectory.list()).forEach(library ->
sparkLauncher.addJar(new File(libDirectory, library).getAbsolutePath()));
} else {
// When using hudi-cli-bundle, we also need to add the hudi-spark*-bundle
// so that the Hudi Spark job can be launched
String sparkBundleJarPath = System.getenv("SPARK_BUNDLE_JAR");
if (!StringUtils.isNullOrEmpty(sparkBundleJarPath)) {
sparkLauncher.addJar(sparkBundleJarPath);
}
}
return sparkLauncher;
} | 3.68 |
pulsar_ClientConfiguration_isTlsAllowInsecureConnection | /**
* @return whether the Pulsar client accept untrusted TLS certificate from broker
*/
public boolean isTlsAllowInsecureConnection() {
return confData.isTlsAllowInsecureConnection();
} | 3.68 |
hbase_ConfServlet_writeResponse | /**
* Guts of the servlet - extracted for easy testing.
*/
static void writeResponse(Configuration conf, Writer out, String format)
throws IOException, BadFormatException {
Configuration maskedConf = mask(conf);
if (FORMAT_JSON.equals(format)) {
Configuration.dumpConfiguration(maskedConf, out);
} else if (FORMAT_XML.equals(format)) {
maskedConf.writeXml(out);
} else {
throw new BadFormatException("Bad format: " + format);
}
} | 3.68 |
hbase_MapReduceBackupMergeJob_copyFile | /**
* Copy file in DFS from p to newPath
* @param fs file system
* @param p old path
* @param newPath new path
* @throws IOException exception
*/
protected void copyFile(FileSystem fs, Path p, Path newPath) throws IOException {
try (InputStream in = fs.open(p); OutputStream out = fs.create(newPath, true)) {
IOUtils.copy(in, out);
}
boolean exists = fs.exists(newPath);
if (!exists) {
throw new IOException("Failed to copy meta file to: " + newPath);
}
} | 3.68 |
flink_PojoSerializer_getSubclassSerializer | /**
* Fetches cached serializer for a non-registered subclass; also creates the serializer if it
* doesn't exist yet.
*
* <p>This method is also exposed to package-private access for testing purposes.
*/
TypeSerializer<?> getSubclassSerializer(Class<?> subclass) {
TypeSerializer<?> result = subclassSerializerCache.get(subclass);
if (result == null) {
result = createSubclassSerializer(subclass);
subclassSerializerCache.put(subclass, result);
}
return result;
} | 3.68 |
hadoop_CachingGetSpaceUsed_initRefreshThread | /**
 * Starts the background refresh thread. runImmediately should be set to true if the first refresh was skipped.
 * @param runImmediately whether to refresh immediately on start; defaults to false.
*/
private void initRefreshThread(boolean runImmediately) {
if (refreshInterval > 0) {
refreshUsed = new Thread(new RefreshThread(this, runImmediately),
"refreshUsed-" + dirPath);
refreshUsed.setDaemon(true);
refreshUsed.start();
} else {
running.set(false);
refreshUsed = null;
}
} | 3.68 |
hbase_ReplicationSourceManager_createSource | /**
* @return a new 'classic' user-space replication source.
* @param queueId the id of the replication queue to associate the ReplicationSource with.
* @see #createCatalogReplicationSource(RegionInfo) for creating a ReplicationSource for meta.
*/
private ReplicationSourceInterface createSource(ReplicationQueueData queueData,
ReplicationPeer replicationPeer) throws IOException {
ReplicationSourceInterface src = ReplicationSourceFactory.create(conf, queueData.getId());
// Init the just created replication source. Pass the default walProvider's wal file length
// provider. Presumption is we replicate user-space Tables only. For hbase:meta region replica
// replication, see #createCatalogReplicationSource().
WALFileLengthProvider walFileLengthProvider = this.walFactory.getWALProvider() != null
? this.walFactory.getWALProvider().getWALFileLengthProvider()
: p -> OptionalLong.empty();
src.init(conf, fs, this, queueStorage, replicationPeer, server, queueData, clusterId,
walFileLengthProvider, new MetricsSource(queueData.getId().toString()));
return src;
} | 3.68 |
flink_TableFactoryUtil_createTableSinkForCatalogTable | /**
* Creates a table sink for a {@link CatalogTable} using table factory associated with the
* catalog.
*/
public static Optional<TableSink> createTableSinkForCatalogTable(
Catalog catalog, TableSinkFactory.Context context) {
TableFactory tableFactory = catalog.getTableFactory().orElse(null);
if (tableFactory instanceof TableSinkFactory) {
return Optional.ofNullable(((TableSinkFactory) tableFactory).createTableSink(context));
}
return Optional.empty();
} | 3.68 |
hbase_WALKeyImpl_getOrigLogSeqNum | /**
 * Return a positive long if the current WALKeyImpl was created from a replay edit; a replay edit is an
* edit that came in when replaying WALs of a crashed server.
* @return original sequence number of the WALEdit
*/
@Override
public long getOrigLogSeqNum() {
return this.origLogSeqNum;
} | 3.68 |
flink_HadoopOutputCollector_setFlinkCollector | /**
* Set the wrapped Flink collector.
*
* @param flinkCollector The wrapped Flink OutputCollector.
*/
public void setFlinkCollector(Collector<Tuple2<KEY, VALUE>> flinkCollector) {
this.flinkCollector = flinkCollector;
} | 3.68 |
rocketmq-connect_WorkerConnector_shutdown | /**
* Stop this connector. This method does not block, it only triggers shutdown. Use
* #{@link #awaitShutdown} to block until completion.
*/
public synchronized void shutdown() {
log.info("Scheduled shutdown for {}", this);
stopping = true;
notify();
} | 3.68 |
pulsar_WatermarkTimeTriggerPolicy_handleWaterMarkEvent | /**
 * Invokes the trigger for all pending windows up to the
* watermark timestamp. The end ts of the window is set
* in the eviction policy context so that the events falling
* within that window can be processed.
*/
private void handleWaterMarkEvent(Event<T> event) {
long watermarkTs = event.getTimestamp();
long windowEndTs = nextWindowEndTs;
if (log.isDebugEnabled()) {
log.debug("Window end ts {} Watermark ts {}", windowEndTs, watermarkTs);
}
while (windowEndTs <= watermarkTs) {
long currentCount = windowManager.getEventCount(windowEndTs);
evictionPolicy.setContext(new DefaultEvictionContext(windowEndTs, currentCount));
if (handler.onTrigger()) {
windowEndTs += slidingIntervalMs;
} else {
/*
* No events were found in the previous window interval.
* Scan through the events in the queue to find the next
* window intervals based on event ts.
*/
long ts = getNextAlignedWindowTs(windowEndTs, watermarkTs);
if (log.isDebugEnabled()) {
log.debug("Next aligned window end ts {}", ts);
}
if (ts == Long.MAX_VALUE) {
if (log.isDebugEnabled()) {
log.debug("No events to process between {} and watermark ts {}",
windowEndTs, watermarkTs);
}
break;
}
windowEndTs = ts;
}
}
nextWindowEndTs = windowEndTs;
} | 3.68 |
hbase_LruBlockCache_getStats | /**
* Get counter statistics for this cache.
* <p>
* Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes.
*/
@Override
public CacheStats getStats() {
return this.stats;
} | 3.68 |
flink_Tuple1_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple1)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple1 tuple = (Tuple1) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
return true;
} | 3.68 |
framework_Upload_addSucceededListener | /**
* Adds the upload success event listener.
*
* @param listener
* the Listener to be added, not null
* @since 8.0
*/
public Registration addSucceededListener(SucceededListener listener) {
return addListener(SucceededEvent.class, listener,
UPLOAD_SUCCEEDED_METHOD);
} | 3.68 |
hbase_SyncTable_syncRowCells | /**
* Compare the cells for the given row from the source and target tables. Count and log any
* differences. If not a dry run, output a Put and/or Delete needed to sync the target table to
* match the source table.
*/
private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceCells,
CellScanner targetCells) throws IOException, InterruptedException {
Put put = null;
Delete delete = null;
long matchingCells = 0;
boolean matchingRow = true;
Cell sourceCell = sourceCells.nextCellInRow();
Cell targetCell = targetCells.nextCellInRow();
while (sourceCell != null || targetCell != null) {
int cellKeyComparison = compareCellKeysWithinRow(sourceCell, targetCell);
if (cellKeyComparison < 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Target missing cell: " + sourceCell);
}
context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
matchingRow = false;
if (!dryRun && doPuts) {
if (put == null) {
put = new Put(rowKey);
}
sourceCell = checkAndResetTimestamp(sourceCell);
put.add(sourceCell);
}
sourceCell = sourceCells.nextCellInRow();
} else if (cellKeyComparison > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Source missing cell: " + targetCell);
}
context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
matchingRow = false;
if (!dryRun && doDeletes) {
if (delete == null) {
delete = new Delete(rowKey);
}
// add a tombstone to exactly match the target cell that is missing on the source
delete.addColumn(CellUtil.cloneFamily(targetCell), CellUtil.cloneQualifier(targetCell),
targetCell.getTimestamp());
}
targetCell = targetCells.nextCellInRow();
} else {
// the cell keys are equal, now check values
if (CellUtil.matchingValue(sourceCell, targetCell)) {
matchingCells++;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Different values: ");
LOG.debug(" source cell: " + sourceCell + " value: "
+ Bytes.toString(sourceCell.getValueArray(), sourceCell.getValueOffset(),
sourceCell.getValueLength()));
LOG.debug(" target cell: " + targetCell + " value: "
+ Bytes.toString(targetCell.getValueArray(), targetCell.getValueOffset(),
targetCell.getValueLength()));
}
context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
matchingRow = false;
if (!dryRun && doPuts) {
// overwrite target cell
if (put == null) {
put = new Put(rowKey);
}
sourceCell = checkAndResetTimestamp(sourceCell);
put.add(sourceCell);
}
}
sourceCell = sourceCells.nextCellInRow();
targetCell = targetCells.nextCellInRow();
}
if (!dryRun && sourceTableHash.scanBatch > 0) {
if (put != null && put.size() >= sourceTableHash.scanBatch) {
context.write(new ImmutableBytesWritable(rowKey), put);
put = null;
}
if (delete != null && delete.size() >= sourceTableHash.scanBatch) {
context.write(new ImmutableBytesWritable(rowKey), delete);
delete = null;
}
}
}
if (!dryRun) {
if (put != null) {
context.write(new ImmutableBytesWritable(rowKey), put);
}
if (delete != null) {
context.write(new ImmutableBytesWritable(rowKey), delete);
}
}
if (matchingCells > 0) {
context.getCounter(Counter.MATCHINGCELLS).increment(matchingCells);
}
if (matchingRow) {
context.getCounter(Counter.MATCHINGROWS).increment(1);
return true;
} else {
context.getCounter(Counter.ROWSWITHDIFFS).increment(1);
return false;
}
} | 3.68 |
hbase_SaslClientAuthenticationProviders_selectProvider | /**
* Chooses the best authentication provider and corresponding token given the HBase cluster
* identifier and the user.
*/
public Pair<SaslClientAuthenticationProvider, Token<? extends TokenIdentifier>>
selectProvider(String clusterId, User clientUser) {
return selector.selectProvider(clusterId, clientUser);
} | 3.68 |
framework_VScrollTable_unlinkRow | /**
* @return false if couldn't remove row
*/
protected boolean unlinkRow(boolean fromBeginning) {
if (lastRendered - firstRendered < 0) {
return false;
}
int actualIx;
if (fromBeginning) {
actualIx = 0;
firstRendered++;
} else {
actualIx = renderedRows.size() - 1;
if (postponeSanityCheckForLastRendered) {
--lastRendered;
} else {
setLastRendered(lastRendered - 1);
}
}
if (actualIx >= 0) {
unlinkRowAtActualIndex(actualIx);
fixSpacers();
return true;
}
return false;
} | 3.68 |
flink_ArrayColumnReader_readPrimitiveTypedRow | // Need to be in consistent with that VectorizedPrimitiveColumnReader#readBatchHelper
// TODO Reduce the duplicated code
private Object readPrimitiveTypedRow(LogicalType type) {
switch (type.getTypeRoot()) {
case CHAR:
case VARCHAR:
case BINARY:
case VARBINARY:
return dataColumn.readBytes();
case BOOLEAN:
return dataColumn.readBoolean();
case TIME_WITHOUT_TIME_ZONE:
case DATE:
case INTEGER:
return dataColumn.readInteger();
case TINYINT:
return dataColumn.readTinyInt();
case SMALLINT:
return dataColumn.readSmallInt();
case BIGINT:
return dataColumn.readLong();
case FLOAT:
return dataColumn.readFloat();
case DOUBLE:
return dataColumn.readDouble();
case DECIMAL:
switch (descriptor.getPrimitiveType().getPrimitiveTypeName()) {
case INT32:
return dataColumn.readInteger();
case INT64:
return dataColumn.readLong();
case BINARY:
case FIXED_LEN_BYTE_ARRAY:
return dataColumn.readBytes();
}
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return dataColumn.readTimestamp();
default:
throw new RuntimeException("Unsupported type in the list: " + type);
}
} | 3.68 |
flink_TestcontainersSettings_getNetwork | /** @return The network. */
public Network getNetwork() {
return network;
} | 3.68 |
flink_HighAvailabilityServices_getClusterRestEndpointLeaderElection | /** Gets the {@link LeaderElection} for the cluster's rest endpoint. */
default LeaderElection getClusterRestEndpointLeaderElection() {
// for backwards compatibility we delegate to getWebMonitorLeaderElectionService
// all implementations of this interface should override
// getClusterRestEndpointLeaderElectionService, though
return getWebMonitorLeaderElection();
} | 3.68 |
hudi_UpgradeDowngrade_run | /**
* Perform Upgrade or Downgrade steps if required and updated table version if need be.
* <p>
* Starting from version 0.6.0, this upgrade/downgrade step will be added in all write paths.
* <p>
 * Essentially, if a dataset was created using a previous table version in an older release,
 * and the Hoodie version was upgraded to a new release with a new table version supported,
 * the Hoodie table version gets bumped to the new version and some upgrade steps need
 * to be executed before doing any writes.
 * <p>
 * Similarly, if a dataset was created using a newer table version in a newer release,
 * and then Hoodie was downgraded to an older release or to an older Hoodie table version,
 * then some downgrade steps need to be executed before proceeding with any writes.
* <p>
* Below shows the table version corresponding to the Hudi release:
* Hudi release -> table version
* pre 0.6.0 -> v0
* 0.6.0 to 0.8.0 -> v1
* 0.9.0 -> v2
* 0.10.0 -> v3
* 0.11.0 -> v4
* 0.12.0 to 0.13.0 -> v5
* 0.14.0 to current -> v6
* <p>
* On a high level, these are the steps performed
* <p>
 * Step1 : Understand the current Hoodie table version and the table version from the hoodie.properties file
 * Step2 : Delete any left-over .updated file from a previous upgrade/downgrade
 * Step3 : If the versions are different, perform the upgrade/downgrade.
 * Step4 : Copy hoodie.properties -> hoodie.properties.updated with the version updated
 * Step5 : Rename hoodie.properties.updated to hoodie.properties
* </p>
*
* @param toVersion version to which upgrade or downgrade has to be done.
* @param instantTime current instant time that should not be touched.
*/
public void run(HoodieTableVersion toVersion, String instantTime) {
// Change metadata table version automatically
if (toVersion.versionCode() >= HoodieTableVersion.FOUR.versionCode()) {
String metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(
metaClient.getBasePathV2().toString());
try {
if (metaClient.getFs().exists(new Path(metadataTablePath))) {
HoodieTableMetaClient mdtMetaClient = HoodieTableMetaClient.builder()
.setConf(metaClient.getHadoopConf()).setBasePath(metadataTablePath).build();
HoodieWriteConfig mdtWriteConfig = HoodieMetadataWriteUtils.createMetadataWriteConfig(
config, HoodieFailedWritesCleaningPolicy.EAGER);
new UpgradeDowngrade(mdtMetaClient, mdtWriteConfig, context, upgradeDowngradeHelper)
.run(toVersion, instantTime);
}
} catch (Exception e) {
LOG.warn("Unable to upgrade or downgrade the metadata table to version " + toVersion
+ ", ignoring the error and continue.", e);
}
}
// Fetch version from property file and current version
HoodieTableVersion fromVersion = metaClient.getTableConfig().getTableVersion();
if (!needsUpgradeOrDowngrade(toVersion)) {
return;
}
// Perform the actual upgrade/downgrade; this has to be idempotent, for now.
LOG.info("Attempting to move table from version " + fromVersion + " to " + toVersion);
Map<ConfigProperty, String> tableProps = new Hashtable<>();
if (fromVersion.versionCode() < toVersion.versionCode()) {
// upgrade
while (fromVersion.versionCode() < toVersion.versionCode()) {
HoodieTableVersion nextVersion = HoodieTableVersion.versionFromCode(fromVersion.versionCode() + 1);
tableProps.putAll(upgrade(fromVersion, nextVersion, instantTime));
fromVersion = nextVersion;
}
} else {
// downgrade
while (fromVersion.versionCode() > toVersion.versionCode()) {
HoodieTableVersion prevVersion = HoodieTableVersion.versionFromCode(fromVersion.versionCode() - 1);
tableProps.putAll(downgrade(fromVersion, prevVersion, instantTime));
fromVersion = prevVersion;
}
}
// Reload the meta client to get the latest table config (which could have been updated due to metadata table)
metaClient = HoodieTableMetaClient.reload(metaClient);
// Write out the current version in hoodie.properties.updated file
for (Map.Entry<ConfigProperty, String> entry : tableProps.entrySet()) {
metaClient.getTableConfig().setValue(entry.getKey(), entry.getValue());
}
metaClient.getTableConfig().setTableVersion(toVersion);
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps());
} | 3.68 |
hadoop_ManifestSuccessData_getDate | /** @return timestamp as date; no expectation of parseability. */
public String getDate() {
return date;
} | 3.68 |
hbase_AbstractFSWAL_markFutureDoneAndOffer | /**
* Helper that marks the future as DONE and offers it back to the cache.
*/
protected void markFutureDoneAndOffer(SyncFuture future, long txid, Throwable t) {
future.done(txid, t);
syncFutureCache.offer(future);
} | 3.68 |
hadoop_TimelineReaderWebServicesUtils_parseLongStr | /**
* Interpret passed string as a long.
* @param str Passed string.
* @return long representation if string is not null, null otherwise.
*/
static Long parseLongStr(String str) {
return str == null ? null : Long.parseLong(str.trim());
} | 3.68 |
flink_FsStateBackend_getCheckpointPath | /**
* Gets the base directory where all the checkpoints are stored. The job-specific checkpoint
* directory is created inside this directory.
*
* @return The base directory for checkpoints.
*/
@Nonnull
@Override
public Path getCheckpointPath() {
// we know that this can never be null by the way of constructor checks
//noinspection ConstantConditions
return super.getCheckpointPath();
} | 3.68 |
MagicPlugin_MapController_createMap | // This is copied from MagicController, which I'm still trying to keep out of this class. Shrug?
public ItemStack createMap(int mapId) {
short durability = CompatibilityLib.isCurrentVersion() ? 0 : (short)mapId;
ItemStack mapItem = CompatibilityLib.getDeprecatedUtils().createItemStack(DefaultMaterials.getFilledMap(), 1, durability);
if (CompatibilityLib.isCurrentVersion()) {
mapItem = CompatibilityLib.getItemUtils().makeReal(mapItem);
CompatibilityLib.getNBTUtils().setInt(mapItem, "map", mapId);
}
return mapItem;
} | 3.68 |
hadoop_MawoConfiguration_getAutoShutdownWorkers | /**
* Check if worker auto shutdown feature is enabled.
* @return value of mawo.master.auto-shutdown-workers
*/
public boolean getAutoShutdownWorkers() {
return Boolean.parseBoolean(configsMap.get(AUTO_SHUTDOWN_WORKERS));
} | 3.68 |
framework_VBrowserFrame_createIFrameElement | /**
 * Always creates a new iframe inside the widget, replacing any previous iframe.
 *
 * @param src the URL to load in the iframe
 * @return the newly created iframe element
*/
protected IFrameElement createIFrameElement(String src) {
String name = null;
// Remove alt text
if (altElement != null) {
getElement().removeChild(altElement);
altElement = null;
}
// Remove old iframe
if (iframe != null) {
name = iframe.getAttribute("name");
getElement().removeChild(iframe);
iframe = null;
}
iframe = Document.get().createIFrameElement();
iframe.setSrc(src);
iframe.setFrameBorder(0);
iframe.setAttribute("width", "100%");
iframe.setAttribute("height", "100%");
iframe.setAttribute("allowTransparency", "true");
getElement().appendChild(iframe);
// Reset old attributes (except src)
if (name != null) {
iframe.setName(name);
}
return iframe;
} | 3.68 |
framework_AutoScroller_getScrollArea | /**
* Returns the size of the auto scroll area in pixels.
* <p>
* Defaults to 100px.
*
* @return size in pixels
*/
public int getScrollArea() {
return scrollAreaPX;
} | 3.68 |
framework_BindingValidationStatus_getField | /**
* Gets the bound field for this status.
*
* @return the field
*/
public HasValue<?> getField() {
return getBinding().getField();
} | 3.68 |
framework_ComponentRootSetter_canSetRoot | /**
* Checks if the given custom component or composite may accept a root
* component.
* <p>
* For internal use only.
*
* @param customComponent
* the custom component or composite
 * @return true if a root component can be set, false otherwise
* @since 8.4
*
*/
public static boolean canSetRoot(Component customComponent) {
if (customComponent instanceof CustomComponent) {
return true;
}
if (customComponent instanceof Composite) {
return ((Composite) customComponent).getCompositionRoot() == null;
}
return false;
} | 3.68 |
hadoop_IncrementalBlockReportManager_sendIBRs | /** Send IBRs to namenode. */
void sendIBRs(DatanodeProtocol namenode, DatanodeRegistration registration,
String bpid, String nnRpcLatencySuffix) throws IOException {
// Generate a list of the pending reports for each storage under the lock
final StorageReceivedDeletedBlocks[] reports = generateIBRs();
if (reports.length == 0) {
// Nothing new to report.
return;
}
// Send incremental block reports to the Namenode outside the lock
if (LOG.isDebugEnabled()) {
LOG.debug("call blockReceivedAndDeleted: " + Arrays.toString(reports));
}
boolean success = false;
final long startTime = monotonicNow();
try {
namenode.blockReceivedAndDeleted(registration, bpid, reports);
success = true;
} finally {
if (success) {
dnMetrics.addIncrementalBlockReport(monotonicNow() - startTime,
nnRpcLatencySuffix);
lastIBR = startTime;
} else {
// If we didn't succeed in sending the report, put all of the
// blocks back onto our queue, but only in the case where we
// didn't put something newer in the meantime.
putMissing(reports);
LOG.warn("Failed to call blockReceivedAndDeleted: {}, nnId: {}"
+ ", duration(ms): {}", Arrays.toString(reports),
nnRpcLatencySuffix, monotonicNow() - startTime);
}
}
} | 3.68 |
hbase_AsyncNonMetaRegionLocator_getRegionLocationInCache | // only used for testing whether we have cached the location for a region.
RegionLocations getRegionLocationInCache(TableName tableName, byte[] row) {
TableCache tableCache = cache.get(tableName);
if (tableCache == null) {
return null;
}
return locateRowInCache(tableCache, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
} | 3.68 |
flink_ScalaFutureUtils_toJava | /**
* Converts a Scala {@link Future} to a {@link CompletableFuture}.
*
* @param scalaFuture to convert to a Java 8 CompletableFuture
* @param <T> type of the future value
* @param <U> type of the original future
* @return Java 8 CompletableFuture
*/
public static <T, U extends T> CompletableFuture<T> toJava(Future<U> scalaFuture) {
final CompletableFuture<T> result = new CompletableFuture<>();
scalaFuture.onComplete(
new OnComplete<U>() {
@Override
public void onComplete(Throwable failure, U success) {
if (failure != null) {
result.completeExceptionally(failure);
} else {
result.complete(success);
}
}
},
DirectExecutionContext.INSTANCE);
return result;
} | 3.68 |
hudi_RocksDBDAO_get | /**
* Retrieve a value for a given key in a column family.
*
* @param columnFamilyName Column Family Name
* @param key Key to be retrieved
* @param <T> Type of object stored.
*/
public <K extends Serializable, T extends Serializable> T get(String columnFamilyName, K key) {
ValidationUtils.checkArgument(!closed);
try {
byte[] val = getRocksDB().get(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key));
return val == null ? null : SerializationUtils.deserialize(val);
} catch (Exception e) {
throw new HoodieException(e);
}
} | 3.68 |
flink_SpillingBuffer_moveAll | /**
* Utility method that moves elements. It avoids copying the data into a dedicated array first,
* as the {@link ArrayList#addAll(java.util.Collection)} method does.
*
* @param <E>
* @param source
* @param target
*/
private static final <E> void moveAll(ArrayList<E> source, ArrayList<E> target) {
target.ensureCapacity(target.size() + source.size());
for (int i = source.size() - 1; i >= 0; i--) {
target.add(source.remove(i));
}
} | 3.68 |
framework_ContainerHierarchicalWrapper_size | /*
* Gets the number of Items in the Container. Don't add a JavaDoc comment
* here, we use the default documentation from implemented interface.
*/
@Override
public int size() {
int size = container.size();
assert size >= 0;
return size;
} | 3.68 |
pulsar_ManagedCursorImpl_setReadPosition | /**
* Internal version of seek that doesn't do the validation check.
*
* @param newReadPositionInt
*/
void setReadPosition(Position newReadPositionInt) {
checkArgument(newReadPositionInt instanceof PositionImpl);
if (this.markDeletePosition == null
|| ((PositionImpl) newReadPositionInt).compareTo(this.markDeletePosition) > 0) {
this.readPosition = (PositionImpl) newReadPositionInt;
ledger.onCursorReadPositionUpdated(this, newReadPositionInt);
}
} | 3.68 |
framework_VCalendarAction_getActionEndDate | /**
* Get the date and time when the action ends.
*
 * @return the end date and time of the action
*/
public Date getActionEndDate() {
return actionEndDate;
} | 3.68 |
open-banking-gateway_PathQueryHeadersMapperTemplate_forExecution | /**
* Converts context object into object that can be used for ASPSP API call.
* @param context Context to convert
* @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls
*/
public ValidatedPathQueryHeaders<P, Q, H> forExecution(C context) {
return new ValidatedPathQueryHeaders<>(
toPath.map(context),
toQuery.map(context),
toHeaders.map(context)
);
} | 3.68 |
hbase_HttpServer_getWebAppsPath | /**
* Get the pathname to the webapps files.
 * @param webapps the name of the webapps directory on the CLASSPATH
 * @param appName eg "secondary" or "datanode"
* @return the pathname as a URL
* @throws FileNotFoundException if 'webapps' directory cannot be found on CLASSPATH.
*/
protected String getWebAppsPath(String webapps, String appName) throws FileNotFoundException {
URL url = getClass().getClassLoader().getResource(webapps + "/" + appName);
if (url == null) {
throw new FileNotFoundException(webapps + "/" + appName + " not found in CLASSPATH");
}
String urlString = url.toString();
return urlString.substring(0, urlString.lastIndexOf('/'));
} | 3.68 |
Activiti_AstRightValue_isLiteralText | /**
* Answer <code>false</code>
*/
public final boolean isLiteralText() {
return false;
} | 3.68 |
hadoop_ClientThrottlingAnalyzer_suspendIfNecessary | /**
* Suspends the current storage operation, as necessary, to reduce throughput.
*/
public void suspendIfNecessary() {
int duration = sleepDuration;
if (duration > 0) {
try {
Thread.sleep(duration);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
} | 3.68 |
querydsl_BeanMap_get | /**
* Returns the value of the bean's property with the given name.
* <p>
* The given name must be a {@code String} and must not be
* null; otherwise, this method returns {@code null}.
* If the bean defines a property with the given name, the value of
* that property is returned. Otherwise, {@code null} is
* returned.
* <p>
* Write-only properties will not be matched as the test operates against
* property read methods.
*
* @param name the name of the property whose value to return
* @return the value of the property with that name
*/
public Object get(String name) {
if (bean != null) {
Method method = getReadMethod(name);
if (method != null) {
try {
return method.invoke(bean, NULL_ARGUMENTS);
} catch (IllegalAccessException | NullPointerException | InvocationTargetException | IllegalArgumentException e) {
}
}
}
return null;
} | 3.68 |
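Editor-added illustration, not part of the dataset: reading a bean property reflectively through the get(...) shown above. The single-argument BeanMap constructor used here is an assumption.

public class BeanMapGetExample {
    public static class Person {
        private final String name;
        public Person(String name) { this.name = name; }
        public String getName() { return name; }
    }

    public static void main(String[] args) {
        BeanMap map = new BeanMap(new Person("Ada")); // assumed constructor wrapping the bean
        Object value = map.get("name");               // resolved via Person#getName()
        System.out.println(value);                    // prints Ada
    }
}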
hbase_MasterWalManager_getWALDirPaths | /**
* Returns List of all RegionServer WAL dirs; i.e. this.rootDir/HConstants.HREGION_LOGDIR_NAME.
*/
public FileStatus[] getWALDirPaths(final PathFilter filter) throws IOException {
Path walDirPath = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME);
FileStatus[] walDirForServerNames = CommonFSUtils.listStatus(fs, walDirPath, filter);
return walDirForServerNames == null ? new FileStatus[0] : walDirForServerNames;
} | 3.68 |
hbase_CompositeImmutableSegment_getDataSize | /** Returns the sum of all cell sizes. */
@Override
public long getDataSize() {
return this.keySize;
} | 3.68 |
hadoop_ReservationId_parseReservationId | /**
* Parse the string argument as a {@link ReservationId}
*
* @param reservationId the string representation of the {@link ReservationId}
* @return the {@link ReservationId} corresponding to the input string if
* valid, null if input is null
* @throws IOException if unable to parse the input string
*/
@Public
@Unstable
public static ReservationId parseReservationId(String reservationId)
throws IOException {
if (reservationId == null) {
return null;
}
if (!reservationId.startsWith(reserveIdStrPrefix)) {
throw new IOException("The specified reservation id is invalid: "
+ reservationId);
}
String[] resFields = reservationId.split("_");
if (resFields.length != 3) {
throw new IOException("The specified reservation id is not parseable: "
+ reservationId);
}
return newInstance(Long.parseLong(resFields[1]),
Long.parseLong(resFields[2]));
} | 3.68 |
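Editor-added illustration, not part of the dataset. Assuming the usual "reservation_" prefix for YARN reservation ids, the parser above accepts strings of the form <prefix><clusterTimestamp>_<id>:

// Hypothetical example value; the two numeric fields are the cluster timestamp and the sequence id.
ReservationId id = ReservationId.parseReservationId("reservation_1619193346000_0001");

// A null input simply returns null, as documented above.
ReservationId none = ReservationId.parseReservationId(null);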
hbase_ExecutorService_getRunningTasks | /**
* @return a map of the threads currently running tasks inside this executor. Each key is an
* active thread, and the value is the task that is currently running. Note that this is
* not a stable snapshot of the map.
*/
public ConcurrentMap<Thread, Runnable> getRunningTasks() {
return running;
} | 3.68 |
framework_ColumnProperty_isReadOnlyChangeAllowed | /**
* Returns whether the read-only status of this property can be changed
* using {@link #setReadOnly(boolean)}.
* <p>
* Used to prevent setting to read/write mode a property that is not allowed
* to be written by the underlying database. Also used for values like
* VERSION and AUTO_INCREMENT fields that might be set to read-only by the
* container but the database still allows writes.
*
* @return true if the read-only status can be changed, false otherwise.
*/
public boolean isReadOnlyChangeAllowed() {
return allowReadOnlyChange;
} | 3.68 |
pulsar_MessageParser_parseMessage | /**
 * Parse a raw Pulsar entry payload and extract all the individual messages that may be included in the batch. The
* provided {@link MessageProcessor} will be invoked for each individual message.
*/
public static void parseMessage(TopicName topicName, long ledgerId, long entryId, ByteBuf headersAndPayload,
MessageProcessor processor, int maxMessageSize) throws IOException {
ByteBuf payload = headersAndPayload;
ByteBuf uncompressedPayload = null;
ReferenceCountedMessageMetadata refCntMsgMetadata = null;
try {
if (!verifyChecksum(topicName, headersAndPayload, ledgerId, entryId)) {
// discard message with checksum error
return;
}
refCntMsgMetadata = ReferenceCountedMessageMetadata.get(headersAndPayload);
MessageMetadata msgMetadata = refCntMsgMetadata.getMetadata();
try {
Commands.parseMessageMetadata(payload, msgMetadata);
} catch (Throwable t) {
log.warn("[{}] Failed to deserialize metadata for message {}:{} - Ignoring",
topicName, ledgerId, entryId);
return;
}
if (msgMetadata.hasMarkerType()) {
// Ignore marker messages as they don't contain user data
return;
}
if (msgMetadata.getEncryptionKeysCount() > 0) {
throw new IOException("Cannot parse encrypted message " + msgMetadata + " on topic " + topicName);
}
uncompressedPayload = uncompressPayloadIfNeeded(topicName, msgMetadata, headersAndPayload, ledgerId,
entryId, maxMessageSize);
if (uncompressedPayload == null) {
// Message was discarded on decompression error
return;
}
final int numMessages = msgMetadata.getNumMessagesInBatch();
if (numMessages == 1 && !msgMetadata.hasNumMessagesInBatch()) {
processor.process(
RawMessageImpl.get(refCntMsgMetadata, null, uncompressedPayload.retain(), ledgerId, entryId, 0));
} else {
// handle batch message enqueuing; uncompressed payload has all messages in batch
receiveIndividualMessagesFromBatch(
refCntMsgMetadata, uncompressedPayload, ledgerId, entryId, processor);
}
} finally {
ReferenceCountUtil.safeRelease(uncompressedPayload);
ReferenceCountUtil.safeRelease(refCntMsgMetadata);
}
} | 3.68 |
hmily_HmilyXaResource_commit | /**
* Commit.
*
 * @param b whether to use one-phase commit (passed through as the onePhase flag)
 * @throws XAException if the resource manager fails to commit
*/
public void commit(final boolean b) throws XAException {
this.commit(this.xid, b);
} | 3.68 |
flink_JobEdge_getDistributionPattern | /**
* Returns the distribution pattern used for this edge.
*
* @return The distribution pattern used for this edge.
*/
public DistributionPattern getDistributionPattern() {
return this.distributionPattern;
} | 3.68 |
hbase_HMaster_isSplitOrMergeEnabled | /**
* Queries the state of the {@link SplitOrMergeStateStore}. If it is not initialized, false is
* returned. If switchType is illegal, false will return.
* @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
* @return The state of the switch
*/
@Override
public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
return !isInMaintenanceMode() && splitOrMergeStateStore != null
&& splitOrMergeStateStore.isSplitOrMergeEnabled(switchType);
} | 3.68 |