name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hibernate-validator_Filters_isWeldProxy | /**
* Whether the given class is a proxy created by Weld or not. This is
* the case if the given class implements the interface
* {@code org.jboss.weld.bean.proxy.ProxyObject}.
*
* @param clazz the class of interest
*
* @return {@code true} if the given class is a Weld proxy,
* {@code false} otherwise
*/
private boolean isWeldProxy(Class<?> clazz) {
for ( Class<?> implementedInterface : clazz.getInterfaces() ) {
if ( implementedInterface.getName().equals( WELD_PROXY_INTERFACE_NAME ) ) {
return true;
}
}
return false;
} | 3.68 |
framework_Slot_setWidgetResizeListener | /**
* Sets the widget resize listener for this slot.
*
* @param widgetResizeListener
* the listener to set, or {@code null} to remove a previously
* set listener
*/
public void setWidgetResizeListener(
ElementResizeListener widgetResizeListener) {
detachListeners();
this.widgetResizeListener = widgetResizeListener;
attachListeners();
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectMinimumWithExpression | /**
* Tests select statement with minimum function using more than a simple field.
*/
@Test
public void testSelectMinimumWithExpression() {
SelectStatement stmt = select(min(field(INT_FIELD).minus(literal(1)))).from(tableRef(TEST_TABLE));
assertEquals("Select scripts are not the same", expectedSelectMinimumWithExpression(), testDialect.convertStatementToSQL(stmt));
} | 3.68 |
morf_AbstractConnectionResources_getDataSource | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.jdbc.ConnectionResources#getDataSource()
*/
@Override
public DataSource getDataSource() {
return new ConnectionDetailsDataSource();
} | 3.68 |
framework_Form_isValidationVisibleOnCommit | /**
* Is validation made automatically visible on commit?
*
* See setValidationVisibleOnCommit().
*
* @return true if validation is made automatically visible on commit.
*/
public boolean isValidationVisibleOnCommit() {
return validationVisibleOnCommit;
} | 3.68 |
hudi_ConsistentHashingUpdateStrategyUtils_constructPartitionToIdentifier | /**
* Construct identifier for the given partitions that are under concurrent resizing (i.e., clustering).
* @return map from partition to pair<instant, identifier>, where instant is the clustering instant.
*/
public static Map<String, Pair<String, ConsistentBucketIdentifier>> constructPartitionToIdentifier(Set<String> partitions, HoodieTable table) {
// Read all pending/ongoing clustering plans
List<Pair<HoodieInstant, HoodieClusteringPlan>> instantPlanPairs =
table.getMetaClient().getActiveTimeline().filterInflightsAndRequested().filter(instant -> instant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION)).getInstantsAsStream()
.map(instant -> ClusteringUtils.getClusteringPlan(table.getMetaClient(), instant))
.flatMap(o -> o.isPresent() ? Stream.of(o.get()) : Stream.empty())
.collect(Collectors.toList());
// Construct child node for each partition & build the bucket identifier
Map<String, HoodieConsistentHashingMetadata> partitionToHashingMeta = new HashMap<>();
Map<String, String> partitionToInstant = new HashMap<>();
for (Pair<HoodieInstant, HoodieClusteringPlan> pair : instantPlanPairs) {
String instant = pair.getLeft().getTimestamp();
HoodieClusteringPlan plan = pair.getRight();
extractHashingMetadataFromClusteringPlan(instant, plan, table, partitions, partitionToHashingMeta, partitionToInstant);
}
return partitionToHashingMeta.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> Pair.of(partitionToInstant.get(e.getKey()), new ConsistentBucketIdentifier(e.getValue()))));
} | 3.68 |
rocketmq-connect_StandaloneConnectStartup_createConnectController | /**
* Read configs from command line and create connect controller.
*
* @param args command line arguments
* @return the created standalone connect controller, or {@code null} if creation failed
*/
private static StandaloneConnectController createConnectController(String[] args) {
try {
// Build the command line options.
Options options = ServerUtil.buildCommandlineOptions(new Options());
commandLine = ServerUtil.parseCmdLine("connect", args, buildCommandlineOptions(options),
new PosixParser());
if (null == commandLine) {
System.exit(-1);
}
// Load configs from command line.
StandaloneConfig config = new StandaloneConfig();
if (commandLine.hasOption('c')) {
String file = commandLine.getOptionValue('c').trim();
if (file != null) {
configFile = file;
InputStream in = new BufferedInputStream(new FileInputStream(file));
properties = new Properties();
properties.load(in);
FileAndPropertyUtil.properties2Object(properties, config);
in.close();
}
}
if (StringUtils.isNotEmpty(config.getMetricsConfigPath())) {
String file = config.getMetricsConfigPath();
InputStream in = new BufferedInputStream(new FileInputStream(file));
properties = new Properties();
properties.load(in);
Map<String, String> metricsConfig = new ConcurrentHashMap<>();
if (!properties.containsKey(WorkerConfig.METRIC_CLASS)) {
throw new IllegalArgumentException("[metrics.reporter] is empty");
}
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
if (entry.getKey().equals(WorkerConfig.METRIC_CLASS)) {
continue;
}
metricsConfig.put(entry.getKey().toString(), entry.getValue().toString());
}
config.getMetricsConfig().put(properties.getProperty(WorkerConfig.METRIC_CLASS), metricsConfig);
in.close();
}
if (null == config.getConnectHome()) {
System.out.printf("Please set the %s variable in your environment to match the location of the Connect installation", WorkerConfig.CONNECT_HOME_ENV);
System.exit(-2);
}
LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
JoranConfigurator configurator = new JoranConfigurator();
configurator.setContext(lc);
lc.reset();
configurator.doConfigure(config.getConnectHome() + "/conf/logback.xml");
List<String> pluginPaths = new ArrayList<>(16);
if (StringUtils.isNotEmpty(config.getPluginPaths())) {
String[] strArr = config.getPluginPaths().split(",");
for (String path : strArr) {
if (StringUtils.isNotEmpty(path)) {
pluginPaths.add(path);
}
}
}
Plugin plugin = new Plugin(pluginPaths);
ClusterManagementService clusterManagementService = new MemoryClusterManagementServiceImpl();
clusterManagementService.initialize(config);
ConfigManagementService configManagementService = new MemoryConfigManagementServiceImpl();
configManagementService.initialize(config, null, plugin);
PositionManagementService positionManagementServices = new FilePositionManagementServiceImpl();
positionManagementServices.initialize(config, null, null);
StateManagementService stateManagementService = new MemoryStateManagementServiceImpl();
stateManagementService.initialize(config, null);
StandaloneConnectController controller = new StandaloneConnectController(
plugin,
config,
clusterManagementService,
configManagementService,
positionManagementServices,
stateManagementService);
// Invoked when shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
private volatile boolean hasShutdown = false;
private AtomicInteger shutdownTimes = new AtomicInteger(0);
@Override
public void run() {
synchronized (this) {
log.info("Shutdown hook was invoked, {}", this.shutdownTimes.incrementAndGet());
if (!this.hasShutdown) {
this.hasShutdown = true;
long beginTime = System.currentTimeMillis();
controller.shutdown();
long consumingTimeTotal = System.currentTimeMillis() - beginTime;
log.info("Shutdown hook over, consuming total time(ms): {}", consumingTimeTotal);
}
}
}
}, "ShutdownHook"));
return controller;
} catch (Throwable e) {
e.printStackTrace();
System.exit(-1);
}
return null;
} | 3.68 |
flink_ImmutableMapState_iterator | /**
* Iterates over all the mappings in the state. The iterator cannot remove elements.
*
* @return A read-only iterator over all the mappings in the state.
*/
@Override
public Iterator<Map.Entry<K, V>> iterator() {
return Collections.unmodifiableSet(state.entrySet()).iterator();
} | 3.68 |
hadoop_OBSFileSystem_checkPath | /**
* Check that a Path belongs to this FileSystem. Unlike the superclass, this
* version does not look at authority, but only hostname.
*
* @param path the path to check
* @throws IllegalArgumentException if there is an FS mismatch
*/
@Override
public void checkPath(final Path path) {
OBSLoginHelper.checkPath(getConf(), getUri(), path, getDefaultPort());
} | 3.68 |
flink_DataSink_getPreferredResources | /**
* Returns the preferred resources of this data sink. If no preferred resources have been set,
* this returns the default resource profile.
*
* @return The preferred resources of this data sink.
*/
@PublicEvolving
public ResourceSpec getPreferredResources() {
return this.preferredResources;
} | 3.68 |
flink_FlinkMetricContainer_updateMetrics | /**
* Update Flink's internal metrics ({@link #flinkCounterCache}) with the latest metrics for a
* given step.
*/
private void updateMetrics(String stepName) {
MetricResults metricResults = asAttemptedOnlyMetricResults(metricsContainers);
MetricQueryResults metricQueryResults =
metricResults.queryMetrics(MetricsFilter.builder().addStep(stepName).build());
updateCounterOrMeter(metricQueryResults.getCounters());
updateDistributions(metricQueryResults.getDistributions());
updateGauge(metricQueryResults.getGauges());
} | 3.68 |
querydsl_SQLExpressions_any | /**
* Get an aggregate any expression for the given boolean expression
*/
public static BooleanExpression any(BooleanExpression expr) {
return Expressions.booleanOperation(Ops.AggOps.BOOLEAN_ANY, expr);
} | 3.68 |
hadoop_JobTokenIdentifier_getUser | /** {@inheritDoc} */
@Override
public UserGroupInformation getUser() {
if (jobid == null || "".equals(jobid.toString())) {
return null;
}
return UserGroupInformation.createRemoteUser(jobid.toString());
} | 3.68 |
morf_AbstractSqlDialectTest_testCastToBigInt | /**
* Tests the output of a cast to a big int.
*/
@Test
public void testCastToBigInt() {
String result = testDialect.getSqlFrom(new Cast(new FieldReference("value"), DataType.BIG_INTEGER, 10));
assertEquals(expectedBigIntCast(), result);
} | 3.68 |
morf_AbstractSqlDialectTest_testCastToDate | /**
* Tests the output of a cast to a date.
*/
@Test
public void testCastToDate() {
String result = testDialect.getSqlFrom(new Cast(new FieldReference("value"), DataType.DATE, 10));
assertEquals(expectedDateCast(), result);
} | 3.68 |
framework_VaadinService_getCurrentResponse | /**
* Gets the currently processed Vaadin response. The current response is
* automatically defined when the request is started. The current response
* can not be used in e.g. background threads because of the way server
* implementations reuse response instances.
*
* @return the current Vaadin response instance if available, otherwise
* <code>null</code>
*
* @see #setCurrentInstances(VaadinRequest, VaadinResponse)
*/
public static VaadinResponse getCurrentResponse() {
return VaadinResponse.getCurrent();
} | 3.68 |
open-banking-gateway_RequestDataToSignNormalizer_canonicalStringToSign | /**
* Computes a shortened form (SHA-256 hash) of the request canonical string so that it can be used in headers
* (e.g. XML payment bodies can be huge, so we hash the request string instead)
* @param toSign Request that is going to be signed
* @return Short hash value of the {@code toSign} ready to be used as the request signature
*
* Note: Technically hash strength other than collision resistance is not of much importance here as the value
* is going to be signed with JWS
*/
default String canonicalStringToSign(RequestToSign toSign) {
return Hashing.sha256().hashBytes(canonicalString(toSign).getBytes(StandardCharsets.UTF_8)).toString();
} | 3.68 |
hbase_HFileBlock_isWriting | /** Returns true if a block is being written */
boolean isWriting() {
return state == State.WRITING;
} | 3.68 |
hbase_BloomFilterChunk_get | /**
* Check if bit at specified index is 1.
* @param pos index of bit
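* @param bloomBuf the buffer holding the Bloom filter bits
* @param bloomOffset offset of the Bloom filter data within the buffer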
* @return true if bit at specified index is 1, false if 0.
*/
static boolean get(int pos, ByteBuffer bloomBuf, int bloomOffset) {
int bytePos = pos >> 3; // pos / 8
int bitPos = pos & 0x7; // pos % 8
// TODO access this via Util API which can do Unsafe access if possible(?)
byte curByte = bloomBuf.get(bloomOffset + bytePos);
curByte &= BloomFilterUtil.bitvals[bitPos];
return (curByte != 0);
} | 3.68 |
hbase_BufferedDataBlockEncoder_compareCommonRowPrefix | /********************* common prefixes *************************/
// Having this as static is fine, but if META has DBE enabled then we should
// change this.
public static int compareCommonRowPrefix(Cell left, Cell right, int rowCommonPrefix) {
if (left instanceof ByteBufferExtendedCell) {
ByteBufferExtendedCell bbLeft = (ByteBufferExtendedCell) left;
if (right instanceof ByteBufferExtendedCell) {
ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right;
return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(),
bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix,
bbRight.getRowByteBuffer(), bbRight.getRowPosition() + rowCommonPrefix,
right.getRowLength() - rowCommonPrefix);
} else {
return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(),
bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix,
right.getRowArray(), right.getRowOffset() + rowCommonPrefix,
right.getRowLength() - rowCommonPrefix);
}
} else {
if (right instanceof ByteBufferExtendedCell) {
ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right;
return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix,
left.getRowLength() - rowCommonPrefix, bbRight.getRowByteBuffer(),
bbRight.getRowPosition() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix);
} else {
return Bytes.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix,
left.getRowLength() - rowCommonPrefix, right.getRowArray(),
right.getRowOffset() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix);
}
}
} | 3.68 |
flink_MemoryManager_releaseMemory | /**
* Releases a memory chunk of a certain size from an owner to this memory manager.
*
* @param owner The owner to associate with the memory reservation, for the fallback release.
* @param size size of memory to release.
*/
public void releaseMemory(Object owner, long size) {
checkMemoryReservationPreconditions(owner, size);
if (size == 0L) {
return;
}
reservedMemory.compute(
owner,
(o, currentlyReserved) -> {
long newReservedMemory = 0;
if (currentlyReserved != null) {
if (currentlyReserved < size) {
LOG.warn(
"Trying to release more memory {} than it was reserved {} so far for the owner {}",
size,
currentlyReserved,
owner);
}
newReservedMemory =
releaseAndCalculateReservedMemory(size, currentlyReserved);
}
return newReservedMemory == 0 ? null : newReservedMemory;
});
} | 3.68 |
flink_ScalaCsvOutputFormat_setQuoteStrings | /**
* Configures whether the output format should quote string values. String values are fields of
* type {@link String} and {@link org.apache.flink.types.StringValue}, as well as all subclasses
* of the latter.
*
* <p>By default, strings are not quoted.
*
* @param quoteStrings Flag indicating whether string fields should be quoted.
*/
public void setQuoteStrings(boolean quoteStrings) {
this.quoteStrings = quoteStrings;
} | 3.68 |
morf_SqlDialect_viewDeploymentStatementsAsScript | /**
* Creates SQL script to deploy a database view.
*
* @param view The meta data for the view to deploy.
* @return The statements required to deploy the view joined into a script.
*/
public String viewDeploymentStatementsAsScript(View view) {
final String firstLine = "-- " + getDatabaseType().identifier() + "\n";
return viewDeploymentStatements(view)
.stream().collect(Collectors.joining(";\n", firstLine, ";"));
} | 3.68 |
flink_EmptyIterator_get | /**
* Gets a singleton instance of the empty iterator.
*
* @param <E> The type of the objects (not) returned by the iterator.
* @return An instance of the iterator.
*/
public static <E> EmptyIterator<E> get() {
@SuppressWarnings("unchecked")
EmptyIterator<E> iter = (EmptyIterator<E>) INSTANCE;
return iter;
} | 3.68 |
querydsl_SQLExpressions_variance | /**
* returns the variance of expr
*
* @param expr argument
* @return variance(expr)
*/
public static <T extends Number> WindowOver<T> variance(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), SQLOps.VARIANCE, expr);
} | 3.68 |
hbase_SplitLogWorker_splitLog | /** Returns Status: either DONE, RESIGNED, PREEMPTED, or ERR. */
static Status splitLog(String filename, CancelableProgressable p, Configuration conf,
RegionServerServices server, LastSequenceId sequenceIdChecker, WALFactory factory) {
Path walDir;
FileSystem fs;
try {
walDir = CommonFSUtils.getWALRootDir(conf);
fs = walDir.getFileSystem(conf);
} catch (IOException e) {
LOG.warn("Resigning, could not find root dir or fs", e);
return Status.RESIGNED;
}
try {
if (!processSyncReplicationWAL(filename, conf, server, fs, walDir)) {
return Status.DONE;
}
} catch (IOException e) {
LOG.warn("failed to process sync replication wal {}", filename, e);
return Status.RESIGNED;
}
// TODO have to correctly figure out when log splitting has been
// interrupted or has encountered a transient error and when it has
// encountered a bad non-retry-able persistent error.
try {
SplitLogWorkerCoordination splitLogWorkerCoordination =
server.getCoordinatedStateManager() == null
? null
: server.getCoordinatedStateManager().getSplitLogWorkerCoordination();
if (
!WALSplitter.splitLogFile(walDir, fs.getFileStatus(new Path(walDir, filename)), fs, conf, p,
sequenceIdChecker, splitLogWorkerCoordination, factory, server)
) {
return Status.PREEMPTED;
}
} catch (InterruptedIOException iioe) {
LOG.warn("Resigning, interrupted splitting WAL {}", filename, iioe);
return Status.RESIGNED;
} catch (IOException e) {
if (e instanceof FileNotFoundException) {
// A wal file may not exist anymore. Nothing can be recovered so move on
LOG.warn("Done, WAL {} does not exist anymore", filename, e);
return Status.DONE;
}
Throwable cause = e.getCause();
if (
e instanceof RetriesExhaustedException && (cause instanceof NotServingRegionException
|| cause instanceof ConnectException || cause instanceof SocketTimeoutException)
) {
LOG.warn("Resigning, can't connect to target regionserver splitting WAL {}", filename, e);
return Status.RESIGNED;
} else if (cause instanceof InterruptedException) {
LOG.warn("Resigning, interrupted splitting WAL {}", filename, e);
return Status.RESIGNED;
}
LOG.warn("Error splitting WAL {}", filename, e);
return Status.ERR;
}
LOG.debug("Done splitting WAL {}", filename);
return Status.DONE;
} | 3.68 |
flink_PartitioningProperty_isPartitionedOnKey | /**
* Checks if this property represents a partitioning that is not random, but on a partitioning
* key.
*
* @return True, if the data is partitioned on a key.
*/
public boolean isPartitionedOnKey() {
return isPartitioned() && this != RANDOM_PARTITIONED;
} | 3.68 |
flink_PlanNode_getOutgoingChannels | /**
* Gets a list of all outgoing channels leading to successors.
*
* @return A list of all channels leading to successors.
*/
public List<Channel> getOutgoingChannels() {
return this.outChannels;
} | 3.68 |
framework_VSlider_buildBase | /** For internal use only. May be removed or replaced in the future. */
public void buildBase() {
final String styleAttribute = isVertical() ? "height" : "width";
final String oppositeStyleAttribute = isVertical() ? "width" : "height";
final String domProperty = isVertical() ? "offsetHeight"
: "offsetWidth";
// clear unnecessary opposite style attribute
base.getStyle().clearProperty(oppositeStyleAttribute);
/*
* To resolve defect #13681 we must not return early from buildBase() when
* the slider has no parentElement, because buildHandle() and setValues(),
* which the Slider needs, are called at the end of buildBase() and would
* then be skipped. Instead of returning early, the code that depends on
* the parentElement is wrapped in an "if" so it only runs when a
* parentElement exists.
*/
if (getElement().hasParentElement()) {
final Element p = getElement();
if (p.getPropertyInt(domProperty) > MIN_SIZE) {
if (isVertical()) {
setHeight();
} else {
base.getStyle().clearProperty(styleAttribute);
}
} else {
// Set minimum size and adjust after all components have
// (supposedly) been drawn completely.
base.getStyle().setPropertyPx(styleAttribute, MIN_SIZE);
Scheduler.get().scheduleDeferred(new Command() {
@Override
public void execute() {
final Element p = getElement();
if (p.getPropertyInt(domProperty) > MIN_SIZE + 5
|| propertyNotNullOrEmpty(styleAttribute, p)) {
if (isVertical()) {
setHeight();
} else {
base.getStyle().clearProperty(styleAttribute);
}
// Ensure correct position
setValue(value, false);
}
}
// Style has non empty property
private boolean propertyNotNullOrEmpty(
final String styleAttribute, final Element p) {
return p.getStyle().getProperty(styleAttribute) != null
&& !p.getStyle().getProperty(styleAttribute)
.isEmpty();
}
});
}
}
if (!isVertical()) {
// Draw handle with a delay to allow base to gain maximum width
Scheduler.get().scheduleDeferred(() -> {
buildHandle();
setValue(value, false);
});
} else {
buildHandle();
setValue(value, false);
}
// TODO attach listeners for focusing and arrow keys
} | 3.68 |
hbase_CloneSnapshotProcedure_getMonitorStatus | /**
* Sets up the monitor status if it has not been created yet, and returns it.
*/
private MonitoredTask getMonitorStatus() {
if (monitorStatus == null) {
monitorStatus = TaskMonitor.get()
.createStatus("Cloning snapshot '" + snapshot.getName() + "' to table " + getTableName());
}
return monitorStatus;
} | 3.68 |
hudi_AbstractRealtimeRecordReader_init | /**
* Gets the schema from the HoodieTableMetaClient. If not available, falls
* back to the schema from the latest parquet file. Finally, sets the partition column and projection fields into the
* job conf.
*/
private void init() throws Exception {
LOG.info("Getting writer schema from table avro schema ");
writerSchema = new TableSchemaResolver(metaClient).getTableAvroSchema();
// Add partitioning fields to writer schema for resulting row to contain null values for these fields
String partitionFields = jobConf.get(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "");
List<String> partitioningFields =
partitionFields.length() > 0 ? Arrays.stream(partitionFields.split("/")).collect(Collectors.toList())
: new ArrayList<>();
writerSchema = HoodieRealtimeRecordReaderUtils.addPartitionFields(writerSchema, partitioningFields);
List<String> projectionFields = HoodieRealtimeRecordReaderUtils.orderFields(jobConf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, EMPTY_STRING),
jobConf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, EMPTY_STRING), partitioningFields);
Map<String, Field> schemaFieldsMap = HoodieRealtimeRecordReaderUtils.getNameToFieldMap(writerSchema);
hiveSchema = constructHiveOrderedSchema(writerSchema, schemaFieldsMap, jobConf.get(hive_metastoreConstants.META_TABLE_COLUMNS, EMPTY_STRING));
// TODO(vc): In the future, the reader schema should be updated based on log files & be able
// to null out fields not present before
readerSchema = HoodieRealtimeRecordReaderUtils.generateProjectionSchema(writerSchema, schemaFieldsMap, projectionFields);
LOG.info(String.format("About to read compacted logs %s for base split %s, projecting cols %s",
split.getDeltaLogPaths(), split.getPath(), projectionFields));
// get timestamp columns
supportTimestamp = HoodieColumnProjectionUtils.supportTimestamp(jobConf);
} | 3.68 |
hbase_ZKMainServer_main | /**
* Run the tool.
* @param args Command line arguments. First arg is path to zookeepers file.
*/
public static void main(String[] args) throws Exception {
String[] newArgs = args;
if (!hasServer(args)) {
// Add the zk ensemble from configuration if none passed on command-line.
Configuration conf = HBaseConfiguration.create();
String hostport = new ZKMainServer().parse(conf);
if (hostport != null && hostport.length() > 0) {
newArgs = new String[args.length + 2];
System.arraycopy(args, 0, newArgs, 2, args.length);
newArgs[0] = "-server";
newArgs[1] = hostport;
}
}
// If command-line arguments, run our hack so they are executed.
// ZOOKEEPER-1897 was committed to zookeeper-3.4.6 but elsewhere in this class we say
// 3.4.6 breaks command-processing; TODO.
if (hasCommandLineArguments(args)) {
HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain zkm =
new HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(newArgs);
zkm.runCmdLine();
} else {
ZooKeeperMain.main(newArgs);
}
} | 3.68 |
hbase_HFileInfo_checkFileVersion | /**
* File version check is a little sloppy. We read v3 files but can also read v2 files if their
* content has been pb'd; files written with 0.98.
*/
private void checkFileVersion(Path path) {
int majorVersion = trailer.getMajorVersion();
if (majorVersion == getMajorVersion()) {
return;
}
int minorVersion = trailer.getMinorVersion();
if (majorVersion == 2 && minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) {
return;
}
// We can read v3 or v2 versions of hfile.
throw new IllegalArgumentException("Invalid HFile version: major=" + trailer.getMajorVersion()
+ ", minor=" + trailer.getMinorVersion() + ": expected at least " + "major=2 and minor="
+ MAX_MINOR_VERSION + ", path=" + path);
} | 3.68 |
streampipes_AssetLinkBuilder_withLinkType | /**
* Sets the link type for the AssetLink being built.
*
* @param linkType The link type to set.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withLinkType(String linkType) {
this.assetLink.setLinkType(linkType);
return this;
} | 3.68 |
morf_SqlDialect_getSqlForSumDistinct | /**
* Converts the sum function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForSumDistinct(Function function) {
return "SUM(DISTINCT " + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
AreaShop_FileManager_deleteRegion | /**
* Remove a region from the list.
* @param region The region to remove
* @param giveMoneyBack use true to give money back to the player if someone is currently holding this region, otherwise false
* @return the DeletingRegionEvent that was fired; it is cancelled if the region could not be removed
*/
public DeletingRegionEvent deleteRegion(GeneralRegion region, boolean giveMoneyBack) {
DeletingRegionEvent event = new DeletingRegionEvent(region);
if(region == null) {
event.cancel("null region");
return event;
}
Bukkit.getPluginManager().callEvent(event);
if (event.isCancelled()) {
return event;
}
region.setDeleted();
if(region instanceof RentRegion && ((RentRegion)region).isRented()) {
((RentRegion)region).unRent(giveMoneyBack, null);
} else if (region instanceof BuyRegion && ((BuyRegion)region).isSold()) {
((BuyRegion)region).sell(giveMoneyBack, null);
}
// Handle schematics
region.handleSchematicEvent(RegionEvent.DELETED);
// Delete the signs
if(region.getWorld() != null) {
for(Location sign : region.getSignsFeature().getSignLocations()) {
sign.getBlock().setType(Material.AIR);
}
}
// Remove from RegionGroups
RegionGroup[] regionGroups = getGroups().toArray(new RegionGroup[0]);
for(RegionGroup group : regionGroups) {
group.removeMember(region);
}
region.resetRegionFlags();
regions.remove(region.getLowerCaseName());
// Remove file
File file = new File(plugin.getDataFolder() + File.separator + AreaShop.regionsFolder + File.separator + region.getLowerCaseName() + ".yml");
if(file.exists()) {
boolean deleted;
try {
deleted = file.delete();
} catch(Exception e) {
deleted = false;
}
if(!deleted) {
AreaShop.warn("File could not be deleted: " + file.toString());
}
}
// Broadcast event
Bukkit.getPluginManager().callEvent(new DeletedRegionEvent(region));
return event;
} | 3.68 |
querydsl_ExpressionUtils_operation | /**
* Create a new Operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
@SuppressWarnings("unchecked")
public static <T> Operation<T> operation(Class<? extends T> type, Operator operator,
List<Expression<?>> args) {
if (type.equals(Boolean.class)) {
return (Operation<T>) new PredicateOperation(operator, args);
} else {
return new OperationImpl<T>(type, operator, args);
}
} | 3.68 |
hbase_ReplicationSourceManager_removeSource | /**
* Clear the metrics and related replication queue of the specified old source
* @param src source to clear
*/
void removeSource(ReplicationSourceInterface src) {
LOG.info("Done with the queue " + src.getQueueId());
this.sources.remove(src.getPeerId());
// Delete queue from storage and memory
deleteQueue(src.getQueueId());
this.walsById.remove(src.getQueueId());
} | 3.68 |
open-banking-gateway_EncryptionWithInitVectorOper_encryptionService | /**
* Symmetric Key based encryption.
* @param keyId Key ID
* @param keyWithIv Key value
* @return Encryption service that encrypts data with symmetric key provided
*/
public EncryptionService encryptionService(String keyId, SecretKeyWithIv keyWithIv) {
return new SymmetricEncryption(
keyId,
() -> encryption(keyWithIv),
() -> decryption(keyWithIv)
);
} | 3.68 |
framework_VFilterSelect_setSelectedCaption | /**
* Sets the caption of selected item, if "scroll to page" is disabled. This
* method is meant for internal use and may change in future versions.
*
* @since 7.7
* @param selectedCaption
* the caption of selected item
*/
public void setSelectedCaption(String selectedCaption) {
explicitSelectedCaption = selectedCaption;
if (selectedCaption != null) {
setPromptingOff(selectedCaption);
}
} | 3.68 |
flink_JoinSpec_getJoinKeySize | /** Gets number of keys in join key. */
@JsonIgnore
public int getJoinKeySize() {
return leftKeys.length;
} | 3.68 |
hbase_RegionCoprocessorHost_prePrepareTimeStampForDeleteVersion | /**
* Supports Coprocessor 'bypass'.
* @param mutation - the current mutation
* @param kv - the current cell
* @param byteNow - current timestamp in bytes
* @param get - the get that could be used Note that the get only does not specify the family
* and qualifier that should be used
* @return true if default processing should be bypassed
* @deprecated In hbase-2.0.0. Will be removed in hbase-3.0.0. Added explicitly for a single
* Coprocessor for its needs only. Will be removed.
*/
@Deprecated
public boolean prePrepareTimeStampForDeleteVersion(final Mutation mutation, final Cell kv,
final byte[] byteNow, final Get get) throws IOException {
if (coprocEnvironments.isEmpty()) {
return false;
}
boolean bypassable = true;
return execOperation(new RegionObserverOperationWithoutResult(bypassable) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.prePrepareTimeStampForDeleteVersion(this, mutation, kv, byteNow, get);
}
});
} | 3.68 |
hbase_PermissionStorage_isAclRegion | /**
* Returns {@code true} if the given region is part of the {@code _acl_} metadata table.
*/
static boolean isAclRegion(Region region) {
return ACL_TABLE_NAME.equals(region.getTableDescriptor().getTableName());
} | 3.68 |
hbase_TableRecordReaderImpl_createKey | /**
* @see org.apache.hadoop.mapred.RecordReader#createKey()
*/
public ImmutableBytesWritable createKey() {
return new ImmutableBytesWritable();
} | 3.68 |
pulsar_ResourceLockImpl_acquireWithNoRevalidation | // Simple operation of acquiring the lock, with no retries and without checking the lock content
private CompletableFuture<Void> acquireWithNoRevalidation(T newValue) {
if (log.isDebugEnabled()) {
log.debug("acquireWithNoRevalidation,newValue={},version={}", newValue, version);
}
byte[] payload;
try {
payload = serde.serialize(path, newValue);
} catch (Throwable t) {
return FutureUtils.exception(t);
}
CompletableFuture<Void> result = new CompletableFuture<>();
store.put(path, payload, Optional.of(version), EnumSet.of(CreateOption.Ephemeral))
.thenAccept(stat -> {
synchronized (ResourceLockImpl.this) {
state = State.Valid;
version = stat.getVersion();
value = newValue;
}
log.info("Acquired resource lock on {}", path);
result.complete(null);
}).exceptionally(ex -> {
if (ex.getCause() instanceof BadVersionException) {
result.completeExceptionally(
new LockBusyException("Resource at " + path + " is already locked"));
} else {
result.completeExceptionally(ex.getCause());
}
return null;
});
return result;
} | 3.68 |
flink_Hardware_getSizeOfPhysicalMemoryForMac | /**
* Returns the size of the physical memory in bytes on a Mac OS-based operating system
*
* @return the size of the physical memory in bytes or {@code -1}, if the size could not be
* determined
*/
private static long getSizeOfPhysicalMemoryForMac() {
BufferedReader bi = null;
try {
Process proc = Runtime.getRuntime().exec("sysctl hw.memsize");
bi =
new BufferedReader(
new InputStreamReader(proc.getInputStream(), StandardCharsets.UTF_8));
String line;
while ((line = bi.readLine()) != null) {
if (line.startsWith("hw.memsize")) {
long memsize = Long.parseLong(line.split(":")[1].trim());
bi.close();
proc.destroy();
return memsize;
}
}
} catch (Throwable t) {
LOG.error("Cannot determine physical memory of machine for MacOS host", t);
return -1;
} finally {
if (bi != null) {
try {
bi.close();
} catch (IOException ignored) {
}
}
}
return -1;
} | 3.68 |
graphhopper_VectorTile_removeValues | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public Builder removeValues(int index) {
if (valuesBuilder_ == null) {
ensureValuesIsMutable();
values_.remove(index);
onChanged();
} else {
valuesBuilder_.remove(index);
}
return this;
} | 3.68 |
hadoop_SolverPreprocessor_getResourceVector | /**
* Return the multi-dimension resource vector consumed by the job at specified
* time.
*
* @param skyList the list of {@link Resource}s used by the job.
* @param index the discretized time index.
* @param containerMemAlloc the multi-dimension resource vector allocated to
* one container.
* @return the multi-dimension resource vector consumed by the job.
*/
public final long getResourceVector(final RLESparseResourceAllocation skyList,
final int index, final long containerMemAlloc) {
return skyList.getCapacityAtTime(index).getMemorySize() / containerMemAlloc;
} | 3.68 |
hbase_ExtendedCell_deepClone | /**
* Does a deep copy of the contents to a new memory area and returns it as a new cell.
* @return The deep cloned cell
*/
default ExtendedCell deepClone() {
// When being added to the memstore, deepClone() is called and KeyValue has less heap overhead.
return new KeyValue(this);
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectOrderByTwoFields | /**
* Tests a select with an "order by" clause with two fields.
*/
@Test
public void testSelectOrderByTwoFields() {
FieldReference fieldReference1 = new FieldReference("stringField1");
FieldReference fieldReference2 = new FieldReference("stringField2");
SelectStatement stmt = new SelectStatement(fieldReference1,fieldReference2)
.from(new TableReference(ALTERNATE_TABLE))
.orderBy(fieldReference1.desc().nullsFirst(),fieldReference2.asc().nullsLast());
assertEquals("Select with descending order by", expectedSelectOrderByTwoFields(), testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_OverWindowPartitioned_orderBy | /**
* Specifies the time attribute on which rows are ordered.
*
* <p>For streaming tables, reference a rowtime or proctime time attribute here to specify the
* time mode.
*
* <p>For batch tables, refer to a timestamp or long attribute.
*
* @param orderBy field reference
* @return an over window with defined order
*/
public OverWindowPartitionedOrdered orderBy(Expression orderBy) {
return new OverWindowPartitionedOrdered(partitionBy, orderBy);
} | 3.68 |
framework_DragHandle_removeStyleName | /**
* Removes existing style name from drag handle element.
*
* @param styleName
* a CSS style name
*/
public void removeStyleName(String styleName) {
element.removeClassName(styleName);
} | 3.68 |
flink_DataViewUtils_createDistinctViewSpec | /** Creates a special {@link DistinctViewSpec} for DISTINCT aggregates. */
public static DistinctViewSpec createDistinctViewSpec(
int index, DataType distinctViewDataType) {
return new DistinctViewSpec("distinctAcc_" + index, distinctViewDataType);
} | 3.68 |
cron-utils_FieldParser_parse | /**
* Parse given expression for a single cron field.
*
* @param expression - String
* @return CronFieldExpression object with the interpretation of the given String parameter
*/
public FieldExpression parse(final String expression) {
if (!StringUtils.containsAny(expression, SPECIAL_CHARS_MINUS_STAR)) {
if (expression.contains(QUESTION_MARK_STRING) && !fieldConstraints.getSpecialChars().contains(QUESTION_MARK)) {
throw new IllegalArgumentException("Invalid expression: " + expression);
}
return noSpecialCharsNorStar(expression);
} else {
final String[] array = expression.split(",");
if (array.length > 1) {
return commaSplitResult(array);
} else {
final String[] splitted = expression.split("-");
if (expression.contains("-") && splitted.length != 2) {
throw new IllegalArgumentException("Missing values for range: " + expression);
}
return splitted[0].equalsIgnoreCase(L_STRING)
? parseOnWithL(splitted[0], mapToIntegerFieldValue(splitted[1]))
: dashSplitResult(expression, splitted);
}
}
} | 3.68 |
flink_Execution_markFailed | /**
* This method marks the task as failed, but will make no attempt to remove task execution from
* the task manager. It is intended for cases where the task is known not to be running, or when
* the TaskManager reports failure (in which case it has already removed the task).
*
* @param t The exception that caused the task to fail.
*/
public void markFailed(Throwable t) {
processFail(t, false);
} | 3.68 |
hudi_KafkaConnectUtils_getRecordKeyColumns | /**
* Extract the record fields.
*
* @param keyGenerator the key generator instance.
* @return Returns the record key columns separated by comma.
*/
public static String getRecordKeyColumns(KeyGenerator keyGenerator) {
return String.join(",", keyGenerator.getRecordKeyFieldNames());
} | 3.68 |
hbase_MergeTableRegionsProcedure_getServerName | /**
* The procedure could be restarted from a different machine. If the variable is null, we need to
* retrieve it.
* @param env MasterProcedureEnv
*/
private ServerName getServerName(final MasterProcedureEnv env) {
if (regionLocation == null) {
regionLocation =
env.getAssignmentManager().getRegionStates().getRegionServerOfRegion(regionsToMerge[0]);
// May still be null here but return null and let caller deal.
// Means we lost the in-memory-only location. We are in recovery
// or so. The caller should be able to deal w/ a null ServerName.
// Let them go to the Balancer to find one to use instead.
}
return regionLocation;
} | 3.68 |
hbase_HFileBlock_sanityCheck | /**
* Checks if the block is internally consistent, i.e. the first
* {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a valid header consistent
* with the fields. Assumes a packed block structure. This function is primary for testing and
* debugging, and is not thread-safe, because it alters the internal buffer pointer. Used by tests
* only.
*/
void sanityCheck() throws IOException {
// Duplicate so no side-effects
ByteBuff dup = this.bufWithoutChecksum.duplicate().rewind();
sanityCheckAssertion(BlockType.read(dup), blockType);
sanityCheckAssertion(dup.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader");
sanityCheckAssertion(dup.getInt(), uncompressedSizeWithoutHeader,
"uncompressedSizeWithoutHeader");
sanityCheckAssertion(dup.getLong(), prevBlockOffset, "prevBlockOffset");
if (this.fileContext.isUseHBaseChecksum()) {
sanityCheckAssertion(dup.get(), this.fileContext.getChecksumType().getCode(), "checksumType");
sanityCheckAssertion(dup.getInt(), this.fileContext.getBytesPerChecksum(),
"bytesPerChecksum");
sanityCheckAssertion(dup.getInt(), onDiskDataSizeWithHeader, "onDiskDataSizeWithHeader");
}
if (dup.limit() != onDiskDataSizeWithHeader) {
throw new AssertionError(
"Expected limit " + onDiskDataSizeWithHeader + ", got " + dup.limit());
}
// We might optionally allocate HFILEBLOCK_HEADER_SIZE more bytes to read the next
// block's header, so there are two sensible values for buffer capacity.
int hdrSize = headerSize();
dup.rewind();
if (
dup.remaining() != onDiskDataSizeWithHeader
&& dup.remaining() != onDiskDataSizeWithHeader + hdrSize
) {
throw new AssertionError("Invalid buffer capacity: " + dup.remaining() + ", expected "
+ onDiskDataSizeWithHeader + " or " + (onDiskDataSizeWithHeader + hdrSize));
}
} | 3.68 |
framework_DeclarativeItemEnabledProvider_addDisabled | /**
* Adds the {@code item} to disabled items list.
*
* @param item
* a data item
*/
protected void addDisabled(T item) {
disabled.add(item);
} | 3.68 |
hadoop_BCFile_close | /**
* Finishing reading the BCFile. Release all resources.
*/
@Override
public void close() {
// nothing to be done now
} | 3.68 |
flink_CopyOnWriteStateMap_handleChainedEntryCopyOnWrite | /**
* Perform copy-on-write for entry chains. We iterate the (hopefully and probably) still cached
* chain, replace all links up to the 'untilEntry', which we actually wanted to modify.
*/
private StateMapEntry<K, N, S> handleChainedEntryCopyOnWrite(
StateMapEntry<K, N, S>[] tab, int mapIdx, StateMapEntry<K, N, S> untilEntry) {
final int required = highestRequiredSnapshotVersion;
StateMapEntry<K, N, S> current = tab[mapIdx];
StateMapEntry<K, N, S> copy;
if (current.entryVersion < required) {
copy = new StateMapEntry<>(current, stateMapVersion);
tab[mapIdx] = copy;
} else {
// nothing to do, just advance copy to current
copy = current;
}
// we iterate the chain up to 'until entry'
while (current != untilEntry) {
// advance current
current = current.next;
if (current.entryVersion < required) {
// copy and advance the current's copy
copy.next = new StateMapEntry<>(current, stateMapVersion);
copy = copy.next;
} else {
// nothing to do, just advance copy to current
copy = current;
}
}
return copy;
} | 3.68 |
hbase_BloomFilterChunk_createAnother | /**
* Creates another similar Bloom filter. Does not copy the actual bits, and sets the new filter's
* key count to zero.
* @return a Bloom filter with the same configuration as this
*/
public BloomFilterChunk createAnother() {
BloomFilterChunk bbf = new BloomFilterChunk(hashType, this.bloomType);
bbf.byteSize = byteSize;
bbf.hashCount = hashCount;
bbf.maxKeys = maxKeys;
return bbf;
} | 3.68 |
hadoop_ShadedProtobufHelper_getFixedByteString | /**
* Get the ByteString for frequently used fixed and small set strings.
* @param key string
* @return ByteString for frequently used fixed and small set strings.
*/
public static ByteString getFixedByteString(String key) {
ByteString value = FIXED_BYTESTRING_CACHE.get(key);
if (value == null) {
value = ByteString.copyFromUtf8(key);
FIXED_BYTESTRING_CACHE.put(key, value);
}
return value;
} | 3.68 |
hbase_HDFSBlocksDistribution_addHostsAndBlockWeight | /**
* add some weight to a list of hosts, update the value of unique block weight
* @param hosts the list of hosts
* @param weight the weight
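* @param storageTypes the storage type of each replica on the corresponding host, used to attribute SSD weight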
*/
public void addHostsAndBlockWeight(String[] hosts, long weight, StorageType[] storageTypes) {
if (hosts == null || hosts.length == 0) {
// erroneous data
return;
}
addUniqueWeight(weight);
if (storageTypes != null && storageTypes.length == hosts.length) {
for (int i = 0; i < hosts.length; i++) {
long weightForSsd = 0;
if (storageTypes[i] == StorageType.SSD) {
weightForSsd = weight;
}
addHostAndBlockWeight(hosts[i], weight, weightForSsd);
}
} else {
for (String hostname : hosts) {
addHostAndBlockWeight(hostname, weight, 0);
}
}
} | 3.68 |
hbase_LoadBalancer_updateBalancerLoadInfo | /**
* In some scenarios, Balancer needs to update internal status or information according to the
* current load of all tables
* @param loadOfAllTable region load of servers for all tables
*/
default void
updateBalancerLoadInfo(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
} | 3.68 |
graphhopper_VectorTile_addKeysBytes | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public Builder addKeysBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureKeysIsMutable();
keys_.add(value);
onChanged();
return this;
} | 3.68 |
flink_InternalWindowProcessFunction_cleanupTime | /**
* Returns the cleanup time for a window, which is {@code window.maxTimestamp +
* allowedLateness}. In case this leads to a value greater than {@link Long#MAX_VALUE} then a
* cleanup time of {@link Long#MAX_VALUE} is returned.
*
* @param window the window whose cleanup time we are computing.
*/
private long cleanupTime(W window) {
if (windowAssigner.isEventTime()) {
long cleanupTime = window.maxTimestamp() + allowedLateness;
return cleanupTime >= window.maxTimestamp() ? cleanupTime : Long.MAX_VALUE;
} else {
return window.maxTimestamp();
}
} | 3.68 |
flink_BlockInfo_setAccumulatedRecordCount | /**
* Sets the accumulatedRecordCount to the specified value.
*
* @param accumulatedRecordCount the accumulatedRecordCount to set
*/
public void setAccumulatedRecordCount(long accumulatedRecordCount) {
this.accumulatedRecordCount = accumulatedRecordCount;
} | 3.68 |
flink_KeyGroupsStateHandle_getDelegateStateHandle | /** @return The handle to the actual states */
public StreamStateHandle getDelegateStateHandle() {
return stateHandle;
} | 3.68 |
morf_H2Dialect_getSqlForDateToYyyymmdd | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmdd(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForDateToYyyymmdd(Function function) {
String sqlExpression = getSqlFrom(function.getArguments().get(0));
return String.format("CAST(SUBSTRING(%1$s, 1, 4)||SUBSTRING(%1$s, 6, 2)||SUBSTRING(%1$s, 9, 2) AS DECIMAL(8))",sqlExpression);
} | 3.68 |
hbase_QuotaFilter_getTableFilter | /** Returns the Table filter regex */
public String getTableFilter() {
return tableRegex;
} | 3.68 |
hbase_Interns_tag | /**
* Get a metrics tag
* @param name of the tag
* @param description of the tag
* @param value of the tag
* @return an interned metrics tag
*/
public static MetricsTag tag(String name, String description, String value) {
return tag(info(name, description), value);
} | 3.68 |
hadoop_AllocationFileParser_getReservationPlanner | // Reservation global configuration knobs
public Optional<String> getReservationPlanner() {
return getTextValue(RESERVATION_PLANNER);
} | 3.68 |
framework_IndexedContainer_toString | /**
* Gets the <code>String</code> representation of the contents of the
* Item. The format of the string is a space-separated concatenation of the
* <code>String</code> representations of the values of the Properties
* contained by the Item.
*
* @return <code>String</code> representation of the Item contents
*/
@Override
public String toString() {
String retValue = "";
for (final Iterator<?> i = propertyIds.iterator(); i.hasNext();) {
final Object propertyId = i.next();
retValue += getItemProperty(propertyId).getValue();
if (i.hasNext()) {
retValue += " ";
}
}
return retValue;
} | 3.68 |
morf_HumanReadableStatementHelper_generateRenameIndexString | /**
* Generates a human-readable "Rename Index" string.
*
* @param tableName the name of the table to rename the index on
* @param fromIndexName the original index name
* @param toIndexName the replacement name for the index
* @return a string containing the human-readable version of the action
*/
public static String generateRenameIndexString(final String tableName, final String fromIndexName, final String toIndexName) {
return String.format("Rename index %s on %s to %s", fromIndexName, tableName, toIndexName);
} | 3.68 |
hadoop_ContainerUpdates_getIncreaseRequests | /**
* Returns Container Increase Requests.
* @return Container Increase Requests.
*/
public List<UpdateContainerRequest> getIncreaseRequests() {
return increaseRequests;
} | 3.68 |
framework_VMenuBar_getNavigationDownKey | /**
* Get the key that moves the selection downwards. By default it is the down
* arrow key but by overriding this you can change the key to whatever you
* want.
*
* @return The keycode of the key
*/
protected int getNavigationDownKey() {
return KeyCodes.KEY_DOWN;
} | 3.68 |
hibernate-validator_DefaultScriptEvaluatorFactory_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
hbase_TableBackupClient_obtainBackupMetaDataStr | /**
* Get backup request meta data dir as string.
* @param backupInfo backup info
* @return meta data dir
*/
protected String obtainBackupMetaDataStr(BackupInfo backupInfo) {
StringBuilder sb = new StringBuilder();
sb.append("type=" + backupInfo.getType() + ",tablelist=");
for (TableName table : backupInfo.getTables()) {
sb.append(table + ";");
}
if (sb.lastIndexOf(";") > 0) {
sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
}
sb.append(",targetRootDir=" + backupInfo.getBackupRootDir());
return sb.toString();
} | 3.68 |
hadoop_AzureBlobFileSystem_listLocatedStatus | /**
* Incremental listing of located status entries,
* preserving etags.
* @param path path to list
* @param filter a path filter
* @return iterator of results.
* @throws FileNotFoundException source path not found.
* @throws IOException other values.
*/
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(
final Path path,
final PathFilter filter)
throws FileNotFoundException, IOException {
LOG.debug("AzureBlobFileSystem.listStatusIterator path : {}", path);
// get a paged iterator over the source data, filtering out non-matching
// entries.
final RemoteIterator<FileStatus> sourceEntries = filteringRemoteIterator(
listStatusIterator(path),
(st) -> filter.accept(st.getPath()));
// and then map that to a remote iterator of located file status
// entries, propagating any etags.
return mappingRemoteIterator(sourceEntries,
st -> new AbfsLocatedFileStatus(st,
st.isFile()
? getFileBlockLocations(st, 0, st.getLen())
: null));
} | 3.68 |
framework_VTabsheet_focusTabAtIndex | /**
* Focus the specified tab. Make sure to call this only from user
* events, otherwise will break things.
*
* @param tabIndex
* the index of the tab to set.
*/
void focusTabAtIndex(int tabIndex) {
Tab tabToFocus = tb.getTab(tabIndex);
if (tabToFocus != null) {
tabToFocus.focus();
}
} | 3.68 |
hbase_EntityLock_await | /**
* @param timeout in milliseconds. If set to 0, waits indefinitely.
* @return true if lock was acquired; and false if waiting time elapsed before lock could be
* acquired.
*/
public boolean await(long timeout, TimeUnit timeUnit) throws InterruptedException {
final boolean result = latch.await(timeout, timeUnit);
String lockRequestStr = lockRequest.toString().replace("\n", ", ");
if (result) {
LOG.info("Acquired " + lockRequestStr);
} else {
LOG.info(String.format("Failed acquire in %s %s of %s", timeout, timeUnit.toString(),
lockRequestStr));
}
return result;
} | 3.68 |
hbase_AbstractFSWALProvider_getArchivedWALFiles | /**
* List all the old wal files for a dead region server.
* <p/>
* Initially added for supporting replication, where we need to get the wal files to replicate for
* a dead region server.
*/
public static List<Path> getArchivedWALFiles(Configuration conf, ServerName serverName,
String logPrefix) throws IOException {
Path walRootDir = CommonFSUtils.getWALRootDir(conf);
FileSystem fs = walRootDir.getFileSystem(conf);
List<Path> archivedWalFiles = new ArrayList<>();
// list both the root old wal dir and the separate old wal dir, so we will not miss any files if
// the SEPARATE_OLDLOGDIR config is changed
Path oldWalDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
try {
for (FileStatus status : fs.listStatus(oldWalDir, p -> p.getName().startsWith(logPrefix))) {
if (status.isFile()) {
archivedWalFiles.add(status.getPath());
}
}
} catch (FileNotFoundException e) {
LOG.info("Old WAL dir {} not exists", oldWalDir);
return Collections.emptyList();
}
Path separatedOldWalDir = new Path(oldWalDir, serverName.toString());
try {
for (FileStatus status : fs.listStatus(separatedOldWalDir,
p -> p.getName().startsWith(logPrefix))) {
if (status.isFile()) {
archivedWalFiles.add(status.getPath());
}
}
} catch (FileNotFoundException e) {
LOG.info("Seprated old WAL dir {} not exists", separatedOldWalDir);
}
return archivedWalFiles;
} | 3.68 |
hadoop_ExitUtil_getFirstHaltException | /**
* @return the first {@code HaltException} thrown, null if none thrown yet.
*/
public static HaltException getFirstHaltException() {
return FIRST_HALT_EXCEPTION.get();
} | 3.68 |
hudi_HoodieGlobalSimpleIndex_tagLocationInternal | /**
* Tags records location for incoming records.
*
* @param inputRecords {@link HoodieData} of incoming records
* @param context instance of {@link HoodieEngineContext} to use
* @param hoodieTable instance of {@link HoodieTable} to use
* @return {@link HoodieData} of records with record locations set
*/
@Override
protected <R> HoodieData<HoodieRecord<R>> tagLocationInternal(
HoodieData<HoodieRecord<R>> inputRecords, HoodieEngineContext context,
HoodieTable hoodieTable) {
List<Pair<String, HoodieBaseFile>> latestBaseFiles = getAllBaseFilesInTable(context, hoodieTable);
HoodiePairData<String, HoodieRecordGlobalLocation> allKeysAndLocations =
fetchRecordGlobalLocations(context, hoodieTable, config.getGlobalSimpleIndexParallelism(), latestBaseFiles);
boolean mayContainDuplicateLookup = hoodieTable.getMetaClient().getTableType() == MERGE_ON_READ;
boolean shouldUpdatePartitionPath = config.getGlobalSimpleIndexUpdatePartitionPath() && hoodieTable.isPartitioned();
return tagGlobalLocationBackToRecords(inputRecords, allKeysAndLocations,
mayContainDuplicateLookup, shouldUpdatePartitionPath, config, hoodieTable);
} | 3.68 |
hadoop_FederationMembershipStateStoreInputValidator_checkAddress | /**
* Validate if the SubCluster Address is a valid URL or not.
*
* @param address the endpoint of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException if the address is invalid
*/
private static void checkAddress(String address)
throws FederationStateStoreInvalidInputException {
// Ensure url is not null
if (address == null || address.isEmpty()) {
String message = "Missing SubCluster Endpoint information."
+ " Please try again by specifying SubCluster Endpoint information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// Validate url is well formed
boolean hasScheme = address.contains("://");
URI uri = null;
try {
uri = hasScheme ? URI.create(address)
: URI.create("dummyscheme://" + address);
} catch (IllegalArgumentException e) {
String message = "The provided SubCluster Endpoint does not contain a"
+ " valid host:port authority: " + address;
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
String host = uri.getHost();
int port = uri.getPort();
String path = uri.getPath();
if ((host == null) || (port < 0)
|| (!hasScheme && path != null && !path.isEmpty())) {
String message = "The provided SubCluster Endpoint does not contain a"
+ " valid host:port authority: " + address;
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.68 |
hudi_CleanerUtils_convertToHoodieCleanFileInfoList | /**
* Convert list of cleanFileInfo instances to list of avro-generated HoodieCleanFileInfo instances.
* @param cleanFileInfoList list of {@link CleanFileInfo} instances to convert
* @return list of avro-generated HoodieCleanFileInfo instances
*/
public static List<HoodieCleanFileInfo> convertToHoodieCleanFileInfoList(List<CleanFileInfo> cleanFileInfoList) {
return cleanFileInfoList.stream().map(CleanFileInfo::toHoodieFileCleanInfo).collect(Collectors.toList());
} | 3.68 |
pulsar_AbstractHdfsConnector_getFileSystem | /**
* This exists in order to allow unit tests to override it so that they don't take several
* minutes waiting for UDP packets to be received.
*
* @param config
* the configuration to use
* @return the FileSystem that is created for the given Configuration
* @throws IOException
* if unable to create the FileSystem
*/
protected FileSystem getFileSystem(final Configuration config) throws IOException {
return FileSystem.get(config);
} | 3.68 |
hbase_BufferedMutatorParams_listener | /**
* Override the default error handler. Default handler simply rethrows the exception.
*/
public BufferedMutatorParams listener(BufferedMutator.ExceptionListener listener) {
this.listener = listener;
return this;
} | 3.68 |
hudi_DirectMarkerTransactionManager_createUpdatedLockProps | /**
* Rebuilds lock-related configs. Only supports ZK-based locks for now.
*
* @param writeConfig Hudi write configs.
* @param partitionPath Relative partition path.
* @param fileId File ID.
* @return Updated lock related configs.
*/
private static TypedProperties createUpdatedLockProps(
HoodieWriteConfig writeConfig, String partitionPath, String fileId) {
if (!ZookeeperBasedLockProvider.class.getName().equals(writeConfig.getLockProviderClass())) {
throw new HoodieNotSupportedException("Only Support ZK-based lock for DirectMarkerTransactionManager now.");
}
TypedProperties props = new TypedProperties(writeConfig.getProps());
props.setProperty(LockConfiguration.ZK_LOCK_KEY_PROP_KEY, (null != partitionPath && !partitionPath.isEmpty()) ? partitionPath + "/" + fileId : fileId);
return props;
} | 3.68 |
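The resulting ZooKeeper lock key is simply `partitionPath + "/" + fileId`, or just the file ID for an empty partition path. A standalone sketch of that key construction, with illustrative values:

```java
public class LockKeySketch {
    // Mirrors the key construction above; values are illustrative only.
    static String zkLockKey(String partitionPath, String fileId) {
        return (partitionPath != null && !partitionPath.isEmpty())
            ? partitionPath + "/" + fileId
            : fileId;
    }

    public static void main(String[] args) {
        System.out.println(zkLockKey("2023/10/01", "file-0001")); // 2023/10/01/file-0001
        System.out.println(zkLockKey("", "file-0001"));           // file-0001
    }
}
```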
flink_Predicates_arePublicStaticFinalOfTypeWithAnnotation | /**
* Tests that the field is {@code public static final}, has the fully qualified type name of
* {@code fqClassName} and is annotated with the {@code annotationType}.
*/
public static DescribedPredicate<JavaField> arePublicStaticFinalOfTypeWithAnnotation(
String fqClassName, Class<? extends Annotation> annotationType) {
return arePublicStaticFinalOfType(fqClassName).and(annotatedWith(annotationType));
} | 3.68 |
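The helper is plain predicate composition. The sketch below shows the same pattern with `java.util.function.Predicate` over reflective `Field`s, independent of ArchUnit (illustrative only, not how Flink's architecture tests consume it):

```java
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.function.Predicate;

public class FieldPredicateSketch {
    // Same composition idea as above, expressed with java.util.function.Predicate
    // over reflective Fields instead of ArchUnit's JavaField.
    static Predicate<Field> publicStaticFinalOfTypeWithAnnotation(
            String fqClassName, Class<? extends Annotation> annotationType) {
        Predicate<Field> publicStaticFinal = f -> {
            int m = f.getModifiers();
            return Modifier.isPublic(m) && Modifier.isStatic(m) && Modifier.isFinal(m);
        };
        Predicate<Field> ofType = f -> f.getType().getName().equals(fqClassName);
        Predicate<Field> annotated = f -> f.isAnnotationPresent(annotationType);
        return publicStaticFinal.and(ofType).and(annotated);
    }
}
```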
hadoop_SampleQuantiles_compress | /**
* Try to remove extraneous items from the set of sampled items. This checks
* if an item is unnecessary based on the desired error bounds, and merges it
* with the adjacent item if it is.
*/
private void compress() {
if (samples.size() < 2) {
return;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem prev = null;
SampleItem next = it.next();
while (it.hasNext()) {
prev = next;
next = it.next();
if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) {
next.g += prev.g;
// Remove prev. it.remove() kills the last thing returned.
it.previous();
it.previous();
it.remove();
// it.next() is now equal to next, skip it back forward again
it.next();
}
}
} | 3.68 |
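A self-contained sketch of the same merge rule on a simplified sample list, using a fixed error budget in place of the rank-dependent `allowableError(...)` (the item type and the constant bound are illustrative):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;

public class CompressSketch {
    // Simplified stand-in for SampleItem: g = rank gap to the previous sample,
    // delta = uncertainty of this sample's rank.
    static class Item {
        long g;
        long delta;
        Item(long g, long delta) { this.g = g; this.delta = delta; }
    }

    // Merge adjacent items whenever the combined rank uncertainty stays within
    // a fixed budget (the real code uses a rank-dependent bound instead).
    static void compress(List<Item> samples, long errorBudget) {
        if (samples.size() < 2) {
            return;
        }
        ListIterator<Item> it = samples.listIterator();
        Item next = it.next();
        while (it.hasNext()) {
            Item prev = next;
            next = it.next();
            if (prev.g + next.g + next.delta <= errorBudget) {
                next.g += prev.g;  // fold prev's rank gap into next
                it.previous();     // step back over next
                it.previous();     // step back over prev
                it.remove();       // drop prev
                it.next();         // move forward past next again
            }
        }
    }

    public static void main(String[] args) {
        List<Item> samples = new ArrayList<>();
        samples.add(new Item(1, 0));
        samples.add(new Item(1, 0));
        samples.add(new Item(1, 0));
        compress(samples, 2);
        System.out.println(samples.size()); // 2: one merge fit the budget, the next did not
    }
}
```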
flink_Tuple22_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple22)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple22 tuple = (Tuple22) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) {
return false;
}
if (f20 != null ? !f20.equals(tuple.f20) : tuple.f20 != null) {
return false;
}
if (f21 != null ? !f21.equals(tuple.f21) : tuple.f21 != null) {
return false;
}
return true;
} | 3.68 |
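The hand-rolled member checks are equivalent to a null-safe, field-by-field comparison over the tuple's arity. A hedged sketch of that equivalent form, written against the `Tuple` base class rather than how Flink generates its tuple code:

```java
import java.util.Objects;

import org.apache.flink.api.java.tuple.Tuple;

public class TupleEqualsSketch {
    // Null-safe, field-by-field comparison equivalent to the hand-rolled checks
    // above; works for any tuple arity, including Tuple22.
    static boolean deepEquals(Tuple a, Tuple b) {
        if (a.getArity() != b.getArity()) {
            return false;
        }
        for (int i = 0; i < a.getArity(); i++) {
            if (!Objects.equals(a.getField(i), b.getField(i))) {
                return false;
            }
        }
        return true;
    }
}
```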
hudi_BaseHoodieWriteClient_scheduleCleaningAtInstant | /**
* Schedules a new cleaning instant with passed-in instant time.
 * @param instantTime the cleaning instant time
 * @param extraMetadata extra metadata to be stored with the requested clean
 * @return true if a cleaning plan was scheduled for the given instant
*/
protected boolean scheduleCleaningAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.CLEAN).isPresent();
} | 3.68 |
flink_WriteSinkFunction_cleanFile | /**
* Creates target file if it does not exist, cleans it if it exists.
*
 * @param path the path to the location where the tuples are written
*/
protected void cleanFile(String path) {
try {
PrintWriter writer;
writer = new PrintWriter(path);
writer.print("");
writer.close();
} catch (FileNotFoundException e) {
throw new RuntimeException(
"An error occurred while cleaning the file: " + e.getMessage(), e);
}
} | 3.68 |
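The same truncate-or-create step can also be written with try-with-resources so the writer is closed even if printing fails; a hedged alternative sketch (not the Flink implementation):

```java
import java.io.FileNotFoundException;
import java.io.PrintWriter;

public class CleanFileSketch {
    // Same truncate-or-create step, but with try-with-resources so the writer
    // is closed even when printing fails.
    static void cleanFile(String path) {
        try (PrintWriter writer = new PrintWriter(path)) {
            writer.print("");
        } catch (FileNotFoundException e) {
            throw new RuntimeException(
                "An error occurred while cleaning the file: " + e.getMessage(), e);
        }
    }
}
```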
hadoop_Utils_writeString | /**
* Write a String as a VInt n, followed by n Bytes as in Text format.
*
 * @param out the output to write to
 * @param s the string to write; may be null
* @throws IOException raised on errors performing I/O.
*/
public static void writeString(DataOutput out, String s) throws IOException {
if (s != null) {
Text text = new Text(s);
byte[] buffer = text.getBytes();
int len = text.getLength();
writeVInt(out, len);
out.write(buffer, 0, len);
} else {
writeVInt(out, -1);
}
} | 3.68 |
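A hedged usage sketch: any `DataOutput` works, so an in-memory stream is enough to observe the length-prefixed encoding (the import below assumes the TFile `Utils` class shown above; the byte counts in the comments are illustrative):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.file.tfile.Utils; // assumed location of the class above

public class WriteStringExample {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            Utils.writeString(out, "hello"); // VInt length 5, then the 5 UTF-8 bytes
            Utils.writeString(out, null);    // encoded as VInt -1, no payload
        }
        System.out.println("total bytes written: " + bytes.size());
    }
}
```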
hbase_CompactSplit_onConfigurationChange | /**
* {@inheritDoc}
*/
@Override
public void onConfigurationChange(Configuration newConf) {
// Check if number of large / small compaction threads has changed, and then
// adjust the core pool size of the thread pools, by using the
// setCorePoolSize() method. According to the javadocs, it is safe to
// change the core pool size on-the-fly. We need to reset the maximum
// pool size, as well.
int largeThreads =
Math.max(1, newConf.getInt(LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT));
if (this.longCompactions.getCorePoolSize() != largeThreads) {
LOG.info("Changing the value of " + LARGE_COMPACTION_THREADS + " from "
+ this.longCompactions.getCorePoolSize() + " to " + largeThreads);
if (this.longCompactions.getCorePoolSize() < largeThreads) {
this.longCompactions.setMaximumPoolSize(largeThreads);
this.longCompactions.setCorePoolSize(largeThreads);
} else {
this.longCompactions.setCorePoolSize(largeThreads);
this.longCompactions.setMaximumPoolSize(largeThreads);
}
}
int smallThreads = newConf.getInt(SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT);
if (this.shortCompactions.getCorePoolSize() != smallThreads) {
LOG.info("Changing the value of " + SMALL_COMPACTION_THREADS + " from "
+ this.shortCompactions.getCorePoolSize() + " to " + smallThreads);
if (this.shortCompactions.getCorePoolSize() < smallThreads) {
this.shortCompactions.setMaximumPoolSize(smallThreads);
this.shortCompactions.setCorePoolSize(smallThreads);
} else {
this.shortCompactions.setCorePoolSize(smallThreads);
this.shortCompactions.setMaximumPoolSize(smallThreads);
}
}
int splitThreads = newConf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT);
if (this.splits.getCorePoolSize() != splitThreads) {
LOG.info("Changing the value of " + SPLIT_THREADS + " from " + this.splits.getCorePoolSize()
+ " to " + splitThreads);
if (this.splits.getCorePoolSize() < splitThreads) {
this.splits.setMaximumPoolSize(splitThreads);
this.splits.setCorePoolSize(splitThreads);
} else {
this.splits.setCorePoolSize(splitThreads);
this.splits.setMaximumPoolSize(splitThreads);
}
}
ThroughputController old = this.compactionThroughputController;
if (old != null) {
old.stop("configuration change");
}
this.compactionThroughputController =
CompactionThroughputControllerFactory.create(server, newConf);
    // We change this atomically here instead of reloading the config, so that upstream
    // remains the only place with the flexibility to reload the config.
this.conf.reloadConfiguration();
} | 3.68 |
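The grow/shrink branches encode one ordering rule: when growing, raise the maximum pool size before the core size; when shrinking, lower the core size first, so the core never exceeds the maximum between the two calls. A standalone sketch of that rule for any `ThreadPoolExecutor` (not HBase-specific):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolResizeSketch {
    // Resizes a fixed-size pool (core == max) without ever letting
    // corePoolSize exceed maximumPoolSize in between the two calls.
    static void resize(ThreadPoolExecutor pool, int newSize) {
        if (pool.getCorePoolSize() < newSize) {
            pool.setMaximumPoolSize(newSize);
            pool.setCorePoolSize(newSize);
        } else {
            pool.setCorePoolSize(newSize);
            pool.setMaximumPoolSize(newSize);
        }
    }

    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            2, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        resize(pool, 4);
        System.out.println(pool.getCorePoolSize() + "/" + pool.getMaximumPoolSize()); // 4/4
        pool.shutdown();
    }
}
```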
flink_TumbleWithSizeOnTime_as | /**
* Assigns an alias for this window that the following {@code groupBy()} and {@code select()}
* clause can refer to. {@code select()} statement can access window properties such as window
* start or end time.
*
* @param alias alias for this window
* @return this window
*/
public TumbleWithSizeOnTimeWithAlias as(String alias) {
return as(unresolvedRef(alias));
} | 3.68 |
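A hedged usage sketch with the expression-based Table API of recent Flink versions; the `orders` table and its `rowtime`, `user`, and `amount` columns are placeholders:

```java
import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.lit;

import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.Tumble;

public class TumbleAliasExample {
    // 'orders' is assumed to carry a rowtime attribute plus user/amount columns.
    public static Table tenMinuteTotals(Table orders) {
        return orders
            .window(Tumble.over(lit(10).minutes()).on($("rowtime")).as("w"))
            .groupBy($("w"), $("user"))
            .select($("user"), $("w").start(), $("amount").sum().as("total"));
    }
}
```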
hbase_RequestConverter_buildIsMasterRunningRequest | /**
* Creates a protocol buffer IsMasterRunningRequest
* @return a IsMasterRunningRequest
*/
public static IsMasterRunningRequest buildIsMasterRunningRequest() {
return IsMasterRunningRequest.newBuilder().build();
} | 3.68 |
framework_MinimalWidthColumns_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
TreeTable tt = new TreeTable();
tt.addContainerProperty("Foo", String.class, "");
tt.addContainerProperty("Bar", String.class, "");
Object item1 = tt.addItem(new Object[] { "f", "Bar" }, null);
Object item2 = tt.addItem(new Object[] { "Foo2", "Bar2" }, null);
tt.setParent(item2, item1);
tt.setColumnWidth("Foo", 0);
tt.setColumnWidth("Bar", 50);
tt.setWidth("300px");
addComponent(tt);
} | 3.68 |