name | code_snippet | score
---|---|---
morf_HumanReadableStatementHelper_generatePortableSqlStatementString | /**
* Generates a human-readable description of a raw SQL operation.
*
* @param statement the data upgrade statement to describe.
* @param preferredSQLDialect the SQL dialect to use, if available. If this is {@code null} or the preferred
* dialect is not available in the statement bundle then an arbitrary choice is made.
* @return a string containing the human-readable description of the operation.
*/
private static String generatePortableSqlStatementString(final PortableSqlStatement statement, final String preferredSQLDialect) {
final Map<String, String> sqlStrings = statement.getStatements();
String sql = sqlStrings.get(preferredSQLDialect);
if (sql == null) {
sql = sqlStrings.values().iterator().next();
}
final StringBuilder sb = new StringBuilder("Run the following raw SQL statement");
// Raw SQL fragments may have either "\n" written into them or use the platform separator
final String[] lines = sql.split(System.lineSeparator() + "|\\n");
for (int i = 0; i < lines.length; i++) {
if (i > 0) {
sb.append(System.lineSeparator()).append(" ");
} else {
sb.append(System.lineSeparator()).append(" - ");
}
sb.append(lines[i]);
}
return sb.toString();
} | 3.68 |
hadoop_BondedS3AStatisticsContext_newInputStreamStatistics | /**
* Create a stream input statistics instance.
* The FileSystem.Statistics instance of the {@link #statisticsSource}
* is used as the reference to FileSystem statistics to update
* @return the new instance
*/
@Override
public S3AInputStreamStatistics newInputStreamStatistics() {
return getInstrumentation().newInputStreamStatistics(
statisticsSource.getInstanceStatistics());
} | 3.68 |
framework_Design_read | /**
* Loads a design from the given input stream.
*
* @param design
* The stream to read the design from
* @return The root component of the design
*/
public static Component read(InputStream design) {
DesignContext context = read(design, null);
return context.getRootComponent();
} | 3.68 |
framework_EventRouter_removeListener | /*
* Removes the event listener method matching the given parameters.
* Don't add a JavaDoc comment here, we use the default documentation from
* implemented interface.
*/
@Override
@Deprecated
public void removeListener(Class<?> eventType, Object target,
String methodName) {
// Find the correct method
final Method[] methods = target.getClass().getMethods();
Method method = null;
for (Method m : methods) {
if (m.getName().equals(methodName)) {
method = m;
break;
}
}
if (method == null) {
throw new IllegalArgumentException();
}
// Remove the listeners
if (listenerList != null) {
final Iterator<ListenerMethod> i = listenerList.iterator();
while (i.hasNext()) {
final ListenerMethod lm = i.next();
if (lm.matches(eventType, target, method)) {
i.remove();
return;
}
}
}
} | 3.68 |
flink_GettingStartedExample_eval | // the 'eval()' method defines input and output types (reflectively extracted)
// and contains the runtime logic
public String eval(String street, String zipCode, String city) {
return normalize(street) + ", " + normalize(zipCode) + ", " + normalize(city);
} | 3.68 |
hudi_TableServicePipeline_execute | /**
* Run all table service tasks sequentially.
*/
public void execute() {
tableServiceTasks.forEach(TableServiceTask::run);
} | 3.68 |
hbase_ServerSideScanMetrics_createCounter | /**
* Create a new counter with the specified name
* @return {@link AtomicLong} instance for the counter with counterName
*/
protected AtomicLong createCounter(String counterName) {
AtomicLong c = new AtomicLong(0);
counters.put(counterName, c);
return c;
} | 3.68 |
dubbo_BaseServiceMetadata_getDisplayServiceKey | /**
* Format : interface:version
*
* @return
*/
public String getDisplayServiceKey() {
StringBuilder serviceNameBuilder = new StringBuilder();
serviceNameBuilder.append(serviceInterfaceName);
serviceNameBuilder.append(COLON_SEPARATOR).append(version);
return serviceNameBuilder.toString();
} | 3.68 |
flink_ProjectOperator_types | /** @deprecated Deprecated method only kept for compatibility. */
@SuppressWarnings("unchecked")
@Deprecated
@PublicEvolving
public <R extends Tuple> ProjectOperator<IN, R> types(Class<?>... types) {
TupleTypeInfo<R> typeInfo = (TupleTypeInfo<R>) this.getResultType();
if (types.length != typeInfo.getArity()) {
throw new InvalidProgramException("Provided types do not match projection.");
}
for (int i = 0; i < types.length; i++) {
Class<?> typeClass = types[i];
if (!typeClass.equals(typeInfo.getTypeAt(i).getTypeClass())) {
throw new InvalidProgramException(
"Provided type "
+ typeClass.getSimpleName()
+ " at position "
+ i
+ " does not match projection");
}
}
return (ProjectOperator<IN, R>) this;
} | 3.68 |
hbase_InclusiveStopFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.InclusiveStopFilter.Builder builder =
FilterProtos.InclusiveStopFilter.newBuilder();
if (this.stopRowKey != null)
builder.setStopRowKey(UnsafeByteOperations.unsafeWrap(this.stopRowKey));
return builder.build().toByteArray();
} | 3.68 |
framework_AriaHelper_ensureHasId | /**
* Makes sure that the provided element has an id attribute. Adds a new
* unique id if not.
*
* @param element
* Element to check
* @return String with the id of the element
*/
public static String ensureHasId(Element element) {
assert element != null : "Valid Element required";
String id = element.getId();
if (null == id || id.isEmpty()) {
id = DOM.createUniqueId();
element.setId(id);
}
return id;
} | 3.68 |
hbase_HMaster_getSnapshotManager | /** Returns the underlying snapshot manager */
@Override
public SnapshotManager getSnapshotManager() {
return this.snapshotManager;
} | 3.68 |
shardingsphere-elasticjob_TransactionOperation_opCheckExists | /**
* Operation check exists.
*
* @param key key
* @return TransactionOperation
*/
public static TransactionOperation opCheckExists(final String key) {
return new TransactionOperation(Type.CHECK_EXISTS, key, null);
} | 3.68 |
framework_AbstractMedia_setPreload | /**
* Sets the preload attribute that is intended to provide a hint to the
* browser how the media should be preloaded. Valid values are 'none',
* 'metadata', 'auto', see the <a href=
* "https://developer.mozilla.org/en/docs/Web/HTML/Element/video#attr-preload">
* Mozilla Developer Network</a> for details.
*
* @param preload
* preload mode
* @since 7.7.11
*/
public void setPreload(final PreloadMode preload) {
getState().preload = preload;
} | 3.68 |
framework_PointerDownEvent_getType | /**
* Gets the event type associated with PointerDownEvent events.
*
* @return the handler type
*/
public static Type<PointerDownHandler> getType() {
return TYPE;
} | 3.68 |
framework_ComboBox_setPopupWidth | /**
* Sets the suggestion pop-up's width as a CSS string. By using relative
* units (e.g. "50%") it's possible to set the popup's width relative to the
* ComboBox itself.
*
* @see #getPopupWidth()
* @since 7.7
* @param width
* the width
*/
public void setPopupWidth(String width) {
suggestionPopupWidth = width;
markAsDirty();
} | 3.68 |
hbase_MasterObserver_preDeleteTable | /**
* Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of
* delete table RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void preDeleteTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
} | 3.68 |
flink_SlotProfile_getPreferredLocations | /** Returns the preferred locations for the slot. */
public Collection<TaskManagerLocation> getPreferredLocations() {
return preferredLocations;
} | 3.68 |
hbase_RegionCoprocessorHost_preBulkLoadHFile | /**
* @param familyPaths pairs of { CF, file path } submitted for bulk load
*/
public void preBulkLoadHFile(final List<Pair<byte[], String>> familyPaths) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preBulkLoadHFile(this, familyPaths);
}
});
} | 3.68 |
dubbo_CtClassBuilder_getQualifiedClassName | /**
* get fully qualified class name
*
* @param className super class name, which may or may not be qualified
*/
protected String getQualifiedClassName(String className) {
if (className.contains(".")) {
return className;
}
if (fullNames.containsKey(className)) {
return fullNames.get(className);
}
return ClassUtils.forName(imports.toArray(new String[0]), className).getName();
} | 3.68 |
hadoop_ExecutorServiceFuturePool_shutdown | /**
* Utility to shutdown the {@link ExecutorService} used by this class. Will wait up to a
* certain timeout for the ExecutorService to gracefully shutdown.
*
* @param logger Logger
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
*/
public void shutdown(Logger logger, long timeout, TimeUnit unit) {
HadoopExecutors.shutdown(executor, logger, timeout, unit);
} | 3.68 |
flink_DataTypeTemplate_isForceAnyPattern | /** Returns whether the given class must be treated as RAW type. */
boolean isForceAnyPattern(@Nullable Class<?> clazz) {
if (forceRawPattern == null || clazz == null) {
return false;
}
final String className = clazz.getName();
for (String pattern : forceRawPattern) {
if (className.startsWith(pattern)) {
return true;
}
}
return false;
} | 3.68 |
hudi_HoodieFlinkWriteClient_waitForCleaningFinish | /**
* Blocks and wait for the async cleaning service to finish.
*
* <p>The Flink write client is designed to write the data set as buckets,
* but the cleaning action should only trigger after all the write actions within a
* checkpoint finish.
*/
public void waitForCleaningFinish() {
if (tableServiceClient.asyncCleanerService != null) {
LOG.info("Cleaner has been spawned already. Waiting for it to finish");
tableServiceClient.asyncClean();
LOG.info("Cleaner has finished");
}
} | 3.68 |
hbase_StorageClusterStatusModel_getRootIndexSizeKB | /** Returns The current total size of root-level indexes for the region, in KB. */
@XmlAttribute
public int getRootIndexSizeKB() {
return rootIndexSizeKB;
} | 3.68 |
hudi_HoodieTableMetadataUtil_deleteMetadataTablePartition | /**
* Delete a partition within the metadata table.
* <p>
* This can be used to delete a partition so that it can be re-bootstrapped.
*
* @param dataMetaClient {@code HoodieTableMetaClient} of the dataset for which metadata table is to be deleted
* @param context instance of {@code HoodieEngineContext}.
* @param backup Whether metadata table should be backed up before deletion. If true, the table is backed up to the
* directory with name metadata_<current_timestamp>.
* @param partitionType The partition to delete
* @return The backup directory if backup was requested, null otherwise
*/
public static String deleteMetadataTablePartition(HoodieTableMetaClient dataMetaClient, HoodieEngineContext context,
MetadataPartitionType partitionType, boolean backup) {
if (partitionType.equals(MetadataPartitionType.FILES)) {
return deleteMetadataTable(dataMetaClient, context, backup);
}
final Path metadataTablePartitionPath = new Path(HoodieTableMetadata.getMetadataTableBasePath(dataMetaClient.getBasePath()), partitionType.getPartitionPath());
FileSystem fs = FSUtils.getFs(metadataTablePartitionPath.toString(), context.getHadoopConf().get());
dataMetaClient.getTableConfig().setMetadataPartitionState(dataMetaClient, partitionType, false);
try {
if (!fs.exists(metadataTablePartitionPath)) {
return null;
}
} catch (FileNotFoundException e) {
// Ignoring exception as metadata table already does not exist
LOG.debug("Metadata table partition " + partitionType + " not found at path " + metadataTablePartitionPath);
return null;
} catch (Exception e) {
throw new HoodieMetadataException(String.format("Failed to check existence of MDT partition %s at path %s: ", partitionType, metadataTablePartitionPath), e);
}
if (backup) {
final Path metadataPartitionBackupPath = new Path(metadataTablePartitionPath.getParent().getParent(),
String.format(".metadata_%s_%s", partitionType.getPartitionPath(), dataMetaClient.createNewInstantTime(false)));
LOG.info(String.format("Backing up MDT partition %s to %s before deletion", partitionType, metadataPartitionBackupPath));
try {
if (fs.rename(metadataTablePartitionPath, metadataPartitionBackupPath)) {
return metadataPartitionBackupPath.toString();
}
} catch (Exception e) {
// If rename fails, we will try to delete the table instead
LOG.error(String.format("Failed to backup MDT partition %s using rename", partitionType), e);
}
} else {
LOG.info("Deleting metadata table partition from " + metadataTablePartitionPath);
try {
fs.delete(metadataTablePartitionPath, true);
} catch (Exception e) {
throw new HoodieMetadataException("Failed to delete metadata table partition from path " + metadataTablePartitionPath, e);
}
}
return null;
} | 3.68 |
hadoop_ShortWritable_equals | /** Returns true iff <code>o</code> is a ShortWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof ShortWritable))
return false;
ShortWritable other = (ShortWritable) o;
return this.value == other.value;
} | 3.68 |
flink_HighAvailabilityServices_getWebMonitorLeaderRetriever | /**
* This retriever should no longer be used on the cluster side. The web monitor retriever is
* only required on the client side, and we have a dedicated high-availability service for the
* client, named {@link ClientHighAvailabilityServices}. See also FLINK-13750.
*
* @return the leader retriever for web monitor
* @deprecated just use {@link #getClusterRestEndpointLeaderRetriever()} instead of this method.
*/
@Deprecated
default LeaderRetrievalService getWebMonitorLeaderRetriever() {
throw new UnsupportedOperationException(
"getWebMonitorLeaderRetriever should no longer be used. Instead use "
+ "#getClusterRestEndpointLeaderRetriever to instantiate the cluster "
+ "rest endpoint leader retriever. If you called this method, then "
+ "make sure that #getClusterRestEndpointLeaderRetriever has been "
+ "implemented by your HighAvailabilityServices implementation.");
} | 3.68 |
hbase_SaslAuthMethod_getAuthMethod | /**
* Returns the Hadoop {@link AuthenticationMethod} for this method.
*/
public AuthenticationMethod getAuthMethod() {
return method;
} | 3.68 |
pulsar_TxnMetaImpl_checkTxnStatus | /**
* Check if the transaction is in an expected status.
*
* @param expectedStatus
*/
private synchronized void checkTxnStatus(TxnStatus expectedStatus) throws InvalidTxnStatusException {
if (this.txnStatus != expectedStatus) {
throw new InvalidTxnStatusException(
txnID, expectedStatus, txnStatus
);
}
} | 3.68 |
AreaShop_Utils_getImportantWorldEditRegions | /**
* Get a list of regions around a location.
* - Returns the highest priority regions, preferring child over parent regions
* @param location The location to check for regions
* @return an empty list if no regions are found, a single region if one has the highest priority, or multiple regions if they share the same priority
*/
public static List<ProtectedRegion> getImportantWorldEditRegions(Location location) {
List<ProtectedRegion> result = new ArrayList<>();
Set<ProtectedRegion> regions = AreaShop.getInstance().getWorldGuardHandler().getApplicableRegionsSet(location);
if(regions != null) {
boolean first = true;
for(ProtectedRegion pr : regions) {
if(first) {
result.add(pr);
first = false;
} else {
if(pr.getPriority() > result.get(0).getPriority()) {
result.clear();
result.add(pr);
} else if(pr.getParent() != null && pr.getParent().equals(result.get(0))) {
result.clear();
result.add(pr);
} else {
result.add(pr);
}
}
}
}
return result;
} | 3.68 |
hbase_ClientMetaTableAccessor_getResults | /** Returns Collected results; wait till visits complete to collect all possible results */
List<T> getResults() {
return this.results;
} | 3.68 |
hudi_CleanPlanner_getSavepointedDataFiles | /**
* Get the list of data file names savepointed.
*/
public Stream<String> getSavepointedDataFiles(String savepointTime) {
if (!hoodieTable.getSavepointTimestamps().contains(savepointTime)) {
throw new HoodieSavepointException(
"Could not get data files for savepoint " + savepointTime + ". No such savepoint.");
}
HoodieInstant instant = new HoodieInstant(false, HoodieTimeline.SAVEPOINT_ACTION, savepointTime);
HoodieSavepointMetadata metadata;
try {
metadata = TimelineMetadataUtils.deserializeHoodieSavepointMetadata(
hoodieTable.getActiveTimeline().getInstantDetails(instant).get());
} catch (IOException e) {
throw new HoodieSavepointException("Could not get savepointed data files for savepoint " + savepointTime, e);
}
return metadata.getPartitionMetadata().values().stream().flatMap(s -> s.getSavepointDataFile().stream());
} | 3.68 |
AreaShop_AreaShop_onEnable | /**
* Called on start or reload of the server.
*/
@Override
public void onEnable() {
AreaShop.instance = this;
Do.init(this);
managers = new HashSet<>();
boolean error = false;
// Find WorldEdit integration version to load
String weVersion = null;
String rawWeVersion = null;
String weBeta = null;
Plugin plugin = getServer().getPluginManager().getPlugin("WorldEdit");
if(!(plugin instanceof WorldEditPlugin) || !plugin.isEnabled()) {
error("WorldEdit plugin is not present or has not loaded correctly");
error = true;
} else {
worldEdit = (WorldEditPlugin)plugin;
rawWeVersion = worldEdit.getDescription().getVersion();
// Find beta version
Pattern pattern = Pattern.compile("beta-?\\d+");
Matcher matcher = pattern.matcher(rawWeVersion);
if (matcher.find()) {
weBeta = matcher.group();
}
// Get correct WorldEditInterface (handles things that changed version to version)
if(worldEdit.getDescription().getVersion().startsWith("5.")) {
weVersion = "5";
} else if(worldEdit.getDescription().getVersion().startsWith("6.")) {
weVersion = "6";
} else if ("beta-01".equalsIgnoreCase(weBeta)) {
weVersion = "7_beta_1";
} else {
// beta-02 and beta-03 also have the new vector system already
weVersion = "7_beta_4";
}
weVersion = "WorldEditHandler" + weVersion;
}
// Find WorldGuard integration version to load
String wgVersion = null;
String rawWgVersion = null;
int major = 0;
int minor = 0;
int fixes = 0;
Integer build = null;
plugin = getServer().getPluginManager().getPlugin("WorldGuard");
if(!(plugin instanceof WorldGuardPlugin) || !plugin.isEnabled()) {
error("WorldGuard plugin is not present or has not loaded correctly");
error = true;
} else {
worldGuard = (WorldGuardPlugin)plugin;
// Get correct WorldGuardInterface (handles things that changed version to version)
try {
rawWgVersion = worldGuard.getDescription().getVersion();
if(rawWgVersion.contains("-SNAPSHOT;")) {
String buildNumber = rawWgVersion.substring(rawWgVersion.indexOf("-SNAPSHOT;") + 10);
if(buildNumber.contains("-")) {
buildNumber = buildNumber.substring(0, buildNumber.indexOf('-'));
if (Utils.isNumeric(buildNumber)) {
build = Integer.parseInt(buildNumber);
} else {
warn("Could not correctly parse the build of WorldGuard, raw version: " + rawWgVersion + ", buildNumber: " + buildNumber);
}
}
}
// Clear stuff from the version string that is not a number
String[] versionParts = rawWgVersion.split("\\.");
for(int i = 0; i < versionParts.length; i++) {
Pattern pattern = Pattern.compile("^\\d+");
Matcher matcher = pattern.matcher(versionParts[i]);
if(matcher.find()) {
versionParts[i] = matcher.group();
}
}
// Find major, minor and fix numbers
try {
if(versionParts.length > 0) {
major = Integer.parseInt(versionParts[0]);
}
if(versionParts.length > 1) {
minor = Integer.parseInt(versionParts[1]);
}
if(versionParts.length > 2) {
fixes = Integer.parseInt(versionParts[2]);
}
} catch(NumberFormatException e) {
warn("Something went wrong while parsing WorldGuard version number: " + rawWgVersion);
}
// Determine correct implementation to use
if(rawWgVersion.startsWith("5.")) {
wgVersion = "5";
} else if(major == 6 && minor == 1 && fixes < 3) {
wgVersion = "6";
} else if(major == 6) {
if(build != null && build == 1672) {
error = true;
error("Build 1672 of WorldGuard is broken, update to a later build or a stable version!");
} else if(build != null && build < 1672) {
wgVersion = "6";
} else {
wgVersion = "6_1_3";
}
} else if ("beta-01".equalsIgnoreCase(weBeta)) {
// When using WorldEdit beta-01, we need to use the WorldGuard variant with the old vector system
wgVersion = "7_beta_1";
} else {
// Even though the WorldGuard file is called beta-02, the reported version is still beta-01!
wgVersion = "7_beta_2";
}
} catch(Exception e) { // If version detection fails, at least try to load the latest version
warn("Parsing the WorldGuard version failed, assuming version 7_beta_2:", rawWgVersion);
wgVersion = "7_beta_2";
}
wgVersion = "WorldGuardHandler" + wgVersion;
}
// Check if FastAsyncWorldEdit is installed
boolean fawe;
try {
Class.forName("com.boydti.fawe.Fawe" );
fawe = true;
} catch (ClassNotFoundException ignore) {
fawe = false;
}
if (fawe) {
boolean useNewIntegration = true;
List<String> standardIntegrationVersions = Arrays.asList("1.7", "1.8", "1.9", "1.10", "1.11", "1.12");
for(String standardIntegrationVersion : standardIntegrationVersions) {
String version = Bukkit.getBukkitVersion();
// Detects '1.8', '1.8.3', '1.8-pre1' style versions
if(version.equals(standardIntegrationVersion)
|| version.startsWith(standardIntegrationVersion + ".")
|| version.startsWith(standardIntegrationVersion + "-")) {
useNewIntegration = false;
break;
}
}
if (useNewIntegration) {
weVersion = "FastAsyncWorldEditHandler";
wgVersion = "FastAsyncWorldEditWorldGuardHandler";
}
}
// Load WorldEdit
try {
Class<?> clazz = Class.forName("me.wiefferink.areashop.handlers." + weVersion);
// Check if we have a NMSHandler class at that location.
if(WorldEditInterface.class.isAssignableFrom(clazz)) { // Make sure it actually implements WorldEditInterface
worldEditInterface = (WorldEditInterface)clazz.getConstructor(AreaShopInterface.class).newInstance(this); // Set our handler
}
} catch(Exception e) {
error("Could not load the handler for WorldEdit (tried to load " + weVersion + "), report this problem to the author: " + ExceptionUtils.getStackTrace(e));
error = true;
weVersion = null;
}
// Load WorldGuard
try {
Class<?> clazz = Class.forName("me.wiefferink.areashop.handlers." + wgVersion);
// Check if we have a NMSHandler class at that location.
if(WorldGuardInterface.class.isAssignableFrom(clazz)) { // Make sure it actually implements WorldGuardInterface
worldGuardInterface = (WorldGuardInterface)clazz.getConstructor(AreaShopInterface.class).newInstance(this); // Set our handler
}
} catch(Exception e) {
error("Could not load the handler for WorldGuard (tried to load " + wgVersion + "), report this problem to the author:" + ExceptionUtils.getStackTrace(e));
error = true;
wgVersion = null;
}
// Load Bukkit implementation
String bukkitHandler;
try {
Class.forName("org.bukkit.block.data.type.WallSign");
bukkitHandler = "1_13";
} catch (ClassNotFoundException e) {
bukkitHandler = "1_12";
}
try {
Class<?> clazz = Class.forName("me.wiefferink.areashop.handlers.BukkitHandler" + bukkitHandler);
bukkitInterface = (BukkitInterface)clazz.getConstructor(AreaShopInterface.class).newInstance(this);
} catch (Exception e) {
error("Could not load the Bukkit handler (used for sign updates), tried to load:", bukkitHandler + ", report this problem to the author:", ExceptionUtils.getStackTrace(e));
error = true;
bukkitHandler = null;
}
// Check if Vault is present
if(getServer().getPluginManager().getPlugin("Vault") == null) {
error("Vault plugin is not present or has not loaded correctly");
error = true;
}
// Load all data from files and check versions
fileManager = new FileManager();
managers.add(fileManager);
boolean loadFilesResult = fileManager.loadFiles(false);
error = error || !loadFilesResult;
// Print loaded version of WG and WE in debug
if(wgVersion != null) {
AreaShop.debug("Loaded ", wgVersion, "(raw version:" + rawWgVersion + ", major:" + major + ", minor:" + minor + ", fixes:" + fixes + ", build:" + build + ", fawe:" + fawe + ")");
}
if(weVersion != null) {
AreaShop.debug("Loaded ", weVersion, "(raw version:" + rawWeVersion + ", beta:" + weBeta + ")");
}
if(bukkitHandler != null) {
AreaShop.debug("Loaded BukkitHandler", bukkitHandler);
}
setupLanguageManager();
if(error) {
error("The plugin has not started, fix the errors listed above");
} else {
featureManager = new FeatureManager();
managers.add(featureManager);
// Register the event listeners
getServer().getPluginManager().registerEvents(new PlayerLoginLogoutListener(this), this);
setupTasks();
// Startup the CommandManager (registers itself for the command)
commandManager = new CommandManager();
managers.add(commandManager);
// Create a signLinkerManager
signLinkerManager = new SignLinkerManager();
managers.add(signLinkerManager);
// Enable Metrics if config allows it
if(getConfig().getBoolean("sendStats")) {
Analytics.start();
}
// Register dynamic permission (things declared in config)
registerDynamicPermissions();
// Don't initialize the updatechecker if disabled in the config
if(getConfig().getBoolean("checkForUpdates")) {
githubUpdateCheck = new GithubUpdateCheck(
AreaShop.getInstance(),
"NLThijs48",
"AreaShop"
).withVersionComparator((latestVersion, currentVersion) ->
!cleanVersion(latestVersion).equals(cleanVersion(currentVersion))
).checkUpdate(result -> {
AreaShop.debug("Update check result:", result);
if(!result.hasUpdate()) {
return;
}
AreaShop.info("Update from AreaShop V" + cleanVersion(result.getCurrentVersion()) + " to AreaShop V" + cleanVersion(result.getLatestVersion()) + " available, get the latest version at https://www.spigotmc.org/resources/areashop.2991/");
for(Player player : Utils.getOnlinePlayers()) {
notifyUpdate(player);
}
});
}
}
} | 3.68 |
flink_JoinedStreams_apply | /**
* Completes the join operation with the user function that is executed for each combination
* of elements with the same key in a window.
*
* <p>Note: This method's return type does not support setting an operator-specific
* parallelism. Due to binary backwards compatibility, this cannot be altered. Use the
* {@link #with(JoinFunction, TypeInformation)} method to set an operator-specific
* parallelism.
*/
public <T> DataStream<T> apply(
JoinFunction<T1, T2, T> function, TypeInformation<T> resultType) {
// clean the closure
function = input1.getExecutionEnvironment().clean(function);
coGroupedWindowedStream =
input1.coGroup(input2)
.where(keySelector1)
.equalTo(keySelector2)
.window(windowAssigner)
.trigger(trigger)
.evictor(evictor)
.allowedLateness(allowedLateness);
return coGroupedWindowedStream.apply(new JoinCoGroupFunction<>(function), resultType);
} | 3.68 |
framework_VComboBox_updateSelectedIconPosition | /**
* Positions the icon vertically in the middle. Should be called after the
* icon has loaded
*/
private void updateSelectedIconPosition() {
// Position icon vertically to middle
int availableHeight = 0;
availableHeight = getOffsetHeight();
int iconHeight = WidgetUtil.getRequiredHeight(selectedItemIcon);
int marginTop = (availableHeight - iconHeight) / 2;
selectedItemIcon.getElement().getStyle().setMarginTop(marginTop,
Unit.PX);
} | 3.68 |
rocketmq-connect_Worker_stop | /**
* We can choose to persist in-memory task status
* so we can view history tasks
*/
public void stop() {
// stop and await connectors
if (!connectors.isEmpty()) {
log.warn("Shutting down connectors {} uncleanly; herder should have shut down connectors before the Worker is stopped", connectors.keySet());
stopAndAwaitConnectors();
}
executor.shutdown();
// stop connectors
if (workerState != null) {
workerState.set(WorkerState.TERMINATED);
}
Set<Runnable> runningTasks = this.runningTasks;
for (Runnable task : runningTasks) {
awaitStopTask((WorkerTask) task, 5000);
}
taskExecutor.shutdown();
// close offset committer
sourceTaskOffsetCommitter.ifPresent(committer -> committer.close(5000));
stateMachineService.shutdown();
try {
// close metrics
connectMetrics.close();
} catch (Exception e) {
}
} | 3.68 |
flink_EnrichedRowData_from | /**
* Creates a new {@link EnrichedRowData} with the provided {@code fixedRow} as the immutable
* static row, and uses the {@code producedRowFields}, {@code fixedRowFields} and {@code
* mutableRowFields} arguments to compute the indexes mapping.
*
* <p>The {@code producedRowFields} should include the name of fields of the full row once
* mutable and fixed rows are merged, while {@code fixedRowFields} and {@code mutableRowFields}
* should contain respectively the field names of fixed row and mutable row. All the lists are
* ordered with indexes matching the position of the field in the row. As an example, for a
* complete row {@code (a, b, c)} the mutable row might be {@code (a, c)} and the fixed row
* might be {@code (b)}
*/
public static EnrichedRowData from(
RowData fixedRow,
List<String> producedRowFields,
List<String> mutableRowFields,
List<String> fixedRowFields) {
return new EnrichedRowData(
fixedRow, computeIndexMapping(producedRowFields, mutableRowFields, fixedRowFields));
} | 3.68 |
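The javadoc above walks through a complete row (a, b, c) split into a mutable row (a, c) and a fixed row (b). A minimal sketch of how that example might translate into a call, assuming those field names and a string-valued fixed row (the import for EnrichedRowData is omitted because its package differs across Flink versions):

```java
import java.util.Arrays;
import java.util.List;

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;

class EnrichedRowDataFromSketch {
    static EnrichedRowData buildExampleRow() {
        // The immutable "fixed" part of the row holds only field "b".
        RowData fixedRow = GenericRowData.of(StringData.fromString("b-value"));

        List<String> producedRowFields = Arrays.asList("a", "b", "c");
        List<String> mutableRowFields = Arrays.asList("a", "c");
        List<String> fixedRowFields = Arrays.asList("b");

        // Positions 0 and 2 of the produced row will resolve to the mutable row,
        // position 1 to fixedRow, according to the computed index mapping.
        return EnrichedRowData.from(fixedRow, producedRowFields, mutableRowFields, fixedRowFields);
    }
}
```

The mutable row itself is not part of this call; it would be attached later through EnrichedRowData's own mutable-row setter before the row is read.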
flink_S3TestCredentials_getS3SecretKey | /**
* Gets the S3 Secret Key.
*
* <p>This method throws an exception if the key is not available. Tests should use {@link
* #assumeCredentialsAvailable()} to skip tests when credentials are not available.
*/
public static String getS3SecretKey() {
if (S3_TEST_SECRET_KEY != null) {
return S3_TEST_SECRET_KEY;
} else {
throw new IllegalStateException("S3 test secret key not available");
}
} | 3.68 |
morf_Cast_as | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#as(java.lang.String)
*/
@Override
public Cast as(String aliasName) {
return (Cast) super.as(aliasName);
} | 3.68 |
flink_RoundRobinOperatorStateRepartitioner_repartitionBroadcastState | /** Repartition BROADCAST state. */
private void repartitionBroadcastState(
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
broadcastState,
List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) {
int newParallelism = mergeMapList.size();
for (int i = 0; i < newParallelism; ++i) {
final Map<StreamStateHandle, OperatorStateHandle> mergeMap = mergeMapList.get(i);
// for each name, pick the i-th entry
for (Map.Entry<
String,
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
e : broadcastState.entrySet()) {
int previousParallelism = e.getValue().size();
Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithMetaInfo =
e.getValue().get(i % previousParallelism);
OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithMetaInfo.f0);
if (operatorStateHandle == null) {
operatorStateHandle =
new OperatorStreamStateHandle(
CollectionUtil.newHashMapWithExpectedSize(
broadcastState.size()),
handleWithMetaInfo.f0);
mergeMap.put(handleWithMetaInfo.f0, operatorStateHandle);
}
operatorStateHandle
.getStateNameToPartitionOffsets()
.put(e.getKey(), handleWithMetaInfo.f1);
}
}
} | 3.68 |
flink_CrossOperator_projectTuple6 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5>
ProjectCross<I1, I2, Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType =
new TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes);
return new ProjectCross<I1, I2, Tuple6<T0, T1, T2, T3, T4, T5>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
morf_SqlUtils_substring | /**
* Returns a SQL DSL expression to return the substring of <strong>field</strong>
* of <strong>length</strong> characters, starting from <strong>from</strong>.
*
* @param field The source expression.
* @param from The start character offset.
* @param length The length of the substring.
* @return The SQL DSL expression applying the substring function.
*/
public static AliasedField substring(AliasedField field, int from, int length) {
return Function.substring(field, literal(from), literal(length));
} | 3.68 |
hadoop_ManifestStoreOperationsThroughFileSystem_storePreservesEtagsThroughRenames | /**
* Probe filesystem capabilities.
* @param path path to probe.
* @return true if the FS declares its renames work.
*/
@Override
public boolean storePreservesEtagsThroughRenames(Path path) {
try {
return fileSystem.hasPathCapability(path,
CommonPathCapabilities.ETAGS_PRESERVED_IN_RENAME);
} catch (IOException ignored) {
return false;
}
} | 3.68 |
framework_Tree_setItemDescriptionGenerator | /**
* Set the item description generator which generates tooltips for the tree
* items.
*
* @param generator
* The generator to use or null to disable
*/
public void setItemDescriptionGenerator(
ItemDescriptionGenerator generator) {
if (generator != itemDescriptionGenerator) {
itemDescriptionGenerator = generator;
markAsDirty();
}
} | 3.68 |
MagicPlugin_MagicController_getNPCSuppliers | /**
* @return The supplier set that is used.
*/
public NPCSupplierSet getNPCSuppliers() {
return npcSuppliers;
} | 3.68 |
morf_AliasedField_getImpliedName | /**
* Returns the name of the field either implied by its source or by its alias.
* @return The implied name of the field
*/
public String getImpliedName() {
return getAlias();
} | 3.68 |
hudi_HoodieTableFactory_inferAvroSchema | /**
* Infers the deserialization Avro schema from the table schema (e.g. the DDL)
* if neither {@link FlinkOptions#SOURCE_AVRO_SCHEMA_PATH} nor
* {@link FlinkOptions#SOURCE_AVRO_SCHEMA} is specified.
*
* @param conf The configuration
* @param rowType The specified table row type
*/
private static void inferAvroSchema(Configuration conf, LogicalType rowType) {
if (!conf.getOptional(FlinkOptions.SOURCE_AVRO_SCHEMA_PATH).isPresent()
&& !conf.getOptional(FlinkOptions.SOURCE_AVRO_SCHEMA).isPresent()) {
String inferredSchema = AvroSchemaConverter.convertToSchema(rowType, AvroSchemaUtils.getAvroRecordQualifiedName(conf.getString(FlinkOptions.TABLE_NAME))).toString();
conf.setString(FlinkOptions.SOURCE_AVRO_SCHEMA, inferredSchema);
}
} | 3.68 |
hudi_DFSHoodieDatasetInputReader_iteratorSize | /**
* Returns the number of elements remaining in {@code iterator}. The iterator will be left exhausted: its {@code hasNext()} method will return {@code false}.
*/
private static int iteratorSize(Iterator<?> iterator) {
int count = 0;
while (iterator.hasNext()) {
iterator.next();
count++;
}
return count;
} | 3.68 |
querydsl_BeanMap_reinitialise | /**
* Reinitializes this bean. Called during {@link #setBean(Object)}.
* Does introspection to find properties.
*/
protected void reinitialise() {
readMethods.clear();
writeMethods.clear();
types.clear();
initialise();
} | 3.68 |
hadoop_ProxyCombiner_combine | /**
* Combine two or more proxies which together comprise a single proxy
* interface. This can be used for a protocol interface which {@code extends}
* multiple other protocol interfaces. The returned proxy will implement
* all of the methods of the combined proxy interface, delegating each call
* to whichever proxy implements that method. If multiple proxies implement the
* same method, the first in the list will be used for delegation.
* <p>
* This will check that every method on the combined interface is
* implemented by at least one of the supplied proxy objects.
*
* @param combinedProxyInterface The interface of the combined proxy.
* @param proxies The proxies which should be used as delegates.
* @param <T> The type of the proxy that will be returned.
* @return The combined proxy.
*/
@SuppressWarnings("unchecked")
public static <T> T combine(Class<T> combinedProxyInterface,
Object... proxies) {
methodLoop:
for (Method m : combinedProxyInterface.getMethods()) {
for (Object proxy : proxies) {
try {
proxy.getClass().getMethod(m.getName(), m.getParameterTypes());
continue methodLoop; // go to the next method
} catch (NoSuchMethodException nsme) {
// Continue to try the next proxy
}
}
throw new IllegalStateException("The proxies specified for "
+ combinedProxyInterface + " do not cover method " + m);
}
InvocationHandler handler =
new CombinedProxyInvocationHandler(combinedProxyInterface, proxies);
return (T) Proxy.newProxyInstance(combinedProxyInterface.getClassLoader(),
new Class[] {combinedProxyInterface}, handler);
} | 3.68 |
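A minimal usage sketch of the delegation pattern described above; the protocol interfaces and proxy objects here are entirely hypothetical, and only ProxyCombiner.combine itself comes from the snippet:

```java
// Hypothetical protocols: a combined interface extending two narrower ones.
interface ReadProtocol { String read(String key); }
interface WriteProtocol { void write(String key, String value); }
interface ReadWriteProtocol extends ReadProtocol, WriteProtocol { }

class ProxyCombinerSketch {
    static ReadWriteProtocol combine(ReadProtocol readProxy, WriteProtocol writeProxy) {
        // Each method of ReadWriteProtocol is delegated to the first supplied proxy
        // that declares it; combine() verifies up front that every method is covered.
        return ProxyCombiner.combine(ReadWriteProtocol.class, readProxy, writeProxy);
    }
}
```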
hadoop_AbfsClient_checkUserError | /**
* Returns true if the status code lies in the range of user error.
* @param responseStatusCode http response status code.
* @return True or False.
*/
private boolean checkUserError(int responseStatusCode) {
return (responseStatusCode >= HttpURLConnection.HTTP_BAD_REQUEST
&& responseStatusCode < HttpURLConnection.HTTP_INTERNAL_ERROR);
} | 3.68 |
morf_SqlDialect_getSqlFrom | /**
* Convert a {@link WindowFunction} into standards compliant SQL.
* @param windowFunctionField The field to convert
* @return The resulting SQL
**/
protected String getSqlFrom(WindowFunction windowFunctionField) {
if (requiresOrderByForWindowFunction(windowFunctionField.getFunction()) && windowFunctionField.getOrderBys().isEmpty()) {
throw new IllegalArgumentException("Window function " + windowFunctionField.getFunction().getType() + " requires an order by clause.");
}
StringBuilder statement = new StringBuilder().append(getSqlForWindowFunction(windowFunctionField.getFunction()));
statement.append(" OVER (");
if (windowFunctionField.getPartitionBys().size() > 0) {
statement.append("PARTITION BY ");
boolean firstField = true;
for (AliasedField field : windowFunctionField.getPartitionBys()) {
if (!firstField) {
statement.append(", ");
}
statement.append(getSqlFrom(field));
firstField = false;
}
}
if (windowFunctionField.getOrderBys().size() > 0) {
statement.append(" ORDER BY ");
boolean firstField = true;
for (AliasedField field : windowFunctionField.getOrderBys()) {
if (!firstField) {
statement.append(", ");
}
statement.append(getSqlForOrderByField(field));
firstField = false;
}
}
statement.append(")");
return statement.toString();
} | 3.68 |
flink_AbstractParameterTool_getShort | /**
* Returns the Short value for the given key. If the key does not exist it will return the
* default value given. The method fails if the value is not a Short.
*/
public short getShort(String key, short defaultValue) {
addToDefaults(key, Short.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Short.valueOf(value);
}
} | 3.68 |
morf_ConnectionResourcesBean_setHostName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setHostName(java.lang.String)
*/
@Override
public void setHostName(String hostName) {
this.hostName = hostName;
} | 3.68 |
morf_AbstractSelectStatementBuilder_getJoins | /**
* Gets the list of joined tables in the order they are joined
*
* @return the joined tables
*/
List<Join> getJoins() {
return joins;
} | 3.68 |
AreaShop_BuyRegion_getPlayerName | /**
* Get the name of the player that owns this region.
* @return The name of the player that owns this region; if unavailable by UUID it will return the old cached name, and if that is also unavailable it will return <UNKNOWN>
*/
public String getPlayerName() {
String result = Utils.toName(getBuyer());
if(result == null || result.isEmpty()) {
result = getStringSetting("buy.buyerName");
if(result == null || result.isEmpty()) {
result = "<UNKNOWN>";
}
}
return result;
} | 3.68 |
hadoop_StoragePolicySatisfyManager_removeAllPathIds | /**
* Clean up all sps path ids.
*/
public void removeAllPathIds() {
synchronized (pathsToBeTraversed) {
pathsToBeTraversed.clear();
}
} | 3.68 |
flink_ResourceManager_deregisterApplication | /**
* Cleanup application and shut down cluster.
*
* @param finalStatus of the Flink application
* @param diagnostics diagnostics message for the Flink application or {@code null}
*/
@Override
public CompletableFuture<Acknowledge> deregisterApplication(
final ApplicationStatus finalStatus, @Nullable final String diagnostics) {
log.info(
"Shut down cluster because application is in {}, diagnostics {}.",
finalStatus,
diagnostics);
try {
internalDeregisterApplication(finalStatus, diagnostics);
} catch (ResourceManagerException e) {
log.warn("Could not properly shutdown the application.", e);
}
return CompletableFuture.completedFuture(Acknowledge.get());
} | 3.68 |
hadoop_OuterJoinRecordReader_combine | /**
* Emit everything from the collector.
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
return true;
} | 3.68 |
hadoop_CSQueueStore_add | /**
* Method for adding a queue to the store.
* @param queue Queue to be added
*/
public void add(CSQueue queue) {
String fullName = queue.getQueuePath();
String shortName = queue.getQueueShortName();
try {
modificationLock.writeLock().lock();
fullNameQueues.put(fullName, queue);
getMap.put(fullName, queue);
//we only update short queue name ambiguity for non root queues
if (!shortName.equals(CapacitySchedulerConfiguration.ROOT)) {
//getting or creating the ambiguity set for the current queue
Set<String> fullNamesSet =
this.shortNameToLongNames.getOrDefault(shortName, new HashSet<>());
//adding the full name to the queue
fullNamesSet.add(fullName);
this.shortNameToLongNames.put(shortName, fullNamesSet);
}
//updating the getMap references for the queue
updateGetMapForShortName(shortName);
} finally {
modificationLock.writeLock().unlock();
}
} | 3.68 |
dubbo_ReferenceBean_getVersion | /**
* The version of the service
*/
public String getVersion() {
// Compatible with seata-1.4.0: io.seata.rm.tcc.remoting.parser.DubboRemotingParser#getServiceDesc()
return referenceConfig.getVersion();
} | 3.68 |
hadoop_BoundedResourcePool_release | /**
* Releases a previously acquired resource.
*
* @throws IllegalArgumentException if item is null.
*/
@Override
public void release(T item) {
checkNotNull(item, "item");
synchronized (createdItems) {
if (!createdItems.contains(item)) {
throw new IllegalArgumentException("This item is not a part of this pool");
}
}
// Return if this item was released earlier.
// We cannot use items.contains() because that check is not based on reference equality.
for (T entry : items) {
if (entry == item) {
return;
}
}
try {
items.put(item);
} catch (InterruptedException e) {
throw new IllegalStateException("release() should never block", e);
}
} | 3.68 |
framework_DesignContext_getComponentLocalId | /**
* Returns the local id for a component.
*
* @since 7.5.0
*
* @param component
* The component whose local id to get.
* @return the local id of the component, or null if the component has no
* local id assigned
*/
public String getComponentLocalId(Component component) {
return componentToLocalId.get(component);
} | 3.68 |
hadoop_SnappyCompressor_getBytesRead | /**
* Return number of bytes given to this compressor since last reset.
*/
@Override
public long getBytesRead() {
return bytesRead;
} | 3.68 |
flink_SqlAlterTable_getPartitionKVs | /** Get partition spec as key-value strings. */
public LinkedHashMap<String, String> getPartitionKVs() {
return SqlPartitionUtils.getPartitionKVs(getPartitionSpec());
} | 3.68 |
querydsl_SQLExpressions_lead | /**
* expr evaluated at the row that is one row after the current row within the partition;
*
* @param expr expression
* @return lead(expr)
*/
public static <T> WindowOver<T> lead(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), SQLOps.LEAD, expr);
} | 3.68 |
hbase_LockAndQueue_releaseExclusiveLock | /** Returns whether we should wake the procedures waiting on the lock here. */
public boolean releaseExclusiveLock(Procedure<?> proc) {
if (
exclusiveLockOwnerProcedure == null
|| exclusiveLockOwnerProcedure.getProcId() != proc.getProcId()
) {
// We are not the lock owner, it is probably inherited from the parent procedures.
return false;
}
exclusiveLockOwnerProcedure = null;
// This may be a bit strange so let me explain. We allow acquiring shared lock while the parent
// proc or we have already held the xlock, and also allow releasing the locks in any order, so
// it could happen that the xlock is released but there are still some procs holding the shared
// lock.
// In HBase, this could happen when a proc which holdLock is false and schedules sub procs which
// acquire the shared lock on the same lock. This is because we will schedule the sub procs
// before releasing the lock, so the sub procs could call acquire lock before we release the
// xlock.
return sharedLock == 0;
} | 3.68 |
hbase_SnapshotScannerHDFSAclHelper_revokeAcl | /**
* Remove acl when grant or revoke user permission
* @param userPermission the user and permission
* @param skipNamespaces the namespace set to skip remove acl
* @param skipTables the table set to skip remove acl
* @return false if an error occurred, otherwise true
*/
public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
Set<TableName> skipTables) {
try {
long start = EnvironmentEdgeManager.currentTime();
handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
skipTables);
LOG.info("Set HDFS acl when revoke {}, skipNamespaces: {}, skipTables: {}, cost {} ms",
userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Set HDFS acl error when revoke: {}, skipNamespaces: {}, skipTables: {}",
userPermission, skipNamespaces, skipTables, e);
return false;
}
} | 3.68 |
morf_OracleDialect_selectStatementPreFieldDirectives | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#selectStatementPreFieldDirectives(org.alfasoftware.morf.sql.SelectStatement)
*/
@Override
protected String selectStatementPreFieldDirectives(SelectStatement selectStatement) {
StringBuilder builder = new StringBuilder();
for (Hint hint : selectStatement.getHints()) {
if (hint instanceof OptimiseForRowCount) {
builder.append(" FIRST_ROWS(")
.append(((OptimiseForRowCount)hint).getRowCount())
.append(")");
}
else if (hint instanceof UseIndex) {
UseIndex useIndex = (UseIndex)hint;
builder.append(" INDEX(")
// No schema name - see http://docs.oracle.com/cd/B19306_01/server.102/b14200/sql_elements006.htm#BABIEJEB
.append(StringUtils.isEmpty(useIndex.getTable().getAlias()) ? useIndex.getTable().getName() : useIndex.getTable().getAlias())
.append(" ")
.append(useIndex.getIndexName())
.append(")");
}
else if (hint instanceof UseImplicitJoinOrder) {
builder.append(" ORDERED");
}
else if (hint instanceof ParallelQueryHint) {
builder.append(" PARALLEL");
ParallelQueryHint parallelQueryHint = (ParallelQueryHint) hint;
builder.append(parallelQueryHint.getDegreeOfParallelism().map(d -> "(" + d + ")").orElse(""));
}
else if (hint instanceof AllowParallelDmlHint) {
builder.append(" ENABLE_PARALLEL_DML");
}
else if (hint instanceof OracleCustomHint) {
builder.append(" ")
.append(((OracleCustomHint)hint).getCustomHint());
}
else if ( hint instanceof DialectSpecificHint && ((DialectSpecificHint)hint).isSameDatabaseType(Oracle.IDENTIFIER) ) {
builder.append(" ")
.append(((DialectSpecificHint)hint).getHintContents());
}
}
if (builder.length() == 0) {
return super.selectStatementPreFieldDirectives(selectStatement);
}
return "/*+" + builder.append(" */ ").toString();
} | 3.68 |
hbase_BlockIOUtils_readFullyWithHeapBuffer | /**
* Copying bytes from InputStream to {@link ByteBuff} by using a temporary heap byte[] (default
* size is 1024 now).
* @param in the InputStream to read
* @param out the destination {@link ByteBuff}
* @param length to read
* @throws IOException if any io error encountered.
*/
public static void readFullyWithHeapBuffer(InputStream in, ByteBuff out, int length)
throws IOException {
if (length < 0) {
throw new IllegalArgumentException("Length must not be negative: " + length);
}
int heapBytesRead = 0;
int remain = length, count;
byte[] buffer = new byte[1024];
try {
while (remain > 0) {
count = in.read(buffer, 0, Math.min(remain, buffer.length));
if (count < 0) {
throw new IOException(
"Premature EOF from inputStream, but still need " + remain + " bytes");
}
out.put(buffer, 0, count);
remain -= count;
heapBytesRead += count;
}
} finally {
final Span span = Span.current();
final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
annotateHeapBytesRead(attributesBuilder, heapBytesRead);
span.addEvent("BlockIOUtils.readFullyWithHeapBuffer", attributesBuilder.build());
}
} | 3.68 |
framework_View_getViewComponent | /**
* Gets the component to show when navigating to the view.
*
* By default casts this View to a {@link Component} if possible, otherwise
* throws an IllegalStateException.
*
* @since 8.1
* @return the component to show, by default the view instance itself
*/
public default Component getViewComponent() {
if (!(this instanceof Component)) {
throw new IllegalStateException(
"View is not a Component. Override getViewComponent() to return the root view component");
}
return (Component) this;
} | 3.68 |
hadoop_NamenodeStatusReport_getNumDecommissioningDatanodes | /**
* Get the number of decommissioning nodes.
*
* @return The number of decommissioning nodes.
*/
public int getNumDecommissioningDatanodes() {
return this.decomDatanodes;
} | 3.68 |
hmily_MetricsReporter_counterIncrement | /**
* Counter increment by count.
*
* @param name name
* @param labelValues label values
* @param count count
*/
public static void counterIncrement(final String name, final String[] labelValues, final long count) {
Optional.ofNullable(metricsRegister).ifPresent(register -> register.counterIncrement(name, labelValues, count));
} | 3.68 |
flink_ExceptionUtils_assertThrowableWithMessage | /**
* The same as {@link #findThrowableWithMessage(Throwable, String)}, but rethrows original
* exception if the expected exception was not found.
*/
public static <T extends Throwable> void assertThrowableWithMessage(
Throwable throwable, String searchMessage) throws T {
if (!findThrowableWithMessage(throwable, searchMessage).isPresent()) {
throw (T) throwable;
}
} | 3.68 |
hadoop_S3AReadOpContext_withAuditSpan | /**
* Set builder value.
* @param value new value
* @return the builder
*/
public S3AReadOpContext withAuditSpan(final AuditSpan value) {
auditSpan = value;
return this;
} | 3.68 |
flink_SlotSharingGroup_setManagedMemoryMB | /** Set the task managed memory for this SlotSharingGroup in MB. */
public Builder setManagedMemoryMB(int managedMemoryMB) {
this.managedMemory = MemorySize.ofMebiBytes(managedMemoryMB);
return this;
} | 3.68 |
pulsar_TestRetrySupport_incrementSetupNumber | /**
* This method should be called in the setup method of the concrete class.
*
* This increases an internal counter and resets the failure state which are used to determine
* whether cleanup is needed before a test method is called.
*
*/
protected final void incrementSetupNumber() {
currentSetupNumber++;
failedSetupNumber = -1;
LOG.debug("currentSetupNumber={}", currentSetupNumber);
} | 3.68 |
flink_HybridSource_build | /** Build the source. */
public HybridSource<T> build() {
return new HybridSource(sources);
} | 3.68 |
hbase_RegionReplicaUtil_addReplicas | /**
* Create any replicas for the regions (the default replicas that were already created are passed to
* the method)
* @param regions existing regions
* @param oldReplicaCount existing replica count
* @param newReplicaCount updated replica count due to modify table
* @return the combined list of default and non-default replicas
*/
public static List<RegionInfo> addReplicas(final List<RegionInfo> regions, int oldReplicaCount,
int newReplicaCount) {
if ((newReplicaCount - 1) <= 0) {
return regions;
}
List<RegionInfo> hRegionInfos = new ArrayList<>(newReplicaCount * regions.size());
for (RegionInfo ri : regions) {
if (
RegionReplicaUtil.isDefaultReplica(ri)
&& (!ri.isOffline() || (!ri.isSplit() && !ri.isSplitParent()))
) {
// region level replica index starts from 0. So if oldReplicaCount was 2 then the max
// replicaId for
// the existing regions would be 1
for (int j = oldReplicaCount; j < newReplicaCount; j++) {
hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(ri, j));
}
}
}
hRegionInfos.addAll(regions);
return hRegionInfos;
} | 3.68 |
framework_VCustomLayout_clear | /** Clear all widgets from the layout. */
@Override
public void clear() {
super.clear();
locationToWidget.clear();
childWidgetToCaptionWrapper.clear();
} | 3.68 |
hbase_HMaster_isOnline | /**
* Report whether this master is started. This method is used for testing.
* @return true if master is ready to go, false if not.
*/
public boolean isOnline() {
return serviceStarted;
} | 3.68 |
hudi_SparkInternalSchemaConverter_collectColNamesFromSparkStruct | /**
* Collect all the leaf nodes names.
*
* @param sparkSchema a spark schema
* @return leaf nodes full names.
*/
public static List<String> collectColNamesFromSparkStruct(StructType sparkSchema) {
List<String> result = new ArrayList<>();
collectColNamesFromStructType(sparkSchema, new LinkedList<>(), result);
return result;
} | 3.68 |
hudi_AvroSchemaUtils_isNullable | /**
* Returns true if the provided {@link Schema} is nullable (i.e. accepts null values),
* returns false otherwise
*/
public static boolean isNullable(Schema schema) {
if (schema.getType() != Schema.Type.UNION) {
return false;
}
List<Schema> innerTypes = schema.getTypes();
return innerTypes.size() > 1 && innerTypes.stream().anyMatch(it -> it.getType() == Schema.Type.NULL);
} | 3.68 |
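For illustration, a quick check with plain Avro schemas built through the standard org.apache.avro.Schema factory methods (a sketch; only isNullable comes from the snippet above, and its Hudi import is omitted):

```java
import java.util.Arrays;

import org.apache.avro.Schema;

class IsNullableSketch {
    public static void main(String[] args) {
        // union(null, string) -> nullable
        Schema nullableString = Schema.createUnion(Arrays.asList(
                Schema.create(Schema.Type.NULL), Schema.create(Schema.Type.STRING)));
        // plain string -> not nullable (not a union at all)
        Schema plainString = Schema.create(Schema.Type.STRING);

        System.out.println(AvroSchemaUtils.isNullable(nullableString)); // true
        System.out.println(AvroSchemaUtils.isNullable(plainString));    // false
    }
}
```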
hbase_UnsafeAccess_toShort | /**
* Reads a short value at the given Object's offset considering it was written in big-endian
* format.
* @return short value at offset
*/
public static short toShort(Object ref, long offset) {
if (LITTLE_ENDIAN) {
return Short.reverseBytes(HBasePlatformDependent.getShort(ref, offset));
}
return HBasePlatformDependent.getShort(ref, offset);
} | 3.68 |
framework_StateChangeEvent_getChangedPropertiesFastSet | /**
* Gets the properties that have changed.
*
* @return a set of names of the changed properties
*
* @deprecated As of 7.0.1, use {@link #hasPropertyChanged(String)} instead
* for improved performance.
*/
@Deprecated
public FastStringSet getChangedPropertiesFastSet() {
if (changedProperties == null) {
Profiler.enter(
"StateChangeEvent.getChangedPropertiesFastSet populate");
changedProperties = FastStringSet.create();
addJsonFields(stateJson, changedProperties, "");
if (isInitialStateChange()) {
addAllStateFields(
AbstractConnector.getStateType(getConnector()),
changedProperties, "");
}
Profiler.leave(
"StateChangeEvent.getChangedPropertiesFastSet populate");
}
return changedProperties;
} | 3.68 |
flink_AsynchronousFileIOChannel_registerAllRequestsProcessedListener | /**
* Registers a listener to be notified when all outstanding requests have been processed.
*
* <p>New requests can arrive right after the listener got notified. Therefore, it is not safe
* to assume that the number of outstanding requests is still zero after a notification unless
* there was a close right before the listener got called.
*
* <p>Returns <code>true</code>, if the registration was successful. A registration can fail, if
* there are no outstanding requests when trying to register a listener.
*/
protected boolean registerAllRequestsProcessedListener(NotificationListener listener)
throws IOException {
checkNotNull(listener);
synchronized (listenerLock) {
if (allRequestsProcessedListener == null) {
// There was a race with the processing of the last outstanding request
if (requestsNotReturned.get() == 0) {
return false;
}
allRequestsProcessedListener = listener;
return true;
}
}
throw new IllegalStateException("Already subscribed.");
} | 3.68 |
hbase_SingleColumnValueFilter_parseFrom | /**
* Parse a serialized representation of {@link SingleColumnValueFilter}
* @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
* @return An instance of {@link SingleColumnValueFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static SingleColumnValueFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
FilterProtos.SingleColumnValueFilter proto;
try {
proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator compareOp = CompareOperator.valueOf(proto.getCompareOp().name());
final org.apache.hadoop.hbase.filter.ByteArrayComparable comparator;
try {
comparator = ProtobufUtil.toComparator(proto.getComparator());
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new SingleColumnValueFilter(
proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null,
proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null, compareOp,
comparator, proto.getFilterIfMissing(), proto.getLatestVersionOnly());
} | 3.68 |
hbase_MetricsTableRequests_updateCheckAndDelete | /**
* Update the CheckAndDelete time histogram.
* @param time time it took
*/
public void updateCheckAndDelete(long time) {
if (isEnableTableLatenciesMetrics()) {
checkAndDeleteTimeHistogram.update(time);
}
} | 3.68 |
flink_AbstractCheckpointStats_getTaskStateStats | /**
* Returns the task state stats for the given job vertex ID or <code>null</code> if no task with
* such an ID is available.
*
* @param jobVertexId Job vertex ID of the task stats to look up.
* @return The task state stats instance for the given ID or <code>null</code>.
*/
public TaskStateStats getTaskStateStats(JobVertexID jobVertexId) {
return taskStats.get(jobVertexId);
} | 3.68 |
hadoop_RpcProgramPortmap_set | /**
* When a program first becomes available on a machine, it registers itself
* with the port mapper program on the same machine. The program passes its
* program number "prog", version number "vers", transport protocol number
* "prot", and the port "port" on which it awaits service request. The
* procedure returns a boolean reply whose value is "TRUE" if the procedure
* successfully established the mapping and "FALSE" otherwise. The procedure
* refuses to establish a mapping if one already exists for the tuple
* "(prog, vers, prot)".
*/
private XDR set(int xid, XDR in, XDR out) {
PortmapMapping mapping = PortmapRequest.mapping(in);
String key = PortmapMapping.key(mapping);
if (LOG.isDebugEnabled()) {
LOG.debug("Portmap set key=" + key);
}
map.put(key, mapping);
return PortmapResponse.intReply(out, xid, mapping.getPort());
} | 3.68 |
hbase_CacheConfig_setEvictOnClose | /**
* Only used for testing.
* @param evictOnClose whether blocks should be evicted from the cache when an HFile reader is
* closed
*/
public void setEvictOnClose(boolean evictOnClose) {
this.evictOnClose = evictOnClose;
} | 3.68 |
hudi_CleanActionExecutor_runPendingClean | /**
* Executes the Cleaner plan stored in the instant metadata.
*/
HoodieCleanMetadata runPendingClean(HoodieTable<T, I, K, O> table, HoodieInstant cleanInstant) {
try {
HoodieCleanerPlan cleanerPlan = CleanerUtils.getCleanerPlan(table.getMetaClient(), cleanInstant);
return runClean(table, cleanInstant, cleanerPlan);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
} | 3.68 |
druid_IPRange_computeMaskFromNetworkPrefix | /**
* Convert an extended network prefix integer into an IP number.
*
* @param prefix The network prefix number.
* @return Return the IP number corresponding to the extended network prefix.
*/
private IPAddress computeMaskFromNetworkPrefix(int prefix) {
/*
* int subnet = 0; for (int i=0; i<prefix; i++) { subnet = subnet << 1; subnet += 1; }
*/
StringBuilder str = new StringBuilder();
for (int i = 0; i < 32; i++) {
if (i < prefix) {
str.append("1");
} else {
str.append("0");
}
}
String decimalString = toDecimalString(str.toString());
return new IPAddress(decimalString);
} | 3.68 |
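The loop above builds the mask as a 32-character bit string before converting it to dotted-decimal form. As a generic illustration of the same arithmetic (not the druid API), the mask can also be computed directly with a shift; a /24 prefix yields 255.255.255.0:

```java
class NetworkPrefixMaskSketch {
    // Same result as the bit-string loop: the top 'prefix' bits set, the rest zero.
    static int maskFromPrefix(int prefix) {
        return prefix == 0 ? 0 : (int) (0xFFFFFFFFL << (32 - prefix));
    }

    public static void main(String[] args) {
        int mask = maskFromPrefix(24);
        System.out.printf("%d.%d.%d.%d%n",
                (mask >>> 24) & 0xFF, (mask >>> 16) & 0xFF, (mask >>> 8) & 0xFF, mask & 0xFF);
        // prints 255.255.255.0
    }
}
```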
hadoop_ApplicationMaster_main | /**
* @param args Command line args
*/
public static void main(String[] args) {
boolean result = false;
try {
ApplicationMaster appMaster = new ApplicationMaster();
LOG.info("Initializing ApplicationMaster");
boolean doRun = appMaster.init(args);
if (!doRun) {
System.exit(0);
}
result = appMaster.run();
} catch (Throwable t) {
LOG.error("Error running ApplicationMaster", t);
System.exit(1);
}
if (result) {
LOG.info("Application Master completed successfully. exiting");
System.exit(0);
} else {
LOG.info("Application Master failed. exiting");
System.exit(2);
}
} | 3.68 |
hadoop_ManifestCommitter_abortTask | /**
* Abort a task.
* @param context task context
* @throws IOException failure during the delete
*/
@Override
public void abortTask(final TaskAttemptContext context)
throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(true,
context);
try {
new AbortTaskStage(
committerConfig.createStageConfig()
.withOperations(createManifestStoreOperations())
.build())
.apply(false);
} finally {
logCommitterStatisticsAtDebug();
updateCommonContextOnCommitterExit();
}
} | 3.68 |
framework_ServerRpcManager_applyInvocation | /**
* Invoke a method in a server side RPC target class. This method is to be
* used by the RPC framework and unit testing tools only.
*
* @param invocation
* method invocation to perform
*/
public void applyInvocation(ServerRpcMethodInvocation invocation)
throws RpcInvocationException {
Method method = invocation.getMethod();
Object[] arguments = invocation.getParameters();
try {
method.invoke(implementation, arguments);
} catch (Exception e) {
throw new RpcInvocationException(
"Unable to invoke method " + invocation.getMethodName()
+ " in " + invocation.getInterfaceName(),
e);
}
} | 3.68 |
morf_OracleDialect_getSqlForAddDays | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForAddDays(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForAddDays(Function function) {
return String.format(
"(%s) + (%s)",
getSqlFrom(function.getArguments().get(0)),
getSqlFrom(function.getArguments().get(1))
);
} | 3.68 |
framework_AbstractLegacyComponent_isImmediate | /**
* Returns the immediate mode of the component.
* <p>
* Since Vaadin 8, the default mode is immediate.
*
* @return true if the component is in immediate mode (explicitly or
* implicitly set), false if the component if not in immediate mode
*/
public boolean isImmediate() {
if (explicitImmediateValue != null) {
return explicitImmediateValue;
} else {
return true;
}
} | 3.68 |
hadoop_ResourceRequest_executionType | /**
* Set the <code>executionTypeRequest</code> of the request with 'ensure
* execution type' flag set to true.
* @see ResourceRequest#setExecutionTypeRequest(
* ExecutionTypeRequest)
* @param executionType <code>executionType</code> of the request.
* @return {@link ResourceRequestBuilder}
*/
@Public
@Evolving
public ResourceRequestBuilder executionType(ExecutionType executionType) {
resourceRequest.setExecutionTypeRequest(
ExecutionTypeRequest.newInstance(executionType, true));
return this;
} | 3.68 |
querydsl_Expressions_stringOperation | /**
* Create a new Operation expression
*
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
public static StringOperation stringOperation(Operator operator, Expression<?>... args) {
return new StringOperation(operator, args);
} | 3.68 |
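A tiny usage sketch; the operator and operands are chosen for illustration (Ops.CONCAT is querydsl's built-in string concatenation operator):

```java
import com.querydsl.core.types.Ops;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringOperation;

class StringOperationSketch {
    public static void main(String[] args) {
        // Builds the expression concat('Hello, ', 'world') without any query context.
        StringOperation greeting = Expressions.stringOperation(
                Ops.CONCAT, Expressions.constant("Hello, "), Expressions.constant("world"));
        System.out.println(greeting); // prints a string rendering of the expression
    }
}
```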