name | code_snippet | score |
---|---|---|
querydsl_BooleanExpression_or | /**
* Create a {@code this || right} expression
*
* <p>Returns a union of this and the given expression</p>
*
* @param right right hand side of the union
* @return this || right
*/
public BooleanExpression or(@Nullable Predicate right) {
right = (Predicate) ExpressionUtils.extract(right);
if (right != null) {
return Expressions.booleanOperation(Ops.OR, mixin, right);
} else {
return this;
}
} | 3.68 |
flink_HiveParserSemanticAnalyzer_processJoin | /**
* Given the AST with TOK_JOIN as the root, get all the aliases for the tables or subqueries in
* the join.
*/
@SuppressWarnings("nls")
private void processJoin(HiveParserQB qb, HiveParserASTNode join) throws SemanticException {
int numChildren = join.getChildCount();
if ((numChildren != 2)
&& (numChildren != 3)
&& join.getToken().getType() != HiveASTParser.TOK_UNIQUEJOIN) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(join, "Join with multiple children"));
}
queryProperties.incrementJoinCount(HiveParserUtils.isOuterJoinToken(join));
for (int num = 0; num < numChildren; num++) {
HiveParserASTNode child = (HiveParserASTNode) join.getChild(num);
if (child.getToken().getType() == HiveASTParser.TOK_TABREF) {
processTable(qb, child);
} else if (child.getToken().getType() == HiveASTParser.TOK_SUBQUERY) {
processSubQuery(qb, child);
} else if (child.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION) {
queryProperties.setHasPTF(true);
processPTF(qb, child);
HiveParserPTFInvocationSpec ptfInvocationSpec = qb.getPTFInvocationSpec(child);
String inputAlias =
ptfInvocationSpec == null
? null
: ptfInvocationSpec.getFunction().getAlias();
if (inputAlias == null) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
child, "PTF invocation in a Join must have an alias"));
}
} else if (child.getToken().getType() == HiveASTParser.TOK_LATERAL_VIEW
|| child.getToken().getType() == HiveASTParser.TOK_LATERAL_VIEW_OUTER) {
// SELECT * FROM src1 LATERAL VIEW udtf() AS myTable JOIN src2 ...
// is not supported. Instead, the lateral view must be in a subquery
// SELECT * FROM (SELECT * FROM src1 LATERAL VIEW udtf() AS myTable) a
// JOIN src2 ...
throw new SemanticException(
HiveParserErrorMsg.getMsg(ErrorMsg.LATERAL_VIEW_WITH_JOIN, join));
} else if (HiveParserUtils.isJoinToken(child)) {
processJoin(qb, child);
}
}
} | 3.68 |
dubbo_AbstractMetadataReport_getReportCacheExecutor | /**
* @deprecated only for unit test
*/
@Deprecated
protected ExecutorService getReportCacheExecutor() {
return reportCacheExecutor;
} | 3.68 |
morf_XmlDataSetProducer_open | /**
* Creates an XML pull parser based on the XML reader specified at
* construction.
*
* @see org.alfasoftware.morf.dataset.DataSetProducer#open()
*/
@Override
public void open() {
xmlStreamProvider.open();
} | 3.68 |
flink_AbstractServerBase_getServerAddress | /**
* Returns the address of this server.
*
* @return AbstractServerBase address
* @throws IllegalStateException If server has not been started yet
*/
public InetSocketAddress getServerAddress() {
Preconditions.checkState(
serverAddress != null, "Server " + serverName + " has not been started.");
return serverAddress;
} | 3.68 |
flink_PekkoUtils_getBasicConfig | /**
* Gets the basic Pekko config which is shared by remote and local actor systems.
*
* @param configuration instance which contains the user specified values for the configuration
* @return Flink's basic Pekko config
*/
private static Config getBasicConfig(Configuration configuration) {
final int throughput = configuration.getInteger(AkkaOptions.DISPATCHER_THROUGHPUT);
final String jvmExitOnFatalError =
booleanToOnOrOff(configuration.getBoolean(AkkaOptions.JVM_EXIT_ON_FATAL_ERROR));
final String logLifecycleEvents =
booleanToOnOrOff(configuration.getBoolean(AkkaOptions.LOG_LIFECYCLE_EVENTS));
final String supervisorStrategy = EscalatingSupervisorStrategy.class.getCanonicalName();
return new ConfigBuilder()
.add("pekko {")
.add(" daemonic = off")
.add(" loggers = [\"org.apache.pekko.event.slf4j.Slf4jLogger\"]")
.add(" logging-filter = \"org.apache.pekko.event.slf4j.Slf4jLoggingFilter\"")
.add(" log-config-on-start = off")
.add(" logger-startup-timeout = 50s")
.add(" loglevel = " + getLogLevel())
.add(" stdout-loglevel = OFF")
.add(" log-dead-letters = " + logLifecycleEvents)
.add(" log-dead-letters-during-shutdown = " + logLifecycleEvents)
.add(" jvm-exit-on-fatal-error = " + jvmExitOnFatalError)
.add(" serialize-messages = off")
.add(" actor {")
.add(" guardian-supervisor-strategy = " + supervisorStrategy)
.add(" warn-about-java-serializer-usage = off")
.add(" allow-java-serialization = on")
.add(" default-dispatcher {")
.add(" throughput = " + throughput)
.add(" }")
.add(" supervisor-dispatcher {")
.add(" type = Dispatcher")
.add(" executor = \"thread-pool-executor\"")
.add(" thread-pool-executor {")
.add(" core-pool-size-min = 1")
.add(" core-pool-size-max = 1")
.add(" }")
.add(" }")
.add(" }")
.add("}")
.build();
} | 3.68 |
framework_InMemoryDataProvider_filteringByPrefix | /**
* Wraps this data provider to create a new data provider that is filtered
* by a string by checking whether the lower case representation of an item
* property value starts with the lower case representation of the filter
* value provided in the query. Conversion to lower case is done using the
* locale of the {@link UI#getCurrent() current UI} if available, or
* otherwise {@link Locale#getDefault() the default locale}. The filter
* never passes if the item property value is <code>null</code>.
*
* @param valueProvider
* a value provider that gets the string property value, not
* <code>null</code>
* @return a data provider that filters accordingly, not <code>null</code>
*/
public default DataProvider<T, String> filteringByPrefix(
ValueProvider<T, String> valueProvider) {
return InMemoryDataProviderHelpers.filteringByCaseInsensitiveString(
this, valueProvider, String::startsWith,
InMemoryDataProviderHelpers.CURRENT_LOCALE_SUPPLIER);
} | 3.68 |
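
The `filteringByPrefix` wrapper above is easiest to see with a concrete query. Below is a minimal, hedged sketch (assuming Vaadin 8's `com.vaadin.data.provider` package; the plain `String` item type and sample values are purely illustrative), showing that a query filter of "al" passes "Alice" and "albert" but not "Bob":

```java
import com.vaadin.data.provider.DataProvider;
import com.vaadin.data.provider.ListDataProvider;
import com.vaadin.data.provider.Query;

import java.util.stream.Collectors;

public class PrefixFilterSketch {
    public static void main(String[] args) {
        // In-memory provider over plain strings (illustrative item type).
        ListDataProvider<String> source = DataProvider.ofItems("Alice", "albert", "Bob");

        // Wrap it so the query filter is matched as a case-insensitive prefix
        // of the value returned by the value provider (here the item itself).
        DataProvider<String, String> filtered = source.filteringByPrefix(item -> item);

        // Querying with "al" should pass "Alice" and "albert" but not "Bob".
        System.out.println(filtered.fetch(new Query<>("al"))
                .collect(Collectors.toList()));
    }
}
```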
framework_FocusableScrollPanel_setHorizontalScrollPosition | /**
* Sets the horizontal scroll position.
*
* @param position
* the new horizontal scroll position, in pixels
*/
public void setHorizontalScrollPosition(int position) {
getElement().setScrollLeft(position);
} | 3.68 |
querydsl_ExpressionUtils_anyOf | /**
* Create the union of the given arguments
*
* @param exprs predicates
* @return union
*/
@Nullable
public static Predicate anyOf(Predicate... exprs) {
Predicate rv = null;
for (Predicate b : exprs) {
if (b != null) {
rv = rv == null ? b : ExpressionUtils.or(rv,b);
}
}
return rv;
} | 3.68 |
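
Since `anyOf` skips `null` arguments, optional criteria can be passed in without pre-filtering. A minimal, hedged sketch (assuming Querydsl's `Expressions` factory; the path names are purely illustrative):

```java
import com.querydsl.core.types.ExpressionUtils;
import com.querydsl.core.types.Predicate;
import com.querydsl.core.types.dsl.Expressions;

public class AnyOfSketch {
    public static void main(String[] args) {
        Predicate active = Expressions.booleanPath("active").isTrue();
        Predicate named = Expressions.stringPath("name").startsWith("a");
        Predicate absent = null; // e.g. an optional criterion that was not supplied

        // Null entries are skipped, so the result is simply (active || named).
        Predicate union = ExpressionUtils.anyOf(active, absent, named);
        System.out.println(union);
    }
}
```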
hadoop_CacheDirectiveStats_setBytesCached | /**
* Sets the bytes cached by this directive.
*
* @param bytesCached The bytes cached.
* @return This builder, for call chaining.
*/
public Builder setBytesCached(long bytesCached) {
this.bytesCached = bytesCached;
return this;
} | 3.68 |
hadoop_MultipartUploaderBuilderImpl_overwrite | /**
* Set to true to overwrite the existing file.
* If set to false, an exception will be thrown when calling {@link #build()}
* if the file exists.
*/
@Override
public B overwrite(boolean overwrite) {
if (overwrite) {
flags.add(CreateFlag.OVERWRITE);
} else {
flags.remove(CreateFlag.OVERWRITE);
}
return getThisBuilder();
} | 3.68 |
morf_DatabaseSchemaManager_dropTable | /**
* Drops a table and all its dependencies (e.g. indexes).
*
* @param table the table to drop
* @return sql statements
*/
private Collection<String> dropTable(Table table) {
if (log.isDebugEnabled()) log.debug("Dropping table [" + table.getName() + "]");
String upperCase = table.getName().toUpperCase();
tables.get().remove(upperCase);
tablesNotNeedingTruncate.get().remove(upperCase);
return dialect.get().dropStatements(table);
} | 3.68 |
hmily_EtcdClient_pull | /**
* Pull input stream.
*
* @param config the config
* @return the input stream
*/
public InputStream pull(final EtcdConfig config) {
if (client == null) {
client = Client.builder().endpoints(config.getServer()).build();
}
try {
CompletableFuture<GetResponse> future = client.getKVClient().get(ByteSequence.fromString(config.getKey()));
List<KeyValue> kvs;
if (config.getTimeoutMs() > 0L) {
kvs = future.get(config.getTimeoutMs(), TimeUnit.MILLISECONDS).getKvs();
} else {
kvs = future.get().getKvs();
}
if (CollectionUtils.isNotEmpty(kvs)) {
String content = kvs.get(0).getValue().toStringUtf8();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("etcd content {}", content);
}
if (StringUtils.isBlank(content)) {
return null;
}
return new ByteArrayInputStream(content.getBytes());
}
return null;
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw new ConfigException(e);
}
} | 3.68 |
flink_PythonShellParser_printHelp | /** Prints the help for the client. */
private static void printHelp() {
System.out.print("Flink Python Shell\n");
System.out.print("Usage: pyflink-shell.sh [local|remote|yarn] [options] <args>...\n");
System.out.print('\n');
printLocalHelp();
printRemoteHelp();
printYarnHelp();
System.out.println("-h | --help");
System.out.println(" Prints this usage text");
System.exit(0);
} | 3.68 |
graphhopper_CustomModelParser_createClassTemplate | /**
* Create the class source file from the detected variables (priorityVariables and speedVariables). We assume that
* these variables are safe although they are user input because we collected them from parsing via Janino. This
* means that the source file is free from user input and could be directly compiled. Before we do this we still
* have to inject the parsed and safe user expressions in a later step.
*/
private static String createClassTemplate(long counter,
Set<String> priorityVariables, double maxPriority,
Set<String> speedVariables, double maxSpeed,
EncodedValueLookup lookup, Map<String, JsonFeature> areas) {
final StringBuilder importSourceCode = new StringBuilder("import com.graphhopper.routing.ev.*;\n");
importSourceCode.append("import java.util.Map;\n");
final StringBuilder classSourceCode = new StringBuilder(100);
boolean includedAreaImports = false;
final StringBuilder initSourceCode = new StringBuilder("this.avg_speed_enc = avgSpeedEnc;\n");
initSourceCode.append("this.priority_enc = priorityEnc;\n");
Set<String> set = new HashSet<>();
for (String prioVar : priorityVariables)
set.add(prioVar.startsWith(BACKWARD_PREFIX) ? prioVar.substring(BACKWARD_PREFIX.length()) : prioVar);
for (String speedVar : speedVariables)
set.add(speedVar.startsWith(BACKWARD_PREFIX) ? speedVar.substring(BACKWARD_PREFIX.length()) : speedVar);
for (String arg : set) {
if (lookup.hasEncodedValue(arg)) {
EncodedValue enc = lookup.getEncodedValue(arg, EncodedValue.class);
classSourceCode.append("protected " + getInterface(enc) + " " + arg + "_enc;\n");
initSourceCode.append("this." + arg + "_enc = (" + getInterface(enc)
+ ") lookup.getEncodedValue(\"" + arg + "\", EncodedValue.class);\n");
} else if (arg.startsWith(IN_AREA_PREFIX)) {
if (!includedAreaImports) {
importSourceCode.append("import " + BBox.class.getName() + ";\n");
importSourceCode.append("import " + GHUtility.class.getName() + ";\n");
importSourceCode.append("import " + PreparedPolygon.class.getName() + ";\n");
importSourceCode.append("import " + Polygonal.class.getName() + ";\n");
importSourceCode.append("import " + JsonFeature.class.getName() + ";\n");
importSourceCode.append("import " + Polygon.class.getName() + ";\n");
includedAreaImports = true;
}
if (!JsonFeature.isValidId(arg))
throw new IllegalArgumentException("Area has invalid name: " + arg);
String id = arg.substring(IN_AREA_PREFIX.length());
JsonFeature feature = areas.get(id);
if (feature == null)
throw new IllegalArgumentException("Area '" + id + "' wasn't found");
if (feature.getGeometry() == null)
throw new IllegalArgumentException("Area '" + id + "' does not contain a geometry");
if (!(feature.getGeometry() instanceof Polygonal))
throw new IllegalArgumentException("Currently only type=Polygon is supported for areas but was " + feature.getGeometry().getGeometryType());
if (feature.getBBox() != null)
throw new IllegalArgumentException("Bounding box of area " + id + " must be empty");
classSourceCode.append("protected " + Polygon.class.getSimpleName() + " " + arg + ";\n");
initSourceCode.append("JsonFeature feature_" + id + " = (JsonFeature) areas.get(\"" + id + "\");\n");
initSourceCode.append("this." + arg + " = new Polygon(new PreparedPolygon((Polygonal) feature_" + id + ".getGeometry()));\n");
} else {
if (!arg.startsWith(IN_AREA_PREFIX))
throw new IllegalArgumentException("Variable not supported: " + arg);
}
}
return ""
+ "package com.graphhopper.routing.weighting.custom;\n"
+ "import " + CustomWeightingHelper.class.getName() + ";\n"
+ "import " + EncodedValueLookup.class.getName() + ";\n"
+ "import " + EdgeIteratorState.class.getName() + ";\n"
+ importSourceCode
+ "\npublic class JaninoCustomWeightingHelperSubclass" + counter + " extends " + CustomWeightingHelper.class.getSimpleName() + " {\n"
+ classSourceCode
+ " @Override\n"
+ " public void init(EncodedValueLookup lookup, " + DecimalEncodedValue.class.getName() + " avgSpeedEnc, "
+ DecimalEncodedValue.class.getName() + " priorityEnc, Map<String, " + JsonFeature.class.getName() + "> areas) {\n"
+ initSourceCode
+ " }\n\n"
// we need these placeholder methods so that the hooks in DeepCopier are invoked
+ " @Override\n"
+ " public double getPriority(EdgeIteratorState edge, boolean reverse) {\n"
+ " return 1; //will be overwritten by code injected in DeepCopier\n"
+ " }\n"
+ " @Override\n"
+ " public double getSpeed(EdgeIteratorState edge, boolean reverse) {\n"
+ " return getRawSpeed(edge, reverse); //will be overwritten by code injected in DeepCopier\n"
+ " }\n"
+ " @Override\n"
+ " protected double getMaxSpeed() {\n"
+ " return " + maxSpeed + ";"
+ " }\n"
+ " @Override\n"
+ " protected double getMaxPriority() {\n"
+ " return " + maxPriority + ";"
+ " }\n"
+ "}";
} | 3.68 |
hbase_SnapshotManifest_getRegionManifests | /**
* Get all the region manifests from the snapshot.
*/
public List<SnapshotRegionManifest> getRegionManifests() {
return this.regionManifests;
} | 3.68 |
hbase_MasterSnapshotVerifier_verifyRegions | /**
* Check that all the regions in the snapshot are valid, and accounted for.
* @param manifest snapshot manifest to inspect
* @throws IOException if we can't reach hbase:meta or read the files from the FS
*/
private void verifyRegions(SnapshotManifest manifest, boolean verifyRegions) throws IOException {
List<RegionInfo> regions = services.getAssignmentManager().getTableRegions(tableName, false);
// Remove the non-default regions
RegionReplicaUtil.removeNonDefaultRegions(regions);
Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
if (regionManifests == null) {
String msg = "Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " looks empty";
LOG.error(msg);
throw new CorruptedSnapshotException(msg);
}
String errorMsg = "";
boolean hasMobStore = false;
// the mob region is a dummy region, it's not a real region in HBase.
// the mob region has a special name, it could be found by the region name.
if (regionManifests.get(MobUtils.getMobRegionInfo(tableName).getEncodedName()) != null) {
hasMobStore = true;
}
int realRegionCount = hasMobStore ? regionManifests.size() - 1 : regionManifests.size();
if (realRegionCount != regions.size()) {
errorMsg =
"Regions moved during the snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot)
+ "'. expected=" + regions.size() + " snapshotted=" + realRegionCount + ".";
LOG.error(errorMsg);
}
// Verify RegionInfo
if (verifyRegions) {
for (RegionInfo region : regions) {
SnapshotRegionManifest regionManifest = regionManifests.get(region.getEncodedName());
if (regionManifest == null) {
// could happen due to a move or split race.
String mesg = " No snapshot region directory found for region:" + region;
if (errorMsg.isEmpty()) {
errorMsg = mesg;
}
LOG.error(mesg);
continue;
}
verifyRegionInfo(region, regionManifest);
}
if (!errorMsg.isEmpty()) {
throw new CorruptedSnapshotException(errorMsg);
}
// Verify Snapshot HFiles
// Requires the root directory file system as HFiles are stored in the root directory
SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(),
CommonFSUtils.getRootDirFileSystem(services.getConfiguration()), manifest);
}
} | 3.68 |
shardingsphere-elasticjob_ElasticJobExecutor_shutdown | /**
* Shutdown executor.
*/
public void shutdown() {
executorServiceReloader.close();
jobErrorHandlerReloader.close();
} | 3.68 |
flink_AvroOutputFormat_setCodec | /**
* Set avro codec for compression.
*
* @param codec avro codec.
*/
public void setCodec(final Codec codec) {
this.codec = checkNotNull(codec, "codec can not be null");
} | 3.68 |
flink_StreamProjection_projectTuple21 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20>
SingleOutputStreamOperator<
Tuple21<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20>>
projectTuple21() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<
Tuple21<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20>>
tType =
new TupleTypeInfo<
Tuple21<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple21<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
flink_TimerGauge_getAccumulatedCount | /** @return the accumulated period by the given TimerGauge. */
public synchronized long getAccumulatedCount() {
return accumulatedCount;
} | 3.68 |
flink_FutureUtils_switchExecutor | /**
* Switches the execution context of the given source future. This works for normally and
* exceptionally completed futures.
*
* @param source source to switch the execution context for
* @param executor executor representing the new execution context
* @param <T> type of the source
* @return future which is executed by the given executor
*/
public static <T> CompletableFuture<T> switchExecutor(
CompletableFuture<? extends T> source, Executor executor) {
return source.handleAsync(
(t, throwable) -> {
if (throwable != null) {
throw new CompletionException(throwable);
} else {
return t;
}
},
executor);
} | 3.68 |
hudi_IncrSourceHelper_getStrictlyLowerTimestamp | /**
* Get a timestamp which is the next value in a descending sequence.
*
* @param timestamp Timestamp
*/
private static String getStrictlyLowerTimestamp(String timestamp) {
long ts = Long.parseLong(timestamp);
ValidationUtils.checkArgument(ts > 0, "Timestamp must be positive");
long lower = ts - 1;
return "" + lower;
} | 3.68 |
hadoop_CleanerMetrics_reportAFileProcess | /**
* Report a process operation at the current system time
*/
public void reportAFileProcess() {
totalProcessedFiles.incr();
processedFiles.incr();
} | 3.68 |
graphhopper_VectorTile_setName | /**
* <code>required string name = 1;</code>
*/
public Builder setName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
name_ = value;
onChanged();
return this;
} | 3.68 |
framework_CalendarMonthDropHandler_dragLeave | /*
* (non-Javadoc)
*
* @see
* com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragLeave(com
* .vaadin.terminal.gwt.client.ui.dd.VDragEvent)
*/
@Override
public void dragLeave(VDragEvent drag) {
deEmphasis();
super.dragLeave(drag);
} | 3.68 |
hadoop_TFile_atEnd | /**
* Is cursor at the end location?
*
* @return true if the cursor is at the end location.
*/
public boolean atEnd() {
return (currentLocation.compareTo(endLocation) >= 0);
} | 3.68 |
hbase_ExecutorService_startExecutorService | /**
* Start an executor service with a given name. If there was a service already started with the
* same name, this throws a RuntimeException.
* @param config Configuration to use for the executor.
*/
public void startExecutorService(final ExecutorConfig config) {
final String name = config.getName();
Executor hbes = this.executorMap.compute(name, (key, value) -> {
if (value != null) {
throw new RuntimeException(
"An executor service with the name " + key + " is already running!");
}
return new Executor(config);
});
LOG.debug("Starting executor service name={}, corePoolSize={}, maxPoolSize={}", name,
hbes.threadPoolExecutor.getCorePoolSize(), hbes.threadPoolExecutor.getMaximumPoolSize());
} | 3.68 |
flink_SlotSharingGroup_setTaskHeapMemory | /** Set the task heap memory for this SlotSharingGroup. */
public Builder setTaskHeapMemory(MemorySize taskHeapMemory) {
checkArgument(
taskHeapMemory.compareTo(MemorySize.ZERO) > 0,
"The task heap memory should be positive.");
this.taskHeapMemory = taskHeapMemory;
return this;
} | 3.68 |
hbase_MetricsSnapshot_addSnapshotRestore | /**
* Record a single instance of a snapshot restore.
* @param time time that the snapshot restore took
*/
public void addSnapshotRestore(long time) {
source.updateSnapshotRestoreTime(time);
} | 3.68 |
morf_AliasedField_lessThan | /**
* @param value object to compare to (right hand side)
* @return a {@link Criterion} for a less than expression of this field.
*/
public Criterion lessThan(Object value) {
return Criterion.lessThan(this, value);
} | 3.68 |
MagicPlugin_MapController_resend | /**
* Force resending all maps to a specific player.
*/
public void resend(String playerName) {
for (URLMap map : keyMap.values()) {
map.resendTo(playerName);
}
} | 3.68 |
morf_CaseStatement_getDefaultValue | /**
* @return the defaultValue
*/
public AliasedField getDefaultValue() {
return defaultValue;
} | 3.68 |
hudi_FlinkOptions_isDefaultValueDefined | /**
* Returns whether the given conf defines default value for the option {@code option}.
*/
public static <T> boolean isDefaultValueDefined(Configuration conf, ConfigOption<T> option) {
return !conf.getOptional(option).isPresent()
|| conf.get(option).equals(option.defaultValue());
} | 3.68 |
hmily_TransactionManagerImpl_isExistDataSources | /**
* Checks whether the given XA connection is already enlisted as a data source;
* if not, it is enlisted and a transaction synchronization is registered.
*
* @param connection the XA connection
* @return true if the connection was already enlisted, false otherwise
*/
public boolean isExistDataSources(final XAConnection connection) {
boolean contains = enlisted.get().contains(connection);
Transaction transaction = getTransaction();
if (!contains) {
try {
transaction.registerSynchronization(new Synchronization() {
@Override
public void beforeCompletion() {
enlisted.get().remove(connection);
}
@Override
public void afterCompletion(final int status) {
enlisted.get().clear();
enlisted.remove();
}
});
} catch (RollbackException | SystemException e) {
return false;
}
enlisted.get().add(connection);
}
return contains;
} | 3.68 |
hadoop_SampleQuantiles_allowableError | /**
* Specifies the allowable error for this rank, depending on which quantiles
* are being targeted.
*
* This is the f(r_i, n) function from the CKMS paper. It's basically how wide
* the range of this rank can be.
*
* @param rank
* the index in the list of samples
*/
private double allowableError(int rank) {
int size = samples.size();
double minError = size + 1;
for (Quantile q : quantiles) {
double error;
if (rank <= q.quantile * size) {
error = (2.0 * q.error * (size - rank)) / (1.0 - q.quantile);
} else {
error = (2.0 * q.error * rank) / q.quantile;
}
if (error < minError) {
minError = error;
}
}
return minError;
} | 3.68 |
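
Written out as a formula, the loop above evaluates, for each targeted quantile $\phi_j$ with allowed error $\epsilon_j$, sample count $n$ and rank $r$, and returns the minimum over all targets:

```latex
f(r, n) \;=\; \min_{j}
\begin{cases}
  \dfrac{2\,\epsilon_j\,(n - r)}{1 - \phi_j}, & r \le \phi_j\, n,\\[6pt]
  \dfrac{2\,\epsilon_j\, r}{\phi_j},          & r > \phi_j\, n.
\end{cases}
```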
hbase_HFileCorruptionChecker_checkMobColFamDir | /**
* Check all files in a mob column family dir.
* @param cfDir mob column family directory
*/
protected void checkMobColFamDir(Path cfDir) throws IOException {
FileStatus[] statuses = null;
try {
statuses = fs.listStatus(cfDir); // use same filter as scanner.
} catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn("Mob colfam Directory " + cfDir
+ " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(cfDir);
return;
}
List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (hfs.isEmpty() && !fs.exists(cfDir)) {
LOG.warn("Mob colfam Directory " + cfDir
+ " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(cfDir);
return;
}
LOG.info("Checking MOB Column Family Directory {}. Number of entries = {}", cfDir, hfs.size());
for (FileStatus hfFs : hfs) {
Path hf = hfFs.getPath();
checkMobFile(hf);
}
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_getRandomIndexLevel | /**
* Return a random level for new node.
*
* <p>The implementation refers to the {@code randomLevel} method of JDK7's
* ConcurrentSkipListMap. See
* https://github.com/openjdk-mirror/jdk7u-jdk/blob/master/src/share/classes/java/util/concurrent/ConcurrentSkipListMap.java#L899
*/
private int getRandomIndexLevel() {
int x = randomSeed;
x ^= x << 13;
x ^= x >>> 17;
x ^= x << 5;
randomSeed = x;
// test highest and lowest bits
if ((x & 0x8001) != 0) {
return 0;
}
int level = 1;
int curMax = levelIndexHeader.getLevel();
x >>>= 1;
while ((x & 1) != 0) {
++level;
x >>>= 1;
// the level only be increased by step
if (level > curMax) {
break;
}
}
return level;
} | 3.68 |
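
To make the resulting level distribution concrete, here is a self-contained sketch that reproduces the xorshift-based level choice from the snippet above (the per-map level cap is deliberately omitted, and the seed value is arbitrary). Roughly three quarters of the draws land on level 0, and each further level is about half as likely as the previous one:

```java
import java.util.TreeMap;

public class RandomLevelSketch {

    private static int seed = 0x0badcafe; // arbitrary non-zero seed

    // Same xorshift update and bit tests as the snippet above, minus the cap.
    private static int randomLevel() {
        int x = seed;
        x ^= x << 13;
        x ^= x >>> 17;
        x ^= x << 5;
        seed = x;
        if ((x & 0x8001) != 0) { // test highest and lowest bits
            return 0;
        }
        int level = 1;
        x >>>= 1;
        while ((x & 1) != 0) {
            ++level;
            x >>>= 1;
        }
        return level;
    }

    public static void main(String[] args) {
        TreeMap<Integer, Integer> histogram = new TreeMap<>();
        for (int i = 0; i < 1_000_000; i++) {
            histogram.merge(randomLevel(), 1, Integer::sum);
        }
        System.out.println(histogram);
    }
}
```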
pulsar_ResourceGroup_getID | // Transport manager mandated op.
public String getID() {
return this.resourceGroupName;
} | 3.68 |
flink_DynamicPartitionPruningUtils_visitDimSide | /**
* Visit the dim side to judge whether it has a filter condition and whether its
* source table scan is a non-partitioned scan.
*/
private void visitDimSide(RelNode rel) {
// TODO Let visitDimSide more efficient and more accurate. Like a filter on dim table or
// a filter for the partition field on fact table.
if (rel instanceof TableScan) {
TableScan scan = (TableScan) rel;
TableSourceTable table = scan.getTable().unwrap(TableSourceTable.class);
if (table == null) {
return;
}
if (!hasFilter
&& table.abilitySpecs() != null
&& table.abilitySpecs().length != 0) {
for (SourceAbilitySpec spec : table.abilitySpecs()) {
if (spec instanceof FilterPushDownSpec) {
List<RexNode> predicates = ((FilterPushDownSpec) spec).getPredicates();
for (RexNode predicate : predicates) {
if (isSuitableFilter(predicate)) {
hasFilter = true;
}
}
}
}
}
CatalogTable catalogTable = table.contextResolvedTable().getResolvedTable();
if (catalogTable.isPartitioned()) {
hasPartitionedScan = true;
return;
}
// To ensure there is only one source on the dim side.
setTables(table.contextResolvedTable());
} else if (rel instanceof HepRelVertex) {
visitDimSide(((HepRelVertex) rel).getCurrentRel());
} else if (rel instanceof Exchange || rel instanceof Project) {
visitDimSide(rel.getInput(0));
} else if (rel instanceof Calc) {
RexProgram origProgram = ((Calc) rel).getProgram();
if (origProgram.getCondition() != null
&& isSuitableFilter(
origProgram.expandLocalRef(origProgram.getCondition()))) {
hasFilter = true;
}
visitDimSide(rel.getInput(0));
} else if (rel instanceof Filter) {
if (isSuitableFilter(((Filter) rel).getCondition())) {
hasFilter = true;
}
visitDimSide(rel.getInput(0));
} else if (rel instanceof Join) {
Join join = (Join) rel;
visitDimSide(join.getLeft());
visitDimSide(join.getRight());
} else if (rel instanceof BatchPhysicalGroupAggregateBase) {
visitDimSide(((BatchPhysicalGroupAggregateBase) rel).getInput());
} else if (rel instanceof Union) {
Union union = (Union) rel;
for (RelNode input : union.getInputs()) {
visitDimSide(input);
}
}
} | 3.68 |
morf_OracleDialect_sequenceName | /**
* Form the standard name for a table's autonumber sequence.
*
* @param tableName Name of the table for which the sequence name is required.
* @return Name of sequence.
*/
private String sequenceName(String tableName) {
return truncatedTableNameWithSuffix(tableName, "_SQ").toUpperCase();
} | 3.68 |
hbase_Bytes_getBestComparer | /**
* Returns the Unsafe-using Comparer, or falls back to the pure-Java implementation if unable to
* do so.
*/
static Comparer<byte[]> getBestComparer() {
try {
Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
// yes, UnsafeComparer does implement Comparer<byte[]>
@SuppressWarnings("unchecked")
Comparer<byte[]> comparer = (Comparer<byte[]>) theClass.getEnumConstants()[0];
return comparer;
} catch (Throwable t) { // ensure we really catch *everything*
return lexicographicalComparerJavaImpl();
}
} | 3.68 |
flink_BatchTask_instantiateUserCode | /**
* Instantiates a user code class from its definition in the task configuration. The class is
* instantiated without arguments using the null-ary constructor. Instantiation will fail if
* this constructor does not exist or is not public.
*
* @param <T> The generic type of the user code class.
* @param config The task configuration containing the class description.
* @param cl The class loader to be used to load the class.
* @param superClass The super class that the user code class extends or implements, for type
* checking.
* @return An instance of the user code class.
*/
public static <T> T instantiateUserCode(
TaskConfig config, ClassLoader cl, Class<? super T> superClass) {
try {
T stub = config.<T>getStubWrapper(cl).getUserCodeObject(superClass, cl);
// check if the class is a subclass, if the check is required
if (superClass != null && !superClass.isAssignableFrom(stub.getClass())) {
throw new RuntimeException(
"The class '"
+ stub.getClass().getName()
+ "' is not a subclass of '"
+ superClass.getName()
+ "' as is required.");
}
return stub;
} catch (ClassCastException ccex) {
throw new RuntimeException(
"The UDF class is not a proper subclass of " + superClass.getName(), ccex);
}
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsRemoveFile | // Delete a file.
private static int fsRemoveFile(final OBSFileSystem owner,
final String sonObjectKey,
final List<KeyAndVersion> files)
throws IOException {
files.add(new KeyAndVersion(sonObjectKey));
if (files.size() == owner.getMaxEntriesToDelete()) {
// batch delete files.
OBSCommonUtils.removeKeys(owner, files, true, false);
return owner.getMaxEntriesToDelete();
}
return 0;
} | 3.68 |
flink_BinaryHashBucketArea_insertToBucket | /**
* Insert into bucket by hashCode and pointer.
*
* @return false if the owning partition had to be spilled, true otherwise.
*/
boolean insertToBucket(int hashCode, int pointer, boolean sizeAddAndCheckResize)
throws IOException {
final int posHashCode = findBucket(hashCode);
// get the bucket for the given hash code
final int bucketArrayPos = posHashCode >> table.bucketsPerSegmentBits;
final int bucketInSegmentPos =
(posHashCode & table.bucketsPerSegmentMask) << BUCKET_SIZE_BITS;
final MemorySegment bucket = this.buckets[bucketArrayPos];
return insertToBucket(bucket, bucketInSegmentPos, hashCode, pointer, sizeAddAndCheckResize);
} | 3.68 |
morf_ConnectionResourcesBean_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public final boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
ConnectionResourcesBean other = (ConnectionResourcesBean) obj;
if (databaseName == null) {
if (other.databaseName != null) return false;
} else if (!databaseName.equals(other.databaseName)) return false;
if (!Objects.equals(databaseType, other.databaseType)) return false;
if (hostName == null) {
if (other.hostName != null) return false;
} else if (!hostName.equals(other.hostName)) return false;
if (instanceName == null) {
if (other.instanceName != null) return false;
} else if (!instanceName.equals(other.instanceName)) return false;
if (port != other.port) return false;
if (schemaName == null) {
if (other.schemaName != null) return false;
} else if (!schemaName.equals(other.schemaName)) return false;
if (statementPoolingMaxStatements != other.statementPoolingMaxStatements) return false;
if (!Objects.equals(fetchSizeForBulkSelects, other.fetchSizeForBulkSelects)) return false;
if (userName == null) {
if (other.userName != null) return false;
} else if (!userName.equals(other.userName)) return false;
return true;
} | 3.68 |
hbase_RpcServer_setErrorHandler | /**
* Set the handler for calling out of RPC for error conditions.
* @param handler the handler implementation
*/
@Override
public void setErrorHandler(HBaseRPCErrorHandler handler) {
this.errorHandler = handler;
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsRemoveKeys | // Remove sub objects depth by depth, to avoid putting parents and
// children in the same delete batch.
private static void fsRemoveKeys(final OBSFileSystem owner,
final FileStatus[] arFileStatus)
throws ObsException, IOException {
if (arFileStatus.length <= 0) {
// exit fast if there are no keys to delete
return;
}
String key;
for (FileStatus fileStatus : arFileStatus) {
key = OBSCommonUtils.pathToKey(owner, fileStatus.getPath());
OBSCommonUtils.blockRootDelete(owner.getBucket(), key);
}
fsRemoveKeysByDepth(owner, arFileStatus);
} | 3.68 |
hadoop_NMTokenCache_getToken | /**
* Returns NMToken, null if absent
* @param nodeAddr
* @return {@link Token} NMToken required for communicating with node
* manager
*/
@Public
@Evolving
public Token getToken(String nodeAddr) {
return nmTokens.get(nodeAddr);
} | 3.68 |
hbase_HMaster_getRemoteInetAddress | /** Returns the remote side's InetAddress. */
InetAddress getRemoteInetAddress(final int port, final long serverStartCode)
throws UnknownHostException {
// Do it out here in its own little method so can fake an address when
// mocking up in tests.
InetAddress ia = RpcServer.getRemoteIp();
// The call could be from the local regionserver,
// in which case, there is no remote address.
if (ia == null && serverStartCode == startcode) {
InetSocketAddress isa = rpcServices.getSocketAddress();
if (isa != null && isa.getPort() == port) {
ia = isa.getAddress();
}
}
return ia;
} | 3.68 |
hadoop_AllocateResponse_build | /**
* Return generated {@link AllocateResponse} object.
* @return {@link AllocateResponse}
*/
@Private
@Unstable
public AllocateResponse build() {
return allocateResponse;
} | 3.68 |
hbase_TableSpanBuilder_populateTableNameAttributes | /**
* Static utility method that performs the primary logic of this builder. It is visible to other
* classes in this package so that other builders can use this functionality as a mix-in.
* @param attributes the attributes map to be populated.
* @param tableName the source of attribute values.
*/
static void populateTableNameAttributes(final Map<AttributeKey<?>, Object> attributes,
final TableName tableName) {
attributes.put(DB_NAME, tableName.getNamespaceAsString());
attributes.put(TABLE_KEY, tableName.getNameAsString());
} | 3.68 |
framework_SimpleStringFilter_isIgnoreCase | /**
* Returns whether the filter is case-insensitive or case-sensitive.
*
* Note: this method is intended only for implementations of lazy string
* filters and may change in the future.
*
* @return true if performing case-insensitive filtering, false for
* case-sensitive
*/
public boolean isIgnoreCase() {
return ignoreCase;
} | 3.68 |
hibernate-validator_BeanMetaDataImpl_bySignature | /**
* Builds up the method meta data for this type; each meta-data entry will be stored under the signature of the
* represented method and all the methods it overrides.
*/
private Map<Signature, ExecutableMetaData> bySignature(Set<ExecutableMetaData> executables) {
Map<Signature, ExecutableMetaData> theValue = newHashMap();
for ( ExecutableMetaData executableMetaData : executables ) {
for ( Signature signature : executableMetaData.getSignatures() ) {
theValue.put( signature, executableMetaData );
}
}
return theValue;
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_createJobNodeIfNeeded | /**
* Create job node if needed.
*
* <p>Do not create node if root node not existed, which means job is shutdown.</p>
*
* @param node node
*/
public void createJobNodeIfNeeded(final String node) {
if (isJobRootNodeExisted() && !isJobNodeExisted(node)) {
regCenter.persist(jobNodePath.getFullPath(node), "");
}
} | 3.68 |
framework_TableSortingIndicator_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final Table table = new Table("Test table", buildContainer());
table.setSizeFull();
addComponent(table);
Button sortButton = new Button("Sort", event -> table
.sort(new Object[] { "val1" }, new boolean[] { false }));
addComponent(sortButton);
} | 3.68 |
druid_SQLCreateTableStatement_isExternal | // for odps & hive
public boolean isExternal() {
return external;
} | 3.68 |
hmily_ServerConfigLoader_getDirGlobal | /**
* Gets the path of the hmily.yml file under the current project directory.
*
* @return the hmily.yml path in the current project directory
*/
private String getDirGlobal() {
String userDir = System.getProperty("user.dir");
String fileName = "hmily.yml";
return String.join(String.valueOf(File.separatorChar), userDir, fileName);
} | 3.68 |
flink_AsyncSinkBaseBuilder_setMaxTimeInBufferMS | /**
* @param maxTimeInBufferMS the maximum amount of time an element may remain in the buffer. In
* most cases elements are flushed as a result of the batch size (in bytes or number) being
* reached or during a snapshot. However, there are scenarios where an element may remain in
* the buffer forever or a long period of time. To mitigate this, a timer is constantly
* active in the buffer such that: while the buffer is not empty, it will flush every
* maxTimeInBufferMS milliseconds.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxTimeInBufferMS(long maxTimeInBufferMS) {
this.maxTimeInBufferMS = maxTimeInBufferMS;
return (ConcreteBuilderT) this;
} | 3.68 |
hadoop_Tracer_getCurrentSpan | /***
* Return active span.
* @return org.apache.hadoop.tracing.Span
*/
public static Span getCurrentSpan() {
return null;
} | 3.68 |
flink_RemoteInputChannel_addPriorityBuffer | /** @return {@code true} if this was the first priority buffer added. */
private boolean addPriorityBuffer(SequenceBuffer sequenceBuffer) {
receivedBuffers.addPriorityElement(sequenceBuffer);
return receivedBuffers.getNumPriorityElements() == 1;
} | 3.68 |
framework_VAbstractCalendarPanel_setResolution | /**
* Sets the current date resolution.
*
* @param resolution
* the new resolution
*/
public void setResolution(R resolution) {
this.resolution = resolution;
} | 3.68 |
framework_StringToDoubleConverter_convertToModel | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object,
* java.util.Locale)
*/
@Override
public Double convertToModel(String value,
Class<? extends Double> targetType, Locale locale)
throws ConversionException {
Number n = convertToNumber(value, targetType, locale);
return n == null ? null : n.doubleValue();
} | 3.68 |
framework_FileResource_setSourceFile | /**
* Sets the source file.
*
* @param sourceFile
* the source file to set.
*/
private void setSourceFile(File sourceFile) {
this.sourceFile = sourceFile;
} | 3.68 |
druid_MySqlStatementParser_parseIterate | /**
* parse iterate statement
*/
public MySqlIterateStatement parseIterate() {
accept(Token.ITERATE);
MySqlIterateStatement iterateStmt = new MySqlIterateStatement();
iterateStmt.setLabelName(exprParser.name().getSimpleName());
accept(Token.SEMI);
return iterateStmt;
} | 3.68 |
flink_HiveWriterFactory_createRecordWriter | /** Create a {@link RecordWriter} from path. */
public RecordWriter createRecordWriter(Path path) {
try {
checkInitialize();
JobConf conf = new JobConf(confWrapper.conf());
if (isCompressed) {
String codecStr = conf.get(HiveConf.ConfVars.COMPRESSINTERMEDIATECODEC.varname);
if (!StringUtils.isNullOrWhitespaceOnly(codecStr)) {
//noinspection unchecked
Class<? extends CompressionCodec> codec =
(Class<? extends CompressionCodec>)
Class.forName(
codecStr,
true,
Thread.currentThread().getContextClassLoader());
FileOutputFormat.setOutputCompressorClass(conf, codec);
}
String typeStr = conf.get(HiveConf.ConfVars.COMPRESSINTERMEDIATETYPE.varname);
if (!StringUtils.isNullOrWhitespaceOnly(typeStr)) {
SequenceFile.CompressionType style =
SequenceFile.CompressionType.valueOf(typeStr);
SequenceFileOutputFormat.setOutputCompressionType(conf, style);
}
}
return hiveShim.getHiveRecordWriter(
conf,
hiveOutputFormatClz,
recordSerDe.getSerializedClass(),
isCompressed,
tableProperties,
path);
} catch (Exception e) {
throw new FlinkHiveException(e);
}
} | 3.68 |
morf_GraphBasedUpgrade_getPostUpgradeStatements | /**
* @return statements which must be executed after the upgrade
*/
public List<String> getPostUpgradeStatements() {
return postUpgradeStatements;
} | 3.68 |
morf_AbstractSqlDialectTest_testPostInsertWithPresetAutonumStatementsNotInsertingUnderAutonumLimit | /**
* Tests the SQL statements that are run after a data insert.
*/
@Test
public void testPostInsertWithPresetAutonumStatementsNotInsertingUnderAutonumLimit() {
testDialect.postInsertWithPresetAutonumStatements(metadata.getTable(TEST_TABLE), sqlScriptExecutor,connection,false);
testDialect.postInsertWithPresetAutonumStatements(metadata.getTable(AUTO_NUMBER_TABLE), sqlScriptExecutor,connection, false);
verifyPostInsertStatementsNotInsertingUnderAutonumLimit(sqlScriptExecutor,connection);
} | 3.68 |
flink_WindowReader_reduce | /**
* Reads window state generated using a {@link ReduceFunction}.
*
* @param uid The uid of the operator.
* @param function The reduce function used to create the window.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param reduceType The type information of the reduce function.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the reduce function.
* @param <OUT> The output type of the reduce function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException If savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataSource<OUT> reduce(
String uid,
ReduceFunction<T> function,
WindowReaderFunction<T, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<T> reduceType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, T, W, OUT> operator =
WindowReaderOperator.reduce(
function, readerFunction, keyType, windowSerializer, reduceType);
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
flink_ConfigOptions_mapType | /**
* Defines that the value of the option should be a set of properties, which can be
* represented as {@code Map<String, String>}.
*/
public TypedConfigOptionBuilder<Map<String, String>> mapType() {
return new TypedConfigOptionBuilder<>(key, PROPERTIES_MAP_CLASS);
} | 3.68 |
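
For context, a hedged usage sketch of how the builder above is typically chained (the option key and default value are illustrative only):

```java
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

import java.util.Collections;
import java.util.Map;

public class MapOptionSketch {
    // A map-typed option; values can be supplied as key/value properties.
    public static final ConfigOption<Map<String, String>> PROPERTIES =
            ConfigOptions.key("connector.properties") // illustrative key
                    .mapType()
                    .defaultValue(Collections.emptyMap());

    public static void main(String[] args) {
        System.out.println(PROPERTIES.key() + " -> " + PROPERTIES.defaultValue());
    }
}
```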
dubbo_ThrowableAction_execute | /**
* Executes {@link ThrowableAction}
*
* @param action {@link ThrowableAction}
* @throws RuntimeException wrap {@link Exception} to {@link RuntimeException}
*/
static void execute(ThrowableAction action) throws RuntimeException {
try {
action.execute();
} catch (Throwable e) {
throw new RuntimeException(e);
}
} | 3.68 |
hadoop_ExitUtil_toString | /**
* String value does not include exception type, just exit code and message.
* @return the exit code and any message
*/
@Override
public String toString() {
String message = getMessage();
if (message == null) {
message = super.toString();
}
return Integer.toString(status) + ": " + message;
} | 3.68 |
AreaShop_Utils_getRegions | /**
* Get all AreaShop regions containing a location.
* @param location The location to check
* @return A list with all the AreaShop regions that contain the location
*/
public static List<GeneralRegion> getRegions(Location location) {
return getRegionsInSelection(new WorldEditSelection(location.getWorld(), location, location));
} | 3.68 |
hmily_SubCoordinator_nextXid | /**
* Creates the next resource XID derived from the given XID.
*
* @param xId the parent xid
* @return the next xid
*/
public synchronized XidImpl nextXid(final XidImpl xId) {
return xId.newResId(this.resources.size() + 1);
} | 3.68 |
framework_UIInitHandler_reinitUI | /**
* Updates a UI that has already been initialized but is now loaded again,
* e.g. because of {@link PreserveOnRefresh}.
*
* @param ui
* @param request
*/
private void reinitUI(UI ui, VaadinRequest request) {
UI.setCurrent(ui);
ui.doRefresh(request);
} | 3.68 |
hbase_CellModel_getColumn | /** Returns the column */
public byte[] getColumn() {
return column;
} | 3.68 |
framework_AbstractColorPicker_getHistoryVisibility | /**
* Gets the visibility of the Color History.
*
* @since 7.5.0
* @return visibility of color history
*/
public boolean getHistoryVisibility() {
return historyVisible;
} | 3.68 |
flink_Tuple14_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> copy() {
return new Tuple14<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13);
} | 3.68 |
flink_Transformation_getId | /** Returns the unique ID of this {@code Transformation}. */
public int getId() {
return id;
} | 3.68 |
framework_HierarchyMapper_getDirectChildren | /**
* Gets the stream of direct children for given node.
*
* @param parent
* the parent node
* @return the stream of direct children
*/
private Stream<T> getDirectChildren(T parent) {
return doFetchDirectChildren(parent, Range.between(0, getDataProvider()
.getChildCount(new HierarchicalQuery<>(filter, parent))));
} | 3.68 |
hbase_TagCompressionContext_compressTags | /**
* Compress tags one by one and writes to the OutputStream.
* @param out Stream to which the compressed tags to be written
* @param in Source buffer where tags are available
* @param offset Offset for the tags byte buffer
* @param length Length of all tag bytes
*/
public void compressTags(OutputStream out, ByteBuffer in, int offset, int length)
throws IOException {
if (in.hasArray()) {
// Offset we are given is relative to ByteBuffer#arrayOffset
compressTags(out, in.array(), in.arrayOffset() + offset, length);
} else {
int pos = offset;
int endOffset = pos + length;
assert pos < endOffset;
while (pos < endOffset) {
int tagLen = ByteBufferUtils.readAsInt(in, pos, Tag.TAG_LENGTH_SIZE);
pos += Tag.TAG_LENGTH_SIZE;
Dictionary.write(out, in, pos, tagLen, tagDict);
pos += tagLen;
}
}
} | 3.68 |
framework_BootstrapHandler_getWidgetsetInfo | /**
* Gets information about the widgetset to use.
*
* @return the widgetset which will be loaded
*/
public WidgetsetInfo getWidgetsetInfo() {
if (widgetsetInfo == null) {
widgetsetInfo = getWidgetsetForUI(this);
}
return widgetsetInfo;
} | 3.68 |
framework_UIConnector_isMobileHTML5DndEnabled | /**
* Returns whether HTML5 DnD extensions {@link DragSourceExtensionConnector}
* and {@link DropTargetExtensionConnector} and alike should be enabled for
* mobile devices.
* <p>
* By default, it is disabled.
*
* @return {@code true} if enabled, {@code false} if not
* @since 8.1
*/
public boolean isMobileHTML5DndEnabled() {
return getState().enableMobileHTML5DnD;
} | 3.68 |
graphhopper_Helper_round | /**
* Round the value to the specified number of decimal places, i.e. decimalPlaces=2 means we round to two decimal
* places. Using negative values like decimalPlaces=-2 means we round to two places before the decimal point.
*/
public static double round(double value, int decimalPlaces) {
double factor = Math.pow(10, decimalPlaces);
return Math.round(value * factor) / factor;
} | 3.68 |
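
As a quick check of the negative `decimalPlaces` behaviour described above, a self-contained sketch that inlines the same formula:

```java
public class RoundSketch {
    // Same formula as the Helper.round method shown above.
    static double round(double value, int decimalPlaces) {
        double factor = Math.pow(10, decimalPlaces);
        return Math.round(value * factor) / factor;
    }

    public static void main(String[] args) {
        System.out.println(round(3.14159, 2));  // expected: 3.14
        System.out.println(round(1234.0, -2));  // expected: 1200.0
    }
}
```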
framework_BrowserWindowOpener_getResource | /**
* Returns the resource for this instance.
*
* @since 7.4
*
* @return resource to open browser window
*/
public Resource getResource() {
return getResource(BrowserWindowOpenerState.locationResource);
} | 3.68 |
framework_LayoutManager_reportWidthAssignedToRelative | /**
* Registers the width reserved for a relatively sized component. This can
* be used as an optimization by ManagedLayouts; by informing the
* LayoutManager about what size a component will have, the layout
* propagation can continue directly without first measuring the potentially
* resized elements.
*
* @param component
* the relatively sized component for which the size is reported
* @param assignedWidth
* the inner width of the relatively sized component's parent
* element in pixels
*/
public void reportWidthAssignedToRelative(ComponentConnector component,
int assignedWidth) {
assert component.isRelativeWidth();
float percentSize = parsePercent(component.getState().width == null ? ""
: component.getState().width);
int effectiveWidth = Math.round(assignedWidth * (percentSize / 100));
reportOuterWidth(component, effectiveWidth);
} | 3.68 |
flink_Path_deserializeFromDataInputView | /**
* Deserialize the Path from {@link DataInputView}.
*
* @param in the data input view.
* @return the path
* @throws IOException if an error happened.
*/
@Nullable
public static Path deserializeFromDataInputView(DataInputView in) throws IOException {
final boolean isNotNull = in.readBoolean();
Path result = null;
if (isNotNull) {
final String scheme = StringUtils.readNullableString(in);
final String userInfo = StringUtils.readNullableString(in);
final String host = StringUtils.readNullableString(in);
final int port = in.readInt();
final String path = StringUtils.readNullableString(in);
final String query = StringUtils.readNullableString(in);
final String fragment = StringUtils.readNullableString(in);
try {
result = new Path(new URI(scheme, userInfo, host, port, path, query, fragment));
} catch (URISyntaxException e) {
throw new IOException("Error reconstructing URI", e);
}
}
return result;
} | 3.68 |
framework_HierarchicalContainer_setParent | /**
* <p>
* Sets the parent of an Item. The new parent item must exist and be able to
* have children. (<code>canHaveChildren(newParentId) == true</code>). It is
* also possible to detach a node from the hierarchy (and thus make it root)
* by setting the parent <code>null</code>.
* </p>
*
* @param itemId
* the ID of the item to be set as the child of the Item
* identified with newParentId.
* @param newParentId
* the ID of the Item that's to be the new parent of the Item
* identified with itemId.
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
*/
@Override
public boolean setParent(Object itemId, Object newParentId) {
// Checks that the item is in the container
if (!containsId(itemId)) {
return false;
}
// Gets the old parent
final Object oldParentId = parent.get(itemId);
// Checks if no change is necessary
if ((newParentId == null && oldParentId == null)
|| ((newParentId != null) && newParentId.equals(oldParentId))) {
return true;
}
// Making root?
if (newParentId == null) {
// The itemId should become a root so we need to
// - Remove it from the old parent's children list
// - Add it as a root
// - Remove it from the item -> parent list (parent is null for
// roots)
// Removes from old parents children list
final LinkedList<Object> l = children.get(oldParentId);
if (l != null) {
l.remove(itemId);
if (l.isEmpty()) {
children.remove(oldParentId);
}
}
// Add to be a root
roots.add(itemId);
// Updates parent
parent.remove(itemId);
if (hasFilters()) {
// Refilter the container if setParent is called when filters
// are applied. Changing parent can change what is included in
// the filtered version (if includeParentsWhenFiltering==true).
doFilterContainer(hasFilters());
}
fireItemSetChange();
return true;
}
// We get here when the item should not become a root and we need to
// - Verify the new parent exists and can have children
// - Check that the new parent is not a child of the selected itemId
// - Updated the item -> parent mapping to point to the new parent
// - Remove the item from the roots list if it was a root
// - Remove the item from the old parent's children list if it was not a
// root
// Checks that the new parent exists in container and can have
// children
if (!containsId(newParentId)
|| noChildrenAllowed.contains(newParentId)) {
return false;
}
// Checks that setting parent doesn't result to a loop
Object o = newParentId;
while (o != null && !o.equals(itemId)) {
o = parent.get(o);
}
if (o != null) {
return false;
}
// Updates parent
parent.put(itemId, newParentId);
LinkedList<Object> pcl = children.get(newParentId);
if (pcl == null) {
// Create an empty list for holding children if one were not
// previously created
pcl = new LinkedList<Object>();
children.put(newParentId, pcl);
}
pcl.add(itemId);
// Removes from old parent or root
if (oldParentId == null) {
roots.remove(itemId);
} else {
final LinkedList<Object> l = children.get(oldParentId);
if (l != null) {
l.remove(itemId);
if (l.isEmpty()) {
children.remove(oldParentId);
}
}
}
if (hasFilters()) {
// Refilter the container if setParent is called when filters
// are applied. Changing parent can change what is included in
// the filtered version (if includeParentsWhenFiltering==true).
doFilterContainer(hasFilters());
}
fireItemSetChange();
return true;
} | 3.68 |
flink_BlockResettableMutableObjectIterator_hasFurtherInput | /**
* Checks, whether the input that is blocked by this iterator, has further elements available.
* This method may be used to forecast (for example at the point where a block is full) whether
* there will be more data (possibly in another block).
*
* @return True, if there will be more data, false otherwise.
*/
public boolean hasFurtherInput() {
return !this.noMoreBlocks;
} | 3.68 |
hudi_ClusteringOperator_endInput | /**
* End input action for batch source.
*/
public void endInput() {
// no operation
} | 3.68 |
graphhopper_PrepareLandmarks_setLandmarkSuggestions | /**
* @see LandmarkStorage#setLandmarkSuggestions(List)
*/
public PrepareLandmarks setLandmarkSuggestions(List<LandmarkSuggestion> landmarkSuggestions) {
lms.setLandmarkSuggestions(landmarkSuggestions);
return this;
} | 3.68 |
pulsar_ResourceGroup_setUsageInMonitoredEntity | // Fill usage about a particular monitoring class in the transport-manager callback
// for reporting local stats to other brokers.
// Returns true if something was filled.
// Visibility for unit testing.
protected boolean setUsageInMonitoredEntity(ResourceGroupMonitoringClass monClass, NetworkUsage p) {
long bytesUsed, messagesUsed;
boolean sendReport;
int numSuppressions = 0;
PerMonitoringClassFields monEntity;
final int idx = monClass.ordinal();
monEntity = this.monitoringClassFields[idx];
monEntity.localUsageStatsLock.lock();
try {
sendReport = this.rgs.quotaCalculator.needToReportLocalUsage(
monEntity.usedLocallySinceLastReport.bytes,
monEntity.lastReportedValues.bytes,
monEntity.usedLocallySinceLastReport.messages,
monEntity.lastReportedValues.messages,
monEntity.lastResourceUsageFillTimeMSecsSinceEpoch);
bytesUsed = monEntity.usedLocallySinceLastReport.bytes;
messagesUsed = monEntity.usedLocallySinceLastReport.messages;
monEntity.usedLocallySinceLastReport.bytes = monEntity.usedLocallySinceLastReport.messages = 0;
monEntity.totalUsedLocally.bytes += bytesUsed;
monEntity.totalUsedLocally.messages += messagesUsed;
monEntity.lastResourceUsageFillTimeMSecsSinceEpoch = System.currentTimeMillis();
if (sendReport) {
p.setBytesPerPeriod(bytesUsed);
p.setMessagesPerPeriod(messagesUsed);
monEntity.lastReportedValues.bytes = bytesUsed;
monEntity.lastReportedValues.messages = messagesUsed;
monEntity.numSuppressedUsageReports = 0;
} else {
numSuppressions = monEntity.numSuppressedUsageReports++;
}
} finally {
monEntity.localUsageStatsLock.unlock();
}
final String rgName = this.ruPublisher != null ? this.ruPublisher.getID() : this.resourceGroupName;
double sentCount = sendReport ? 1 : 0;
rgLocalUsageReportCount.labels(rgName, monClass.name()).inc(sentCount);
if (sendReport) {
if (log.isDebugEnabled()) {
log.debug("fillResourceUsage for RG={}: filled a {} update; bytes={}, messages={}",
rgName, monClass, bytesUsed, messagesUsed);
}
} else {
if (log.isDebugEnabled()) {
log.debug("fillResourceUsage for RG={}: report for {} suppressed "
+ "(suppressions={} since last sent report)",
rgName, monClass, numSuppressions);
}
}
return sendReport;
} | 3.68 |
flink_StreamingFileWriter_closePartFileForPartitions | /** Close in-progress part file when partition is committable. */
private void closePartFileForPartitions() throws Exception {
if (partitionCommitPredicate != null) {
final Iterator<Map.Entry<String, Long>> iterator =
inProgressPartitions.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Long> entry = iterator.next();
String partition = entry.getKey();
Long creationTime = entry.getValue();
PredicateContext predicateContext =
PartitionCommitPredicate.createPredicateContext(
partition,
creationTime,
processingTimeService.getCurrentProcessingTime(),
currentWatermark);
if (partitionCommitPredicate.isPartitionCommittable(predicateContext)) {
// if partition is committable, close in-progress part file in this partition
buckets.closePartFileForBucket(partition);
iterator.remove();
}
}
}
} | 3.68 |
morf_SqlServerDialect_undecorateName | /**
* Removes any decoration characters from the name. (# for temp table).
*
* @param name name of table
* @return version of name with any decoration removed.
*/
public String undecorateName(String name) {
if (name.startsWith("#")) {
return name.substring(1);
} else {
return name;
}
} | 3.68 |
hbase_MetricsConnection_incrMetaCacheNumClearRegion | /** Increment the number of meta cache drops requested for individual region. */
public void incrMetaCacheNumClearRegion(int count) {
metaCacheNumClearRegion.inc(count);
} | 3.68 |
flink_CliView_resetMainPart | /** Must be called when values in the main part (main header or main) have changed. */
protected void resetMainPart() {
mainHeaderLines = null;
mainLines = null;
totalMainWidth = 0;
} | 3.68 |
hbase_HMobStore_readCell | /**
* Reads the cell from a mob file. The mob file might be located in different directories. 1. The
* working directory. 2. The archive directory. Reads the cell from the files located in both of
* the above directories.
* @param locations The possible locations where the mob files are saved.
* @param fileName The file to be read.
* @param search The cell to be searched.
* @param cacheMobBlocks Whether the scanner should cache blocks.
* @param readPt the read point.
* @param readEmptyValueOnMobCellMiss Whether return null value when the mob file is missing or
* corrupt.
* @return The found cell. Null if there's no such a cell.
*/
private MobCell readCell(List<Path> locations, String fileName, Cell search,
boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException {
FileSystem fs = getFileSystem();
IOException ioe = null;
for (Path location : locations) {
MobFile file = null;
Path path = new Path(location, fileName);
try {
file = mobFileCache.openFile(fs, path, getCacheConfig());
return readPt != -1
? file.readCell(search, cacheMobBlocks, readPt)
: file.readCell(search, cacheMobBlocks);
} catch (IOException e) {
mobFileCache.evictFile(fileName);
ioe = e;
if (
(e instanceof FileNotFoundException) || (e.getCause() instanceof FileNotFoundException)
) {
LOG.debug("Fail to read the cell, the mob file " + path + " doesn't exist", e);
} else if (e instanceof CorruptHFileException) {
LOG.error("The mob file " + path + " is corrupt", e);
break;
} else {
throw e;
}
} finally {
if (file != null) {
mobFileCache.closeFile(file);
}
}
}
LOG.error("The mob file " + fileName + " could not be found in the locations " + locations
+ " or it is corrupt");
if (readEmptyValueOnMobCellMiss) {
return null;
} else if (
(ioe instanceof FileNotFoundException) || (ioe.getCause() instanceof FileNotFoundException)
) {
// The region is re-opened when FileNotFoundException is thrown.
// This is not necessary when MOB files cannot be found, because the store files
// in a region only contain the references to MOB files and a re-open on a region
// doesn't help fix the lost MOB files.
throw new DoNotRetryIOException(ioe);
} else {
throw ioe;
}
} | 3.68 |
hbase_RpcServer_getRemoteAddress | /** Returns Address of remote client if a request is ongoing, else null */
public static Optional<InetAddress> getRemoteAddress() {
return getCurrentCall().map(RpcCall::getRemoteAddress);
} | 3.68 |
hadoop_RolloverSignerSecretProvider_initSecrets | /**
* Initializes the secrets array. This should typically be called only once,
* during init but some implementations may wish to call it other times.
* previousSecret can be null if there isn't a previous secret, but
* currentSecret should never be null.
* @param currentSecret The current secret
* @param previousSecret The previous secret
*/
protected void initSecrets(byte[] currentSecret, byte[] previousSecret) {
secrets = new byte[][]{currentSecret, previousSecret};
} | 3.68 |
flink_AbstractBytesHashMap_getRecordAreaMemorySegments | /** @return the underlying memory segments of the hash map's record area */
@SuppressWarnings("WeakerAccess")
public ArrayList<MemorySegment> getRecordAreaMemorySegments() {
return ((RecordArea) recordArea).segments;
} | 3.68 |