name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_StorageClusterStatusModel_getAverageLoad | /** Returns the average load of the region servers in the cluster */
@XmlAttribute
public double getAverageLoad() {
return averageLoad;
} | 3.68 |
flink_FileSystemCheckpointStorage_getWriteBufferSize | /**
* Gets the write buffer size for created checkpoint stream.
*
* <p>If not explicitly configured, this is the default value of {@link
* CheckpointingOptions#FS_WRITE_BUFFER_SIZE}.
*
* @return The write buffer size, in bytes.
*/
public int getWriteBufferSize() {
return writeBufferSize >= 0
? writeBufferSize
: CheckpointingOptions.FS_WRITE_BUFFER_SIZE.defaultValue();
} | 3.68 |
framework_AbstractBeanContainer_addContainerProperty | /**
* Adds a property for the container and all its items.
*
* Primarily for internal use, may change in future versions.
*
 * @param propertyId the identifier of the property to add
 * @param propertyDescriptor the descriptor used to create the property for each item
* @return true if the property was added
*/
protected final boolean addContainerProperty(String propertyId,
VaadinPropertyDescriptor<BEANTYPE> propertyDescriptor) {
if (null == propertyId || null == propertyDescriptor) {
return false;
}
// Fails if the Property is already present
if (model.containsKey(propertyId)) {
return false;
}
model.put(propertyId, propertyDescriptor);
for (BeanItem<BEANTYPE> item : itemIdToItem.values()) {
item.addItemProperty(propertyId,
propertyDescriptor.createProperty(item.getBean()));
}
// Sends a change event
fireContainerPropertySetChange();
return true;
} | 3.68 |
hbase_ColumnSchemaModel_addAttribute | /**
* Add an attribute to the column family schema
* @param name the attribute name
* @param value the attribute value
*/
@JsonAnySetter
public void addAttribute(String name, Object value) {
attrs.put(new QName(name), value);
} | 3.68 |
hbase_ScanInfo_customize | /**
* Used by CP users for customizing max versions, ttl, keepDeletedCells, min versions, and time to
* purge deletes.
*/
ScanInfo customize(int maxVersions, long ttl, KeepDeletedCells keepDeletedCells, int minVersions,
long timeToPurgeDeletes) {
return new ScanInfo(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes,
comparator, tableMaxRowSize, usePread, cellsPerTimeoutCheck, parallelSeekEnabled,
preadMaxBytes, newVersionBehavior);
} | 3.68 |
flink_HiveFunction_createTypeInference | /** Creates {@link TypeInference} for the function. */
default TypeInference createTypeInference() {
TypeInference.Builder builder = TypeInference.newBuilder();
builder.inputTypeStrategy(new HiveFunctionInputStrategy(this));
builder.outputTypeStrategy(new HiveFunctionOutputStrategy(this));
return builder.build();
} | 3.68 |
framework_VaadinService_addSessionDestroyListener | /**
* Adds a listener that gets notified when a Vaadin service session that has
* been initialized for this service is destroyed.
* <p>
* The session being destroyed is locked and its UIs have been removed when
* the listeners are called.
*
* @see #addSessionInitListener(SessionInitListener)
*
* @param listener
* the vaadin service session destroy listener
* @return a registration object for removing the listener
* @since 8.0
*/
public Registration addSessionDestroyListener(
SessionDestroyListener listener) {
sessionDestroyListeners.add(listener);
return () -> sessionDestroyListeners.remove(listener);
} | 3.68 |
framework_AbstractLegacyComponent_setReadOnly | /**
* Sets the read-only mode of the component to the specified mode. The user
* can not change the value of a read-only component.
*
* <p>
* As only {@code AbstractField} or {@code LegacyField} components normally
* have a value that can be input or changed by the user, this is mostly
* relevant only to field components, though not restricted to them.
* </p>
*
* <p>
* Notice that the read-only mode only affects whether the user can change
* the <i>value</i> of the component; it is possible to, for example, scroll
* a read-only table.
* </p>
*
* <p>
* In Vaadin 8 the read-only property is part of {@link HasValue} API.
* </p>
*
* @param readOnly
* a boolean value specifying whether the component is put
* read-only mode or not
*/
@Override
public void setReadOnly(boolean readOnly) {
getState().readOnly = readOnly;
} | 3.68 |
flink_SharedBufferAccessor_registerEvent | /**
* Adds another unique event to the shared buffer and assigns a unique id for it. It
* automatically creates a lock on this event, so it won't be removed during processing of that
* event. Therefore the lock should be removed after processing all {@link
* org.apache.flink.cep.nfa.ComputationState}s
*
* <p><b>NOTE:</b>Should be called only once for each unique event!
*
 * @param value event to be registered
 * @param timestamp timestamp of the event
* @return unique id of that event that should be used when putting entries to the buffer.
* @throws Exception Thrown if the system cannot access the state.
*/
public EventId registerEvent(V value, long timestamp) throws Exception {
return sharedBuffer.registerEvent(value, timestamp);
} | 3.68 |
hbase_MobFileCache_getHitRatio | /**
* Gets the hit ratio to the mob file cache.
* @return The hit ratio to the mob file cache.
*/
public double getHitRatio() {
return count.get() == 0 ? 0 : ((float) (count.get() - miss.sum())) / (float) count.get();
} | 3.68 |
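The ratio above is simply (total lookups - misses) / total lookups, guarded against division by zero. A minimal standalone sketch of the same bookkeeping, in plain JDK code with hypothetical counter fields named after those in the snippet (not HBase code):

```java
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;

// Standalone sketch: mirrors the (count - miss) / count formula above.
public class HitRatioSketch {
    private final AtomicLong count = new AtomicLong(); // total cache lookups
    private final LongAdder miss = new LongAdder();    // lookups that missed

    void recordLookup(boolean hit) {
        count.incrementAndGet();
        if (!hit) {
            miss.increment();
        }
    }

    double hitRatio() {
        long total = count.get();
        return total == 0 ? 0 : ((double) (total - miss.sum())) / total;
    }

    public static void main(String[] args) {
        HitRatioSketch s = new HitRatioSketch();
        s.recordLookup(true);
        s.recordLookup(true);
        s.recordLookup(false);
        System.out.println(s.hitRatio()); // 2 hits out of 3 lookups -> ~0.667
    }
}
```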
hadoop_DockerCommandExecutor_executeStatusCommand | /**
* Execute the docker inspect command to retrieve the docker container's
* status.
*
* @param containerId the id of the container.
 * @param privilegedOperationExecutor the privileged operations executor.
 * @param nmContext the NodeManager context.
* @return the current container status.
* @throws ContainerExecutionException if the docker operation fails to run.
*/
private static String executeStatusCommand(String containerId,
PrivilegedOperationExecutor privilegedOperationExecutor,
Context nmContext)
throws ContainerExecutionException {
DockerInspectCommand dockerInspectCommand =
new DockerInspectCommand(containerId).getContainerStatus();
try {
return DockerCommandExecutor.executeDockerCommand(dockerInspectCommand,
containerId, null, privilegedOperationExecutor, true, nmContext);
} catch (ContainerExecutionException e) {
throw new ContainerExecutionException(e);
}
} | 3.68 |
hadoop_OBSCommonUtils_getPassword | /**
* Get a password from a configuration, or, if a value is passed in, pick that
* up instead.
*
* @param conf configuration
* @param key key to look up
* @param val current value: if non empty this is used instead of querying
* the configuration.
* @return a password or "".
* @throws IOException on any problem
*/
private static String getPassword(final Configuration conf,
final String key, final String val) throws IOException {
return StringUtils.isEmpty(val) ? lookupPassword(conf, key) : val;
} | 3.68 |
hadoop_TimelineReaderWebServicesUtils_parseDataToRetrieve | /**
* Parses confstoretrieve and metricstoretrieve.
 * @param expr String representing the confs/metrics to retrieve expression.
*
* @return a {@link TimelineFilterList} object.
* @throws TimelineParseException if any problem occurs during parsing.
*/
static TimelineFilterList parseDataToRetrieve(String expr)
throws TimelineParseException {
return parseFilters(new TimelineParserForDataToRetrieve(expr));
} | 3.68 |
hbase_RegionMover_unloadFromRack | /**
 * Unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}. In
 * noAck mode we do not make sure that the region is successfully online on the target region
 * server, hence it is best effort. We do not unload regions to hostnames given in
 * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions
 * to hostnames provided in {@link #designatedFile}. While unloading regions, destination
 * RegionServers are selected from a different rack, i.e. regions should not move to any
 * RegionServer that belongs to the same rack as the source RegionServer.
* @return true if unloading succeeded, false otherwise
*/
public boolean unloadFromRack()
throws InterruptedException, ExecutionException, TimeoutException {
return unloadRegions(true);
} | 3.68 |
morf_H2Dialect_getSqlFrom | /**
* TODO
* The following is a workaround to a bug in H2 version 1.4.200 whereby the MERGE...USING statement does not release the source select statement
* Please remove this method once <a href="https://github.com/h2database/h2database/issues/2196">issue 2196</a> has been fixed and H2 upgraded to the fixed version
* This workaround uses the following alternative syntax, which fortunately does not lead to the same bug:
*
* <pre>
* WITH xmergesource AS (SELECT ...)
* MERGE INTO Table
* USING xmergesource
* ON (Table.id = xmergesource.id)
* WHEN MATCHED THEN UPDATE ...
* WHEN NOT MATCHED THEN INSERT ...
* </pre>
*
* @see SqlDialect#getSqlFrom(MergeStatement)
*/
@Override
protected String getSqlFrom(MergeStatement statement) {
// --- TODO
// call the original implementation which performs various consistency checks
super.getSqlFrom(statement);
// --- TODO
// but ignore whatever it produces, and create a slightly different variant
final StringBuilder sqlBuilder = new StringBuilder();
// WITH xmergesource AS (SELECT ...)
sqlBuilder.append("WITH ")
.append(MERGE_SOURCE_ALIAS)
.append(" AS (")
.append(getSqlFrom(statement.getSelectStatement()))
.append(") ");
// MERGE INTO Table USING xmergesource
sqlBuilder.append("MERGE INTO ")
.append(schemaNamePrefix())
.append(statement.getTable().getName())
.append(" USING ")
.append(MERGE_SOURCE_ALIAS);
// ON (Table.id = xmergesource.id)
sqlBuilder.append(" ON (")
.append(matchConditionSqlForMergeFields(statement, MERGE_SOURCE_ALIAS, statement.getTable().getName()))
.append(")");
// WHEN MATCHED THEN UPDATE ...
if (getNonKeyFieldsFromMergeStatement(statement).iterator().hasNext()) {
Iterable<AliasedField> updateExpressions = getMergeStatementUpdateExpressions(statement);
String updateExpressionsSql = getMergeStatementAssignmentsSql(updateExpressions);
sqlBuilder.append(" WHEN MATCHED THEN UPDATE SET ")
.append(updateExpressionsSql);
}
// WHEN NOT MATCHED THEN INSERT ...
Iterable<String> insertField = Iterables.transform(statement.getSelectStatement().getFields(), AliasedField::getImpliedName);
Iterable<String> valueFields = Iterables.transform(statement.getSelectStatement().getFields(), field -> MERGE_SOURCE_ALIAS + "." + field.getImpliedName());
sqlBuilder.append(" WHEN NOT MATCHED THEN INSERT (")
.append(Joiner.on(", ").join(insertField))
.append(") VALUES (")
.append(Joiner.on(", ").join(valueFields))
.append(")");
return sqlBuilder.toString();
} | 3.68 |
pulsar_MessageDeduplication_producerRemoved | /**
* Topic will call this method whenever a producer disconnects.
*/
public void producerRemoved(String producerName) {
// Producer is no-longer active
inactiveProducers.put(producerName, System.currentTimeMillis());
} | 3.68 |
framework_VaadinSession_isClosing | /**
* Returns whether this session is marked to be closed. Note that this
* method also returns true if the session is actually already closed.
*
* @see #close()
*
* @deprecated As of 7.2, use
* <code>{@link #getState() getState() != State.OPEN}</code>
* instead.
*
* @return true if this session is marked to be closed, false otherwise
*/
@Deprecated
public boolean isClosing() {
assert hasLock();
return state == State.CLOSING || state == State.CLOSED;
} | 3.68 |
morf_MySqlDialect_renameTableStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#renameTableStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Table)
*/
@Override
public Collection<String> renameTableStatements(Table from, Table to) {
return Collections.singletonList("RENAME TABLE " + from.getName() + " TO " + to.getName());
} | 3.68 |
hadoop_IOStatisticsContextIntegration_getCurrentIOStatisticsContext | /**
* Get the current thread's IOStatisticsContext instance. If no instance is
* present for this thread ID, create one using the factory.
* @return instance of IOStatisticsContext.
*/
public static IOStatisticsContext getCurrentIOStatisticsContext() {
return isThreadIOStatsEnabled
? ACTIVE_IOSTATS_CONTEXT.getForCurrentThread()
: EmptyIOStatisticsContextImpl.getInstance();
} | 3.68 |
morf_SqlScriptExecutor_executeAndCommit | /**
* Runs a batch of SQL statements.
*
* @param sqlScript SQL statements to run.
* @param connection Database against which to run SQL statements.
* @return The number of rows updated/affected by the statements in total.
*/
public int executeAndCommit(Iterable<String> sqlScript, Connection connection) {
int result = 0;
try {
visitor.executionStart();
for (String sql : sqlScript) {
result += executeInternal(sql, connection);
connection.commit();
}
visitor.executionEnd();
} catch (SQLException e) {
throw reclassifiedRuntimeException(e, "Error with statement");
}
return result;
} | 3.68 |
flink_MigrationUtils_skipSerializedStates | /**
* Skips bytes corresponding to serialized states. In flink 1.6+ the states are no longer kept
* in state.
*/
static void skipSerializedStates(DataInputView in) throws IOException {
TypeSerializer<String> nameSerializer = StringSerializer.INSTANCE;
TypeSerializer<State.StateType> stateTypeSerializer =
new EnumSerializer<>(State.StateType.class);
TypeSerializer<StateTransitionAction> actionSerializer =
new EnumSerializer<>(StateTransitionAction.class);
final int noOfStates = in.readInt();
for (int i = 0; i < noOfStates; i++) {
nameSerializer.deserialize(in);
stateTypeSerializer.deserialize(in);
}
for (int i = 0; i < noOfStates; i++) {
String srcName = nameSerializer.deserialize(in);
int noOfTransitions = in.readInt();
for (int j = 0; j < noOfTransitions; j++) {
String src = nameSerializer.deserialize(in);
Preconditions.checkState(
src.equals(srcName),
"Source Edge names do not match (" + srcName + " - " + src + ").");
nameSerializer.deserialize(in);
actionSerializer.deserialize(in);
try {
skipCondition(in);
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
}
}
} | 3.68 |
hadoop_ServiceLauncher_exitWithMessage | /**
* Exit with a printed message.
* @param status status code
 * @param message message to print before exiting
* @throws ExitUtil.ExitException if exceptions are disabled
*/
protected static void exitWithMessage(int status, String message) {
ExitUtil.terminate(new ServiceLaunchException(status, message));
} | 3.68 |
flink_PythonGatewayServer_main | /**
 * Main method to start a local GatewayServer on an ephemeral port. It tells the Python side
 * the connection information via a file.
*
* <p>See: py4j.GatewayServer.main()
*/
public static void main(String[] args)
throws IOException, ExecutionException, InterruptedException {
GatewayServer gatewayServer = PythonEnvUtils.startGatewayServer();
PythonEnvUtils.setGatewayServer(gatewayServer);
int boundPort = gatewayServer.getListeningPort();
Py4JPythonClient callbackClient = gatewayServer.getCallbackClient();
int callbackPort = callbackClient.getPort();
if (boundPort == -1) {
System.out.println("GatewayServer failed to bind; exiting");
System.exit(1);
}
// Tells python side the port of our java rpc server
String handshakeFilePath = System.getenv("_PYFLINK_CONN_INFO_PATH");
File handshakeFile = new File(handshakeFilePath);
File tmpPath =
Files.createTempFile(handshakeFile.getParentFile().toPath(), "connection", ".info")
.toFile();
FileOutputStream fileOutputStream = new FileOutputStream(tmpPath);
DataOutputStream stream = new DataOutputStream(fileOutputStream);
stream.writeInt(boundPort);
stream.writeInt(callbackPort);
stream.close();
fileOutputStream.close();
if (!tmpPath.renameTo(handshakeFile)) {
System.out.println(
"Unable to write connection information to handshake file: "
+ handshakeFilePath
+ ", now exit...");
System.exit(1);
}
try {
// This ensures that the server dies if its parent program dies.
Map<String, Object> entryPoint =
(Map<String, Object>) gatewayServer.getGateway().getEntryPoint();
for (int i = 0; i < TIMEOUT_MILLIS / CHECK_INTERVAL; i++) {
if (entryPoint.containsKey("Watchdog")) {
break;
}
Thread.sleep(CHECK_INTERVAL);
}
if (!entryPoint.containsKey("Watchdog")) {
System.out.println("Unable to get the Python watchdog object, now exit.");
System.exit(1);
}
Watchdog watchdog = (Watchdog) entryPoint.get("Watchdog");
while (watchdog.ping()) {
Thread.sleep(CHECK_INTERVAL);
}
gatewayServer.shutdown();
System.exit(0);
} finally {
System.exit(1);
}
} | 3.68 |
hadoop_KMSExceptionsProvider_toResponse | /**
* Maps different exceptions thrown by KMS to HTTP status codes.
*/
@Override
public Response toResponse(Exception exception) {
Response.Status status;
boolean doAudit = true;
Throwable throwable = exception;
if (exception instanceof ContainerException) {
throwable = exception.getCause();
}
if (throwable instanceof SecurityException) {
status = Response.Status.FORBIDDEN;
} else if (throwable instanceof AuthenticationException) {
status = Response.Status.FORBIDDEN;
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AuthorizationException) {
status = Response.Status.FORBIDDEN;
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AccessControlException) {
status = Response.Status.FORBIDDEN;
} else if (exception instanceof IOException) {
status = Response.Status.INTERNAL_SERVER_ERROR;
log(status, throwable);
} else if (exception instanceof UnsupportedOperationException) {
status = Response.Status.BAD_REQUEST;
} else if (exception instanceof IllegalArgumentException) {
status = Response.Status.BAD_REQUEST;
} else {
status = Response.Status.INTERNAL_SERVER_ERROR;
log(status, throwable);
}
if (doAudit) {
KMSWebApp.getKMSAudit().error(KMSMDCFilter.getUgi(),
KMSMDCFilter.getMethod(),
KMSMDCFilter.getURL(), getOneLineMessage(exception));
}
EXCEPTION_LOG.warn("User {} request {} {} caused exception.",
KMSMDCFilter.getUgi(), KMSMDCFilter.getMethod(),
KMSMDCFilter.getURL(), exception);
return createResponse(status, throwable);
} | 3.68 |
flink_CompletedCheckpointStatsSummary_getStateSizeStats | /**
* Returns the summary stats for the state size of completed checkpoints.
*
* @return Summary stats for the state size.
*/
public StatsSummary getStateSizeStats() {
return stateSize;
} | 3.68 |
framework_VaadinSession_addRequestHandler | /**
* Adds a request handler to this session. Request handlers can be added to
* provide responses to requests that are not handled by the default
* functionality of the framework.
* <p>
* Handlers are called in reverse order of addition, so the most recently
* added handler will be called first.
* </p>
*
* @param handler
* the request handler to add
*
* @see #removeRequestHandler(RequestHandler)
*
* @since 7.0
*/
public void addRequestHandler(RequestHandler handler) {
assert hasLock();
requestHandlers.addFirst(handler);
} | 3.68 |
hadoop_PositionedReadable_minSeekForVectorReads | /**
* What is the smallest reasonable seek?
* @return the minimum number of bytes
*/
default int minSeekForVectorReads() {
return 4 * 1024;
} | 3.68 |
hadoop_HostSet_matchedBy | /**
* The function that checks whether there exists an entry foo in the set
* so that foo <= addr.
*/
boolean matchedBy(InetSocketAddress addr) {
Collection<Integer> ports = addrs.get(addr.getAddress());
return addr.getPort() == 0 ? !ports.isEmpty() : ports.contains(addr
.getPort());
} | 3.68 |
pulsar_AuthorizationService_revokeSubscriptionPermissionAsync | /**
* Revoke subscription admin-api access for a role.
*
 * @param namespace the namespace
 * @param subscriptionName the subscription name
 * @param role the role whose subscription permission is revoked
 * @param authDataJson additional authentication data in JSON format
 * @return a future that completes once the permission has been revoked
*/
public CompletableFuture<Void> revokeSubscriptionPermissionAsync(NamespaceName namespace, String subscriptionName,
String role, String authDataJson) {
return provider.revokeSubscriptionPermissionAsync(namespace, subscriptionName, role, authDataJson);
} | 3.68 |
querydsl_Projections_array | /**
* Create a typed array projection for the given type and expressions
*
* @param <T> type of projection
* @param type type of the projection
* @param exprs arguments for the projection
* @return factory expression
*/
public static <T> ArrayConstructorExpression<T> array(Class<T[]> type, Expression<T>... exprs) {
return new ArrayConstructorExpression<T>(type, exprs);
} | 3.68 |
hadoop_QueueStateHelper_setQueueState | /**
* Sets the current state of the queue based on its previous state, its parent's state and its
* configured state.
* @param queue the queue whose state is set
*/
public static void setQueueState(AbstractCSQueue queue) {
QueueState previousState = queue.getState();
QueueState configuredState = queue.getQueueContext().getConfiguration().getConfiguredState(
queue.getQueuePath());
QueueState parentState = (queue.getParent() == null) ? null : queue.getParent().getState();
    // verify that we cannot use any value for State other than RUNNING/STOPPED
if (configuredState != null && !VALID_STATE_CONFIGURATIONS.contains(configuredState)) {
throw new IllegalArgumentException("Invalid queue state configuration."
+ " We can only use RUNNING or STOPPED.");
}
if (previousState == null) {
initializeState(queue, configuredState, parentState);
} else {
reinitializeState(queue, previousState, configuredState);
}
} | 3.68 |
flink_PlannerContext_getSqlParserConfig | /**
* Returns the SQL parser config for this environment including a custom Calcite configuration.
*/
private SqlParser.Config getSqlParserConfig() {
return JavaScalaConversionUtil.<SqlParser.Config>toJava(
getCalciteConfig().getSqlParserConfig())
.orElseGet(
// we use Java lex because back ticks are easier than double quotes in
// programming and cases are preserved
() -> {
SqlConformance conformance = getSqlConformance();
return SqlParser.config()
.withParserFactory(FlinkSqlParserFactories.create(conformance))
.withConformance(conformance)
.withLex(Lex.JAVA)
.withIdentifierMaxLength(256);
});
} | 3.68 |
hudi_HoodieTableMetaClient_getArchivedTimeline | /**
* Returns the cached archived timeline if using in-memory cache or a fresh new archived
* timeline if not using cache, from startTs (inclusive).
* <p>
 * Instantiating an archived timeline is a costly operation if a very early startTs is
 * specified.
* <p>
* This method is not thread safe.
*
* @param startTs The start instant time (inclusive) of the archived timeline.
* @param useCache Whether to use in-memory cache.
* @return the archived timeline based on the arguments.
*/
public HoodieArchivedTimeline getArchivedTimeline(String startTs, boolean useCache) {
if (useCache) {
if (!archivedTimelineMap.containsKey(startTs)) {
// Only keep one entry in the map
archivedTimelineMap.clear();
archivedTimelineMap.put(startTs, instantiateArchivedTimeline(startTs));
}
return archivedTimelineMap.get(startTs);
}
return instantiateArchivedTimeline(startTs);
} | 3.68 |
hudi_JavaUpsertPartitioner_averageBytesPerRecord | /**
* Obtains the average record size based on records written during previous commits. Used for estimating how many
* records pack into one file.
*/
protected static long averageBytesPerRecord(HoodieTimeline commitTimeline, HoodieWriteConfig hoodieWriteConfig) {
long avgSize = hoodieWriteConfig.getCopyOnWriteRecordSizeEstimate();
long fileSizeThreshold = (long) (hoodieWriteConfig.getRecordSizeEstimationThreshold() * hoodieWriteConfig.getParquetSmallFileLimit());
try {
if (!commitTimeline.empty()) {
// Go over the reverse ordered commits to get a more recent estimate of average record size.
Iterator<HoodieInstant> instants = commitTimeline.getReverseOrderedInstants().iterator();
while (instants.hasNext()) {
HoodieInstant instant = instants.next();
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata
.fromBytes(commitTimeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
long totalBytesWritten = commitMetadata.fetchTotalBytesWritten();
long totalRecordsWritten = commitMetadata.fetchTotalRecordsWritten();
if (totalBytesWritten > fileSizeThreshold && totalRecordsWritten > 0) {
avgSize = (long) Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten);
break;
}
}
}
} catch (Throwable t) {
// make this fail safe.
LOG.error("Error trying to compute average bytes/record ", t);
}
return avgSize;
} | 3.68 |
hadoop_ServiceLauncher_getConfigurationsToCreate | /**
* Override point: Get a list of configuration classes to create.
* @return the array of configs to attempt to create. If any are off the
* classpath, that is logged
*/
@SuppressWarnings("ReturnOfCollectionOrArrayField")
protected List<String> getConfigurationsToCreate() {
return confClassnames;
} | 3.68 |
framework_ServerRpcQueue_flush | /**
* Triggers a send of server RPC and legacy variable changes to the server.
*/
public void flush() {
if (isFlushScheduled() || isEmpty()) {
return;
}
flushPending = true;
doFlushStrategy = this::doFlush;
Scheduler.get().scheduleFinally(() -> doFlushStrategy.run());
} | 3.68 |
rocketmq-connect_JsonConverter_toConnectData | /**
* Convert a native object to a Rocketmq Connect data object.
*
* @param topic the topic associated with the data
* @param value the value to convert
* @return an object containing the {@link Schema} and the converted value
*/
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
Object jsonValue;
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (Exception e) {
            throw new ConnectException("Converting byte[] to RocketMQ Connect data failed due to serialization error: ", e);
}
JSONObject newJsonValue;
if (!converterConfig.schemasEnabled()) {
// schema disabled
JSONObject envelope = new JSONObject();
envelope.put(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.put(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
newJsonValue = envelope;
} else {
// schema enabled
newJsonValue = (JSONObject) jsonValue;
}
Object jsonSchema = newJsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME);
Schema schema = asConnectSchema(jsonSchema == null ? null : (JSONObject) jsonSchema);
return new SchemaAndValue(
schema,
convertToConnect(schema, newJsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME))
);
} | 3.68 |
morf_SchemaBean_isEmptyDatabase | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.metadata.Schema#isEmptyDatabase()
*/
@Override
public boolean isEmptyDatabase() {
return tables.isEmpty();
} | 3.68 |
hbase_FilterWrapper_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.FilterWrapper.Builder builder = FilterProtos.FilterWrapper.newBuilder();
builder.setFilter(ProtobufUtil.toFilter(this.filter));
return builder.build().toByteArray();
} | 3.68 |
flink_FlinkMatchers_willNotComplete | /** Checks that a {@link CompletableFuture} won't complete within the given timeout. */
public static Matcher<CompletableFuture<?>> willNotComplete(Duration timeout) {
return new WillNotCompleteMatcher(timeout);
} | 3.68 |
dubbo_MetricsSupport_incrAndAddRt | /**
 * Increments the method invocation count and records the method response time (rt).
*/
public static void incrAndAddRt(
MetricsKey metricsKey,
MetricsPlaceValue placeType,
MethodMetricsCollector<TimeCounterEvent> collector,
TimeCounterEvent event) {
collector.increment(
event.getAttachmentValue(METHOD_METRICS),
new MetricsKeyWrapper(metricsKey, placeType),
SELF_INCREMENT_SIZE);
collector.addMethodRt(
event.getAttachmentValue(INVOCATION),
placeType.getType(),
event.getTimePair().calc());
} | 3.68 |
streampipes_StreamPipesClient_pipelines | /**
* Get API to work with pipelines
*
* @return {@link org.apache.streampipes.client.api.PipelineApi}
*/
@Override
public PipelineApi pipelines() {
return new PipelineApi(config);
} | 3.68 |
hadoop_StoreContext_createTempFile | /**
* Create a temporary file somewhere.
* @param prefix prefix for the temporary file
* @param size expected size.
* @return a file reference.
* @throws IOException failure.
*/
public File createTempFile(String prefix, long size) throws IOException {
return contextAccessors.createTempFile(prefix, size);
} | 3.68 |
flink_LocalBufferPool_mayNotifyAvailable | /**
* Notifies the potential segment consumer of the new available segments by completing the
* previous uncompleted future.
*/
private void mayNotifyAvailable(@Nullable CompletableFuture<?> toNotify) {
if (toNotify != null) {
toNotify.complete(null);
}
} | 3.68 |
flink_FlinkRelMetadataQuery_getUpsertKeysInKeyGroupRange | /**
 * Determines the set of minimal upsert keys in a single key group range, which means the
 * exchange by partition keys can be ignored.
*
* <p>Some optimizations can rely on this ability to do upsert in a single key group range.
*/
public Set<ImmutableBitSet> getUpsertKeysInKeyGroupRange(RelNode rel, int[] partitionKeys) {
if (rel instanceof Exchange) {
Exchange exchange = (Exchange) rel;
if (Arrays.equals(
exchange.getDistribution().getKeys().stream()
.mapToInt(Integer::intValue)
.toArray(),
partitionKeys)) {
rel = exchange.getInput();
}
}
return getUpsertKeys(rel);
} | 3.68 |
flink_ExceptionUtils_rethrowIfFatalError | /**
* Rethrows the given {@code Throwable}, if it represents an error that is fatal to the JVM. See
* {@link ExceptionUtils#isJvmFatalError(Throwable)} for a definition of fatal errors.
*
* @param t The Throwable to check and rethrow.
*/
public static void rethrowIfFatalError(Throwable t) {
if (isJvmFatalError(t)) {
throw (Error) t;
}
} | 3.68 |
hudi_TimestampBasedAvroKeyGenerator_getDefaultPartitionVal | /**
* Set default value to partitionVal if the input value of partitionPathField is null.
*/
public Object getDefaultPartitionVal() {
Object result = 1L;
if (timestampType == TimestampType.DATE_STRING || timestampType == TimestampType.MIXED) {
// since partitionVal is null, we can set a default value of any format as TIMESTAMP_INPUT_DATE_FORMAT_PROP
// configured, here we take the first.
// {Config.TIMESTAMP_INPUT_DATE_FORMAT_PROP} won't be null, it has been checked in the initialization process of
// inputFormatter
String delimiter = parser.getConfigInputDateFormatDelimiter();
String format = getStringWithAltKeys(config, TIMESTAMP_INPUT_DATE_FORMAT, true).split(delimiter)[0];
// if both input and output timeZone are not configured, use GMT.
if (null != inputDateTimeZone) {
return new DateTime(result, inputDateTimeZone).toString(format);
} else if (null != outputDateTimeZone) {
return new DateTime(result, outputDateTimeZone).toString(format);
} else {
return new DateTime(result, DateTimeZone.forTimeZone(TimeZone.getTimeZone("GMT"))).toString(format);
}
}
return result;
} | 3.68 |
hadoop_ServerWebApp_getHomeDir | /**
* Returns the server home directory.
* <p>
* It is looked up in the Java System property
* <code>#SERVER_NAME#.home.dir</code>.
*
 * @param name the server name.
*
* @return the server home directory.
*/
static String getHomeDir(String name) {
String homeDir = HOME_DIR_TL.get();
if (homeDir == null) {
String sysProp = name + HOME_DIR;
homeDir = System.getProperty(sysProp);
if (homeDir == null) {
throw new IllegalArgumentException(MessageFormat.format(
"System property [{0}] not defined", sysProp));
}
}
return homeDir;
} | 3.68 |
hadoop_OBSFileSystem_delete | /**
* Delete a Path. This operation is at least {@code O(files)}, with added
* overheads to enumerate the path. It is also not atomic.
*
* @param f the path to delete
* @param recursive if path is a directory and set to true, the directory is
* deleted else throws an exception. In case of a file the
* recursive can be set to either true or false
* @return true if delete is successful else false
* @throws IOException due to inability to delete a directory or file
*/
@Override
public boolean delete(final Path f, final boolean recursive)
throws IOException {
try {
FileStatus status = getFileStatus(f);
LOG.debug("delete: path {} - recursive {}", status.getPath(),
recursive);
if (enablePosix) {
return OBSPosixBucketUtils.fsDelete(this, status, recursive);
}
return OBSObjectBucketUtils.objectDelete(this, status, recursive);
} catch (FileNotFoundException e) {
LOG.warn("Couldn't delete {} - does not exist", f);
return false;
} catch (ObsException e) {
throw OBSCommonUtils.translateException("delete", f, e);
}
} | 3.68 |
flink_FutureUtils_completeFromCallable | /**
* Fakes asynchronous execution by immediately executing the operation and completing the
* supplied future either normally or exceptionally.
*
 * @param future the future to complete
 * @param operation the operation to be executed
* @param <T> type of the result
*/
public static <T> void completeFromCallable(
CompletableFuture<T> future, Callable<T> operation) {
try {
future.complete(operation.call());
} catch (Exception e) {
future.completeExceptionally(e);
}
} | 3.68 |
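A short usage sketch of this fake-async pattern with plain JDK types; the helper is re-declared locally here so the example compiles without Flink on the classpath:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;

public class CompleteFromCallableSketch {
    // Same logic as the snippet above, re-declared locally for a self-contained example.
    static <T> void completeFromCallable(CompletableFuture<T> future, Callable<T> operation) {
        try {
            future.complete(operation.call());
        } catch (Exception e) {
            future.completeExceptionally(e);
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Integer> ok = new CompletableFuture<>();
        completeFromCallable(ok, () -> 21 * 2);
        System.out.println(ok.join()); // 42

        CompletableFuture<Integer> failed = new CompletableFuture<>();
        completeFromCallable(failed, () -> { throw new IllegalStateException("boom"); });
        System.out.println(failed.isCompletedExceptionally()); // true
    }
}
```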
hbase_HFileOutputFormat2_createFamilyBloomTypeMap | /**
* Runs inside the task to deserialize column family to bloom filter type map from the
* configuration.
* @param conf to read the serialized values from
 * @return a map from column family to the configured bloom filter type
*/
@InterfaceAudience.Private
static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
Map<byte[], String> stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY);
Map<byte[], BloomType> bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
BloomType bloomType = BloomType.valueOf(e.getValue());
bloomTypeMap.put(e.getKey(), bloomType);
}
return bloomTypeMap;
} | 3.68 |
hbase_HFileBlock_release | /**
* Releases resources used by this writer.
*/
void release() {
if (dataBlockEncodingCtx != null) {
dataBlockEncodingCtx.close();
dataBlockEncodingCtx = null;
}
if (defaultBlockEncodingCtx != null) {
defaultBlockEncodingCtx.close();
defaultBlockEncodingCtx = null;
}
} | 3.68 |
hbase_AsyncTableRegionLocator_getStartKeys | /**
* Gets the starting row key for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
* @return Array of region starting row keys
*/
default CompletableFuture<List<byte[]>> getStartKeys() {
return getStartEndKeys().thenApply(
startEndKeys -> startEndKeys.stream().map(Pair::getFirst).collect(Collectors.toList()));
} | 3.68 |
framework_TabsheetNotEnoughHorizontalSpace_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
generateTabs();
tabsheet.setSizeFull();
addComponent(tabsheet);
} | 3.68 |
framework_VPopupView_onDetach | /**
* Make sure that we remove the popup when the main widget is removed.
*
* @see com.google.gwt.user.client.ui.Widget#onUnload()
*/
@Override
protected void onDetach() {
popup.hide();
super.onDetach();
} | 3.68 |
hudi_HoodieTableConfig_getTableType | /**
* Read the table type from the table properties and if not found, return the default.
*/
public HoodieTableType getTableType() {
return HoodieTableType.valueOf(getStringOrDefault(TYPE));
} | 3.68 |
graphhopper_PbfFieldDecoder_decodeLatitude | /**
* Decodes a raw latitude value into degrees.
* <p>
*
* @param rawLatitude The PBF encoded value.
* @return The latitude in degrees.
*/
public double decodeLatitude(long rawLatitude) {
return COORDINATE_SCALING_FACTOR * (coordLatitudeOffset + (coordGranularity * rawLatitude));
} | 3.68 |
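For a rough sense of the scale: assuming the common OSM PBF defaults of granularity 100, a nano-degree scaling factor of 1e-9 and a zero latitude offset (assumptions, since those fields are not shown in the snippet), a raw value of 524,000,000 decodes to about 52.4 degrees. A standalone sketch of the same arithmetic:

```java
public class PbfLatitudeSketch {
    // Assumed OSM PBF defaults; the real values come from the PBF block header.
    static final double COORDINATE_SCALING_FACTOR = 0.000000001; // nano-degrees
    static final long coordGranularity = 100;
    static final long coordLatitudeOffset = 0;

    // Same formula as in the snippet above.
    static double decodeLatitude(long rawLatitude) {
        return COORDINATE_SCALING_FACTOR * (coordLatitudeOffset + (coordGranularity * rawLatitude));
    }

    public static void main(String[] args) {
        System.out.println(decodeLatitude(524_000_000L)); // ~52.4 degrees
    }
}
```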
flink_TableChange_getNewComment | /** Get the new comment for the column. */
public String getNewComment() {
return newComment;
} | 3.68 |
dubbo_StringUtils_isAnyEmpty | /**
 * <p>Checks if the strings contain at least one empty or null element. <p/>
*
* <pre>
* StringUtils.isAnyEmpty(null) = true
* StringUtils.isAnyEmpty("") = true
* StringUtils.isAnyEmpty(" ") = false
* StringUtils.isAnyEmpty("abc") = false
* StringUtils.isAnyEmpty("abc", "def") = false
* StringUtils.isAnyEmpty("abc", null) = true
* StringUtils.isAnyEmpty("abc", "") = true
* StringUtils.isAnyEmpty("abc", " ") = false
* </pre>
*
* @param ss the strings to check
* @return {@code true} if at least one in the strings is empty or null
*/
public static boolean isAnyEmpty(final String... ss) {
return !isNoneEmpty(ss);
} | 3.68 |
hibernate-validator_TraversableResolvers_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
hbase_Bytes_getBestConverter | /**
* Returns the Unsafe-using Converter, or falls back to the pure-Java implementation if unable
* to do so.
*/
static Converter getBestConverter() {
try {
Class<?> theClass = Class.forName(UNSAFE_CONVERTER_NAME);
// yes, UnsafeComparer does implement Comparer<byte[]>
@SuppressWarnings("unchecked")
Converter converter = (Converter) theClass.getConstructor().newInstance();
return converter;
} catch (Throwable t) { // ensure we really catch *everything*
return PureJavaConverter.INSTANCE;
}
} | 3.68 |
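The pattern here is load-by-name with a pure-Java fallback: reflectively instantiate the Unsafe-backed class and fall back on any failure. A generic standalone sketch of that pattern (the interface and class names below are made up for illustration, not HBase's):

```java
public class BestImplLoader {
    interface Converter { long toLong(byte[] bytes, int offset); }

    // Hypothetical pure-Java fallback implementation.
    enum PureJavaConverter implements Converter {
        INSTANCE;
        public long toLong(byte[] bytes, int offset) {
            long v = 0;
            for (int i = 0; i < 8; i++) {
                v = (v << 8) | (bytes[offset + i] & 0xFF); // big-endian read
            }
            return v;
        }
    }

    static Converter getBestConverter(String unsafeConverterName) {
        try {
            Class<?> theClass = Class.forName(unsafeConverterName);
            return (Converter) theClass.getConstructor().newInstance();
        } catch (Throwable t) { // deliberately catch everything, as in the snippet
            return PureJavaConverter.INSTANCE;
        }
    }

    public static void main(String[] args) {
        // The class does not exist here, so the pure-Java fallback is returned.
        Converter c = getBestConverter("com.example.UnsafeConverter");
        System.out.println(c == PureJavaConverter.INSTANCE); // true
    }
}
```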
hadoop_OBSFileSystem_exists | /**
* Check if a path exists.
*
* @param f source path
* @return true if the path exists
* @throws IOException IO failure
*/
@Override
public boolean exists(final Path f) throws IOException {
try {
return getFileStatus(f) != null;
} catch (FileNotFoundException | FileConflictException e) {
return false;
}
} | 3.68 |
flink_PartitionLoader_loadPartition | /**
* Load a single partition.
*
* @param partSpec the specification for the single partition
* @param srcPaths the paths for the files used to load to the single partition
 * @param srcPathIsDir whether every path in {@param srcPaths} is a directory or not. If true,
 *     it will load the files under the directory of every path. If false, every path in
 *     {@param srcPaths} is considered a single file, and it will load the single file for
 *     every path.
*/
public void loadPartition(
LinkedHashMap<String, String> partSpec, List<Path> srcPaths, boolean srcPathIsDir)
throws Exception {
Optional<Path> pathFromMeta = metaStore.getPartition(partSpec);
Path path =
pathFromMeta.orElseGet(
() ->
new Path(
metaStore.getLocationPath(),
generatePartitionPath(partSpec)));
overwriteAndMoveFiles(srcPaths, path, srcPathIsDir);
commitPartition(partSpec, path);
} | 3.68 |
zxing_MathUtils_round | /**
* Ends up being a bit faster than {@link Math#round(float)}. This merely rounds its
* argument to the nearest int, where x.5 rounds up to x+1. Semantics of this shortcut
* differ slightly from {@link Math#round(float)} in that half rounds down for negative
* values. -2.5 rounds to -3, not -2. For purposes here it makes no difference.
*
* @param d real value to round
* @return nearest {@code int}
*/
public static int round(float d) {
return (int) (d + (d < 0.0f ? -0.5f : 0.5f));
} | 3.68 |
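As the javadoc notes, this shortcut rounds negative halves away from zero, unlike Math.round. A tiny standalone check of that difference (plain Java, no ZXing dependency):

```java
public class RoundCheck {
    // Same expression as in the snippet above.
    static int round(float d) {
        return (int) (d + (d < 0.0f ? -0.5f : 0.5f));
    }

    public static void main(String[] args) {
        System.out.println(round(2.5f));        // 3
        System.out.println(round(-2.5f));       // -3
        System.out.println(Math.round(-2.5f));  // -2, the documented difference
    }
}
```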
hadoop_ManifestStoreOperations_storePreservesEtagsThroughRenames | /**
* Does the store preserve etags through renames.
* If true, and if the source listing entry has an etag,
* it will be used to attempt to validate a failed rename.
* @param path path to probe.
* @return true if etag comparison is a valid strategy.
*/
public boolean storePreservesEtagsThroughRenames(Path path) {
return false;
} | 3.68 |
morf_RenameTable_isApplied | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(org.alfasoftware.morf.metadata.Schema, org.alfasoftware.morf.jdbc.ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
return schema.tableExists(newTableName);
} | 3.68 |
hadoop_SharedKeyCredentials_getGMTTime | /**
 * Returns the GMT date/time String for the specified value using the RFC1123 pattern.
*
* @param date
* A <code>Date</code> object that represents the date to convert to GMT date/time in the RFC1123
* pattern.
*
* @return A <code>String</code> that represents the GMT date/time for the specified value using the RFC1123
* pattern.
*/
static String getGMTTime(final Date date) {
return rfc1123GmtDateTimeFormatter.get().format(date);
} | 3.68 |
flink_StreamExecutionEnvironment_clean | /**
* Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning is
* not disabled in the {@link org.apache.flink.api.common.ExecutionConfig}
*/
@Internal
public <F> F clean(F f) {
if (getConfig().isClosureCleanerEnabled()) {
ClosureCleaner.clean(f, getConfig().getClosureCleanerLevel(), true);
}
ClosureCleaner.ensureSerializable(f);
return f;
} | 3.68 |
morf_ViewURLAsFile_close | /**
* Closes all the temporary files opened.
*/
@Override
public void close() {
for (File file : tempFiles) {
try {
java.nio.file.Files.delete(file.toPath());
} catch (Exception e) {
throw new RuntimeException("Could not delete file [" + file.getPath() + "]", e);
}
}
} | 3.68 |
hbase_CheckAndMutateResult_getResult | /** Returns the result. It is used only for CheckAndMutate operations with Increment/Append; otherwise null. */
public Result getResult() {
return result;
} | 3.68 |
framework_TreeGridElement_hasExpandToggle | /**
* Check whether the given indices correspond to a cell that contains a
* visible hierarchy toggle element.
*
* @param rowIndex
* 0-based row index
* @param hierarchyColumnIndex
* 0-based index of the hierarchy column
* @return {@code true} if this cell has the expand toggle visible
*/
public boolean hasExpandToggle(int rowIndex, int hierarchyColumnIndex) {
WebElement expandElement = getExpandElement(rowIndex,
hierarchyColumnIndex);
List<String> classes = Arrays
.asList(expandElement.getAttribute("class").split(" "));
return classes.contains("expanded") || classes.contains("collapsed");
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_bytesReadFromBuffer | /**
* {@inheritDoc}
*
* Total bytes read from the buffer.
*
* @param bytes number of bytes that are read from buffer.
*/
@Override
public void bytesReadFromBuffer(long bytes) {
ioStatisticsStore.incrementCounter(StreamStatisticNames.BYTES_READ_BUFFER, bytes);
} | 3.68 |
flink_StringValueUtils_next | /**
* Gets the next token from the string. If another token is available, the token is stored
* in the given target StringValue object.
*
* @param target The StringValue object to store the next token in.
* @return True, if there was another token, false if not.
*/
public boolean next(StringValue target) {
final char[] data = this.toTokenize.getCharArray();
final int limit = this.limit;
int pos = this.pos;
// skip the delimiter
for (; pos < limit && Character.isWhitespace(data[pos]); pos++) {}
if (pos >= limit) {
this.pos = pos;
return false;
}
final int start = pos;
for (; pos < limit && !Character.isWhitespace(data[pos]); pos++) {}
this.pos = pos;
target.setValue(this.toTokenize, start, pos - start);
return true;
} | 3.68 |
framework_DDEventHandleStrategy_handleMouseOver | /**
* Called to handle {@link Event#ONMOUSEOVER} event.
*
* @param target
* target element over which DnD event has happened
* @param event
* ONMOUSEOVER GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
*/
protected void handleMouseOver(Element target, NativePreviewEvent event,
DDManagerMediator mediator) {
VDragAndDropManager manager = mediator.getManager();
VDropHandler dragHandler = findDragTarget(target, mediator);
if (dragHandler != null
&& dragHandler != manager.getCurrentDropHandler()) {
handleDragLeave(mediator, false);
manager.setCurrentDropHandler(dragHandler);
// ApplicationConnection.getConsole().log(
// "DropHandler now"
// + currentDropHandler.getPaintable());
handleDragEnter(target, mediator);
} else if (dragHandler == null
&& manager.getCurrentDropHandler() != null) {
// ApplicationConnection.getConsole().log("Invalid state!?");
handleDragLeave(mediator, false);
manager.setCurrentDropHandler(null);
}
} | 3.68 |
framework_ColorPickerPopup_createHSVTab | /**
* Creates the hsv tab.
*
* @return the component
*/
private Component createHSVTab(Color color) {
VerticalLayout hsvLayout = new VerticalLayout();
hsvLayout.setMargin(new MarginInfo(false, false, true, false));
hsvLayout.addComponent(hsvPreview);
hsvLayout.setStyleName("hsvtab");
// Add the hsv gradient
hsvGradient = new ColorPickerGradient("hsv-gradient", hsvConverter);
hsvGradient.setValue(color);
hsvGradient.addValueChangeListener(this::colorChanged);
hsvLayout.addComponent(hsvGradient);
selectors.add(hsvGradient);
VerticalLayout sliders = new VerticalLayout();
sliders.setStyleName("hsv-sliders");
hueSlider = new Slider("Hue", 0, 360);
saturationSlider = new Slider("Saturation", 0, 100);
valueSlider = new Slider("Value", 0, 100);
float[] hsv = color.getHSV();
setHsvSliderValues(hsv);
hueSlider.setStyleName("hsv-slider");
hueSlider.addStyleName("hue-slider");
hueSlider.setWidth("220px");
hueSlider.addValueChangeListener(event -> {
if (!updatingColors) {
float hue = Float.parseFloat(event.getValue().toString())
/ 360f;
float saturation = Float.parseFloat(
saturationSlider.getValue().toString()) / 100f;
float value = Float
.parseFloat(valueSlider.getValue().toString()) / 100f;
// Set the color
Color newColor = new Color(
Color.HSVtoRGB(hue, saturation, value));
setValue(newColor);
/*
* Set the background color of the hue gradient. This has to be
* done here since in the conversion the base color information
* is lost when color is black/white
*/
Color bgColor = new Color(Color.HSVtoRGB(hue, 1f, 1f));
hsvGradient.setBackgroundColor(bgColor);
}
});
sliders.addComponent(hueSlider);
saturationSlider.setStyleName("hsv-slider");
saturationSlider.setWidth("220px");
saturationSlider.addValueChangeListener(event -> {
if (!updatingColors) {
float hue = Float.parseFloat(hueSlider.getValue().toString())
/ 360f;
float saturation = Float.parseFloat(event.getValue().toString())
/ 100f;
float value = Float
.parseFloat(valueSlider.getValue().toString()) / 100f;
Color newColor = new Color(
Color.HSVtoRGB(hue, saturation, value));
setValue(newColor);
}
});
sliders.addComponent(saturationSlider);
valueSlider.setStyleName("hsv-slider");
valueSlider.setWidth("220px");
valueSlider.addValueChangeListener(event -> {
if (!updatingColors) {
float hue = Float.parseFloat(hueSlider.getValue().toString())
/ 360f;
float saturation = Float.parseFloat(
saturationSlider.getValue().toString()) / 100f;
float value = Float.parseFloat(event.getValue().toString())
/ 100f;
Color newColor = new Color(
Color.HSVtoRGB(hue, saturation, value));
setValue(newColor);
}
});
sliders.addComponent(valueSlider);
hsvLayout.addComponent(sliders);
return hsvLayout;
} | 3.68 |
framework_VScrollTable_willHaveScrollbars | /**
* Note: this method is not part of official API although declared as
* protected. Extend at your own risk.
*
* @return true if content area will have scrollbars visible.
*/
protected boolean willHaveScrollbars() {
if (isDynamicHeight()) {
if (pageLength < totalRows) {
return true;
}
} else {
int fakeheight = (int) Math
.round(scrollBody.getRowHeight() * totalRows);
int availableHeight = scrollBodyPanel.getElement()
.getPropertyInt("clientHeight");
if (fakeheight > availableHeight) {
return true;
}
}
return false;
} | 3.68 |
framework_DataCommunicator_getBackEndSorting | /**
* Returns the {@link QuerySortOrder} to use with backend sorting.
*
* @return an unmodifiable list of sort order information to pass to a query
* @since 8.0.6
*/
public List<QuerySortOrder> getBackEndSorting() {
return Collections.unmodifiableList(backEndSorting);
} | 3.68 |
zxing_BinaryBitmap_getHeight | /**
* @return The height of the bitmap.
*/
public int getHeight() {
return binarizer.getHeight();
} | 3.68 |
flink_RestfulGateway_getTriggeredSavepointStatus | /**
* Get the status of a savepoint triggered under the specified operation key.
*
* @param operationKey key of the operation
* @return Future which completes immediately with the status, or fails if no operation is
* registered for the key
*/
default CompletableFuture<OperationResult<String>> getTriggeredSavepointStatus(
AsynchronousJobOperationKey operationKey) {
throw new UnsupportedOperationException();
} | 3.68 |
flink_RawFormatDeserializationSchema_createDataLengthValidator | /** Creates a validator for the received data. */
private static DataLengthValidator createDataLengthValidator(LogicalType type) {
// please keep the order the same with createNotNullConverter()
switch (type.getTypeRoot()) {
case CHAR:
case VARCHAR:
case VARBINARY:
case BINARY:
case RAW:
return data -> {};
case BOOLEAN:
return createDataLengthValidator(1, "BOOLEAN");
case TINYINT:
return createDataLengthValidator(1, "TINYINT");
case SMALLINT:
return createDataLengthValidator(2, "SMALLINT");
case INTEGER:
return createDataLengthValidator(4, "INT");
case BIGINT:
return createDataLengthValidator(8, "BIGINT");
case FLOAT:
return createDataLengthValidator(4, "FLOAT");
case DOUBLE:
return createDataLengthValidator(8, "DOUBLE");
default:
throw new UnsupportedOperationException(
"'raw' format currently doesn't support type: " + type);
}
} | 3.68 |
framework_Navigator_getDisplay | /**
* Return the {@link ViewDisplay} used by the navigator.
*
* @return the ViewDisplay used for displaying views
*/
public ViewDisplay getDisplay() {
return display;
} | 3.68 |
hudi_EmbeddedTimelineService_addBasePath | /**
* Adds a new base path to the set that are managed by this instance.
* @param basePath the new base path to add
*/
private void addBasePath(String basePath) {
basePaths.add(basePath);
} | 3.68 |
hbase_RegionCoprocessorHost_preCompactSelection | /**
* Called prior to selecting the {@link HStoreFile}s for compaction from the list of currently
* available candidates.
* <p>
* Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed the
* passed in <code>candidates</code>.
* @param store The store where compaction is being requested
* @param candidates The currently available store files
* @param tracker used to track the life cycle of a compaction
* @param user the user
*/
public boolean preCompactSelection(final HStore store, final List<HStoreFile> candidates,
final CompactionLifeCycleTracker tracker, final User user) throws IOException {
if (coprocEnvironments.isEmpty()) {
return false;
}
boolean bypassable = true;
return execOperation(new RegionObserverOperationWithoutResult(user, bypassable) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preCompactSelection(this, store, candidates, tracker);
}
});
} | 3.68 |
hadoop_SlowPeerTracker_filterNodeReports | /**
* Filter the given reports to return just the valid ones.
*
* @param reports Current set of reports.
* @param now Current time.
* @return Set of valid reports that were created within last reportValidityMs millis.
*/
private SortedSet<SlowPeerLatencyWithReportingNode> filterNodeReports(
ConcurrentMap<String, LatencyWithLastReportTime> reports, long now) {
final SortedSet<SlowPeerLatencyWithReportingNode> validReports = new TreeSet<>();
for (Map.Entry<String, LatencyWithLastReportTime> entry : reports.entrySet()) {
if (now - entry.getValue().getTime() < reportValidityMs) {
OutlierMetrics outlierMetrics = entry.getValue().getLatency();
validReports.add(
new SlowPeerLatencyWithReportingNode(entry.getKey(), outlierMetrics.getActualLatency(),
outlierMetrics.getMedian(), outlierMetrics.getMad(),
outlierMetrics.getUpperLimitLatency()));
}
}
return validReports;
} | 3.68 |
hbase_RegionServerFlushTableProcedureManager_cancelTasks | /**
* This attempts to cancel out all pending and in progress tasks. Does not interrupt the running
* tasks itself. An ongoing HRegion.flush() should not be interrupted (see HBASE-13877).
*/
void cancelTasks() throws InterruptedException {
Collection<Future<Void>> tasks = futures;
LOG.debug("cancelling " + tasks.size() + " flush region tasks " + name);
for (Future<Void> f : tasks) {
f.cancel(false);
}
// evict remaining tasks and futures from taskPool.
futures.clear();
while (taskPool.poll() != null) {
}
stop();
} | 3.68 |
flink_AsyncDynamicTableSinkBuilder_setMaxBufferedRequests | /**
* @param maxBufferedRequests the maximum buffer length. Callbacks to add elements to the buffer
* and calls to write will block if this length has been reached and will only unblock if
* elements from the buffer have been removed for flushing.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBufferedRequests(int maxBufferedRequests) {
this.maxBufferedRequests = maxBufferedRequests;
return (ConcreteBuilderT) this;
} | 3.68 |
querydsl_NumberExpression_avg | /**
* Create a {@code avg(this)} expression
*
* <p>Get the average value of this expression (aggregation)</p>
*
* @return avg(this)
*/
public NumberExpression<Double> avg() {
if (avg == null) {
avg = Expressions.numberOperation(Double.class, Ops.AggOps.AVG_AGG, mixin);
}
return avg;
} | 3.68 |
flink_MathUtils_jenkinsHash | /**
* This function hashes an integer value. It is adapted from Bob Jenkins' website <a
* href="http://www.burtleburtle.net/bob/hash/integer.html">http://www.burtleburtle.net/bob/hash/integer.html</a>.
* The hash function has the <i>full avalanche</i> property, meaning that every bit of the value
* to be hashed affects every bit of the hash value.
*
* <p>It is crucial to use different hash functions to partition data across machines and the
* internal partitioning of data structures. This hash function is intended for partitioning
* internally in data structures.
*
* @param code The integer to be hashed.
* @return The non-negative hash code for the integer.
*/
public static int jenkinsHash(int code) {
code = (code + 0x7ed55d16) + (code << 12);
code = (code ^ 0xc761c23c) ^ (code >>> 19);
code = (code + 0x165667b1) + (code << 5);
code = (code + 0xd3a2646c) ^ (code << 9);
code = (code + 0xfd7046c5) + (code << 3);
code = (code ^ 0xb55a4f09) ^ (code >>> 16);
return code >= 0 ? code : -(code + 1);
} | 3.68 |
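Because the returned hash is guaranteed non-negative, it can be mapped to an internal bucket with a plain modulo. A standalone sketch of using it that way (the hash body is copied verbatim from the snippet; the bucket count is arbitrary):

```java
public class JenkinsHashSketch {
    // Copied from the snippet above.
    static int jenkinsHash(int code) {
        code = (code + 0x7ed55d16) + (code << 12);
        code = (code ^ 0xc761c23c) ^ (code >>> 19);
        code = (code + 0x165667b1) + (code << 5);
        code = (code + 0xd3a2646c) ^ (code << 9);
        code = (code + 0xfd7046c5) + (code << 3);
        code = (code ^ 0xb55a4f09) ^ (code >>> 16);
        return code >= 0 ? code : -(code + 1);
    }

    public static void main(String[] args) {
        int numBuckets = 16; // arbitrary internal partition count
        for (int key = 0; key < 5; key++) {
            int bucket = jenkinsHash(key) % numBuckets; // safe: hash is non-negative
            System.out.println("key " + key + " -> bucket " + bucket);
        }
    }
}
```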
hadoop_JobMonitor_abort | /**
* Drain all submitted jobs to a queue and stop the monitoring thread.
* Upstream submitter is assumed dead.
*/
public void abort() {
synchronized (mJobs) {
graceful = false;
shutdown = true;
}
executor.shutdown();
} | 3.68 |
flink_SourceCoordinatorContext_schedulePeriodTask | /** To avoid losing the periodic task, we should handle any potential exception thrown by the task. */
ScheduledFuture<?> schedulePeriodTask(
Runnable command, long initDelay, long period, TimeUnit unit) {
return coordinatorExecutor.scheduleAtFixedRate(
() -> {
try {
command.run();
} catch (Throwable t) {
handleUncaughtExceptionFromAsyncCall(t);
}
},
initDelay,
period,
unit);
} | 3.68 |
hadoop_ClientRegistryBinder_resolveExternalRestAPI | /**
* Resolve a service record then return an external REST API exported it.
*
* @param api API to resolve
* @param path path of the service record
* @return null if the record exists but the API is absent or it has no
* REST endpoints.
* @throws IOException resolution problems, as covered in
* {@link RegistryOperations#resolve(String)}
*/
protected String resolveExternalRestAPI(String api, String path) throws
IOException {
ServiceRecord record = operations.resolve(path);
return lookupRestAPI(record, api, true);
} | 3.68 |
framework_VRadioButtonGroup_setReadonly | /**
* Sets the read-only status of this radio button group.
*
* @param readonly
* {@code true} if this widget should be read-only, {@code false}
* otherwise
*/
public void setReadonly(boolean readonly) {
if (this.readonly != readonly) {
this.readonly = readonly;
updateEnabledState();
}
} | 3.68 |
flink_MiniCluster_create | /**
* Create a new {@link TerminatingFatalErrorHandler} for the {@link TaskExecutor} with the
* given index.
*
* @param index into the {@link #taskManagers} collection to identify the correct {@link
* TaskExecutor}.
* @return {@link TerminatingFatalErrorHandler} for the given index
*/
@GuardedBy("lock")
private TerminatingFatalErrorHandler create(int index) {
return new TerminatingFatalErrorHandler(index);
} | 3.68 |
morf_HumanReadableStatementHelper_generateInsertStatementString | /**
* Generates a human-readable description of a data insert operation.
*
* @param statement the data upgrade statement to describe.
* @return a string containing the human-readable description of the operation.
*/
private static String generateInsertStatementString(final InsertStatement statement) {
final StringBuilder sb = new StringBuilder();
final SelectStatement source = statement.getSelectStatement();
if (source == null) {
// No select statement; single record insert
sb.append(String.format("Add record into %s:", statement.getTable().getName()));
for (AliasedField field : statement.getValues()) {
sb.append(generateAliasedFieldAssignmentString(field));
}
} else {
// Multiple record insert
sb.append(String.format("Add records into %s: ", statement.getTable().getName()));
sb.append(generateFieldSymbolStrings(source.getFields()));
sb.append(generateFromAndWhereClause(source, true));
}
return sb.toString();
} | 3.68 |
flink_Preconditions_checkState | /**
* Checks the given boolean condition, and throws an {@code IllegalStateException} if the
* condition is not met (evaluates to {@code false}).
*
* @param condition The condition to check
* @param errorMessageTemplate The message template for the {@code IllegalStateException} that
* is thrown if the check fails. The template substitutes its {@code %s} placeholders with
* the error message arguments.
* @param errorMessageArgs The arguments for the error message, to be inserted into the message
* template for the {@code %s} placeholders.
* @throws IllegalStateException Thrown, if the condition is violated.
*/
public static void checkState(
boolean condition,
@Nullable String errorMessageTemplate,
@Nullable Object... errorMessageArgs) {
if (!condition) {
throw new IllegalStateException(format(errorMessageTemplate, errorMessageArgs));
}
} | 3.68 |
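Typical call sites pass a condition, a message template with %s placeholders, and the arguments, as in the hedged example below (it assumes flink-core, which provides org.apache.flink.util.Preconditions, is on the classpath):

```java
import org.apache.flink.util.Preconditions;

public class CheckStateExample {
    public static void main(String[] args) {
        int pendingRequests = 3;
        // Passes silently when the condition holds.
        Preconditions.checkState(pendingRequests >= 0,
                "pendingRequests must not be negative, was %s", pendingRequests);

        boolean destroyed = true;
        try {
            Preconditions.checkState(!destroyed, "buffer pool is %s", "destroyed");
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // buffer pool is destroyed
        }
    }
}
```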
flink_CsvReader_types | /**
* Specifies the types for the CSV fields. This method parses the CSV data to a 25-tuple which
* has fields of the specified types. This method is overloaded for each possible length of the
* tuples to support type safe creation of data sets through CSV parsing.
*
* @param type0 The type of CSV field 0 and the type of field 0 in the returned tuple type.
* @param type1 The type of CSV field 1 and the type of field 1 in the returned tuple type.
* @param type2 The type of CSV field 2 and the type of field 2 in the returned tuple type.
* @param type3 The type of CSV field 3 and the type of field 3 in the returned tuple type.
* @param type4 The type of CSV field 4 and the type of field 4 in the returned tuple type.
* @param type5 The type of CSV field 5 and the type of field 5 in the returned tuple type.
* @param type6 The type of CSV field 6 and the type of field 6 in the returned tuple type.
* @param type7 The type of CSV field 7 and the type of field 7 in the returned tuple type.
* @param type8 The type of CSV field 8 and the type of field 8 in the returned tuple type.
* @param type9 The type of CSV field 9 and the type of field 9 in the returned tuple type.
* @param type10 The type of CSV field 10 and the type of field 10 in the returned tuple type.
* @param type11 The type of CSV field 11 and the type of field 11 in the returned tuple type.
* @param type12 The type of CSV field 12 and the type of field 12 in the returned tuple type.
* @param type13 The type of CSV field 13 and the type of field 13 in the returned tuple type.
* @param type14 The type of CSV field 14 and the type of field 14 in the returned tuple type.
* @param type15 The type of CSV field 15 and the type of field 15 in the returned tuple type.
* @param type16 The type of CSV field 16 and the type of field 16 in the returned tuple type.
* @param type17 The type of CSV field 17 and the type of field 17 in the returned tuple type.
* @param type18 The type of CSV field 18 and the type of field 18 in the returned tuple type.
* @param type19 The type of CSV field 19 and the type of field 19 in the returned tuple type.
* @param type20 The type of CSV field 20 and the type of field 20 in the returned tuple type.
* @param type21 The type of CSV field 21 and the type of field 21 in the returned tuple type.
* @param type22 The type of CSV field 22 and the type of field 22 in the returned tuple type.
* @param type23 The type of CSV field 23 and the type of field 23 in the returned tuple type.
* @param type24 The type of CSV field 24 and the type of field 24 in the returned tuple type.
* @return The {@link org.apache.flink.api.java.DataSet} representing the parsed CSV data.
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>
DataSource<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>
types(
Class<T0> type0,
Class<T1> type1,
Class<T2> type2,
Class<T3> type3,
Class<T4> type4,
Class<T5> type5,
Class<T6> type6,
Class<T7> type7,
Class<T8> type8,
Class<T9> type9,
Class<T10> type10,
Class<T11> type11,
Class<T12> type12,
Class<T13> type13,
Class<T14> type14,
Class<T15> type15,
Class<T16> type16,
Class<T17> type17,
Class<T18> type18,
Class<T19> type19,
Class<T20> type20,
Class<T21> type21,
Class<T22> type22,
Class<T23> type23,
Class<T24> type24) {
TupleTypeInfo<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>
types =
TupleTypeInfo.getBasicAndBasicValueTupleTypeInfo(
type0, type1, type2, type3, type4, type5, type6, type7, type8,
type9, type10, type11, type12, type13, type14, type15, type16,
type17, type18, type19, type20, type21, type22, type23, type24);
CsvInputFormat<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>
inputFormat =
new TupleCsvInputFormat<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>(path, types, this.includedMask);
configureInputFormat(inputFormat);
return new DataSource<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>(executionContext, inputFormat, types, Utils.getCallLocationName());
} | 3.68 |
hadoop_MySQLDBRecordReader_executeQuery | // Execute statements for mysql in unbuffered mode.
protected ResultSet executeQuery(String query) throws SQLException {
statement = getConnection().prepareStatement(query,
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
statement.setFetchSize(Integer.MIN_VALUE); // MySQL: read row-at-a-time.
return statement.executeQuery();
} | 3.68 |
hadoop_AuditingFunctions_withinAuditSpan | /**
* Given a function, return a new function which
* activates and deactivates the span around the inner one.
* @param auditSpan audit span
* @param operation operation
* @param <T> Generics Type T.
* @param <R> Generics Type R.
* @return a new invocation.
*/
public static <T, R> FunctionRaisingIOE<T, R> withinAuditSpan(
@Nullable AuditSpan auditSpan,
FunctionRaisingIOE<T, R> operation) {
return auditSpan == null
? operation
: (x) -> {
auditSpan.activate();
return operation.apply(x);
};
} | 3.68 |
pulsar_PulsarAdminImpl_schemas | /**
* @return the schemas
*/
public Schemas schemas() {
return schemas;
} | 3.68 |
hbase_ResponseConverter_getControllerException | /**
 * Retrieves the exception stored during RPC invocation.
* @param controller the controller instance provided by the client when calling the service
* @return exception if any, or null; Will return DoNotRetryIOException for string represented
* failure causes in controller.
*/
@Nullable
public static IOException getControllerException(RpcController controller) throws IOException {
if (controller != null && controller.failed()) {
if (controller instanceof ServerRpcController) {
return ((ServerRpcController) controller).getFailedOn();
} else {
return new DoNotRetryIOException(controller.errorText());
}
}
return null;
} | 3.68 |