name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
morf_GraphBasedUpgradeTraversalService_allNodesCompleted | /**
* @return true if all nodes of the upgrade have been executed
*/
public boolean allNodesCompleted() {
lock.lock();
try {
return allNodesCompletedNoLock();
} finally {
lock.unlock();
}
} | 3.68 |
flink_ExecutionEnvironment_getJobName | /**
* Gets the job name. If a user-defined job name is not found in the configuration, a default
* name based on the timestamp at which this method is invoked is returned.
*
* @return A job name.
*/
private String getJobName() {
return configuration.getString(
PipelineOptions.NAME, "Flink Java Job at " + Calendar.getInstance().getTime());
} | 3.68 |
dubbo_DubboAbstractTDigest_weightedAverage | /**
* Same as {@link #weightedAverageSorted(double, double, double, double)} but flips
* the order of the variables if <code>x1</code> is greater than
* <code>x2</code>.
*/
static double weightedAverage(double x1, double w1, double x2, double w2) {
if (x1 <= x2) {
return weightedAverageSorted(x1, w1, x2, w2);
} else {
return weightedAverageSorted(x2, w2, x1, w1);
}
} | 3.68 |
hbase_AccessController_preCleanupBulkLoad | /**
* Authorization security check for SecureBulkLoadProtocol.cleanupBulkLoad()
* @param ctx the context
*/
@Override
public void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
requireAccess(ctx, "preCleanupBulkLoad",
ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN,
Action.CREATE);
} | 3.68 |
pulsar_TokenClient_exchangeClientCredentials | /**
* Performs a token exchange using client credentials.
* @param req the client credentials request details.
* @return a token result
* @throws TokenExchangeException
*/
public TokenResult exchangeClientCredentials(ClientCredentialsExchangeRequest req)
throws TokenExchangeException, IOException {
String body = buildClientCredentialsBody(req);
try {
Response res = httpClient.preparePost(tokenUrl.toString())
.setHeader("Accept", "application/json")
.setHeader("Content-Type", "application/x-www-form-urlencoded")
.setBody(body)
.execute()
.get();
switch (res.getStatusCode()) {
case 200:
return ObjectMapperFactory.getMapper().reader().readValue(res.getResponseBodyAsBytes(),
TokenResult.class);
case 400: // Bad request
case 401: // Unauthorized
throw new TokenExchangeException(
ObjectMapperFactory.getMapper().reader().readValue(res.getResponseBodyAsBytes(),
TokenError.class));
default:
throw new IOException(
"Failed to perform HTTP request. res: " + res.getStatusCode() + " " + res.getStatusText());
}
} catch (InterruptedException | ExecutionException e1) {
throw new IOException(e1);
}
} | 3.68 |
hbase_MasterProcedureScheduler_getGlobalQueue | // ============================================================================
// Global Queue Lookup Helpers
// ============================================================================
private GlobalQueue getGlobalQueue(String globalId) {
GlobalQueue node = AvlTree.get(globalMap, globalId, GLOBAL_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
node = new GlobalQueue(globalId, locking.getGlobalLock(globalId));
globalMap = AvlTree.insert(globalMap, node);
return node;
} | 3.68 |
framework_ApplicationConfiguration_getServiceUrlParameterName | /**
* Return the name of the parameter used to send data to the service URL.
* This method should only be called if {@link #useServiceUrlPathParam()} is
* true.
*
* @since 7.1.6
* @return The parameter name, by default <code>v-resourcePath</code>
*/
public String getServiceUrlParameterName() {
return getJsoConfiguration(id).getConfigString(
ApplicationConstants.SERVICE_URL_PARAMETER_NAME);
} | 3.68 |
rocketmq-connect_ColumnDefinition_isSignedNumber | /**
* Indicates whether values in the column are signed numbers.
*
* @return <code>true</code> if so; <code>false</code> otherwise
*/
public boolean isSignedNumber() {
return signedNumbers;
} | 3.68 |
querydsl_CollQueryFactory_from | /**
* Create a new query
*
* @param path source expression
* @param col source collection
* @return query
*/
public static <A> CollQuery<A> from(Path<A> path, Iterable<A> col) {
return new CollQuery<Void>().from(path, col).select(path);
} | 3.68 |
morf_AbstractSqlDialectTest_assertSQLEquals | /**
* Helper to allow lists of SQL strings to be compared in Eclipse.
*
* @param message The message to show on failure.
* @param expected The expected list of strings.
* @param actual The actual list of strings.
*/
private void assertSQLEquals(String message, List<String> expected, List<String> actual) {
Assert.assertEquals(message, StringUtils.join(expected, "\n"), StringUtils.join(actual, "\n"));
} | 3.68 |
morf_BaseDataSetReader_clear | /**
* Clear the local table lists.
*/
protected void clear() {
tableNameToFileNameMap.clear();
tableNames.clear();
} | 3.68 |
flink_TypeInformation_getGenericParameters | /**
* Optional method for giving Flink's type extraction system information about the mapping of a
* generic type parameter to the type information of a subtype. This information is necessary in
* cases where type information should be deduced from an input type.
*
* <p>For instance, a method for a {@link Tuple2} would look like this: <code>
* Map m = new HashMap();
* m.put("T0", this.getTypeAt(0));
* m.put("T1", this.getTypeAt(1));
* return m;
* </code>
*
* @return map of inferred subtypes; it does not have to contain all generic parameters as keys;
* values may be null if the type could not be inferred
*/
@PublicEvolving
public Map<String, TypeInformation<?>> getGenericParameters() {
// return an empty map as the default implementation
return Collections.emptyMap();
} | 3.68 |
framework_StaticSection_isVisible | /**
* Gets the visibility of this section.
*
* @return {@code true} if visible; {@code false} if not
*
* @since 8.1.1
*/
public boolean isVisible() {
return getState(false).visible;
} | 3.68 |
hadoop_S3ALocatedFileStatus_toS3AFileStatus | /**
* Generate an S3AFileStatus instance, including etag and
* version ID, if present.
* @return the S3A status.
*/
public S3AFileStatus toS3AFileStatus() {
return new S3AFileStatus(
getPath(),
isDirectory(),
isEmptyDirectory,
getLen(),
getModificationTime(),
getBlockSize(),
getOwner(),
getEtag(),
getVersionId());
} | 3.68 |
hbase_VersionModel_setJerseyVersion | /**
* @param version the Jersey framework version string
*/
public void setJerseyVersion(String version) {
this.jerseyVersion = version;
} | 3.68 |
druid_DruidAbstractDataSource_isInitExceptionThrow | /**
* @since 1.1.11
*/
public boolean isInitExceptionThrow() {
return initExceptionThrow;
} | 3.68 |
framework_ApplicationConfiguration_getRootElement | /**
* Get the root element instance used for this application.
*
* @return registered root element
* @since 8.4
*/
public Element getRootElement() {
return rootElement;
} | 3.68 |
hadoop_DynoInfraUtils_waitForNameNodeJMXValue | /**
* Poll the launched NameNode's JMX for a specific value, waiting for it to
* cross some threshold. Continues until the threshold has been crossed or
* {@code shouldExit} returns true. Periodically logs the current value.
*
* @param valueName The human-readable name of the value which is being
* polled (for printing purposes only).
* @param jmxBeanQuery The JMX bean query to execute; should return a JMX
* property matching {@code jmxProperty}.
* @param jmxProperty The name of the JMX property whose value should be
* polled.
* @param threshold The threshold value to wait for the JMX property to be
* above/below.
* @param printThreshold The threshold between each log statement; controls
* how frequently the value is printed. For example,
* if this was 10, a statement would be logged every
* time the value has changed by more than 10.
* @param decreasing True iff the property's value is decreasing and this
* should wait until it is lower than threshold; else the
* value is treated as increasing and will wait until it
* is higher than threshold.
* @param nameNodeProperties The set of properties containing information
* about the NameNode.
* @param shouldExit Should return true iff this should stop waiting.
* @param log Where to log information.
*/
@SuppressWarnings("checkstyle:parameternumber")
private static void waitForNameNodeJMXValue(String valueName,
String jmxBeanQuery, String jmxProperty, double threshold,
double printThreshold, boolean decreasing, Properties nameNodeProperties,
Supplier<Boolean> shouldExit, Logger log) throws InterruptedException {
double lastPrintedValue = decreasing ? Double.MAX_VALUE : Double.MIN_VALUE;
double value;
int retryCount = 0;
long startTime = Time.monotonicNow();
while (!shouldExit.get()) {
try {
value = Double.parseDouble(fetchNameNodeJMXValue(nameNodeProperties,
jmxBeanQuery, jmxProperty));
if ((decreasing && value <= threshold)
|| (!decreasing && value >= threshold)) {
log.info(String.format(
"%s = %.2f; %s threshold of %.2f; done waiting after %d ms.",
valueName, value, decreasing ? "below" : "above", threshold,
Time.monotonicNow() - startTime));
break;
} else if (Math.abs(value - lastPrintedValue) >= printThreshold) {
log.info(String.format("%s: %.2f", valueName, value));
lastPrintedValue = value;
}
} catch (IOException ioe) {
if (++retryCount % 20 == 0) {
log.warn("Unable to fetch {}; retried {} times / waited {} ms",
valueName, retryCount, Time.monotonicNow() - startTime, ioe);
}
}
Thread.sleep(3000);
}
} | 3.68 |
hbase_ImplType_toString | /** Returns <code>-option</code> */
@Override
public String toString() {
return "-" + option;
} | 3.68 |
framework_FocusableComplexPanel_addKeyPressHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasKeyPressHandlers#addKeyPressHandler
* (com.google.gwt.event.dom.client.KeyPressHandler)
*/
@Override
public HandlerRegistration addKeyPressHandler(KeyPressHandler handler) {
return addDomHandler(handler, KeyPressEvent.getType());
} | 3.68 |
hudi_GenericRecordFullPayloadGenerator_getNewPayload | /**
* Create a new {@link GenericRecord} with random value according to given schema.
*
* Long fields which are specified within partitionPathFieldNames are constrained to the value of the partition for which records are being generated.
*
* @return {@link GenericRecord} with random value
*/
public GenericRecord getNewPayload(Set<String> partitionPathFieldNames) {
return create(baseSchema, partitionPathFieldNames);
} | 3.68 |
hudi_CatalogOptions_allOptions | /**
* Returns all the config options.
*/
public static List<ConfigOption<?>> allOptions() {
return OptionsResolver.allOptions(CatalogOptions.class);
} | 3.68 |
hadoop_ClusterMetrics_getDecommissionedTaskTrackerCount | /**
* Get the number of decommissioned trackers in the cluster.
*
* @return decommissioned tracker count
*/
public int getDecommissionedTaskTrackerCount() {
return numDecommissionedTrackers;
} | 3.68 |
morf_SqlDialect_indexes | /**
* @see org.alfasoftware.morf.metadata.Table#indexes()
*/
@Override
public List<Index> indexes() {
return new ArrayList<>();
} | 3.68 |
flink_PropertiesUtil_getBoolean | /**
* Get boolean from properties. This method returns {@code true} iff the parsed value is "true".
*
* @param config Properties
* @param key key in Properties
* @param defaultValue default value if value is not set
* @return default or value of key
*/
public static boolean getBoolean(Properties config, String key, boolean defaultValue) {
String val = config.getProperty(key);
if (val == null) {
return defaultValue;
} else {
return Boolean.parseBoolean(val);
}
} | 3.68 |
flink_ExpressionResolver_resolve | /**
* Resolves given expressions with configured set of rules. All expressions of an operation
* should be given at once as some rules might assume the order of expressions.
*
* <p>After this method is applied the returned expressions should be ready to be converted to
* planner specific expressions.
*
* @param expressions list of expressions to resolve.
* @return resolved list of expressions
*/
public List<ResolvedExpression> resolve(List<Expression> expressions) {
final Function<List<Expression>, List<Expression>> resolveFunction =
concatenateRules(getAllResolverRules());
final List<Expression> resolvedExpressions = resolveFunction.apply(expressions);
return resolvedExpressions.stream()
.map(e -> e.accept(VERIFY_RESOLUTION_VISITOR))
.collect(Collectors.toList());
} | 3.68 |
hadoop_NMStateStoreService_serviceStop | /** Shutdown the state storage. */
@Override
public void serviceStop() throws IOException {
closeStorage();
} | 3.68 |
framework_VaadinService_writeStringResponse | /**
* Writes the given string as a response using the given content type.
*
* @param response
* The response reference
* @param contentType
* The content type of the response
* @param responseString
* The actual response
* @throws IOException
* If an error occurred while writing the response
*/
public void writeStringResponse(VaadinResponse response, String contentType,
String responseString) throws IOException {
response.setContentType(contentType);
final OutputStream out = response.getOutputStream();
try (PrintWriter outWriter = new PrintWriter(
new BufferedWriter(new OutputStreamWriter(out, UTF_8)))) {
outWriter.print(responseString);
}
} | 3.68 |
hbase_InputStreamBlockDistribution_isEnabled | /**
* True if we should derive StoreFile HDFSBlockDistribution from the underlying input stream
*/
public static boolean isEnabled(Configuration conf) {
return conf.getBoolean(HBASE_LOCALITY_INPUTSTREAM_DERIVE_ENABLED,
DEFAULT_HBASE_LOCALITY_INPUTSTREAM_DERIVE_ENABLED);
} | 3.68 |
hadoop_StoreContext_getRequestFactory | /**
* Get the request factory.
* @return the factory for requests.
*/
public RequestFactory getRequestFactory() {
return contextAccessors.getRequestFactory();
} | 3.68 |
flink_SkipListUtils_helpGetNextNode | /**
* Return the next node of the given node at the given level.
*
* @param node the node to find the next node for.
* @param level the level to find the next node.
* @param levelIndexHeader the header of the level index.
* @param spaceAllocator the space allocator.
* @return the pointer to the next node of the given node at the given level.
*/
static long helpGetNextNode(
long node, int level, LevelIndexHeader levelIndexHeader, Allocator spaceAllocator) {
if (node == HEAD_NODE) {
return levelIndexHeader.getNextNode(level);
}
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
return level == 0
? getNextKeyPointer(segment, offsetInByteBuffer)
: getNextIndexNode(segment, offsetInByteBuffer, level);
} | 3.68 |
framework_ComponentRootSetter_setRoot | /**
* Sets the composition root for the given custom component or composite.
* <p>
* For internal use only.
*
* @param customComponent
* the custom component or composite
* @param component
* the component to assign as composition root
*/
public static void setRoot(Component customComponent, Component component) {
if (customComponent instanceof CustomComponent) {
((CustomComponent) customComponent).setCompositionRoot(component);
} else if (customComponent instanceof Composite) {
((Composite) customComponent).setCompositionRoot(component);
} else {
throw new IllegalArgumentException(
"Parameter is of an unsupported type: "
+ customComponent.getClass().getName());
}
} | 3.68 |
hbase_HRegionServer_finishRegionProcedure | /**
* See {@link #submitRegionProcedure(long)}.
* @param procId the id of the open/close region procedure
*/
public void finishRegionProcedure(long procId) {
executedRegionProcedures.put(procId, procId);
submittedRegionProcedures.remove(procId);
} | 3.68 |
flink_DeltaIterationBase_getInitialWorkset | /**
* Returns the initial workset input, or null, if none is set.
*
* @return The iteration's workset input.
*/
public Operator<WT> getInitialWorkset() {
return getSecondInput();
} | 3.68 |
hbase_ByteBufferKeyValue_hashCode | /**
* In line with {@link #equals(Object)}, only uses the key portion, not the value.
*/
@Override
public int hashCode() {
return calculateHashForKey(this);
} | 3.68 |
hadoop_StoreContext_isCSEEnabled | /**
* Return whether the store context has client-side encryption enabled.
* @return boolean indicating if CSE is enabled or not.
*/
public boolean isCSEEnabled() {
return isCSEEnabled;
} | 3.68 |
framework_Embedded_setCodebase | /**
* This attribute specifies the base path used to resolve relative URIs
* specified by the classid, data, and archive attributes. When absent, its
* default value is the base URI of the current document.
*
* @param codebase
* The base path
*/
public void setCodebase(String codebase) {
String oldCodebase = getCodebase();
if (codebase != oldCodebase
|| (codebase != null && !codebase.equals(oldCodebase))) {
getState().codebase = codebase;
}
} | 3.68 |
streampipes_EpProperties_listIntegerEp | /**
* Creates a new list-based event property of type integer and with the assigned domain property.
*
* @param label A human-readable label of the property
* @param runtimeName The field identifier of the event property at runtime.
* @param domainProperty The semantics of the list property as a String. The string should correspond to a URI
* provided by a vocabulary. Use one of the vocabularies provided in
* {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary.
* @return {@link org.apache.streampipes.model.schema.EventPropertyList}
*/
public static EventPropertyList listIntegerEp(Label label, String runtimeName, String domainProperty) {
return listEp(label, runtimeName, Datatypes.Integer, domainProperty);
} | 3.68 |
hudi_MetadataMigrator_migrateToVersion | /**
* Migrate metadata to a specific version.
*
* @param metadata Hoodie Table Meta Client
* @param metadataVersion Metadata Version
* @param targetVersion Target Version
* @return Metadata conforming to the target version
*/
public T migrateToVersion(T metadata, int metadataVersion, int targetVersion) {
ValidationUtils.checkArgument(targetVersion >= oldestVersion);
ValidationUtils.checkArgument(targetVersion <= latestVersion);
if (metadataVersion == targetVersion) {
return metadata;
} else if (metadataVersion > targetVersion) {
return downgradeToVersion(metadata, metadataVersion, targetVersion);
} else {
return upgradeToVersion(metadata, metadataVersion, targetVersion);
}
} | 3.68 |
pulsar_AuthenticationDataSource_hasDataFromHttp | /**
* Check if data from HTTP are available.
*
* @return true if this authentication data contain data from HTTP
*/
default boolean hasDataFromHttp() {
return false;
} | 3.68 |
druid_MySqlStatementParser_parseProcedureStatementList | /**
* parse procedure statement block
*/
private void parseProcedureStatementList(List<SQLStatement> statementList, int max) {
for (; ; ) {
if (max != -1) {
if (statementList.size() >= max) {
return;
}
}
if (lexer.token() == Token.EOF) {
return;
}
if (lexer.token() == Token.END) {
return;
}
if (lexer.token() == Token.ELSE) {
return;
}
if (lexer.token() == (Token.SEMI)) {
lexer.nextToken();
continue;
}
if (lexer.token() == Token.WHEN) {
return;
}
if (lexer.token() == Token.UNTIL) {
return;
}
// select into
if (lexer.token() == (Token.SELECT)) {
statementList.add(this.parseSelectInto());
continue;
}
// update
if (lexer.token() == (Token.UPDATE)) {
statementList.add(parseUpdateStatement());
continue;
}
// create
if (lexer.token() == (Token.CREATE)) {
statementList.add(parseCreate());
continue;
}
// insert
if (lexer.token() == Token.INSERT) {
SQLStatement stmt = parseInsert();
statementList.add(stmt);
continue;
}
// delete
if (lexer.token() == (Token.DELETE)) {
statementList.add(parseDeleteStatement());
continue;
}
// call
if (lexer.token() == Token.LBRACE || lexer.identifierEquals("CALL")) {
statementList.add(this.parseCall());
continue;
}
// begin
if (lexer.token() == Token.BEGIN) {
statementList.add(this.parseBlock());
continue;
}
if (lexer.token() == Token.VARIANT) {
SQLExpr variant = this.exprParser.primary();
if (variant instanceof SQLBinaryOpExpr) {
SQLBinaryOpExpr binaryOpExpr = (SQLBinaryOpExpr) variant;
if (binaryOpExpr.getOperator() == SQLBinaryOperator.Assignment) {
SQLSetStatement stmt = new SQLSetStatement(binaryOpExpr.getLeft(), binaryOpExpr.getRight(),
getDbType());
statementList.add(stmt);
continue;
}
}
accept(Token.COLONEQ);
SQLExpr value = this.exprParser.expr();
SQLSetStatement stmt = new SQLSetStatement(variant, value, getDbType());
statementList.add(stmt);
continue;
}
// select
if (lexer.token() == Token.LPAREN) {
Lexer.SavePoint savePoint = lexer.markOut();
lexer.nextToken();
if (lexer.token() == Token.SELECT) {
lexer.reset(savePoint);
statementList.add(this.parseSelect());
continue;
} else {
throw new ParserException("TODO. " + lexer.info());
}
}
// assign statement
if (lexer.token() == Token.SET) {
statementList.add(this.parseAssign());
continue;
}
// while statement
if (lexer.token() == Token.WHILE) {
SQLStatement stmt = this.parseWhile();
statementList.add(stmt);
continue;
}
// loop statement
if (lexer.token() == Token.LOOP) {
statementList.add(this.parseLoop());
continue;
}
// if statement
if (lexer.token() == Token.IF) {
statementList.add(this.parseIf());
continue;
}
// case statement
if (lexer.token() == Token.CASE) {
statementList.add(this.parseCase());
continue;
}
// declare statement
if (lexer.token() == Token.DECLARE) {
SQLStatement stmt = this.parseDeclare();
statementList.add(stmt);
continue;
}
// leave statement
if (lexer.token() == Token.LEAVE) {
statementList.add(this.parseLeave());
continue;
}
// iterate statement
if (lexer.token() == Token.ITERATE) {
statementList.add(this.parseIterate());
continue;
}
// repeat statement
if (lexer.token() == Token.REPEAT) {
statementList.add(this.parseRepeat());
continue;
}
// open cursor
if (lexer.token() == Token.OPEN) {
statementList.add(this.parseOpen());
continue;
}
// close cursor
if (lexer.token() == Token.CLOSE) {
statementList.add(this.parseClose());
continue;
}
// fetch cursor into
if (lexer.token() == Token.FETCH) {
statementList.add(this.parseFetch());
continue;
}
if (lexer.identifierEquals(FnvHash.Constants.CHECKSUM)) {
statementList.add(this.parseChecksum());
continue;
}
if (lexer.token() == Token.IDENTIFIER) {
String label = lexer.stringVal();
Lexer.SavePoint savePoint = lexer.markOut();
lexer.nextToken();
if (lexer.token() == Token.VARIANT && lexer.stringVal().equals(":")) {
lexer.nextToken();
if (lexer.token() == Token.LOOP) {
// parse loop statement
statementList.add(this.parseLoop(label));
} else if (lexer.token() == Token.WHILE) {
// parse while statement with label
statementList.add(this.parseWhile(label));
} else if (lexer.token() == Token.BEGIN) {
// parse begin-end statement with label
statementList.add(this.parseBlock(label));
} else if (lexer.token() == Token.REPEAT) {
// parse repeat statement with label
statementList.add(this.parseRepeat(label));
}
continue;
} else {
lexer.reset(savePoint);
}
}
throw new ParserException("TODO, " + lexer.info());
}
} | 3.68 |
hadoop_FlowRunRowKeyPrefix_getRowKeyPrefix | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.application.
* RowKeyPrefix#getRowKeyPrefix()
*/
public byte[] getRowKeyPrefix() {
// We know we're a FlowRunRowKey with a null flowRunId, so we can simply
// delegate
return super.getRowKey();
} | 3.68 |
hadoop_TimelineEntity_getEntityType | /**
* Get the entity type
*
* @return the entity type
*/
@XmlElement(name = "entitytype")
public String getEntityType() {
return entityType;
} | 3.68 |
morf_MergeStatement_deepCopy | /**
* @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation)
*/
@Override
public MergeStatementBuilder deepCopy(DeepCopyTransformation transformer) {
return new MergeStatementBuilder(this, transformer);
} | 3.68 |
pulsar_ProducerImpl_maybeScheduleBatchFlushTask | // must acquire semaphore before calling
private void maybeScheduleBatchFlushTask() {
if (this.batchFlushTask != null || getState() != State.Ready) {
return;
}
scheduleBatchFlushTask(conf.getBatchingMaxPublishDelayMicros());
} | 3.68 |
hbase_BaseSourceImpl_setGauge | /**
* Set a single gauge to a value.
* @param gaugeName gauge name
* @param value the new value of the gauge.
*/
@Override
public void setGauge(String gaugeName, long value) {
MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, value);
gaugeInt.set(value);
} | 3.68 |
flink_TopNBuffer_lastEntry | /** Returns the last Entry in the buffer. Returns null if the TreeMap is empty. */
public Map.Entry<RowData, Collection<RowData>> lastEntry() {
return treeMap.lastEntry();
} | 3.68 |
morf_UpgradeStatusTableServiceImpl_tidyUp | /**
*
* @see org.alfasoftware.morf.upgrade.UpgradeStatusTableService#tidyUp(javax.sql.DataSource)
*/
@Override
public void tidyUp(DataSource dataSource) {
try {
new SqlScriptExecutorProvider(dataSource, sqlDialect).get().execute(sqlDialect.dropStatements(table(UpgradeStatusTableService.UPGRADE_STATUS)));
log.info("[" + UPGRADE_STATUS + "] table has been removed, upgrade finished!");
} catch (RuntimeSqlException e) {
//Throw exception only if the table still exists
if (getStatus(Optional.of(dataSource)) != NONE) {
throw e;
}
}
} | 3.68 |
flink_HsSpillingStrategyUtils_getBuffersByConsumptionPriorityInOrder | /**
* Calculate and get the expected number of buffers with the highest consumption priority. For
* each buffer, the greater the difference between its buffer index and the next buffer index to
* consume of the subpartition it belongs to, the higher the priority.
*
* @param nextBufferIndexToConsume downstream next buffer index to consume.
* @param subpartitionToAllBuffers the buffers whose priority should be computed, grouped by
* subpartitionId.
* @param expectedSize number of result buffers.
* @return mapping from subpartitionId to buffers; the value of each map entry must be ordered by
* bufferIndex ascending.
*/
public static TreeMap<Integer, List<BufferIndexAndChannel>>
getBuffersByConsumptionPriorityInOrder(
List<Integer> nextBufferIndexToConsume,
TreeMap<Integer, Deque<BufferIndexAndChannel>> subpartitionToAllBuffers,
int expectedSize) {
if (expectedSize <= 0) {
return new TreeMap<>();
}
PriorityQueue<BufferConsumptionPriorityIterator> heap = new PriorityQueue<>();
subpartitionToAllBuffers.forEach(
(subpartitionId, buffers) -> {
if (!buffers.isEmpty()) {
heap.add(
new BufferConsumptionPriorityIterator(
buffers, nextBufferIndexToConsume.get(subpartitionId)));
}
});
TreeMap<Integer, List<BufferIndexAndChannel>> subpartitionToHighPriorityBuffers =
new TreeMap<>();
for (int i = 0; i < expectedSize; i++) {
if (heap.isEmpty()) {
break;
}
BufferConsumptionPriorityIterator bufferConsumptionPriorityIterator = heap.poll();
BufferIndexAndChannel bufferIndexAndChannel = bufferConsumptionPriorityIterator.next();
subpartitionToHighPriorityBuffers
.computeIfAbsent(bufferIndexAndChannel.getChannel(), k -> new ArrayList<>())
.add(bufferIndexAndChannel);
// if this iterator has next, re-add it.
if (bufferConsumptionPriorityIterator.hasNext()) {
heap.add(bufferConsumptionPriorityIterator);
}
}
// treeMap will ensure that the keys are sorted by subpartitionId
// ascending. Within the same subpartition, the larger the bufferIndex,
// the higher the consumption priority, so reverse the values so that buffers are
// ordered by (subpartitionId, bufferIndex) ascending.
subpartitionToHighPriorityBuffers.values().forEach(Collections::reverse);
return subpartitionToHighPriorityBuffers;
} | 3.68 |
framework_BrowserInfo_isIOS6 | /**
* Checks if the browser is run on iOS 6.
*
* @since 7.1.1
* @return true if the browser is run on iOS 6, false otherwise
*/
public boolean isIOS6() {
return isIOS() && getOperatingSystemMajorVersion() == 6;
} | 3.68 |
hbase_AccessControlClient_isAuthorizationEnabled | /**
* Return true if authorization is supported and enabled
* @param connection The connection to use
* @return true if authorization is supported and enabled, false otherwise
*/
public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
.contains(SecurityCapability.AUTHORIZATION);
} | 3.68 |
hadoop_BalanceJob_execute | /**
* Run the state machine.
*/
public void execute() {
boolean quit = false;
try {
while (!jobDone && !quit && scheduler.isRunning()) {
if (curProcedure == null) { // Job done.
finish(null);
quit = true;
} else {
if (curProcedure == firstProcedure || lastProcedure != curProcedure) {
LOG.info("Start procedure {}, last procedure is {}",
curProcedure.name(),
lastProcedure == null ? null : lastProcedure.name());
}
if (curProcedure.execute()) {
lastProcedure = curProcedure;
curProcedure = next();
}
if (!scheduler.writeJournal(this)) {
quit = true; // Write journal failed. Simply quit because this job
// has already been added to the recoverQueue.
LOG.debug("Write journal failed. Quit and wait for recovery.");
}
}
}
} catch (BalanceProcedure.RetryException tre) {
scheduler.delay(this, curProcedure.delayMillisBeforeRetry());
} catch (Exception e) {
finish(e);
} catch (Throwable t) {
IOException err = new IOException("Got throwable error.", t);
finish(err);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectMultipleWhereScript | /**
* Tests a select with multiple where clauses
*/
@Test
public void testSelectMultipleWhereScript() {
SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE))
.where(and(
eq(new FieldReference(STRING_FIELD), "A0001"),
greaterThan(new FieldReference(INT_FIELD), 20080101),
lessThan(new FieldReference(DATE_FIELD), 20090101)
));
String value = varCharCast("'A0001'");
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE ((stringField = " + stringLiteralPrefix() + value+") AND (intField > 20080101) AND (dateField < 20090101))";
assertEquals("Select with multiple where clauses", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
morf_H2Dialect_getSqlForLastDayOfMonth | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForLastDayOfMonth
*/
@Override
protected String getSqlForLastDayOfMonth(AliasedField date) {
return "DATEADD(dd, -DAY(DATEADD(m,1," + getSqlFrom(date) + ")), DATEADD(m,1," + getSqlFrom(date) + "))";
} | 3.68 |
framework_Range_endsAfter | /**
* Checks whether this range ends after the end of another range.
*
* @param other
* the other range to compare against
* @return <code>true</code> if this range ends after the <code>other</code> range
*/
public boolean endsAfter(final Range other) {
return getEnd() > other.getEnd();
} | 3.68 |
dubbo_ServiceConfigBase_setInterfaceClass | /**
* @param interfaceClass
* @see #setInterface(Class)
* @deprecated
*/
public void setInterfaceClass(Class<?> interfaceClass) {
setInterface(interfaceClass);
} | 3.68 |
hbase_ProcedurePrepareLatch_createLatch | /**
* Create a latch if the client does not have async proc support
* @param major major version with async proc support
* @param minor minor version with async proc support
* @return a CompatibilityLatch or a NoopLatch if the client has async proc support
*/
public static ProcedurePrepareLatch createLatch(int major, int minor) {
// don't use the latch if we have procedure support
return hasProcedureSupport(major, minor) ? noopLatch : new CompatibilityLatch();
} | 3.68 |
morf_TableReference_as | /**
* Specifies the alias to use for the table.
*
* @param aliasName the name of the alias
* @return an updated {@link TableReference} (this will not be a new object)
*/
public TableReference as(String aliasName) {
if (AliasedField.immutableDslEnabled()) {
return new TableReference(this, aliasName);
} else {
this.alias = aliasName;
return this;
}
} | 3.68 |
morf_AbstractSqlDialectTest_testJoinNoCriteria | /**
* Tests a join with no ON criteria.
*/
@Test
public void testJoinNoCriteria() {
SelectStatement testStatement = select().from(tableRef("TableOne")).crossJoin(tableRef("TableTwo"));
assertEquals(testDialect.convertStatementToSQL(testStatement), expectedJoinOnEverything());
} | 3.68 |
hadoop_WriteOperationHelper_writeFailed | /**
* Callback on a write failure.
* @param ex Any exception raised which triggered the failure.
*/
public void writeFailed(Exception ex) {
LOG.debug("Write to {} failed", this, ex);
} | 3.68 |
streampipes_Labels_withId | /**
* Creates a label with the string value of an enum.
* Static properties require a fully-specified label, see {@link #from(String, String, String)}
* @param internalId The internal identifier of the element, e.g., "LATITUDE-FIELD-MAPPING"
* @return a {@link Label} whose internal id is the enum's name, with empty label and description
*/
public static Label withId(Enum<?> internalId) {
return new Label(internalId.name(), "", "");
} | 3.68 |
flink_ThriftObjectConversions_toString | /**
* Converts a {@link Throwable} object into a flattened list of texts including its stack trace
* and the stack traces of the nested causes.
*
* @param ex a {@link Throwable} object
* @return a flattened list of texts including the {@link Throwable} object's stack trace and
* the stack traces of the nested causes.
*/
private static List<String> toString(Throwable ex) {
return toString(ex, null);
} | 3.68 |
framework_LogSection_setLimit | /**
* Sets the log row limit.
*
* @param limit
* The limit
*/
public void setLimit(int limit) {
this.limit = limit;
applyLimit();
// TODO should be persisted
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_setWriteBatchSize | /**
* Sets the max batch size to be used in {@link RocksDBWriteBatchWrapper}; setting it to 0 disables
* the memory size controller so that only the item count controller is used.
*
* @param writeBatchSize The batch size to be used in {@link RocksDBWriteBatchWrapper}.
*/
public void setWriteBatchSize(long writeBatchSize) {
checkArgument(writeBatchSize >= 0, "Write batch size must be non-negative.");
this.writeBatchSize = writeBatchSize;
} | 3.68 |
pulsar_SecurityUtility_processConscryptTrustManager | // workaround https://github.com/google/conscrypt/issues/1015
private static void processConscryptTrustManager(TrustManager trustManager) {
if (trustManager.getClass().getName().equals("org.conscrypt.TrustManagerImpl")) {
try {
Class<?> conscryptClazz = Class.forName("org.conscrypt.Conscrypt");
Object hostnameVerifier = conscryptClazz.getMethod("getHostnameVerifier",
new Class[]{TrustManager.class}).invoke(null, trustManager);
if (hostnameVerifier == null) {
Object defaultHostnameVerifier = conscryptClazz.getMethod("getDefaultHostnameVerifier",
new Class[]{TrustManager.class}).invoke(null, trustManager);
if (defaultHostnameVerifier != null) {
conscryptClazz.getMethod("setHostnameVerifier", new Class[]{
TrustManager.class,
Class.forName("org.conscrypt.ConscryptHostnameVerifier")
}).invoke(null, trustManager, defaultHostnameVerifier);
}
}
} catch (ReflectiveOperationException e) {
log.warn("Unable to set hostname verifier for Conscrypt TrustManager implementation", e);
}
}
} | 3.68 |
morf_SqlDialect_getDeleteLimitPreFromClause | /**
* Returns the SQL that specifies the deletion limit ahead of the FROM clause, if any, for the dialect.
*
* @param limit The delete limit.
* @return The SQL fragment.
*/
protected Optional<String> getDeleteLimitPreFromClause(@SuppressWarnings("unused") int limit) {
return Optional.empty();
} | 3.68 |
hibernate-validator_ConstraintMappingContextImplBase_addConstraint | /**
* Adds a constraint to the set of constraints managed by this creational context.
*
* @param constraint the constraint to add
*/
protected void addConstraint(ConfiguredConstraint<?> constraint) {
constraints.add( constraint );
} | 3.68 |
hadoop_RegistryDNSServer_launchDNSServer | /**
* Launch the server.
* @param conf configuration
* @param rdns registry dns instance
* @return the launched {@code RegistryDNSServer} instance
*/
static RegistryDNSServer launchDNSServer(Configuration conf,
RegistryDNS rdns) {
RegistryDNSServer dnsServer = null;
Thread.setDefaultUncaughtExceptionHandler(
new HadoopUncaughtExceptionHandler());
try {
dnsServer = new RegistryDNSServer("RegistryDNSServer", rdns);
ShutdownHookManager.get().addShutdownHook(
new CompositeService.CompositeServiceShutdownHook(dnsServer),
SHUTDOWN_HOOK_PRIORITY);
dnsServer.init(conf);
dnsServer.start();
} catch (Throwable t) {
LOG.error("Error starting Registry DNS Server", t);
ExitUtil.terminate(-1, "Error starting Registry DNS Server");
}
return dnsServer;
} | 3.68 |
hadoop_ListResultEntrySchema_group | /**
* Get the group value.
*
* @return the group value
*/
public String group() {
return group;
} | 3.68 |
flink_FailureHandlingResultSnapshot_create | /**
* Creates a {@code FailureHandlingResultSnapshot} based on the passed {@link
* FailureHandlingResult} and {@link ExecutionVertex ExecutionVertices}.
*
* @param failureHandlingResult The {@code FailureHandlingResult} that is used for extracting
* the failure information.
* @param currentExecutionsLookup The look-up function for retrieving all the current {@link
* Execution} instances for a given {@link ExecutionVertexID}.
* @return The {@code FailureHandlingResultSnapshot}.
*/
public static FailureHandlingResultSnapshot create(
FailureHandlingResult failureHandlingResult,
Function<ExecutionVertexID, Collection<Execution>> currentExecutionsLookup) {
final Execution rootCauseExecution =
failureHandlingResult.getFailedExecution().orElse(null);
if (rootCauseExecution != null && !rootCauseExecution.getFailureInfo().isPresent()) {
throw new IllegalArgumentException(
String.format(
"The failed execution %s didn't provide a failure info.",
rootCauseExecution.getAttemptId()));
}
final Set<Execution> concurrentlyFailedExecutions =
failureHandlingResult.getVerticesToRestart().stream()
.flatMap(id -> currentExecutionsLookup.apply(id).stream())
.filter(execution -> execution != rootCauseExecution)
.filter(execution -> execution.getFailureInfo().isPresent())
.collect(Collectors.toSet());
return new FailureHandlingResultSnapshot(
rootCauseExecution,
ErrorInfo.handleMissingThrowable(failureHandlingResult.getError()),
failureHandlingResult.getTimestamp(),
failureHandlingResult.getFailureLabels(),
concurrentlyFailedExecutions);
} | 3.68 |
hadoop_AdlFsInputStream_available | /**
* This method returns the remaining bytes in the stream, rather than the
* expected Java interpretation of {@link java.io.InputStream#available()},
* which expects the number of remaining bytes in the local buffer. Moreover,
* it caps the value returned to a maximum of Integer.MAX_VALUE.
* These changed behaviors are to ensure compatibility with the expectations
* of the HBase WAL reader, which depends on available() returning the number
* of bytes in the stream.
*
* Given that all other FileSystems in the hadoop ecosystem (especially HDFS)
* do this, it is possible that apps other than HBase would also pick up an
* expectation of this behavior based on the HDFS implementation.
* Therefore keeping this quirky behavior here, to ensure compatibility.
*
* @return remaining bytes in the stream, with maximum of Integer.MAX_VALUE.
* @throws IOException If fails to get the position or file length from SDK.
*/
@Override
public synchronized int available() throws IOException {
return (int) Math.min(in.length() - in.getPos(), Integer.MAX_VALUE);
} | 3.68 |
flink_ZooKeeperLeaderElectionHaServices_cleanupZooKeeperPaths | /** Cleans up leftover ZooKeeper paths. */
private void cleanupZooKeeperPaths() throws Exception {
deleteOwnedZNode();
tryDeleteEmptyParentZNodes();
} | 3.68 |
flink_PartitionRequestListenerManager_removeExpiration | /**
* Remove the expired partition request listeners and add them to the given timeoutListeners.
*
* @param now the timestamp
* @param timeout the timeout mills
* @param timeoutListeners the expired partition request listeners
*/
public void removeExpiration(
long now, long timeout, Collection<PartitionRequestListener> timeoutListeners) {
Iterator<Map.Entry<InputChannelID, PartitionRequestListener>> iterator =
listeners.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<InputChannelID, PartitionRequestListener> entry = iterator.next();
PartitionRequestListener partitionRequestListener = entry.getValue();
if ((now - partitionRequestListener.getCreateTimestamp()) > timeout) {
timeoutListeners.add(partitionRequestListener);
iterator.remove();
}
}
} | 3.68 |
flink_BlobUtils_getStorageLocation | /**
* Returns the (designated) physical storage location of the BLOB with the given key.
*
* @param storageDir storage directory used be the BLOB service
* @param key the key identifying the BLOB
* @param jobId ID of the job for the incoming files (or <tt>null</tt> if job-unrelated)
* @return the (designated) physical storage location of the BLOB
* @throws IOException if creating the directory fails
*/
static File getStorageLocation(File storageDir, @Nullable JobID jobId, BlobKey key)
throws IOException {
File file = new File(getStorageLocationPath(storageDir.getAbsolutePath(), jobId, key));
Files.createDirectories(file.getParentFile().toPath());
return file;
} | 3.68 |
hbase_Filter_setReversed | /**
* alter the reversed scan flag
* @param reversed flag
*/
public void setReversed(boolean reversed) {
this.reversed = reversed;
} | 3.68 |
framework_RpcDataProviderExtension_addDataGenerator | /**
* Adds a {@link DataGenerator} for this {@code RpcDataProviderExtension}.
* DataGenerators are called when sending row data to client. If given
* DataGenerator is already added, this method does nothing.
*
* @since 7.6
* @param generator
* generator to add
*/
public void addDataGenerator(DataGenerator generator) {
dataGenerators.add(generator);
} | 3.68 |
flink_DefaultResourceCleaner_withRegularCleanup | /**
* Regular cleanups are resources for which the cleanup is triggered after all prioritized
* cleanups succeeded. All added regular cleanups will run concurrently to each other.
*
* @param label The label being used when logging errors in the given cleanup.
* @param regularCleanup The cleanup callback that is going to run after all prioritized
* cleanups are finished.
* @see #withPrioritizedCleanup(String, Object)
*/
public Builder<T> withRegularCleanup(String label, T regularCleanup) {
this.regularCleanup.add(new CleanupWithLabel<>(regularCleanup, label));
return this;
} | 3.68 |
hadoop_GangliaConf_getTmax | /**
* @return the tmax
*/
int getTmax() {
return tmax;
} | 3.68 |
hbase_FanOutOneBlockAsyncDFSOutput_completed | // all lock-free to make it run faster
private void completed(Channel channel) {
for (Iterator<Callback> iter = waitingAckQueue.iterator(); iter.hasNext();) {
Callback c = iter.next();
// if the current unfinished replicas does not contain us then it means that we have already
// acked this one, let's iterate to find the one we have not acked yet.
if (c.unfinishedReplicas.remove(channel.id())) {
long current = EnvironmentEdgeManager.currentTime();
streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen,
current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size());
c.lastAckTimestamp = current;
if (c.unfinishedReplicas.isEmpty()) {
// we need to remove first before complete the future. It is possible that after we
// complete the future the upper layer will call close immediately before we remove the
// entry from waitingAckQueue and lead to an IllegalStateException. And also set the
// ackedBlockLength first otherwise we may use a wrong length to commit the block. This
// may lead to multiple remove and assign but is OK. The semantic of iter.remove is
// removing the entry returned by calling previous next, so if the entry has already been
// removed then it is a no-op, and for the assign, the values are the same so no problem.
iter.remove();
ackedBlockLength = c.ackedLength;
// the future.complete check is to confirm that we are the only one who grabbed the work,
// otherwise just give up and return.
if (c.future.complete(c.ackedLength)) {
// also wake up flush requests which have the same length.
while (iter.hasNext()) {
Callback maybeDummyCb = iter.next();
if (maybeDummyCb.ackedLength == c.ackedLength) {
iter.remove();
maybeDummyCb.future.complete(c.ackedLength);
} else {
break;
}
}
}
}
return;
}
}
} | 3.68 |
framework_DefaultConnectionStateHandler_doReconnect | /**
* Re-sends the payload to the server (if not null) or re-sends a heartbeat
* request immediately.
*
* @param payload
* the payload that did not reach the server, null if the problem
* was detected by a heartbeat
*/
protected void doReconnect(JsonObject payload) {
if (!connection.isApplicationRunning()) {
// This should not happen as nobody should call this if the
// application has been stopped
getLogger().warning(
"Trying to reconnect after application has been stopped. Giving up");
return;
}
if (payload != null) {
getLogger().info("Re-sending last message to the server...");
getConnection().getMessageSender().send(payload);
} else {
// Use heartbeat
getLogger().info("Trying to re-establish server connection...");
getConnection().getHeartbeat().send();
}
} | 3.68 |
hbase_MasterObserver_postMasterStoreFlush | /**
* Called after the master local region memstore is flushed to disk.
* @param ctx the environment to interact with the framework and master
*/
default void postMasterStoreFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
flink_EntropyInjector_removeEntropyMarkerIfPresent | /**
* Removes the entropy marker string from the path, if the given file system is an
* entropy-injecting file system (implements {@link EntropyInjectingFileSystem}) and the entropy
* marker key is present. Otherwise, this returns the path as is.
*
* @param path The path to filter.
* @return The path without the marker string.
*/
public static Path removeEntropyMarkerIfPresent(FileSystem fs, Path path) {
final EntropyInjectingFileSystem efs = getEntropyFs(fs);
if (efs == null) {
return path;
} else {
try {
return resolveEntropy(path, efs, false);
} catch (IOException e) {
// this should never happen, because the path was valid before and we only remove
// characters.
// rethrow to silence the compiler
throw new FlinkRuntimeException(e.getMessage(), e);
}
}
} | 3.68 |
flink_BinaryStringData_copy | /** Copy a new {@code BinaryStringData}. */
public BinaryStringData copy() {
ensureMaterialized();
byte[] copy =
BinarySegmentUtils.copyToBytes(
binarySection.segments, binarySection.offset, binarySection.sizeInBytes);
return new BinaryStringData(
new MemorySegment[] {MemorySegmentFactory.wrap(copy)},
0,
binarySection.sizeInBytes,
javaObject);
} | 3.68 |
morf_Criterion_lessThanOrEqualTo | /**
* Helper method to create a new "LESS THAN OR EQUAL TO" expression.
*
* <blockquote><pre>
* Criterion.lessThanOrEqualTo(new Field("startdate"), 20091001);</pre></blockquote>
*
* @param field the field to evaluate in the expression (the left hand side of the expression)
* @param value the value to evaluate in the expression (the right hand side)
* @return a new Criterion object
*/
public static Criterion lessThanOrEqualTo(AliasedField field, Object value) {
return new Criterion(Operator.LTE, field, value);
} | 3.68 |
hadoop_NMClientAsync_onCommitLastReInitialization | /**
* Callback for commit of last re-initialization.
*
* @param containerId the Id of the container to commit reInitialize.
*/
public void onCommitLastReInitialization(ContainerId containerId) {} | 3.68 |
framework_PureGWTTestApplication_addCommand | /**
* Adds a command item to the menu. When the entry is clicked, the
* command is executed.
*
* @param cmd
* a command object.
*/
public void addCommand(Command cmd) {
menubar.addItem(cmd.title, cmd.command);
items.add(cmd);
} | 3.68 |
AreaShop_RentingRegionEvent_getPlayer | /**
* Get the player that is trying to rent the region.
* @return The player that is trying to rent the region
*/
public OfflinePlayer getPlayer() {
return player;
} | 3.68 |
cron-utils_CronDefinitionBuilder_spring53 | /**
* Creates CronDefinition instance matching Spring (v5.3 onwards) specification.
* https://spring.io/blog/2020/11/10/new-in-spring-5-3-improved-cron-expressions
*
* <p>The cron expression is expected to be a string comprised of 6
* fields separated by white space. Fields can contain any of the allowed
* values, along with various combinations of the allowed special characters
* for that field. The fields are as follows:
*
* <table style="width:100%">
* <tr>
* <th>Field Name</th>
* <th>Mandatory</th>
* <th>Allowed Values</th>
* <th>Allowed Special Characters</th>
* </tr>
* <tr>
* <td>Seconds</td>
* <td>YES</td>
* <td>0-59</td>
* <td>* , - /</td>
* </tr>
* <tr>
* <td>Minutes</td>
* <td>YES</td>
* <td>0-59</td>
* <td>* , - /</td>
* </tr>
* <tr>
* <td>Hours</td>
* <td>YES</td>
* <td>0-23</td>
* <td>* , - /</td>
* </tr>
* <tr>
* <td>Day of month</td>
* <td>YES</td>
* <td>1-31</td>
* <td>* ? , - / L W</td>
* </tr>
* <tr>
* <td>Month</td>
* <td>YES</td>
* <td>1-12 or JAN-DEC</td>
* <td>* , -</td>
* </tr>
* <tr>
* <td>Day of week</td>
* <td>YES</td>
* <td>0-7 or SUN-SAT</td>
* <td>* ? , - / L #</td>
* </tr>
* </table>
*
* <p>Thus in general Spring cron expressions are as follows (from version 5.3 onwards):
*
* <p>S M H DoM M DoW
*
* @return {@link CronDefinition} instance, never {@code null}
*/
private static CronDefinition spring53() {
return CronDefinitionBuilder.defineCron()
.withSeconds().withValidRange(0, 59).withStrictRange().and()
.withMinutes().withValidRange(0, 59).withStrictRange().and()
.withHours().withValidRange(0, 23).withStrictRange().and()
.withDayOfMonth().withValidRange(1, 31).supportsL().supportsW().supportsLW().supportsQuestionMark().and()
.withMonth().withValidRange(1, 12).and()
.withDayOfWeek().withValidRange(0, 7).withMondayDoWValue(1).withIntMapping(7,0)
.supportsHash().supportsL().supportsQuestionMark().and()
.withSupportedNicknameYearly().withSupportedNicknameAnnually()
.withSupportedNicknameMonthly()
.withSupportedNicknameWeekly()
.withSupportedNicknameDaily().withSupportedNicknameMidnight()
.withSupportedNicknameHourly()
.instance();
} | 3.68 |
framework_EditorOpenEvent_getGrid | /**
* Gets the editors' grid.
*
* @return the editors' grid
*/
public Grid<T> getGrid() {
return getSource().getGrid();
} | 3.68 |
flink_FailureHandlingResult_getFailedExecution | /**
* Returns an {@code Optional} with the {@link Execution} causing this failure or an empty
* {@code Optional} if it's a global failure.
*
* @return The {@code Optional} with the failed {@code Execution} or an empty {@code Optional}
* if it's a global failure.
*/
public Optional<Execution> getFailedExecution() {
return Optional.ofNullable(failedExecution);
} | 3.68 |
hbase_MetricsSink_getFailedBatches | /**
* Get the count of the failed batches
*/
protected long getFailedBatches() {
return mss.getFailedBatches();
} | 3.68 |
framework_VAbstractCalendarPanel_doSetDate | /**
* The actual implementation of the logic which sets the date of the Panel.
* The method {@link #setDate(Date)} just delegates a call to this method
* providing additional config parameters.
*
* @param currentDate
* The date to set
* @param needRerender
* if {@code true} then calendar will be rerendered regardless of
* internal logic, otherwise the decision will be made on the
* internal state inside the method
* @param focusAction
* an additional action which will be executed in case
* rerendering is not required
*/
@SuppressWarnings("deprecation")
protected void doSetDate(Date currentDate, boolean needRerender,
Runnable focusAction) {
// Check that we are not re-rendering an already active date
if (currentDate == value && currentDate != null) {
return;
}
boolean currentDateWasAdjusted = false;
// Check that selected date is inside the allowed range
if (currentDate != null
&& !isDateInsideRange(currentDate, getResolution())) {
currentDate = adjustDateToFitInsideRange(currentDate);
currentDateWasAdjusted = true;
}
Date oldDisplayedMonth = displayedMonth;
value = currentDate;
// If current date was adjusted, we will not select any date,
// since that will look like a date is selected. Instead we
// only focus on the adjusted value
if (value == null || currentDateWasAdjusted) {
// If ranges enabled, we may need to focus on a different view to
// potentially not get stuck
if (rangeStart != null || rangeEnd != null) {
Date dateThatFitsInsideRange = adjustDateToFitInsideRange(
new Date());
focusedDate = new FocusedDate(dateThatFitsInsideRange.getYear(),
dateThatFitsInsideRange.getMonth(),
dateThatFitsInsideRange.getDate());
displayedMonth = new FocusedDate(
dateThatFitsInsideRange.getYear(),
dateThatFitsInsideRange.getMonth(), 1);
// value was adjusted. Set selected to null to not cause
// confusion, but this is only needed (and allowed) when we have
// a day resolution
if (isDay(getResolution())) {
value = null;
}
} else {
displayedMonth = null;
focusedDate = null;
}
} else {
focusedDate = new FocusedDate(value.getYear(), value.getMonth(),
value.getDate());
displayedMonth = new FocusedDate(value.getYear(), value.getMonth(),
1);
}
// Re-render calendar if the displayed month is changed.
if (needRerender || oldDisplayedMonth == null || value == null
|| oldDisplayedMonth.getYear() != value.getYear()
|| oldDisplayedMonth.getMonth() != value.getMonth()) {
renderCalendar();
} else {
focusDay(focusedDate);
selectFocused();
focusAction.run();
}
if (!hasFocus) {
focusDay(null);
}
} | 3.68 |
dubbo_DataQueueCommand_getData | // for test
public byte[] getData() {
return data;
} | 3.68 |
flink_Over_orderBy | /**
* Specifies the time attribute on which rows are ordered.
*
* <p>For streaming tables, reference a rowtime or proctime time attribute here to specify the
* time mode.
*
* <p>For batch tables, refer to a timestamp or long attribute.
*
* @param orderBy field reference
* @return an over window with defined order
*/
public static OverWindowPartitionedOrdered orderBy(Expression orderBy) {
return partitionBy().orderBy(orderBy);
} | 3.68 |
querydsl_GuavaGroupByBuilder_asSortedTable | /**
* Get the results as a sorted table
*
* @param column column expression
* @param expression value expression
* @param rowComparator row comparator
* @param columnComparator column comparator
* @param <C> Column type
* @param <V> Value type
* @return new result transformer
*/
public <C, V> ResultTransformer<TreeBasedTable<K, C, V>> asSortedTable(final Expression<C> column,
final Expression<V> expression,
final Comparator<? super K> rowComparator,
final Comparator<? super C> columnComparator) {
final Expression<C> columnKeyLookup = getLookup(column);
final Expression<V> lookup = getLookup(expression);
return new GroupByTable<K, C, V, TreeBasedTable<K, C, V>>(key, column, expression) {
@Override
protected TreeBasedTable<K, C, V> transform(Table<K, ?, Group> groups) {
TreeBasedTable<K, C, V> results = TreeBasedTable.create(rowComparator, columnComparator);
for (Cell<K, ?, Group> cell : groups.cellSet()) {
K rowKey = cell.getRowKey();
C columnKey = cell.getValue().getOne(columnKeyLookup);
V value = cell.getValue().getOne(lookup);
results.put(rowKey, columnKey, value);
}
return results;
}
};
} | 3.68 |
hadoop_STSClientFactory_requestSessionCredentials | /**
* Request a set of session credentials.
*
* @param duration duration of the credentials
* @param timeUnit time unit of duration
* @return the role result
* @throws IOException on a failure of the request
*/
@Retries.RetryTranslated
public Credentials requestSessionCredentials(
final long duration,
final TimeUnit timeUnit) throws IOException {
int durationSeconds = (int) timeUnit.toSeconds(duration);
LOG.debug("Requesting session token of duration {}", duration);
final GetSessionTokenRequest request =
GetSessionTokenRequest.builder().durationSeconds(durationSeconds).build();
return invoker.retry("request session credentials", "",
true,
() ->{
LOG.info("Requesting Amazon STS Session credentials");
return stsClient.getSessionToken(request).credentials();
});
} | 3.68 |
framework_VTree_getSubPartName | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.SubPartAware#getSubPartName(com.google
* .gwt.user.client.Element)
*/
@Override
public String getSubPartName(
com.google.gwt.user.client.Element subElement) {
// Supported identifiers:
//
// n[index]/n[index]/n[index]{/expand}
//
// Ends with "/expand" if the target is expand/collapse indicator,
// otherwise ends with the node
boolean isExpandCollapse = false;
if (!getElement().isOrHasChild(subElement)) {
return null;
}
if (subElement == getFocusElement()) {
return "fe";
}
TreeNode treeNode = WidgetUtil.findWidget(subElement, TreeNode.class);
if (treeNode == null) {
// Did not click on a node, let somebody else take care of the
// locator string
return null;
}
if (subElement == treeNode.getElement()) {
// Targets expand/collapse arrow
isExpandCollapse = true;
}
List<Integer> positions = new ArrayList<Integer>();
while (treeNode.getParentNode() != null) {
positions.add(0,
treeNode.getParentNode().getChildren().indexOf(treeNode));
treeNode = treeNode.getParentNode();
}
positions.add(0, getRootNodes().indexOf(treeNode));
String locator = "";
for (Integer i : positions) {
locator += SUBPART_NODE_PREFIX + "[" + i + "]/";
}
locator = locator.substring(0, locator.length() - 1);
if (isExpandCollapse) {
locator += "/" + EXPAND_IDENTIFIER;
}
return locator;
} | 3.68 |
framework_Button_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#readDesign(org.jsoup.nodes .Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
super.readDesign(design, designContext);
Attributes attr = design.attributes();
String content;
// plain-text (default is html)
Boolean plain = DesignAttributeHandler
.readAttribute(DESIGN_ATTR_PLAIN_TEXT, attr, Boolean.class);
if (plain == null || !plain) {
setCaptionAsHtml(true);
content = design.html();
} else {
// content is not intended to be interpreted as HTML,
// so html entities need to be decoded
content = DesignFormatter.decodeFromTextNode(design.html());
}
setCaption(content);
if (attr.hasKey("icon-alt")) {
setIconAlternateText(DesignAttributeHandler
.readAttribute("icon-alt", attr, String.class));
}
// click-shortcut
removeClickShortcut();
ShortcutAction action = DesignAttributeHandler
.readAttribute("click-shortcut", attr, ShortcutAction.class);
if (action != null) {
setClickShortcut(action.getKeyCode(), action.getModifiers());
}
} | 3.68 |
hadoop_SubApplicationEntityReader_getTable | /**
* Uses the {@link SubApplicationTableRW}.
*/
protected BaseTableRW<?> getTable() {
return SUB_APPLICATION_TABLE;
} | 3.68 |
hbase_NamespaceStateManager_start | /**
* Starts the NamespaceStateManager. The bootstrap of the cache is done in the post master start
* hook of the NamespaceAuditor class.
* @throws IOException Signals that an I/O exception has occurred.
*/
public void start() throws IOException {
LOG.info("Namespace State Manager started.");
initialize();
} | 3.68 |