name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
morf_AbstractSqlDialectTest_testMathsMultiply | /**
* Test that multiplying numbers returns as expected.
*/
@Test
public void testMathsMultiply() {
String result = testDialect.getSqlFrom(new MathsField(new FieldLiteral(1), MathsOperator.MULTIPLY, new FieldLiteral(1)));
assertEquals(expectedMathsMultiply(), result);
} | 3.68 |
hbase_QuotaFilter_isNull | /** Returns true if the filter is empty */
public boolean isNull() {
return !hasFilters;
} | 3.68 |
morf_SqlUtils_parameter | /**
* Constructs a new SQL named parameter from a column.
*
* @param column the parameter column.
* @return {@link SqlParameter}
*/
public static SqlParameter parameter(Column column) {
return new SqlParameter(column);
} | 3.68 |
hadoop_LeveldbIterator_hasNext | /**
* Returns <tt>true</tt> if the iteration has more elements.
*/
public boolean hasNext() throws DBException {
try {
return iter.hasNext();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.68 |
hadoop_ECPolicyLoader_loadECPolicies | /**
* Load EC policies from an XML configuration file.
* @param policyFile EC policy file
* @return list of EC policies
* @throws ParserConfigurationException if a ParserConfigurationException happens
* @throws IOException if no such EC policy file
* @throws SAXException if the xml file has some invalid elements
*/
private List<ErasureCodingPolicy> loadECPolicies(File policyFile)
throws ParserConfigurationException, IOException, SAXException {
LOG.info("Loading EC policy file " + policyFile);
// Read and parse the EC policy file.
DocumentBuilderFactory dbf = XMLUtils.newSecureDocumentBuilderFactory();
dbf.setIgnoringComments(true);
DocumentBuilder builder = dbf.newDocumentBuilder();
Document doc = builder.parse(policyFile);
Element root = doc.getDocumentElement();
if (!"configuration".equals(root.getTagName())) {
throw new RuntimeException("Bad EC policy configuration file: "
+ "top-level element not <configuration>");
}
List<ErasureCodingPolicy> policies;
if (root.getElementsByTagName("layoutversion").getLength() > 0) {
if (loadLayoutVersion(root) == LAYOUT_VERSION) {
if (root.getElementsByTagName("schemas").getLength() > 0) {
Map<String, ECSchema> schemas = loadSchemas(root);
if (root.getElementsByTagName("policies").getLength() > 0) {
policies = loadPolicies(root, schemas);
} else {
throw new RuntimeException("Bad EC policy configuration file: "
+ "no <policies> element");
}
} else {
throw new RuntimeException("Bad EC policy configuration file: "
+ "no <schemas> element");
}
} else {
throw new RuntimeException("The parse failed because of "
+ "bad layoutversion value");
}
} else {
throw new RuntimeException("Bad EC policy configuration file: "
+ "no <layoutVersion> element");
}
return policies;
} | 3.68 |
shardingsphere-elasticjob_ExecutionContextService_getJobShardingContext | /**
* Get job sharding context.
*
* @param shardingItems sharding items
* @return job sharding context
*/
public ShardingContexts getJobShardingContext(final List<Integer> shardingItems) {
JobConfiguration jobConfig = configService.load(false);
removeRunningIfMonitorExecution(jobConfig.isMonitorExecution(), shardingItems);
if (shardingItems.isEmpty()) {
return new ShardingContexts(buildTaskId(jobConfig, shardingItems), jobConfig.getJobName(), jobConfig.getShardingTotalCount(),
jobConfig.getJobParameter(), Collections.emptyMap());
}
Map<Integer, String> shardingItemParameterMap = new ShardingItemParameters(jobConfig.getShardingItemParameters()).getMap();
return new ShardingContexts(buildTaskId(jobConfig, shardingItems), jobConfig.getJobName(), jobConfig.getShardingTotalCount(),
jobConfig.getJobParameter(), getAssignedShardingItemParameterMap(shardingItems, shardingItemParameterMap));
} | 3.68 |
hbase_Procedure_shouldWaitClientAck | /**
* By default, the executor will keep the procedure result around until the eviction TTL has
* expired. The client can cut down the waiting time by requesting that the result be removed from
* the executor. In the case of a system-started procedure, we can force the executor to auto-ack.
* @param env the environment passed to the ProcedureExecutor
* @return true if the executor should wait the client ack for the result. Defaults to return
* true.
*/
protected boolean shouldWaitClientAck(TEnvironment env) {
return true;
} | 3.68 |
framework_Table_setItemDescriptionGenerator | /**
* Set the item description generator which generates tooltips for cells and
* rows in the Table.
*
* @param generator
* The generator to use or null to disable
*/
public void setItemDescriptionGenerator(
ItemDescriptionGenerator generator) {
if (generator != itemDescriptionGenerator) {
itemDescriptionGenerator = generator;
// Assures the visual refresh. No need to reset the page buffer
// before as the content has not changed, only the descriptions
refreshRenderedCells();
}
} | 3.68 |
querydsl_Expressions_asEnum | /**
* Create a new EnumExpression
*
* @param value enum
* @return new EnumExpression
*/
public static <T extends Enum<T>> EnumExpression<T> asEnum(T value) {
return asEnum(constant(value));
} | 3.68 |
flink_ExecutionConfig_getRestartStrategy | /**
* Returns the restart strategy which has been set for the current job.
*
* @return The specified restart configuration
*/
@PublicEvolving
@SuppressWarnings("deprecation")
public RestartStrategies.RestartStrategyConfiguration getRestartStrategy() {
if (restartStrategyConfiguration
instanceof RestartStrategies.FallbackRestartStrategyConfiguration) {
// support the old API calls by creating a restart strategy from them
if (getNumberOfExecutionRetries() > 0 && getExecutionRetryDelay() >= 0) {
return RestartStrategies.fixedDelayRestart(
getNumberOfExecutionRetries(), getExecutionRetryDelay());
} else if (getNumberOfExecutionRetries() == 0) {
return RestartStrategies.noRestart();
} else {
return restartStrategyConfiguration;
}
} else {
return restartStrategyConfiguration;
}
} | 3.68 |
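A short sketch of the legacy fallback path handled above: when only the old retry count/delay setters have been used, `getRestartStrategy()` derives a fixed-delay strategy from them. The values 3 and 1000 are arbitrary.

```java
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;

public class RestartStrategyFallbackDemo {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        ExecutionConfig config = new ExecutionConfig();
        // Only the old-style API is used here, so the restart strategy configuration
        // stays at its fallback default...
        config.setNumberOfExecutionRetries(3);
        config.setExecutionRetryDelay(1000L);
        // ...and getRestartStrategy() builds a fixed-delay strategy from those values.
        RestartStrategies.RestartStrategyConfiguration strategy = config.getRestartStrategy();
        System.out.println(strategy.getDescription());
    }
}
```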
hadoop_Anonymizer_anonymizeTopology | // anonymize the cluster topology file
private void anonymizeTopology() throws Exception {
if (anonymizeTopology) {
System.out.println("Anonymizing topology file: " + inputTopologyPath);
ClusterTopologyReader reader = null;
JsonGenerator outGen = null;
Configuration conf = getConf();
try {
// create a generator
outGen = createJsonGenerator(conf, outputTopologyPath);
// define the input cluster topology reader
reader = new ClusterTopologyReader(inputTopologyPath, conf);
// read the plain unanonymized logged job
LoggedNetworkTopology job = reader.get();
// write it via an anonymizing channel
outGen.writeObject(job);
System.out.println("Anonymized topology file: " + outputTopologyPath);
} finally {
if (outGen != null) {
outGen.close();
}
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_expectedDecimalRepresentationOfLiteral | /**
* @param literal The literal whose decimal representation will be returned
* @return the decimal representation of a literal for testing
*/
protected String expectedDecimalRepresentationOfLiteral(String literal) {
return literal;
} | 3.68 |
hbase_ScannerContext_setScannerState | /**
* Note that this is not a typical setter. This setter returns the {@link NextState} that was
* passed in so that methods can be invoked against the new state. Furthermore, this pattern
* allows the {@link NoLimitScannerContext} to cleanly override this setter and simply return the
* new state, thus preserving the immutability of {@link NoLimitScannerContext}
* @return The state that was passed in.
*/
NextState setScannerState(NextState state) {
if (!NextState.isValidState(state)) {
throw new IllegalArgumentException("Cannot set to invalid state: " + state);
}
this.scannerState = state;
return state;
} | 3.68 |
hbase_DeadServer_isDeadServer | /**
* @param serverName server name.
* @return true if this server is on the dead servers list false otherwise
*/
public synchronized boolean isDeadServer(final ServerName serverName) {
return deadServers.containsKey(serverName);
} | 3.68 |
hbase_RequestConverter_buildSetSplitOrMergeEnabledRequest | /**
* Creates a protocol buffer SetSplitOrMergeEnabledRequest
* @param enabled whether the switch should be enabled
* @param synchronous whether to set the switch synchronously
* @param switchTypes see {@link org.apache.hadoop.hbase.client.MasterSwitchType}, it is a list.
* @return a SetSplitOrMergeEnabledRequest
*/
public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled,
boolean synchronous, MasterSwitchType... switchTypes) {
SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder();
builder.setEnabled(enabled);
builder.setSynchronous(synchronous);
for (MasterSwitchType switchType : switchTypes) {
builder.addSwitchTypes(convert(switchType));
}
return builder.build();
} | 3.68 |
hbase_AsyncAdmin_getSlowLogResponses | /**
* Retrieves online slow RPC logs from the provided list of RegionServers
* @param serverNames Server names to get slowlog responses from
* @param logQueryFilter filter to be used if provided
* @return Online slowlog response list. The return value wrapped by a {@link CompletableFuture}
* @deprecated since 2.4.0 and will be removed in 4.0.0. Use
* {@link #getLogEntries(Set, String, ServerType, int, Map)} instead.
*/
@Deprecated
default CompletableFuture<List<OnlineLogRecord>>
getSlowLogResponses(final Set<ServerName> serverNames, final LogQueryFilter logQueryFilter) {
String logType;
if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) {
logType = "LARGE_LOG";
} else {
logType = "SLOW_LOG";
}
Map<String, Object> filterParams = new HashMap<>();
filterParams.put("regionName", logQueryFilter.getRegionName());
filterParams.put("clientAddress", logQueryFilter.getClientAddress());
filterParams.put("tableName", logQueryFilter.getTableName());
filterParams.put("userName", logQueryFilter.getUserName());
filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString());
CompletableFuture<List<LogEntry>> logEntries = getLogEntries(serverNames, logType,
ServerType.REGION_SERVER, logQueryFilter.getLimit(), filterParams);
return logEntries.thenApply(logEntryList -> logEntryList.stream()
.map(logEntry -> (OnlineLogRecord) logEntry).collect(Collectors.toList()));
} | 3.68 |
framework_Method_getSignature | /**
* The unique signature used to identify this method. The structure of the
* returned string may change without notice and should not be used for any
* other purpose than identification. The signature is currently based on
* the declaring type's signature and the method's name.
*
* @return the unique signature of this method
*/
public String getSignature() {
return type.getSignature() + "." + name;
} | 3.68 |
flink_TaskStateStats_getSubtaskStats | /**
* Returns the stats for all subtasks.
*
* <p>Elements of the returned array are <code>null</code> if no stats are available yet for the
* respective subtask.
*
* <p>Note: The returned array must not be modified.
*
* @return Array of subtask stats (elements are <code>null</code> if no stats available yet).
*/
public SubtaskStateStats[] getSubtaskStats() {
return subtaskStats;
} | 3.68 |
flink_RequestedLocalProperties_getGroupedFields | /**
* Gets the grouped fields.
*
* @return The grouped fields, or <code>null</code> if nothing is grouped.
*/
public FieldSet getGroupedFields() {
return this.groupedFields;
} | 3.68 |
flink_GenericDataSinkBase_getLocalOrder | /**
* Gets the order in which the data sink writes its data locally. Local order means that within
* each fragment of the file inside the distributed file system, the data is ordered, but not
* across file fragments.
*
* @return NONE, if the sink writes data in any order, or ASCENDING (resp. DESCENDING), if the
* sink writes it data with a local ascending (resp. descending) order.
*/
public Ordering getLocalOrder() {
return this.localOrdering;
} | 3.68 |
hbase_OrderedBytes_encodeNumericLarge | /**
* Encode the large magnitude floating point number {@code val} using the key encoding. The caller
* guarantees that {@code val} will be finite and abs(val) >= 1.0.
* <p>
* A floating point value is encoded as an integer exponent {@code E} and a mantissa {@code M}.
* The original value is equal to {@code (M * 100^E)}. {@code E} is set to the smallest value
* possible without making {@code M} greater than or equal to 1.0.
* </p>
* <p>
* Each centimal digit of the mantissa is stored in a byte. If the value of the centimal digit is
* {@code X} (hence {@code X>=0} and {@code X<=99}) then the byte value will be {@code 2*X+1} for
* every byte of the mantissa, except for the last byte which will be {@code 2*X+0}. The mantissa
* must be the minimum number of bytes necessary to represent the value; trailing {@code X==0}
* digits are omitted. This means that the mantissa will never contain a byte with the value
* {@code 0x00}.
* </p>
* <p>
* If {@code E > 10}, then this routine writes out {@code E} as a varint followed by the mantissa
* as described above. Otherwise, if {@code E <= 10}, this routine only writes the mantissa and
* leaves the {@code E} value to be encoded as part of the opening byte of the field by the
* calling function.
*
* <pre>
* Encoding: M (if E<=10)
* E M (if E>10)
* </pre>
* </p>
* @param dst The destination to which encoded digits are written.
* @param val The value to encode.
* @return the number of bytes written.
*/
private static int encodeNumericLarge(PositionedByteRange dst, BigDecimal val) {
// TODO: this can be done faster
BigDecimal abs = val.abs();
byte[] a = dst.getBytes();
boolean isNeg = val.signum() == -1;
final int start = dst.getPosition(), offset = dst.getOffset();
if (isNeg) { /* Large negative number: 0x08, ~E, ~M */
dst.put(NEG_LARGE);
} else { /* Large positive number: 0x22, E, M */
dst.put(POS_LARGE);
}
// normalize abs(val) to determine E
int integerDigits = abs.precision() - abs.scale();
int lengthToMoveLeft = integerDigits % 2 == 0 ? integerDigits : integerDigits + 1;
int e = lengthToMoveLeft / 2;
abs = abs.movePointLeft(lengthToMoveLeft);
// encode appropriate header byte and/or E value.
if (e > 10) { /* large number, write out {~,}E */
putVaruint64(dst, e, isNeg);
} else {
if (isNeg) { /* Medium negative number: 0x13-E, ~M */
dst.put(start, (byte) (NEG_MED_MAX - e));
} else { /* Medium positive number: 0x17+E, M */
dst.put(start, (byte) (POS_MED_MIN + e));
}
}
// encode M by peeling off centimal digits, encoding x as 2x+1
int startM = dst.getPosition();
encodeToCentimal(dst, abs);
// terminal digit should be 2x
a[offset + dst.getPosition() - 1] = (byte) (a[offset + dst.getPosition() - 1] & 0xfe);
if (isNeg) {
// negative values encoded as ~M
DESCENDING.apply(a, offset + startM, dst.getPosition() - startM);
}
return dst.getPosition() - start;
} | 3.68 |
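A rough worked example of the E/M normalization performed above, using only java.math (this is not HBase API usage, just the same arithmetic, shown for 1234.5):

```java
import java.math.BigDecimal;

public class CentimalNormalizationDemo {
    public static void main(String[] args) {
        BigDecimal abs = new BigDecimal("1234.5").abs();
        // Same normalization as encodeNumericLarge: shift the decimal point left by an
        // even number of digits so the mantissa becomes a fraction and E counts base-100 digits.
        int integerDigits = abs.precision() - abs.scale();                 // 4
        int lengthToMoveLeft = integerDigits % 2 == 0 ? integerDigits : integerDigits + 1;
        int e = lengthToMoveLeft / 2;                                      // E = 2
        abs = abs.movePointLeft(lengthToMoveLeft);                         // M = 0.12345
        // 1234.5 == 0.12345 * 100^2; the centimal digits of M are 12, 34, 50,
        // which encode as bytes 2*12+1, 2*34+1 and 2*50 (the last digit uses 2*X).
        System.out.println("E=" + e + ", M=" + abs);
    }
}
```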
framework_VTooltip_onFocus | /**
* Displays Tooltip when page is navigated with the keyboard.
*
* Tooltip is not visible. This makes it possible for assistive devices
* to recognize the tooltip.
*/
@Override
public void onFocus(FocusEvent fe) {
handleShowHide(fe, true);
} | 3.68 |
flink_ListKeyGroupedIterator_getValues | /**
* Returns an iterator over all values that belong to the current key. The iterator is initially
* <code>null</code> (before the first call to {@link #nextKey()}) and after all keys are
* consumed. In general, this method always returns a non-null value if a previous call to
* {@link #nextKey()} returned <code>true</code>.
*
* @return Iterator over all values that belong to the current key.
*/
public ValuesIterator getValues() {
return this.valuesIterator;
} | 3.68 |
framework_PushConfiguration_getFallbackTransport | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.PushConfiguration#getFallbackTransport()
*/
@Override
public Transport getFallbackTransport() {
try {
return Transport.valueOf(getParameter(
PushConfigurationState.FALLBACK_TRANSPORT_PARAM));
} catch (IllegalArgumentException e) {
return null;
}
} | 3.68 |
hadoop_AbstractDTService_requireServiceState | /**
* Require that the service is in a given state.
* @param state desired state.
* @throws IllegalStateException if the condition is not met
*/
protected void requireServiceState(final STATE state)
throws IllegalStateException {
Preconditions.checkState(isInState(state),
"Required State: %s; Actual State %s", state, getServiceState());
} | 3.68 |
hbase_UserScanQueryMatcher_mergeFilterResponse | /**
* Call this when the scan has a filter. The desired behavior is decided by checkVersions's
* MatchCode and filterCell's ReturnCode. A cell may be skipped by the filter, so the column
* versions in the result may be fewer than the user needs; versions need to be checked again
* when both the filter and the columnTracker include the cell. <br/>
*
* <pre>
* ColumnChecker FilterResponse Desired behavior
* INCLUDE SKIP SKIP
* INCLUDE NEXT_COL SEEK_NEXT_COL or SEEK_NEXT_ROW
* INCLUDE NEXT_ROW SEEK_NEXT_ROW
* INCLUDE SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT
* INCLUDE INCLUDE INCLUDE
* INCLUDE INCLUDE_AND_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL
* INCLUDE INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_COL SKIP SEEK_NEXT_COL
* INCLUDE_AND_SEEK_NEXT_COL NEXT_COL SEEK_NEXT_COL or SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_COL NEXT_ROW SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_COL SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT
* INCLUDE_AND_SEEK_NEXT_COL INCLUDE INCLUDE_AND_SEEK_NEXT_COL
* INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL
* INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_ROW SKIP SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_ROW NEXT_COL SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_ROW NEXT_ROW SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_ROW SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT
* INCLUDE_AND_SEEK_NEXT_ROW INCLUDE INCLUDE_AND_SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW
* INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW
* </pre>
*/
private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode,
ReturnCode filterResponse) {
switch (filterResponse) {
case SKIP:
if (matchCode == MatchCode.INCLUDE) {
return MatchCode.SKIP;
} else if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL) {
return MatchCode.SEEK_NEXT_COL;
} else if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) {
return MatchCode.SEEK_NEXT_ROW;
}
break;
case NEXT_COL:
if (matchCode == MatchCode.INCLUDE || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL) {
return columns.getNextRowOrNextColumn(cell);
} else if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) {
return MatchCode.SEEK_NEXT_ROW;
}
break;
case NEXT_ROW:
return MatchCode.SEEK_NEXT_ROW;
case SEEK_NEXT_USING_HINT:
return MatchCode.SEEK_NEXT_USING_HINT;
case INCLUDE:
break;
case INCLUDE_AND_NEXT_COL:
if (matchCode == MatchCode.INCLUDE) {
matchCode = MatchCode.INCLUDE_AND_SEEK_NEXT_COL;
}
break;
case INCLUDE_AND_SEEK_NEXT_ROW:
matchCode = MatchCode.INCLUDE_AND_SEEK_NEXT_ROW;
break;
default:
throw new RuntimeException("UNEXPECTED");
}
// It means it is INCLUDE, INCLUDE_AND_SEEK_NEXT_COL or INCLUDE_AND_SEEK_NEXT_ROW.
assert matchCode == MatchCode.INCLUDE || matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL
|| matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW;
// We need to make sure that the number of cells returned will not exceed max version in scan
// when the match code is INCLUDE* case.
if (curColCell == null || !CellUtil.matchingRowColumn(cell, curColCell)) {
count = 0;
curColCell = cell;
}
count += 1;
if (count > versionsAfterFilter) {
// when the number of cells exceed max version in scan, we should return SEEK_NEXT_COL match
// code, but if current code is INCLUDE_AND_SEEK_NEXT_ROW, we can optimize to choose the max
// step between SEEK_NEXT_COL and INCLUDE_AND_SEEK_NEXT_ROW, which is SEEK_NEXT_ROW.
if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) {
matchCode = MatchCode.SEEK_NEXT_ROW;
} else {
matchCode = MatchCode.SEEK_NEXT_COL;
}
}
if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL || matchCode == MatchCode.SEEK_NEXT_COL) {
// Update column tracker to next column, As we use the column hint from the tracker to seek
// to next cell (HBASE-19749)
columns.doneWithColumn(cell);
}
return matchCode;
} | 3.68 |
rocketmq-connect_RetryUtil_executeWithRetry | /**
* Execute a callable, retrying on exception.
*
* @param callable the task to execute
* @param retryTimes maximum number of retry attempts
* @param sleepTimeInMilliSecond sleep time between retries, in milliseconds
* @param exponential whether to back off exponentially between retries
* @param retryExceptionClasss the exception classes that should trigger a retry
* @param <T> the result type of the callable
* @return the result of the callable
* @throws Exception if the callable still fails after all retries
*/
public static <T> T executeWithRetry(Callable<T> callable,
int retryTimes,
long sleepTimeInMilliSecond,
boolean exponential,
List<Class<?>> retryExceptionClasss) throws Exception {
Retry retry = new Retry();
return retry.doRetry(callable, retryTimes, sleepTimeInMilliSecond, exponential, retryExceptionClasss);
} | 3.68 |
flink_FileInputFormat_configure | /**
* Configures the file input format by reading the file path from the configuration.
*
* @see
* org.apache.flink.api.common.io.InputFormat#configure(org.apache.flink.configuration.Configuration)
*/
@Override
public void configure(Configuration parameters) {
if (getFilePaths().length == 0) {
// file path was not specified yet. Try to set it from the parameters.
String filePath = parameters.getString(FILE_PARAMETER_KEY, null);
if (filePath == null) {
throw new IllegalArgumentException(
"File path was not specified in input format or configuration.");
} else {
setFilePath(filePath);
}
}
if (!this.enumerateNestedFiles) {
this.enumerateNestedFiles = parameters.getBoolean(ENUMERATE_NESTED_FILES_FLAG, false);
}
} | 3.68 |
hadoop_SnappyCodec_createOutputStream | /**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out the location for the final output stream
* @param compressor compressor to use
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException raised on errors performing I/O.
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
int compressionOverhead = (bufferSize / 6) + 32;
return new BlockCompressorStream(out, compressor, bufferSize,
compressionOverhead);
} | 3.68 |
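A minimal write-path sketch for this codec; the output file name and payload are made up, and it assumes Snappy support is available on the classpath/native libraries:

```java
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.SnappyCodec;

public class SnappyWriteDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        SnappyCodec codec = new SnappyCodec();
        codec.setConf(conf); // the buffer size is read from this conf in createOutputStream

        OutputStream raw = new FileOutputStream("data.snappy"); // hypothetical destination
        Compressor compressor = codec.createCompressor();
        try (CompressionOutputStream out = codec.createOutputStream(raw, compressor)) {
            out.write("hello snappy".getBytes(StandardCharsets.UTF_8)); // uncompressed bytes in
            out.finish(); // flush the final compressed block
        }
    }
}
```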
hbase_BufferedMutatorParams_setWriteBufferPeriodicFlushTimeoutMs | /**
* Set the max timeout before the buffer is automatically flushed.
*/
public BufferedMutatorParams setWriteBufferPeriodicFlushTimeoutMs(long timeoutMs) {
this.writeBufferPeriodicFlushTimeoutMs = timeoutMs;
return this;
} | 3.68 |
hudi_HoodieAvroUtils_getNullableValAsString | /**
* Returns the string value of the given record {@code rec} and field {@code fieldName}.
* The field and value both could be missing.
*
* @param rec The record
* @param fieldName The field name
* @return the string form of the field
* or empty if the schema does not contain the field name or the value is null
*/
public static Option<String> getNullableValAsString(GenericRecord rec, String fieldName) {
Schema.Field field = rec.getSchema().getField(fieldName);
String fieldVal = field == null ? null : StringUtils.objToString(rec.get(field.pos()));
return Option.ofNullable(fieldVal);
} | 3.68 |
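A small usage sketch; the "trip" schema and its "rider" field are hypothetical, and it assumes the usual HoodieAvroUtils/Option classes from hudi-common:

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;
import org.apache.hudi.common.util.Option;

public class NullableFieldDemo {
    public static void main(String[] args) {
        Schema schema = SchemaBuilder.record("trip").fields()
            .optionalString("rider")
            .endRecord();
        GenericRecord record = new GenericData.Record(schema);
        record.put("rider", "rider-42");

        // Field present and non-null -> Option carrying its string form
        Option<String> rider = HoodieAvroUtils.getNullableValAsString(record, "rider");
        System.out.println(rider.isPresent() ? rider.get() : "missing");

        // Field not in the schema -> empty Option instead of an exception
        Option<String> driver = HoodieAvroUtils.getNullableValAsString(record, "driver");
        System.out.println(driver.isPresent()); // false
    }
}
```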
querydsl_SQLExpressions_nextval | /**
* Create a nextval(sequence) expression of the given type
*
* <p>Returns the next value from the given sequence</p>
*
* @param type type of call
* @param sequence sequence name
* @return nextval(sequence)
*/
public static <T extends Number> SimpleExpression<T> nextval(Class<T> type, String sequence) {
return Expressions.operation(type, SQLOps.NEXTVAL, ConstantImpl.create(sequence));
} | 3.68 |
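A hedged call-site sketch, assuming a configured Querydsl SQLQueryFactory and a database sequence named person_seq:

```java
import com.querydsl.core.types.dsl.SimpleExpression;
import com.querydsl.sql.SQLExpressions;
import com.querydsl.sql.SQLQueryFactory;

public class NextvalDemo {
    static Long fetchNextId(SQLQueryFactory queryFactory) {
        // Rendered as the dialect-specific nextval call, e.g. nextval('person_seq')
        SimpleExpression<Long> nextId = SQLExpressions.nextval(Long.class, "person_seq");
        return queryFactory.select(nextId).fetchOne();
    }
}
```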
rocketmq-connect_ConnectMetrics_templates | /**
* Get the connect metrics templates.
*
* @return the connect metrics templates
*/
public ConnectMetricsTemplates templates() {
return templates;
} | 3.68 |
framework_DefaultConnectionStateHandler_giveUp | /**
* Called when we should give up trying to reconnect and let the user decide
* how to continue.
*
*/
protected void giveUp() {
reconnectionCause = null;
endRequest();
stopDialogTimer();
if (!isDialogVisible()) {
// It SHOULD always be visible at this point, unless you have a
// really strange configuration (grace time longer than total
// reconnect time)
showDialog();
}
reconnectDialog.setText(getDialogTextGaveUp(reconnectAttempt));
reconnectDialog.setReconnecting(false);
// Stopping the application stops heartbeats and push
connection.setApplicationRunning(false);
} | 3.68 |
hadoop_S3AReadOpContext_getAuditSpan | /**
* Get the audit span which was active when the file was opened.
* @return active span
*/
public AuditSpan getAuditSpan() {
return auditSpan;
} | 3.68 |
morf_GraphBasedUpgradeNode_getUpgradeStatements | /**
* @return ordered list of statements to be executed by this upgrade node
*/
public List<String> getUpgradeStatements() {
return upgradeStatements;
} | 3.68 |
querydsl_Expressions_enumTemplate | /**
* Create a new Template expression
*
* @param cl type of expression
* @param template template
* @param args template parameters
* @return template expression
*/
public static <T extends Enum<T>> EnumTemplate<T> enumTemplate(Class<? extends T> cl, Template template, List<?> args) {
return new EnumTemplate<T>(cl, template, args);
} | 3.68 |
hbase_AccessChecker_hasUserPermission | /**
* Checks whether the given user has the specified permission.
* @param user Active user to which authorization checks should be applied
* @param request Request type
* @param permission Actions being requested
* @return True if the user has the specific permission
*/
public boolean hasUserPermission(User user, String request, Permission permission) {
if (permission instanceof TablePermission) {
TablePermission tPerm = (TablePermission) permission;
for (Permission.Action action : permission.getActions()) {
AuthResult authResult = permissionGranted(request, user, action, tPerm.getTableName(),
tPerm.getFamily(), tPerm.getQualifier());
AccessChecker.logResult(authResult);
if (!authResult.isAllowed()) {
return false;
}
}
} else if (permission instanceof NamespacePermission) {
NamespacePermission nsPerm = (NamespacePermission) permission;
AuthResult authResult;
for (Action action : nsPerm.getActions()) {
if (getAuthManager().authorizeUserNamespace(user, nsPerm.getNamespace(), action)) {
authResult =
AuthResult.allow(request, "Namespace action allowed", user, action, null, null);
} else {
authResult =
AuthResult.deny(request, "Namespace action denied", user, action, null, null);
}
AccessChecker.logResult(authResult);
if (!authResult.isAllowed()) {
return false;
}
}
} else {
AuthResult authResult;
for (Permission.Action action : permission.getActions()) {
if (getAuthManager().authorizeUserGlobal(user, action)) {
authResult = AuthResult.allow(request, "Global action allowed", user, action, null, null);
} else {
authResult = AuthResult.deny(request, "Global action denied", user, action, null, null);
}
AccessChecker.logResult(authResult);
if (!authResult.isAllowed()) {
return false;
}
}
}
return true;
} | 3.68 |
dubbo_DubboShutdownHook_unregister | /**
* Unregister the ShutdownHook
*/
public void unregister() {
if (!ignoreListenShutdownHook && registered.compareAndSet(true, false)) {
if (this.isAlive()) {
// DubboShutdownHook thread is running
return;
}
try {
Runtime.getRuntime().removeShutdownHook(this);
} catch (IllegalStateException e) {
logger.warn(
CONFIG_FAILED_SHUTDOWN_HOOK, "", "", "unregister shutdown hook failed: " + e.getMessage(), e);
} catch (Exception e) {
logger.warn(
CONFIG_FAILED_SHUTDOWN_HOOK, "", "", "unregister shutdown hook failed: " + e.getMessage(), e);
}
}
} | 3.68 |
flink_SegmentsUtil_setShort | /**
* set short from segments.
*
* @param segments target segments.
* @param offset value offset.
* @param value short value to set.
*/
public static void setShort(MemorySegment[] segments, int offset, short value) {
if (inFirstSegment(segments, offset, 2)) {
segments[0].putShort(offset, value);
} else {
setShortMultiSegments(segments, offset, value);
}
} | 3.68 |
hbase_RateLimiter_consume | /**
* Consume the given amount of available units; the amount may be a negative number.
* @param amount the number of units to consume
*/
public synchronized void consume(final long amount) {
if (isBypass()) {
return;
}
if (amount >= 0) {
this.avail -= amount;
} else {
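      // amount is negative, i.e. units are being handed back; only add them if
      // avail - amount cannot overflow past Long.MAX_VALUE, otherwise reset to the limit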
if (this.avail <= Long.MAX_VALUE + amount) {
this.avail -= amount;
this.avail = Math.min(this.avail, this.limit);
} else {
this.avail = this.limit;
}
}
} | 3.68 |
flink_ExecNodeConfig_isCompiled | /** @return Whether the {@link ExecNode} translation happens as part of a plan compilation. */
public boolean isCompiled() {
return isCompiled;
} | 3.68 |
framework_Tree_getMultiselectMode | /**
* Returns the mode the multiselect is in. The mode controls how
* multiselection can be done.
*
* @return The mode
*/
public MultiSelectMode getMultiselectMode() {
return multiSelectMode;
} | 3.68 |
zxing_BarcodeRow_getScaledRow | /**
* This function scales the row
*
* @param scale How much you want the image to be scaled, must be greater than or equal to 1.
* @return the scaled row
*/
byte[] getScaledRow(int scale) {
byte[] output = new byte[row.length * scale];
for (int i = 0; i < output.length; i++) {
output[i] = row[i / scale];
}
return output;
} | 3.68 |
flink_HiveParserDMLHelper_isTypeConversionNeeded | // to check whether it's needed to do type conversion
private static boolean isTypeConversionNeeded(
RelNode queryRelNode, List<RelDataType> targetCalcTypes) {
List<RelDataTypeField> fields = queryRelNode.getRowType().getFieldList();
Preconditions.checkState(fields.size() == targetCalcTypes.size());
for (int i = 0; i < fields.size(); i++) {
if (fields.get(i).getType().getSqlTypeName()
!= targetCalcTypes.get(i).getSqlTypeName()) {
return true;
}
}
return false;
} | 3.68 |
querydsl_QueryResults_getOffset | /**
* Get the offset value used for the query
*
* @return applied offset
*/
public long getOffset() {
return offset;
} | 3.68 |
flink_StopWithSavepointTerminationHandlerImpl_terminateExceptionallyWithGlobalFailover | /**
* Handles the termination of the {@code StopWithSavepointTerminationHandler} exceptionally
* after triggering a global job fail-over.
*
* @param unfinishedExecutionStates the unfinished states that caused the failure.
* @param savepointPath the path to the successfully created savepoint.
*/
private void terminateExceptionallyWithGlobalFailover(
Iterable<ExecutionState> unfinishedExecutionStates, String savepointPath) {
StopWithSavepointStoppingException inconsistentFinalStateException =
new StopWithSavepointStoppingException(savepointPath, jobId);
log.warn(
"Inconsistent execution state after stopping with savepoint. At least one"
+ " execution is still in one of the following states: {}.",
StringUtils.join(unfinishedExecutionStates, ", "),
inconsistentFinalStateException);
scheduler.handleGlobalFailure(inconsistentFinalStateException);
result.completeExceptionally(inconsistentFinalStateException);
} | 3.68 |
flink_ZooKeeperUtils_createZooKeeperStateHandleStore | /**
* Creates an instance of {@link ZooKeeperStateHandleStore}.
*
* @param client ZK client
* @param path Path to use for the client namespace
* @param stateStorage RetrievableStateStorageHelper that persist the actual state and whose
* returned state handle is then written to ZooKeeper
* @param <T> Type of state
* @return {@link ZooKeeperStateHandleStore} instance
* @throws Exception ZK errors
*/
public static <T extends Serializable>
ZooKeeperStateHandleStore<T> createZooKeeperStateHandleStore(
final CuratorFramework client,
final String path,
final RetrievableStateStorageHelper<T> stateStorage)
throws Exception {
return new ZooKeeperStateHandleStore<>(
useNamespaceAndEnsurePath(client, path), stateStorage);
} | 3.68 |
framework_XhrConnection_setConnection | /**
* Sets the application connection this instance is connected to. Called
* internally by the framework.
*
* @param connection
* the application connection this instance is connected to
*/
public void setConnection(ApplicationConnection connection) {
this.connection = connection;
connection.addHandler(ResponseHandlingEndedEvent.TYPE,
new CommunicationHandler() {
@Override
public void onRequestStarting(RequestStartingEvent e) {
}
@Override
public void onResponseHandlingStarted(
ResponseHandlingStartedEvent e) {
}
@Override
public void onResponseHandlingEnded(
ResponseHandlingEndedEvent e) {
webkitMaybeIgnoringRequests = false;
}
});
} | 3.68 |
hadoop_FSBuilder_mustLong | /**
* Set mandatory long parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B mustLong(@Nonnull String key, long value) {
return must(key, Long.toString(value));
} | 3.68 |
framework_CustomLayoutDemo_setBody | /**
* Set body panel caption, remove all existing components and add given
* custom layout in it.
*
* @param customLayout the template name of the custom layout to show in the body panel
*/
public void setBody(String customLayout) {
VerticalLayout bodyLayout = new VerticalLayout();
bodyLayout.setMargin(true);
bodyLayout.addComponent(new CustomLayout(customLayout));
bodyPanel.setContent(bodyLayout);
bodyPanel.setCaption(customLayout + ".html");
} | 3.68 |
dubbo_NacosRegistry_getLegacySubscribedServiceName | /**
* Get the legacy subscribed service name, for compatibility with Dubbo 2.7.3 and below
*
* @param url {@link URL}
* @return non-null
* @since 2.7.6
*/
private String getLegacySubscribedServiceName(URL url) {
StringBuilder serviceNameBuilder = new StringBuilder(DEFAULT_CATEGORY);
appendIfPresent(serviceNameBuilder, url, INTERFACE_KEY);
appendIfPresent(serviceNameBuilder, url, VERSION_KEY);
appendIfPresent(serviceNameBuilder, url, GROUP_KEY);
return serviceNameBuilder.toString();
} | 3.68 |
hadoop_AbortTaskStage_executeStage | /**
* Delete the task attempt directory.
* @param suppressExceptions should exceptions be ignored?
* @return the directory
* @throws IOException failure when exceptions were not suppressed
*/
@Override
protected Path executeStage(final Boolean suppressExceptions)
throws IOException {
final Path dir = getTaskAttemptDir();
if (dir != null) {
LOG.info("{}: Deleting task attempt directory {}", getName(), dir);
deleteDir(dir, suppressExceptions);
}
return dir;
} | 3.68 |
graphhopper_MinHeapWithUpdate_peekId | /**
* @return the id of the next element to be polled, i.e. the same as calling poll() without removing the element
*/
public int peekId() {
return tree[1];
} | 3.68 |
hadoop_AbfsRestOperation_getLastTracingContext | /**
* Returns the tracing context used for the last REST operation made.
* @return the last used tracing context.
*/
@VisibleForTesting
public final TracingContext getLastTracingContext() {
return lastUsedTracingContext;
} | 3.68 |
AreaShop_UnrentCommand_canUse | /**
* Check if a person can unrent the region.
* @param person The person to check
* @param region The region to check for
* @return true if the person can unrent it, otherwise false
*/
public static boolean canUse(CommandSender person, GeneralRegion region) {
if(person.hasPermission("areashop.unrent")) {
return true;
}
if(person instanceof Player) {
Player player = (Player)person;
return region.isOwner(player) && person.hasPermission("areashop.unrentown");
}
return false;
} | 3.68 |
hbase_InputStreamBlockDistribution_getHDFSBlockDistribution | /**
* Get the HDFSBlocksDistribution derived from the StoreFile input stream, re-computing if cache
* is expired.
*/
public synchronized HDFSBlocksDistribution getHDFSBlockDistribution() {
if (EnvironmentEdgeManager.currentTime() - lastCachedAt > cachePeriodMs) {
try {
LOG.debug("Refreshing HDFSBlockDistribution for {}", fileInfo);
computeBlockDistribution();
} catch (IOException e) {
LOG.warn("Failed to recompute block distribution for {}. Falling back on cached value.",
fileInfo, e);
}
}
return hdfsBlocksDistribution;
} | 3.68 |
hadoop_LoggedJob_getQueue | /**
* @return job queue name if it is available in job history file or
* job history conf file. Returns null otherwise.
*/
public QueueName getQueue() {
return queue;
} | 3.68 |
hbase_TableRecordReaderImpl_setRowFilter | /**
* @param rowFilter the {@link Filter} to be used.
*/
public void setRowFilter(Filter rowFilter) {
this.trrRowFilter = rowFilter;
} | 3.68 |
hbase_Constraints_changeConstraintEnabled | /**
* Change whether the constraint (if it is already present) is enabled or disabled.
*/
private static TableDescriptorBuilder changeConstraintEnabled(TableDescriptorBuilder builder,
Class<? extends Constraint> clazz, boolean enabled) throws IOException {
// get the original constraint
Pair<String, String> entry = getKeyValueForClass(builder, clazz);
if (entry == null) {
throw new IllegalArgumentException("Constraint: " + clazz.getName()
+ " is not associated with this table. You can't enable it!");
}
// create a new configuration from that conf
Configuration conf = readConfiguration(entry.getSecond());
// set that it is enabled
conf.setBoolean(ENABLED_KEY, enabled);
// write it back out
return writeConstraint(builder, entry.getFirst(), conf);
} | 3.68 |
zilla_ManyToOneRingBuffer_buffer | /**
* {@inheritDoc}
*/
public AtomicBuffer buffer()
{
return buffer;
} | 3.68 |
flink_TypeExtractor_getClosestFactory | /**
* Traverses the type hierarchy up until a type information factory can be found.
*
* @param typeHierarchy hierarchy to be filled while traversing up
* @param t type for which a factory needs to be found
* @return closest type information factory or null if there is no factory in the type hierarchy
*/
private static <OUT> TypeInfoFactory<? super OUT> getClosestFactory(
List<Type> typeHierarchy, Type t) {
TypeInfoFactory<OUT> factory = null;
while (factory == null && isClassType(t) && !(typeToClass(t).equals(Object.class))) {
typeHierarchy.add(t);
factory = getTypeInfoFactory(t);
t = typeToClass(t).getGenericSuperclass();
if (t == null) {
break;
}
}
return factory;
} | 3.68 |
flink_OperatingSystem_isMac | /**
* Checks whether the operating system this JVM runs on is Mac OS.
*
* @return <code>true</code> if the operating system this JVM runs on is Mac OS, <code>false
* </code> otherwise
*/
public static boolean isMac() {
return getCurrentOperatingSystem() == MAC_OS;
} | 3.68 |
open-banking-gateway_PaymentAccessFactory_paymentForFintech | /**
* Create {@code PaymentAccess} object that is similar to consent facing to FinTech.
* @param fintech Fintech that initiates the payment
* @param session Session that identifies the payment.
* @param fintechPassword FinTech Datasafe/KeyStore password
* @return Payment context
*/
public PaymentAccess paymentForFintech(Fintech fintech, ServiceSession session, Supplier<char[]> fintechPassword) {
return new FintechPaymentAccess(fintech, psuEncryption, fintechPsuAspspPrvKeyRepository, fintechVault, paymentRepository, entityManager, session.getId(), fintechPassword);
} | 3.68 |
hudi_HoodieFlinkCopyOnWriteTable_scheduleCleaning | /**
* @param context HoodieEngineContext
* @param instantTime Instant Time for scheduling cleaning
* @param extraMetadata additional metadata to write into plan
* @return the generated cleaner plan if cleaning is scheduled, or an empty option otherwise
*/
@Override
public Option<HoodieCleanerPlan> scheduleCleaning(HoodieEngineContext context, String instantTime, Option<Map<String, String>> extraMetadata) {
return new CleanPlanActionExecutor(context, config, this, instantTime, extraMetadata).execute();
} | 3.68 |
AreaShop_CommandManager_getCommands | /**
* Get the list with AreaShop commands.
* @return The list with AreaShop commands
*/
public List<CommandAreaShop> getCommands() {
return commands;
} | 3.68 |
hbase_CatalogFamilyFormat_getServerName | /**
* Returns a {@link ServerName} from catalog table {@link Result}.
* @param r Result to pull from
* @return A ServerName instance or null if necessary fields not found or empty.
*/
@Nullable
public static ServerName getServerName(Result r, int replicaId) {
byte[] serverColumn = getServerColumn(replicaId);
Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, serverColumn);
if (cell == null || cell.getValueLength() == 0) {
return null;
}
String hostAndPort =
Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
byte[] startcodeColumn = getStartCodeColumn(replicaId);
cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, startcodeColumn);
if (cell == null || cell.getValueLength() == 0) {
return null;
}
try {
return ServerName.valueOf(hostAndPort,
Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
} catch (IllegalArgumentException e) {
LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e);
return null;
}
} | 3.68 |
hadoop_BalanceJournalInfoHDFS_saveJob | /**
* Save job journal to HDFS.
*
* All the journals are saved in the path base-dir. Each job has an individual
* directory named after the job id.
* When a job is saved, a new journal file is created. The file's name
* consists of a prefix 'JOB-' and an incremental sequential id. The file with
* the largest id is the latest journal of this job.
*
* Layout:
* base-dir/
* /job-3f1da5e5-2a60-48de-8736-418d134edbe9/
* /JOB-0
* /JOB-3
* /JOB-5
* /job-ebc19478-2324-46c2-8d1a-2f8c4391dc09/
* /JOB-1
* /JOB-2
* /JOB-4
*/
public void saveJob(BalanceJob job) throws IOException {
Path jobFile = getNewStateJobPath(job);
Path tmpJobFile = new Path(jobFile + TMP_TAIL);
FSDataOutputStream out = null;
try {
FileSystem fs = FileSystem.get(workUri, conf);
out = fs.create(tmpJobFile);
job.write(new DataOutputStream(out));
out.close();
out = null;
fs.rename(tmpJobFile, jobFile);
} finally {
IOUtils.closeStream(out);
}
LOG.debug("Save journal of job={}", job);
} | 3.68 |
hbase_HttpServer_hostName | /**
* Set the hostname of the http server. The host name is used to resolve the _HOST field in
* Kerberos principals. The hostname of the first listener will be used if the name is
* unspecified.
*/
public Builder hostName(String hostName) {
this.hostName = hostName;
return this;
} | 3.68 |
hadoop_EntityRowKey_getRowKey | /**
* Constructs a row key for the entity table as follows:
* {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
* Typically used while querying a specific entity.
*
* @return byte array with the row key.
*/
public byte[] getRowKey() {
return entityRowKeyConverter.encode(this);
} | 3.68 |
morf_JdbcUrlElements_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (databaseType == null ? 0 : databaseType.hashCode());
result = prime * result + (databaseName == null ? 0 : databaseName.hashCode());
result = prime * result + (hostName == null ? 0 : hostName.hashCode());
result = prime * result + (instanceName == null ? 0 : instanceName.hashCode());
result = prime * result + port;
result = prime * result + (schemaName == null ? 0 : schemaName.hashCode());
return result;
} | 3.68 |
hudi_BaseHoodieTableServiceClient_scheduleLogCompaction | /**
* Schedules a new log compaction instant.
*
* @param extraMetadata Extra Metadata to be stored
*/
public Option<String> scheduleLogCompaction(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleLogCompactionAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
} | 3.68 |
hadoop_DatanodeAdminProperties_getAdminState | /**
* Get the admin state of the datanode.
* @return the admin state of the datanode.
*/
public AdminStates getAdminState() {
return adminState;
} | 3.68 |
framework_CaptionLeak_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return null;
} | 3.68 |
flink_SegmentsUtil_copyToBytes | /**
* Copy segments to target byte[].
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param bytes target byte[].
* @param bytesOffset target byte[] offset.
* @param numBytes the number bytes to copy.
*/
public static byte[] copyToBytes(
MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].get(offset, bytes, bytesOffset, numBytes);
} else {
copyMultiSegmentsToBytes(segments, offset, bytes, bytesOffset, numBytes);
}
return bytes;
} | 3.68 |
framework_Navigator_getState | /**
* Returns the current navigation state reported by this Navigator's
* {@link NavigationStateManager}.
* <p>
* When the navigation is triggered by the browser (for example by pressing
* the back or forward button in the browser), the navigation state may
* already have been updated to reflect the new address, before the
* {@link #navigateTo(String)} is notified.
*
* @return The navigation state.
*/
public String getState() {
return getStateManager().getState();
} | 3.68 |
hbase_MobFileCache_printStatistics | /**
* Prints the statistics.
*/
public void printStatistics() {
long access = count.get() - lastAccess;
long missed = miss.sum() - lastMiss;
long evicted = evictedFileCount.sum() - lastEvictedFileCount;
int hitRatio = access == 0 ? 0 : (int) (((float) (access - missed)) / (float) access * 100);
LOG.info("MobFileCache Statistics, access: " + access + ", miss: " + missed + ", hit: "
+ (access - missed) + ", hit ratio: " + hitRatio + "%, evicted files: " + evicted);
lastAccess += access;
lastMiss += missed;
lastEvictedFileCount += evicted;
} | 3.68 |
flink_DataStructureConverter_toExternalOrNull | /**
* Converts to external data structure or {@code null}.
*
* <p>The nullability could be derived from the data type. However, this method reduces null
* checks.
*/
default E toExternalOrNull(I internal) {
if (internal == null) {
return null;
}
return toExternal(internal);
} | 3.68 |
morf_InsertStatementDefaulter_defaultMissingFields | /**
* Inserts default values for missing fields into the insert statement.
*
* @param statement the {@link InsertStatement} to add defaults for.
* @return an insert statement with appropriate defaults added.
*/
public InsertStatement defaultMissingFields(InsertStatement statement) {
// Don't fiddle with parameterised statements
if (statement.isParameterisedInsert()) {
return statement;
}
Set<String> columnsWithValues = getColumnsWithValues(statement);
return addColumnDefaults(statement, columnsWithValues);
} | 3.68 |
flink_SplitAssignmentTracker_getAndRemoveUncheckpointedAssignment | /**
* This method is invoked when a source reader fails over. In this case, the source reader will
* restore its split assignment to the last successful checkpoint. Any split assignment to that
* source reader after the last successful checkpoint will be lost on the source reader side as
* if those splits were never assigned. To handle this case, the coordinator needs to find those
* splits and return them back to the SplitEnumerator for re-assignment.
*
* @param subtaskId the subtask id of the reader that failed over.
* @param restoredCheckpointId the ID of the checkpoint that the reader was restored to.
* @return A list of splits that needs to be added back to the {@link SplitEnumerator}.
*/
public List<SplitT> getAndRemoveUncheckpointedAssignment(
int subtaskId, long restoredCheckpointId) {
final ArrayList<SplitT> splits = new ArrayList<>();
for (final Map.Entry<Long, Map<Integer, LinkedHashSet<SplitT>>> entry :
assignmentsByCheckpointId.entrySet()) {
if (entry.getKey() > restoredCheckpointId) {
removeFromAssignment(subtaskId, entry.getValue(), splits);
}
}
removeFromAssignment(subtaskId, uncheckpointedAssignments, splits);
return splits;
} | 3.68 |
framework_CellReference_getRowIndex | /**
* Gets the row index of the row.
*
* @return the index of the row
*/
public int getRowIndex() {
return rowReference.getRowIndex();
} | 3.68 |
hbase_Bytes_toBytes | /** Convert a BigDecimal value to a byte array */
public static byte[] toBytes(BigDecimal val) {
byte[] valueBytes = val.unscaledValue().toByteArray();
byte[] result = new byte[valueBytes.length + SIZEOF_INT];
int offset = putInt(result, 0, val.scale());
putBytes(result, offset, valueBytes, 0, valueBytes.length);
return result;
} | 3.68 |
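A small round-trip sketch of the layout produced above (a 4-byte scale followed by the unscaled value bytes); Bytes.toBigDecimal is assumed to be the matching decoder in the same class:

```java
import java.math.BigDecimal;

import org.apache.hadoop.hbase.util.Bytes;

public class BigDecimalBytesDemo {
    public static void main(String[] args) {
        BigDecimal val = new BigDecimal("123.45"); // unscaled value 12345, scale 2
        byte[] encoded = Bytes.toBytes(val);
        // Layout: [0, 0, 0, 2] (the scale as an int) followed by {0x30, 0x39} (unscaled 12345)
        System.out.println(encoded.length);              // 6 = SIZEOF_INT + 2
        System.out.println(Bytes.toBigDecimal(encoded)); // 123.45
    }
}
```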
flink_Pattern_until | /**
* Applies a stop condition for a looping state. It allows cleaning the underlying state.
*
* @param untilCondition a condition an event has to satisfy to stop collecting events into
* looping state
* @return The same pattern with applied untilCondition
*/
public Pattern<T, F> until(IterativeCondition<F> untilCondition) {
Preconditions.checkNotNull(untilCondition, "The condition cannot be null");
if (this.untilCondition != null) {
throw new MalformedPatternException("Only one until condition can be applied.");
}
if (!quantifier.hasProperty(Quantifier.QuantifierProperty.LOOPING)) {
throw new MalformedPatternException(
"The until condition is only applicable to looping states.");
}
ClosureCleaner.clean(untilCondition, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
this.untilCondition = untilCondition;
return this;
} | 3.68 |
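A brief sketch of attaching an until condition to a looping pattern; the Event type and the thresholds are hypothetical:

```java
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.IterativeCondition;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

public class UntilConditionDemo {
    /** Hypothetical event type. */
    public static class Event {
        public String type;
        public double value;
    }

    public static Pattern<Event, Event> buildPattern() {
        return Pattern.<Event>begin("updates")
                .where(new SimpleCondition<Event>() {
                    @Override
                    public boolean filter(Event event) {
                        return "update".equals(event.type);
                    }
                })
                .oneOrMore() // looping state, so until() is applicable
                .until(new IterativeCondition<Event>() {
                    @Override
                    public boolean filter(Event event, Context<Event> ctx) {
                        return event.value > 100.0; // stop collecting once a large value arrives
                    }
                });
    }
}
```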
pulsar_SecurityUtility_createAutoRefreshSslContextForClient | /**
* Creates {@link SslContext} with capability to do auto-cert refresh.
* @param sslProvider the SSL provider to use
* @param allowInsecureConnection whether to trust all certificates without verification
* @param trustCertsFilePath path to the trusted certificates file
* @param certFilePath path to the client certificate file
* @param keyFilePath path to the client private key file
* @param sslContextAlgorithm the SSL context algorithm
* @param refreshDurationSec interval, in seconds, at which certificates are reloaded
* @param executor executor used to schedule certificate refresh
* @return the built {@link SslContext}
* @throws GeneralSecurityException
* @throws SSLException
* @throws FileNotFoundException
* @throws IOException
*/
public static SslContext createAutoRefreshSslContextForClient(SslProvider sslProvider,
boolean allowInsecureConnection,
String trustCertsFilePath, String certFilePath,
String keyFilePath, String sslContextAlgorithm,
int refreshDurationSec,
ScheduledExecutorService executor)
throws GeneralSecurityException, SSLException, FileNotFoundException, IOException {
KeyManagerProxy keyManager = new KeyManagerProxy(certFilePath, keyFilePath, refreshDurationSec, executor);
SslContextBuilder sslContexBuilder = SslContextBuilder.forClient().sslProvider(sslProvider);
sslContexBuilder.keyManager(keyManager);
if (allowInsecureConnection) {
sslContexBuilder.trustManager(InsecureTrustManagerFactory.INSTANCE);
} else {
if (StringUtils.isNotBlank(trustCertsFilePath)) {
TrustManagerProxy trustManager =
new TrustManagerProxy(trustCertsFilePath, refreshDurationSec, executor);
sslContexBuilder.trustManager(trustManager);
}
}
return sslContexBuilder.build();
} | 3.68 |
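A hedged call-site sketch, assuming this is Pulsar's org.apache.pulsar.common.util.SecurityUtility; the PEM paths are placeholders and the one-hour refresh interval is arbitrary:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslProvider;
import org.apache.pulsar.common.util.SecurityUtility;

public class AutoRefreshSslDemo {
    public static SslContext build() throws Exception {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        return SecurityUtility.createAutoRefreshSslContextForClient(
                SslProvider.JDK,
                false,                          // verify the server certificate chain
                "/path/to/ca.cert.pem",         // placeholder trust certs path
                "/path/to/client.cert.pem",     // placeholder client cert path
                "/path/to/client.key.pem",      // placeholder client key path
                "TLS",
                3600,                           // reload certificates every hour
                executor);
    }
}
```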
streampipes_EpProperties_listLongEp | /**
* Creates a new list-based event property of type long and with the assigned domain property.
*
* @param label A human-readable label of the property
* @param runtimeName The field identifier of the event property at runtime.
* @param domainProperty The semantics of the list property as a String. The string should correspond to a URI
* provided by a vocabulary. Use one of the vocabularies provided in
* {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary.
* @return {@link org.apache.streampipes.model.schema.EventPropertyPrimitive}
*/
public static EventPropertyList listLongEp(Label label, String runtimeName, String domainProperty) {
return listEp(label, runtimeName, Datatypes.Long, domainProperty);
} | 3.68 |
zxing_Result_getRawBytes | /**
* @return raw bytes encoded by the barcode, if applicable, otherwise {@code null}
*/
public byte[] getRawBytes() {
return rawBytes;
} | 3.68 |
framework_FileTypeResolver_addIcon | /**
* Adds a icon for the given mime-type. If the mime-type also has a
* corresponding icon, it is replaced with the new icon.
*
* @param mimeType
* the mime-type whose icon is to be changed.
* @param icon
* the new icon to be associated with <code>MIMEType</code>.
*/
public static void addIcon(String mimeType, Resource icon) {
MIME_TO_ICON_MAP.put(mimeType, icon);
} | 3.68 |
framework_AbstractInMemoryContainer_addFilter | /**
* Adds a container filter and re-filter the view.
*
* The filter must implement Filter and its sub-filters (if any) must also
* be in-memory filterable.
*
* This can be used to implement
* {@link Filterable#addContainerFilter(Container.Filter)} and optionally
* also
* {@link SimpleFilterable#addContainerFilter(Object, String, boolean, boolean)}
* (with {@link SimpleStringFilter}).
*
* Note that in some cases, incompatible filters cannot be detected when
* added and an {@link UnsupportedFilterException} may occur when performing
* filtering.
*
* @throws UnsupportedFilterException
* if the filter is detected as not supported by the container
*/
protected void addFilter(Filter filter) throws UnsupportedFilterException {
getFilters().add(filter);
filterAll();
} | 3.68 |
hudi_HoodieTableMetaClient_getFs | /**
* Get the FS implementation for this table.
*/
public HoodieWrapperFileSystem getFs() {
if (fs == null) {
FileSystem fileSystem = FSUtils.getFs(metaPath.get(), hadoopConf.newCopy());
if (fileSystemRetryConfig.isFileSystemActionRetryEnable()) {
fileSystem = new HoodieRetryWrapperFileSystem(fileSystem,
fileSystemRetryConfig.getMaxRetryIntervalMs(),
fileSystemRetryConfig.getMaxRetryNumbers(),
fileSystemRetryConfig.getInitialRetryIntervalMs(),
fileSystemRetryConfig.getRetryExceptions());
}
ValidationUtils.checkArgument(!(fileSystem instanceof HoodieWrapperFileSystem),
"File System not expected to be that of HoodieWrapperFileSystem");
fs = new HoodieWrapperFileSystem(fileSystem,
consistencyGuardConfig.isConsistencyCheckEnabled()
? new FailSafeConsistencyGuard(fileSystem, consistencyGuardConfig)
: new NoOpConsistencyGuard());
}
return fs;
} | 3.68 |
hbase_BaseEnvironment_getVersion | /** Returns the coprocessor environment version */
@Override
public int getVersion() {
return Coprocessor.VERSION;
} | 3.68 |
flink_OnMainThreadJobManagerRunnerRegistry_getWrappedDelegate | /**
* Returns the delegated {@link JobManagerRunnerRegistry}. This method can be used to work around
* the main thread safeguard.
*/
@Override
public JobManagerRunnerRegistry getWrappedDelegate() {
return this.delegate;
} | 3.68 |
hbase_MasterCoprocessorHost_preMergeRegionsCommit | /**
* Invoked before merge regions operation writes the new region to hbase:meta
* @param regionsToMerge the regions to merge
* @param metaEntries the meta entry
* @param user the user
*/
public void preMergeRegionsCommit(final RegionInfo[] regionsToMerge,
final @MetaMutationAnnotation List<Mutation> metaEntries, final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.preMergeRegionsCommitAction(this, regionsToMerge, metaEntries);
}
});
} | 3.68 |
framework_BeanItemContainer_addBean | /**
* Adds the bean to the Container.
*
* The bean is used both as the item contents and as the item identifier.
*
* @see com.vaadin.v7.data.Container#addItem(Object) Container#addItem(Object)
*/
@Override
public BeanItem<BEANTYPE> addBean(BEANTYPE bean) {
return addItem(bean);
} | 3.68 |
framework_AbstractComponentConnector_setWidgetStyleNameWithPrefix | /**
* This is used to add / remove state related prefixed style names from the
* widget.
* <p>
* Override this method if the prefixed style name given here should be
* updated in another widget in addition to the one returned by the
* <code>Connector</code>'s {@link #getWidget()}, or if the prefix should be
* different. For example see
* {@link com.vaadin.client.ui.datefield.TextualDateConnector#setWidgetStyleNameWithPrefix(String, String, boolean)}
* </p>
*
* @param prefix
* the prefix for the style name
* @param styleName
* the style name to be added or removed
* @param add
* <code>true</code> to add the given style, <code>false</code>
* to remove it
* @deprecated This will be removed once styles are no longer added with
* prefixes.
*/
@Deprecated
protected void setWidgetStyleNameWithPrefix(String prefix, String styleName,
boolean add) {
if (!styleName.startsWith("-")) {
if (!prefix.endsWith("-")) {
prefix += "-";
}
} else {
if (prefix.endsWith("-")) {
styleName = styleName.replaceFirst("-", "");
}
}
getWidget().setStyleName(prefix + styleName, add);
} | 3.68 |
framework_Window_getOrderPosition | /**
* Returns the position of this window in the order of all open windows for
* this UI.
* <p>
* Window with position 0 is on the bottom, and window with greatest
* position is at the top. If window has no position (it's not yet attached
* or hidden) then position is {@code -1}.
*
* @see UI#addWindowOrderUpdateListener(com.vaadin.ui.UI.WindowOrderUpdateListener)
*
* @since 8.0
*
* @return window order position.
*/
public int getOrderPosition() {
return orderPosition;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSelectMaximumWithExpression | /**
* @return The expected SQL for selecting the maximum of an expression
*/
protected String expectedSelectMaximumWithExpression() {
return "SELECT MAX(intField + 1) FROM " + tableName(TEST_TABLE);
} | 3.68 |
hadoop_Validate_checkRequired | /**
* Validates that the expression (that checks a required field is present) is true.
* @param isPresent indicates whether the given argument is present.
* @param argName the name of the argument being validated.
*/
public static void checkRequired(boolean isPresent, String argName) {
checkArgument(isPresent, "'%s' is required.", argName);
} | 3.68 |
flink_PartitionRequestQueue_announceBacklog | /**
* Announces remaining backlog to the consumer after the available data notification or data
* consumption resumption.
*/
private void announceBacklog(NetworkSequenceViewReader reader, int backlog) {
checkArgument(backlog > 0, "Backlog must be positive.");
NettyMessage.BacklogAnnouncement announcement =
new NettyMessage.BacklogAnnouncement(backlog, reader.getReceiverId());
ctx.channel()
.writeAndFlush(announcement)
.addListener(
(ChannelFutureListener)
future -> {
if (!future.isSuccess()) {
onChannelFutureFailure(future);
}
});
} | 3.68 |
querydsl_BeanPath_createSimple | /**
* Create a new Simple path
*
* @param <A>
* @param property property name
* @param type property type
* @return property path
*/
@SuppressWarnings("unchecked")
protected <A> SimplePath<A> createSimple(String property, Class<? super A> type) {
return add(new SimplePath<A>((Class<A>) type, forProperty(property)));
} | 3.68 |
graphhopper_InstructionsFromEdges_calcInstructions | /**
* @return the list of instructions for this path.
*/
public static InstructionList calcInstructions(Path path, Graph graph, Weighting weighting, EncodedValueLookup evLookup, final Translation tr) {
final InstructionList ways = new InstructionList(tr);
if (path.isFound()) {
if (path.getEdgeCount() == 0) {
ways.add(new FinishInstruction(graph.getNodeAccess(), path.getEndNode()));
} else {
path.forEveryEdge(new InstructionsFromEdges(graph, weighting, evLookup, ways));
}
}
return ways;
} | 3.68 |