name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_RouterQuotaUpdateService_isQuotaSet | /**
* Check if the quota was set in the given MountTable.
* @param mountTable Mount table entry.
*/
private boolean isQuotaSet(MountTable mountTable) {
if (mountTable != null) {
return this.quotaManager.isQuotaSet(mountTable.getQuota());
}
return false;
} | 3.68 |
hadoop_NamenodeStatusReport_getProvidedSpace | /**
* Get the space occupied by provided storage.
*
* @return the provided capacity.
*/
public long getProvidedSpace() {
return this.providedSpace;
} | 3.68 |
hudi_HiveAvroSerializer_getOtherTypeFromNullableType | /**
* If the union schema is a nullable union, get the schema for the non-nullable type.
* This method does no checking that the provided Schema is nullable. If the provided
* union schema is non-nullable, it simply returns the union schema
*/
public static Schema getOtherTypeFromNullableType(Schema unionSchema) {
final List<Schema> types = unionSchema.getTypes();
if (types.size() == 2) { // most common scenario
if (types.get(0).getType() == Schema.Type.NULL) {
return types.get(1);
}
if (types.get(1).getType() == Schema.Type.NULL) {
return types.get(0);
}
// not a nullable union
return unionSchema;
}
final List<Schema> itemSchemas = new ArrayList<>();
for (Schema itemSchema : types) {
if (!Schema.Type.NULL.equals(itemSchema.getType())) {
itemSchemas.add(itemSchema);
}
}
if (itemSchemas.size() > 1) {
return Schema.createUnion(itemSchemas);
} else {
return itemSchemas.get(0);
}
} | 3.68 |
hadoop_VersionInfoMojo_getSCMCommit | /**
* Parses SCM output and returns commit of SCM.
*
* @param scm SCM in use for this build
* @return String commit of SCM
*/
private String getSCMCommit(SCM scm) {
String commit = "Unknown";
switch (scm) {
case GIT:
for (String s : scmOut) {
if (s.startsWith("commit")) {
commit = s.substring("commit".length());
break;
}
}
break;
}
return commit.trim();
} | 3.68 |
flink_InputTypeStrategies_constraint | /** Strategy for an argument that must fulfill a given constraint. */
public static ConstraintArgumentTypeStrategy constraint(
String constraintMessage, Predicate<List<DataType>> evaluator) {
return new ConstraintArgumentTypeStrategy(constraintMessage, evaluator);
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_executeInLeader | /**
* Execute in leader server.
*
* @param latchNode node for leader latch
* @param callback execute callback
*/
public void executeInLeader(final String latchNode, final LeaderExecutionCallback callback) {
regCenter.executeInLeader(jobNodePath.getFullPath(latchNode), callback);
} | 3.68 |
flink_ResourceManagerId_toUUID | /** Creates a UUID with the bits from this ResourceManagerId. */
public UUID toUUID() {
return new UUID(getUpperPart(), getLowerPart());
} | 3.68 |
hbase_Encryption_failOnHashAlgorithmMismatch | /**
* Returns the Hash Algorithm mismatch behaviour defined in the crypto configuration.
*/
public static boolean failOnHashAlgorithmMismatch(Configuration conf) {
return conf.getBoolean(CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY,
CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_DEFAULT);
} | 3.68 |
Activiti_ProcessEngines_destroy | /**
* closes all process engines. This method should be called when the server shuts down.
*/
public synchronized static void destroy() {
if (isInitialized()) {
Map<String, ProcessEngine> engines = new HashMap<String, ProcessEngine>(processEngines);
processEngines = new HashMap<String, ProcessEngine>();
for (String processEngineName : engines.keySet()) {
ProcessEngine processEngine = engines.get(processEngineName);
try {
processEngine.close();
} catch (Exception e) {
log.error("exception while closing {}", (processEngineName == null ? "the default process engine" : "process engine " + processEngineName), e);
}
}
processEngineInfosByName.clear();
processEngineInfosByResourceUrl.clear();
processEngineInfos.clear();
setInitialized(false);
}
} | 3.68 |
hadoop_ReconfigurationException_getOldValue | /**
* Get old value of property that cannot be changed.
* @return old value.
*/
public String getOldValue() {
return oldVal;
} | 3.68 |
flink_DateTimeUtils_timestampMillisToDate | /**
* Get date from a timestamp.
*
* @param ts the timestamp in milliseconds.
* @return the date in days.
*/
public static int timestampMillisToDate(long ts) {
int days = (int) (ts / MILLIS_PER_DAY);
if (days < 0) {
days = days - 1;
}
return days;
} | 3.68 |
framework_ConnectorInfoPanel_update | /**
* Update the panel to show information about a connector.
*
* @param connector the connector to show information about
*/
public void update(ServerConnector connector) {
SharedState state = connector.getState();
Set<String> ignoreProperties = new HashSet<>();
ignoreProperties.add("id");
String html = getRowHTML("Id", connector.getConnectorId());
html += getRowHTML("Connector", connector.getClass().getSimpleName());
if (connector instanceof ComponentConnector) {
ComponentConnector component = (ComponentConnector) connector;
ignoreProperties.addAll(
Arrays.asList("caption", "description", "width", "height"));
AbstractComponentState componentState = component.getState();
html += getRowHTML("Widget",
component.getWidget().getClass().getSimpleName());
html += getRowHTML("Caption", componentState.caption);
html += getRowHTML("Description", componentState.description);
html += getRowHTML("Width", componentState.width + " (actual: "
+ component.getWidget().getOffsetWidth() + "px)");
html += getRowHTML("Height", componentState.height + " (actual: "
+ component.getWidget().getOffsetHeight() + "px)");
}
try {
JsArrayObject<Property> properties = AbstractConnector
.getStateType(connector).getPropertiesAsArray();
for (int i = 0; i < properties.size(); i++) {
Property property = properties.get(i);
String name = property.getName();
if (!ignoreProperties.contains(name)) {
html += getRowHTML(property.getDisplayName(),
property.getValue(state));
}
}
} catch (NoDataException e) {
html += "<div>Could not read state, error has been logged to the console</div>";
getLogger().log(Level.SEVERE, "Could not read state", e);
}
clear();
add(new HTML(html));
} | 3.68 |
morf_TableOutputter_table | /**
* Output the given table to the given workbook.
*
* @param maxSampleRows the maximum number of rows to export in the "sample data" section
* (all rows are included in the "Parameters to set up" section).
* @param workbook to add the table to.
* @param table to add to the workbook.
* @param records of data to output.
*/
public void table(int maxSampleRows, final WritableWorkbook workbook, final Table table, final Iterable<Record> records) {
final WritableSheet workSheet = workbook.createSheet(spreadsheetifyName(table.getName()), workbook.getNumberOfSheets());
boolean columnsTruncated = table.columns().size() > MAX_EXCEL_COLUMNS;
if(columnsTruncated) {
log.warn("Output for table '" + table.getName() + "' exceeds the maximum number of columns (" + MAX_EXCEL_COLUMNS + ") in an Excel worksheet. It will be truncated.");
}
boolean rowsTruncated = false;
try {
int currentRow = NUMBER_OF_ROWS_IN_TITLE + 1;
try {
final Map<String, Integer> helpTextRowNumbers = new HashMap<>();
//Now output....
//Help text
currentRow = outputHelp(workSheet, table, currentRow, helpTextRowNumbers);
//"Example Data"
Label exampleLabel = new Label(0, currentRow, "Example Data");
exampleLabel.setCellFormat(getBoldFormat());
workSheet.addCell(exampleLabel);
currentRow++;
//Headings for example data
currentRow = outputDataHeadings(workSheet, table, currentRow, helpTextRowNumbers);
//Actual example data
currentRow = outputExampleData(maxSampleRows, workSheet, table, currentRow, records);
//"Parameters to Set Up"
Label dataLabel = new Label(0, currentRow, "Parameters to Set Up");
dataLabel.setCellFormat(getBoldFormat());
workSheet.addCell(dataLabel);
currentRow++;
//Headings for parameters to be uploaded
currentRow = outputDataHeadings(workSheet, table, currentRow, helpTextRowNumbers);
currentRow = outputExampleData(null, workSheet, table, currentRow, records);
} catch (RowLimitExceededException e) {
log.warn(e.getMessage());
rowsTruncated = true;
}
}
catch (Exception e) {
throw new RuntimeException("Error outputting table '" + table.getName() + "'", e);
}
/*
* Write the title for the worksheet - adding truncation information if appropriate
*/
if(columnsTruncated || rowsTruncated) {
StringBuilder truncatedSuffix = new StringBuilder();
truncatedSuffix.append(" [");
if(columnsTruncated) {
truncatedSuffix.append("COLUMNS");
}
if(columnsTruncated && rowsTruncated) {
truncatedSuffix.append(" & ");
}
if(rowsTruncated) {
truncatedSuffix.append("ROWS");
}
truncatedSuffix.append(" TRUNCATED]");
createTitle(workSheet, workSheet.getName() + truncatedSuffix.toString(), table.getName());
}
else {
createTitle(workSheet, workSheet.getName(), table.getName());
}
} | 3.68 |
hbase_CommonFSUtils_isStartingWithPath | /**
* Compare of path component. Does not consider schema; i.e. if schemas different but
* <code>path</code> starts with <code>rootPath</code>, then the function returns true
* @param rootPath value to check for
* @param path subject to check
* @return True if <code>path</code> starts with <code>rootPath</code>
*/
public static boolean isStartingWithPath(final Path rootPath, final String path) {
String uriRootPath = rootPath.toUri().getPath();
String tailUriPath = new Path(path).toUri().getPath();
return tailUriPath.startsWith(uriRootPath);
} | 3.68 |
hadoop_HadoopExecutors_newSingleThreadExecutor | //Executors.newSingleThreadExecutor has special semantics - for the
// moment we'll delegate to it rather than implement the semantics here.
public static ExecutorService newSingleThreadExecutor(ThreadFactory
threadFactory) {
return Executors.newSingleThreadExecutor(threadFactory);
} | 3.68 |
druid_ZookeeperNodeListener_destroy | /**
* Close PathChildrenCache and CuratorFramework.
*/
@Override
public void destroy() {
if (cache != null) {
try {
cache.close();
} catch (IOException e) {
LOG.error("IOException occurred while closing PathChildrenCache.", e);
}
}
if (client != null && privateZkClient) {
client.close();
}
} | 3.68 |
hbase_LocalHBaseCluster_getRegionServer | /** Returns region server */
public HRegionServer getRegionServer(int serverNumber) {
return regionThreads.get(serverNumber).getRegionServer();
} | 3.68 |
hudi_AbstractTableFileSystemView_isBaseFileDueToPendingCompaction | /**
* With async compaction, it is possible to see partial/complete base-files due to inflight
* compactions; ignore those base-files.
*
* @param baseFile base file to check
* @return true if the base file is due to a pending compaction
*/
protected boolean isBaseFileDueToPendingCompaction(HoodieBaseFile baseFile) {
final String partitionPath = getPartitionPathFor(baseFile);
Option<Pair<String, CompactionOperation>> compactionWithInstantTime =
getPendingCompactionOperationWithInstant(new HoodieFileGroupId(partitionPath, baseFile.getFileId()));
return (compactionWithInstantTime.isPresent()) && (null != compactionWithInstantTime.get().getKey())
&& baseFile.getCommitTime().equals(compactionWithInstantTime.get().getKey());
} | 3.68 |
hbase_StateMachineProcedure_failIfAborted | /**
* If the procedure has more states, abort it; otherwise the procedure is finished and the
* abort can be ignored.
*/
protected final void failIfAborted() {
if (aborted.get()) {
if (hasMoreState()) {
setAbortFailure(getClass().getSimpleName(), "abort requested");
} else {
LOG.warn("Ignoring abort request on state='" + getCurrentState() + "' for " + this);
}
}
} | 3.68 |
hadoop_MappableBlockLoader_verifyChecksum | /**
* Verifies the block's checksum. This is an I/O intensive operation.
*/
protected void verifyChecksum(long length, FileInputStream metaIn,
FileChannel blockChannel, String blockFileName) throws IOException {
// Verify the checksum from the block's meta file
// Get the DataChecksum from the meta file header
BlockMetadataHeader header =
BlockMetadataHeader.readHeader(new DataInputStream(
new BufferedInputStream(metaIn, BlockMetadataHeader
.getHeaderSize())));
try (FileChannel metaChannel = metaIn.getChannel()) {
if (metaChannel == null) {
throw new IOException(
"Block InputStream meta file has no FileChannel.");
}
DataChecksum checksum = header.getChecksum();
final int bytesPerChecksum = checksum.getBytesPerChecksum();
final int checksumSize = checksum.getChecksumSize();
final int numChunks = (8 * 1024 * 1024) / bytesPerChecksum;
ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum);
ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize);
// Verify the checksum
int bytesVerified = 0;
while (bytesVerified < length) {
Preconditions.checkState(bytesVerified % bytesPerChecksum == 0,
"Unexpected partial chunk before EOF");
assert bytesVerified % bytesPerChecksum == 0;
int bytesRead = fillBuffer(blockChannel, blockBuf);
if (bytesRead == -1) {
throw new IOException("checksum verification failed: premature EOF");
}
blockBuf.flip();
// Number of read chunks, including partial chunk at end
int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum;
checksumBuf.limit(chunks * checksumSize);
fillBuffer(metaChannel, checksumBuf);
checksumBuf.flip();
checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName,
bytesVerified);
// Success
bytesVerified += bytesRead;
blockBuf.clear();
checksumBuf.clear();
}
}
} | 3.68 |
hadoop_RouterHeartbeatService_updateStateStore | /**
* Update the state of the Router in the State Store.
*/
@VisibleForTesting
synchronized void updateStateStore() {
String routerId = router.getRouterId();
if (routerId == null) {
LOG.error("Cannot heartbeat for router: unknown router id");
return;
}
if (isStoreAvailable()) {
RouterStore routerStore = router.getRouterStateManager();
try {
RouterState record = RouterState.newInstance(
routerId, router.getStartTime(), router.getRouterState());
StateStoreVersion stateStoreVersion = StateStoreVersion.newInstance(
getStateStoreVersion(MembershipStore.class),
getStateStoreVersion(MountTableStore.class));
record.setStateStoreVersion(stateStoreVersion);
// if admin server not started then hostPort will be empty
String hostPort =
StateStoreUtils.getHostPortString(router.getAdminServerAddress());
record.setAdminAddress(hostPort);
RouterHeartbeatRequest request =
RouterHeartbeatRequest.newInstance(record);
RouterHeartbeatResponse response = routerStore.routerHeartbeat(request);
if (!response.getStatus()) {
LOG.warn("Cannot heartbeat router {}", routerId);
} else {
LOG.debug("Router heartbeat for router {}", routerId);
}
} catch (IOException e) {
LOG.error("Cannot heartbeat router {}", routerId, e);
}
} else {
LOG.warn("Cannot heartbeat router {}: State Store unavailable", routerId);
}
} | 3.68 |
flink_DualInputOperator_addFirstInputs | /**
* Add to the first input the union of the given operators.
*
* @param inputs The operator(s) to be unioned with the first input.
* @deprecated This method will be removed in future versions. Use the {@link Union} operator
* instead.
*/
@Deprecated
@SuppressWarnings("unchecked")
public void addFirstInputs(List<Operator<IN1>> inputs) {
this.input1 =
Operator.createUnionCascade(
this.input1, inputs.toArray(new Operator[inputs.size()]));
} | 3.68 |
streampipes_Labels_from | /**
* @deprecated Externalize labels by using
* {@link org.apache.streampipes.sdk.builder.AbstractProcessingElementBuilder#withLocales(Locales...)}
* to ease future support for multiple languages.
*
* Creates a new label with internalId, label and description. Fully-configured labels are required by static
* properties and are mandatory for event properties.
*
* @param internalId The internal identifier of the element, e.g., "latitude-field-mapping"
* @param label A human-readable title
* @param description A human-readable brief summary of the element.
* @return a new label with the given internal id, label, and description
*/
@Deprecated(since = "0.90.0", forRemoval = true)
public static Label from(String internalId, String label, String description) {
return new Label(internalId, label, description);
} | 3.68 |
hadoop_AbfsInputStream_incrementReadOps | /**
* Increment Read Operations.
*/
private void incrementReadOps() {
if (statistics != null) {
statistics.incrementReadOps(1);
}
} | 3.68 |
flink_DefaultExecutionTopology_ensureCoLocatedVerticesInSameRegion | /**
* Co-location constraints are only used for iteration head and tail. A paired head and tail
* needs to be in the same pipelined region so that they can be restarted together.
*/
private static void ensureCoLocatedVerticesInSameRegion(
List<DefaultSchedulingPipelinedRegion> pipelinedRegions,
ExecutionGraph executionGraph) {
final Map<CoLocationConstraint, DefaultSchedulingPipelinedRegion> constraintToRegion =
new HashMap<>();
for (DefaultSchedulingPipelinedRegion region : pipelinedRegions) {
for (DefaultExecutionVertex vertex : region.getVertices()) {
final CoLocationConstraint constraint =
getCoLocationConstraint(vertex.getId(), executionGraph);
if (constraint != null) {
final DefaultSchedulingPipelinedRegion regionOfConstraint =
constraintToRegion.get(constraint);
checkState(
regionOfConstraint == null || regionOfConstraint == region,
"co-located tasks must be in the same pipelined region");
constraintToRegion.putIfAbsent(constraint, region);
}
}
}
} | 3.68 |
hbase_StorageClusterStatusModel_setStorefileIndexSizeKB | /**
* @param storefileIndexSizeKB total size of store file indexes, in KB
*/
public void setStorefileIndexSizeKB(long storefileIndexSizeKB) {
this.storefileIndexSizeKB = storefileIndexSizeKB;
} | 3.68 |
hbase_BufferedMutatorParams_writeBufferSize | /**
* Override the write buffer size specified by the provided {@link Connection}'s
* {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
* {@code hbase.client.write.buffer}.
*/
public BufferedMutatorParams writeBufferSize(long writeBufferSize) {
this.writeBufferSize = writeBufferSize;
return this;
} | 3.68 |
hadoop_DockerCommandExecutor_parseContainerStatus | /**
* Parses the container status string.
*
* @param containerStatusStr container status.
* @return a {@link DockerContainerStatus} representing the status.
*/
public static DockerContainerStatus parseContainerStatus(
String containerStatusStr) {
DockerContainerStatus dockerContainerStatus;
if (containerStatusStr == null) {
dockerContainerStatus = DockerContainerStatus.UNKNOWN;
} else if (containerStatusStr
.equals(DockerContainerStatus.CREATED.getName())) {
dockerContainerStatus = DockerContainerStatus.CREATED;
} else if (containerStatusStr
.equals(DockerContainerStatus.RUNNING.getName())) {
dockerContainerStatus = DockerContainerStatus.RUNNING;
} else if (containerStatusStr
.equals(DockerContainerStatus.STOPPED.getName())) {
dockerContainerStatus = DockerContainerStatus.STOPPED;
} else if (containerStatusStr
.equals(DockerContainerStatus.RESTARTING.getName())) {
dockerContainerStatus = DockerContainerStatus.RESTARTING;
} else if (containerStatusStr
.equals(DockerContainerStatus.REMOVING.getName())) {
dockerContainerStatus = DockerContainerStatus.REMOVING;
} else if (containerStatusStr
.equals(DockerContainerStatus.DEAD.getName())) {
dockerContainerStatus = DockerContainerStatus.DEAD;
} else if (containerStatusStr
.equals(DockerContainerStatus.EXITED.getName())) {
dockerContainerStatus = DockerContainerStatus.EXITED;
} else if (containerStatusStr
.equals(DockerContainerStatus.NONEXISTENT.getName())) {
dockerContainerStatus = DockerContainerStatus.NONEXISTENT;
} else {
dockerContainerStatus = DockerContainerStatus.UNKNOWN;
}
return dockerContainerStatus;
} | 3.68 |
framework_AbstractConnector_hasEventListener | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ServerConnector#hasEventListener(java.lang.String)
*/
@Override
public boolean hasEventListener(String eventIdentifier) {
Set<String> reg = getState().registeredEventListeners;
return reg != null && reg.contains(eventIdentifier);
} | 3.68 |
morf_AbstractSqlDialectTest_testCreateTableStatementsLongTableName | /**
* Tests the SQL for creating tables with long names
*/
@SuppressWarnings("unchecked")
@Test
public void testCreateTableStatementsLongTableName() {
Table table = metadata.getTable(TABLE_WITH_VERY_LONG_NAME);
compareStatements(
expectedCreateTableStatementsWithLongTableName(),
testDialect.tableDeploymentStatements(table)
);
} | 3.68 |
morf_HumanReadableStatementHelper_generateNullableString | /**
* Generates a nullable / non-null string for the specified definition.
*
* @param definition the column definition
* @return a string representation of nullable / non-null
*/
private static String generateNullableString(final Column definition) {
return definition.isNullable() ? "nullable" : "non-null";
} | 3.68 |
flink_DateTimeUtils_isValidValue | /**
* Returns whether a given value is valid for a field of this time unit.
*
* @param field Field value
* @return Whether the value is valid for a field of this time unit
*/
public boolean isValidValue(BigDecimal field) {
return field.compareTo(BigDecimal.ZERO) >= 0
&& (limit == null || field.compareTo(limit) < 0);
} | 3.68 |
hbase_RpcServer_logResponse | /**
* Logs an RPC response to the LOG file, producing valid JSON objects for client Operations.
* @param param The parameters received in the call.
* @param methodName The name of the method invoked
* @param call The string representation of the call
* @param tooLarge To indicate if the event is tooLarge
* @param tooSlow To indicate if the event is tooSlow
* @param clientAddress The address of the client who made this call.
* @param startTime The time that the call was initiated, in ms.
* @param processingTime The duration that the call took to run, in ms.
* @param qTime The duration that the call spent on the queue prior to being
* initiated, in ms.
* @param responseSize The size in bytes of the response buffer.
* @param blockBytesScanned The size of block bytes scanned to retrieve the response.
* @param userName UserName of the current RPC Call
*/
void logResponse(Message param, String methodName, String call, boolean tooLarge, boolean tooSlow,
String clientAddress, long startTime, int processingTime, int qTime, long responseSize,
long blockBytesScanned, String userName) {
final String className = server == null ? StringUtils.EMPTY : server.getClass().getSimpleName();
// base information that is reported regardless of type of call
Map<String, Object> responseInfo = new HashMap<>();
responseInfo.put("starttimems", startTime);
responseInfo.put("processingtimems", processingTime);
responseInfo.put("queuetimems", qTime);
responseInfo.put("responsesize", responseSize);
responseInfo.put("blockbytesscanned", blockBytesScanned);
responseInfo.put("client", clientAddress);
responseInfo.put("class", className);
responseInfo.put("method", methodName);
responseInfo.put("call", call);
// The params could be really big, make sure they don't kill us at WARN
String stringifiedParam = ProtobufUtil.getShortTextFormat(param);
if (stringifiedParam.length() > 150) {
// Truncate to 1000 chars if TRACE is on, else to 150 chars
stringifiedParam = truncateTraceLog(stringifiedParam);
}
responseInfo.put("param", stringifiedParam);
if (param instanceof ClientProtos.ScanRequest && rsRpcServices != null) {
ClientProtos.ScanRequest request = ((ClientProtos.ScanRequest) param);
String scanDetails;
if (request.hasScannerId()) {
long scannerId = request.getScannerId();
scanDetails = rsRpcServices.getScanDetailsWithId(scannerId);
} else {
scanDetails = rsRpcServices.getScanDetailsWithRequest(request);
}
if (scanDetails != null) {
responseInfo.put("scandetails", scanDetails);
}
}
if (param instanceof ClientProtos.MultiRequest) {
int numGets = 0;
int numMutations = 0;
int numServiceCalls = 0;
ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest) param;
for (ClientProtos.RegionAction regionAction : multi.getRegionActionList()) {
for (ClientProtos.Action action : regionAction.getActionList()) {
if (action.hasMutation()) {
numMutations++;
}
if (action.hasGet()) {
numGets++;
}
if (action.hasServiceCall()) {
numServiceCalls++;
}
}
}
responseInfo.put(MULTI_GETS, numGets);
responseInfo.put(MULTI_MUTATIONS, numMutations);
responseInfo.put(MULTI_SERVICE_CALLS, numServiceCalls);
}
final String tag =
(tooLarge && tooSlow) ? "TooLarge & TooSlow" : (tooSlow ? "TooSlow" : "TooLarge");
LOG.warn("(response" + tag + "): " + GSON.toJson(responseInfo));
} | 3.68 |
hibernate-validator_MetaConstraint_getGroupList | /**
* @return Returns the list of groups this constraint is part of. This might include the default group even when
* it is not explicitly specified, but part of the redefined default group list of the hosting bean.
*/
public final Set<Class<?>> getGroupList() {
return constraintTree.getDescriptor().getGroups();
} | 3.68 |
zilla_HpackContext_staticIndex3 | // Index in static table for the given name of length 3
private static int staticIndex3(DirectBuffer name)
{
switch (name.getByte(2))
{
case 'a':
if (STATIC_TABLE[60].name.equals(name)) // via
{
return 60;
}
break;
case 'e':
if (STATIC_TABLE[21].name.equals(name)) // age
{
return 21;
}
break;
}
return -1;
} | 3.68 |
flink_KeyedStream_minBy | /**
* Applies an aggregation that gives the current element with the minimum value at the given
* position by the given key. An independent aggregate is kept per key. If more elements have
* the minimum value at the given position, the operator returns either the first or last one,
* depending on the parameter set.
*
* @param positionToMinBy The field position in the data points to minimize. This is applicable
* to Tuple types, Scala case classes, and primitive types (which is considered as having
* one field).
* @param first If true, then the operator return the first element with the minimal value,
* otherwise returns the last
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> minBy(int positionToMinBy, boolean first) {
return aggregate(
new ComparableAggregator<>(
positionToMinBy,
getType(),
AggregationFunction.AggregationType.MINBY,
first,
getExecutionConfig()));
} | 3.68 |
framework_Escalator_refreshRows | /**
* {@inheritDoc}
* <p>
* <em>Implementation detail:</em> This method does no DOM modifications
* (i.e. is very cheap to call) if there is no data for columns when
* this method is called.
*
* @see #hasColumnAndRowData()
*/
@Override
// overridden because of JavaDoc
public void refreshRows(final int index, final int numberOfRows) {
Range rowRange = Range.withLength(index, numberOfRows);
Range colRange = Range.withLength(0,
getColumnConfiguration().getColumnCount());
refreshCells(rowRange, colRange);
} | 3.68 |
flink_BinaryStringDataUtil_trimRight | /**
* Walk each character of current string from right end, remove the character if it is in trim
* string. Stops at the first character which is not in trim string. Return the new substring.
*
* @param trimStr the trim string
* @return A subString which removes all of the character from the right side that is in trim
* string.
*/
public static BinaryStringData trimRight(BinaryStringData str, BinaryStringData trimStr) {
str.ensureMaterialized();
if (trimStr == null) {
return null;
}
trimStr.ensureMaterialized();
if (isSpaceString(trimStr)) {
return trimRight(str);
}
if (str.inFirstSegment()) {
int charIdx = 0;
int byteIdx = 0;
// each element in charLens is length of character in the source string
int[] charLens = new int[str.getSizeInBytes()];
// each element in charStartPos is start position of first byte in the source string
int[] charStartPos = new int[str.getSizeInBytes()];
while (byteIdx < str.getSizeInBytes()) {
charStartPos[charIdx] = byteIdx;
charLens[charIdx] = numBytesForFirstByte(str.getByteOneSegment(byteIdx));
byteIdx += charLens[charIdx];
charIdx++;
}
// searchIdx points to the first character which is not in trim string from the right
// end.
int searchIdx = str.getSizeInBytes() - 1;
charIdx -= 1;
while (charIdx >= 0) {
BinaryStringData currentChar =
str.copyBinaryStringInOneSeg(charStartPos[charIdx], charLens[charIdx]);
if (trimStr.contains(currentChar)) {
searchIdx -= charLens[charIdx];
} else {
break;
}
charIdx--;
}
if (searchIdx < 0) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryStringInOneSeg(0, searchIdx + 1);
}
} else {
return trimRightSlow(str, trimStr);
}
} | 3.68 |
hadoop_TaskManifest_toJson | /**
* To JSON.
* @return json string value.
* @throws IOException failure
*/
public String toJson() throws IOException {
return serializer().toJson(this);
} | 3.68 |
AreaShop_GeneralRegion_getMaximumPoint | /**
* Get the maximum corner of the region.
* @return Vector
*/
public Vector getMaximumPoint() {
return plugin.getWorldGuardHandler().getMaximumPoint(getRegion());
} | 3.68 |
hbase_KeyValue_compareKey | // compare a key against row/fam/qual/ts/type
public int compareKey(Cell cell, byte[] row, int roff, int rlen, byte[] fam, int foff, int flen,
byte[] col, int coff, int clen, long ts, byte type) {
int compare =
compareRows(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, roff, rlen);
if (compare != 0) {
return compare;
}
// If the column is not specified, the "minimum" key type appears the
// latest in the sorted order, regardless of the timestamp. This is used
// for specifying the last key/value in a given row, because there is no
// "lexicographically last column" (it would be infinitely long). The
// "maximum" key type does not need this behavior.
if (
cell.getFamilyLength() + cell.getQualifierLength() == 0
&& cell.getTypeByte() == Type.Minimum.getCode()
) {
// left is "bigger", i.e. it appears later in the sorted order
return 1;
}
if (flen + clen == 0 && type == Type.Minimum.getCode()) {
return -1;
}
compare = compareFamilies(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength(), fam, foff, flen);
if (compare != 0) {
return compare;
}
compare = compareColumns(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength(), col, coff, clen);
if (compare != 0) {
return compare;
}
// Next compare timestamps.
compare = compareTimestamps(cell.getTimestamp(), ts);
if (compare != 0) {
return compare;
}
// Compare types. Let the delete types sort ahead of puts; i.e. types
// of higher numbers sort before those of lesser numbers. Maximum (255)
// appears ahead of everything, and minimum (0) appears after
// everything.
return (0xff & type) - (0xff & cell.getTypeByte());
} | 3.68 |
hbase_ZKUtil_deleteNodeRecursively | /**
* Delete the specified node and all of it's children.
* <p>
* If the node does not exist, just returns.
* <p>
* Sets no watches. Throws all exceptions besides dealing with deletion of children.
*/
public static void deleteNodeRecursively(ZKWatcher zkw, String node) throws KeeperException {
deleteNodeRecursivelyMultiOrSequential(zkw, true, node);
} | 3.68 |
hbase_ReplicationUtils_sleepForRetries | /**
* Do the sleeping logic
* @param msg Why we sleep
* @param sleepForRetries the base sleep time.
* @param sleepMultiplier by how many times the default sleeping time is augmented
* @param maxRetriesMultiplier the max retry multiplier
* @return True if <code>sleepMultiplier</code> is < <code>maxRetriesMultiplier</code>
*/
public static boolean sleepForRetries(String msg, long sleepForRetries, int sleepMultiplier,
int maxRetriesMultiplier) {
try {
LOG.trace("{}, sleeping {} times {}", msg, sleepForRetries, sleepMultiplier);
Thread.sleep(sleepForRetries * sleepMultiplier);
} catch (InterruptedException e) {
LOG.debug("Interrupted while sleeping between retries");
Thread.currentThread().interrupt();
}
return sleepMultiplier < maxRetriesMultiplier;
} | 3.68 |
morf_SqlUtils_cast | /**
* @see CastBuilder#asString(int)
* @param field the field to cast
* @return A builder to produce a {@link Cast}.
*/
public static CastBuilder cast(AliasedField field) {
return new CastBuilder(field);
} | 3.68 |
flink_CsvReaderFormat_forSchema | /**
* Builds a new {@code CsvReaderFormat} using a {@code CsvSchema} generator and {@code
* CsvMapper} factory.
*
* @param mapperFactory The factory creating the {@code CsvMapper}.
* @param schemaGenerator A generator that creates and configures the Jackson CSV schema for
* parsing specific CSV files, from a mapper created by the mapper factory.
* @param typeInformation The Flink type descriptor of the returned elements.
* @param <T> The type of the returned elements.
*/
public static <T> CsvReaderFormat<T> forSchema(
SerializableSupplier<CsvMapper> mapperFactory,
SerializableFunction<CsvMapper, CsvSchema> schemaGenerator,
TypeInformation<T> typeInformation) {
return new CsvReaderFormat<>(
mapperFactory,
schemaGenerator,
typeInformation.getTypeClass(),
(value, context) -> value,
typeInformation,
false);
} | 3.68 |
framework_UidlRequestHandler_createRpcHandler | /**
* Creates the ServerRpcHandler to use.
*
* @since 7.7
* @return the ServerRpcHandler to use
*/
protected ServerRpcHandler createRpcHandler() {
return new ServerRpcHandler();
} | 3.68 |
hudi_OptionsResolver_getPreCombineField | /**
* Returns the preCombine field
* or null if the value is set as {@link FlinkOptions#NO_PRE_COMBINE}.
*/
public static String getPreCombineField(Configuration conf) {
final String preCombineField = conf.getString(FlinkOptions.PRECOMBINE_FIELD);
return preCombineField.equals(FlinkOptions.NO_PRE_COMBINE) ? null : preCombineField;
} | 3.68 |
framework_Range_getEnd | /**
* Returns the <em>exclusive</em> end point of this range.
*
* @return the end point of this range
*/
public int getEnd() {
return end;
} | 3.68 |
hadoop_AbstractS3AStatisticsSource_incCounter | /**
* Increment a named counter by the given value.
* @param name counter name
* @param value value to increment by
* @return the updated value or, if the counter is unknown: 0
*/
public long incCounter(String name, long value) {
return ioStatistics.incrementCounter(name, value);
} | 3.68 |
hbase_BucketCache_freeBucketEntry | /**
* Actually free the {@link BucketEntry}; this may only be invoked when the
* {@link BucketEntry#refCnt} becomes 0.
*/
void freeBucketEntry(BucketEntry bucketEntry) {
bucketAllocator.freeBlock(bucketEntry.offset(), bucketEntry.getLength());
realCacheSize.add(-1 * bucketEntry.getLength());
} | 3.68 |
Activiti_BpmnDeploymentHelper_copyDeploymentValuesToProcessDefinitions | /**
* Updates all the process definition entities to match the deployment's values for tenant,
* engine version, and deployment id.
*/
public void copyDeploymentValuesToProcessDefinitions(DeploymentEntity deployment,
List<ProcessDefinitionEntity> processDefinitions) {
String engineVersion = deployment.getEngineVersion();
String tenantId = deployment.getTenantId();
String deploymentId = deployment.getId();
for (ProcessDefinitionEntity processDefinition : processDefinitions) {
// Backwards compatibility
if (engineVersion != null) {
processDefinition.setEngineVersion(engineVersion);
}
// process definition inherits the tenant id
if (tenantId != null) {
processDefinition.setTenantId(tenantId);
}
processDefinition.setDeploymentId(deploymentId);
}
} | 3.68 |
hbase_TableDescriptorBuilder_getRegionSplitPolicyClassName | /**
* This gets the class associated with the region split policy which determines when a region
* split should occur. The class used by default is defined in
* org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
* @return the class name of the region split policy for this table. If this returns null, the
* default split policy is used.
*/
@Override
public String getRegionSplitPolicyClassName() {
return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
} | 3.68 |
hbase_QuotaObserverChore_pruneOldRegionReports | /**
* Removes region reports over a certain age.
*/
void pruneOldRegionReports() {
final long now = EnvironmentEdgeManager.currentTime();
final long pruneTime = now - regionReportLifetimeMillis;
final int numRemoved = quotaManager.pruneEntriesOlderThan(pruneTime, this);
if (LOG.isTraceEnabled()) {
LOG.trace("Removed " + numRemoved + " old region size reports that were older than "
+ pruneTime + ".");
}
} | 3.68 |
flink_BufferReaderWriterUtil_positionToNextBuffer | /** Skip one data buffer from the channel's current position by headerBuffer. */
public static void positionToNextBuffer(FileChannel channel, ByteBuffer headerBuffer)
throws IOException {
headerBuffer.clear();
if (!tryReadByteBuffer(channel, headerBuffer)) {
throwCorruptDataException();
}
headerBuffer.flip();
try {
headerBuffer.getShort();
headerBuffer.getShort();
long bufferSize = headerBuffer.getInt();
channel.position(channel.position() + bufferSize);
} catch (BufferUnderflowException | IllegalArgumentException e) {
// buffer underflow if header buffer is undersized
// IllegalArgumentException if size is outside memory segment size
throwCorruptDataException();
}
} | 3.68 |
morf_SqlDialect_getAutoNumberName | /**
* Gets the autonumber name for the {@code destinationReference}.
*
* @param destinationReference the table name to get the autonumber name for.
* @return the autonumber name.
*/
protected String getAutoNumberName(String destinationReference) {
String autoNumberName = destinationReference;
if (autoNumberName.contains("_")) {
autoNumberName = autoNumberName.substring(0, autoNumberName.lastIndexOf('_'));
}
return autoNumberName;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_setToString | /**
* Helper to format a string for log output from Set<String>
*/
private String setToString(Set<String> set) {
StringBuilder sb = new StringBuilder();
int i = 1;
for (String s : set) {
sb.append("/" + s);
if (i != set.size()) {
sb.append(", ");
}
i++;
}
return sb.toString();
} | 3.68 |
querydsl_GeometryExpression_dimension | /**
* The inherent dimension of this geometric object, which must be less than or equal
* to the coordinate dimension. In non-homogeneous collections, this will return the largest topological
* dimension of the contained objects.
*
* @return dimension
*/
public NumberExpression<Integer> dimension() {
if (dimension == null) {
dimension = Expressions.numberOperation(Integer.class, SpatialOps.DIMENSION, mixin);
}
return dimension;
} | 3.68 |
zxing_HybridBinarizer_thresholdBlock | /**
* Applies a single threshold to a block of pixels.
*/
private static void thresholdBlock(byte[] luminances,
int xoffset,
int yoffset,
int threshold,
int stride,
BitMatrix matrix) {
for (int y = 0, offset = yoffset * stride + xoffset; y < BLOCK_SIZE; y++, offset += stride) {
for (int x = 0; x < BLOCK_SIZE; x++) {
// Comparison needs to be <= so that black == 0 pixels are black even if the threshold is 0.
if ((luminances[offset + x] & 0xFF) <= threshold) {
matrix.set(xoffset + x, yoffset + y);
}
}
}
} | 3.68 |
hbase_RpcServer_channelIO | /**
* Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}.
* Only one of readCh or writeCh should be non-null.
* @param readCh read channel
* @param writeCh write channel
* @param buf buffer to read or write into/out of
* @return bytes written
* @throws java.io.IOException e
* @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)
*/
private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh,
ByteBuffer buf) throws IOException {
int originalLimit = buf.limit();
int initialRemaining = buf.remaining();
int ret = 0;
while (buf.remaining() > 0) {
try {
int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
buf.limit(buf.position() + ioSize);
ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf);
if (ret < ioSize) {
break;
}
} finally {
buf.limit(originalLimit);
}
}
int nBytes = initialRemaining - buf.remaining();
return (nBytes > 0) ? nBytes : ret;
} | 3.68 |
Activiti_LongToInteger_primTransform | /**
* {@inheritDoc}
*/
@Override
protected Object primTransform(Object anObject) throws Exception {
return Integer.valueOf(((Long) anObject).toString());
} | 3.68 |
hbase_TableScanResource_getIterator | // jackson needs an iterator for streaming
@JsonProperty("Row")
public Iterator<RowModel> getIterator() {
return Row.iterator();
} | 3.68 |
flink_StateDescriptor_getName | /** Returns the name of this {@code StateDescriptor}. */
public String getName() {
return name;
} | 3.68 |
framework_VaadinSession_setLocale | /**
* Sets the default locale for this session.
*
* By default this is the preferred locale of the user using the
* application. In most cases it is read from the browser defaults.
*
* @param locale
* the Locale object.
*
*/
public void setLocale(Locale locale) {
assert hasLock();
this.locale = locale;
} | 3.68 |
flink_HiveParserDDLSemanticAnalyzer_convertAlterTableAddParts | /**
* Add one or more partitions to a table. Useful when the data has been copied to the right
* location by some other process.
*/
private Operation convertAlterTableAddParts(String[] qualified, CommonTree ast) {
// ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists?
// alterStatementSuffixAddPartitionsElement+)
boolean ifNotExists = ast.getChild(0).getType() == HiveASTParser.TOK_IFNOTEXISTS;
Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
boolean isView = tab.isView();
validateAlterTableType(tab);
int numCh = ast.getChildCount();
int start = ifNotExists ? 1 : 0;
String currentLocation = null;
Map<String, String> currentPartSpec = null;
// Parser has done some verification, so the order of tokens doesn't need to be verified
// here.
List<CatalogPartitionSpec> specs = new ArrayList<>();
List<CatalogPartition> partitions = new ArrayList<>();
for (int num = start; num < numCh; num++) {
HiveParserASTNode child = (HiveParserASTNode) ast.getChild(num);
switch (child.getToken().getType()) {
case HiveASTParser.TOK_PARTSPEC:
if (currentPartSpec != null) {
specs.add(new CatalogPartitionSpec(currentPartSpec));
Map<String, String> props = new HashMap<>();
if (currentLocation != null) {
props.put(TABLE_LOCATION_URI, currentLocation);
}
partitions.add(new CatalogPartitionImpl(props, null));
currentLocation = null;
}
currentPartSpec = getPartSpec(child);
validatePartitionValues(currentPartSpec); // validate reserved values
break;
case HiveASTParser.TOK_PARTITIONLOCATION:
// if location specified, set in partition
if (isView) {
throw new ValidationException("LOCATION clause illegal for view partition");
}
currentLocation =
HiveParserBaseSemanticAnalyzer.unescapeSQLString(
child.getChild(0).getText());
break;
default:
throw new ValidationException("Unknown child: " + child);
}
}
// add the last one
if (currentPartSpec != null) {
specs.add(new CatalogPartitionSpec(currentPartSpec));
Map<String, String> props = new HashMap<>();
if (currentLocation != null) {
props.put(TABLE_LOCATION_URI, currentLocation);
}
partitions.add(new CatalogPartitionImpl(props, null));
}
ObjectIdentifier tableIdentifier =
tab.getDbName() == null
? parseObjectIdentifier(tab.getTableName())
: catalogRegistry.qualifyIdentifier(
UnresolvedIdentifier.of(tab.getDbName(), tab.getTableName()));
return new AddPartitionsOperation(tableIdentifier, ifNotExists, specs, partitions);
} | 3.68 |
framework_AbstractTransactionalQuery_commit | /**
* Commits (if not in auto-commit mode) and releases the active connection.
*
* @throws SQLException
* if not in a transaction managed by this query
*/
public void commit() throws UnsupportedOperationException, SQLException {
if (!isInTransaction()) {
throw new SQLException("No active transaction");
}
if (!activeConnection.getAutoCommit()) {
activeConnection.commit();
}
connectionPool.releaseConnection(activeConnection);
activeConnection = null;
} | 3.68 |
pulsar_PulsarClientImpl_timer | /** visible for pulsar-functions. **/
public Timer timer() {
return timer;
} | 3.68 |
hbase_TableInputFormatBase_closeTable | /**
* Close the Table and related objects that were initialized via
* {@link #initializeTable(Connection, TableName)}.
*/
protected void closeTable() throws IOException {
close(table, connection);
table = null;
connection = null;
} | 3.68 |
graphhopper_PrepareLandmarks_setMaximumWeight | /**
* @see LandmarkStorage#setMaximumWeight(double)
*/
public PrepareLandmarks setMaximumWeight(double maximumWeight) {
lms.setMaximumWeight(maximumWeight);
return this;
} | 3.68 |
framework_ContainerHierarchicalWrapper_removeContainerProperty | /**
* Removes the specified Property from the underlying container and from the
* hierarchy.
* <p>
* Note : The Property will be removed from all Items in the Container.
* </p>
*
* @param propertyId
* the ID of the Property to remove.
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
* @throws UnsupportedOperationException
* if the removeContainerProperty is not supported.
*/
@Override
public boolean removeContainerProperty(Object propertyId)
throws UnsupportedOperationException {
return container.removeContainerProperty(propertyId);
} | 3.68 |
hbase_StoreScanner_selectScannersFrom | /**
* Filters the given list of scanners using Bloom filter, time range, and TTL.
* <p>
* Will be overridden by testcase so declared as protected.
*/
protected List<KeyValueScanner> selectScannersFrom(HStore store,
List<? extends KeyValueScanner> allScanners) {
boolean memOnly;
boolean filesOnly;
if (scan instanceof InternalScan) {
InternalScan iscan = (InternalScan) scan;
memOnly = iscan.isCheckOnlyMemStore();
filesOnly = iscan.isCheckOnlyStoreFiles();
} else {
memOnly = false;
filesOnly = false;
}
List<KeyValueScanner> scanners = new ArrayList<>(allScanners.size());
// We can only exclude store files based on TTL if minVersions is set to 0.
// Otherwise, we might have to return KVs that have technically expired.
long expiredTimestampCutoff = minVersions == 0 ? oldestUnexpiredTS : Long.MIN_VALUE;
// include only those scan files which pass all filters
for (KeyValueScanner kvs : allScanners) {
boolean isFile = kvs.isFileScanner();
if ((!isFile && filesOnly) || (isFile && memOnly)) {
kvs.close();
continue;
}
if (kvs.shouldUseScanner(scan, store, expiredTimestampCutoff)) {
scanners.add(kvs);
} else {
kvs.close();
}
}
return scanners;
} | 3.68 |
dubbo_Bytes_bytes2int | /**
* to int.
*
* @param b byte array.
* @param off offset.
* @return int.
*/
public static int bytes2int(byte[] b, int off) {
return ((b[off + 3] & 0xFF) << 0)
+ ((b[off + 2] & 0xFF) << 8)
+ ((b[off + 1] & 0xFF) << 16)
+ ((b[off + 0]) << 24);
} | 3.68 |
flink_FileWriterBucket_getNew | /**
* Creates a new empty {@code Bucket}.
*
* @param bucketId the identifier of the bucket, as returned by the {@link BucketAssigner}.
* @param bucketPath the path to where the part files for the bucket will be written to.
* @param bucketWriter the {@link BucketWriter} used to write part files in the bucket.
* @param <IN> the type of input elements to the sink.
* @param outputFileConfig the part file configuration.
* @return The new Bucket.
*/
static <IN> FileWriterBucket<IN> getNew(
final String bucketId,
final Path bucketPath,
final BucketWriter<IN, String> bucketWriter,
final RollingPolicy<IN, String> rollingPolicy,
final OutputFileConfig outputFileConfig) {
return new FileWriterBucket<>(
bucketId, bucketPath, bucketWriter, rollingPolicy, outputFileConfig);
} | 3.68 |
hbase_Response_setHeaders | /**
* @param headers the HTTP response headers
*/
public void setHeaders(Header[] headers) {
this.headers = headers;
} | 3.68 |
hbase_ZKProcedureUtil_getAcquireBarrierNode | /**
* Get the full znode path for the node used by the coordinator to trigger a global barrier
* acquire on each subprocedure.
* @param controller controller running the procedure
* @param opInstanceName name of the running procedure instance (not the procedure description).
* @return full znode path to the prepare barrier/start node
*/
public static String getAcquireBarrierNode(ZKProcedureUtil controller, String opInstanceName) {
return ZNodePaths.joinZNode(controller.acquiredZnode, opInstanceName);
} | 3.68 |
flink_WindowedStream_apply | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given reducer.
*
* @param reduceFunction The reduce function that is used for incremental aggregation.
* @param function The window function.
* @param resultType Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
* @deprecated Use {@link #reduce(ReduceFunction, WindowFunction, TypeInformation)} instead.
*/
@Deprecated
public <R> SingleOutputStreamOperator<R> apply(
ReduceFunction<T> reduceFunction,
WindowFunction<T, R, K, W> function,
TypeInformation<R> resultType) {
// clean the closures
function = input.getExecutionEnvironment().clean(function);
reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);
final String opName = builder.generateOperatorName();
final String opDesc = builder.generateOperatorDescription(reduceFunction, function);
OneInputStreamOperator<T, R> operator = builder.reduce(reduceFunction, function);
return input.transform(opName, resultType, operator).setDescription(opDesc);
} | 3.68 |
hadoop_ECChunk_toBuffers | /**
* Convert an array of this chunks to an array of ByteBuffers
* @param chunks chunks to convert into buffers
* @return an array of ByteBuffers
*/
public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
ByteBuffer[] buffers = new ByteBuffer[chunks.length];
ECChunk chunk;
for (int i = 0; i < chunks.length; i++) {
chunk = chunks[i];
if (chunk == null) {
buffers[i] = null;
} else {
buffers[i] = chunk.getBuffer();
}
}
return buffers;
} | 3.68 |
pulsar_WindowManager_getSlidingCountTimestamps | /**
* Scans the event queue and returns the list of event ts
* falling between startTs (exclusive) and endTs (inclusive)
* at each sliding interval count.
*
* @param startTs the start timestamp (exclusive)
* @param endTs the end timestamp (inclusive)
* @param slidingCount the sliding interval count
* @return the list of event ts
*/
public List<Long> getSlidingCountTimestamps(long startTs, long endTs, int slidingCount) {
List<Long> timestamps = new ArrayList<>();
if (endTs > startTs) {
int count = 0;
long ts = Long.MIN_VALUE;
for (Event<T> event : queue) {
if (event.getTimestamp() > startTs && event.getTimestamp() <= endTs) {
ts = Math.max(ts, event.getTimestamp());
if (++count % slidingCount == 0) {
timestamps.add(ts);
}
}
}
}
return timestamps;
} | 3.68 |
hibernate-validator_ValidationProviderHelper_determineRequiredQualifiers | /**
* Returns the qualifiers to be used for registering a validator or validator factory.
*/
@SuppressWarnings("serial")
private static Set<Annotation> determineRequiredQualifiers(boolean isDefaultProvider,
boolean isHibernateValidator) {
HashSet<Annotation> qualifiers = newHashSet( 3 );
if ( isDefaultProvider ) {
qualifiers.add(
new AnnotationLiteral<Default>() {
}
);
}
if ( isHibernateValidator ) {
qualifiers.add(
new AnnotationLiteral<HibernateValidator>() {
}
);
}
qualifiers.add(
new AnnotationLiteral<Any>() {
}
);
return qualifiers;
} | 3.68 |
hbase_MD5Hash_getMD5AsHex | /**
* Given a byte array, returns its MD5 hash as a hex string. Only "length" number of bytes
* starting at "offset" within the byte array are used.
* @param key the key to hash (variable length byte array)
* @return MD5 hash as a 32 character hex string.
*/
public static String getMD5AsHex(byte[] key, int offset, int length) {
try {
MessageDigest md = MessageDigest.getInstance("MD5");
md.update(key, offset, length);
byte[] digest = md.digest();
return new String(Hex.encodeHex(digest));
} catch (NoSuchAlgorithmException e) {
// this should never happen unless the JDK is messed up.
throw new RuntimeException("Error computing MD5 hash", e);
}
} | 3.68 |
framework_AbstractComponentContainer_getComponentIterator | /**
* {@inheritDoc}
*
* @deprecated As of 7.0, use {@link #iterator()} instead.
*/
@Deprecated
@Override
public Iterator<Component> getComponentIterator() {
return iterator();
} | 3.68 |
framework_Table_getVisibleCells | /**
* Gets the cached visible table contents.
*
* @return the cached visible table contents.
*/
private Object[][] getVisibleCells() {
if (pageBuffer == null) {
refreshRenderedCells();
}
return pageBuffer;
} | 3.68 |
flink_MemoryManager_verifyEmpty | /**
* Checks if the memory manager's memory is completely available (nothing allocated at the
* moment).
*
* @return True, if the memory manager is empty and valid, false if it is not empty or
* corrupted.
*/
public boolean verifyEmpty() {
return memoryBudget.verifyEmpty();
} | 3.68 |
framework_VUpload_disableUpload | /** For internal use only. May be removed or replaced in the future. */
public void disableUpload() {
if (!submitted) {
// Cannot disable the fileupload while submitting or the file won't
// be submitted at all
fu.getElement().setPropertyBoolean("disabled", true);
}
enabled = false;
updateEnabledForSubmitButton();
} | 3.68 |
hadoop_RPCUtil_getRemoteException | /**
* Returns an instance of {@link YarnException}.
* @param message yarn exception message.
* @return instance of YarnException.
*/
public static YarnException getRemoteException(String message) {
return new YarnException(message);
} | 3.68 |
hadoop_ServiceLauncher_exitWithUsageMessage | /**
* Exit with the usage exit code {@link #EXIT_USAGE}
* and message {@link #USAGE_MESSAGE}.
* @throws ExitUtil.ExitException if exceptions are disabled
*/
protected static void exitWithUsageMessage() {
exitWithMessage(EXIT_USAGE, USAGE_MESSAGE);
} | 3.68 |
hbase_CatalogFamilyFormat_parseReplicaIdFromServerColumn | /**
* Parses the replicaId from the server column qualifier. See top of the class javadoc for the
* actual meta layout
* @param serverColumn the column qualifier
* @return an int for the replicaId
*/
static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
String serverStr = Bytes.toString(serverColumn);
Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
if (matcher.matches() && matcher.groupCount() > 0) {
String group = matcher.group(1);
if (group != null && group.length() > 0) {
return Integer.parseInt(group.substring(1), 16);
} else {
return 0;
}
}
return -1;
} | 3.68 |
flink_InPlaceMutableHashTable_reset | /** Seeks to the beginning. */
public void reset() {
seekOutput(segments.get(0), 0);
currentSegmentIndex = 0;
} | 3.68 |
hudi_ArrayColumnReader_collectDataFromParquetPage | /**
* Collects data from a parquet page and returns the final row index where it stopped. The
* returned index can be equal to or less than total.
*
* @param total maximum number of rows to collect
* @param lcv column vector to do initial setup in data collection time
* @param valueList collection of values that will be fed into the vector later
* @param category
* @return int
* @throws IOException
*/
private int collectDataFromParquetPage(
int total, HeapArrayVector lcv, List<Object> valueList, LogicalType category)
throws IOException {
int index = 0;
/*
* Here is a nested loop for collecting all values from a parquet page.
* A column of array type can be considered as a list of lists, so the two loops are as below:
* 1. The outer loop iterates on rows (index is a row index, so points to a row in the batch), e.g.:
* [0, 2, 3] <- index: 0
* [NULL, 3, 4] <- index: 1
*
* 2. The inner loop iterates on values within a row (sets all data from parquet data page
* for an element in ListColumnVector), so fetchNextValue returns values one-by-one:
* 0, 2, 3, NULL, 3, 4
*
* As described below, the repetition level (repetitionLevel != 0)
* can be used to decide when we'll start to read values for the next list.
*/
while (!eof && index < total) {
// add element to ListColumnVector one by one
lcv.offsets[index] = valueList.size();
/*
* Let's collect all values for a single list.
* Repetition level = 0 means that a new list started there in the parquet page,
* in that case, let's exit from the loop, and start to collect value for a new list.
*/
do {
/*
* Definition level = 0 when a NULL value was returned instead of a list
* (this is not the same as a NULL value in of a list).
*/
if (definitionLevel == 0) {
lcv.setNullAt(index);
}
valueList.add(
isCurrentPageDictionaryEncoded
? dictionaryDecodeValue(category, (Integer) lastValue)
: lastValue);
} while (fetchNextValue(category) && (repetitionLevel != 0));
lcv.lengths[index] = valueList.size() - lcv.offsets[index];
index++;
}
return index;
} | 3.68 |
hadoop_BlockBlobAppendStream_getBlockList | /**
* Get the list of block entries. It is used for testing purposes only.
* @return List of block entries.
*/
@VisibleForTesting
List<BlockEntry> getBlockList() throws StorageException, IOException {
return blob.downloadBlockList(
BlockListingFilter.COMMITTED,
new BlobRequestOptions(),
opContext);
} | 3.68 |
framework_FileParameters_setName | /**
* Sets the file name.
*
* @param name
* Name of the file.
*/
public void setName(String name) {
this.name = name;
} | 3.68 |
zxing_ECIStringBuilder_length | /**
* Short for {@code toString().length()} (if possible, use {@link #isEmpty()} instead)
*
* @return length of string representation in characters
*/
public int length() {
return toString().length();
} | 3.68 |
framework_StaticSection_writeCellState | /**
*
* Writes declarative design for the cell using its {@code state} to the
* given table cell element.
* <p>
* The method is used instead of StaticCell::writeDesign because
* sometimes there is no reference to the cell which should be written
* (merged cell) but only its state is available (the cell is virtual
* and is not stored).
*
* @param cellElement
* Element to write design to
* @param context
* the design context
* @param state
* a cell state
*/
protected void writeCellState(Element cellElement,
DesignContext context, CellState state) {
switch (state.type) {
case TEXT:
cellElement.attr("plain-text", true);
cellElement
.appendText(Optional.ofNullable(state.text).orElse(""));
break;
case HTML:
cellElement.append(Optional.ofNullable(state.html).orElse(""));
break;
case WIDGET:
cellElement.appendChild(
context.createElement((Component) state.connector));
break;
}
} | 3.68 |
hadoop_JavaCommandLineBuilder_getJavaBinary | /**
* Get the java binary. This is called in the constructor so don't try and
* do anything other than return a constant.
* @return the path to the Java binary
*/
protected String getJavaBinary() {
return ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java";
} | 3.68 |
hadoop_MountTableProcedure_disableWrite | /**
* Disable write by making the mount point readonly.
*
* @param mount the mount point to set readonly.
* @param conf the configuration of the router.
*/
static void disableWrite(String mount, Configuration conf)
throws IOException {
setMountReadOnly(mount, true, conf);
} | 3.68 |
flink_BinaryRawValueData_fromObject | /** Creates a {@link BinaryRawValueData} instance from the given Java object. */
public static <T> BinaryRawValueData<T> fromObject(T javaObject) {
if (javaObject == null) {
return null;
}
return new BinaryRawValueData<>(javaObject);
} | 3.68 |
flink_OperatingSystem_isFreeBSD | /**
* Checks whether the operating system this JVM runs on is FreeBSD.
*
* @return <code>true</code> if the operating system this JVM runs on is FreeBSD, <code>false
* </code> otherwise
*/
public static boolean isFreeBSD() {
return getCurrentOperatingSystem() == FREE_BSD;
} | 3.68 |
hbase_RSGroupInfoManagerImpl_waitForRegionMovement | /**
* Wait for all the region move to complete. Keep waiting for other region movement completion
* even if some region movement fails.
*/
private void waitForRegionMovement(List<Pair<RegionInfo, Future<byte[]>>> regionMoveFutures,
Set<String> failedRegions, String sourceGroupName, int retryCount) {
LOG.info("Moving {} region(s) to group {}, current retry={}", regionMoveFutures.size(),
sourceGroupName, retryCount);
for (Pair<RegionInfo, Future<byte[]>> pair : regionMoveFutures) {
try {
pair.getSecond().get();
if (
masterServices.getAssignmentManager().getRegionStates().getRegionState(pair.getFirst())
.isFailedOpen()
) {
failedRegions.add(pair.getFirst().getRegionNameAsString());
}
} catch (InterruptedException e) {
// Don't return from here; let's wait for other regions to complete movement.
failedRegions.add(pair.getFirst().getRegionNameAsString());
LOG.warn("Sleep interrupted", e);
} catch (Exception e) {
failedRegions.add(pair.getFirst().getRegionNameAsString());
LOG.error("Move region {} to group {} failed, will retry on next attempt",
pair.getFirst().getShortNameToLog(), sourceGroupName, e);
}
}
} | 3.68 |
querydsl_BeanPath_createString | /**
* Create a new String path
*
* @param property property name
* @return property path
*/
protected StringPath createString(String property) {
return add(new StringPath(forProperty(property)));
} | 3.68 |
flink_SemanticPropUtil_addSourceFieldOffset | /**
* Creates SemanticProperties by adding an offset to each input field index of the given
* SemanticProperties.
*
* @param props The SemanticProperties to which the offset is added.
* @param numInputFields The original number of fields of the input.
* @param offset The offset that is added to each input field index.
* @return New SemanticProperties with added offset.
*/
public static SingleInputSemanticProperties addSourceFieldOffset(
SingleInputSemanticProperties props, int numInputFields, int offset) {
SingleInputSemanticProperties offsetProps = new SingleInputSemanticProperties();
if (props.getReadFields(0) != null) {
FieldSet offsetReadFields = new FieldSet();
for (int r : props.getReadFields(0)) {
offsetReadFields = offsetReadFields.addField(r + offset);
}
offsetProps.addReadFields(offsetReadFields);
}
for (int s = 0; s < numInputFields; s++) {
FieldSet targetFields = props.getForwardingTargetFields(0, s);
for (int t : targetFields) {
offsetProps.addForwardedField(s + offset, t);
}
}
return offsetProps;
} | 3.68 |
framework_MethodPropertyDescriptor_readObject | /* Special serialization to handle method references */
private void readObject(ObjectInputStream in)
throws IOException, ClassNotFoundException {
in.defaultReadObject();
try {
@SuppressWarnings("unchecked")
// business assumption; type parameters not checked at runtime
Class<BT> class1 = (Class<BT>) SerializerHelper.readClass(in);
propertyType = ReflectTools.convertPrimitiveType(class1);
String name = (String) in.readObject();
Class<?> writeMethodClass = SerializerHelper.readClass(in);
Class<?>[] paramTypes = SerializerHelper.readClassArray(in);
if (name != null) {
writeMethod = writeMethodClass.getMethod(name, paramTypes);
} else {
writeMethod = null;
}
name = (String) in.readObject();
Class<?> readMethodClass = SerializerHelper.readClass(in);
paramTypes = SerializerHelper.readClassArray(in);
if (name != null) {
readMethod = readMethodClass.getMethod(name, paramTypes);
} else {
readMethod = null;
}
} catch (SecurityException e) {
getLogger().log(Level.SEVERE, "Internal deserialization error", e);
} catch (NoSuchMethodException e) {
getLogger().log(Level.SEVERE, "Internal deserialization error", e);
}
} | 3.68 |