name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
framework_WebBrowser_getBrowserMinorVersion | /**
* Gets the minor version of the browser the user is using.
*
* @see #getBrowserMajorVersion()
*
* @return The minor version of the browser or -1 if not known.
*/
public int getBrowserMinorVersion() {
if (browserDetails == null) {
return -1;
}
return browserDetails.getBrowserMinorVersion();
} | 3.68 |
morf_AbstractSqlDialectTest_testDateToYyyymmdd | /**
* Test that DateToYyyymmdd functionality behaves as expected.
*/
@Test
public void testDateToYyyymmdd() {
String result = testDialect.getSqlFrom(dateToYyyymmdd(field("testField")));
assertEquals(expectedDateToYyyymmdd(), result);
} | 3.68 |
hudi_AbstractTableFileSystemView_fetchAllLogsMergedFileSlice | /**
* Returns a file slice with the log files of all the file slices merged.
* <p> CAUTION: the method requires that all the file slices contain only log files.
*
* @param fileGroup File Group for which the file slice belongs to
* @param maxInstantTime The max instant time
*/
private Option<FileSlice> fetchAllLogsMergedFileSlice(HoodieFileGroup fileGroup, String maxInstantTime) {
List<FileSlice> fileSlices = fileGroup.getAllFileSlicesBeforeOn(maxInstantTime).collect(Collectors.toList());
if (fileSlices.size() == 0) {
return Option.empty();
}
if (fileSlices.size() == 1) {
return Option.of(fileSlices.get(0));
}
final FileSlice latestSlice = fileSlices.get(0);
FileSlice merged = new FileSlice(latestSlice.getPartitionPath(), latestSlice.getBaseInstantTime(),
latestSlice.getFileId());
// add log files from the latest slice to the earliest
fileSlices.forEach(slice -> slice.getLogFiles().forEach(merged::addLogFile));
return Option.of(merged);
} | 3.68 |
framework_WebBrowser_isSecureConnection | /** Is the connection made using HTTPS? */
public boolean isSecureConnection() {
return secureConnection;
} | 3.68 |
hbase_LogRollRegionServerProcedureManager_start | /**
* Start accepting backup procedure requests.
*/
@Override
public void start() {
if (!BackupManager.isBackupEnabled(rss.getConfiguration())) {
LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
+ " setting");
return;
}
this.memberRpcs.start(rss.getServerName().toString(), member);
started = true;
LOG.info("Started region server backup manager.");
} | 3.68 |
hadoop_AbstractRouterPolicy_prefilterSubClusters | /**
* Filter the active sub-clusters based on the reservationId.
*
* @param reservationId the globally unique identifier for a reservation.
* @param activeSubClusters the map of ids to info for all active subclusters.
* @return the map of candidate sub-clusters; limited to the reservation's home sub-cluster when a reservationId is given
* @throws YarnException if the policy fails to choose a sub-cluster
*/
protected Map<SubClusterId, SubClusterInfo> prefilterSubClusters(
ReservationId reservationId, Map<SubClusterId, SubClusterInfo> activeSubClusters)
throws YarnException {
// if a reservation exists limit scope to the sub-cluster this
// reservation is mapped to
if (reservationId != null) {
// note this might throw YarnException if the reservation is
// unknown. This is to be expected, and should be handled by
// policy invoker.
FederationStateStoreFacade stateStoreFacade =
getPolicyContext().getFederationStateStoreFacade();
SubClusterId resSubCluster = stateStoreFacade.getReservationHomeSubCluster(reservationId);
SubClusterInfo subClusterInfo = activeSubClusters.get(resSubCluster);
return Collections.singletonMap(resSubCluster, subClusterInfo);
}
return activeSubClusters;
} | 3.68 |
framework_Flash_setCodetype | /**
* This attribute specifies the content type of data expected when
* downloading the object specified by classid. This attribute is optional
* but recommended when classid is specified since it allows the user agent
* to avoid loading information for unsupported content types. When absent,
* it defaults to the value of the type attribute.
*
* @param codetype
* the codetype to set.
*/
public void setCodetype(String codetype) {
if (codetype != getState().codetype || (codetype != null
&& !codetype.equals(getState().codetype))) {
getState().codetype = codetype;
requestRepaint();
}
} | 3.68 |
morf_InsertStatement_values | /**
* Specifies the literal field values to insert.
*
* <p>
* Each field must have an alias which specifies the column to insert into.
* </p>
*
* @see AliasedField#as(String)
* @param fieldValues Literal field values to insert.
* @return a statement with the changes applied.
*/
public InsertStatement values(AliasedFieldBuilder... fieldValues) {
return copyOnWriteOrMutate(
b -> b.values(fieldValues),
() -> {
if (fromTable != null) {
throw new UnsupportedOperationException("Cannot specify both a literal set of field values and a from table.");
}
if (selectStatement != null) {
throw new UnsupportedOperationException("Cannot specify both a literal set of field values and a sub-select statement.");
}
this.values.addAll(Builder.Helper.buildAll(Lists.newArrayList(fieldValues)));
}
);
} | 3.68 |
flink_Path_serializeToDataOutputView | /**
* Serialize the path to {@link DataOutputView}.
*
* @param path the file path.
* @param out the data output view.
* @throws IOException if an error occurs during serialization.
*/
public static void serializeToDataOutputView(Path path, DataOutputView out) throws IOException {
URI uri = path.toUri();
if (uri == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
StringUtils.writeNullableString(uri.getScheme(), out);
StringUtils.writeNullableString(uri.getUserInfo(), out);
StringUtils.writeNullableString(uri.getHost(), out);
out.writeInt(uri.getPort());
StringUtils.writeNullableString(uri.getPath(), out);
StringUtils.writeNullableString(uri.getQuery(), out);
StringUtils.writeNullableString(uri.getFragment(), out);
}
} | 3.68 |
hbase_ScanWildcardColumnTracker_getColumnHint | /**
* Used by matcher and scan/get to get a hint of the next column to seek to after checkColumn()
* returns SKIP. Returns the next interesting column we want, or NULL if there is none (wildcard
* scanner).
* @return The next column to seek to, or null if there is none.
*/
@Override
public ColumnCount getColumnHint() {
return null;
} | 3.68 |
flink_NettyMessage_readFrom | /**
* Parses the message header part and composes a new BufferResponse with an empty data
* buffer. The data buffer will be filled in later.
*
* @param messageHeader the serialized message header.
* @param bufferAllocator the allocator for network buffer.
* @return a BufferResponse object with the header parsed and the data buffer to fill in
* later. The data buffer will be null if the target channel has been released or the
* buffer size is 0.
*/
static BufferResponse readFrom(
ByteBuf messageHeader, NetworkBufferAllocator bufferAllocator) {
InputChannelID receiverId = InputChannelID.fromByteBuf(messageHeader);
int sequenceNumber = messageHeader.readInt();
int backlog = messageHeader.readInt();
Buffer.DataType dataType = Buffer.DataType.values()[messageHeader.readByte()];
boolean isCompressed = messageHeader.readBoolean();
int size = messageHeader.readInt();
Buffer dataBuffer;
if (dataType.isBuffer()) {
dataBuffer = bufferAllocator.allocatePooledNetworkBuffer(receiverId);
} else {
dataBuffer = bufferAllocator.allocateUnPooledNetworkBuffer(size, dataType);
}
if (size == 0 && dataBuffer != null) {
// recycle the empty buffer directly, we must allocate a buffer for
// the empty data to release the credit already allocated for it
dataBuffer.recycleBuffer();
dataBuffer = null;
}
if (dataBuffer != null) {
dataBuffer.setCompressed(isCompressed);
}
return new BufferResponse(
dataBuffer, dataType, isCompressed, sequenceNumber, receiverId, backlog, size);
} | 3.68 |
framework_ServerRpcQueue_isJavascriptRpc | /**
* Checks if the given method invocation originates from Javascript.
*
* @param invocation
* the invocation to check
* @return true if the method invocation originates from javascript, false
* otherwise
*/
public static boolean isJavascriptRpc(MethodInvocation invocation) {
return invocation instanceof JavaScriptMethodInvocation;
} | 3.68 |
hbase_ReplicationPeerConfigUtil_updateReplicationBasePeerConfigs | /**
* Helper method to add/remove base peer configs from Configuration to ReplicationPeerConfig. This
* merges the user supplied peer configuration
* {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs provided as
* property hbase.replication.peer.base.configs in hbase configuration. Expected format for this
* hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". If value is empty, it will remove the existing
* key-value from peer config.
* @param conf Configuration
* @return ReplicationPeerConfig containing updated configs.
*/
public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configuration conf,
ReplicationPeerConfig receivedPeerConfig) {
ReplicationPeerConfigBuilder copiedPeerConfigBuilder =
ReplicationPeerConfig.newBuilder(receivedPeerConfig);
Map<String, String> receivedPeerConfigMap = receivedPeerConfig.getConfiguration();
String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, "");
if (basePeerConfigs.length() != 0) {
Map<String, String> basePeerConfigMap = Splitter.on(';').trimResults().omitEmptyStrings()
.withKeyValueSeparator("=").split(basePeerConfigs);
for (Map.Entry<String, String> entry : basePeerConfigMap.entrySet()) {
String configName = entry.getKey();
String configValue = entry.getValue();
// If the config is provided with empty value, for eg. k1="",
// we remove it from peer config. Providing config with empty value
// is required so that it doesn't remove any other config unknowingly.
if (Strings.isNullOrEmpty(configValue)) {
copiedPeerConfigBuilder.removeConfiguration(configName);
} else if (!receivedPeerConfigMap.getOrDefault(configName, "").equals(configValue)) {
// update the configuration if the exact config and value don't already exist
copiedPeerConfigBuilder.putConfiguration(configName, configValue);
}
}
}
return copiedPeerConfigBuilder.build();
} | 3.68 |
hadoop_DefaultAppReportFetcher_getApplicationReport | /**
* Get an application report for the specified application id from the RM and
* fall back to the Application History Server if not found in RM.
*
* @param appId id of the application to get.
* @return the ApplicationReport for the appId.
* @throws YarnException on any error.
* @throws IOException connection exception.
*/
@Override
public FetchedAppReport getApplicationReport(ApplicationId appId)
throws YarnException, IOException {
return super.getApplicationReport(applicationsManager, appId);
} | 3.68 |
flink_LogUrlUtil_getValidLogUrlPattern | /** Validate and normalize log url pattern. */
public static Optional<String> getValidLogUrlPattern(
final Configuration config, final ConfigOption<String> option) {
String pattern = config.getString(option);
if (StringUtils.isNullOrWhitespaceOnly(pattern)) {
return Optional.empty();
}
pattern = pattern.trim();
String scheme = pattern.substring(0, Math.max(pattern.indexOf(SCHEME_SEPARATOR), 0));
if (scheme.isEmpty()) {
return Optional.of(HTTP_SCHEME + SCHEME_SEPARATOR + pattern);
} else if (HTTP_SCHEME.equalsIgnoreCase(scheme) || HTTPS_SCHEME.equalsIgnoreCase(scheme)) {
return Optional.of(pattern);
} else {
LOG.warn(
"Ignore configured value for '{}': unsupported scheme {}",
option.key(),
scheme);
return Optional.empty();
}
} | 3.68 |
hbase_Bytes_compareTo | /**
* Lexicographically compare two arrays.
* @param buffer1 left operand
* @param buffer2 right operand
* @param offset1 Where to start comparing in the left buffer
* @param offset2 Where to start comparing in the right buffer
* @param length1 How much to compare from the left buffer
* @param length2 How much to compare from the right buffer
* @return 0 if equal, < 0 if left is less than right, etc.
*/
@Override
public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2,
int length2) {
// Short circuit equal case
if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) {
return 0;
}
final int stride = 8;
final int minLength = Math.min(length1, length2);
int strideLimit = minLength & ~(stride - 1);
final long offset1Adj = offset1 + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET;
final long offset2Adj = offset2 + UnsafeAccess.BYTE_ARRAY_BASE_OFFSET;
int i;
/*
* Compare 8 bytes at a time. Benchmarking on x86 shows a stride of 8 bytes is no slower
* than 4 bytes even on 32-bit. On the other hand, it is substantially faster on 64-bit.
*/
for (i = 0; i < strideLimit; i += stride) {
long lw = HBasePlatformDependent.getLong(buffer1, offset1Adj + i);
long rw = HBasePlatformDependent.getLong(buffer2, offset2Adj + i);
if (lw != rw) {
if (!UnsafeAccess.LITTLE_ENDIAN) {
return ((lw + Long.MIN_VALUE) < (rw + Long.MIN_VALUE)) ? -1 : 1;
}
/*
* We want to compare only the first index where left[index] != right[index]. This
* corresponds to the least significant nonzero byte in lw ^ rw, since lw and rw are
* little-endian. Long.numberOfTrailingZeros(diff) tells us the least significant
* nonzero bit, and zeroing out the first three bits of L.nTZ gives us the shift to get
* that least significant nonzero byte. This comparison logic is based on UnsignedBytes
* comparator from guava v21
*/
int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
}
}
// The epilogue to cover the last (minLength % stride) elements.
for (; i < minLength; i++) {
int a = (buffer1[offset1 + i] & 0xFF);
int b = (buffer2[offset2 + i] & 0xFF);
if (a != b) {
return a - b;
}
}
return length1 - length2;
} | 3.68 |
AreaShop_GeneralRegion_compareTo | /**
* Compare this region to another region by name.
* @param o The region to compare to
* @return 0 if the names are the same, below zero if this region is earlier in the alphabet, otherwise above zero
*/
@Override
public int compareTo(GeneralRegion o) {
return getName().compareTo(o.getName());
} | 3.68 |
pulsar_ResourceGroup_getRgRemoteUsageByteCount | // Visibility for unit testing
protected static double getRgRemoteUsageByteCount (String rgName, String monClassName, String brokerName) {
return rgRemoteUsageReportsBytes.labels(rgName, monClassName, brokerName).get();
} | 3.68 |
morf_XmlDataSetProducer_openPullParser | /**
* @param inputStream The inputstream to read from
* @return A new pull parser
*/
private static XMLStreamReader openPullParser(InputStream inputStream) {
try {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream, Charsets.UTF_8));
Reader reader;
int version = Version2to4TransformingReader.readVersion(bufferedReader);
if (version == 2 || version == 3) {
reader = new Version2to4TransformingReader(bufferedReader, version);
} else {
reader = bufferedReader;
}
if (version > 4) {
throw new IllegalStateException("Unknown XML dataset format: "+version +" This dataset has been produced by a later version of Morf");
}
return FACTORY.createXMLStreamReader(reader);
} catch (XMLStreamException|FactoryConfigurationError e) {
throw new RuntimeException(e);
}
} | 3.68 |
hadoop_RouterDelegationTokenSecretManager_removeStoredMasterKey | /**
* The Router supports removing the master key.
* During this process, the Facade will call the specific StateStore to remove the MasterKey.
*
* @param delegationKey DelegationKey
*/
@Override
public void removeStoredMasterKey(DelegationKey delegationKey) {
try {
federationFacade.removeStoredMasterKey(delegationKey);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in removing master key with KeyID: {}.", delegationKey.getKeyId());
ExitUtil.terminate(1, e);
}
}
} | 3.68 |
cron-utils_TimeNode_getNearestBackwardValue | /**
* We return the same reference value if it matches, or the previous one if it does not match.
* Then we start applying shifts.
* This way we ensure same value is returned if no shift is requested.
*
* @param reference - reference value
* @param shiftsToApply - shifts to apply
* @return NearestValue instance, never null. Holds information on nearest (backward) value and shifts performed.
*/
@VisibleForTesting
NearestValue getNearestBackwardValue(final int reference, int shiftsToApply) {
final List<Integer> temporaryValues = new ArrayList<>(this.values);
Collections.reverse(temporaryValues);
int index = 0;
boolean foundSmaller = false;
final AtomicInteger shift = new AtomicInteger(0);
if (!temporaryValues.contains(reference)) {
for (final Integer value : temporaryValues) {
if (value < reference) {
index = temporaryValues.indexOf(value);
shiftsToApply--;//we just moved a position!
foundSmaller = true;
break;
}
}
if (!foundSmaller) {
shift.incrementAndGet();
}
} else {
index = temporaryValues.indexOf(reference);
}
int value = temporaryValues.get(index);
for (int j = 0; j < shiftsToApply; j++) {
value = getValueFromList(temporaryValues, index + 1, shift);
index = temporaryValues.indexOf(value);
}
return new NearestValue(value, shift.get());
} | 3.68 |
framework_VAbsoluteLayout_getWidgetSlotWidth | /**
* Get the pixel width of a slot in the layout.
*
* @param child
* The widget in the layout.
* @return Returns the size in pixels, or 0 if child is not in the layout
*/
public int getWidgetSlotWidth(Widget child) {
AbsoluteWrapper wrapper = getChildWrapper(child);
if (wrapper != null) {
return wrapper.getOffsetWidth();
}
return 0;
} | 3.68 |
framework_AbsoluteLayoutConnector_getWidget | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.AbstractComponentConnector#getWidget()
*/
@Override
public VAbsoluteLayout getWidget() {
return (VAbsoluteLayout) super.getWidget();
} | 3.68 |
zxing_MatrixUtil_calculateBCHCode | // Calculate BCH (Bose-Chaudhuri-Hocquenghem) code for "value" using polynomial "poly". The BCH
// code is used for encoding type information and version information.
// Example: Calculation of version information of 7.
// f(x) is created from 7.
// - 7 = 000111 in 6 bits
// - f(x) = x^2 + x^1 + x^0
// g(x) is given by the standard (p. 67)
// - g(x) = x^12 + x^11 + x^10 + x^9 + x^8 + x^5 + x^2 + 1
// Multiply f(x) by x^(18 - 6)
// - f'(x) = f(x) * x^(18 - 6)
// - f'(x) = x^14 + x^13 + x^12
// Calculate the remainder of f'(x) / g(x)
// x^2
// __________________________________________________
// g(x) )x^14 + x^13 + x^12
// x^14 + x^13 + x^12 + x^11 + x^10 + x^7 + x^4 + x^2
// --------------------------------------------------
// x^11 + x^10 + x^7 + x^4 + x^2
//
// The remainder is x^11 + x^10 + x^7 + x^4 + x^2
// Encode it in binary: 110010010100
// The return value is 0xc94 (1100 1001 0100)
//
// Since all coefficients in the polynomials are 1 or 0, we can do the calculation by bit
// operations. We don't care if coefficients are positive or negative.
static int calculateBCHCode(int value, int poly) {
if (poly == 0) {
throw new IllegalArgumentException("0 polynomial");
}
// If poly is "1 1111 0010 0101" (version info poly), msbSetInPoly is 13. We'll subtract 1
// from 13 to make it 12.
int msbSetInPoly = findMSBSet(poly);
value <<= msbSetInPoly - 1;
// Do the division business using exclusive-or operations.
while (findMSBSet(value) >= msbSetInPoly) {
value ^= poly << (findMSBSet(value) - msbSetInPoly);
}
// Now the "value" is the remainder (i.e. the BCH code)
return value;
} | 3.68 |
hadoop_Validate_checkNotNullAndNumberOfElements | /**
* Validates that the given collection is not null and has exactly the expected number of elements.
* @param <T> the type of collection's elements.
* @param collection the argument reference to validate.
* @param numElements the expected number of elements in the collection.
* @param argName the name of the argument being validated.
*/
public static <T> void checkNotNullAndNumberOfElements(
Collection<T> collection, int numElements, String argName) {
checkNotNull(collection, argName);
checkArgument(
collection.size() == numElements,
"Number of elements in '%s' must be exactly %s, %s given.",
argName,
numElements,
collection.size()
);
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithGroupBy | /**
* Tests the group by in a select.
*/
@Test
public void testSelectWithGroupBy() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD), count(literal(1)), countDistinct(literal(1)))
.from(new TableReference(ALTERNATE_TABLE))
.groupBy(field(STRING_FIELD), field(INT_FIELD), field(FLOAT_FIELD));
String expectedSql = "SELECT stringField, COUNT(1), COUNT(DISTINCT 1) FROM " + tableName(ALTERNATE_TABLE) + " GROUP BY stringField, intField, floatField";
assertEquals("Select with count function", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
hbase_Bytes_putBigDecimal | /**
* Put a BigDecimal value out to the specified byte array position.
* @param bytes the byte array
* @param offset position in the array
* @param val BigDecimal to write out
* @return incremented offset
*/
public static int putBigDecimal(byte[] bytes, int offset, BigDecimal val) {
if (bytes == null) {
return offset;
}
byte[] valueBytes = val.unscaledValue().toByteArray();
byte[] result = new byte[valueBytes.length + SIZEOF_INT];
offset = putInt(result, offset, val.scale());
return putBytes(result, offset, valueBytes, 0, valueBytes.length);
} | 3.68 |
dubbo_ExpiringMap_startExpiring | /**
* start expiring Thread
*/
public void startExpiring() {
if (!running) {
running = true;
expirerThread.start();
}
} | 3.68 |
framework_DateCellDayEvent_clickTargetsResize | /**
* @return true if the current mouse movement is resizing
*/
private boolean clickTargetsResize() {
return weekGrid.getCalendar().isEventResizeAllowed()
&& (clickTarget == topResizeBar
|| clickTarget == bottomResizeBar);
} | 3.68 |
hbase_MasterProcedureScheduler_wakeGlobalExclusiveLock | /**
* Wake the procedures waiting for the global lock.
* @see #waitGlobalExclusiveLock(Procedure, String)
* @param procedure the procedure releasing the lock
*/
public void wakeGlobalExclusiveLock(Procedure<?> procedure, String globalId) {
schedLock();
try {
final LockAndQueue lock = locking.getGlobalLock(globalId);
lock.releaseExclusiveLock(procedure);
addToRunQueue(globalRunQueue, getGlobalQueue(globalId),
() -> procedure + " released shared lock");
int waitingCount = wakeWaitingProcedures(lock);
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.68 |
hudi_HoodieClusteringJob_validateRunningMode | // make sure that cfg.runningMode cannot be null
private static void validateRunningMode(Config cfg) {
// --mode has a higher priority than --schedule
// If we remove --schedule option in the future we need to change runningMode default value to EXECUTE
if (StringUtils.isNullOrEmpty(cfg.runningMode)) {
cfg.runningMode = cfg.runSchedule ? SCHEDULE : EXECUTE;
}
} | 3.68 |
hadoop_DatanodeVolumeInfo_getDatanodeVolumeReport | /**
* get volume report.
*/
public String getDatanodeVolumeReport() {
StringBuilder report = new StringBuilder();
report
.append("Directory: " + path)
.append("\nStorageType: " + storageType)
.append(
"\nCapacity Used: " + usedSpace + "("
+ StringUtils.byteDesc(usedSpace) + ")")
.append(
"\nCapacity Left: " + freeSpace + "("
+ StringUtils.byteDesc(freeSpace) + ")")
.append(
"\nCapacity Reserved: " + reservedSpace + "("
+ StringUtils.byteDesc(reservedSpace) + ")")
.append(
"\nReserved Space for Replicas: " + reservedSpaceForReplicas + "("
+ StringUtils.byteDesc(reservedSpaceForReplicas) + ")")
.append("\nBlocks: " + numBlocks);
return report.toString();
} | 3.68 |
flink_ContinuousFileMonitoringFunction_getInputSplitsSortedByModTime | /**
* Creates the input splits to be forwarded to the downstream tasks of the {@link
* ContinuousFileReaderOperator}. Splits are sorted <b>by modification time</b> before being
* forwarded and only splits belonging to files in the {@code eligibleFiles} list will be
* processed.
*
* @param eligibleFiles The files to process.
*/
private Map<Long, List<TimestampedFileInputSplit>> getInputSplitsSortedByModTime(
Map<Path, FileStatus> eligibleFiles) throws IOException {
Map<Long, List<TimestampedFileInputSplit>> splitsByModTime = new TreeMap<>();
if (eligibleFiles.isEmpty()) {
return splitsByModTime;
}
for (FileInputSplit split : format.createInputSplits(readerParallelism)) {
FileStatus fileStatus = eligibleFiles.get(split.getPath());
if (fileStatus != null) {
Long modTime = fileStatus.getModificationTime();
List<TimestampedFileInputSplit> splitsToForward = splitsByModTime.get(modTime);
if (splitsToForward == null) {
splitsToForward = new ArrayList<>();
splitsByModTime.put(modTime, splitsToForward);
}
splitsToForward.add(
new TimestampedFileInputSplit(
modTime,
split.getSplitNumber(),
split.getPath(),
split.getStart(),
split.getLength(),
split.getHostnames()));
}
}
return splitsByModTime;
} | 3.68 |
framework_LayoutDependencyTree_markWidthAsChanged | /**
* Marks the component's width as changed. Iterates through all components
* whose horizontal size depends on this component's size. If the dependent
* is a managed layout triggers need for horizontal layouting, otherwise
* triggers need for horizontal measuring for any dependent components of
* that component in turn. Finally triggers vertical measuring for the
* scrolling boundary, in case horizontal scrollbar has appeared or
* disappeared due to the width change.
*
* @param connector
* the connector of the component whose width has changed, should
* not be {@code null}
*/
public void markWidthAsChanged(ComponentConnector connector) {
LayoutDependency dependency = getDependency(connector.getConnectorId(),
HORIZONTAL);
dependency.markSizeAsChanged();
} | 3.68 |
flink_ConfigurationUtils_assembleDynamicConfigsStr | /**
* Creates a dynamic parameter list {@code String} of the passed configuration map.
*
* @param config A {@code Map} containing parameter/value entries that shall be used in the
* dynamic parameter list.
* @return The dynamic parameter list {@code String}.
*/
public static String assembleDynamicConfigsStr(final Map<String, String> config) {
return config.entrySet().stream()
.map(e -> String.format("-D %s=%s", e.getKey(), e.getValue()))
.collect(Collectors.joining(" "));
} | 3.68 |
framework_ShowLastItem_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Show last item in Table by using setCurrentPageFirstItemId";
} | 3.68 |
hudi_CompactionUtils_buildFromFileSlice | /**
* Generate compaction operation from file-slice.
*
* @param partitionPath Partition path
* @param fileSlice File Slice
* @param metricsCaptureFunction Metrics Capture function
* @return Compaction Operation
*/
public static HoodieCompactionOperation buildFromFileSlice(String partitionPath, FileSlice fileSlice,
Option<Function<Pair<String, FileSlice>, Map<String, Double>>> metricsCaptureFunction) {
HoodieCompactionOperation.Builder builder = HoodieCompactionOperation.newBuilder();
builder.setPartitionPath(partitionPath);
builder.setFileId(fileSlice.getFileId());
builder.setBaseInstantTime(fileSlice.getBaseInstantTime());
builder.setDeltaFilePaths(fileSlice.getLogFiles().map(lf -> lf.getPath().getName()).collect(Collectors.toList()));
if (fileSlice.getBaseFile().isPresent()) {
builder.setDataFilePath(fileSlice.getBaseFile().get().getFileName());
builder.setBootstrapFilePath(fileSlice.getBaseFile().get().getBootstrapBaseFile().map(BaseFile::getPath)
.orElse(null));
}
if (metricsCaptureFunction.isPresent()) {
builder.setMetrics(metricsCaptureFunction.get().apply(Pair.of(partitionPath, fileSlice)));
}
return builder.build();
} | 3.68 |
hbase_AbstractHBaseTool_newParser | /**
* Create the parser to use for parsing and validating the command line. Since commons-cli lacks
* the capability to validate arbitrary combinations of options, it may be helpful to bake custom
* logic into a specialized parser implementation. See LoadTestTool for examples.
* @return a new parser specific to the current tool
*/
protected CommandLineParser newParser() {
return new DefaultParser();
} | 3.68 |
morf_AnalyseTable_reverse | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(Schema)
*/
@Override
public Schema reverse(Schema schema) {
return schema;
} | 3.68 |
flink_BlobUtils_createBlobCacheService | /**
* Creates the {@link BlobCacheService} from the given configuration, fallback storage
* directory, blob view and blob server address.
*
* @param configuration for the BlobCacheService
* @param fallbackStorageDirectory fallback storage directory
* @param blobView blob view
* @param serverAddress blob server address
* @return new blob cache service instance
* @throws IOException if we could not create the blob storage directory
*/
public static BlobCacheService createBlobCacheService(
Configuration configuration,
Reference<File> fallbackStorageDirectory,
BlobView blobView,
@Nullable InetSocketAddress serverAddress)
throws IOException {
final Reference<File> storageDirectory =
createBlobStorageDirectory(configuration, fallbackStorageDirectory);
return new BlobCacheService(configuration, storageDirectory, blobView, serverAddress);
} | 3.68 |
flink_OneInputStateTransformation_setMaxParallelism | /**
* Sets the maximum parallelism of this operator.
*
* <p>The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
* number of key groups used for partitioned state.
*
* @param maxParallelism Maximum parallelism
* @return The operator with set maximum parallelism
*/
@PublicEvolving
public OneInputStateTransformation<T> setMaxParallelism(int maxParallelism) {
this.operatorMaxParallelism = OptionalInt.of(maxParallelism);
return this;
} | 3.68 |
hbase_ZKTableArchiveClient_createHFileArchiveManager | /**
* @return A new {@link HFileArchiveManager} to manage which tables' hfiles should be archived
* rather than deleted.
* @throws KeeperException if we can't reach zookeeper
* @throws IOException if an unexpected network issue occurs
*/
private synchronized HFileArchiveManager createHFileArchiveManager()
throws KeeperException, IOException {
return new HFileArchiveManager(this.connection, this.getConf());
} | 3.68 |
flink_TaskSlot_clear | /** Removes all tasks from this task slot. */
public void clear() {
tasks.clear();
} | 3.68 |
hadoop_WriteOperationHelper_putObject | /**
* PUT an object directly (i.e. not via the transfer manager).
* Byte length is calculated from the file length, or, if there is no
* file, from the content length of the header.
* @param putObjectRequest the request
* @param putOptions put object options
* @param durationTrackerFactory factory for duration tracking
* @param uploadData data to be uploaded
* @param isFile is data to be uploaded a file
*
* @return the upload initiated
* @throws IOException on problems
*/
@Retries.RetryTranslated
public PutObjectResponse putObject(PutObjectRequest putObjectRequest,
PutObjectOptions putOptions, S3ADataBlocks.BlockUploadData uploadData, boolean isFile,
DurationTrackerFactory durationTrackerFactory)
throws IOException {
return retry("Writing Object", putObjectRequest.key(), true, withinAuditSpan(getAuditSpan(),
() -> owner.putObjectDirect(putObjectRequest, putOptions, uploadData, isFile,
durationTrackerFactory)));
} | 3.68 |
hibernate-validator_ConstraintViolationAssert_assertCorrectPropertyPathStringRepresentations | /**
* Asserts that the given list of constraint violation paths matches the list of expected property paths.
*
* @param violations The violation list to verify.
* @param expectedPropertyPaths The expected property paths.
*/
public static void assertCorrectPropertyPathStringRepresentations(Set<? extends ConstraintViolation<?>> violations,
String... expectedPropertyPaths) {
List<String> actualPaths = violations.stream()
.map( ConstraintViolation::getPropertyPath )
.map( Path::toString )
.collect( Collectors.toList() );
Assertions.assertThat( actualPaths ).containsExactlyInAnyOrder( expectedPropertyPaths );
} | 3.68 |
dubbo_Configurator_compareTo | /**
* Sort by host, then by priority
* 1. the url with a specific host ip should have higher priority than 0.0.0.0
* 2. if two urls have the same host, compare by priority value;
*/
@Override
default int compareTo(Configurator o) {
if (o == null) {
return -1;
}
int ipCompare = getUrl().getHost().compareTo(o.getUrl().getHost());
// host is the same, sort by priority
if (ipCompare == 0) {
int i = getUrl().getParameter(PRIORITY_KEY, 0);
int j = o.getUrl().getParameter(PRIORITY_KEY, 0);
return Integer.compare(i, j);
} else {
return ipCompare;
}
} | 3.68 |
hbase_Append_setReturnResults | /**
* True (default) if the append operation should return the results. A client that is not
* interested in the result can save network bandwidth by setting this to false.
*/
@Override
public Append setReturnResults(boolean returnResults) {
super.setReturnResults(returnResults);
return this;
} | 3.68 |
morf_ResultSetComparer_compareKeyColumn | /**
* Works out the mismatch type for a given key column over the two result sets.
*
*/
@SuppressWarnings({ "rawtypes" })
private MismatchType compareKeyColumn(ResultSet left, ResultSet right, int keyCol, int columnType, boolean leftHasRow, boolean rightHasRow) throws SQLException {
Optional<Comparable> leftValue = leftHasRow ? Optional.ofNullable(columnToValue(left, keyCol, columnType)) : null;
Optional<Comparable> rightValue = rightHasRow ? Optional.ofNullable(columnToValue(right, keyCol, columnType)) : null;
return compareKeyValue(leftValue, rightValue);
} | 3.68 |
framework_PropertysetItem_getItem | /**
* Gets the Item whose Property set has changed.
*
* @return source object of the event as an <code>Item</code>
*/
@Override
public Item getItem() {
return (Item) getSource();
} | 3.68 |
hadoop_FederationStateStoreFacade_getCurrentKeyId | /**
* Get CurrentKeyId from stateStore.
*
* @return currentKeyId.
*/
public int getCurrentKeyId() {
return stateStore.getCurrentKeyId();
} | 3.68 |
framework_RpcManager_applyInvocation | /**
* Perform server to client RPC invocation.
*
* @param invocation
* method to invoke
*/
public void applyInvocation(MethodInvocation invocation,
ServerConnector connector) {
Method method = getMethod(invocation);
Collection<ClientRpc> implementations = connector
.getRpcImplementations(invocation.getInterfaceName());
try {
for (ClientRpc clientRpc : implementations) {
method.invoke(clientRpc, invocation.getParameters());
}
} catch (NoDataException e) {
throw new IllegalStateException("There is no information about "
+ method.getSignature()
+ ". Did you remember to compile the right widgetset?", e);
}
} | 3.68 |
framework_CalendarComponentEvents_isMonthlyMode | /**
* Gets the event's view mode. Calendar can be either in monthly or
* weekly mode, depending on the active date range.
*
* @deprecated Use {@link Calendar#isMonthlyMode()} instead
*
* @return Returns true when monthly view is active.
*/
@Deprecated
public boolean isMonthlyMode() {
return monthlyMode;
} | 3.68 |
framework_AbstractTransactionalQuery_rollback | /**
* Rolls back and releases the active connection.
*
* @throws SQLException
* if not in a transaction managed by this query
*/
public void rollback() throws UnsupportedOperationException, SQLException {
if (!isInTransaction()) {
throw new SQLException("No active transaction");
}
activeConnection.rollback();
connectionPool.releaseConnection(activeConnection);
activeConnection = null;
} | 3.68 |
flink_BeamOperatorStateStore_getMapState | /** Returns a {@link BroadcastState} wrapped in {@link MapState} interface. */
@Override
public MapState<ByteArrayWrapper, byte[]> getMapState(BeamFnApi.StateRequest request)
throws Exception {
if (!request.getStateKey().hasMultimapKeysSideInput()) {
throw new RuntimeException("Unsupported broadcast state request: " + request);
}
BeamFnApi.StateKey.MultimapKeysSideInput mapUserState =
request.getStateKey().getMultimapKeysSideInput();
// Retrieve state descriptor
byte[] data = Base64.getDecoder().decode(mapUserState.getSideInputId());
FlinkFnApi.StateDescriptor stateDescriptor = FlinkFnApi.StateDescriptor.parseFrom(data);
String stateName = PYTHON_STATE_PREFIX + stateDescriptor.getStateName();
StateDescriptor cachedStateDescriptor = stateDescriptorCache.get(stateName);
MapStateDescriptor<ByteArrayWrapper, byte[]> mapStateDescriptor;
if (cachedStateDescriptor instanceof MapStateDescriptor) {
mapStateDescriptor =
(MapStateDescriptor<ByteArrayWrapper, byte[]>) cachedStateDescriptor;
} else if (cachedStateDescriptor == null) {
mapStateDescriptor =
new MapStateDescriptor<>(
stateName, ByteArrayWrapperSerializer.INSTANCE, valueSerializer);
if (stateDescriptor.hasStateTtlConfig()) {
FlinkFnApi.StateDescriptor.StateTTLConfig stateTtlConfigProto =
stateDescriptor.getStateTtlConfig();
StateTtlConfig stateTtlConfig =
ProtoUtils.parseStateTtlConfigFromProto(stateTtlConfigProto);
mapStateDescriptor.enableTimeToLive(stateTtlConfig);
}
stateDescriptorCache.put(stateName, mapStateDescriptor);
} else {
throw new RuntimeException(
String.format(
"State name corrupt detected: "
+ "'%s' is used both as MAP state and '%s' state at the same time.",
stateName, cachedStateDescriptor.getType()));
}
// Currently, operator state is only supported to be used as broadcast state in PyFlink
final BroadcastState<ByteArrayWrapper, byte[]> broadcastState =
operatorStateBackend.getBroadcastState(mapStateDescriptor);
return new MapState<ByteArrayWrapper, byte[]>() {
@Override
public byte[] get(ByteArrayWrapper key) throws Exception {
return broadcastState.get(key);
}
@Override
public void put(ByteArrayWrapper key, byte[] value) throws Exception {
broadcastState.put(key, value);
}
@Override
public void putAll(Map<ByteArrayWrapper, byte[]> map) throws Exception {
broadcastState.putAll(map);
}
@Override
public void remove(ByteArrayWrapper key) throws Exception {
broadcastState.remove(key);
}
@Override
public boolean contains(ByteArrayWrapper key) throws Exception {
return broadcastState.contains(key);
}
@Override
public Iterable<Map.Entry<ByteArrayWrapper, byte[]>> entries() throws Exception {
return broadcastState.entries();
}
@Override
public Iterable<ByteArrayWrapper> keys() throws Exception {
final Iterator<Map.Entry<ByteArrayWrapper, byte[]>> iterator = iterator();
return () ->
new Iterator<ByteArrayWrapper>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public ByteArrayWrapper next() {
return iterator.next().getKey();
}
};
}
@Override
public Iterable<byte[]> values() throws Exception {
final Iterator<Map.Entry<ByteArrayWrapper, byte[]>> iterator = iterator();
return () ->
new Iterator<byte[]>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public byte[] next() {
return iterator.next().getValue();
}
};
}
@Override
public Iterator<Map.Entry<ByteArrayWrapper, byte[]>> iterator() throws Exception {
return broadcastState.entries().iterator();
}
@Override
public boolean isEmpty() throws Exception {
return !iterator().hasNext();
}
@Override
public void clear() {
broadcastState.clear();
}
};
} | 3.68 |
hmily_TransactionImpl_getContext | /**
* Gets context.
*
* @return the context
*/
public TransactionContext getContext() {
return context;
} | 3.68 |
morf_SchemaHomology_checkIndex | /**
* Check that two indexes match.
*
* @param tableName name of the table the indexes belong to, used in mismatch messages
* @param index1 index to compare
* @param index2 index to compare
*/
private void checkIndex(String tableName, Index index1, Index index2) {
matches("Index name on table [" + tableName + "]", index1.getName().toUpperCase(), index2.getName().toUpperCase());
matches("Index [" + index1.getName() + "] on table [" + tableName + "] uniqueness", index1.isUnique(), index2.isUnique());
matches("Index [" + index1.getName() + "] on table [" + tableName + "] columnNames", toUpperCase(index1.columnNames()), toUpperCase(index2.columnNames()));
} | 3.68 |
framework_VCalendar_isRangeSelectAllowed | /**
* Is selecting a range allowed?
*/
public boolean isRangeSelectAllowed() {
return rangeSelectAllowed;
} | 3.68 |
shardingsphere-elasticjob_ElasticJobConfigurationProperties_toJobConfiguration | /**
* Convert to job configuration.
*
* @param jobName job name
* @return job configuration
*/
public JobConfiguration toJobConfiguration(final String jobName) {
JobConfiguration result = JobConfiguration.newBuilder(jobName, shardingTotalCount)
.cron(cron).timeZone(timeZone).shardingItemParameters(shardingItemParameters).jobParameter(jobParameter)
.monitorExecution(monitorExecution).failover(failover).misfire(misfire)
.maxTimeDiffSeconds(maxTimeDiffSeconds).reconcileIntervalMinutes(reconcileIntervalMinutes)
.jobShardingStrategyType(jobShardingStrategyType).jobExecutorThreadPoolSizeProviderType(jobExecutorThreadPoolSizeProviderType).jobErrorHandlerType(jobErrorHandlerType)
.jobListenerTypes(jobListenerTypes.toArray(new String[0])).description(description).disabled(disabled).overwrite(overwrite).build();
props.stringPropertyNames().forEach(each -> result.getProps().setProperty(each, props.getProperty(each)));
return result;
} | 3.68 |
flink_HiveTableUtil_relyConstraint | // returns a constraint trait that requires RELY
public static byte relyConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_RELY);
} | 3.68 |
flink_ExternalServiceDecorator_getNamespacedExternalServiceName | /**
* Generate the namespaced name of the external rest Service from the cluster Id. This is used by other
* projects, so do not delete it.
*/
public static String getNamespacedExternalServiceName(String clusterId, String namespace) {
return getExternalServiceName(clusterId) + "." + namespace;
} | 3.68 |
hbase_ServerRpcConnection_sendConnectionHeaderResponseIfNeeded | /**
* Send the response for connection header
*/
private void sendConnectionHeaderResponseIfNeeded() throws FatalConnectionException {
Pair<RPCProtos.ConnectionHeaderResponse, CryptoAES> pair = setupCryptoCipher();
// Response the connection header if Crypto AES is enabled
if (pair == null) {
return;
}
try {
int size = pair.getFirst().getSerializedSize();
BufferChain bc;
try (ByteBufferOutputStream bbOut = new ByteBufferOutputStream(4 + size);
DataOutputStream out = new DataOutputStream(bbOut)) {
out.writeInt(size);
pair.getFirst().writeTo(out);
bc = new BufferChain(bbOut.getByteBuffer());
}
doRespond(new RpcResponse() {
@Override
public BufferChain getResponse() {
return bc;
}
@Override
public void done() {
// must switch after sending the connection header response, as the client still uses the
// original SaslClient to unwrap the data we send back
saslServer.switchToCryptoAES(pair.getSecond());
}
});
} catch (IOException ex) {
throw new UnsupportedCryptoException(ex.getMessage(), ex);
}
} | 3.68 |
hadoop_TimelineCollectorWebService_about | /**
* Return the description of the timeline web services.
*
* @param req Servlet request.
* @param res Servlet response.
* @return description of timeline web service.
*/
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8
/* , MediaType.APPLICATION_XML */})
public AboutInfo about(
@Context HttpServletRequest req,
@Context HttpServletResponse res) {
init(res);
return new AboutInfo("Timeline Collector API");
} | 3.68 |
framework_AbstractOrderedLayout_getComponentAlignment | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Layout.AlignmentHandler#getComponentAlignment(com
* .vaadin.ui.Component)
*/
@Override
public Alignment getComponentAlignment(Component childComponent) {
ChildComponentData childData = getState().childData.get(childComponent);
if (childData == null) {
throw new IllegalArgumentException(
"The given component is not a child of this layout");
}
return new Alignment(childData.alignmentBitmask);
} | 3.68 |
dubbo_Bytes_int2bytes | /**
* to byte array.
*
* @param v value.
* @param b byte array.
* @param off array offset.
*/
public static void int2bytes(int v, byte[] b, int off) {
b[off + 3] = (byte) v;
b[off + 2] = (byte) (v >>> 8);
b[off + 1] = (byte) (v >>> 16);
b[off + 0] = (byte) (v >>> 24);
} | 3.68 |
hbase_HBaseTestingUtility_getRegions | /**
* Returns all regions of the specified table
* @param tableName the table name
* @return all regions of the specified table
* @throws IOException when getting the regions fails.
*/
private List<RegionInfo> getRegions(TableName tableName) throws IOException {
try (Admin admin = getConnection().getAdmin()) {
return admin.getRegions(tableName);
}
} | 3.68 |
flink_RecordsBySplits_addFinishedSplit | /**
* Mark the split with the given ID as finished.
*
* @param splitId the ID of the finished split.
*/
public void addFinishedSplit(String splitId) {
finishedSplits.add(splitId);
} | 3.68 |
hadoop_Trash_isEnabled | /**
* Returns whether the trash is enabled for this filesystem.
*
* @return true if the trash is enabled for this filesystem, false otherwise.
*/
public boolean isEnabled() {
return trashPolicy.isEnabled();
} | 3.68 |
dubbo_ReflectUtils_desc2classArray | /**
* get class array instance.
*
* @param cl ClassLoader instance.
* @param desc desc.
* @return Class[] class array.
* @throws ClassNotFoundException
*/
private static Class<?>[] desc2classArray(ClassLoader cl, String desc) throws ClassNotFoundException {
if (desc.length() == 0) {
return EMPTY_CLASS_ARRAY;
}
List<Class<?>> cs = new ArrayList<>();
Matcher m = DESC_PATTERN.matcher(desc);
while (m.find()) {
cs.add(desc2class(cl, m.group()));
}
return cs.toArray(EMPTY_CLASS_ARRAY);
} | 3.68 |
hadoop_StageConfig_getTaskManifestDir | /**
* Directory to put task manifests into.
* @return a path under the job attempt dir.
*/
public Path getTaskManifestDir() {
return taskManifestDir;
} | 3.68 |
hbase_MutableRegionInfo_getRegionNameAsString | /** Returns Region name as a String for use in logging, etc. */
@Override
public String getRegionNameAsString() {
return RegionInfo.getRegionNameAsString(this, this.regionName);
} | 3.68 |
framework_VSlider_setOrientation | /**
* Sets the slider orientation. Updates the style names if the given
* orientation differs from previously set orientation.
*
* @param orientation
* the orientation to use
*/
public void setOrientation(SliderOrientation orientation) {
if (this.orientation != orientation) {
this.orientation = orientation;
updateStyleNames(getStylePrimaryName(), true);
}
} | 3.68 |
open-banking-gateway_WebDriverBasedPaymentInitiation_sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only | // Sending cookie with last request as it doesn't exist in browser for API tests
// null for cookieDomain is the valid value for localhost tests. This works correctly for localhost.
public SELF sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only(WebDriver driver) {
acc.sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only(driver, authSessionCookie);
return self();
} | 3.68 |
querydsl_GeometryExpressions_asGeometry | /**
* Create a new GeometryExpression
*
* @param value Geometry
* @return new GeometryExpression
*/
public static <T extends Geometry> GeometryExpression<T> asGeometry(T value) {
return asGeometry(Expressions.constant(value));
} | 3.68 |
morf_AbstractSelectStatementBuilder_getTable | /**
* Gets the first table
*
* @return the table
*/
TableReference getTable() {
return table;
} | 3.68 |
graphhopper_PrepareEncoder_getScDirMask | /**
* A bitmask for two directions
*/
public static int getScDirMask() {
return scDirMask;
} | 3.68 |
flink_SimpleVersionedSerialization_readVersionAndDeSerialize | /**
* Deserializes the version and datum from a byte array. The first four bytes will be read as
* the version and the next four bytes as the length of the datum, both in <i>big-endian</i>
* encoding. The remaining bytes will be passed to the serializer for deserialization, via
* {@link SimpleVersionedSerializer#deserialize(int, byte[])}.
*
* @param serializer The serializer to deserialize the datum with.
* @param bytes The bytes to deserialize from.
* @return The deserialized datum.
* @throws IOException Exceptions from the {@link SimpleVersionedSerializer#deserialize(int,
* byte[])} method are forwarded.
*/
public static <T> T readVersionAndDeSerialize(
SimpleVersionedSerializer<T> serializer, byte[] bytes) throws IOException {
checkNotNull(serializer, "serializer");
checkNotNull(bytes, "bytes");
checkArgument(bytes.length >= 8, "byte array below minimum length (8 bytes)");
final byte[] dataOnly = Arrays.copyOfRange(bytes, 8, bytes.length);
final int version =
((bytes[0] & 0xff) << 24)
| ((bytes[1] & 0xff) << 16)
| ((bytes[2] & 0xff) << 8)
| (bytes[3] & 0xff);
final int length =
((bytes[4] & 0xff) << 24)
| ((bytes[5] & 0xff) << 16)
| ((bytes[6] & 0xff) << 8)
| (bytes[7] & 0xff);
if (length == dataOnly.length) {
return serializer.deserialize(version, dataOnly);
} else {
throw new IOException(
"Corrupt data, conflicting lengths. Length fields: "
+ length
+ ", data: "
+ dataOnly.length);
}
} | 3.68 |
framework_ConnectorHierarchyWriter_write | /**
* Writes a JSON object containing the connector hierarchy (parent-child
* mappings) of the dirty connectors in the given UI.
*
* @param ui
* The {@link UI} whose hierarchy to write.
* @param writer
* The {@link Writer} used to write the JSON.
* @param stateUpdateConnectors
* connector ids with state changes
* @throws IOException
* If the serialization fails.
*/
public void write(UI ui, Writer writer, Set<String> stateUpdateConnectors)
throws IOException {
Collection<ClientConnector> dirtyVisibleConnectors = ui
.getConnectorTracker().getDirtyVisibleConnectors();
JsonObject hierarchyInfo = Json.createObject();
for (ClientConnector connector : dirtyVisibleConnectors) {
String connectorId = connector.getConnectorId();
JsonArray children = Json.createArray();
for (ClientConnector child : AbstractClientConnector
.getAllChildrenIterable(connector)) {
if (LegacyCommunicationManager
.isConnectorVisibleToClient(child)) {
children.set(children.length(), child.getConnectorId());
}
}
// Omit for leaf nodes with state changes
if (children.length() > 0
|| !stateUpdateConnectors.contains(connectorId)) {
try {
hierarchyInfo.put(connectorId, children);
} catch (JsonException e) {
throw new PaintException(
"Failed to send hierarchy information about "
+ connectorId + " to the client: "
+ e.getMessage(),
e);
}
}
}
// Dummy assert just for conditionally storing away data that will be
// used by the real assert later on
assert storeSentHierarchy(hierarchyInfo, stateUpdateConnectors);
writer.write(JsonUtil.stringify(hierarchyInfo));
} | 3.68 |
framework_AbstractConnector_registerRpc | /**
* Registers an implementation for a server to client RPC interface.
*
* Multiple registrations can be made for a single interface, in which case
* all of them receive corresponding RPC calls.
*
* @param rpcInterface
* RPC interface
* @param implementation
* implementation that should receive RPC calls
* @param <T>
* The type of the RPC interface that is being registered
*/
protected <T extends ClientRpc> void registerRpc(Class<T> rpcInterface,
T implementation) {
String rpcInterfaceId = rpcInterface.getName().replaceAll("\\$", ".");
if (null == rpcImplementations) {
rpcImplementations = FastStringMap.create();
}
if (null == rpcImplementations.get(rpcInterfaceId)) {
rpcImplementations.put(rpcInterfaceId, new ArrayList<ClientRpc>());
}
rpcImplementations.get(rpcInterfaceId).add(implementation);
} | 3.68 |
framework_AbstractListing_extend | /**
* Adds this extension to the given parent listing.
*
* @param listing
* the parent component to add to
*/
public void extend(AbstractListing<T> listing) {
super.extend(listing);
listing.addDataGenerator(this);
} | 3.68 |
hmily_HmilyXaTransactionManager_getState | /**
* Gets state.
*
* @return the state
* @throws SystemException the system exception
*/
public Integer getState() throws SystemException {
Transaction transaction = getTransaction();
if (transaction == null) {
return XaState.STATUS_NO_TRANSACTION.getState();
}
return transaction.getStatus();
} | 3.68 |
hadoop_ShortWritable_get | /** @return Return the value of this ShortWritable. */
public short get() {
return value;
} | 3.68 |
pulsar_AdminResource_validateAdminAccessForTenant | // This is a stub method for Mockito
@Override
protected void validateAdminAccessForTenant(String property) {
super.validateAdminAccessForTenant(property);
} | 3.68 |
dubbo_ConfigurationUtils_getDynamicConfigurationFactory | /**
* Get an instance of {@link DynamicConfigurationFactory} by the specified name. If not found, take the default
* extension of {@link DynamicConfigurationFactory}
*
* @param name the name of extension of {@link DynamicConfigurationFactory}
* @return non-null
* @since 2.7.4
*/
public static DynamicConfigurationFactory getDynamicConfigurationFactory(
ExtensionAccessor extensionAccessor, String name) {
ExtensionLoader<DynamicConfigurationFactory> loader =
extensionAccessor.getExtensionLoader(DynamicConfigurationFactory.class);
return loader.getOrDefaultExtension(name);
} | 3.68 |
framework_AbsoluteLayoutRelativeSizeContent_createTableOnFixed | /**
* Creates an {@link AbsoluteLayout} of fixed size that contains a
* full-sized {@link Table}.
*
* @return the created layout
*/
private Component createTableOnFixed() {
AbsoluteLayout absoluteLayout = new AbsoluteLayout();
absoluteLayout.setWidth(200, Unit.PIXELS);
absoluteLayout.setHeight(200, Unit.PIXELS);
absoluteLayout.setCaption("full-sized table expected");
Table table = new Table();
table.setSizeFull();
table.setId("full-table");
absoluteLayout.addComponent(table);
return absoluteLayout;
} | 3.68 |
hudi_HoodieTable_scheduleLogCompaction | /**
* Schedule log compaction for the instant time.
*
* @param context HoodieEngineContext
* @param instantTime Instant Time for scheduling log compaction
* @param extraMetadata additional metadata to write into plan
* @return
*/
public Option<HoodieCompactionPlan> scheduleLogCompaction(HoodieEngineContext context,
String instantTime,
Option<Map<String, String>> extraMetadata) {
throw new UnsupportedOperationException("Log compaction is not supported for this table type");
} | 3.68 |
hbase_MiniHBaseCluster_resumeRegionServer | /**
* Resume the specified region server
* @param serverNumber Used as index into a list.
*/
public JVMClusterUtil.RegionServerThread resumeRegionServer(int serverNumber) {
JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
LOG.info("Resuming {}", server.toString());
server.resume();
return server;
} | 3.68 |
hbase_Query_doLoadColumnFamiliesOnDemand | /**
* Get the logical value indicating whether on-demand CF loading should be allowed.
*/
public boolean doLoadColumnFamiliesOnDemand() {
return (this.loadColumnFamiliesOnDemand != null) && this.loadColumnFamiliesOnDemand;
} | 3.68 |
flink_StreamProjection_projectTuple22 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>
SingleOutputStreamOperator<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
projectTuple22() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
tType =
new TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
hudi_WriteMarkersFactory_get | /**
* @param markerType the type of markers to use
* @param table {@code HoodieTable} instance
* @param instantTime current instant time
* @return {@code WriteMarkers} instance based on the {@code MarkerType}
*/
public static WriteMarkers get(MarkerType markerType, HoodieTable table, String instantTime) {
LOG.debug("Instantiated MarkerFiles with marker type: " + markerType.toString());
switch (markerType) {
case DIRECT:
return new DirectWriteMarkers(table, instantTime);
case TIMELINE_SERVER_BASED:
if (!table.getConfig().isEmbeddedTimelineServerEnabled()) {
LOG.warn("Timeline-server-based markers are configured as the marker type "
+ "but embedded timeline server is not enabled. Falling back to direct markers.");
return new DirectWriteMarkers(table, instantTime);
}
String basePath = table.getMetaClient().getBasePath();
if (StorageSchemes.HDFS.getScheme().equals(
FSUtils.getFs(basePath, table.getContext().getHadoopConf().newCopy()).getScheme())) {
LOG.warn("Timeline-server-based markers are not supported for HDFS: "
+ "base path " + basePath + ". Falling back to direct markers.");
return new DirectWriteMarkers(table, instantTime);
}
return new TimelineServerBasedWriteMarkers(table, instantTime);
default:
throw new HoodieException("The marker type \"" + markerType.name() + "\" is not supported.");
}
} | 3.68 |
querydsl_JTSGeometryExpressions_pointOperation | /**
* Create a new Point operation expression
*
* @param op operator
* @param args arguments
* @return operation expression
*/
public static JTSPointExpression<Point> pointOperation(Operator op, Expression<?>... args) {
return new JTSPointOperation<Point>(Point.class, op, args);
} | 3.68 |
druid_SQLCreateTableStatement_isUNI | /**
* only for show columns
*/
public boolean isUNI(String columnName) {
for (SQLTableElement element : this.tableElementList) {
if (element instanceof MySqlUnique) {
MySqlUnique unique = (MySqlUnique) element;
if (unique.getColumns().isEmpty()) {
continue;
}
SQLExpr column = unique.getColumns().get(0).getExpr();
if (column instanceof SQLIdentifierExpr
&& SQLUtils.nameEquals(columnName, ((SQLIdentifierExpr) column).getName())) {
return unique.getColumns().size() == 1;
} else if (column instanceof SQLMethodInvokeExpr
&& SQLUtils.nameEquals(((SQLMethodInvokeExpr) column).getMethodName(), columnName)) {
return true;
}
}
}
return false;
} | 3.68 |
hbase_ZNodeClearer_deleteMyEphemeralNodeOnDisk | /**
* delete the znode file
*/
public static void deleteMyEphemeralNodeOnDisk() {
String fileName = getMyEphemeralNodeFileName();
if (fileName != null) {
new File(fileName).delete();
}
} | 3.68 |
hibernate-validator_NotEmptyValidatorForCharSequence_isValid | /**
* Checks the character sequence is not {@code null} and not empty.
*
* @param charSequence the character sequence to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the character sequence is not {@code null} and not empty.
*/
@Override
public boolean isValid(CharSequence charSequence, ConstraintValidatorContext constraintValidatorContext) {
if ( charSequence == null ) {
return false;
}
return charSequence.length() > 0;
} | 3.68 |
morf_SelectStatement_unionAll | /**
* Perform an UNION set operation with another {@code selectStatement},
* keeping all duplicate rows.
*
* @param selectStatement the other select statement to be united with the current select statement;
* @return a new select statement with the change applied.
* @see #union(SelectStatement)
*/
public SelectStatement unionAll(SelectStatement selectStatement) {
return copyOnWriteOrMutate(
(SelectStatementBuilder b) -> b.unionAll(selectStatement),
() -> setOperators.add(new UnionSetOperator(UnionStrategy.ALL, this, selectStatement))
);
} | 3.68 |
hbase_MasterAddressTracker_hasMaster | /**
* Check if there is a master available.
* @return true if there is a master set, false if not.
*/
public boolean hasMaster() {
return super.getData(false) != null;
} | 3.68 |
flink_ExecutionConfig_setTaskCancellationInterval | /**
* Sets the configuration parameter specifying the interval (in milliseconds) between
* consecutive attempts to cancel a running task.
*
* @param interval the interval (in milliseconds).
*/
public ExecutionConfig setTaskCancellationInterval(long interval) {
configuration.set(TaskManagerOptions.TASK_CANCELLATION_INTERVAL, interval);
return this;
} | 3.68 |
flink_ParquetVectorizedInputFormat_clipParquetSchema | /** Clips `parquetSchema` according to `fieldNames`. */
private MessageType clipParquetSchema(
GroupType parquetSchema, Collection<Integer> unknownFieldsIndices) {
Type[] types = new Type[projectedFields.length];
if (isCaseSensitive) {
for (int i = 0; i < projectedFields.length; ++i) {
String fieldName = projectedFields[i];
if (!parquetSchema.containsField(fieldName)) {
LOG.warn(
"{} does not exist in {}, will fill the field with null.",
fieldName,
parquetSchema);
types[i] =
ParquetSchemaConverter.convertToParquetType(
fieldName, projectedTypes[i]);
unknownFieldsIndices.add(i);
} else {
types[i] = parquetSchema.getType(fieldName);
}
}
} else {
Map<String, Type> caseInsensitiveFieldMap = new HashMap<>();
for (Type type : parquetSchema.getFields()) {
caseInsensitiveFieldMap.compute(
type.getName().toLowerCase(Locale.ROOT),
(key, previousType) -> {
if (previousType != null) {
throw new FlinkRuntimeException(
"Parquet with case insensitive mode should have no duplicate key: "
+ key);
}
return type;
});
}
for (int i = 0; i < projectedFields.length; ++i) {
Type type =
caseInsensitiveFieldMap.get(projectedFields[i].toLowerCase(Locale.ROOT));
if (type == null) {
LOG.warn(
"{} does not exist in {}, will fill the field with null.",
projectedFields[i],
parquetSchema);
type =
ParquetSchemaConverter.convertToParquetType(
projectedFields[i].toLowerCase(Locale.ROOT), projectedTypes[i]);
unknownFieldsIndices.add(i);
}
// TODO clip for array,map,row types.
types[i] = type;
}
}
return Types.buildMessage().addFields(types).named("flink-parquet");
} | 3.68 |
morf_TableReference_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null)
return false;
if (obj == this)
return true;
// TODO incorrect - permits other types. Can't change this - need to fix existing misuse in subtypes
if (!(obj instanceof TableReference))
return false;
TableReference rhs = (TableReference)obj;
return new EqualsBuilder()
.append(schemaName, rhs.schemaName)
.append(name, rhs.name)
.append(alias, rhs.alias)
.append(temporary, rhs.temporary)
.append(dblink, rhs.dblink)
.isEquals();
} | 3.68 |
flink_TestcontainersSettings_getBaseImage | /** @return The base image. */
public String getBaseImage() {
return baseImage;
} | 3.68 |
hudi_RocksDBDAO_addColumnFamily | /**
* Add a new column family to store.
*
* @param columnFamilyName Column family name
*/
public void addColumnFamily(String columnFamilyName) {
ValidationUtils.checkArgument(!closed);
managedDescriptorMap.computeIfAbsent(columnFamilyName, colFamilyName -> {
try {
ColumnFamilyDescriptor descriptor = getColumnFamilyDescriptor(StringUtils.getUTF8Bytes(colFamilyName));
ColumnFamilyHandle handle = getRocksDB().createColumnFamily(descriptor);
managedHandlesMap.put(colFamilyName, handle);
return descriptor;
} catch (RocksDBException e) {
throw new HoodieException(e);
}
});
} | 3.68 |