name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
pulsar_ProducerConfiguration_getCompressionType | /**
* @return the configured compression type for this producer
*/
public CompressionType getCompressionType() {
return conf.getCompressionType();
} | 3.68 |
flink_FactoryUtil_discoverDecodingFormat | /**
* Discovers a {@link DecodingFormat} of the given type using the given option as factory
* identifier.
*/
public <I, F extends DecodingFormatFactory<I>> DecodingFormat<I> discoverDecodingFormat(
Class<F> formatFactoryClass, ConfigOption<String> formatOption) {
return discoverOptionalDecodingFormat(formatFactoryClass, formatOption)
.orElseThrow(
() ->
new ValidationException(
String.format(
"Could not find required scan format '%s'.",
formatOption.key())));
} | 3.68 |
hbase_Import_usage | /*
* @param errorMsg Error message. Can be null.
*/
private static void usage(final String errorMsg) {
if (errorMsg != null && errorMsg.length() > 0) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: Import [options] <tablename> <inputdir>");
System.err.println("By default Import will load data directly into HBase. To instead generate");
System.err.println("HFiles of data to prepare for a bulk data load, pass the option:");
System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
System.err.println("If there is a large result that includes too much Cell "
+ "whitch can occur OOME caused by the memery sort in reducer, pass the option:");
System.err.println(" -D" + HAS_LARGE_RESULT + "=true");
System.err
.println(" To apply a generic org.apache.hadoop.hbase.filter.Filter to the input, use");
System.err.println(" -D" + FILTER_CLASS_CONF_KEY + "=<name of filter class>");
System.err.println(" -D" + FILTER_ARGS_CONF_KEY + "=<comma separated list of args for filter");
System.err.println(" NOTE: The filter will be applied BEFORE doing key renames via the "
+ CF_RENAME_PROP + " property. Futher, filters will only use the"
+ " Filter#filterRowKey(byte[] buffer, int offset, int length) method to identify "
+ " whether the current row needs to be ignored completely for processing and "
+ " Filter#filterCell(Cell) method to determine if the Cell should be added;"
+ " Filter.ReturnCode#INCLUDE and #INCLUDE_AND_NEXT_COL will be considered as including"
+ " the Cell.");
System.err.println("To import data exported from HBase 0.94, use");
System.err.println(" -Dhbase.import.version=0.94");
System.err.println(" -D " + JOB_NAME_CONF_KEY
+ "=jobName - use the specified mapreduce job name for the import");
System.err.println("For performance consider the following options:\n"
+ " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false\n"
+ " -D" + WAL_DURABILITY + "=<Used while writing data to hbase."
+ " Allowed values are the supported durability values"
+ " like SKIP_WAL/ASYNC_WAL/SYNC_WAL/...>");
} | 3.68 |
framework_DateField_getRangeEnd | /**
* Returns the precise rangeEnd used.
*
* @return the rangeEnd date in use
*/
public Date getRangeEnd() {
return getState(false).rangeEnd;
} | 3.68 |
hbase_StorageClusterStatusModel_addRegion | /**
* Add a region to the list, together with its load metrics
* @param name the region name
*/
public void addRegion(byte[] name, int stores, int storefiles, int storefileSizeMB,
int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, long cpRequestsCount,
long writeRequestsCount, int rootIndexSizeKB, int totalStaticIndexSizeKB,
int totalStaticBloomSizeKB, long totalCompactingKVs, long currentCompactedKVs) {
regions.add(
new Region(name, stores, storefiles, storefileSizeMB, memstoreSizeMB, storefileIndexSizeKB,
readRequestsCount, cpRequestsCount, writeRequestsCount, rootIndexSizeKB,
totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
} | 3.68 |
flink_HivePartitionUtils_getPartitionNames | /**
* Get the partitions' names from the partitions' specs.
*
* @param partitionsSpec a list containing one spec per partition. The map holding each
*     partition's spec may be unordered.
* @param partitionColNames the names of the partition columns
* @param defaultStr the default value used in a partition name when the value for a
*     partition column in the spec is null or an empty string.
* @return a list containing one name per partition, each of the form "p1=v1/p2=v2".
*/
public static List<String> getPartitionNames(
List<Map<String, String>> partitionsSpec,
List<String> partitionColNames,
String defaultStr) {
List<String> partitionNames = new ArrayList<>(partitionsSpec.size());
for (Map<String, String> partitionSpec : partitionsSpec) {
List<String> pVals = partitionSpecToValues(partitionSpec, partitionColNames);
// Construct a pattern of the form: partKey=partVal/partKey2=partVal2/...
partitionNames.add(FileUtils.makePartName(partitionColNames, pVals, defaultStr));
}
return partitionNames;
} | 3.68 |
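To make the "p1=v1/p2=v2" naming concrete, here is a minimal standalone sketch of the same spec-to-name mapping. It re-implements the join as a plain loop instead of calling Hive's `FileUtils.makePartName`, omits Hive's escaping of special characters, and uses made-up column names and a sample default value, so treat it as illustrative only.

```java
import java.util.Arrays;
import java.util.List;

public class PartitionNameSketch {
    // Illustrative stand-in for FileUtils.makePartName: join "col=value" pairs with '/',
    // substituting defaultStr when a value is null or empty (no escaping performed).
    static String makePartName(List<String> cols, List<String> vals, String defaultStr) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < cols.size(); i++) {
            if (i > 0) {
                sb.append('/');
            }
            String v = vals.get(i);
            sb.append(cols.get(i)).append('=')
              .append(v == null || v.isEmpty() ? defaultStr : v);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        List<String> cols = Arrays.asList("p1", "p2");
        List<String> vals = Arrays.asList("v1", "");   // empty value falls back to the default
        // Prints: p1=v1/p2=__DEFAULT__
        System.out.println(makePartName(cols, vals, "__DEFAULT__"));
    }
}
```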
open-banking-gateway_ServiceContextProviderForFintech_validateRedirectCode | /**
* Validates the redirect code (XSRF protection) for the current request
* @param request Request to validate for
* @param session Service session that has expected redirect code value
* @param <REQUEST> Request class
*/
protected <REQUEST extends FacadeServiceableGetter> void validateRedirectCode(REQUEST request, AuthSession session) {
if (Strings.isNullOrEmpty(request.getFacadeServiceable().getRedirectCode())) {
throw new IllegalArgumentException("Missing redirect code");
}
if (!Objects.equals(session.getRedirectCode(), request.getFacadeServiceable().getRedirectCode())) {
throw new IllegalArgumentException("Wrong redirect code");
}
} | 3.68 |
hbase_BinaryComponentComparator_toByteArray | /** Returns The comparator serialized using pb */
@Override
public byte[] toByteArray() {
ComparatorProtos.BinaryComponentComparator.Builder builder =
ComparatorProtos.BinaryComponentComparator.newBuilder();
builder.setValue(ByteString.copyFrom(this.value));
builder.setOffset(this.offset);
return builder.build().toByteArray();
} | 3.68 |
dubbo_AbstractMetadataReport_getMetadataReportRetry | /**
* @deprecated only for unit test
*/
@Deprecated
protected MetadataReportRetry getMetadataReportRetry() {
return metadataReportRetry;
} | 3.68 |
hadoop_SubApplicationRowKey_encode | /*
* (non-Javadoc)
*
* Encodes SubApplicationRowKey object into a byte array with each
* component/field in SubApplicationRowKey separated by
* Separator#QUALIFIERS.
* This leads to a sub app table row key of the form
* subAppUserId!clusterId!entityType!entityPrefix!entityId!userId
*
* subAppUserId is usually the doAsUser.
* userId is the yarn user that the AM runs as.
*
* If entityType in passed SubApplicationRowKey object is null (and the
* fields preceding it are not null i.e. clusterId, subAppUserId), this
* returns a row key prefix of the form subAppUserId!clusterId!
* If entityId in SubApplicationRowKey is null
* (other components are not null), this returns a row key prefix
* of the form subAppUserId!clusterId!entityType!
*
* @see org.apache.hadoop.yarn.server.timelineservice.storage.common
* .KeyConverter#encode(java.lang.Object)
*/
@Override
public byte[] encode(SubApplicationRowKey rowKey) {
byte[] subAppUser = Separator.encode(rowKey.getSubAppUserId(),
Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
byte[] cluster = Separator.encode(rowKey.getClusterId(), Separator.SPACE,
Separator.TAB, Separator.QUALIFIERS);
byte[] first = Separator.QUALIFIERS.join(subAppUser, cluster);
if (rowKey.getEntityType() == null) {
return first;
}
byte[] entityType = Separator.encode(rowKey.getEntityType(),
Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
if (rowKey.getEntityIdPrefix() == null) {
return Separator.QUALIFIERS.join(first, entityType,
Separator.EMPTY_BYTES);
}
byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
if (rowKey.getEntityId() == null) {
return Separator.QUALIFIERS.join(first, entityType, entityIdPrefix,
Separator.EMPTY_BYTES);
}
byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
Separator.TAB, Separator.QUALIFIERS);
byte[] userId = Separator.encode(rowKey.getUserId(),
Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
byte[] second = Separator.QUALIFIERS.join(entityType, entityIdPrefix,
entityId, userId);
return Separator.QUALIFIERS.join(first, second);
} | 3.68 |
hadoop_BufferData_setPrefetch | /**
* Indicates that a prefetch operation is in progress.
*
* @param actionFuture the {@code Future} of a prefetch action.
*
* @throws IllegalArgumentException if actionFuture is null.
*/
public synchronized void setPrefetch(Future<Void> actionFuture) {
Validate.checkNotNull(actionFuture, "actionFuture");
this.updateState(State.PREFETCHING, State.BLANK);
this.action = actionFuture;
} | 3.68 |
aws-saas-boost_UpdateWorkflow_findChangedPaths | // TODO git functionality should be extracted to a "gitToolbox" object for easier mock/testing
protected List<Path> findChangedPaths(Map<String, String> cloudFormationParamMap) {
// list all staged and committed changes against the last updated commit
String versionParameter = cloudFormationParamMap.get("Version");
LOGGER.debug("Found existing version: {}", versionParameter);
String commitHash = null;
if (versionParameter.startsWith("{") && versionParameter.endsWith("}")) {
// we know this is a JSON-created versionParameter, so attempt deserialization to GitVersionInfo
GitVersionInfo parsedInfo = Utils.fromJson(versionParameter, GitVersionInfo.class);
if (parsedInfo != null) {
commitHash = parsedInfo.getCommit();
} else {
// we cannot continue with an update without being able to parse the version information
throw new RuntimeException("Unable to continue with update; cannot parse VERSION as JSON: "
+ versionParameter);
}
} else {
// this versionParameter was created before the JSON migration of git information,
// so parse using the old logic
// if Version was created with "Commit time", we need to remove that to get commit hash
if (versionParameter.contains(",")) {
versionParameter = versionParameter.split(",")[0];
}
// if last update or install was created with uncommitted code, assume we're working from
// the last information we have: the commit on top of which the uncommitted code was written
if (versionParameter.contains("-dirty")) {
versionParameter = versionParameter.split("-")[0];
}
commitHash = versionParameter;
}
LOGGER.debug("Parsed commit hash to: {}", commitHash);
List<Path> changedPaths = new ArrayList<>();
// -b : ignore whitespace-only changes
// --name-only : only output the filename (for easy parsing)
// $(version) : output changes since $(version)
String gitDiffCommand = "git diff -b --name-only " + commitHash;
changedPaths.addAll(listPathsFromGitCommand(gitDiffCommand));
// list all untracked changes (i.e. net new un-added files)
String gitListUntrackedFilesCommand = "git ls-files --others --exclude-standard";
if (SaaSBoostInstall.isWindows()) {
gitListUntrackedFilesCommand = "cmd /c " + gitListUntrackedFilesCommand;
}
changedPaths.addAll(listPathsFromGitCommand(gitListUntrackedFilesCommand));
return changedPaths;
} | 3.68 |
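The two legacy (pre-JSON) VERSION formats handled above can be traced in isolation; the sample values below are hypothetical and only exercise the same string handling as the method.

```java
public class VersionParseSketch {
    public static void main(String[] args) {
        // Hypothetical legacy VERSION values.
        String withCommitTime = "3f2a1bc,Commit time: 2021-06-01"; // "commit,Commit time" form
        String dirtyBuild = "3f2a1bc-dirty";                       // built from uncommitted code

        String hash1 = withCommitTime.contains(",") ? withCommitTime.split(",")[0] : withCommitTime;
        String hash2 = dirtyBuild.contains("-dirty") ? dirtyBuild.split("-")[0] : dirtyBuild;

        // Both resolve to "3f2a1bc", which is then fed to "git diff -b --name-only <hash>".
        System.out.println(hash1 + " " + hash2);
    }
}
```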
graphhopper_ResponsePath_getDescription | /**
* @return the description of this route alternative to make it meaningful for the user e.g. it
* displays one or two main roads of the route.
*/
public List<String> getDescription() {
if (description == null)
return Collections.emptyList();
return description;
} | 3.68 |
flink_ExecutionEnvironment_registerJobListener | /**
* Register a {@link JobListener} in this environment. The {@link JobListener} will be notified
* of specific job status changes.
*/
@PublicEvolving
public void registerJobListener(JobListener jobListener) {
checkNotNull(jobListener, "JobListener cannot be null");
jobListeners.add(jobListener);
} | 3.68 |
hadoop_ResourceVector_increment | /**
* Increments the given resource by the specified value.
* @param resourceName name of the resource
* @param value value to be added to the resource's current value
*/
public void increment(String resourceName, double value) {
setValue(resourceName, getValue(resourceName) + value);
} | 3.68 |
hibernate-validator_ValueExtractorResolver_getRuntimeCompliantValueExtractors | /**
* @return a set of runtime compliant value extractors based on a runtime type. If there are no available value extractors
* an empty set will be returned which means the type is not a container.
*/
private Set<ValueExtractorDescriptor> getRuntimeCompliantValueExtractors(Class<?> runtimeType, Set<ValueExtractorDescriptor> potentialValueExtractorDescriptors) {
if ( nonContainerTypes.contains( runtimeType ) ) {
return Collections.emptySet();
}
Set<ValueExtractorDescriptor> valueExtractorDescriptors = possibleValueExtractorsByRuntimeType.get( runtimeType );
if ( valueExtractorDescriptors != null ) {
return valueExtractorDescriptors;
}
Set<ValueExtractorDescriptor> possibleValueExtractors = potentialValueExtractorDescriptors
.stream()
.filter( e -> TypeHelper.isAssignable( e.getContainerType(), runtimeType ) )
.collect( Collectors.toSet() );
valueExtractorDescriptors = getMaximallySpecificValueExtractors( possibleValueExtractors );
if ( valueExtractorDescriptors.isEmpty() ) {
nonContainerTypes.add( runtimeType );
return Collections.emptySet();
}
Set<ValueExtractorDescriptor> valueExtractorDescriptorsToCache = CollectionHelper.toImmutableSet( valueExtractorDescriptors );
Set<ValueExtractorDescriptor> cachedValueExtractorDescriptors = possibleValueExtractorsByRuntimeType.putIfAbsent( runtimeType,
valueExtractorDescriptorsToCache );
return cachedValueExtractorDescriptors != null ? cachedValueExtractorDescriptors : valueExtractorDescriptorsToCache;
} | 3.68 |
framework_VGridLayout_getColumnWidths | /**
* Returns the column widths measured in pixels.
*
* @return the column widths measured in pixels
*/
protected int[] getColumnWidths() {
return columnWidths;
} | 3.68 |
zilla_ManyToOneRingBuffer_capacity | /**
* {@inheritDoc}
*/
public int capacity()
{
return capacity;
} | 3.68 |
hbase_JobUtil_getQualifiedStagingDir | /**
* Initializes the staging directory and returns the qualified path.
* @param conf system configuration
* @return qualified staging directory path
* @throws IOException if the ownership on the staging directory is not as expected
* @throws InterruptedException if the thread getting the staging directory is interrupted
*/
public static Path getQualifiedStagingDir(Configuration conf)
throws IOException, InterruptedException {
Cluster cluster = new Cluster(conf);
Path stagingDir = JobSubmissionFiles.getStagingDir(cluster, conf);
return cluster.getFileSystem().makeQualified(stagingDir);
} | 3.68 |
hadoop_CopyCommandWithMultiThread_isMultiThreadNecessary | // if thread count is 1 or the source is only one single file,
// don't init executor to avoid threading overhead.
@VisibleForTesting
protected boolean isMultiThreadNecessary(LinkedList<PathData> args)
throws IOException {
return this.threadCount > 1 && hasMoreThanOneSourcePaths(args);
} | 3.68 |
flink_HiveParserUtils_toRelDataType | // converts a hive TypeInfo to RelDataType
public static RelDataType toRelDataType(TypeInfo typeInfo, RelDataTypeFactory relTypeFactory)
throws SemanticException {
RelDataType res;
switch (typeInfo.getCategory()) {
case PRIMITIVE:
// hive sets NULLABLE for all primitive types, revert that
res = HiveParserTypeConverter.convert(typeInfo, relTypeFactory);
return relTypeFactory.createTypeWithNullability(res, false);
case LIST:
RelDataType elementType =
toRelDataType(
((ListTypeInfo) typeInfo).getListElementTypeInfo(), relTypeFactory);
return relTypeFactory.createArrayType(elementType, -1);
case MAP:
RelDataType keyType =
toRelDataType(((MapTypeInfo) typeInfo).getMapKeyTypeInfo(), relTypeFactory);
RelDataType valType =
toRelDataType(
((MapTypeInfo) typeInfo).getMapValueTypeInfo(), relTypeFactory);
return relTypeFactory.createMapType(keyType, valType);
case STRUCT:
List<TypeInfo> types = ((StructTypeInfo) typeInfo).getAllStructFieldTypeInfos();
List<RelDataType> convertedTypes = new ArrayList<>(types.size());
for (TypeInfo type : types) {
convertedTypes.add(toRelDataType(type, relTypeFactory));
}
return relTypeFactory.createStructType(
convertedTypes, ((StructTypeInfo) typeInfo).getAllStructFieldNames());
case UNION:
default:
throw new SemanticException(
String.format(
"%s type is not supported yet", typeInfo.getCategory().name()));
}
} | 3.68 |
AreaShop_FileManager_addGroup | /**
* Add a RegionGroup.
* @param group The RegionGroup to add
*/
public void addGroup(RegionGroup group) {
groups.put(group.getName().toLowerCase(), group);
String lowGroup = group.getName().toLowerCase();
groupsConfig.set(lowGroup + ".name", group.getName());
groupsConfig.set(lowGroup + ".priority", 0);
saveGroupsIsRequired();
} | 3.68 |
flink_FlinkMatchers_futureFailedWith | /**
* Checks whether {@link CompletableFuture} completed already exceptionally with a specific
* exception type.
*/
public static <T, E extends Throwable> FutureFailedMatcher<T> futureFailedWith(
Class<E> exceptionType) {
Objects.requireNonNull(exceptionType, "exceptionType should not be null");
return new FutureFailedMatcher<>(exceptionType);
} | 3.68 |
hbase_PreemptiveFastFailException_wasOperationAttemptedByServer | /** Returns true if operation was attempted by server, false otherwise. */
public boolean wasOperationAttemptedByServer() {
return false;
} | 3.68 |
hadoop_KMSAudit_initializeAuditLoggers | /**
* Create a collection of KMSAuditLoggers from configuration, and initialize
* them. If any logger fails to be created or initialized, a RuntimeException
* is thrown.
*/
private void initializeAuditLoggers(Configuration conf) {
Set<Class<? extends KMSAuditLogger>> classes = getAuditLoggerClasses(conf);
Preconditions
.checkState(!classes.isEmpty(), "Should have at least 1 audit logger.");
for (Class<? extends KMSAuditLogger> c : classes) {
final KMSAuditLogger logger = ReflectionUtils.newInstance(c, conf);
auditLoggers.add(logger);
}
for (KMSAuditLogger logger: auditLoggers) {
try {
LOG.info("Initializing audit logger {}", logger.getClass());
logger.initialize(conf);
} catch (Exception ex) {
throw new RuntimeException(
"Failed to initialize " + logger.getClass().getName(), ex);
}
}
} | 3.68 |
hbase_FavoredStochasticBalancer_retainAssignment | /**
* Reuse BaseLoadBalancer's retainAssignment, but generate favored nodes when its missing.
*/
@Override
@NonNull
public Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, ServerName> regions,
List<ServerName> servers) throws HBaseIOException {
Map<ServerName, List<RegionInfo>> assignmentMap = Maps.newHashMap();
Map<ServerName, List<RegionInfo>> result = super.retainAssignment(regions, servers);
if (result.isEmpty()) {
LOG.warn("Nothing to assign to, probably no servers or no regions");
return result;
}
// Let's check if favored nodes info is in META; if not, generate it now.
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, getConf());
helper.initialize();
LOG.debug("Generating favored nodes for regions missing them.");
Map<RegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
try {
for (Map.Entry<ServerName, List<RegionInfo>> entry : result.entrySet()) {
ServerName sn = entry.getKey();
ServerName primary = ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE);
for (RegionInfo hri : entry.getValue()) {
if (FavoredNodesManager.isFavoredNodeApplicable(hri)) {
List<ServerName> favoredNodes = fnm.getFavoredNodes(hri);
if (favoredNodes == null || favoredNodes.size() < FAVORED_NODES_NUM) {
LOG.debug("Generating favored nodes for: " + hri + " with primary: " + primary);
ServerName[] secondaryAndTertiaryNodes = helper.getSecondaryAndTertiary(hri, primary);
if (secondaryAndTertiaryNodes != null && secondaryAndTertiaryNodes.length == 2) {
List<ServerName> newFavoredNodes = Lists.newArrayList();
newFavoredNodes.add(primary);
newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(),
secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE));
newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(),
secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE));
regionFNMap.put(hri, newFavoredNodes);
addRegionToMap(assignmentMap, hri, sn);
} else {
throw new HBaseIOException(
"Cannot generate secondary/tertiary FN for " + hri + " generated "
+ (secondaryAndTertiaryNodes != null ? secondaryAndTertiaryNodes : " nothing"));
}
} else {
List<ServerName> onlineFN = getOnlineFavoredNodes(servers, favoredNodes);
if (onlineFN.isEmpty()) {
// All favored nodes are dead, lets assign it to BOGUS
addRegionToMap(assignmentMap, hri, BOGUS_SERVER_NAME);
} else {
// Is primary not on FN? Less likely, but we can still take care of this.
if (FavoredNodesPlan.getFavoredServerPosition(favoredNodes, sn) != null) {
addRegionToMap(assignmentMap, hri, sn);
} else {
ServerName destination =
onlineFN.get(ThreadLocalRandom.current().nextInt(onlineFN.size()));
LOG.warn("Region: " + hri + " not hosted on favored nodes: " + favoredNodes
+ " current: " + sn + " moving to: " + destination);
addRegionToMap(assignmentMap, hri, destination);
}
}
}
} else {
addRegionToMap(assignmentMap, hri, sn);
}
}
}
if (!regionFNMap.isEmpty()) {
LOG.debug("Updating FN in meta for missing regions, count: " + regionFNMap.size());
fnm.updateFavoredNodes(regionFNMap);
}
} catch (IOException e) {
throw new HBaseIOException("Cannot generate/update FN for regions: " + regionFNMap.keySet());
}
return assignmentMap;
} | 3.68 |
framework_Calendar_getTimeFormat | /**
* Gets currently active time format. Value is either TimeFormat.Format12H
* or TimeFormat.Format24H.
*
* @return TimeFormat Format for the time.
*/
public TimeFormat getTimeFormat() {
if (currentTimeFormat == null) {
SimpleDateFormat f;
if (getLocale() == null) {
f = (SimpleDateFormat) SimpleDateFormat
.getTimeInstance(SimpleDateFormat.SHORT);
} else {
f = (SimpleDateFormat) SimpleDateFormat
.getTimeInstance(SimpleDateFormat.SHORT, getLocale());
}
String p = f.toPattern();
if (p.indexOf("HH") != -1 || p.indexOf("H") != -1) {
return TimeFormat.Format24H;
}
return TimeFormat.Format12H;
}
return currentTimeFormat;
} | 3.68 |
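The heuristic above decides 12h vs 24h by looking for 'H' in the locale's SHORT time pattern. A small standalone probe of the same check (the printed pattern will vary with the JDK's locale data):

```java
import java.text.SimpleDateFormat;
import java.util.Locale;

public class TimeFormatProbe {
    public static void main(String[] args) {
        for (Locale locale : new Locale[] {Locale.US, Locale.GERMANY}) {
            SimpleDateFormat f = (SimpleDateFormat) SimpleDateFormat
                    .getTimeInstance(SimpleDateFormat.SHORT, locale);
            String p = f.toPattern();
            // Same check as above: any 'H' in the pattern indicates a 24-hour clock.
            boolean is24h = p.indexOf('H') != -1;
            System.out.println(locale + " -> '" + p + "' -> " + (is24h ? "Format24H" : "Format12H"));
        }
    }
}
```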
flink_Plan_registerCachedFile | /**
* Register cache files at program level.
*
* @param entry contains all relevant information
* @param name user defined name of that file
* @throws java.io.IOException
*/
public void registerCachedFile(String name, DistributedCacheEntry entry) throws IOException {
if (!this.cacheFile.containsKey(name)) {
this.cacheFile.put(name, entry);
} else {
throw new IOException("cache file " + name + "already exists!");
}
} | 3.68 |
framework_TreeGridConnector_updateHierarchyColumn | /**
* This method has been scheduled finally to avoid possible race conditions
* between state change handling for the Grid and its columns. The renderer
* of the column is set in a state change handler, and might not be
* available when this method is executed.
*/
@SuppressWarnings("unchecked")
@OnStateChange("hierarchyColumnId")
void updateHierarchyColumn() {
if (hierarchyColumnUpdateScheduled) {
return;
}
Scheduler.get().scheduleFinally(() -> {
hierarchyColumnUpdateScheduled = false;
// Id of old hierarchy column
String oldHierarchyColumnId = hierarchyColumnId;
// Id of new hierarchy column. Choose first when nothing explicitly
// set
String newHierarchyColumnId = getState().hierarchyColumnId;
if (newHierarchyColumnId == null
&& !getState().columnOrder.isEmpty()) {
newHierarchyColumnId = getState().columnOrder.get(0);
}
// Columns
Grid.Column<?, ?> newColumn = getColumn(newHierarchyColumnId);
Grid.Column<?, ?> oldColumn = getColumn(oldHierarchyColumnId);
if (newColumn == null && oldColumn == null) {
// No hierarchy column defined
return;
}
// Unwrap renderer of old column
if (oldColumn != null
&& oldColumn.getRenderer() instanceof HierarchyRenderer) {
oldColumn.setRenderer(
((HierarchyRenderer) oldColumn.getRenderer())
.getInnerRenderer());
}
// Wrap renderer of new column
if (newColumn != null) {
HierarchyRenderer wrapperRenderer = getHierarchyRenderer();
wrapperRenderer.setInnerRenderer(newColumn.getRenderer());
newColumn.setRenderer(wrapperRenderer);
// Set frozen columns again after setting hierarchy column as
// setRenderer() replaces DOM elements
getWidget().setFrozenColumnCount(getState().frozenColumnCount);
hierarchyColumnId = newHierarchyColumnId;
} else {
Logger.getLogger(TreeGridConnector.class.getName()).warning(
"Couldn't find column: " + newHierarchyColumnId);
}
});
hierarchyColumnUpdateScheduled = true;
} | 3.68 |
framework_VTabsheet_scrollIntoView | /**
* Scrolls the given tab into view. If the tab is hidden on the server,
* nothing is done.
*
* @param tab
* the tab to scroll to
*/
private void scrollIntoView(Tab tab) {
if (!tab.isHiddenOnServer()) {
// Check for visibility first as clipped tabs to the right are
// always visible.
// On IE8 a tab with false visibility would have the bounds of the
// full TabBar.
if (!tab.isVisible()) {
while (!tab.isVisible() && scrollerIndex > 0) {
scrollerIndex = tb.scrollLeft(scrollerIndex);
}
updateTabScroller();
} else if (isClipped(tab)
&& scrollerIndex < tb.getLastVisibleTab()) {
int tabIndex = tb.getTabIndex(tab);
while (isClipped(tab) && scrollerIndex >= 0
&& scrollerIndex < tabIndex) {
scrollerIndex = tb.scrollRight(scrollerIndex);
}
updateTabScroller();
}
if (scrollerIndex >= 0 && scrollerIndex < tb.getTabCount()) {
Tab currentFirst = tb.getTab(scrollerIndex);
// keep the previous keyboard focus style, focus change should
// be handled elsewhere if needed
currentFirst.setStyleNames(scrollerIndex == activeTabIndex,
true, currentFirst.td
.hasClassName(Tab.TD_FOCUS_FIRST_CLASSNAME));
scrollerPositionTabId = currentFirst.id;
} else {
scrollerPositionTabId = null;
}
}
} | 3.68 |
hbase_OperationWithAttributes_getId | /**
* This method allows you to retrieve the identifier for the operation if one was set.
* @return the id or null if not set
*/
public String getId() {
byte[] attr = getAttribute(ID_ATRIBUTE);
return attr == null ? null : Bytes.toString(attr);
} | 3.68 |
hbase_SimpleServerRpcConnection_decRpcCount | /* Decrement the outstanding RPC count */
protected void decRpcCount() {
rpcCount.decrement();
} | 3.68 |
hbase_HBaseSaslRpcClient_getOutputStream | /**
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called.
* @return a SASL wrapped OutputStream
*/
public OutputStream getOutputStream() throws IOException {
if (!saslClient.isComplete()) {
throw new IOException("Sasl authentication exchange hasn't completed yet");
}
// If Crypto AES is enabled, return cryptoOutputStream which wrap the data with Crypto AES.
if (cryptoAesEnable && cryptoOutputStream != null) {
return cryptoOutputStream;
}
return saslOutputStream;
} | 3.68 |
morf_OracleDialect_changePrimaryKeyColumns | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#changePrimaryKeyColumns(Table, java.util.List, java.util.List)
*/
@Override
public Collection<String> changePrimaryKeyColumns(Table table, List<String> oldPrimaryKeyColumns, List<String> newPrimaryKeyColumns) {
List<String> result = new ArrayList<>();
String tableName = table.getName();
// Drop existing primary key and make columns not null
if (!oldPrimaryKeyColumns.isEmpty()) {
result.add(dropPrimaryKeyConstraint(tableName));
for (String columnName : oldPrimaryKeyColumns) {
result.add(makeColumnNotNull(tableName, columnName));
}
}
//Create new primary key constraint
if (!newPrimaryKeyColumns.isEmpty()) {
result.add(generatePrimaryKeyStatement(newPrimaryKeyColumns, table.getName()));
}
return result;
} | 3.68 |
pulsar_BytesSchemaVersion_toString | /**
* Write a printable representation of a byte array. Non-printable
* characters are hex escaped in the format \\x%02X, eg:
* \x00 \x05 etc.
*
* <p>This function is brought from org.apache.hadoop.hbase.util.Bytes
*
* @param b array to write out
* @param off offset to start at
* @param len length to write
* @return string output
*/
private static String toString(final byte[] b, int off, int len) {
StringBuilder result = new StringBuilder();
if (b == null) {
return result.toString();
}
// just in case we are passed a 'len' that is > buffer length...
if (off >= b.length) {
return result.toString();
}
if (off + len > b.length) {
len = b.length - off;
}
for (int i = off; i < off + len; ++i) {
int ch = b[i] & 0xFF;
if (ch >= ' ' && ch <= '~' && ch != '\\') {
result.append((char) ch);
} else {
result.append("\\x");
result.append(HEX_CHARS_UPPER[ch / 0x10]);
result.append(HEX_CHARS_UPPER[ch % 0x10]);
}
}
return result.toString();
} | 3.68 |
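A quick way to see the escaping rule in action; this mirrors the loop above in a standalone class rather than calling the private Pulsar method.

```java
public class HexEscapeSketch {
    private static final char[] HEX_CHARS_UPPER = "0123456789ABCDEF".toCharArray();

    // Mirrors the rule above: printable ASCII other than backslash is kept verbatim,
    // everything else is rendered as \xNN.
    static String toPrintable(byte[] b) {
        StringBuilder result = new StringBuilder();
        for (byte value : b) {
            int ch = value & 0xFF;
            if (ch >= ' ' && ch <= '~' && ch != '\\') {
                result.append((char) ch);
            } else {
                result.append("\\x")
                      .append(HEX_CHARS_UPPER[ch / 0x10])
                      .append(HEX_CHARS_UPPER[ch % 0x10]);
            }
        }
        return result.toString();
    }

    public static void main(String[] args) {
        // Prints: \x00ab\x05\x5C
        System.out.println(toPrintable(new byte[] {0x00, 'a', 'b', 0x05, '\\'}));
    }
}
```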
hadoop_JsonSerDeser_fromResource | /**
* Convert from a JSON file
* @param resource input file
* @return the parsed JSON
* @throws IOException IO problems
* @throws JsonMappingException failure to map from the JSON to this class
*/
public T fromResource(String resource)
throws IOException, JsonParseException, JsonMappingException {
try(InputStream resStream = this.getClass().getResourceAsStream(resource)) {
if (resStream == null) {
throw new FileNotFoundException(resource);
}
return (T) (mapper.readValue(resStream, classType));
} catch (IOException e) {
log.error("Exception while parsing json resource {}", resource, e);
throw e;
}
} | 3.68 |
flink_SkipListValueSerializer_deserializeState | /**
* Deserialize the state from the memory segment which stores the skip list value.
*
* @param memorySegment the memory segment which stores the skip list value.
* @param offset the start position of the skip list value in the memory segment.
* @param len length of the skip list value.
*/
S deserializeState(MemorySegment memorySegment, int offset, int len) {
final MemorySegmentInputStreamWithPos src =
new MemorySegmentInputStreamWithPos(memorySegment, offset, len);
final DataInputViewStreamWrapper in = new DataInputViewStreamWrapper(src);
try {
return stateSerializer.deserialize(in);
} catch (IOException e) {
throw new RuntimeException("deserialize state failed", e);
}
} | 3.68 |
hadoop_RpcProgram_register | /**
* Register the program with Portmap or Rpcbind.
* @param mapEntry port map entries
* @param set specifies registration or not
*/
protected void register(PortmapMapping mapEntry, boolean set) {
XDR mappingRequest = PortmapRequest.create(mapEntry, set);
SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT,
mappingRequest, true, registrationSocket, portmapUdpTimeoutMillis);
try {
registrationClient.run();
} catch (IOException e) {
String request = set ? "Registration" : "Unregistration";
LOG.error(request + " failure with " + host + ":" + port
+ ", portmap entry: " + mapEntry);
throw new RuntimeException(request + " failure", e);
}
} | 3.68 |
hbase_SingleColumnValueFilter_getComparator | /** Returns the comparator */
public org.apache.hadoop.hbase.filter.ByteArrayComparable getComparator() {
return comparator;
} | 3.68 |
flink_BinaryExternalSorter_setResultIterator | /**
* Sets the result iterator. By setting the result iterator, all threads that are waiting for
* the result iterator are notified and will obtain it.
*
* @param iterator The result iterator to set.
*/
private void setResultIterator(MutableObjectIterator<BinaryRowData> iterator) {
synchronized (this.iteratorLock) {
// set the result iterator only, if no exception has occurred
if (this.iteratorException == null) {
this.iterator = iterator;
this.iteratorLock.notifyAll();
}
}
} | 3.68 |
hadoop_AbfsManifestStoreOperations_storeSupportsResilientCommit | /**
* Resilient commits available on hierarchical stores.
* @return true if the FS can use etags on renames.
*/
@Override
public boolean storeSupportsResilientCommit() {
return resilientCommitByRename != null;
} | 3.68 |
flink_InterestingProperties_getLocalProperties | /**
* Gets the interesting local properties.
*
* @return The interesting local properties.
*/
public Set<RequestedLocalProperties> getLocalProperties() {
return this.localProps;
} | 3.68 |
hbase_CacheConfig_getBlockCache | /**
* Returns the block cache.
* @return the block cache, or null if caching is completely disabled
*/
public Optional<BlockCache> getBlockCache() {
return Optional.ofNullable(this.blockCache);
} | 3.68 |
dubbo_ValidationFilter_setValidation | /**
* Sets the validation instance for ValidationFilter
*
* @param validation Validation instance injected by dubbo framework based on "validation" attribute value.
*/
public void setValidation(Validation validation) {
this.validation = validation;
} | 3.68 |
open-banking-gateway_ProtocolResultHandler_handleResult | /**
* Handles the result from protocol for the {@code FacadeService} to pass it to API.
* This class must ensure that it is separate transaction - so it won't join any other as is used with
* CompletableFuture.
*/
@Transactional(propagation = Propagation.REQUIRES_NEW)
public <RESULT, REQUEST extends FacadeServiceableGetter> FacadeResult<RESULT> handleResult(Result<RESULT> result, FacadeServiceableRequest request, ServiceContext<REQUEST> session) {
SecretKeyWithIv sessionKey = provider.deregister(session.getRequestScoped()).getKey();
return doHandleResult(result, request, session, sessionKey);
} | 3.68 |
hudi_BufferedRandomAccessFile_alignDiskPositionToBufferStartIfNeeded | /**
* If the diskPosition differs from the startPosition, flush the data in the buffer
* and realign/fill the buffer at startPosition.
* @throws IOException
*/
private void alignDiskPositionToBufferStartIfNeeded() throws IOException {
if (this.diskPosition != this.startPosition) {
super.seek(this.startPosition);
this.diskPosition = this.startPosition;
}
} | 3.68 |
flink_RouteResult_queryParams | /** Returns all params in the query part of the request URI. */
public Map<String, List<String>> queryParams() {
return queryParams;
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_incrementWindowSize | /**
* Increment the window size for a particular stream.
* @param state the state associated with the stream whose window is being incremented.
* @param delta The amount to increment by.
* @throws Http2Exception If this operation overflows the window for {@code state}.
*/
void incrementWindowSize(FlowState state, int delta) throws Http2Exception {
state.incrementStreamWindow(delta);
} | 3.68 |
shardingsphere-elasticjob_JobScheduleController_triggerJob | /**
* Trigger job.
*/
public synchronized void triggerJob() {
try {
if (scheduler.isShutdown()) {
return;
}
if (!scheduler.checkExists(jobDetail.getKey())) {
scheduler.scheduleJob(jobDetail, createOneOffTrigger());
} else {
scheduler.triggerJob(jobDetail.getKey());
}
if (!scheduler.isStarted()) {
scheduler.start();
}
} catch (final SchedulerException ex) {
throw new JobSystemException(ex);
}
} | 3.68 |
graphhopper_AlternativeRoute_getWorstSortBy | /**
* Return the current worst weight for all alternatives
*/
double getWorstSortBy() {
if (alternatives.isEmpty())
throw new IllegalStateException("Empty alternative list cannot happen");
return alternatives.get(alternatives.size() - 1).sortBy;
} | 3.68 |
framework_Table_setFooterVisible | /**
* Sets the footer visible in the bottom of the table.
* <p>
* The footer can be used to add column related data like sums to the bottom
* of the Table using setColumnFooter(Object propertyId, String footer).
* </p>
*
* @param visible
* Should the footer be visible
*/
public void setFooterVisible(boolean visible) {
if (visible != columnFootersVisible) {
columnFootersVisible = visible;
markAsDirty();
}
} | 3.68 |
hmily_HmilyRepositoryFacade_findHmilyLockById | /**
* Find hmily lock by id.
*
* @param lockId lock id
* @return hmily lock
*/
public Optional<HmilyLock> findHmilyLockById(final String lockId) {
return hmilyRepository.findHmilyLockById(lockId);
} | 3.68 |
hadoop_TaskContainerDefinition_withDurationLegacy | /**
* Also support "duration.ms" for backward compatibility.
* @param jsonTask the json representation of the task.
* @param key The json key.
* @return the builder
*/
public Builder withDurationLegacy(Map<String, String> jsonTask, String key) {
if (jsonTask.containsKey(key)) {
this.durationLegacy = Integer.parseInt(jsonTask.get(key));
}
return this;
} | 3.68 |
hbase_HBaseTestingUtility_deleteTableData | /**
* Provide an existing table name to truncate. Scans the table and issues a delete for each row
* read.
* @param tableName existing table
* @return Table for the emptied table
*/
public Table deleteTableData(TableName tableName) throws IOException {
Table table = getConnection().getTable(tableName);
Scan scan = new Scan();
ResultScanner resScan = table.getScanner(scan);
for (Result res : resScan) {
Delete del = new Delete(res.getRow());
table.delete(del);
}
    resScan.close();
return table;
} | 3.68 |
zxing_UPCEANReader_decodeDigit | /**
* Attempts to decode a single UPC/EAN-encoded digit.
*
* @param row row of black/white values to decode
* @param counters the counts of runs of observed black/white/black/... values
* @param rowOffset horizontal offset to start decoding from
* @param patterns the set of patterns to use to decode -- sometimes different encodings
* for the digits 0-9 are used, and this indicates the encodings for 0 to 9 that should
* be used
* @return horizontal offset of first pixel beyond the decoded digit
* @throws NotFoundException if digit cannot be decoded
*/
static int decodeDigit(BitArray row, int[] counters, int rowOffset, int[][] patterns)
throws NotFoundException {
recordPattern(row, rowOffset, counters);
float bestVariance = MAX_AVG_VARIANCE; // worst variance we'll accept
int bestMatch = -1;
int max = patterns.length;
for (int i = 0; i < max; i++) {
int[] pattern = patterns[i];
float variance = patternMatchVariance(counters, pattern, MAX_INDIVIDUAL_VARIANCE);
if (variance < bestVariance) {
bestVariance = variance;
bestMatch = i;
}
}
if (bestMatch >= 0) {
return bestMatch;
} else {
throw NotFoundException.getNotFoundInstance();
}
} | 3.68 |
morf_UpgradeStatusTableServiceImpl_create | /**
* @see UpgradeStatusTableService.Factory#create(ConnectionResources)
*/
public UpgradeStatusTableService create(final ConnectionResources connectionResources) {
return new UpgradeStatusTableServiceImpl(connectionResources);
} | 3.68 |
hadoop_CapacitySchedulerPreemptionUtils_tryPreemptContainerAndDeductResToObtain | /**
* Invoke this method to preempt container based on resToObtain.
*
* @param rc
* resource calculator
* @param context
* preemption context
* @param resourceToObtainByPartitions
* map to hold resource to obtain per partition
* @param rmContainer
* container
* @param clusterResource
* total resource
* @param preemptMap
* map to hold preempted containers
* @param totalPreemptionAllowed
* total preemption allowed per round
* @param conservativeDRF
* should we do conservativeDRF preemption or not.
* When true:
* stop preempt container when any major resource type
* {@literal <=} 0 for to-preempt.
* This is default preemption behavior of intra-queue preemption
* When false:
* stop preempt container when: all major resource type
* {@literal <=} 0 for to-preempt.
* This is default preemption behavior of inter-queue preemption
* @param curCandidates RMContainer Set.
* @return should we preempt rmContainer. If we should, deduct from
* <code>resourceToObtainByPartition</code>
*/
public static boolean tryPreemptContainerAndDeductResToObtain(
ResourceCalculator rc, CapacitySchedulerPreemptionContext context,
Map<String, Resource> resourceToObtainByPartitions,
RMContainer rmContainer, Resource clusterResource,
Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
Resource totalPreemptionAllowed, boolean conservativeDRF) {
ApplicationAttemptId attemptId = rmContainer.getApplicationAttemptId();
// We will not account resource of a container twice or more
if (preemptMapContains(preemptMap, attemptId, rmContainer)) {
return false;
}
String nodePartition = getPartitionByNodeId(context,
rmContainer.getAllocatedNode());
Resource toObtainByPartition = resourceToObtainByPartitions
.get(nodePartition);
if (null == toObtainByPartition) {
return false;
}
    // If a toObtain resource type == 0, set it to -1 so that a zero resource
    // type does not affect the following doPreempt check: isAnyMajorResourceZero
for (ResourceInformation ri : toObtainByPartition.getResources()) {
if (ri.getValue() == 0) {
ri.setValue(-1);
}
}
if (rc.isAnyMajorResourceAboveZero(toObtainByPartition) && Resources.fitsIn(
rc, rmContainer.getAllocatedResource(), totalPreemptionAllowed)) {
boolean doPreempt;
// How much resource left after preemption happen.
Resource toObtainAfterPreemption = Resources.subtract(toObtainByPartition,
rmContainer.getAllocatedResource());
if (conservativeDRF) {
doPreempt = !rc.isAnyMajorResourceZeroOrNegative(toObtainByPartition);
} else {
// When we want to do more aggressive preemption, we will do preemption
// only if:
// - The preempt of the container makes positive contribution to the
// to-obtain resource. Positive contribution means any positive
// resource type decreases.
//
// This is example of positive contribution:
// * before: <30, 10, 5>, after <20, 10, -10>
// But this not positive contribution:
// * before: <30, 10, 0>, after <30, 10, -15>
doPreempt = Resources.lessThan(rc, clusterResource,
Resources
.componentwiseMax(toObtainAfterPreemption, Resources.none()),
Resources.componentwiseMax(toObtainByPartition, Resources.none()));
}
if (!doPreempt) {
return false;
}
Resources.subtractFrom(toObtainByPartition,
rmContainer.getAllocatedResource());
Resources.subtractFrom(totalPreemptionAllowed,
rmContainer.getAllocatedResource());
// When we have no more resource need to obtain, remove from map.
if (Resources.lessThanOrEqual(rc, clusterResource, toObtainByPartition,
Resources.none())) {
resourceToObtainByPartitions.remove(nodePartition);
}
// Add to preemptMap
addToPreemptMap(preemptMap, curCandidates, attemptId, rmContainer);
return true;
}
return false;
} | 3.68 |
hadoop_CachingGetSpaceUsed_getJitter | /**
* Randomize the refresh interval timing by this amount, the actual interval will be chosen
* uniformly between {@code interval-jitter} and {@code interval+jitter}.
*
* @return between interval-jitter and interval+jitter.
*/
@VisibleForTesting
public long getJitter() {
return jitter;
} | 3.68 |
graphhopper_GraphHopper_setStoreOnFlush | /**
* Only relevant for the in-memory graph, e.g. if you want to disable store-on-flush for unit
* tests. Specify storeOnFlush=true if you want existing data to be loaded FROM disc and all
* in-memory data to be flushed TO disc after flush is called, e.g. during OSM import.
*
* @param storeOnFlush true by default
*/
public GraphHopper setStoreOnFlush(boolean storeOnFlush) {
ensureNotLoaded();
if (storeOnFlush)
dataAccessDefaultType = DAType.RAM_STORE;
else
dataAccessDefaultType = DAType.RAM;
return this;
} | 3.68 |
flink_JobClient_triggerSavepoint | /**
* Triggers a savepoint for the associated job. The savepoint will be written to the given
* savepoint directory, or {@link
* org.apache.flink.configuration.CheckpointingOptions#SAVEPOINT_DIRECTORY} if it is null.
*
* @param savepointDirectory directory the savepoint should be written to
* @return a {@link CompletableFuture} containing the path where the savepoint is located
* @deprecated pass the format explicitly
*/
@Deprecated
default CompletableFuture<String> triggerSavepoint(@Nullable String savepointDirectory) {
return triggerSavepoint(savepointDirectory, SavepointFormatType.DEFAULT);
} | 3.68 |
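Since the overload above is deprecated, callers would typically pass the format explicitly, just as the default method itself does. A hedged usage sketch; the job client variable and savepoint path are placeholders, and the relevant Flink imports (CompletableFuture, SavepointFormatType) are assumed to be in scope:

```java
// Assumes an existing JobClient instance; the savepoint path is a placeholder.
CompletableFuture<String> savepointPath =
        jobClient.triggerSavepoint("hdfs:///flink/savepoints", SavepointFormatType.DEFAULT);
savepointPath.thenAccept(path -> System.out.println("Savepoint written to " + path));
```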
flink_MailboxProcessor_getMailboxExecutor | /**
* Returns an executor service facade to submit actions to the mailbox.
*
* @param priority the priority of the {@link MailboxExecutor}.
*/
public MailboxExecutor getMailboxExecutor(int priority) {
return new MailboxExecutorImpl(mailbox, priority, actionExecutor, this);
} | 3.68 |
graphhopper_VectorTile_getValuesBuilder | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public vector_tile.VectorTile.Tile.Value.Builder getValuesBuilder(
int index) {
return getValuesFieldBuilder().getBuilder(index);
} | 3.68 |
hudi_BufferedRandomAccessFile_flushBuffer | /**
* Flush any dirty bytes in the buffer to disk.
* @throws IOException
*/
private void flushBuffer() throws IOException {
if (this.isDirty) {
alignDiskPositionToBufferStartIfNeeded();
int len = (int) (this.currentPosition - this.startPosition);
super.write(this.dataBuffer.array(), 0, len);
this.diskPosition = this.currentPosition;
this.isDirty = false;
}
} | 3.68 |
graphhopper_GraphHopper_getCHGraphs | /**
* @return a mapping between profile names and according CH preparations. The map will be empty before loading
* or import.
*/
public Map<String, RoutingCHGraph> getCHGraphs() {
return chGraphs;
} | 3.68 |
hbase_ResponseConverter_buildGetOnlineRegionResponse | /**
* A utility to build a GetOnlineRegionResponse.
* @return the response
*/
public static GetOnlineRegionResponse
buildGetOnlineRegionResponse(final List<RegionInfo> regions) {
GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder();
for (RegionInfo region : regions) {
builder.addRegionInfo(ProtobufUtil.toRegionInfo(region));
}
return builder.build();
} | 3.68 |
hudi_HoodieTableConfig_getBootstrapIndexClass | /**
* Read the bootstrap index class from the table properties.
*/
public String getBootstrapIndexClass() {
if (!props.getBoolean(BOOTSTRAP_INDEX_ENABLE.key(), BOOTSTRAP_INDEX_ENABLE.defaultValue())) {
return BootstrapIndexType.NO_OP.getClassName();
}
String bootstrapIndexClassName;
if (contains(BOOTSTRAP_INDEX_TYPE)) {
bootstrapIndexClassName = BootstrapIndexType.valueOf(getString(BOOTSTRAP_INDEX_TYPE)).getClassName();
} else if (contains(BOOTSTRAP_INDEX_CLASS_NAME)) {
bootstrapIndexClassName = getString(BOOTSTRAP_INDEX_CLASS_NAME);
} else {
bootstrapIndexClassName = BootstrapIndexType.valueOf(BOOTSTRAP_INDEX_TYPE.defaultValue()).getClassName();
}
return bootstrapIndexClassName;
} | 3.68 |
flink_Types_EITHER | /**
* Returns type information for Flink's {@link org.apache.flink.types.Either} type. Null values
* are not supported.
*
* <p>Either type can be used for a value of two possible types.
*
* <p>Example use: <code>Types.EITHER(Types.VOID, Types.INT)</code>
*
* @param leftType type information of left side / {@link org.apache.flink.types.Either.Left}
* @param rightType type information of right side / {@link org.apache.flink.types.Either.Right}
*/
public static <L, R> TypeInformation<Either<L, R>> EITHER(
TypeInformation<L> leftType, TypeInformation<R> rightType) {
return new EitherTypeInfo<>(leftType, rightType);
} | 3.68 |
morf_AbstractSqlDialectTest_expectedJoinOnEverything | /**
* @return The expected SQL for a join with no ON criteria
*/
protected String expectedJoinOnEverything() {
return "SELECT * FROM " + tableName("TableOne") + " INNER JOIN " + tableName("TableTwo") + " ON 1=1";
} | 3.68 |
hbase_TableDescriptorBuilder_setSplitEnabled | /**
* Set the table region split enabled flag.
* @param isEnable true to enable region splitting
* @return the modifyable TD
*/
public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.68 |
hadoop_ApplicationServiceRecordProcessor_getRecordTypes | /**
* Returns the record types associated with a container service record.
*
* @return the record type array
*/
@Override public int[] getRecordTypes() {
return new int[] {Type.A, Type.AAAA, Type.CNAME, Type.SRV, Type.TXT};
} | 3.68 |
flink_SkipListKeySerializer_serialize | /**
* Serialize the key and namespace to bytes. The format is:
*   - int: length of serialized namespace
*   - byte[]: serialized namespace
*   - int: length of serialized key
*   - byte[]: serialized key
*/
byte[] serialize(K key, N namespace) {
// we know that the segment contains a byte[], because it is created
// in the method below by wrapping a byte[]
return serializeToSegment(key, namespace).getArray();
} | 3.68 |
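The length-prefixed layout documented above can be reproduced with a plain ByteBuffer to see what a reader of that layout has to do; this sketch deliberately avoids Flink's MemorySegment and serializer classes, so byte order and serializer details of the real implementation are not captured.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SkipListKeyLayoutSketch {
    public static void main(String[] args) {
        byte[] ns = "ns".getBytes(StandardCharsets.UTF_8);
        byte[] key = "key-1".getBytes(StandardCharsets.UTF_8);

        // Write: [int nsLen][ns bytes][int keyLen][key bytes]
        ByteBuffer buf = ByteBuffer.allocate(8 + ns.length + key.length);
        buf.putInt(ns.length).put(ns).putInt(key.length).put(key);
        buf.flip();

        // Read it back in the same order.
        byte[] nsOut = new byte[buf.getInt()];
        buf.get(nsOut);
        byte[] keyOut = new byte[buf.getInt()];
        buf.get(keyOut);
        System.out.println(new String(nsOut, StandardCharsets.UTF_8) + " / "
                + new String(keyOut, StandardCharsets.UTF_8));   // ns / key-1
    }
}
```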
morf_AbstractSqlDialectTest_testSelectSumWithExpression | /**
* Tests select statement with SUM function using more than a simple field.
*/
@Test
public void testSelectSumWithExpression() {
SelectStatement stmt = select(sum(field(INT_FIELD).multiplyBy(literal(2)).divideBy(literal(3)))).from(tableRef(TEST_TABLE));
assertEquals("Select scripts are not the same", expectedSelectSumWithExpression(), testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_TSetClientInfoResp_findByThriftId | /** Find the _Fields constant that matches fieldId, or null if its not found. */
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1: // STATUS
return STATUS;
default:
return null;
}
} | 3.68 |
hadoop_MutableInverseQuantiles_setQuantiles | /**
* Sets quantileInfo.
*
* @param ucName capitalized name of the metric
* @param uvName capitalized type of the values
* @param desc uncapitalized long-form textual description of the metric
* @param lvName uncapitalized type of the values
* @param df Number formatter for inverse percentile value
*/
void setQuantiles(String ucName, String uvName, String desc, String lvName, DecimalFormat df) {
for (int i = 0; i < INVERSE_QUANTILES.length; i++) {
double inversePercentile = 100 * (1 - INVERSE_QUANTILES[i].quantile);
String nameTemplate = ucName + df.format(inversePercentile) + "thInversePercentile" + uvName;
String descTemplate = df.format(inversePercentile) + " inverse percentile " + lvName
+ " with " + getInterval() + " second interval for " + desc;
addQuantileInfo(i, info(nameTemplate, descTemplate));
}
} | 3.68 |
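The naming arithmetic above can be traced by hand for one hypothetical entry; the quantile value, metric names, and DecimalFormat pattern below are assumptions chosen only to illustrate the string construction.

```java
import java.text.DecimalFormat;

public class InversePercentileNameSketch {
    public static void main(String[] args) {
        double quantile = 0.99;                       // assumed entry of INVERSE_QUANTILES
        DecimalFormat df = new DecimalFormat("###");  // assumed formatter
        double inversePercentile = 100 * (1 - quantile);   // ≈ 1
        String name = "Op" + df.format(inversePercentile) + "thInversePercentile" + "Latency";
        // Prints: Op1thInversePercentileLatency
        System.out.println(name);
    }
}
```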
hbase_CellChunkMap_createSubCellFlatMap | /*
* To be used by the base (CellFlatMap) class only, to create a sub-CellFlatMap.
* Should be used only to create a CellChunkMap from a CellChunkMap.
*/
@Override
protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) {
return new CellChunkMap(this.comparator(), this.chunks, min, max, descending);
} | 3.68 |
hmily_HmilyInsertStatement_getColumns | /**
* Get columns.
*
* @return columns
*/
public Collection<HmilyColumnSegment> getColumns() {
return null == insertColumns ? Collections.emptyList() : insertColumns.getColumns();
} | 3.68 |
hudi_OverwriteWithLatestAvroPayload_overwriteField | /**
* Return true if value equals defaultValue otherwise false.
*/
public Boolean overwriteField(Object value, Object defaultValue) {
if (JsonProperties.NULL_VALUE.equals(defaultValue)) {
return value == null;
}
return Objects.equals(value, defaultValue);
} | 3.68 |
morf_AbstractSqlDialectTest_testParameterisedInsert | /**
* Tests that an unparameterised insert where field values have been supplied
* via a list of {@link FieldLiteral}s results in the field literals' values
* being used as the inserted values.
*
* <p>By way of a regression test, this test omits some {@linkplain FieldLiteral}s
* from its 'fields' array (namely 'charField', 'decimalField' and the internal 'version').
* It checks that the value for these in the resulting sql statement's 'VALUE' part are
* '?' as it won't know what to substitute for these.</p>
*/
@Test
public void testParameterisedInsert() {
AliasedField[] fields = new AliasedField[] {
new FieldLiteral(5).as("id"),
new FieldLiteral("Escap'd").as(STRING_FIELD),
new FieldLiteral(20100405).as(DATE_FIELD),
new FieldLiteral(7).as(INT_FIELD),
new FieldLiteral(true).as(BOOLEAN_FIELD),
};
InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE)).fields(fields);
String sql = testDialect.convertStatementToSQL(stmt, metadata);
assertEquals("Generated SQL not as expected", expectedParameterisedInsertStatement(), sql);
} | 3.68 |
hadoop_ClientGSIContext_updateResponseState | /**
* Client side implementation only receives state alignment info.
* It does not provide state alignment info therefore this does nothing.
*/
@Override
public void updateResponseState(RpcResponseHeaderProto.Builder header) {
// Do nothing.
} | 3.68 |
hbase_MurmurHash3_hash | /** Returns the MurmurHash3_x86_32 hash. */
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SF")
@Override
public <T> int hash(HashKey<T> hashKey, int initval) {
final int c1 = 0xcc9e2d51;
final int c2 = 0x1b873593;
int length = hashKey.length();
int h1 = initval;
int roundedEnd = (length & 0xfffffffc); // round down to 4 byte block
for (int i = 0; i < roundedEnd; i += 4) {
// little endian load order
int k1 = (hashKey.get(i) & 0xff) | ((hashKey.get(i + 1) & 0xff) << 8)
| ((hashKey.get(i + 2) & 0xff) << 16) | (hashKey.get(i + 3) << 24);
k1 *= c1;
k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
k1 *= c2;
h1 ^= k1;
h1 = (h1 << 13) | (h1 >>> 19); // ROTL32(h1,13);
h1 = h1 * 5 + 0xe6546b64;
}
// tail
int k1 = 0;
switch (length & 0x03) {
case 3:
k1 = (hashKey.get(roundedEnd + 2) & 0xff) << 16;
// FindBugs SF_SWITCH_FALLTHROUGH
case 2:
k1 |= (hashKey.get(roundedEnd + 1) & 0xff) << 8;
// FindBugs SF_SWITCH_FALLTHROUGH
case 1:
k1 |= (hashKey.get(roundedEnd) & 0xff);
k1 *= c1;
k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
k1 *= c2;
h1 ^= k1;
default:
// fall out
}
// finalization
h1 ^= length;
// fmix(h1);
h1 ^= h1 >>> 16;
h1 *= 0x85ebca6b;
h1 ^= h1 >>> 13;
h1 *= 0xc2b2ae35;
h1 ^= h1 >>> 16;
return h1;
} | 3.68 |
hadoop_AbstractS3ACommitter_getUUIDSource | /**
* Source of the UUID.
* @return how the job UUID was retrieved/generated.
*/
@VisibleForTesting
public final JobUUIDSource getUUIDSource() {
return uuidSource;
} | 3.68 |
flink_TaskManagerSlotInformation_isMatchingRequirement | /**
* Returns true if the required {@link ResourceProfile} can be fulfilled by this slot.
*
* @param required resources
* @return true if this slot can fulfill the resource requirements
*/
default boolean isMatchingRequirement(ResourceProfile required) {
return getResourceProfile().isMatching(required);
} | 3.68 |
framework_ApplicationConnection_isLoadingIndicatorVisible | /**
* Determines whether or not the loading indicator is showing.
*
* @return true if the loading indicator is visible
* @deprecated As of 7.1. Use {@link #getLoadingIndicator()} and
* {@link VLoadingIndicator#isVisible()} instead.
*/
@Deprecated
public boolean isLoadingIndicatorVisible() {
return getLoadingIndicator().isVisible();
} | 3.68 |
flink_CheckpointConfig_getTolerableCheckpointFailureNumber | /**
* Get the defined number of consecutive checkpoint failures that will be tolerated, before the
* whole job is failed over.
*
* <p>If the {@link ExecutionCheckpointingOptions#TOLERABLE_FAILURE_NUMBER} has not been
* configured, this method would return 0 which means the checkpoint failure manager would not
* tolerate any declined checkpoint failure.
*/
public int getTolerableCheckpointFailureNumber() {
return configuration
.getOptional(ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER)
.orElse(0);
} | 3.68 |
flink_TransformationMetadata_fill | /** Fill a transformation with this metadata. */
public <T extends Transformation<?>> T fill(T transformation) {
transformation.setName(getName());
transformation.setDescription(getDescription());
if (getUid() != null) {
transformation.setUid(getUid());
}
return transformation;
} | 3.68 |
hbase_FavoredStochasticBalancer_getServerFromFavoredNode | /**
* Get the ServerName for the FavoredNode. Since an FN's startcode is -1, we want to get the
* ServerName with the correct start code from the list of provided servers.
*/
private ServerName getServerFromFavoredNode(List<ServerName> servers, ServerName fn) {
for (ServerName server : servers) {
if (ServerName.isSameAddress(fn, server)) {
return server;
}
}
return null;
} | 3.68 |
hibernate-validator_SizeValidatorForCharSequence_isValid | /**
* Checks the length of the specified character sequence (e.g. string).
*
* @param charSequence The character sequence to validate.
* @param constraintValidatorContext context in which the constraint is evaluated.
*
* @return Returns {@code true} if the string is {@code null} or the length of {@code charSequence} is between the specified
* {@code min} and {@code max} values (inclusive), {@code false} otherwise.
*/
@Override
public boolean isValid(CharSequence charSequence, ConstraintValidatorContext constraintValidatorContext) {
if ( charSequence == null ) {
return true;
}
int length = charSequence.length();
return length >= min && length <= max;
} | 3.68 |
pulsar_ConfigValidationUtils_mapFv | /**
* Returns a new NestableFieldValidator for a Map.
*
* @param key a validator for the keys in the map
* @param val a validator for the values in the map
* @param notNull if {@code true}, a null value is rejected as invalid
* @return a NestableFieldValidator for a Map
*/
public static NestableFieldValidator mapFv(final NestableFieldValidator key,
final NestableFieldValidator val, final boolean notNull) {
return new NestableFieldValidator() {
@SuppressWarnings("unchecked")
@Override
public void validateField(String pd, String name, Object field)
throws IllegalArgumentException {
if (field == null) {
if (notNull) {
throw new IllegalArgumentException("Field " + name + " must not be null");
} else {
return;
}
}
if (field instanceof Map) {
for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) field).entrySet()) {
key.validateField("Each key of the map ", name, entry.getKey());
val.validateField("Each value in the map ", name, entry.getValue());
}
return;
}
throw new IllegalArgumentException(
"Field " + name + " must be a Map");
}
};
} | 3.68 |
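A hedged usage sketch; the fv(String.class, false) factory for a simple type validator is an assumption (only mapFv appears above), and the three-argument validateField call mirrors the signature shown in the snippet.

NestableFieldValidator stringFv = ConfigValidationUtils.fv(String.class, false); // assumed helper
NestableFieldValidator mapValidator = ConfigValidationUtils.mapFv(stringFv, stringFv, true);

Map<String, String> props = Map.of("tenant", "public", "namespace", "default");
mapValidator.validateField("Config ", "userConfig", props); // throws IllegalArgumentException on bad input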
querydsl_PathBuilder_getTime | /**
* Create a new Time typed path
*
 * @param <A> the comparable property type
* @param property property name
* @param type property type
* @return property path
*/
@SuppressWarnings("unchecked")
public <A extends Comparable<?>> TimePath<A> getTime(String property, Class<A> type) {
Class<? extends A> vtype = validate(property, type);
return super.createTime(property, (Class<? super A>) vtype);
} | 3.68 |
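A short usage sketch with Querydsl's dynamic paths; the entity alias and property name are illustrative.

PathBuilder<Object> meeting = new PathBuilder<>(Object.class, "meeting");
TimePath<java.sql.Time> startTime = meeting.getTime("startTime", java.sql.Time.class);
BooleanExpression afterNine = startTime.after(java.sql.Time.valueOf("09:00:00"));
// afterNine can now be passed to a Querydsl query as a predicate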
framework_JsonPaintTarget_escapeJSON | /**
* Escapes the given string so it can safely be used as a JSON string.
*
* @param s
* The string to escape
* @return Escaped version of the string
*/
public static String escapeJSON(String s) {
// FIXME: Move this method to another class as other classes use it
// also.
if (s == null) {
return "";
}
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < s.length(); i++) {
final char ch = s.charAt(i);
switch (ch) {
case '"':
sb.append("\\\"");
break;
case '\\':
sb.append("\\\\");
break;
case '\b':
sb.append("\\b");
break;
case '\f':
sb.append("\\f");
break;
case '\n':
sb.append("\\n");
break;
case '\r':
sb.append("\\r");
break;
case '\t':
sb.append("\\t");
break;
case '/':
sb.append("\\/");
break;
default:
if (ch >= '\u0000' && ch <= '\u001F') {
final String ss = Integer.toHexString(ch);
sb.append("\\u");
for (int k = 0; k < 4 - ss.length(); k++) {
sb.append('0');
}
sb.append(ss.toUpperCase(Locale.ROOT));
} else {
sb.append(ch);
}
}
}
return sb.toString();
} | 3.68 |
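A small example of the escaping behaviour (output shown in the comment).

String raw = "He said \"hi\"\non line two";
String escaped = JsonPaintTarget.escapeJSON(raw);
System.out.println(escaped); // prints: He said \"hi\"\non line two  (quote and newline escaped)
// a null input yields an empty string rather than a NullPointerException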
hbase_WALProcedureStore_initTrackerFromOldLogs | /**
 * If the last log's tracker is not partial, use it as {@link #storeTracker}. Otherwise, set
 * storeTracker as partial, and let {@link ProcedureWALFormatReader} rebuild it using entries in the log.
*/
private void initTrackerFromOldLogs() {
if (logs.isEmpty() || !isRunning()) {
return;
}
ProcedureWALFile log = logs.getLast();
if (!log.getTracker().isPartial()) {
storeTracker.resetTo(log.getTracker());
} else {
storeTracker.reset();
storeTracker.setPartialFlag(true);
}
} | 3.68 |
hudi_BaseTableMetadata_fetchAllPartitionPaths | /**
* Returns a list of all partitions.
*/
protected List<String> fetchAllPartitionPaths() {
HoodieTimer timer = HoodieTimer.start();
Option<HoodieRecord<HoodieMetadataPayload>> recordOpt = getRecordByKey(RECORDKEY_PARTITION_LIST,
MetadataPartitionType.FILES.getPartitionPath());
metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.LOOKUP_PARTITIONS_STR, timer.endTimer()));
List<String> partitions = recordOpt.map(record -> {
HoodieMetadataPayload metadataPayload = record.getData();
checkForSpuriousDeletes(metadataPayload, "\"all partitions\"");
List<String> relativePaths = metadataPayload.getFilenames();
// Non-partitioned tables have a single empty partition
if (relativePaths.size() == 1 && relativePaths.get(0).equals(NON_PARTITIONED_NAME)) {
return Collections.singletonList("");
} else {
return relativePaths;
}
})
.orElse(Collections.emptyList());
LOG.info("Listed partitions from metadata: #partitions=" + partitions.size());
return partitions;
} | 3.68 |
flink_FlinkCalciteSqlValidator_getSnapShotNode | /**
* Get the {@link SqlSnapshot} node in a {@link SqlValidatorNamespace}.
*
 * <p>In general, if there is a snapshot expression, the enclosing node of the IdentifierNamespace
 * is usually a SqlSnapshot. However, when an "as" operator is present, we need to check whether
 * the enclosing node is an "as" call whose first operand is a SqlSnapshot.
*
* @param ns The namespace used to find SqlSnapshot
 * @return SqlSnapshot found in {@code ns}, empty if not found
*/
private Optional<SqlSnapshot> getSnapShotNode(SqlValidatorNamespace ns) {
if (ns instanceof IdentifierNamespace) {
SqlNode enclosingNode = ns.getEnclosingNode();
// FOR SYSTEM_TIME AS OF [expression]
if (enclosingNode instanceof SqlSnapshot) {
return Optional.of((SqlSnapshot) enclosingNode);
// FOR SYSTEM_TIME AS OF [expression] as [identifier]
} else if (enclosingNode instanceof SqlBasicCall
&& ((SqlBasicCall) enclosingNode).getOperator() instanceof SqlAsOperator
&& ((SqlBasicCall) enclosingNode).getOperandList().get(0)
instanceof SqlSnapshot) {
return Optional.of(
(SqlSnapshot) ((SqlBasicCall) enclosingNode).getOperandList().get(0));
}
}
return Optional.empty();
} | 3.68 |
hbase_EncryptionUtil_unwrapWALKey | /**
 * Unwrap a WAL key by decrypting it with the secret key of the given subject. The configuration
* must be set up correctly for key alias resolution.
* @param conf configuration
* @param subject subject key alias
* @param value the encrypted key bytes
* @return the raw key bytes
* @throws IOException if key is not found for the subject, or if some I/O error occurs
* @throws KeyException if fail to unwrap the key
*/
public static Key unwrapWALKey(Configuration conf, String subject, byte[] value)
throws IOException, KeyException {
EncryptionProtos.WrappedKey wrappedKey =
EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value));
String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Cipher cipher = Encryption.getCipher(conf, algorithm);
if (cipher == null) {
throw new RuntimeException("Cipher '" + algorithm + "' not available");
}
return getUnwrapKey(conf, subject, wrappedKey, cipher);
} | 3.68 |
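A hedged call-site sketch; the subject alias and the source of the wrapped key bytes are assumptions.

Configuration conf = HBaseConfiguration.create();           // must carry the crypto/key-provider settings
byte[] wrapped = readWrappedKeyBytes();                      // assumed helper, e.g. bytes taken from the WAL header
try {
    Key walKey = EncryptionUtil.unwrapWALKey(conf, "hbase", wrapped); // "hbase" is an assumed key alias
    // hand walKey to the WAL reader's decryptor
} catch (IOException | KeyException e) {
    // wrong master key, missing alias, or corrupt wrapped key
}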
morf_TableOutputter_outputHelp | /**
* @param workSheet to add the help to
* @param table to fetch metadata from
* @param startRow to start adding rows at
* @param helpTextRowNumbers - map to insert row numbers for each help field into
* @return the index of the next row to use
* @throws WriteException if any of the writes to workSheet failed
*/
private int outputHelp(WritableSheet workSheet, Table table, final int startRow, final Map<String, Integer> helpTextRowNumbers) throws WriteException {
int currentRow = startRow;
// Title for the descriptions
Label dataLabel = new Label(0, currentRow, "Column Descriptions");
dataLabel.setCellFormat(getBoldFormat());
workSheet.addCell(dataLabel);
currentRow++;
int currentColumn = 0;
for (Column column : table.columns()) {
if (!column.getName().equals("id") && !column.getName().equals("version")) {
// Field name to go with the description
Label fieldName = new Label(0, currentRow, spreadsheetifyName(column.getName()));
fieldName.setCellFormat(getBoldFormat());
workSheet.addCell(fieldName);
// The type/width
String typeString = column.getType() + "(" + column.getWidth() + (column.getScale() == 0 ? "" : "," + column.getScale()) + ")";
Label fieldType = new Label(1, currentRow, typeString);
fieldType.setCellFormat(getStandardFormat());
workSheet.addCell(fieldType);
// The default
String defaultValue = additionalSchemaData.columnDefaultValue(table, column.getName());
Label fieldDefault = new Label(2, currentRow, defaultValue);
fieldDefault.setCellFormat(getStandardFormat());
workSheet.addCell(fieldDefault);
// The field documentation
workSheet.mergeCells(3, currentRow, 12, currentRow);
String documentation = additionalSchemaData.columnDocumentation(table, column.getName());
Label documentationLabel = new Label(3, currentRow, documentation);
WritableCellFormat format = new WritableCellFormat(getStandardFormat());
format.setWrap(true);
format.setVerticalAlignment(VerticalAlignment.TOP);
documentationLabel.setCellFormat(format);
workSheet.addCell(documentationLabel);
 // If we've exceeded the maximum number of columns, then output a truncated warning
if(currentColumn >= MAX_EXCEL_COLUMNS) {
Label truncatedWarning = new Label(13, currentRow, "[TRUNCATED]");
truncatedWarning.setCellFormat(getBoldFormat());
workSheet.addCell(truncatedWarning);
}
// We are aiming for 150px. 1px is 15 Excel "Units"
workSheet.setRowView(currentRow, 150 * 15);
// Remember at what row we created the help text for this column
helpTextRowNumbers.put(column.getName(), currentRow);
currentRow++;
currentColumn++;
}
}
// Group all the help rows together
workSheet.setRowGroup(startRow + 1, currentRow - 1, true);
// Some extra blank space for neatness
currentRow++;
return currentRow;
} | 3.68 |
open-banking-gateway_EncryptionWithInitVectorOper_generateKey | /**
* Generate random symmetric key with initialization vector (IV)
* @return Secret key with IV
*/
@SneakyThrows
public SecretKeyWithIv generateKey() {
byte[] iv = new byte[encSpec.getIvSize()];
SecureRandom random = new SecureRandom();
random.nextBytes(iv);
KeyGenerator keyGen = KeyGenerator.getInstance(encSpec.getKeyAlgo());
keyGen.init(encSpec.getLen());
return new SecretKeyWithIv(iv, keyGen.generateKey());
} | 3.68 |
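The same pattern with plain JCE, for readers without the surrounding encSpec wiring; the algorithm, key length and IV size are typical AES-GCM choices rather than values taken from the class.

byte[] iv = new byte[12];                          // 96-bit IV, common for AES-GCM
new SecureRandom().nextBytes(iv);
KeyGenerator keyGen = KeyGenerator.getInstance("AES");
keyGen.init(256);                                  // 256-bit key
SecretKey key = keyGen.generateKey();
// pair 'key' and 'iv' the same way SecretKeyWithIv does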
flink_TwoInputTransformation_getInputType1 | /** Returns the {@code TypeInformation} for the elements from the first input. */
public TypeInformation<IN1> getInputType1() {
return input1.getOutputType();
} | 3.68 |
flink_LinkedListSerializer_getElementSerializer | /**
* Gets the serializer for the elements of the list.
*
* @return The serializer for the elements of the list
*/
public TypeSerializer<T> getElementSerializer() {
return elementSerializer;
} | 3.68 |
hadoop_HsAboutPage_content | /**
   * The content of this page is the history server info block
   * @return InfoBlock.class
   */
@Override protected Class<? extends SubView> content() {
HistoryInfo info = new HistoryInfo();
info("History Server").
__("BuildVersion", info.getHadoopBuildVersion()
+ " on " + info.getHadoopVersionBuiltOn()).
__("History Server started on", Times.format(info.getStartedOn()));
return InfoBlock.class;
} | 3.68 |
hbase_KeyValueCodecWithTags_getDecoder | /**
* Implementation depends on {@link InputStream#available()}
*/
@Override
public Decoder getDecoder(final InputStream is) {
return new KeyValueDecoder(is);
} | 3.68 |
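A hedged decoding-loop sketch using HBase's Codec.Decoder contract (advance()/current()); the input file is an assumption.

Codec codec = new KeyValueCodecWithTags();
try (InputStream is = new FileInputStream("cells.bin")) {   // assumed source of encoded cells
    Codec.Decoder decoder = codec.getDecoder(is);
    while (decoder.advance()) {
        Cell cell = decoder.current();
        // process the cell, tags included
    }
}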