name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_DataNodeFaultInjector_delayAckLastPacket | /**
* Used as a hook to delay sending the response of the last packet.
*/
public void delayAckLastPacket() throws IOException {
} | 3.68 |
hadoop_ManifestCommitter_setupTask | /**
* Set up a task through a {@link SetupTaskStage}.
* Classic FileOutputCommitter is a no-op here, relying
* on RecordWriters to create the dir implicitly on file
* create().
* FileOutputCommitter also uses the existence of that
* file as a flag to indicate task commit is needed.
* @param context task context.
* @throws IOException IO Failure.
*/
@Override
public void setupTask(final TaskAttemptContext context)
throws IOException {
ManifestCommitterConfig committerConfig =
enterCommitter(true, context);
StageConfig stageConfig =
committerConfig
.createStageConfig()
.withOperations(createManifestStoreOperations())
.build();
// create task attempt dir; delete if present. Or fail?
new SetupTaskStage(stageConfig).apply("");
logCommitterStatisticsAtDebug();
} | 3.68 |
hbase_HeapMemoryManager_getHeapOccupancyPercent | /** Returns the heap occupancy percentage, 0 <= n <= 1, or -0.0 if there was an error querying the JVM */
public float getHeapOccupancyPercent() {
return this.heapOccupancyPercent == Float.MAX_VALUE
? HEAP_OCCUPANCY_ERROR_VALUE
: this.heapOccupancyPercent;
} | 3.68 |
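Since the error sentinel is -0.0f, which compares equal to 0.0f with ==, a caller that wants to tell a failed reading apart from a genuinely empty heap needs a bit-level check. A minimal sketch follows; the -0.0f sentinel value is an assumption taken from the javadoc above, not shown in this snippet.

final class HeapOccupancyCheck {
  // Hedged sketch: distinguish the -0.0f error sentinel from a legitimate 0.0f occupancy.
  static boolean isErrorValue(float occupancy) {
    return Float.floatToIntBits(occupancy) == Float.floatToIntBits(-0.0f);
  }
}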
framework_Design_createHtml | /**
* Generates an html tree representation of the component hierarchy having
* the root designContext.getRootComponent(). The hierarchy is stored under
* <body> in the tree. The generated tree represents a valid html
* document.
*
*
* @param designContext
* a DesignContext object specifying the root component
* (designContext.getRootComponent()) of the hierarchy
* @return an html tree representation of the component hierarchy
*/
private static Document createHtml(DesignContext designContext) {
// Create the html tree skeleton.
Document doc = new Document("");
DocumentType docType = new DocumentType("html", "", "");
doc.appendChild(docType);
Element html = doc.createElement("html");
doc.appendChild(html);
html.appendChild(doc.createElement("head"));
Element body = doc.createElement("body");
html.appendChild(body);
// Append the design under <body> in the html tree. createNode
// creates the entire component hierarchy rooted at the
// given root node.
Component root = designContext.getRootComponent();
if (root != null) {
Node rootNode = designContext.createElement(root);
body.appendChild(rootNode);
}
designContext.writePackageMappings(doc);
return doc;
} | 3.68 |
graphhopper_VectorTile_clearTags | /**
* <pre>
* Tags of this feature are encoded as repeated pairs of
* integers.
* A detailed description of tags is located in sections
* 4.2 and 4.4 of the specification
* </pre>
*
* <code>repeated uint32 tags = 2 [packed = true];</code>
*/
public Builder clearTags() {
tags_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
} | 3.68 |
shardingsphere-elasticjob_TaskContext_getExecutorId | /**
* Get executor ID.
*
* @param appName application name
* @return executor ID
*/
public String getExecutorId(final String appName) {
return String.join(DELIMITER, appName, slaveId);
} | 3.68 |
hbase_MasterObserver_postUpdateReplicationPeerConfig | /**
 * Called after updating the peerConfig for the specified peer.
 * @param ctx the environment to interact with the framework and master
 * @param peerId a short name that identifies the peer
 * @param peerConfig the updated replication peer config
*/
default void postUpdateReplicationPeerConfig(
final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
ReplicationPeerConfig peerConfig) throws IOException {
} | 3.68 |
framework_Button_removeClickShortcut | /**
* Removes the keyboard shortcut previously set with
* {@link #setClickShortcut(int, int...)}.
*/
public void removeClickShortcut() {
if (clickShortcut != null) {
removeShortcutListener(clickShortcut);
clickShortcut = null;
getState().clickShortcutKeyCode = 0;
}
} | 3.68 |
hbase_Procedure_isLockedWhenLoading | /**
 * Can only be called when restarting, before the procedure is actually executed, as after we
* actually call the {@link #doAcquireLock(Object, ProcedureStore)} method, we will reset
* {@link #lockedWhenLoading} to false.
* <p/>
* Now it is only used in the ProcedureScheduler to determine whether we should put a Procedure in
* front of a queue.
*/
public boolean isLockedWhenLoading() {
return lockedWhenLoading;
} | 3.68 |
morf_DataValueLookupMetadata_getIndexInArray | /**
* Get the array position of the specified column.
*
* @param columnName The column name.
* @return The array position, or null if not present.
*/
@Nullable
Integer getIndexInArray(CaseInsensitiveString columnName) {
return lookups.get(columnName);
} | 3.68 |
hbase_RingBufferEnvelope_getPayload | /**
 * Retrieves the current namedQueue payload {@link NamedQueuePayload} available on the Envelope
 * and frees up the Envelope.
 * @return the namedQueue payload (e.g. RPC log details)
*/
public NamedQueuePayload getPayload() {
final NamedQueuePayload namedQueuePayload = this.namedQueuePayload;
this.namedQueuePayload = null;
return namedQueuePayload;
} | 3.68 |
flink_ReOpenableHashPartition_spillInMemoryPartition | /**
 * Spills this partition to disk. This method is invoked once after the initial open() method.
 *
 * @return the number of memory segments written to the write-behind buffers
*/
int spillInMemoryPartition(
FileIOChannel.ID targetChannel,
IOManager ioManager,
LinkedBlockingQueue<MemorySegment> writeBehindBuffers)
throws IOException {
this.initialPartitionBuffersCount = partitionBuffers.length; // for ReOpenableHashMap
this.initialBuildSideChannel = targetChannel;
initialBuildSideWriter =
ioManager.createBlockChannelWriter(targetChannel, writeBehindBuffers);
final int numSegments = this.partitionBuffers.length;
for (int i = 0; i < numSegments; i++) {
initialBuildSideWriter.writeBlock(partitionBuffers[i]);
}
this.partitionBuffers = null;
initialBuildSideWriter.close();
// num partitions are now in the writeBehindBuffers. We propagate this information back
return numSegments;
} | 3.68 |
hbase_HFileBlock_checkCallerProvidedOnDiskSizeWithHeader | /**
* Check that {@code value} provided by the calling context seems reasonable, within a large
* margin of error.
* @return {@code true} if the value is safe to proceed, {@code false} otherwise.
*/
private boolean checkCallerProvidedOnDiskSizeWithHeader(long value) {
// same validation logic as is used by Math.toIntExact(long)
int intValue = (int) value;
if (intValue != value) {
if (LOG.isTraceEnabled()) {
LOG.trace("onDiskSizeWithHeaderL={}; value exceeds int size limits.", value);
}
return false;
}
if (intValue == -1) {
// a magic value we expect to see.
return true;
}
return checkOnDiskSizeWithHeader(intValue);
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertFromSelectIgnoresCase | /**
* Tests that when forming an insert/select from two table references, the {@link SqlDialect} acts in a case insensitive manner
* when determining if source fields are present in the destination table.
*/
@Test
public void testInsertFromSelectIgnoresCase() {
InsertStatement insertStatement = new InsertStatement().into(new TableReference(UPPER_TABLE)).from(new TableReference(MIXED_TABLE));
String expectedSql = "INSERT INTO " + tableName(UPPER_TABLE) + " (id, version, FIELDA) SELECT id, version, FIELDA FROM " + tableName(MIXED_TABLE);
List<String> sql = testDialect.convertStatementToSQL(insertStatement, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertEquals("Expected INSERT to be case insensitive", expectedSql, sql.get(sql.size() - 1));
} | 3.68 |
flink_AbstractStreamOperatorV2_getPartitionedState | /**
* Creates a partitioned state handle, using the state backend configured for this task.
*
* @throws IllegalStateException Thrown, if the key/value state was already initialized.
* @throws Exception Thrown, if the state backend cannot create the key/value state.
*/
protected <S extends State, N> S getPartitionedState(
N namespace,
TypeSerializer<N> namespaceSerializer,
StateDescriptor<S, ?> stateDescriptor)
throws Exception {
return stateHandler.getPartitionedState(namespace, namespaceSerializer, stateDescriptor);
} | 3.68 |
morf_OracleDialect_legacyFetchSizeForBulkSelects | /**
* @deprecated this method returns the legacy fetch size value for Oracle and is primarily for backwards compatibility.
* Please use {@link SqlDialect#fetchSizeForBulkSelects()} for the new recommended default value.
* @see SqlDialect#fetchSizeForBulkSelects()
*/
@Override
@Deprecated
public int legacyFetchSizeForBulkSelects() {
return 200;
} | 3.68 |
flink_FieldSet_toFieldList | /**
* Turns the FieldSet into an ordered FieldList.
*
* @return An ordered FieldList.
*/
public FieldList toFieldList() {
int[] pos = toArray();
Arrays.sort(pos);
return new FieldList(pos);
} | 3.68 |
pulsar_SecretsProvider_interpolateSecretForValue | /**
* If the passed value is formatted as a reference to a secret, as defined by the implementation, return the
* referenced secret. If the value is not formatted as a secret reference or the referenced secret does not exist,
* return null.
*
* @param value a config value that may be formatted as a reference to a secret
* @return the materialized secret. Otherwise, null.
*/
default String interpolateSecretForValue(String value) {
return null;
} | 3.68 |
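To make the contract concrete, here is a hypothetical sketch of the kind of logic an implementation might use: values of the form "${env:NAME}" are treated as references and resolved from environment variables. The "${env:...}" syntax and the helper below are my own illustration, not Pulsar's API.

final class EnvSecretInterpolation {
  // Hypothetical illustration only: resolve "${env:NAME}" references from environment variables.
  static String interpolateSecretForValue(String value) {
    if (value != null && value.startsWith("${env:") && value.endsWith("}")) {
      // Returns null when the referenced variable does not exist, matching the contract above.
      return System.getenv(value.substring("${env:".length(), value.length() - 1));
    }
    return null; // not formatted as a secret reference
  }
}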
pulsar_PulsarClientException_setPreviousExceptions | /**
 * Adds a list of previous exceptions which occurred for the same operation
 * and have been retried.
*
* @param previous A collection of throwables that triggered retries
*/
public void setPreviousExceptions(Collection<Throwable> previous) {
this.previous = previous;
} | 3.68 |
hmily_PropertyName_getElementSize | /**
* Gets element size.
*
* @return the element size
*/
public int getElementSize() {
return this.elements.length;
} | 3.68 |
hbase_LeaseManager_getListener | /** Returns listener */
public LeaseListener getListener() {
return this.listener;
} | 3.68 |
shardingsphere-elasticjob_FailoverService_getAllFailoveringItems | /**
 * Get all failovering items, i.e. the items that are currently in the process of failing over.
*
* @return all failovering items
*/
public Map<Integer, JobInstance> getAllFailoveringItems() {
int shardingTotalCount = configService.load(true).getShardingTotalCount();
Map<Integer, JobInstance> result = new LinkedHashMap<>(shardingTotalCount, 1);
for (int i = 0; i < shardingTotalCount; i++) {
String data = jobNodeStorage.getJobNodeData(FailoverNode.getExecutingFailoverNode(i));
if (!Strings.isNullOrEmpty(data)) {
result.put(i, new JobInstance(data));
}
}
return result;
} | 3.68 |
graphhopper_VectorTile_getIntValue | /**
* <code>optional int64 int_value = 4;</code>
*/
public long getIntValue() {
return intValue_;
} | 3.68 |
hadoop_NativeTaskOutputFiles_getOutputIndexFileForWrite | /**
* Create a local map output index file name.
*
* @param size the size of the file
*/
public Path getOutputIndexFileForWrite(long size) throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.68 |
flink_PekkoRpcServiceUtils_getRpcUrl | /**
* @param hostname The hostname or address where the target RPC service is listening.
* @param port The port where the target RPC service is listening.
* @param endpointName The name of the RPC endpoint.
* @param addressResolution Whether to try address resolution of the given hostname or not. This
* allows to fail fast in case that the hostname cannot be resolved.
 * @param protocol the protocol to use, indicating whether security/encryption is enabled
* @return The RPC URL of the specified RPC endpoint.
*/
public static String getRpcUrl(
String hostname,
int port,
String endpointName,
AddressResolution addressResolution,
Protocol protocol)
throws UnknownHostException {
checkNotNull(hostname, "hostname is null");
checkNotNull(endpointName, "endpointName is null");
checkArgument(isValidClientPort(port), "port must be in [1, 65535]");
if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
// Fail fast if the hostname cannot be resolved
//noinspection ResultOfMethodCallIgnored
InetAddress.getByName(hostname);
}
final String hostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);
return internalRpcUrl(
endpointName, Optional.of(new RemoteAddressInformation(hostPort, protocol)));
} | 3.68 |
flink_RowTimeMiniBatchAssginerOperator_getMiniBatchStart | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/** Method to get the mini-batch start for a watermark. */
private static long getMiniBatchStart(long watermark, long interval) {
return watermark - (watermark + interval) % interval;
} | 3.68 |
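A quick check of the formula (my own illustration, not part of the Flink sources): with a 1000 ms interval, watermarks 0..999 map to batch start 0, watermark 1500 maps to 1000, and a negative watermark such as -1 maps to -1000, which is why the formula adds the interval before taking the modulo.

public final class MiniBatchStartDemo {
  // Mirrors the formula above: watermark - (watermark + interval) % interval.
  static long miniBatchStart(long watermark, long interval) {
    return watermark - (watermark + interval) % interval;
  }

  public static void main(String[] args) {
    System.out.println(miniBatchStart(999, 1000));  // 0
    System.out.println(miniBatchStart(1500, 1000)); // 1000
    System.out.println(miniBatchStart(-1, 1000));   // -1000
  }
}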
dubbo_InvokerAndRestMethodMetadataPair_compareServiceMethod | /**
 * Checks whether both pairs reference the same interface and the same method signature.
 *
 * @param beforeMetadata the previously registered invoker/metadata pair to compare against
 * @return true if both pairs have the same interface and the same method name and parameter types
 */
public boolean compareServiceMethod(InvokerAndRestMethodMetadataPair beforeMetadata) {
Class currentServiceInterface = this.invoker.getInterface();
Class<?> beforeServiceInterface = beforeMetadata.getInvoker().getInterface();
if (!currentServiceInterface.equals(beforeServiceInterface)) {
return false;
}
Method beforeServiceMethod = beforeMetadata.getRestMethodMetadata().getReflectMethod();
Method currentReflectMethod = this.restMethodMetadata.getReflectMethod();
if (beforeServiceMethod.getName().equals(currentReflectMethod.getName()) // method name
// method param types
&& Arrays.toString(beforeServiceMethod.getParameterTypes())
.equals(Arrays.toString(currentReflectMethod.getParameterTypes()))) {
return true;
}
return false;
} | 3.68 |
morf_AbstractSelectStatement_getJoins | /**
* Gets the list of joined tables in the order they are joined
*
* @return the joined tables
*/
public List<Join> getJoins() {
return joins;
} | 3.68 |
framework_VGridLayout_updateSpacingStyleName | /** For internal use only. May be removed or replaced in the future. */
public void updateSpacingStyleName(boolean spacingEnabled) {
String styleName = getStylePrimaryName();
if (spacingEnabled) {
spacingMeasureElement.addClassName(styleName + "-spacing-on");
spacingMeasureElement.removeClassName(styleName + "-spacing-off");
} else {
spacingMeasureElement.removeClassName(styleName + "-spacing-on");
spacingMeasureElement.addClassName(styleName + "-spacing-off");
}
} | 3.68 |
AreaShop_GeneralRegion_getLimitingGroup | /**
* Get the name of the group that is limiting the action, assuming actionAllowed() is false.
* @return The name of the group
*/
public String getLimitingGroup() {
return limitingGroup;
} | 3.68 |
framework_AtmospherePushConnection_onError | /**
* Called if the push connection fails. Atmosphere will automatically retry
* the connection until successful.
*
*/
protected void onError(AtmosphereResponse response) {
state = State.DISCONNECTED;
getConnectionStateHandler().pushError(this, response);
} | 3.68 |
MagicPlugin_ActionFactory_construct | /**
* Constructs a new action from a class name.
*
* @param actionClassName
* The class name of the action.
* @return The constructed action.
* @throws ActionFactoryException
* If no action could be constructed.
*/
public static BaseSpellAction construct(String actionClassName)
throws ActionFactoryException {
List<String> attempts = new ArrayList<>();
for (ActionResolver resolver : resolvers) {
ActionConstructor constructor = resolver.resolve(actionClassName,
attempts);
if (constructor != null) {
return constructor.construct();
}
}
throw new ActionFactoryException(
"Failed to resolve class: " + actionClassName + "\nTried: "
+ attempts);
} | 3.68 |
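A hedged usage sketch; the action class name passed in is only an example and may not exist in a given MagicPlugin build.

final class ActionConstructionExample {
  // Hedged usage sketch; the caller supplies the class name (e.g. from a spell configuration).
  static BaseSpellAction buildOrNull(String actionClassName) {
    try {
      return ActionFactory.construct(actionClassName);
    } catch (ActionFactoryException ex) {
      // The exception message already lists every class name the resolvers attempted.
      return null;
    }
  }
}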
hadoop_RMDelegatedNodeLabelsUpdater_createRMNodeLabelsMappingProvider | /**
* Get the RMNodeLabelsMappingProvider which is used to provide node labels.
*/
private RMNodeLabelsMappingProvider createRMNodeLabelsMappingProvider(
Configuration conf) throws IOException {
RMNodeLabelsMappingProvider nodeLabelsMappingProvider = null;
try {
Class<? extends RMNodeLabelsMappingProvider> labelsProviderClass =
conf.getClass(YarnConfiguration.RM_NODE_LABELS_PROVIDER_CONFIG,
null, RMNodeLabelsMappingProvider.class);
if (labelsProviderClass != null) {
nodeLabelsMappingProvider = labelsProviderClass.newInstance();
}
} catch (InstantiationException | IllegalAccessException
| RuntimeException e) {
LOG.error("Failed to create RMNodeLabelsMappingProvider based on"
+ " Configuration", e);
throw new IOException("Failed to create RMNodeLabelsMappingProvider : "
+ e.getMessage(), e);
}
if (nodeLabelsMappingProvider == null) {
String msg = "RMNodeLabelsMappingProvider should be configured when "
+ "delegated-centralized node label configuration is enabled";
LOG.error(msg);
throw new IOException(msg);
} else {
LOG.debug("RM Node labels mapping provider class is : {}",
nodeLabelsMappingProvider.getClass());
}
return nodeLabelsMappingProvider;
} | 3.68 |
morf_InlineTableUpgrader_writeStatements | /**
* Write out SQL
*/
private void writeStatements(Collection<String> statements) {
sqlStatementWriter.writeSql(statements);
} | 3.68 |
pulsar_ClientConfiguration_setUseTcpNoDelay | /**
* Configure whether to use TCP no-delay flag on the connection, to disable Nagle algorithm.
* <p>
 * No-delay features make sure packets are sent out on the network as soon as possible, which is critical to achieving
 * low latency publishes. On the other hand, sending out a huge number of small packets might limit the overall
* throughput, so if latency is not a concern, it's advisable to set the <code>useTcpNoDelay</code> flag to false.
* <p>
* Default value is true
*
 * @param useTcpNoDelay whether to enable the TCP no-delay flag
*/
public void setUseTcpNoDelay(boolean useTcpNoDelay) {
confData.setUseTcpNoDelay(useTcpNoDelay);
} | 3.68 |
zxing_AztecCode_getSize | /**
* @return size in pixels (width and height)
*/
public int getSize() {
return size;
} | 3.68 |
framework_VTree_doRelationSelection | /**
* Selects a range of items which are in direct relation with each
* other.<br/>
* NOTE: The start node <b>MUST</b> be before the end node!
*
 * @param startNode
 *            the first node of the selection range
 * @param endNode
 *            the last node of the selection range
*/
private void doRelationSelection(TreeNode startNode, TreeNode endNode) {
TreeNode currentNode = endNode;
while (currentNode != startNode) {
currentNode.setSelected(true);
selectedIds.add(currentNode.key);
// Traverse children above the selection
List<TreeNode> subChildren = currentNode.getParentNode()
.getChildren();
if (subChildren.size() > 1) {
selectNodeRange(subChildren.iterator().next().key,
currentNode.key);
} else if (subChildren.size() == 1) {
TreeNode n = subChildren.get(0);
n.setSelected(true);
selectedIds.add(n.key);
}
currentNode = currentNode.getParentNode();
}
startNode.setSelected(true);
selectedIds.add(startNode.key);
selectionHasChanged = true;
} | 3.68 |
hbase_CompoundConfiguration_freezeMutableConf | /**
* If set has been called, it will create a mutableConf. This converts the mutableConf to an
* immutable one and resets it to allow a new mutable conf. This is used when a new map or conf is
* added to the compound configuration to preserve proper override semantics.
*/
void freezeMutableConf() {
if (mutableConf == null) {
// do nothing if there is no current mutableConf
return;
}
this.configs.add(0, new ImmutableConfWrapper(mutableConf));
mutableConf = null;
} | 3.68 |
querydsl_GeometryExpression_contains | /**
* Returns 1 (TRUE) if this geometric object “spatially contains” anotherGeometry.
*
* @param geometry other geometry
* @return true, if contains
*/
public BooleanExpression contains(Expression<? extends Geometry> geometry) {
return Expressions.booleanOperation(SpatialOps.CONTAINS, mixin, geometry);
} | 3.68 |
framework_AbstractSplitPanel_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#readDesign(org.jsoup.nodes .Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
// handle default attributes
super.readDesign(design, designContext);
// handle custom attributes, use default values if no explicit value
// set
// There is no setter for reversed, so it will be handled using
// setSplitPosition.
boolean reversed = false;
if (design.hasAttr("reversed")) {
reversed = DesignAttributeHandler.readAttribute("reversed",
design.attributes(), Boolean.class);
setSplitPosition(getSplitPosition(), reversed);
}
if (design.hasAttr("split-position")) {
SizeWithUnit splitPosition = SizeWithUnit.parseStringSize(
design.attr("split-position"), Unit.PERCENTAGE);
setSplitPosition(splitPosition.getSize(), splitPosition.getUnit(),
reversed);
}
if (design.hasAttr("min-split-position")) {
SizeWithUnit minSplitPosition = SizeWithUnit.parseStringSize(
design.attr("min-split-position"), Unit.PERCENTAGE);
setMinSplitPosition(minSplitPosition.getSize(),
minSplitPosition.getUnit());
}
if (design.hasAttr("max-split-position")) {
SizeWithUnit maxSplitPosition = SizeWithUnit.parseStringSize(
design.attr("max-split-position"), Unit.PERCENTAGE);
setMaxSplitPosition(maxSplitPosition.getSize(),
maxSplitPosition.getUnit());
}
// handle children
if (design.children().size() > 2) {
throw new DesignException(
"A split panel can contain at most two components.");
}
for (Element childElement : design.children()) {
Component childComponent = designContext.readDesign(childElement);
if (childElement.hasAttr(":second")) {
setSecondComponent(childComponent);
} else {
addComponent(childComponent);
}
}
} | 3.68 |
hadoop_XMLUtils_newSecureSAXParserFactory | /**
* This method should be used if you need a {@link SAXParserFactory}. Use this method
* instead of {@link SAXParserFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link SAXParserFactory} with secure configuration enabled
* @throws ParserConfigurationException if the {@code JAXP} parser does not support the
* secure configuration
 * @throws SAXException if there are other issues when creating the factory
*/
public static SAXParserFactory newSecureSAXParserFactory()
throws SAXException, ParserConfigurationException {
SAXParserFactory spf = SAXParserFactory.newInstance();
spf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
spf.setFeature(DISALLOW_DOCTYPE_DECL, true);
spf.setFeature(LOAD_EXTERNAL_DECL, false);
spf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
spf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
return spf;
} | 3.68 |
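A hedged usage sketch: parsing a local file with the hardened factory, so any document that carries a DOCTYPE or tries to pull in external entities is rejected by the parser. The handler and file are placeholders.

import java.io.File;

import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;

import org.xml.sax.helpers.DefaultHandler;

final class SecureSaxParseExample {
  // XMLUtils refers to the Hadoop class shown above (its import is omitted here).
  static void parse(File xmlFile) throws Exception {
    SAXParserFactory spf = XMLUtils.newSecureSAXParserFactory();
    SAXParser parser = spf.newSAXParser();
    parser.parse(xmlFile, new DefaultHandler()); // no-op handler, just to exercise the secure parse
  }
}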
hbase_MetaTableAccessor_addSplitsToParent | /**
* Adds daughter region infos to hbase:meta row for the specified region.
* <p/>
 * Note that this does not add the daughters as separate rows, but adds information about the
* daughters in the same row as the parent. Now only used in snapshot. Use
* {@link org.apache.hadoop.hbase.master.assignment.RegionStateStore} if you want to split a
* region.
* @param connection connection we're using
* @param regionInfo RegionInfo of parent region
* @param splitA first split daughter of the parent regionInfo
* @param splitB second split daughter of the parent regionInfo
* @throws IOException if problem connecting or updating meta
*/
public static void addSplitsToParent(Connection connection, RegionInfo regionInfo,
RegionInfo splitA, RegionInfo splitB) throws IOException {
try (Table meta = getMetaHTable(connection)) {
Put put = makePutFromRegionInfo(regionInfo);
addDaughtersToPut(put, splitA, splitB);
meta.put(put);
debugLogMutation(put);
LOG.debug("Added region {}", regionInfo.getRegionNameAsString());
}
} | 3.68 |
hbase_CellUtil_makeColumn | /**
* Makes a column in family:qualifier form from separate byte arrays.
* <p>
* Not recommended for usage as this is old-style API.
* @return family:qualifier
*/
public static byte[] makeColumn(byte[] family, byte[] qualifier) {
return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
} | 3.68 |
hadoop_RegistryTypeUtils_requireAddressType | /**
* Require a specific address type on an endpoint
* @param required required type
* @param epr endpoint
* @throws InvalidRecordException if the type is wrong
*/
public static void requireAddressType(String required, Endpoint epr) throws
InvalidRecordException {
if (!required.equals(epr.addressType)) {
throw new InvalidRecordException(
epr.toString(),
"Address type of " + epr.addressType
+ " does not match required type of "
+ required);
}
} | 3.68 |
framework_DeclarativeIconGenerator_setIcon | /**
* Sets an {@code icon} for the {@code item}.
*
* @param item
* a data item
* @param icon
* an icon for the {@code item}
*/
protected void setIcon(T item, Resource icon) {
captions.put(item, icon);
} | 3.68 |
querydsl_SQLExpressions_unionAll | /**
* Create a new UNION ALL clause
*
* @param sq subqueries
* @param <T>
* @return union
*/
public static <T> Union<T> unionAll(List<SubQueryExpression<T>> sq) {
return new SQLQuery<Void>().unionAll(sq);
} | 3.68 |
flink_RestServerEndpoint_getServerAddress | /**
* Returns the address on which this endpoint is accepting requests.
*
* @return address on which this endpoint is accepting requests or null if none
*/
@Nullable
public InetSocketAddress getServerAddress() {
synchronized (lock) {
assertRestServerHasBeenStarted();
Channel server = this.serverChannel;
if (server != null) {
try {
return ((InetSocketAddress) server.localAddress());
} catch (Exception e) {
log.error("Cannot access local server address", e);
}
}
return null;
}
} | 3.68 |
flink_KeyedStream_getKeyType | /**
* Gets the type of the key by which the stream is partitioned.
*
* @return The type of the key by which the stream is partitioned.
*/
@Internal
public TypeInformation<KEY> getKeyType() {
return keyType;
} | 3.68 |
hbase_NullComparator_toByteArray | /** Returns The comparator serialized using pb */
@Override
public byte[] toByteArray() {
ComparatorProtos.NullComparator.Builder builder = ComparatorProtos.NullComparator.newBuilder();
return builder.build().toByteArray();
} | 3.68 |
hadoop_MRJobConfUtil_getTaskProgressWaitDeltaTimeThreshold | /**
* Retrieves the min time required to log the task attempt current
* progress.
* @return the defined threshold in the conf.
* returns the default value if
* {@link #setTaskLogProgressDeltaThresholds} has not been called.
*/
public static long getTaskProgressWaitDeltaTimeThreshold() {
if (progressMaxWaitDeltaTimeThreshold == null) {
return TimeUnit.SECONDS.toMillis(
MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS_DEFAULT);
}
return progressMaxWaitDeltaTimeThreshold.longValue();
} | 3.68 |
hudi_HoodieSparkKeyGeneratorFactory_inferKeyGeneratorTypeFromWriteConfig | /**
* Infers the key generator type based on the record key and partition fields.
* If neither of the record key and partition fields are set, the default type is returned.
*
* @param props Properties from the write config.
* @return Inferred key generator type.
*/
public static KeyGeneratorType inferKeyGeneratorTypeFromWriteConfig(TypedProperties props) {
String partitionFields = props.getString(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), null);
String recordsKeyFields = props.getString(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), null);
return inferKeyGeneratorType(Option.ofNullable(recordsKeyFields), partitionFields);
} | 3.68 |
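A hedged usage sketch of how the write-config properties feed into the inference; the field values are placeholders, and the resulting type depends on inferKeyGeneratorType, which is not shown in this snippet.

final class KeyGeneratorInferenceExample {
  // Hedged usage sketch; "uuid" and "region,day" are placeholder field names.
  static KeyGeneratorType inferForExample() {
    TypedProperties props = new TypedProperties();
    props.setProperty(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid");
    props.setProperty(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "region,day");
    return HoodieSparkKeyGeneratorFactory.inferKeyGeneratorTypeFromWriteConfig(props);
  }
}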
hbase_RequestConverter_toAssignRegionsRequest | // HBCK2
public static MasterProtos.AssignsRequest toAssignRegionsRequest(List<String> encodedRegionNames,
boolean override) {
MasterProtos.AssignsRequest.Builder b = MasterProtos.AssignsRequest.newBuilder();
return b.addAllRegion(toEncodedRegionNameRegionSpecifiers(encodedRegionNames))
.setOverride(override).build();
} | 3.68 |
hibernate-validator_ValidationProviderHelper_getValidatorBeanClass | /**
* Determines the class of the {@link Validator} corresponding to the given configuration object.
*/
Class<? extends Validator> getValidatorBeanClass() {
return validatorClass;
} | 3.68 |
framework_ConnectorTracker_getLogger | /**
* Gets a logger for this class
*
* @return A logger instance for logging within this class
*
*/
private static Logger getLogger() {
return Logger.getLogger(ConnectorTracker.class.getName());
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_compareSegmentAndNode | /**
* Compare the first skip list key in the given memory segment with the second skip list key in
* the given node.
*
* @param keySegment memory segment storing the first key.
* @param keyOffset offset of the first key in memory segment.
* @param keyLen length of the first key.
* @param targetNode the node storing the second key.
* @return Returns a negative integer, zero, or a positive integer as the first key is less
* than, equal to, or greater than the second.
*/
private int compareSegmentAndNode(
MemorySegment keySegment, int keyOffset, int keyLen, long targetNode) {
return SkipListUtils.compareSegmentAndNode(
keySegment, keyOffset, targetNode, spaceAllocator);
} | 3.68 |
morf_TableDataHomology_copyAndSort | /**
* @return A sorted copy of the Record list
*/
private List<Record> copyAndSort(final Table table, Iterable<Record> records, Comparator<Record> comparator) {
// we need to transform the records to RecordBeans so we don't re-use the Record
return FluentIterable.from(records)
.transform(r -> RecordHelper.copy(r, table.columns()))
.toSortedList(comparator);
} | 3.68 |
morf_SqlUtils_type | /**
* Specifies the data type for the parameter.
*
* @param dataType The data type
* @return the next phase of the parameter builder.
*/
public SqlParameterWidthBuilder type(DataType dataType) {
return new SqlParameterWidthBuilder(name, dataType);
} | 3.68 |
hadoop_CommonAuditContext_remove | /**
* Remove a context entry.
* @param key key
*/
public void remove(String key) {
if (LOG.isTraceEnabled()) {
LOG.trace("Remove context entry {}", key);
}
evaluatedEntries.remove(key);
} | 3.68 |
hbase_User_getGroupNames | /**
* Returns the list of groups of which this user is a member. On secure Hadoop this returns the
* group information for the user as resolved on the server. For 0.20 based Hadoop, the group
* names are passed from the client.
*/
public String[] getGroupNames() {
return ugi.getGroupNames();
} | 3.68 |
hudi_SparkBootstrapCommitActionExecutor_listAndProcessSourcePartitions | /**
 * Returns the bootstrap mode selections for the listed partitions and figures out the bootstrap schema.
 * @return a mapping from bootstrap mode to the selected partitions and their files
* @throws IOException
*/
private Map<BootstrapMode, List<Pair<String, List<HoodieFileStatus>>>> listAndProcessSourcePartitions() throws IOException {
List<Pair<String, List<HoodieFileStatus>>> folders = BootstrapUtils.getAllLeafFoldersWithFiles(
table.getBaseFileFormat(), bootstrapSourceFileSystem, config.getBootstrapSourceBasePath(), context);
LOG.info("Fetching Bootstrap Schema !!");
HoodieBootstrapSchemaProvider sourceSchemaProvider = new HoodieSparkBootstrapSchemaProvider(config);
bootstrapSchema = sourceSchemaProvider.getBootstrapSchema(context, folders).toString();
LOG.info("Bootstrap Schema :" + bootstrapSchema);
BootstrapModeSelector selector =
(BootstrapModeSelector) ReflectionUtils.loadClass(config.getBootstrapModeSelectorClass(), config);
Map<BootstrapMode, List<String>> result = selector.select(folders);
Map<String, List<HoodieFileStatus>> partitionToFiles = folders.stream().collect(
Collectors.toMap(Pair::getKey, Pair::getValue));
// Ensure all partitions are accounted for
checkArgument(partitionToFiles.keySet().equals(
result.values().stream().flatMap(Collection::stream).collect(Collectors.toSet())));
return result.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue().stream()
.map(p -> Pair.of(p, partitionToFiles.get(p))).collect(Collectors.toList())))
.collect(Collectors.toMap(Pair::getKey, Pair::getValue));
} | 3.68 |
morf_OracleDialect_createTrigger | /**
 * Returns a list of SQL statements to create a trigger to populate a table's autonumber column
* from a sequence.
*
* @param table Table for which the trigger should be created.
* @param onColumn The autonumber column.
* @return SQL string list.
*/
private List<String> createTrigger(Table table, Column onColumn) {
List<String> createTriggerStatements = new ArrayList<>();
createTriggerStatements.add(String.format("ALTER SESSION SET CURRENT_SCHEMA = %s", getSchemaName()));
String tableName = truncatedTableName(table.getName());
String sequenceName = sequenceName(table.getName());
String triggerName = schemaNamePrefix() + triggerName(table.getName());
createTriggerStatements.add(new StringBuilder("CREATE TRIGGER ").append(triggerName).append(" \n")
.append("BEFORE INSERT ON ").append(tableName).append(" FOR EACH ROW \n")
.append("BEGIN \n")
.append(" IF (:new.").append(onColumn.getName()).append(" IS NULL) THEN \n")
.append(" SELECT ").append(sequenceName).append(".nextval \n")
.append(" INTO :new.").append(onColumn.getName()).append(" \n")
.append(" FROM DUAL; \n")
.append(" END IF; \n")
.append("END;")
.toString());
return createTriggerStatements;
} | 3.68 |
zxing_OneDReader_doDecode | /**
* We're going to examine rows from the middle outward, searching alternately above and below the
* middle, and farther out each time. rowStep is the number of rows between each successive
* attempt above and below the middle. So we'd scan row middle, then middle - rowStep, then
* middle + rowStep, then middle - (2 * rowStep), etc.
* rowStep is bigger as the image is taller, but is always at least 1. We've somewhat arbitrarily
* decided that moving up and down by about 1/16 of the image is pretty good; we try more of the
* image if "trying harder".
*
* @param image The image to decode
* @param hints Any hints that were requested
* @return The contents of the decoded barcode
* @throws NotFoundException Any spontaneous errors which occur
*/
private Result doDecode(BinaryBitmap image,
Map<DecodeHintType,?> hints) throws NotFoundException {
int width = image.getWidth();
int height = image.getHeight();
BitArray row = new BitArray(width);
boolean tryHarder = hints != null && hints.containsKey(DecodeHintType.TRY_HARDER);
int rowStep = Math.max(1, height >> (tryHarder ? 8 : 5));
int maxLines;
if (tryHarder) {
maxLines = height; // Look at the whole image, not just the center
} else {
maxLines = 15; // 15 rows spaced 1/32 apart is roughly the middle half of the image
}
int middle = height / 2;
for (int x = 0; x < maxLines; x++) {
// Scanning from the middle out. Determine which row we're looking at next:
int rowStepsAboveOrBelow = (x + 1) / 2;
boolean isAbove = (x & 0x01) == 0; // i.e. is x even?
int rowNumber = middle + rowStep * (isAbove ? rowStepsAboveOrBelow : -rowStepsAboveOrBelow);
if (rowNumber < 0 || rowNumber >= height) {
// Oops, if we run off the top or bottom, stop
break;
}
// Estimate black point for this row and load it:
try {
row = image.getBlackRow(rowNumber, row);
} catch (NotFoundException ignored) {
continue;
}
// While we have the image data in a BitArray, it's fairly cheap to reverse it in place to
// handle decoding upside down barcodes.
for (int attempt = 0; attempt < 2; attempt++) {
if (attempt == 1) { // trying again?
row.reverse(); // reverse the row and continue
// This means we will only ever draw result points *once* in the life of this method
// since we want to avoid drawing the wrong points after flipping the row, and,
// don't want to clutter with noise from every single row scan -- just the scans
// that start on the center line.
if (hints != null && hints.containsKey(DecodeHintType.NEED_RESULT_POINT_CALLBACK)) {
Map<DecodeHintType,Object> newHints = new EnumMap<>(DecodeHintType.class);
newHints.putAll(hints);
newHints.remove(DecodeHintType.NEED_RESULT_POINT_CALLBACK);
hints = newHints;
}
}
try {
// Look for a barcode
Result result = decodeRow(rowNumber, row, hints);
// We found our barcode
if (attempt == 1) {
// But it was upside down, so note that
result.putMetadata(ResultMetadataType.ORIENTATION, 180);
// And remember to flip the result points horizontally.
ResultPoint[] points = result.getResultPoints();
if (points != null) {
points[0] = new ResultPoint(width - points[0].getX() - 1, points[0].getY());
points[1] = new ResultPoint(width - points[1].getX() - 1, points[1].getY());
}
}
return result;
} catch (ReaderException re) {
// continue -- just couldn't decode this row
}
}
}
throw NotFoundException.getNotFoundInstance();
} | 3.68 |
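The scan order described above can be reproduced in isolation (my own illustration, not ZXing code). For a 160-pixel-tall image without TRY_HARDER, rowStep is max(1, 160 >> 5) = 5, so the rows tried are 80, 75, 85, 70, 90, 65, and so on.

public final class ScanOrderDemo {
  public static void main(String[] args) {
    int height = 160;
    boolean tryHarder = false;
    int rowStep = Math.max(1, height >> (tryHarder ? 8 : 5)); // 5
    int middle = height / 2;                                  // 80
    for (int x = 0; x < 6; x++) {
      int rowStepsAboveOrBelow = (x + 1) / 2;
      boolean isAbove = (x & 0x01) == 0;
      int rowNumber = middle + rowStep * (isAbove ? rowStepsAboveOrBelow : -rowStepsAboveOrBelow);
      System.out.print(rowNumber + " "); // prints: 80 75 85 70 90 65
    }
  }
}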
flink_RunLengthDecoder_readUnsignedVarInt | /** Reads the next varint encoded int. */
private int readUnsignedVarInt() throws IOException {
int value = 0;
int shift = 0;
int b;
do {
b = in.read();
value |= (b & 0x7F) << shift;
shift += 7;
} while ((b & 0x80) != 0);
return value;
} | 3.68 |
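A standalone round-trip of the same varint format (my own sketch, not Flink code): each byte carries 7 payload bits, least-significant group first, with the high bit as a continuation flag, so 300 encodes as 0xAC 0x02.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public final class VarIntDemo {
  // Same decoding loop as above, written against a plain InputStream.
  static int readUnsignedVarInt(InputStream in) throws IOException {
    int value = 0;
    int shift = 0;
    int b;
    do {
      b = in.read();
      value |= (b & 0x7F) << shift;
      shift += 7;
    } while ((b & 0x80) != 0);
    return value;
  }

  public static void main(String[] args) throws IOException {
    byte[] encoded = {(byte) 0xAC, 0x02}; // 300: low 7 bits 0x2C with the continuation bit, then 0x02
    System.out.println(readUnsignedVarInt(new ByteArrayInputStream(encoded))); // prints 300
  }
}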
flink_TransientBlobCache_deleteInternal | /**
* Deletes the file associated with the blob key in this BLOB cache.
*
* @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param key blob key associated with the file to be deleted
* @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
* <tt>false</tt> otherwise
*/
private boolean deleteInternal(@Nullable JobID jobId, TransientBlobKey key) {
final File localFile =
new File(
BlobUtils.getStorageLocationPath(
storageDir.deref().getAbsolutePath(), jobId, key));
readWriteLock.writeLock().lock();
try {
if (!localFile.delete() && localFile.exists()) {
log.warn(
"Failed to delete locally cached BLOB {} at {}",
key,
localFile.getAbsolutePath());
return false;
} else {
// this needs to happen inside the write lock in case of concurrent getFile() calls
blobExpiryTimes.remove(Tuple2.of(jobId, key));
}
} finally {
readWriteLock.writeLock().unlock();
}
return true;
} | 3.68 |
hbase_HFileReaderImpl_next | /**
* Go to the next key/value in the block section. Loads the next block if necessary. If
* successful, {@link #getKey()} and {@link #getValue()} can be called.
* @return true if successfully navigated to the next key/value
*/
@Override
public boolean next() throws IOException {
// This is a hot method so extreme measures taken to ensure it is small and inlineable.
// Checked by setting: -XX:+UnlockDiagnosticVMOptions -XX:+PrintInlining -XX:+PrintCompilation
assertSeeked();
positionThisBlockBuffer();
return _next();
} | 3.68 |
flink_FlinkAssertions_assertThatChainOfCauses | /**
* Shorthand to assert chain of causes. Same as:
*
* <pre>{@code
* assertThat(throwable)
* .extracting(FlinkAssertions::chainOfCauses, FlinkAssertions.STREAM_THROWABLE)
* }</pre>
*/
public static ListAssert<Throwable> assertThatChainOfCauses(Throwable root) {
return assertThat(root).extracting(FlinkAssertions::chainOfCauses, STREAM_THROWABLE);
} | 3.68 |
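A hedged usage sketch with AssertJ (which FlinkAssertions itself builds on): walk the cause chain of a wrapped exception and assert that some cause carries the expected message.

final class ChainOfCausesExample {
  // Hedged usage sketch; assumes AssertJ is on the classpath.
  static void causeChainCarriesMessage() {
    Throwable root = new RuntimeException("outer", new IllegalStateException("inner failure"));
    FlinkAssertions.assertThatChainOfCauses(root)
        .anySatisfy(t -> org.assertj.core.api.Assertions.assertThat(t).hasMessage("inner failure"));
  }
}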
flink_SortingThread_go | /** Entry point of the thread. */
@Override
public void go() throws InterruptedException {
boolean alive = true;
// loop as long as the thread is marked alive
while (isRunning() && alive) {
final CircularElement<E> element = this.dispatcher.take(SortStage.SORT);
if (element != EOF_MARKER && element != SPILLING_MARKER) {
if (element.getBuffer().size() == 0) {
element.getBuffer().reset();
this.dispatcher.send(SortStage.READ, element);
continue;
}
LOG.debug("Sorting buffer {}.", element.getId());
this.sorter.sort(element.getBuffer());
LOG.debug("Sorted buffer {}.", element.getId());
} else if (element == EOF_MARKER) {
LOG.debug("Sorting thread done.");
alive = false;
}
this.dispatcher.send(SortStage.SPILL, element);
}
} | 3.68 |
framework_UIConnector_getPageState | /**
* Returns the state of the Page associated with the UI.
* <p>
* Note that state is considered an internal part of the connector. You
 * should not rely on the state object outside of the connector that owns it.
* If you depend on the state of other connectors you should use their
* public API instead of their state object directly. The page state might
* not be an independent state object but can be embedded in UI state.
* </p>
*
* @since 7.1
* @return state object of the page
*/
public PageState getPageState() {
return getState().pageState;
} | 3.68 |
hbase_DeadServer_removeDeadServer | /**
* Called from rpc by operator cleaning up deadserver list.
* @param deadServerName the dead server name
* @return true if this server was removed
*/
public synchronized boolean removeDeadServer(final ServerName deadServerName) {
return this.deadServers.remove(deadServerName) != null;
} | 3.68 |
framework_VRadioButtonGroup_getItem | /**
* Returns the JsonObject used to populate the RadioButton widget that
* contains given Element.
*
* @since 8.2
* @param element
* the element to search for
* @return the related JsonObject; {@code null} if not found
*/
public JsonObject getItem(Element element) {
// The HTML populated in updateItem does not match RadioButton directly,
// which is why tryGetItem is also attempted on the parent element
return tryGetItem(element)
.orElse(tryGetItem(element.getParentElement()).orElse(null));
} | 3.68 |
framework_Escalator_reapplyColumnWidths | /**
* Reapplies all the cells' widths according to the calculated widths in
* the column configuration.
*/
public void reapplyColumnWidths() {
Element row = root.getFirstChildElement();
while (row != null) {
// Only handle non-spacer rows
if (!body.spacerContainer.isSpacer(row)) {
Element cell = row.getFirstChildElement();
int columnIndex = 0;
while (cell != null) {
final double width = getCalculatedColumnWidthWithColspan(
cell, columnIndex);
/*
* TODO Should Escalator implement ProvidesResize at
* some point, this is where we need to do that.
*/
cell.getStyle().setWidth(width, Unit.PX);
cell = cell.getNextSiblingElement();
columnIndex++;
}
}
row = row.getNextSiblingElement();
}
reapplyRowWidths();
} | 3.68 |
hbase_VersionModel_getRESTVersion | /** Returns the REST gateway version */
@XmlAttribute(name = "REST")
public String getRESTVersion() {
return restVersion;
} | 3.68 |
framework_Calendar_fireEventMove | /**
* Fires an event move event to all server side move listeners.
*
* @param index
* The index of the event in the events list
* @param newFromDatetime
* The changed from date time
*/
protected void fireEventMove(int index, Date newFromDatetime) {
MoveEvent event = new MoveEvent(this, events.get(index),
newFromDatetime);
if (calendarEventProvider instanceof EventMoveHandler) {
// Notify event provider if it is an event move handler
((EventMoveHandler) calendarEventProvider).eventMove(event);
}
// Notify event move handler attached by using the
// setHandler(EventMoveHandler) method
fireEvent(event);
} | 3.68 |
hadoop_FutureIO_raiseInnerCause | /**
* Extract the cause of a completion failure and rethrow it if an IOE
* or RTE.
* @param e exception.
* @param <T> type of return value.
* @return nothing, ever.
* @throws IOException either the inner IOException, or a wrapper around
* any non-Runtime-Exception
* @throws RuntimeException if that is the inner cause.
*/
public static <T> T raiseInnerCause(final CompletionException e)
throws IOException {
throw unwrapInnerException(e);
} | 3.68 |
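A hedged sketch of the calling pattern this is designed for (my own illustration, not taken from the Hadoop sources): join a CompletableFuture and convert the CompletionException wrapper back into an IOException for callers that expect one.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

final class RaiseInnerCauseExample {
  // Hedged usage sketch of FutureIO.raiseInnerCause.
  static <T> T awaitResult(CompletableFuture<T> future) throws IOException {
    try {
      return future.join();
    } catch (CompletionException e) {
      return FutureIO.raiseInnerCause(e); // always throws; the return only satisfies the compiler
    }
  }
}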
zxing_MaskUtil_applyMaskPenaltyRule4 | /**
* Apply mask penalty rule 4 and return the penalty. Calculate the ratio of dark cells and give
* penalty if the ratio is far from 50%. It gives 10 penalty for 5% distance.
*/
static int applyMaskPenaltyRule4(ByteMatrix matrix) {
int numDarkCells = 0;
byte[][] array = matrix.getArray();
int width = matrix.getWidth();
int height = matrix.getHeight();
for (int y = 0; y < height; y++) {
byte[] arrayY = array[y];
for (int x = 0; x < width; x++) {
if (arrayY[x] == 1) {
numDarkCells++;
}
}
}
int numTotalCells = matrix.getHeight() * matrix.getWidth();
int fivePercentVariances = Math.abs(numDarkCells * 2 - numTotalCells) * 10 / numTotalCells;
return fivePercentVariances * N4;
} | 3.68 |
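A worked example of the arithmetic (my own illustration): for a hypothetical 21x21 symbol with 265 dark modules, the dark ratio is about 60%, i.e. two full 5% steps away from the ideal 50%, so the method returns 2 * N4.

public final class Rule4PenaltyDemo {
  public static void main(String[] args) {
    int numDarkCells = 265;       // hypothetical count
    int numTotalCells = 21 * 21;  // 441 modules in a version-1 symbol
    int fivePercentVariances = Math.abs(numDarkCells * 2 - numTotalCells) * 10 / numTotalCells;
    System.out.println(fivePercentVariances); // |530 - 441| * 10 / 441 = 890 / 441 = 2
  }
}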
hadoop_RBFMetrics_getJson | /**
* Get JSON for this record.
*
* @return Map representing the data for the JSON representation.
*/
private static Map<String, Object> getJson(BaseRecord record) {
Map<String, Object> json = new HashMap<>();
Map<String, Class<?>> fields = getFields(record);
for (String fieldName : fields.keySet()) {
if (!fieldName.equalsIgnoreCase("proto")) {
try {
Object value = getField(record, fieldName);
if (value instanceof BaseRecord) {
BaseRecord recordField = (BaseRecord) value;
json.putAll(getJson(recordField));
} else {
json.put(fieldName, value == null ? JSONObject.NULL : value);
}
} catch (Exception e) {
throw new IllegalArgumentException(
"Cannot serialize field " + fieldName + " into JSON");
}
}
}
return json;
} | 3.68 |
hadoop_StageAllocatorLowCostAligned_canAllocate | // canAllocate() - boolean function, returns whether requestedResources
// can be allocated during the durationInterval without
// violating capacity constraints
public boolean canAllocate() {
return (gangsCanFit > 0);
} | 3.68 |
framework_VCalendarPanel_onCancel | /**
* Notifies submit-listeners of a cancel event
*/
private void onCancel() {
if (getSubmitListener() != null) {
getSubmitListener().onCancel();
}
} | 3.68 |
hbase_StorageClusterStatusModel_getLiveNodes | /** Returns the list of live nodes */
@XmlElement(name = "Node")
@XmlElementWrapper(name = "LiveNodes")
// workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192
@JsonProperty("LiveNodes")
public List<Node> getLiveNodes() {
return liveNodes;
} | 3.68 |
hbase_SnapshotManifest_convertToV2SingleManifest | /*
* In case of rolling-upgrade, we try to read all the formats and build the snapshot with the
* latest format.
*/
private void convertToV2SingleManifest() throws IOException {
// Try to load v1 and v2 regions
List<SnapshotRegionManifest> v1Regions, v2Regions;
ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
setStatusMsg("Loading Region manifests for " + this.desc.getName());
try {
v1Regions =
SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, workingDir, desc);
v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, workingDir,
desc, manifestSizeLimit);
SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd));
if (v1Regions != null && v1Regions.size() > 0) {
dataManifestBuilder.addAllRegionManifests(v1Regions);
}
if (v2Regions != null && v2Regions.size() > 0) {
dataManifestBuilder.addAllRegionManifests(v2Regions);
}
// Write the v2 Data Manifest.
// Once the data-manifest is written, the snapshot can be considered complete.
// Currently snapshots are written in a "temporary" directory and later
// moved to the "complated" snapshot directory.
setStatusMsg("Writing data manifest for " + this.desc.getName());
SnapshotDataManifest dataManifest = dataManifestBuilder.build();
writeDataManifest(dataManifest);
this.regionManifests = dataManifest.getRegionManifestsList();
// Remove the region manifests. Everything is now in the data-manifest.
// The delete operation is "relaxed", unless we get an exception we keep going.
// The extra files in the snapshot directory will not give any problem,
// since they have the same content as the data manifest, and even by re-reading
// them we will get the same information.
int totalDeletes = 0;
ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(tpool);
if (v1Regions != null) {
for (SnapshotRegionManifest regionManifest : v1Regions) {
++totalDeletes;
completionService.submit(() -> {
SnapshotManifestV1.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
return null;
});
}
}
if (v2Regions != null) {
for (SnapshotRegionManifest regionManifest : v2Regions) {
++totalDeletes;
completionService.submit(() -> {
SnapshotManifestV2.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
return null;
});
}
}
// Wait for the deletes to finish.
for (int i = 0; i < totalDeletes; i++) {
try {
completionService.take().get();
} catch (InterruptedException ie) {
throw new InterruptedIOException(ie.getMessage());
} catch (ExecutionException e) {
throw new IOException("Error deleting region manifests", e.getCause());
}
}
} finally {
tpool.shutdown();
}
} | 3.68 |
hadoop_BCFile_getAPIVersion | /**
* Get version of BCFile API.
*
* @return version of BCFile API.
*/
public Version getAPIVersion() {
return API_VERSION;
} | 3.68 |
flink_CheckpointStorageLocationReference_getReferenceBytes | /**
* Gets the reference bytes.
*
* <p><b>Important:</b> For efficiency, this method does not make a defensive copy, so the
* caller must not modify the bytes in the array.
*/
public byte[] getReferenceBytes() {
// return a non null object always
return encodedReference != null ? encodedReference : new byte[0];
} | 3.68 |
hbase_NamespaceAuditor_getRegionCountOfTable | /**
* Get region count for table
* @param tName - table name
* @return cached region count, or -1 if table status not found
* @throws IOException Signals that the namespace auditor has not been initialized
*/
public int getRegionCountOfTable(TableName tName) throws IOException {
if (stateManager.isInitialized()) {
NamespaceTableAndRegionInfo state = stateManager.getState(tName.getNamespaceAsString());
return state != null ? state.getRegionCountOfTable(tName) : -1;
}
checkTableTypeAndThrowException(tName);
return -1;
} | 3.68 |
hudi_HoodieBackedTableMetadata_closePartitionReaders | /**
* Close and clear all the partitions readers.
*/
private void closePartitionReaders() {
for (Pair<String, String> partitionFileSlicePair : partitionReaders.get().keySet()) {
close(partitionFileSlicePair);
}
partitionReaders.get().clear();
} | 3.68 |
flink_PlanReference_fromFile | /** Create a reference starting from a file path. */
public static PlanReference fromFile(File file) {
Objects.requireNonNull(file, "File cannot be null");
return new FilePlanReference(file);
} | 3.68 |
hadoop_FederationPolicyUtils_validateSubClusterAvailability | /**
 * Validate that there is at least one active subcluster that is not blacklisted; it will
* throw an exception if there are no usable subclusters.
*
* @param activeSubClusters the list of subClusters as identified by
* {@link SubClusterId} currently active.
* @param blackListSubClusters the list of subClusters as identified by
* {@link SubClusterId} to blackList from the selection of the home
* subCluster.
* @throws FederationPolicyException if there are no usable subclusters.
*/
public static void validateSubClusterAvailability(
Collection<SubClusterId> activeSubClusters,
Collection<SubClusterId> blackListSubClusters)
throws FederationPolicyException {
if (activeSubClusters != null && !activeSubClusters.isEmpty()) {
if (blackListSubClusters == null) {
return;
}
for (SubClusterId scId : activeSubClusters) {
if (!blackListSubClusters.contains(scId)) {
// There is at least one active subcluster
return;
}
}
}
throw new FederationPolicyException(
FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE);
} | 3.68 |
hbase_SampleUploader_run | /**
* Main entry point.
* @param otherArgs The command line parameters after ToolRunner handles standard.
* @throws Exception When running the job fails.
*/
@Override
public int run(String[] otherArgs) throws Exception {
if (otherArgs.length != 2) {
System.err.println("Wrong number of arguments: " + otherArgs.length);
System.err.println("Usage: " + NAME + " <input> <tablename>");
return -1;
}
Job job = configureJob(getConf(), otherArgs);
return (job.waitForCompletion(true) ? 0 : 1);
} | 3.68 |
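A hedged sketch of the conventional ToolRunner entry point for a Tool like this one; it assumes the class has a no-arg constructor, and the command-line arguments follow the usage message above.

// Hedged sketch of a conventional main() for the tool; configuration details are assumptions.
public static void main(String[] args) throws Exception {
  int status = ToolRunner.run(HBaseConfiguration.create(), new SampleUploader(), args);
  System.exit(status);
}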
hbase_ProcedureCoordinator_getProcedureNames | /** Returns Return set of all procedure names. */
public Set<String> getProcedureNames() {
return new HashSet<>(procedures.keySet());
} | 3.68 |
graphhopper_PrepareContractionHierarchies_useFixedNodeOrdering | /**
* Instead of heuristically determining a node ordering for the graph contraction it is also possible
* to use a fixed ordering. For example this allows re-using a previously calculated node ordering.
* This will speed up CH preparation, but might lead to slower queries.
*/
public PrepareContractionHierarchies useFixedNodeOrdering(NodeOrderingProvider nodeOrderingProvider) {
if (nodeOrderingProvider.getNumNodes() != nodes) {
throw new IllegalArgumentException(
"contraction order size (" + nodeOrderingProvider.getNumNodes() + ")" +
" must be equal to number of nodes in graph (" + nodes + ").");
}
this.nodeOrderingProvider = nodeOrderingProvider;
return this;
} | 3.68 |
framework_CompositeValidator_removeValidator | /**
* Removes a validator from the composite.
*
* @param validator
* the Validator object which performs validation checks on this
* set of data field values.
*/
public void removeValidator(Validator validator) {
validators.remove(validator);
} | 3.68 |
flink_MetricListener_getCounter | /**
* Get registered {@link Counter} with identifier relative to the root metric group.
*
* @param identifier identifier relative to the root metric group
* @return Optional registered counter
*/
public Optional<Counter> getCounter(String... identifier) {
return getMetric(Counter.class, identifier);
} | 3.68 |
pulsar_PulsarAdminImpl_brokerStats | /**
 * @return the broker statistics
*/
public BrokerStats brokerStats() {
return brokerStats;
} | 3.68 |
querydsl_ComparableExpression_loe | /**
* Create a {@code this <= right} expression
*
* @param right rhs of the comparison
* @return this <= right
* @see java.lang.Comparable#compareTo(Object)
*/
public BooleanExpression loe(Expression<T> right) {
return Expressions.booleanOperation(Ops.LOE, mixin, right);
} | 3.68 |
hbase_Scan_getFamilyMap | /**
 * Gets the familyMap.
*/
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
} | 3.68 |
hadoop_AbfsHttpOperation_processStorageErrorResponse | /**
 * When the request fails, this function is used to parse the response
* and extract the storageErrorCode and storageErrorMessage. Any errors
* encountered while attempting to process the error response are logged,
* but otherwise ignored.
*
* For storage errors, the response body *usually* has the following format:
*
* {
* "error":
* {
* "code": "string",
* "message": "string"
* }
* }
*
*/
private void processStorageErrorResponse() {
try (InputStream stream = connection.getErrorStream()) {
if (stream == null) {
return;
}
JsonFactory jf = new JsonFactory();
try (JsonParser jp = jf.createParser(stream)) {
String fieldName, fieldValue;
jp.nextToken(); // START_OBJECT - {
jp.nextToken(); // FIELD_NAME - "error":
jp.nextToken(); // START_OBJECT - {
jp.nextToken();
while (jp.hasCurrentToken()) {
if (jp.getCurrentToken() == JsonToken.FIELD_NAME) {
fieldName = jp.getCurrentName();
jp.nextToken();
fieldValue = jp.getText();
switch (fieldName) {
case "code":
storageErrorCode = fieldValue;
break;
case "message":
storageErrorMessage = fieldValue;
break;
case "ExpectedAppendPos":
expectedAppendPos = fieldValue;
break;
default:
break;
}
}
jp.nextToken();
}
}
    } catch (IOException ex) {
      // Per the javadoc above, errors encountered while processing the error response are ignored.
    }
  } | 3.68 |
druid_DruidAbstractDataSource_setSocketTimeout | /**
* @since 1.2.12
*/
public void setSocketTimeout(int milliSeconds) {
this.socketTimeout = milliSeconds;
this.socketTimeoutSr = null;
} | 3.68 |
hbase_RootProcedureState_addRollbackStep | /**
* Called by the ProcedureExecutor after the procedure step is completed, to add the step to the
* rollback list (or procedure stack)
*/
protected synchronized void addRollbackStep(Procedure<TEnvironment> proc) {
if (proc.isFailed()) {
state = State.FAILED;
}
if (subprocStack == null) {
subprocStack = new ArrayList<>();
}
proc.addStackIndex(subprocStack.size());
LOG.trace("Add procedure {} as the {}th rollback step", proc, subprocStack.size());
subprocStack.add(proc);
} | 3.68 |
dubbo_PojoUtils_updatePropertyIfAbsent | /**
* Update the property if absent
*
* @param getterMethod the getter method
* @param setterMethod the setter method
* @param newValue the new value
* @param <T> the value type
* @since 2.7.8
*/
public static <T> void updatePropertyIfAbsent(Supplier<T> getterMethod, Consumer<T> setterMethod, T newValue) {
if (newValue != null && getterMethod.get() == null) {
setterMethod.accept(newValue);
}
} | 3.68 |
flink_ResourceCounter_getResourcesWithCount | /**
* Gets the stored resources and their counts. The counts are guaranteed to be positive (> 0).
*
* @return collection of {@link ResourceProfile} and count pairs
*/
public Collection<Map.Entry<ResourceProfile, Integer>> getResourcesWithCount() {
return resources.entrySet();
} | 3.68 |
flink_StreamSourceContexts_getSourceContext | /**
 * Depending on the {@link TimeCharacteristic}, this method will return the appropriate {@link
* org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext}. That is:
*
* <ul>
* <li>{@link TimeCharacteristic#IngestionTime} = {@code AutomaticWatermarkContext}
* <li>{@link TimeCharacteristic#ProcessingTime} = {@code NonTimestampContext}
* <li>{@link TimeCharacteristic#EventTime} = {@code ManualWatermarkContext}
* </ul>
*/
public static <OUT> SourceFunction.SourceContext<OUT> getSourceContext(
TimeCharacteristic timeCharacteristic,
ProcessingTimeService processingTimeService,
Object checkpointLock,
Output<StreamRecord<OUT>> output,
long watermarkInterval,
long idleTimeout,
boolean emitProgressiveWatermarks) {
final SourceFunction.SourceContext<OUT> ctx;
switch (timeCharacteristic) {
case EventTime:
ctx =
new ManualWatermarkContext<>(
output,
processingTimeService,
checkpointLock,
idleTimeout,
emitProgressiveWatermarks);
break;
case IngestionTime:
Preconditions.checkState(
emitProgressiveWatermarks,
"Ingestion time is not available when emitting progressive watermarks "
+ "is disabled.");
ctx =
new AutomaticWatermarkContext<>(
output,
watermarkInterval,
processingTimeService,
checkpointLock,
idleTimeout);
break;
case ProcessingTime:
ctx = new NonTimestampContext<>(checkpointLock, output);
break;
default:
throw new IllegalArgumentException(String.valueOf(timeCharacteristic));
}
return new SwitchingOnClose<>(ctx);
} | 3.68 |