name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
morf_CaseStatement_deepCopyInternal | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
*/
@Override
protected CaseStatement deepCopyInternal(DeepCopyTransformation transformer) {
return new CaseStatement(this,transformer);
} | 3.68 |
hudi_SparkHoodieHBaseIndex_isImplicitWithStorage | /**
* Index needs to be explicitly updated after storage write.
*/
@Override
public boolean isImplicitWithStorage() {
return false;
} | 3.68 |
framework_Label_getContentMode | /**
* Gets the content mode of the Label.
*
* @return the Content mode of the label.
*
* @see ContentMode
*/
public ContentMode getContentMode() {
return getState(false).contentMode;
} | 3.68 |
hbase_JMXJsonServlet_checkCallbackName | /**
* Verifies that the callback property, if provided, is purely alphanumeric. This prevents a
* malicious callback name (that is javascript code) from being returned by the UI to an
* unsuspecting user.
* @param callbackName The callback name, can be null.
* @return The callback name
* @throws IOException If the name is disallowed.
*/
private String checkCallbackName(String callbackName) throws IOException {
if (null == callbackName) {
return null;
}
if (callbackName.matches("[A-Za-z0-9_]+")) {
return callbackName;
}
throw new IOException("'callback' must be alphanumeric");
} | 3.68 |
framework_ComponentConnectorLayoutSlot_getChild | /**
* Returns the connector of the child component that has been assigned to
* this slot.
*
* @return the content connector
*/
public ComponentConnector getChild() {
return child;
} | 3.68 |
flink_TemplateUtils_extractGlobalFunctionTemplates | /** Retrieve global templates from function class. */
static Set<FunctionTemplate> extractGlobalFunctionTemplates(
DataTypeFactory typeFactory, Class<? extends UserDefinedFunction> function) {
return asFunctionTemplates(
typeFactory, collectAnnotationsOfClass(FunctionHint.class, function));
} | 3.68 |
AreaShop_AreaShop_debugI | /**
* Non-static debug to use as implementation of the interface.
 * @param message Object parts of the message that should be logged; toString() will be used on each part
*/
@Override
public void debugI(Object... message) {
AreaShop.debug(StringUtils.join(message, " "));
} | 3.68 |
hbase_MobFile_close | /**
 * Closes the underlying reader, but does not evict blocks belonging to this file. It is not
 * thread-safe; use MobFileCache.closeFile() instead.
*/
public void close() throws IOException {
if (sf != null) {
sf.closeStoreFile(false);
sf = null;
}
} | 3.68 |
framework_AbstractDataProvider_fireEvent | /**
* Sends the event to all listeners.
*
* @param event
* the Event to be sent to all listeners.
*/
protected void fireEvent(EventObject event) {
if (eventRouter != null) {
eventRouter.fireEvent(event);
}
} | 3.68 |
hadoop_AbstractRESTRequestInterceptor_getConf | /**
* Gets the {@link Configuration}.
*/
@Override
public Configuration getConf() {
return this.conf;
} | 3.68 |
pulsar_SaslAuthenticationState_authenticate | /**
 * Returns null if authentication has completed and no auth data needs to be sent back to the client.
 * Otherwise, performs authentication and returns the auth data to the client.
*/
@Override
public AuthData authenticate(AuthData authData) throws AuthenticationException {
return pulsarSaslServer.response(authData);
} | 3.68 |
zxing_ResultHandler_getType | /**
* A convenience method to get the parsed type. Should not be overridden.
*
* @return The parsed type, e.g. URI or ISBN
*/
public final ParsedResultType getType() {
return result.getType();
} | 3.68 |
flink_PhysicalSlotRequestBulkCheckerImpl_isSlotRequestBulkFulfillable | /**
 * Returns whether the given bulk of slot requests can be fulfilled at the same time with all
 * the reusable slots in the slot pool. A reusable slot is one that is available or will not be
 * occupied indefinitely.
*
* @param slotRequestBulk bulk of slot requests to check
* @param slotsRetriever supplies slots to be used for the fulfill-ability check
 * @return true if the slot requests can be fulfilled, otherwise false
*/
@VisibleForTesting
static boolean isSlotRequestBulkFulfillable(
final PhysicalSlotRequestBulk slotRequestBulk,
final Supplier<Set<SlotInfo>> slotsRetriever) {
final Set<AllocationID> assignedSlots =
slotRequestBulk.getAllocationIdsOfFulfilledRequests();
final Set<SlotInfo> reusableSlots = getReusableSlots(slotsRetriever, assignedSlots);
return areRequestsFulfillableWithSlots(slotRequestBulk.getPendingRequests(), reusableSlots);
} | 3.68 |
hbase_CompactSplit_getCompactionQueueSize | /**
 * Returns the current size of the queue of regions waiting to be compacted.
 * @return The current size of the compaction queue.
*/
public int getCompactionQueueSize() {
return longCompactions.getQueue().size() + shortCompactions.getQueue().size();
} | 3.68 |
hadoop_RegistryOperationsFactory_createAuthenticatedInstance | /**
* Create and initialize an operations instance authenticated with write
* access via an <code>id:password</code> pair.
*
 * The instance will have read access
 * across the registry, but write access only to that part of the registry
 * to which it has been given the relevant permissions.
* @param conf configuration
* @param id user ID
* @param password password
* @return a registry operations instance
* @throws ServiceStateException on any failure to initialize
* @throws IllegalArgumentException if an argument is invalid
*/
public static RegistryOperations createAuthenticatedInstance(Configuration conf,
String id,
String password) {
Preconditions.checkArgument(!StringUtils.isEmpty(id), "empty Id");
Preconditions.checkArgument(!StringUtils.isEmpty(password), "empty Password");
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, id);
conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, password);
return createInstance("DigestRegistryOperations", conf);
} | 3.68 |
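A minimal usage sketch for the factory above; the credentials are placeholders, and the explicit start() call is an assumption based on the usual Hadoop Service lifecycle (whether createInstance also initializes the returned service is not shown in this row):

    Configuration conf = new Configuration();
    // hypothetical id/password pair with write access to part of the registry
    RegistryOperations registry =
        RegistryOperationsFactory.createAuthenticatedInstance(conf, "appUser", "appSecret");
    registry.start(); // assumed: the factory prepares the service, the caller starts it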
graphhopper_RamerDouglasPeucker_subSimplify | // keeps the points at fromIndex and lastIndex
int subSimplify(PointList points, int fromIndex, int lastIndex) {
if (lastIndex - fromIndex < 2) {
return 0;
}
int indexWithMaxDist = -1;
double maxDist = -1;
double elevationFactor = maxDistance / elevationMaxDistance;
double firstLat = points.getLat(fromIndex);
double firstLon = points.getLon(fromIndex);
double firstEle = points.getEle(fromIndex);
double lastLat = points.getLat(lastIndex);
double lastLon = points.getLon(lastIndex);
double lastEle = points.getEle(lastIndex);
for (int i = fromIndex + 1; i < lastIndex; i++) {
double lat = points.getLat(i);
if (Double.isNaN(lat)) {
continue;
}
double lon = points.getLon(i);
double ele = points.getEle(i);
double dist = (points.is3D() && elevationMaxDistance < Double.MAX_VALUE && !Double.isNaN(firstEle) && !Double.isNaN(lastEle) && !Double.isNaN(ele))
? calc.calcNormalizedEdgeDistance3D(
lat, lon, ele * elevationFactor,
firstLat, firstLon, firstEle * elevationFactor,
lastLat, lastLon, lastEle * elevationFactor)
: calc.calcNormalizedEdgeDistance(lat, lon, firstLat, firstLon, lastLat, lastLon);
if (maxDist < dist) {
indexWithMaxDist = i;
maxDist = dist;
}
}
if (indexWithMaxDist < 0) {
throw new IllegalStateException("maximum not found in [" + fromIndex + "," + lastIndex + "]");
}
int counter = 0;
if (maxDist < normedMaxDist) {
for (int i = fromIndex + 1; i < lastIndex; i++) {
points.set(i, Double.NaN, Double.NaN, Double.NaN);
counter++;
}
} else {
counter = subSimplify(points, fromIndex, indexWithMaxDist);
counter += subSimplify(points, indexWithMaxDist, lastIndex);
}
return counter;
} | 3.68 |
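subSimplify above is the recursive core of Ramer-Douglas-Peucker; below is a hedged sketch of how the surrounding GraphHopper class is typically driven (setMaxDistance and simplify are assumptions about its public API, which is not shown in this row):

    PointList points = new PointList(10, true);   // 3D point list (lat, lon, ele)
    points.add(52.50, 13.40, 35.0);
    points.add(52.51, 13.41, 36.0);
    points.add(52.52, 13.42, 37.0);
    // assumed entry point: configure the tolerance, then simplify in place;
    // internally this would invoke subSimplify over the whole index range
    new RamerDouglasPeucker().setMaxDistance(1.0).simplify(points);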
hbase_KeyValueHeap_generalizedSeek | /**
* @param isLazy whether we are trying to seek to exactly the given row/col. Enables Bloom
* filter and most-recent-file-first optimizations for multi-column get/scan
* queries.
* @param seekKey key to seek to
* @param forward whether to seek forward (also known as reseek)
* @param useBloom whether to optimize seeks using Bloom filters
*/
private boolean generalizedSeek(boolean isLazy, Cell seekKey, boolean forward, boolean useBloom)
throws IOException {
if (!isLazy && useBloom) {
throw new IllegalArgumentException(
"Multi-column Bloom filter " + "optimization requires a lazy seek");
}
if (current == null) {
return false;
}
KeyValueScanner scanner = current;
try {
while (scanner != null) {
Cell topKey = scanner.peek();
if (comparator.getComparator().compare(seekKey, topKey) <= 0) {
// Top KeyValue is at-or-after Seek KeyValue. We only know that all
// scanners are at or after seekKey (because fake keys of
// scanners where a lazy-seek operation has been done are not greater
// than their real next keys) but we still need to enforce our
// invariant that the top scanner has done a real seek. This way
// StoreScanner and RegionScanner do not have to worry about fake
// keys.
heap.add(scanner);
scanner = null;
current = pollRealKV();
return current != null;
}
boolean seekResult;
if (isLazy && heap.size() > 0) {
// If there is only one scanner left, we don't do lazy seek.
seekResult = scanner.requestSeek(seekKey, forward, useBloom);
} else {
seekResult = NonLazyKeyValueScanner.doRealSeek(scanner, seekKey, forward);
}
if (!seekResult) {
this.scannersForDelayedClose.add(scanner);
} else {
heap.add(scanner);
}
scanner = heap.poll();
if (scanner == null) {
current = null;
}
}
} catch (Exception e) {
if (scanner != null) {
try {
scanner.close();
} catch (Exception ce) {
LOG.warn("close KeyValueScanner error", ce);
}
}
throw e;
}
// Heap is returning empty, scanner is done
return false;
} | 3.68 |
framework_AbstractListingConnector_getRowKey | /**
* Returns the key of the given data row.
*
* @param row
* the row
* @return the row key
*/
protected static String getRowKey(JsonObject row) {
return row.getString(DataCommunicatorConstants.KEY);
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_reconcileSpuriousBlocksAndGetValidOnes | /**
 * There could be spurious log blocks due to Spark task retries. So, we use the BLOCK_SEQUENCE_NUMBER in the log block header to detect such spurious log blocks and return
 * a deduped set of log blocks.
 * @param allValidLogBlocks all valid log blocks parsed so far.
 * @param blockSequenceMapPerCommit map containing block sequence numbers for every commit.
 * @return a Pair of a boolean and the list of deduped valid log blocks, where a boolean of true means duplicates were detected.
*/
private Pair<Boolean, List<HoodieLogBlock>> reconcileSpuriousBlocksAndGetValidOnes(List<HoodieLogBlock> allValidLogBlocks,
Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit) {
boolean dupsFound = blockSequenceMapPerCommit.values().stream().anyMatch(perCommitBlockList -> perCommitBlockList.size() > 1);
if (dupsFound) {
if (LOG.isDebugEnabled()) {
logBlockSequenceMapping(blockSequenceMapPerCommit);
}
// duplicates are found. we need to remove duplicate log blocks.
for (Map.Entry<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> entry: blockSequenceMapPerCommit.entrySet()) {
Map<Long, List<Pair<Integer, HoodieLogBlock>>> perCommitBlockSequences = entry.getValue();
if (perCommitBlockSequences.size() > 1) {
// only those that have more than 1 sequence needs deduping.
int maxSequenceCount = -1;
int maxAttemptNo = -1;
for (Map.Entry<Long, List<Pair<Integer, HoodieLogBlock>>> perAttemptEntries : perCommitBlockSequences.entrySet()) {
Long attemptNo = perAttemptEntries.getKey();
int size = perAttemptEntries.getValue().size();
if (maxSequenceCount <= size) {
maxSequenceCount = size;
maxAttemptNo = Math.toIntExact(attemptNo);
}
}
// for other sequences (!= maxSequenceIndex), we need to remove the corresponding logBlocks from allValidLogBlocks
for (Map.Entry<Long, List<Pair<Integer, HoodieLogBlock>>> perAttemptEntries : perCommitBlockSequences.entrySet()) {
Long attemptNo = perAttemptEntries.getKey();
if (maxAttemptNo != attemptNo) {
List<HoodieLogBlock> logBlocksToRemove = perCommitBlockSequences.get(attemptNo).stream().map(pair -> pair.getValue()).collect(Collectors.toList());
logBlocksToRemove.forEach(logBlockToRemove -> {
allValidLogBlocks.remove(logBlockToRemove);
});
}
}
}
}
return Pair.of(true, allValidLogBlocks);
} else {
return Pair.of(false, allValidLogBlocks);
}
} | 3.68 |
flink_CheckpointStatsCounts_getNumberOfRestoredCheckpoints | /**
* Returns the number of restored checkpoints.
*
* @return Number of restored checkpoints.
*/
public long getNumberOfRestoredCheckpoints() {
return numRestoredCheckpoints;
} | 3.68 |
flink_Types_PRIMITIVE_ARRAY | /**
* Returns type information for Java arrays of primitive type (such as <code>byte[]</code>). The
* array must not be null.
*
* @param elementType element type of the array (e.g. Types.BOOLEAN, Types.INT, Types.DOUBLE)
*/
public static TypeInformation<?> PRIMITIVE_ARRAY(TypeInformation<?> elementType) {
if (elementType == BOOLEAN) {
return PrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO;
} else if (elementType == BYTE) {
return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO;
} else if (elementType == SHORT) {
return PrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO;
} else if (elementType == INT) {
return PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO;
} else if (elementType == LONG) {
return PrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO;
} else if (elementType == FLOAT) {
return PrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO;
} else if (elementType == DOUBLE) {
return PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO;
} else if (elementType == CHAR) {
return PrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO;
}
throw new IllegalArgumentException("Invalid element type for a primitive array.");
} | 3.68 |
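For example, the helper above maps primitive element types to the corresponding array type information (a minimal sketch):

    TypeInformation<?> intArrayInfo  = Types.PRIMITIVE_ARRAY(Types.INT);  // type info for int[]
    TypeInformation<?> byteArrayInfo = Types.PRIMITIVE_ARRAY(Types.BYTE); // type info for byte[]
    // a non-primitive element type such as Types.STRING falls through to the IllegalArgumentException branch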
shardingsphere-elasticjob_FailoverService_setCrashedFailoverFlagDirectly | /**
 * Sets the crashed failover flag directly.
*
* @param item crashed item
*/
public void setCrashedFailoverFlagDirectly(final int item) {
jobNodeStorage.createJobNodeIfNeeded(FailoverNode.getItemsNode(item));
} | 3.68 |
flink_CliFrontend_runClusterAction | /**
* Retrieves the {@link ClusterClient} from the given {@link CustomCommandLine} and runs the
* given {@link ClusterAction} against it.
*
* @param activeCommandLine to create the {@link ClusterDescriptor} from
* @param commandLine containing the parsed command line options
* @param clusterAction the cluster action to run against the retrieved {@link ClusterClient}.
* @param <ClusterID> type of the cluster id
* @throws FlinkException if something goes wrong
*/
private <ClusterID> void runClusterAction(
CustomCommandLine activeCommandLine,
CommandLine commandLine,
ClusterAction<ClusterID> clusterAction)
throws FlinkException {
final Configuration effectiveConfiguration =
getEffectiveConfiguration(activeCommandLine, commandLine);
LOG.debug(
"Effective configuration after Flink conf, and custom commandline: {}",
effectiveConfiguration);
final ClusterClientFactory<ClusterID> clusterClientFactory =
clusterClientServiceLoader.getClusterClientFactory(effectiveConfiguration);
final ClusterID clusterId = clusterClientFactory.getClusterId(effectiveConfiguration);
if (clusterId == null) {
throw new FlinkException(
"No cluster id was specified. Please specify a cluster to which you would like to connect.");
}
try (final ClusterDescriptor<ClusterID> clusterDescriptor =
clusterClientFactory.createClusterDescriptor(effectiveConfiguration)) {
try (final ClusterClient<ClusterID> clusterClient =
clusterDescriptor.retrieve(clusterId).getClusterClient()) {
clusterAction.runAction(clusterClient, effectiveConfiguration);
}
}
} | 3.68 |
flink_FormatDescriptor_option | /**
* Sets the given option on the format.
*
* <p>Note that format options must not be prefixed with the format identifier itself here.
* For example,
*
* <pre>{@code
* FormatDescriptor.forFormat("json")
* .option("ignore-parse-errors", "true")
* .build();
* }</pre>
*
* <p>will automatically be converted into its prefixed form:
*
* <pre>{@code
* 'format' = 'json'
* 'json.ignore-parse-errors' = 'true'
* }</pre>
*/
public Builder option(String key, String value) {
Preconditions.checkNotNull(key, "Key must not be null.");
Preconditions.checkNotNull(value, "Value must not be null.");
options.put(key, value);
return this;
} | 3.68 |
hadoop_PartitionInfo_addTo | /**
* This method will generate a new PartitionInfo object based on two PartitionInfo objects.
 * The combination process is mainly based on the Resources.addTo method.
*
* @param left left PartitionInfo Object.
* @param right right PartitionInfo Object.
* @return new PartitionInfo Object.
*/
public static PartitionInfo addTo(PartitionInfo left, PartitionInfo right) {
Resource leftResource = Resource.newInstance(0, 0);
if (left != null && left.getResourceAvailable() != null) {
ResourceInfo leftResourceInfo = left.getResourceAvailable();
leftResource = leftResourceInfo.getResource();
}
Resource rightResource = Resource.newInstance(0, 0);
if (right != null && right.getResourceAvailable() != null) {
ResourceInfo rightResourceInfo = right.getResourceAvailable();
rightResource = rightResourceInfo.getResource();
}
Resource resource = Resources.addTo(leftResource, rightResource);
return new PartitionInfo(new ResourceInfo(resource));
} | 3.68 |
hadoop_FederationPolicyStoreInputValidator_validate | /**
* Quick validation on the input to check some obvious fail conditions (fail
* fast). Check if the provided
* {@link SetSubClusterPolicyConfigurationRequest} for adding a new policy is
* valid or not.
*
* @param request the {@link SetSubClusterPolicyConfigurationRequest} to
* validate against
* @throws FederationStateStoreInvalidInputException if the request is invalid
*/
public static void validate(SetSubClusterPolicyConfigurationRequest request)
throws FederationStateStoreInvalidInputException {
if (request == null) {
String message = "Missing SetSubClusterPolicyConfiguration Request."
+ " Please try again by specifying an policy insertion information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate subcluster policy configuration
checkSubClusterPolicyConfiguration(request.getPolicyConfiguration());
} | 3.68 |
flink_InstantiationUtil_instantiate | /**
* Creates a new instance of the given class.
*
* @param <T> The generic type of the class.
* @param clazz The class to instantiate.
* @return An instance of the given class.
* @throws RuntimeException Thrown, if the class could not be instantiated. The exception
* contains a detailed message about the reason why the instantiation failed.
*/
public static <T> T instantiate(Class<T> clazz) {
if (clazz == null) {
throw new NullPointerException();
}
// try to instantiate the class
try {
return clazz.newInstance();
} catch (InstantiationException | IllegalAccessException iex) {
// check for the common problem causes
checkForInstantiation(clazz);
        // here we are, if none of the common causes was the problem, then the error was
// most likely an exception in the constructor or field initialization
throw new RuntimeException(
"Could not instantiate type '"
+ clazz.getName()
+ "' due to an unspecified exception: "
+ iex.getMessage(),
iex);
} catch (Throwable t) {
String message = t.getMessage();
throw new RuntimeException(
"Could not instantiate type '"
+ clazz.getName()
+ "' Most likely the constructor (or a member variable initialization) threw an exception"
+ (message == null ? "." : ": " + message),
t);
}
} | 3.68 |
hadoop_Validate_checkGreater | /**
* Validates that the first value is greater than the second value.
* @param value1 the first value to check.
* @param value1Name the name of the first argument.
* @param value2 the second value to check.
* @param value2Name the name of the second argument.
*/
public static void checkGreater(
long value1,
String value1Name,
long value2,
String value2Name) {
checkArgument(
value1 > value2,
"'%s' (%s) must be greater than '%s' (%s).",
value1Name,
value1,
value2Name,
value2);
} | 3.68 |
hadoop_FindOptions_setIn | /**
* Sets the input stream to be used.
*
* @param in input stream to be used
*/
public void setIn(InputStream in) {
this.in = in;
} | 3.68 |
pulsar_KeyValueSchemaInfo_decodeKeyValueSchemaInfo | /**
* Decode the key/value schema info to get key schema info and value schema info.
*
* @param schemaInfo key/value schema info.
* @return the pair of key schema info and value schema info
*/
public static KeyValue<SchemaInfo, SchemaInfo> decodeKeyValueSchemaInfo(SchemaInfo schemaInfo) {
checkArgument(SchemaType.KEY_VALUE == schemaInfo.getType(),
"Not a KeyValue schema");
return KeyValue.decode(
schemaInfo.getSchema(),
(keyBytes, valueBytes) -> {
SchemaInfo keySchemaInfo = decodeSubSchemaInfo(
schemaInfo,
KEY_SCHEMA_NAME,
KEY_SCHEMA_TYPE,
KEY_SCHEMA_PROPS,
keyBytes
);
SchemaInfo valueSchemaInfo = decodeSubSchemaInfo(
schemaInfo,
VALUE_SCHEMA_NAME,
VALUE_SCHEMA_TYPE,
VALUE_SCHEMA_PROPS,
valueBytes
);
return new KeyValue<>(keySchemaInfo, valueSchemaInfo);
}
);
} | 3.68 |
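A hedged usage sketch for the decoder above; obtaining a KEY_VALUE SchemaInfo from an existing schema instance is an assumption about the surrounding Pulsar schema API:

    // schemaInfo is assumed to describe a KEY_VALUE schema, e.g. taken from Schema.KeyValue(keySchema, valueSchema)
    KeyValue<SchemaInfo, SchemaInfo> parts = KeyValueSchemaInfo.decodeKeyValueSchemaInfo(schemaInfo);
    SchemaInfo keySchemaInfo = parts.getKey();
    SchemaInfo valueSchemaInfo = parts.getValue();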
querydsl_JTSGeometryExpression_isSimple | /**
* Returns 1 (TRUE) if this geometric object has no anomalous geometric points, such
* as self intersection or self tangency. The description of each instantiable geometric class
* will include the specific conditions that cause an instance of that class to be classified as not simple.
*
* @return simple
*/
public BooleanExpression isSimple() {
if (simple == null) {
simple = Expressions.booleanOperation(SpatialOps.IS_SIMPLE, mixin);
}
return simple;
} | 3.68 |
framework_MenuBar_getSelectedIndex | /**
* Gets the index of the selected item.
*
* @since 7.2.6
* @return the index of the selected item.
*/
public int getSelectedIndex() {
return items != null ? items.indexOf(getSelectedItem()) : -1;
} | 3.68 |
hadoop_ReplicaUnderConstruction_getState | /**
* Get replica state as reported by the data-node.
*/
HdfsServerConstants.ReplicaState getState() {
return state;
} | 3.68 |
flink_ResourceID_getResourceIdString | /**
* Gets the Resource Id as string.
*
* @return Stringified version of the ResourceID
*/
public final String getResourceIdString() {
return resourceId;
} | 3.68 |
hadoop_RpcNoSuchProtocolException_getRpcErrorCodeProto | /**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_NO_SUCH_PROTOCOL;
} | 3.68 |
morf_SpreadsheetDataSetProducer_columnName | /**
 * Converts the given long name into a column name. This is the same as
* removing all the spaces and making the first character lowercase.
*
* @param longName the long name to convert
* @return the name of the column
*/
private String columnName(final String longName) {
final String noSpaces = longName.replaceAll(" ", "");
return noSpaces.substring(0, 1).toLowerCase() + noSpaces.substring(1);
} | 3.68 |
framework_StaticSection_getRow | /**
* Returns the row at the given index.
*
* @param index
* the index of the row
* @return the row at the index
* @throws IndexOutOfBoundsException
* if {@code index < 0 || index >= getRowCount()}
*/
public ROW getRow(int index) {
return rows.get(index);
} | 3.68 |
pulsar_LegacyHierarchicalLedgerRangeIterator_getStartLedgerIdByLevel | /**
 * Get the smallest ledger id in a specified node /level1/level2.
*
* @param level1
* 1st level node name
* @param level2
* 2nd level node name
* @return the smallest ledger id
*/
private long getStartLedgerIdByLevel(String level1, String level2) throws IOException {
return StringUtils.stringToHierarchicalLedgerId(level1, level2, MIN_ID_SUFFIX);
} | 3.68 |
dubbo_FrameworkModel_tryDestroyProtocols | /**
* Protocols are special resources that need to be destroyed as soon as possible.
*
 * Since connections inside a protocol are not classified by application, trying to destroy protocols in advance might only work for the single-application scenario.
*/
void tryDestroyProtocols() {
synchronized (instLock) {
if (pubApplicationModels.size() == 0) {
notifyProtocolDestroy();
}
}
} | 3.68 |
hadoop_AbstractTask_getTimeout | /**
* Get Timeout for a Task.
* @return timeout in seconds
*/
@Override
public final long getTimeout() {
return this.timeout;
} | 3.68 |
framework_VColorPickerArea_setText | /**
* Sets the caption's content to the given text.
*
* @param text
*
* @see Label#setText(String)
*/
@Override
public void setText(String text) {
caption.setText(text);
} | 3.68 |
hibernate-validator_Configuration_methodConstraintsSupported | /**
* Whether method constraints are allowed at any method ({@code true}) or only
* getter methods ({@code false}).
*
 * @return {@code true} if method constraints are allowed on any method, {@code false} if only on getter methods
*/
public boolean methodConstraintsSupported() {
return methodConstraintsSupported;
} | 3.68 |
hmily_PropertyName_isValidChar | /**
 * Checks whether the given character is valid within a property name.
 *
 * @param ch the character to check
 * @param index the index of the character within the name
 * @return whether the character is valid
*/
static boolean isValidChar(final char ch, final int index) {
return isAlpha(ch) || isNumeric(ch) || (index != 0 && ch == '-');
} | 3.68 |
hadoop_ConfigRedactor_redact | /**
* Given a key / value pair, decides whether or not to redact and returns
* either the original value or text indicating it has been redacted.
*
* @param key param key.
 * @param value param value; returned unchanged if the key is not sensitive.
* @return Original value, or text indicating it has been redacted
*/
public String redact(String key, String value) {
if (configIsSensitive(key)) {
return REDACTED_TEXT;
}
return value;
} | 3.68 |
pulsar_KerberosName_getShortName | /**
* Get the translation of the principal name into an operating system
* user name.
* @return the short name
* @throws IOException
*/
public String getShortName() throws IOException {
String[] params;
if (hostName == null) {
// if it is already simple, just return it
if (realm == null) {
return serviceName;
}
params = new String[]{realm, serviceName};
} else {
params = new String[]{realm, serviceName, hostName};
}
for (Rule r: rules) {
String result = r.apply(params);
if (result != null) {
return result;
}
}
throw new NoMatchingRule("No rules applied to " + toString());
} | 3.68 |
framework_TabSheet_copyTabMetadata | /**
* Copies properties from one Tab to another.
*
* @param from
* The tab whose data to copy.
* @param to
* The tab to which copy the data.
*/
private static void copyTabMetadata(Tab from, Tab to) {
to.setCaption(from.getCaption());
to.setIcon(from.getIcon(), from.getIconAlternateText());
to.setDescription(from.getDescription());
to.setVisible(from.isVisible());
to.setEnabled(from.isEnabled());
to.setClosable(from.isClosable());
to.setStyleName(from.getStyleName());
to.setComponentError(from.getComponentError());
} | 3.68 |
flink_TypeStrategies_argument | /** Type strategy that returns the n-th input argument, mapping it. */
public static TypeStrategy argument(int pos, Function<DataType, Optional<DataType>> mapper) {
return new ArgumentMappingTypeStrategy(pos, mapper);
} | 3.68 |
hbase_FileMmapIOEngine_isPersistent | /**
* File IO engine is always able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
// TODO : HBASE-21981 needed for persistence to really work
return true;
} | 3.68 |
framework_VaadinSession_modifyBootstrapResponse | /**
* Fires a bootstrap event to all registered listeners. There are currently
* two supported events, both inheriting from {@link BootstrapResponse}:
* {@link BootstrapFragmentResponse} and {@link BootstrapPageResponse}.
*
* @param response
* the bootstrap response event for which listeners should be
* fired
*
* @deprecated As of 7.0. Will likely change or be removed in a future
* version
*/
@Deprecated
public void modifyBootstrapResponse(BootstrapResponse response) {
assert hasLock();
eventRouter.fireEvent(response);
} | 3.68 |
hudi_HoodieRealtimeRecordReaderUtils_orderFields | /**
 * Given a comma-separated list of field names and the positions at which they appear in Hive, returns
 * an ordered list of field names that can be passed on to storage.
*/
public static List<String> orderFields(String fieldNameCsv, String fieldOrderCsv, List<String> partitioningFields) {
// Need to convert the following to Set first since Hive does not handle duplicate field names correctly but
// handles duplicate fields orders correctly.
// Fields Orders -> {@link https://github
// .com/apache/hive/blob/f37c5de6c32b9395d1b34fa3c02ed06d1bfbf6eb/serde/src/java
// /org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java#L188}
// Field Names -> {@link https://github.com/apache/hive/blob/f37c5de6c32b9395d1b34fa3c02ed06d1bfbf6eb/serde/src/java
// /org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java#L229}
String[] fieldOrdersWithDups = fieldOrderCsv.isEmpty() ? new String[0] : fieldOrderCsv.split(",");
Set<String> fieldOrdersSet = new LinkedHashSet<>(Arrays.asList(fieldOrdersWithDups));
String[] fieldOrders = fieldOrdersSet.toArray(new String[0]);
List<String> fieldNames = fieldNameCsv.isEmpty() ? new ArrayList<>() : Arrays.stream(fieldNameCsv.split(",")).collect(Collectors.toList());
Set<String> fieldNamesSet = new LinkedHashSet<>(fieldNames);
if (fieldNamesSet.size() != fieldOrders.length) {
throw new HoodieException(String
.format("Error ordering fields for storage read. #fieldNames: %d, #fieldPositions: %d",
fieldNames.size(), fieldOrders.length));
}
TreeMap<Integer, String> orderedFieldMap = new TreeMap<>();
String[] fieldNamesArray = fieldNamesSet.toArray(new String[0]);
for (int ox = 0; ox < fieldOrders.length; ox++) {
orderedFieldMap.put(Integer.parseInt(fieldOrders[ox]), fieldNamesArray[ox]);
}
return new ArrayList<>(orderedFieldMap.values());
} | 3.68 |
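A small worked example of the ordering above (the values are illustrative only):

    // names "b,a,c" are reported by Hive at positions 1, 0 and 2 respectively
    List<String> ordered = HoodieRealtimeRecordReaderUtils.orderFields("b,a,c", "1,0,2", Collections.emptyList());
    // the TreeMap sorts by position (0->a, 1->b, 2->c), so ordered == [a, b, c]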
morf_AbstractSqlDialectTest_expectedSelectSome | /**
* @return The expected sql.
*/
protected String expectedSelectSome() {
return "SELECT MAX(booleanField) FROM " + tableName(TEST_TABLE);
} | 3.68 |
flink_DataType_getFieldNames | /**
* Returns the first-level field names for the provided {@link DataType}.
*
* <p>Note: This method returns an empty list for every {@link DataType} that is not a composite
* type.
*/
public static List<String> getFieldNames(DataType dataType) {
final LogicalType type = dataType.getLogicalType();
if (type.is(LogicalTypeRoot.DISTINCT_TYPE)) {
return getFieldNames(dataType.getChildren().get(0));
} else if (isCompositeType(type)) {
return LogicalTypeChecks.getFieldNames(type);
}
return Collections.emptyList();
} | 3.68 |
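For instance, for a composite row type the helper above returns the declared field names (a minimal sketch using the Table API DataTypes factory):

    DataType rowType = DataTypes.ROW(
        DataTypes.FIELD("id", DataTypes.INT()),
        DataTypes.FIELD("name", DataTypes.STRING()));
    List<String> names = DataType.getFieldNames(rowType); // ["id", "name"]
    // a non-composite type such as DataTypes.STRING() yields an empty list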
querydsl_SQLExpressions_stddevSamp | /**
 * Returns the cumulative sample standard deviation, i.e. the square root of the sample variance.
*
* @param expr argument
* @return stddev_samp(expr)
*/
public static <T extends Number> WindowOver<T> stddevSamp(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), SQLOps.STDDEVSAMP, expr);
} | 3.68 |
framework_VDragEvent_getDragImage | /**
* @return the current Element used as a drag image (aka drag proxy) or null
* if drag image is not currently set for this drag operation.
*/
public com.google.gwt.user.client.Element getDragImage() {
return DOM.asOld(VDragAndDropManager.get().getDragElement());
} | 3.68 |
framework_VTree_getChildren | /**
* Returns the children of the node.
*
* @return A set of tree nodes
*/
public List<TreeNode> getChildren() {
List<TreeNode> nodes = new LinkedList<TreeNode>();
if (!isLeaf() && isChildrenLoaded()) {
for (Widget w : childNodeContainer) {
TreeNode node = (TreeNode) w;
nodes.add(node);
}
}
return nodes;
} | 3.68 |
hudi_HoodieTableMetaClient_getCommitTimeline | /**
* Get the compacted commit timeline visible for this table.
*/
public HoodieTimeline getCommitTimeline() {
switch (this.getTableType()) {
case COPY_ON_WRITE:
case MERGE_ON_READ:
// We need to include the parquet files written out in delta commits in tagging
return getActiveTimeline().getCommitTimeline();
default:
throw new HoodieException("Unsupported table type :" + this.getTableType());
}
} | 3.68 |
framework_MarginInfo_hasNone | /**
* Checks if this MarginInfo object has no margins enabled.
*
* @since 8.0
*
* @return true if all edges have margins disabled
*/
public boolean hasNone() {
return (bitMask & ALL) == 0;
} | 3.68 |
hudi_FailSafeConsistencyGuard_checkFileVisibility | /**
 * Helper to check file visibility.
*
* @param filePath File Path
* @param visibility Visibility
* @return true (if file visible in Path), false (otherwise)
* @throws IOException -
*/
protected boolean checkFileVisibility(Path filePath, FileVisibility visibility) throws IOException {
try {
FileStatus status = fs.getFileStatus(filePath);
switch (visibility) {
case APPEAR:
return status != null;
case DISAPPEAR:
default:
return status == null;
}
} catch (FileNotFoundException nfe) {
switch (visibility) {
case APPEAR:
return false;
case DISAPPEAR:
default:
return true;
}
}
} | 3.68 |
morf_IndexNameDecorator_getName | /**
* @see org.alfasoftware.morf.metadata.Index#getName()
*/
@Override
public String getName() {
return name;
} | 3.68 |
dubbo_MetricsEventBus_before | /**
 * Applicable to scenarios where execution and return are separated:
 * eventSaveRunner saves the event so that the rt (response time) calculation is handled internally.
*/
public static void before(MetricsEvent event) {
MetricsDispatcher dispatcher = validate(event);
if (dispatcher == null) return;
tryInvoke(() -> dispatcher.publishEvent(event));
} | 3.68 |
flink_MasterHooks_triggerHook | /**
* Trigger master hook and return a completable future with state.
*
* @param hook The master hook given
* @param checkpointId The checkpoint ID of the triggering checkpoint
* @param timestamp The (informational) timestamp for the triggering checkpoint
* @param executor An executor that can be used for asynchronous I/O calls
* @param <T> The type of data produced by the hook
* @return the completable future with state
*/
public static <T> CompletableFuture<MasterState> triggerHook(
MasterTriggerRestoreHook<T> hook,
long checkpointId,
long timestamp,
Executor executor) {
final String id = hook.getIdentifier();
final SimpleVersionedSerializer<T> serializer = hook.createCheckpointDataSerializer();
try {
// call the hook!
final CompletableFuture<T> resultFuture =
hook.triggerCheckpoint(checkpointId, timestamp, executor);
if (resultFuture == null) {
return CompletableFuture.completedFuture(null);
}
return resultFuture
.thenApply(
result -> {
// if the result of the future is not null, return it as state
if (result == null) {
return null;
} else if (serializer != null) {
try {
final int version = serializer.getVersion();
final byte[] bytes = serializer.serialize(result);
return new MasterState(id, bytes, version);
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
throw new CompletionException(
new FlinkException(
"Failed to serialize state of master hook '"
+ id
+ '\'',
t));
}
} else {
throw new CompletionException(
new FlinkException(
"Checkpoint hook '"
+ id
+ " is stateful but creates no serializer"));
}
})
.exceptionally(
(throwable) -> {
throw new CompletionException(
new FlinkException(
"Checkpoint master hook '"
+ id
+ "' produced an exception",
throwable.getCause()));
});
} catch (Throwable t) {
return FutureUtils.completedExceptionally(
new FlinkException(
"Error while triggering checkpoint master hook '" + id + '\'', t));
}
} | 3.68 |
hadoop_OBSFileSystem_getWriteHelper | /**
* Return the write helper used by {@link OBSBlockOutputStream}.
*
* @return the write helper
*/
OBSWriteOperationHelper getWriteHelper() {
return writeHelper;
} | 3.68 |
zxing_PDF417_setDimensions | /**
* Sets max/min row/col values
*
* @param maxCols maximum allowed columns
* @param minCols minimum allowed columns
* @param maxRows maximum allowed rows
* @param minRows minimum allowed rows
*/
public void setDimensions(int maxCols, int minCols, int maxRows, int minRows) {
this.maxCols = maxCols;
this.minCols = minCols;
this.maxRows = maxRows;
this.minRows = minRows;
} | 3.68 |
framework_AbstractTextField_getMaxLength | /**
* Returns the maximum number of characters in the field. Value -1 is
* considered unlimited. Terminal may however have some technical limits.
*
* @return the maxLength
*/
public int getMaxLength() {
return getState(false).maxLength;
} | 3.68 |
hbase_RequestConverter_buildSetRegionStateInMetaRequest | /**
* Creates a protocol buffer SetRegionStateInMetaRequest
 * @param nameOrEncodedName2State map of region names (or encoded region names) to the states to set in Meta
* @return a SetRegionStateInMetaRequest
*/
public static SetRegionStateInMetaRequest
buildSetRegionStateInMetaRequest(Map<String, RegionState.State> nameOrEncodedName2State) {
SetRegionStateInMetaRequest.Builder builder = SetRegionStateInMetaRequest.newBuilder();
nameOrEncodedName2State.forEach((name, state) -> {
byte[] bytes = Bytes.toBytes(name);
RegionSpecifier spec;
if (RegionInfo.isEncodedRegionName(bytes)) {
spec = buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, bytes);
} else {
spec = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, bytes);
}
builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec)
.setState(state.convert()).build());
});
return builder.build();
} | 3.68 |
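A hedged sketch of building such a request; the region names below are made up purely to show that both full and encoded region names are accepted:

    Map<String, RegionState.State> states = new HashMap<>();
    // hypothetical encoded region name (32-character hex) and target state
    states.put("abcdef0123456789abcdef0123456789", RegionState.State.CLOSED);
    // hypothetical full region name
    states.put("ns:tbl,,1700000000000.abcdef0123456789abcdef0123456789.", RegionState.State.OPEN);
    SetRegionStateInMetaRequest request = RequestConverter.buildSetRegionStateInMetaRequest(states);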
hadoop_RouterClientRMService_createRequestInterceptorChain | /**
* This method creates and returns reference of the first interceptor in the
* chain of request interceptor instances.
*
* @return the reference of the first interceptor in the chain
*/
@VisibleForTesting
protected ClientRequestInterceptor createRequestInterceptorChain() {
Configuration conf = getConfig();
return RouterServerUtil.createRequestInterceptorChain(conf,
YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE,
YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_INTERCEPTOR_CLASS,
ClientRequestInterceptor.class);
} | 3.68 |
hbase_MasterObserver_preHasUserPermissions | /**
* Called before checking if user has permissions.
* @param ctx the coprocessor instance's environment
* @param userName the user name
* @param permissions the permission list
*/
default void preHasUserPermissions(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName, List<Permission> permissions) throws IOException {
} | 3.68 |
flink_NetUtils_isValidHostPort | /**
 * Checks whether the given port is in the valid range when getting a port from the local system.
*
* @param port the port to check
* @return true if the number in the range 0 to 65535
*/
public static boolean isValidHostPort(int port) {
return 0 <= port && port <= 65535;
} | 3.68 |
hadoop_IOStatisticsStoreImpl_getMinimumReference | /**
* Get a reference to the atomic instance providing the
* value for a specific minimum. This is useful if
* the value is passed around.
* @param key statistic name
* @return the reference
* @throws NullPointerException if there is no entry of that name
*/
@Override
public AtomicLong getMinimumReference(String key) {
return lookup(minimumMap, key);
} | 3.68 |
hbase_Procedure_doReleaseLock | /**
* Internal method called by the ProcedureExecutor that starts the user-level code releaseLock().
*/
final void doReleaseLock(TEnvironment env, ProcedureStore store) {
locked = false;
// persist that we have released the lock. This must be done before we actually release the
// lock. Another procedure may take this lock immediately after we release the lock, and if we
// crash before persist the information that we have already released the lock, then when
// restarting there will be two procedures which both have the lock and cause problems.
if (getState() != ProcedureState.ROLLEDBACK) {
// If the state is ROLLEDBACK, it means that we have already deleted the procedure from
// procedure store, so do not need to log the release operation any more.
store.update(this);
}
releaseLock(env);
} | 3.68 |
hbase_RSProcedureDispatcher_unableToConnectToServer | /**
* The category of exceptions where we can ensure that the request has not yet been received
* and/or processed by the target regionserver yet and hence we can determine whether it is safe
* to choose different regionserver as the target.
* @param e IOException thrown by the underlying rpc framework.
* @return true if the exception belongs to the category where the regionserver has not yet
* received the request yet.
*/
private boolean unableToConnectToServer(IOException e) {
if (e instanceof CallQueueTooBigException) {
LOG.warn("request to {} failed due to {}, try={}, this usually because"
+ " server is overloaded, give up", serverName, e, numberOfAttemptsSoFar);
return true;
}
if (isSaslError(e)) {
LOG.warn("{} is not reachable; give up after first attempt", serverName, e);
return true;
}
return false;
} | 3.68 |
hbase_LeaseManager_createLease | /**
* Create a lease and insert it to the map of leases.
* @param leaseName name of the lease
* @param leaseTimeoutPeriod length of the lease in milliseconds
* @param listener listener that will process lease expirations
* @return The lease created.
*/
public Lease createLease(String leaseName, int leaseTimeoutPeriod, final LeaseListener listener)
throws LeaseStillHeldException {
Lease lease = new Lease(leaseName, leaseTimeoutPeriod, listener);
addLease(lease);
return lease;
} | 3.68 |
morf_SqlDialect_fetchSizeForBulkSelects | /**
* Different JDBC drivers and platforms have different behaviour for paging results
* into a {@link ResultSet} as they are fetched. For example, MySQL defaults
* to <a href="http://stackoverflow.com/questions/20496616/fetchsize-in-resultset-set-to-0-by-default">fetching
* <em>all</em> records</a> into memory, whereas Oracle defaults to fetching
* <a href="https://docs.oracle.com/cd/A87860_01/doc/java.817/a83724/resltse5.htm">10
* records</a> at a time.
*
* <p>The impact mostly rears its head during bulk loads (when loading large numbers
* of records). MySQL starts to run out of memory, and Oracle does not run at
* optimal speed due to unnecessary round-trips.</p>
*
* <p>This provides the ability for us to specify different fetch sizes for bulk loads
* on different platforms. Refer to the individual implementations for reasons for
* the choices there.</p>
*
* @return The number of rows to try and fetch at a time (default) when
* performing bulk select operations.
* @see #fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming()
*/
public int fetchSizeForBulkSelects() {
return 2000;
} | 3.68 |
framework_VScrollTable_updateFooter | /**
* Updates footers.
* <p>
 * Update headers should be called before this method is called!
* </p>
*
* @param strings
*/
private void updateFooter(String[] strings) {
if (strings == null) {
return;
}
// Add dummy column if row headers are present
int colIndex = 0;
if (showRowHeaders) {
tFoot.enableColumn(ROW_HEADER_COLUMN_KEY, colIndex);
colIndex++;
} else {
tFoot.removeCell(ROW_HEADER_COLUMN_KEY);
}
for (final String cid : strings) {
tFoot.enableColumn(cid, colIndex);
colIndex++;
}
tFoot.setVisible(showColFooters);
} | 3.68 |
morf_SelectStatementBuilder_having | /**
* Filters the grouped records by some criteria.
*
* <blockquote><pre>
* select()
* .from(tableRef("Foo"))
* .groupBy(field("age"))
* .having(min(field("age")).greaterThan(20));</pre></blockquote>
*
* @param criterion the criteria on which to filter the grouped records
* @return this, for method chaining.
*/
public SelectStatementBuilder having(Criterion criterion) {
if (criterion == null) {
throw new IllegalArgumentException("Criterion was null in having clause");
}
if (having != null) {
throw new UnsupportedOperationException("Cannot specify more than one having clause per statement");
}
// Add the singleton
having = criterion;
return this;
} | 3.68 |
hadoop_SlowPeerTracker_getSlowNodes | /**
 * Returns all tracked slow peers.
 * @param numNodes the maximum number of slow nodes to return
 * @return the list of slow nodes
*/
public List<String> getSlowNodes(int numNodes) {
Collection<SlowPeerJsonReport> jsonReports = getJsonReports(numNodes);
ArrayList<String> slowNodes = new ArrayList<>();
for (SlowPeerJsonReport jsonReport : jsonReports) {
slowNodes.add(jsonReport.getSlowNode());
}
if (!slowNodes.isEmpty()) {
LOG.warn("Slow nodes list: " + slowNodes);
}
return slowNodes;
} | 3.68 |
hudi_FlinkConcatHandle_write | /**
* Write old record as is w/o merging with incoming record.
*/
@Override
public void write(HoodieRecord oldRecord) {
Schema oldSchema = config.populateMetaFields() ? writeSchemaWithMetaFields : writeSchema;
String key = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt);
try {
fileWriter.write(key, oldRecord, oldSchema);
} catch (IOException | RuntimeException e) {
String errMsg = String.format("Failed to write old record into new file for key %s from old file %s to new file %s with writerSchema %s",
key, getOldFilePath(), newFilePath, oldSchema.toString(true));
LOG.debug("Old record is " + oldRecord);
throw new HoodieUpsertException(errMsg, e);
}
recordsWritten++;
} | 3.68 |
flink_SavepointLoader_loadSavepointMetadata | /**
* Takes the given string (representing a pointer to a checkpoint) and resolves it to a file
* status for the checkpoint's metadata file.
*
* <p>This should only be used when the user code class loader is the current classloader for
* the thread.
*
* @param savepointPath The path to an external savepoint.
* @return A state handle to savepoint's metadata.
* @throws IOException Thrown, if the path cannot be resolved, the file system not accessed, or
* the path points to a location that does not seem to be a savepoint.
*/
public static CheckpointMetadata loadSavepointMetadata(String savepointPath)
throws IOException {
CompletedCheckpointStorageLocation location =
AbstractFsCheckpointStorageAccess.resolveCheckpointPointer(savepointPath);
try (DataInputStream stream =
new DataInputStream(location.getMetadataHandle().openInputStream())) {
return Checkpoints.loadCheckpointMetadata(
stream, Thread.currentThread().getContextClassLoader(), savepointPath);
}
} | 3.68 |
flink_MailboxMetricsController_getMailCounter | /**
* Gets {@link Counter} for number of mails processed.
*
* @return {@link Counter} for number of mails processed.
*/
public Counter getMailCounter() {
return this.mailCounter;
} | 3.68 |
flink_LocalFileSystem_mkdirs | /**
* Recursively creates the directory specified by the provided path.
*
* @return <code>true</code>if the directories either already existed or have been created
* successfully, <code>false</code> otherwise
* @throws IOException thrown if an error occurred while creating the directory/directories
*/
@Override
public boolean mkdirs(final Path f) throws IOException {
checkNotNull(f, "path is null");
return mkdirsInternal(pathToFile(f));
} | 3.68 |
hudi_HoodieTableConfig_getPayloadClass | /**
* Read the payload class for HoodieRecords from the table properties.
*/
public String getPayloadClass() {
return RecordPayloadType.getPayloadClassName(this);
} | 3.68 |
hudi_LogFileCreationCallback_preFileCreation | /**
* Executes action right before log file is created.
*
* @param logFile The log file.
* @return true if the action executes successfully.
*/
default boolean preFileCreation(HoodieLogFile logFile) {
return true;
} | 3.68 |
hadoop_Check_notEmpty | /**
 * Verifies a string is not NULL and not empty
*
* @param str the variable to check.
* @param name the name to use in the exception message.
*
* @return the variable.
*
* @throws IllegalArgumentException if the variable is NULL or empty.
*/
public static String notEmpty(String str, String name) {
if (str == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
if (str.length() == 0) {
throw new IllegalArgumentException(name + " cannot be empty");
}
return str;
} | 3.68 |
framework_VTooltip_handleHideEvent | /**
 * Handles the hide event.
*
*/
private void handleHideEvent() {
hideTooltip();
} | 3.68 |
hbase_MasterObserver_postLockHeartbeat | /**
* Called after heartbeat to a lock.
* @param ctx the environment to interact with the framework and master
*/
default void postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
hudi_HoodieDropPartitionsTool_printDeleteFilesInfo | /**
 * Prints info about the data files and partitions to delete.
 *
 * @param partitionToReplaceFileIds mapping from partition to the data file IDs to delete
*/
private void printDeleteFilesInfo(Map<String, List<String>> partitionToReplaceFileIds) {
LOG.info("Data files and partitions to delete : ");
for (Map.Entry<String, List<String>> entry : partitionToReplaceFileIds.entrySet()) {
LOG.info(String.format("Partitions : %s, corresponding data file IDs : %s", entry.getKey(), entry.getValue()));
}
} | 3.68 |
flink_SequenceGeneratorSource_getRandomKey | /** Returns a random key that belongs to this key range. */
int getRandomKey(Random random) {
return random.nextInt(endKey - startKey) + startKey;
} | 3.68 |
querydsl_SQLExpressions_min | /**
* Start a window function expression
*
* @param expr expression
* @return min(expr)
*/
public static <T extends Comparable> WindowOver<T> min(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), Ops.AggOps.MIN_AGG, expr);
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_composeEvolvedSchemaTransformer | /**
 * Gets the final read schema to support schema evolution.
 * Step 1: find the fileSchema for the current dataBlock.
 * Step 2: determine whether fileSchema is compatible with the final read internalSchema.
 * Step 3: merge fileSchema and the read internalSchema to produce the final read schema.
*
* @param dataBlock current processed block
* @return final read schema.
*/
private Option<Pair<Function<HoodieRecord, HoodieRecord>, Schema>> composeEvolvedSchemaTransformer(
HoodieDataBlock dataBlock) {
if (internalSchema.isEmptySchema()) {
return Option.empty();
}
long currentInstantTime = Long.parseLong(dataBlock.getLogBlockHeader().get(INSTANT_TIME));
InternalSchema fileSchema = InternalSchemaCache.searchSchemaAndCache(currentInstantTime,
hoodieTableMetaClient, false);
InternalSchema mergedInternalSchema = new InternalSchemaMerger(fileSchema, internalSchema,
true, false).mergeSchema();
Schema mergedAvroSchema = AvroInternalSchemaConverter.convert(mergedInternalSchema, readerSchema.getFullName());
return Option.of(Pair.of((record) -> {
return record.rewriteRecordWithNewSchema(
dataBlock.getSchema(),
this.hoodieTableMetaClient.getTableConfig().getProps(),
mergedAvroSchema,
Collections.emptyMap());
}, mergedAvroSchema));
} | 3.68 |
flink_EnvironmentInformation_getJvmStartupOptionsArray | /**
* Gets the system parameters and environment parameters that were passed to the JVM on startup.
*
* @return The options passed to the JVM on startup.
*/
public static String[] getJvmStartupOptionsArray() {
try {
RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
List<String> options = bean.getInputArguments();
return options.toArray(new String[options.size()]);
} catch (Throwable t) {
return new String[0];
}
} | 3.68 |
hadoop_TimedHealthReporterService_setHealthy | /**
* Sets if the node is healthy or not.
*
* @param healthy whether the node is healthy
*/
protected synchronized void setHealthy(boolean healthy) {
this.isHealthy = healthy;
} | 3.68 |
hbase_AbstractFSWAL_markClosedAndClean | /**
* Mark this WAL file as closed and call cleanOldLogs to see if we can archive this file.
*/
private void markClosedAndClean(Path path) {
WALProps props = walFile2Props.get(path);
// typically this should not be null, but if there is no big issue if it is already null, so
// let's make the code more robust
if (props != null) {
props.closed = true;
cleanOldLogs();
}
} | 3.68 |
querydsl_PointExpression_x | /**
* The x-coordinate value for this Point.
*
* @return x-coordinate
*/
public NumberExpression<Double> x() {
if (x == null) {
x = Expressions.numberOperation(Double.class, SpatialOps.X, mixin);
}
return x;
} | 3.68 |
framework_VScrollTable_sendSelectedRows | /**
* Sends the selection to the server if it has been changed since the last
* update/visit.
*
* @param immediately
* set to true to immediately send the rows
*/
protected void sendSelectedRows(boolean immediately) {
// Don't send anything if selection has not changed
if (!selectionChanged) {
return;
}
// Reset selection changed flag
selectionChanged = false;
// Note: changing the immediateness of this might require changes to
// "clickEvent" immediateness also.
if (isMultiSelectModeDefault()) {
// Convert ranges to a set of strings
Set<String> ranges = new HashSet<String>();
for (SelectionRange range : selectedRowRanges) {
ranges.add(range.toString());
}
// Send the selected row ranges
client.updateVariable(paintableId, "selectedRanges",
ranges.toArray(new String[selectedRowRanges.size()]),
false);
selectedRowRanges.clear();
// clean selectedRowKeys so that they don't contain excess values
for (Iterator<String> iterator = selectedRowKeys
.iterator(); iterator.hasNext();) {
String key = iterator.next();
VScrollTableRow renderedRowByKey = getRenderedRowByKey(key);
if (renderedRowByKey != null) {
for (SelectionRange range : selectedRowRanges) {
if (range.inRange(renderedRowByKey)) {
iterator.remove();
}
}
} else {
// orphaned selected key, must be in a range, ignore
iterator.remove();
}
}
}
// Send the selected rows
client.updateVariable(paintableId, "selected",
selectedRowKeys.toArray(new String[selectedRowKeys.size()]),
immediately);
} | 3.68 |
framework_Margins_setMarginLeft | /**
* Sets the width of the left margin.
*
* @param marginLeft
* the left margin to set (in pixels)
*/
public void setMarginLeft(int marginLeft) {
this.marginLeft = marginLeft;
updateHorizontal();
} | 3.68 |
hbase_StoreFileTrackerFactory_create | /**
* Used at master side when splitting/merging regions, as we do not have a Store, thus no
* StoreContext at master side.
*/
public static StoreFileTracker create(Configuration conf, TableDescriptor td,
ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) {
StoreContext ctx =
StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs)
.withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build();
return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), true, ctx);
} | 3.68 |
flink_RocksDBResourceContainer_getWriteOptions | /** Gets the RocksDB {@link WriteOptions} to be used for write operations. */
public WriteOptions getWriteOptions() {
// Disable WAL by default
WriteOptions opt = new WriteOptions().setDisableWAL(true);
handlesToClose.add(opt);
// add user-defined options factory, if specified
if (optionsFactory != null) {
opt = optionsFactory.createWriteOptions(opt, handlesToClose);
}
return opt;
} | 3.68 |
hadoop_ServiceLauncher_createOptions | /**
* Override point: create an options instance to combine with the
* standard options set.
* <i>Important. Synchronize uses of {@link Option}</i>
* with {@code Option.class}
* @return the new options
*/
@SuppressWarnings("static-access")
protected Options createOptions() {
synchronized (Option.class) {
Options options = new Options();
Option oconf = Option.builder(ARG_CONF_SHORT).argName("configuration file")
.hasArg()
.desc("specify an application configuration file")
.longOpt(ARG_CONF)
.build();
Option confclass = Option.builder(ARG_CONFCLASS_SHORT).argName("configuration classname")
.hasArg()
.desc("Classname of a Hadoop Configuration subclass to load")
.longOpt(ARG_CONFCLASS)
.build();
Option property = Option.builder("D").argName("property=value")
.hasArg()
.desc("use value for given property")
.build();
options.addOption(oconf);
options.addOption(property);
options.addOption(confclass);
return options;
}
} | 3.68 |
hadoop_FileSystemStorageStatistics_isTracked | /**
* Return true if a statistic is being tracked.
*
* @return True only if the statistic is being tracked.
*/
@Override
public boolean isTracked(String key) {
for (String k: KEYS) {
if (k.equals(key)) {
return true;
}
}
return false;
} | 3.68 |
hbase_JVMClusterUtil_shutdown | /**
 * Shuts down the given master and region server threads.
 */
public static void shutdown(final List<MasterThread> masters,
final List<RegionServerThread> regionservers) {
LOG.debug("Shutting down HBase Cluster");
if (masters != null) {
// Do backups first.
JVMClusterUtil.MasterThread activeMaster = null;
for (JVMClusterUtil.MasterThread t : masters) {
// Master was killed but could be still considered as active. Check first if it is stopped.
if (!t.master.isStopped()) {
if (!t.master.isActiveMaster()) {
try {
t.master.stopMaster();
} catch (IOException e) {
LOG.error("Exception occurred while stopping master", e);
}
LOG.info("Stopped backup Master {} is stopped: {}", t.master.hashCode(),
t.master.isStopped());
} else {
if (activeMaster != null) {
LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode());
}
activeMaster = t;
LOG.debug("Found active master hash={}, stopped={}", t.master.hashCode(),
t.master.isStopped());
}
}
}
// Do active after.
if (activeMaster != null) {
try {
activeMaster.master.shutdown();
} catch (IOException e) {
LOG.error("Exception occurred in HMaster.shutdown()", e);
}
}
}
boolean wasInterrupted = false;
final long maxTime = EnvironmentEdgeManager.currentTime() + 30 * 1000;
if (regionservers != null) {
// first try nicely.
for (RegionServerThread t : regionservers) {
t.getRegionServer().stop("Shutdown requested");
}
for (RegionServerThread t : regionservers) {
long now = EnvironmentEdgeManager.currentTime();
if (t.isAlive() && !wasInterrupted && now < maxTime) {
try {
t.join(maxTime - now);
} catch (InterruptedException e) {
LOG.info("Got InterruptedException on shutdown - "
+ "not waiting anymore on region server ends", e);
wasInterrupted = true; // someone wants us to speed up.
}
}
}
// Let's try to interrupt the remaining threads if any.
for (int i = 0; i < 100; ++i) {
boolean atLeastOneLiveServer = false;
for (RegionServerThread t : regionservers) {
if (t.isAlive()) {
atLeastOneLiveServer = true;
try {
LOG.warn("RegionServerThreads remaining, give one more chance before interrupting");
t.join(1000);
} catch (InterruptedException e) {
wasInterrupted = true;
}
}
}
if (!atLeastOneLiveServer) break;
for (RegionServerThread t : regionservers) {
if (t.isAlive()) {
LOG.warn("RegionServerThreads taking too long to stop, interrupting; thread dump "
+ "if > 3 attempts: i=" + i);
if (i > 3) {
Threads.printThreadInfo(System.out, "Thread dump " + t.getName());
}
t.interrupt();
}
}
}
}
if (masters != null) {
for (JVMClusterUtil.MasterThread t : masters) {
while (t.master.isAlive() && !wasInterrupted) {
try {
// The below has been replaced to debug sometime hangs on end of
// tests.
// this.master.join():
Threads.threadDumpingIsAlive(t.master);
} catch (InterruptedException e) {
LOG.info(
"Got InterruptedException on shutdown - " + "not waiting anymore on master ends", e);
wasInterrupted = true;
}
}
}
}
LOG.info("Shutdown of " + ((masters != null) ? masters.size() : "0") + " master(s) and "
+ ((regionservers != null) ? regionservers.size() : "0") + " regionserver(s) "
+ (wasInterrupted ? "interrupted" : "complete"));
if (wasInterrupted) {
Thread.currentThread().interrupt();
}
} | 3.68 |