name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_StagingCommitter_abortTask | /**
* Abort the task.
* The API specifies that the task has not yet been committed, so there are
* no uploads that need to be cancelled.
* Accordingly just delete files on the local FS, and call abortTask in
* the wrapped committer.
* <b>Important: this may be called in the AM after a container failure.</b>
* When that occurs and the failed container was on a different host in the
* cluster, the local files will not be deleted.
* @param context task context
* @throws IOException any failure
*/
@Override
public void abortTask(TaskAttemptContext context) throws IOException {
// the API specifies that the task has not yet been committed, so there are
// no uploads that need to be cancelled. just delete files on the local FS.
try (DurationInfo d = new DurationInfo(LOG,
"Abort task %s", context.getTaskAttemptID())) {
deleteTaskAttemptPathQuietly(context);
deleteTaskWorkingPathQuietly(context);
wrappedCommitter.abortTask(context);
} catch (IOException e) {
LOG.error("{}: exception when aborting task {}",
getRole(), context.getTaskAttemptID(), e);
throw e;
}
} | 3.68 |
hbase_RSGroupInfoManagerImpl_getRSGroupAssignmentsByTable | /**
* This is an EXPENSIVE clone, but cloning is the safest thing to do. We can't hand out the
* original since it can change, and at least the load balancer wants to iterate this exported
* list. The load balancer should iterate over this cloned list because it excludes disabled
* tables and split parent regions. This method is invoked by {@link #balanceRSGroup}.
* @return A clone of current assignments for this group.
*/
Map<TableName, Map<ServerName, List<RegionInfo>>> getRSGroupAssignmentsByTable(
TableStateManager tableStateManager, String groupName) throws IOException {
Map<TableName, Map<ServerName, List<RegionInfo>>> result = Maps.newHashMap();
Set<TableName> tablesInGroupCache = new HashSet<>();
for (Map.Entry<RegionInfo, ServerName> entry : masterServices.getAssignmentManager()
.getRegionStates().getRegionAssignments().entrySet()) {
RegionInfo region = entry.getKey();
TableName tn = region.getTable();
ServerName server = entry.getValue();
if (isTableInGroup(tn, groupName, tablesInGroupCache)) {
if (
tableStateManager.isTableState(tn, TableState.State.DISABLED, TableState.State.DISABLING)
) {
continue;
}
if (region.isSplitParent()) {
continue;
}
result.computeIfAbsent(tn, k -> new HashMap<>())
.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
}
}
RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName);
for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) {
if (rsGroupInfo.containsServer(serverName.getAddress())) {
for (Map<ServerName, List<RegionInfo>> map : result.values()) {
map.computeIfAbsent(serverName, k -> Collections.emptyList());
}
}
}
return result;
} | 3.68 |
hudi_HoodiePipeline_partition | /**
* Add partition fields.
*/
public Builder partition(String... partitions) {
this.partitions = new ArrayList<>(Arrays.asList(partitions));
return this;
} | 3.68 |
hadoop_IOStatisticsBinding_trackDurationOfSupplier | /**
* Given a Java supplier, evaluate it while
* tracking the duration of the operation and success/failure.
* @param factory factory of duration trackers
* @param statistic statistic key
* @param input input callable.
* @param <B> return type.
* @return the output of the supplier.
*/
public static <B> B trackDurationOfSupplier(
@Nullable DurationTrackerFactory factory,
String statistic,
Supplier<B> input) {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
return input.get();
} catch (RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after any catch() call will have
// set the failed flag.
tracker.close();
}
} | 3.68 |
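A minimal usage sketch for `trackDurationOfSupplier` above; the statistic key, the work inside the supplier, and the import paths are assumptions, not taken from the source row.

```java
// Hypothetical usage sketch; package paths and the "block_load" key are assumptions.
import org.apache.hadoop.fs.statistics.DurationTrackerFactory;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfSupplier;

public class DurationTrackingExample {
  static byte[] loadBlock(DurationTrackerFactory factory) {
    // The supplier's duration and success/failure are recorded against "block_load".
    return trackDurationOfSupplier(factory, "block_load",
        () -> new byte[4096]); // placeholder for the real work
  }
}
```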
hadoop_FederationStateStoreFacade_getConf | /**
* Get the configuration.
*
* @return configuration object
*/
public Configuration getConf() {
return this.conf;
} | 3.68 |
hbase_HeapMemoryManager_isTunerOn | // Used by the test cases.
boolean isTunerOn() {
return this.tunerOn;
} | 3.68 |
hudi_AbstractTableFileSystemView_fetchLatestBaseFiles | /**
* Fetch latest base-files across all partitions.
*/
private Stream<HoodieBaseFile> fetchLatestBaseFiles() {
return fetchAllStoredFileGroups()
.filter(fg -> !isFileGroupReplaced(fg))
.map(fg -> Pair.of(fg.getFileGroupId(), getLatestBaseFile(fg)))
.filter(p -> p.getValue().isPresent())
.map(p -> addBootstrapBaseFileIfPresent(p.getKey(), p.getValue().get()));
} | 3.68 |
flink_BinarySegmentUtils_hashByWords | /**
* Hash segments to an int; numBytes must be aligned to 4 bytes.
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param numBytes the number bytes to hash.
*/
public static int hashByWords(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
return MurmurHashUtils.hashBytesByWords(segments[0], offset, numBytes);
} else {
return hashMultiSegByWords(segments, offset, numBytes);
}
} | 3.68 |
flink_PojoSerializerSnapshot_buildNewFieldSerializersIndex | /**
* Builds an index of fields to their corresponding serializers for the new {@link
* PojoSerializer} for faster field serializer lookups.
*/
private static <T> Map<Field, TypeSerializer<?>> buildNewFieldSerializersIndex(
PojoSerializer<T> newPojoSerializer) {
final Field[] newFields = newPojoSerializer.getFields();
final TypeSerializer<?>[] newFieldSerializers = newPojoSerializer.getFieldSerializers();
checkState(newFields.length == newFieldSerializers.length);
int numFields = newFields.length;
final Map<Field, TypeSerializer<?>> index =
CollectionUtil.newHashMapWithExpectedSize(numFields);
for (int i = 0; i < numFields; i++) {
index.put(newFields[i], newFieldSerializers[i]);
}
return index;
} | 3.68 |
flink_InstantiationUtil_checkForInstantiation | /**
* Performs a standard check whether the class can be instantiated by {@code
* Class#newInstance()}.
*
* @param clazz The class to check.
* @throws RuntimeException Thrown, if the class cannot be instantiated by {@code
* Class#newInstance()}.
*/
public static void checkForInstantiation(Class<?> clazz) {
final String errorMessage = checkForInstantiationError(clazz);
if (errorMessage != null) {
throw new RuntimeException(
"The class '" + clazz.getName() + "' is not instantiable: " + errorMessage);
}
} | 3.68 |
framework_LayoutManager_getPaddingRight | /**
* Gets the right padding of the given element, provided that it has been
* measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured right padding of the element in pixels.
*/
public int getPaddingRight(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getPaddingRight();
} | 3.68 |
framework_ConnectorTracker_writeObject | /* Special serialization to JsonObjects which are not serializable */
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
// Convert JsonObjects in diff state to String representation as
// JsonObject is not serializable
Map<ClientConnector, String> stringDiffStates = new HashMap<>(
diffStates.size() * 2);
for (ClientConnector key : diffStates.keySet()) {
stringDiffStates.put(key, diffStates.get(key).toString());
}
out.writeObject(stringDiffStates);
} | 3.68 |
dubbo_ClassUtils_isPresent | /**
* Test whether the specified class name is present in the given {@link ClassLoader}.
*
* @param className the name of {@link Class}
* @param classLoader {@link ClassLoader}
* @return If found, return <code>true</code>
* @since 2.7.6
*/
public static boolean isPresent(String className, ClassLoader classLoader) {
try {
forName(className, classLoader);
} catch (Exception ignored) { // Ignored
return false;
}
return true;
} | 3.68 |
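A short, hedged usage sketch for `isPresent`: probing for an optional dependency before touching it. The class name being probed and the import path for Dubbo's `ClassUtils` are assumptions.

```java
// Illustrative only; the probed class name and the ClassUtils package are assumptions.
import org.apache.dubbo.common.utils.ClassUtils;

public class OptionalDependencyCheck {
  public static void main(String[] args) {
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    // Returns false instead of throwing if the class cannot be loaded.
    boolean hasJackson = ClassUtils.isPresent("com.fasterxml.jackson.databind.ObjectMapper", cl);
    System.out.println(hasJackson ? "Jackson available" : "Jackson not on classpath");
  }
}
```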
flink_InputTypeStrategies_sequence | /**
* Strategy for a named function signature like {@code f(s STRING, n NUMERIC)} using a sequence
* of {@link ArgumentTypeStrategy}s.
*/
public static InputTypeStrategy sequence(
List<String> argumentNames, List<ArgumentTypeStrategy> strategies) {
return new SequenceInputTypeStrategy(strategies, argumentNames);
} | 3.68 |
hbase_Bytes_multiple | /**
* Create a byte array consisting of the given source bytes repeated {@code multiNum} times.
* @param srcBytes the source bytes to repeat
* @param multiNum the number of repetitions; a non-positive value yields an empty array
* @return byte array
*/
public static byte[] multiple(byte[] srcBytes, int multiNum) {
if (multiNum <= 0) {
return new byte[0];
}
byte[] result = new byte[srcBytes.length * multiNum];
for (int i = 0; i < multiNum; i++) {
System.arraycopy(srcBytes, 0, result, i * srcBytes.length, srcBytes.length);
}
return result;
} | 3.68 |
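A quick, hedged check of `Bytes.multiple`, assuming HBase's `org.apache.hadoop.hbase.util.Bytes` is on the classpath; the sample strings are arbitrary.

```java
// Repeating a short byte pattern with Bytes.multiple; sample values are arbitrary.
import org.apache.hadoop.hbase.util.Bytes;

public class BytesMultipleExample {
  public static void main(String[] args) {
    byte[] pattern = Bytes.toBytes("ab");
    byte[] repeated = Bytes.multiple(pattern, 3);
    System.out.println(Bytes.toString(repeated));           // ababab
    System.out.println(Bytes.multiple(pattern, 0).length);  // 0 -> non-positive count yields empty array
  }
}
```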
dubbo_SingleRouterChain_printRouterSnapshot | /**
* Store each router's input and output, and log the routing snapshot when warn logging is enabled.
*/
private void printRouterSnapshot(URL url, BitList<Invoker<T>> availableInvokers, Invocation invocation) {
if (logger.isWarnEnabled()) {
logRouterSnapshot(url, invocation, buildRouterSnapshot(url, availableInvokers, invocation));
}
} | 3.68 |
Activiti_ExclusiveGatewayActivityBehavior_leave | /**
* The default behaviour of BPMN, taking every outgoing sequence flow (where the condition evaluates to true), is not valid for an exclusive gateway.
*
* Hence, this behaviour is overridden and replaced by the correct behavior: selecting the first sequence flow which condition evaluates to true (or which hasn't got a condition) and leaving the
* activity through that sequence flow.
*
* If no sequence flow is selected (i.e. all conditions evaluate to false), then the default sequence flow is taken (if defined).
*/
@Override
public void leave(DelegateExecution execution) {
if (log.isDebugEnabled()) {
log.debug("Leaving exclusive gateway '{}'", execution.getCurrentActivityId());
}
ExclusiveGateway exclusiveGateway = (ExclusiveGateway) execution.getCurrentFlowElement();
if (Context.getProcessEngineConfiguration() != null && Context.getProcessEngineConfiguration().getEventDispatcher().isEnabled()) {
Context.getProcessEngineConfiguration().getEventDispatcher().dispatchEvent(
ActivitiEventBuilder.createActivityEvent(ActivitiEventType.ACTIVITY_COMPLETED, execution, exclusiveGateway));
}
SequenceFlow outgoingSequenceFlow = null;
SequenceFlow defaultSequenceFlow = null;
String defaultSequenceFlowId = exclusiveGateway.getDefaultFlow();
// Determine sequence flow to take
Iterator<SequenceFlow> sequenceFlowIterator = exclusiveGateway.getOutgoingFlows().iterator();
while (outgoingSequenceFlow == null && sequenceFlowIterator.hasNext()) {
SequenceFlow sequenceFlow = sequenceFlowIterator.next();
String skipExpressionString = sequenceFlow.getSkipExpression();
if (!SkipExpressionUtil.isSkipExpressionEnabled(execution, skipExpressionString)) {
boolean conditionEvaluatesToTrue = ConditionUtil.hasTrueCondition(sequenceFlow, execution);
if (conditionEvaluatesToTrue && (defaultSequenceFlowId == null || !defaultSequenceFlowId.equals(sequenceFlow.getId()))) {
if (log.isDebugEnabled()) {
log.debug("Sequence flow '{}'selected as outgoing sequence flow.", sequenceFlow.getId());
}
outgoingSequenceFlow = sequenceFlow;
}
} else if (SkipExpressionUtil.shouldSkipFlowElement(Context.getCommandContext(), execution, skipExpressionString)) {
outgoingSequenceFlow = sequenceFlow;
}
// Store it already, in case we need it later. Saves one extra loop.
if (defaultSequenceFlowId != null && defaultSequenceFlowId.equals(sequenceFlow.getId())) {
defaultSequenceFlow = sequenceFlow;
}
}
// We have to record the end here, or else we're already past it
Context.getCommandContext().getHistoryManager().recordActivityEnd((ExecutionEntity) execution, null);
// Leave the gateway
if (outgoingSequenceFlow != null) {
execution.setCurrentFlowElement(outgoingSequenceFlow);
} else {
if (defaultSequenceFlow != null) {
execution.setCurrentFlowElement(defaultSequenceFlow);
} else {
// No sequence flow could be found, not even a default one
throw new ActivitiException("No outgoing sequence flow of the exclusive gateway '" + exclusiveGateway.getId() + "' could be selected for continuing the process");
}
}
super.leave(execution);
} | 3.68 |
framework_Potus_setLastName | /**
* @param lastName
* the lastName to set
*/
public void setLastName(String lastName) {
this.lastName = lastName;
} | 3.68 |
pulsar_MathUtils_ceilDiv | /**
* Ceil version of Math.floorDiv().
* @param x the dividend
* @param y the divisor
* @return the smallest value that is larger than or equal to the algebraic quotient.
*
*/
public static int ceilDiv(int x, int y) {
return -Math.floorDiv(-x, y);
} | 3.68 |
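A few worked values for the ceiling-via-negated-floor trick above. The helper is re-declared locally (same one-line body) so the check is self-contained rather than relying on Pulsar's package layout.

```java
// Worked examples for ceilDiv(x, y) = -Math.floorDiv(-x, y); re-declared locally.
public class CeilDivExample {
  static int ceilDiv(int x, int y) {
    return -Math.floorDiv(-x, y); // same body as MathUtils.ceilDiv above
  }

  public static void main(String[] args) {
    System.out.println(ceilDiv(7, 3));        // 3  -> ceil(2.33)
    System.out.println(ceilDiv(6, 3));        // 2  -> exact division
    System.out.println(ceilDiv(-7, 3));       // -2 -> ceil(-2.33)
    System.out.println(Math.floorDiv(7, 3));  // 2, the floor counterpart for comparison
  }
}
```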
hbase_Permission_equalsExceptActions | /**
* Check if two permissions are equal regardless of actions. It is useful when merging a new
* permission with an existing permission, which requires comparing the two permissions' fields.
* @param obj instance
* @return true if equals, false otherwise
*/
public boolean equalsExceptActions(Object obj) {
return obj instanceof Permission;
} | 3.68 |
querydsl_TemporalExpression_before | /**
* Create a {@code this < right} expression
*
* @param right rhs of the comparison
* @return this < right
*/
public BooleanExpression before(Expression<T> right) {
return lt(right);
} | 3.68 |
pulsar_AuthorizationService_isSuperUserOrAdmin | /**
* Functions, sources, and sinks each have their own method in this class. This method first checks
* for super-user access, then for tenant admin access on the namespace's tenant.
*/
private CompletableFuture<Boolean> isSuperUserOrAdmin(NamespaceName namespaceName,
String role,
AuthenticationDataSource authenticationData) {
return isSuperUser(role, authenticationData)
.thenCompose(isSuperUserOrAdmin -> isSuperUserOrAdmin
? CompletableFuture.completedFuture(true)
: isTenantAdmin(namespaceName.getTenant(), role, authenticationData));
} | 3.68 |
hbase_HFileArchiver_getFileSystem | /** Returns the {@link FileSystem} on which this file resides */
public FileSystem getFileSystem() {
return this.fs;
} | 3.68 |
hbase_ColumnCount_decrement | /**
* Decrement the current version count
* @return current count
*/
public int decrement() {
return --count;
} | 3.68 |
hadoop_DataNodeFaultInjector_interceptFreeBlockReaderBuffer | /**
* Used as a hook to inject an intercept when freeing the block reader buffer.
*/
public void interceptFreeBlockReaderBuffer() {} | 3.68 |
framework_AbstractGridRendererConnector_getRowKey | /**
* Gets the row key for a row object.
* <p>
* In case this renderer wants to be able to identify a row in such a way that
* the server also understands it, the row key is used for that. Rows are
* identified by unified keys between the client and the server.
*
* @param row
* the row object
* @return the row key for the given row
*/
protected String getRowKey(JsonObject row) {
return row.getString(DataCommunicatorConstants.KEY);
} | 3.68 |
flink_BinaryExternalSorter_startThreads | /** Starts all the threads that are used by this sorter. */
public void startThreads() {
if (this.sortThread != null) {
this.sortThread.start();
}
if (this.spillThread != null) {
this.spillThread.start();
}
if (this.mergeThread != null) {
this.mergeThread.start();
}
} | 3.68 |
hbase_MultithreadedTableMapper_setMapperClass | /**
* Set the application's mapper class.
* @param <K2> the map output key type
* @param <V2> the map output value type
* @param job the job to modify
* @param cls the class to use as the mapper
*/
public static <K2, V2> void setMapperClass(Job job,
Class<? extends Mapper<ImmutableBytesWritable, Result, K2, V2>> cls) {
if (MultithreadedTableMapper.class.isAssignableFrom(cls)) {
throw new IllegalArgumentException(
"Can't have recursive " + "MultithreadedTableMapper instances.");
}
job.getConfiguration().setClass(MAPPER_CLASS, cls, Mapper.class);
} | 3.68 |
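A hedged sketch of wiring an inner mapper into `MultithreadedTableMapper`: the outer job mapper is the multithreaded wrapper, while `setMapperClass` registers the worker mapper it fans out to. The mapper class, job name, and surrounding table/scan setup are hypothetical.

```java
// Sketch only; the RowCountMapper class and job name are hypothetical, and the
// usual TableMapReduceUtil/scan setup is omitted for brevity.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;

public class MultithreadedMapperSetup {
  /** A trivial worker mapper; a real one would do per-row work in map(). */
  public static class RowCountMapper
      extends Mapper<ImmutableBytesWritable, Result, Text, IntWritable> {
  }

  public static Job configure(Configuration conf) throws Exception {
    Job job = Job.getInstance(conf, "example-job");
    job.setMapperClass(MultithreadedTableMapper.class);                 // outer multithreaded wrapper
    MultithreadedTableMapper.setMapperClass(job, RowCountMapper.class); // inner worker mapper
    return job;
  }
}
```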
hbase_Log4jUtils_getMethod | // Load the class at call time, to avoid a ClassNotFoundException from log4j when this class is
// loaded without any of the methods below being called.
private static Method getMethod(String methodName, Class<?>... args) {
try {
Class<?> clazz = Class.forName(INTERNAL_UTILS_CLASS_NAME);
return clazz.getDeclaredMethod(methodName, args);
} catch (ClassNotFoundException | NoSuchMethodException e) {
throw new AssertionError("should not happen", e);
}
} | 3.68 |
graphhopper_VectorTile_addAllValues | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public Builder addAllValues(
java.lang.Iterable<? extends vector_tile.VectorTile.Tile.Value> values) {
if (valuesBuilder_ == null) {
ensureValuesIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, values_);
onChanged();
} else {
valuesBuilder_.addAllMessages(values);
}
return this;
} | 3.68 |
hbase_TableRecordReader_getProgress | /**
* The current progress of the record reader through its data.
* @return A number between 0.0 and 1.0, the fraction of the data read.
* @see org.apache.hadoop.mapreduce.RecordReader#getProgress()
*/
@Override
public float getProgress() {
return this.recordReaderImpl.getProgress();
} | 3.68 |
framework_VFlash_setAlternateText | /**
* Sets this component's alternate text that can be presented instead of the
* component's normal content for accessibility purposes.
*
* @param altText
* a short, human-readable description of this component's
* content
*/
public void setAlternateText(String altText) {
if (this.altText != altText) {
this.altText = altText;
needsRebuild = true;
}
} | 3.68 |
hbase_WALInputFormat_getSplits | /**
* Implementation shared with the deprecated HLogInputFormat.
*/
List<InputSplit> getSplits(final JobContext context, final String startKey, final String endKey)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
boolean ignoreMissing = conf.getBoolean(WALPlayer.IGNORE_MISSING_FILES, false);
Path[] inputPaths = getInputPaths(conf);
// get delegation token for the filesystem
TokenCache.obtainTokensForNamenodes(context.getCredentials(), inputPaths, conf);
long startTime = conf.getLong(startKey, Long.MIN_VALUE);
long endTime = conf.getLong(endKey, Long.MAX_VALUE);
List<FileStatus> allFiles = new ArrayList<FileStatus>();
for (Path inputPath : inputPaths) {
FileSystem fs = inputPath.getFileSystem(conf);
try {
List<FileStatus> files = getFiles(fs, inputPath, startTime, endTime);
allFiles.addAll(files);
} catch (FileNotFoundException e) {
if (ignoreMissing) {
LOG.warn("File " + inputPath + " is missing. Skipping it.");
continue;
}
throw e;
}
}
List<InputSplit> splits = new ArrayList<InputSplit>(allFiles.size());
for (FileStatus file : allFiles) {
splits.add(new WALSplit(file.getPath().toString(), file.getLen(), startTime, endTime));
}
return splits;
} | 3.68 |
hbase_FanOutOneBlockAsyncDFSOutput_failed | // This usually does not happen, which means it is not on the critical path, so make it
// synchronized so that the implementation stays simple to reason about despite the multiple
// state changes and checks.
private synchronized void failed(Channel channel, Supplier<Throwable> errorSupplier) {
if (state == State.CLOSED) {
return;
}
if (state == State.BROKEN) {
failWaitingAckQueue(channel, errorSupplier);
return;
}
if (state == State.CLOSING) {
Callback c = waitingAckQueue.peekFirst();
if (c == null || !c.unfinishedReplicas.contains(channel.id())) {
// nothing, the endBlock request has already finished.
return;
}
}
// disable further write, and fail all pending ack.
state = State.BROKEN;
failWaitingAckQueue(channel, errorSupplier);
datanodeInfoMap.keySet().forEach(NettyFutureUtils::safeClose);
} | 3.68 |
hadoop_ExecutionSummarizer_stringifyDataStatistics | // Gets the stringified version of DataStatistics
static String stringifyDataStatistics(DataStatistics stats) {
if (stats != null) {
StringBuffer buffer = new StringBuffer();
String compressionStatus = stats.isDataCompressed()
? "Compressed"
: "Uncompressed";
buffer.append(compressionStatus).append(" input data size: ");
buffer.append(StringUtils.humanReadableInt(stats.getDataSize()));
buffer.append(", ");
buffer.append("Number of files: ").append(stats.getNumFiles());
return buffer.toString();
} else {
return Summarizer.NA;
}
} | 3.68 |
pulsar_SaslRoleTokenSigner_sign | /**
* Returns a signed string.
* <p/>
* The signature '&s=SIGNATURE' is appended at the end of the string.
*
* @param str string to sign.
*
* @return the signed string.
*/
public String sign(String str) {
if (str == null || str.length() == 0) {
throw new IllegalArgumentException("NULL or empty string to sign");
}
String signature = computeSignature(str);
return str + SIGNATURE + signature;
} | 3.68 |
cron-utils_RebootCron_validate | /**
* Validates this Cron instance by validating its cron expression.
*
* @return this Cron instance
* @throws IllegalArgumentException if the cron expression is invalid
*/
public Cron validate() {
for (final Map.Entry<CronFieldName, CronField> field : retrieveFieldsAsMap().entrySet()) {
final CronFieldName fieldName = field.getKey();
field.getValue().getExpression().accept(
new ValidationFieldExpressionVisitor(getCronDefinition().getFieldDefinition(fieldName).getConstraints())
);
}
for (final CronConstraint constraint : getCronDefinition().getCronConstraints()) {
if (!constraint.validate(this)) {
throw new IllegalArgumentException(String.format("Invalid cron expression: %s. %s", asString(), constraint.getDescription()));
}
}
return this;
} | 3.68 |
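A hedged usage sketch with cron-utils: parse an expression against a standard definition and call `validate()`. The choice of the UNIX definition and the expression itself are arbitrary, and the builder/parser calls reflect the cron-utils API as assumed here, not something taken from this row.

```java
// Illustrative cron-utils usage; the UNIX definition and expression are arbitrary choices.
import com.cronutils.model.Cron;
import com.cronutils.model.CronType;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;

public class CronValidateExample {
  public static void main(String[] args) {
    CronParser parser = new CronParser(
        CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX));
    Cron cron = parser.parse("0 3 * * *"); // every day at 03:00
    cron.validate(); // throws IllegalArgumentException if a field or cron constraint is invalid
  }
}
```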
morf_Function_sum | /**
* Helper method to create an instance of the "sum" SQL function.
*
* @param fieldToEvaluate the field to evaluate in the sum function. This can be any expression resulting in a single column of data.
* @return an instance of the sum function
*/
public static Function sum(AliasedField fieldToEvaluate) {
return new Function(FunctionType.SUM, fieldToEvaluate);
} | 3.68 |
flink_Router_allowedMethods | /**
* Returns allowed methods for a specific URI.
*
* <p>For {@code OPTIONS *}, use {@link #allAllowedMethods()} instead of this method.
*/
public Set<HttpMethod> allowedMethods(String uri) {
QueryStringDecoder decoder = new QueryStringDecoder(uri);
String[] tokens = PathPattern.removeSlashesAtBothEnds(decoder.path()).split("/");
if (anyMethodRouter.anyMatched(tokens)) {
return allAllowedMethods();
}
Set<HttpMethod> ret = new HashSet<HttpMethod>(routers.size());
for (Map.Entry<HttpMethod, MethodlessRouter<T>> entry : routers.entrySet()) {
MethodlessRouter<T> router = entry.getValue();
if (router.anyMatched(tokens)) {
HttpMethod method = entry.getKey();
ret.add(method);
}
}
return ret;
} | 3.68 |
hbase_MiniBatchOperationInProgress_getOperationStatus | /** Returns the status code for the operation (Mutation) at the specified position. */
public OperationStatus getOperationStatus(int index) {
return this.retCodeDetails[getAbsoluteIndex(index)];
} | 3.68 |
hadoop_PageBlobFormatHelpers_fromShort | /**
* Stores the given short as a two-byte array.
*/
public static byte[] fromShort(short s) {
return ByteBuffer.allocate(2).putShort(s).array();
} | 3.68 |
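The byte layout produced by `fromShort`: `ByteBuffer`'s default order is big-endian, so the high byte comes first. The helper is re-declared locally so the check compiles on its own.

```java
// Big-endian layout of fromShort; same one-line body as the helper above.
import java.nio.ByteBuffer;
import java.util.Arrays;

public class FromShortExample {
  static byte[] fromShort(short s) {
    return ByteBuffer.allocate(2).putShort(s).array();
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(fromShort((short) 1)));       // [0, 1]
    System.out.println(Arrays.toString(fromShort((short) 0x1234)));  // [18, 52] = 0x12, 0x34
    System.out.println(Arrays.toString(fromShort((short) -1)));      // [-1, -1] = 0xFF, 0xFF
  }
}
```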
hudi_WriteMarkers_create | /**
* Creates a marker without checking if the marker already exists.
* This can invoke marker-based early conflict detection when enabled for multi-writers.
*
* @param partitionPath partition path in the table
* @param fileName file name
* @param type write IO type
* @param writeConfig Hudi write configs.
* @param fileId File ID.
* @param activeTimeline Active timeline for the write operation.
* @return the marker path.
*/
public Option<Path> create(String partitionPath, String fileName, IOType type, HoodieWriteConfig writeConfig,
String fileId, HoodieActiveTimeline activeTimeline) {
if (writeConfig.getWriteConcurrencyMode().isOptimisticConcurrencyControl() && writeConfig.isEarlyConflictDetectionEnable()) {
HoodieTimeline pendingCompactionTimeline = activeTimeline.filterPendingCompactionTimeline();
HoodieTimeline pendingReplaceTimeline = activeTimeline.filterPendingReplaceTimeline();
// TODO If current is compact or clustering then create marker directly without early conflict detection.
// Need to support early conflict detection between table service and common writers.
if (pendingCompactionTimeline.containsInstant(instantTime) || pendingReplaceTimeline.containsInstant(instantTime)) {
return create(partitionPath, fileName, type, false);
}
return createWithEarlyConflictDetection(partitionPath, fileName, type, false, writeConfig, fileId, activeTimeline);
}
return create(partitionPath, fileName, type, false);
} | 3.68 |
framework_AbstractInMemoryContainer_doSort | /**
* Perform the sorting of the data structures in the container. This is
* invoked when the <code>itemSorter</code> has been prepared for the sort
* operation. Typically this method calls
* <code>Collections.sort(aCollection, getItemSorter())</code> on all arrays
* (containing item ids) that need to be sorted.
*
*/
protected void doSort() {
Collections.sort(getAllItemIds(), getItemSorter());
} | 3.68 |
hbase_StripeStoreFileManager_loadUnclassifiedStoreFiles | /**
* Loads initial store files that were picked up from some physical location pertaining to this
* store (presumably). Unlike adding files after compaction, assumes empty initial sets, and is
* forgiving with regard to stripe constraints - at worst, many/all files will go to level 0.
* @param storeFiles Store files to add.
*/
private void loadUnclassifiedStoreFiles(List<HStoreFile> storeFiles) {
LOG.debug("Attempting to load " + storeFiles.size() + " store files.");
TreeMap<byte[], ArrayList<HStoreFile>> candidateStripes = new TreeMap<>(MAP_COMPARATOR);
ArrayList<HStoreFile> level0Files = new ArrayList<>();
// Separate the files into tentative stripes; then validate. Currently, we rely on metadata.
// If needed, we could dynamically determine the stripes in future.
for (HStoreFile sf : storeFiles) {
byte[] startRow = startOf(sf), endRow = endOf(sf);
// Validate the range and put the files into place.
if (isInvalid(startRow) || isInvalid(endRow)) {
insertFileIntoStripe(level0Files, sf); // No metadata - goes to L0.
ensureLevel0Metadata(sf);
} else if (!isOpen(startRow) && !isOpen(endRow) && nonOpenRowCompare(startRow, endRow) >= 0) {
LOG.error("Unexpected metadata - start row [" + Bytes.toString(startRow) + "], end row ["
+ Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0");
insertFileIntoStripe(level0Files, sf); // Bad metadata - goes to L0 also.
ensureLevel0Metadata(sf);
} else {
ArrayList<HStoreFile> stripe = candidateStripes.get(endRow);
if (stripe == null) {
stripe = new ArrayList<>();
candidateStripes.put(endRow, stripe);
}
insertFileIntoStripe(stripe, sf);
}
}
// Possible improvement - for variable-count stripes, if all the files are in L0, we can
// instead create single, open-ended stripe with all files.
boolean hasOverlaps = false;
byte[] expectedStartRow = null; // first stripe can start wherever
Iterator<Map.Entry<byte[], ArrayList<HStoreFile>>> entryIter =
candidateStripes.entrySet().iterator();
while (entryIter.hasNext()) {
Map.Entry<byte[], ArrayList<HStoreFile>> entry = entryIter.next();
ArrayList<HStoreFile> files = entry.getValue();
// Validate the file start rows, and remove the bad ones to level 0.
for (int i = 0; i < files.size(); ++i) {
HStoreFile sf = files.get(i);
byte[] startRow = startOf(sf);
if (expectedStartRow == null) {
expectedStartRow = startRow; // ensure that first stripe is still consistent
} else if (!rowEquals(expectedStartRow, startRow)) {
hasOverlaps = true;
LOG.warn("Store file doesn't fit into the tentative stripes - expected to start at ["
+ Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow)
+ "], to L0 it goes");
HStoreFile badSf = files.remove(i);
insertFileIntoStripe(level0Files, badSf);
ensureLevel0Metadata(badSf);
--i;
}
}
// Check if any files from the candidate stripe are valid. If so, add a stripe.
byte[] endRow = entry.getKey();
if (!files.isEmpty()) {
expectedStartRow = endRow; // Next stripe must start exactly at that key.
} else {
entryIter.remove();
}
}
// In the end, there must be open ends on two sides. If not, and there were no errors i.e.
// files are consistent, they might be coming from a split. We will treat the boundaries
// as open keys anyway, and log the message.
// If there were errors, we'll play it safe and dump everything into L0.
if (!candidateStripes.isEmpty()) {
HStoreFile firstFile = candidateStripes.firstEntry().getValue().get(0);
boolean isOpen = isOpen(startOf(firstFile)) && isOpen(candidateStripes.lastKey());
if (!isOpen) {
LOG.warn("The range of the loaded files does not cover full key space: from ["
+ Bytes.toString(startOf(firstFile)) + "], to ["
+ Bytes.toString(candidateStripes.lastKey()) + "]");
if (!hasOverlaps) {
ensureEdgeStripeMetadata(candidateStripes.firstEntry().getValue(), true);
ensureEdgeStripeMetadata(candidateStripes.lastEntry().getValue(), false);
} else {
LOG.warn("Inconsistent files, everything goes to L0.");
for (ArrayList<HStoreFile> files : candidateStripes.values()) {
for (HStoreFile sf : files) {
insertFileIntoStripe(level0Files, sf);
ensureLevel0Metadata(sf);
}
}
candidateStripes.clear();
}
}
}
// Copy the results into the fields.
State state = new State();
state.level0Files = ImmutableList.copyOf(level0Files);
state.stripeFiles = new ArrayList<>(candidateStripes.size());
state.stripeEndRows = new byte[Math.max(0, candidateStripes.size() - 1)][];
ArrayList<HStoreFile> newAllFiles = new ArrayList<>(level0Files);
int i = candidateStripes.size() - 1;
for (Map.Entry<byte[], ArrayList<HStoreFile>> entry : candidateStripes.entrySet()) {
state.stripeFiles.add(ImmutableList.copyOf(entry.getValue()));
newAllFiles.addAll(entry.getValue());
if (i > 0) {
state.stripeEndRows[state.stripeFiles.size() - 1] = entry.getKey();
}
--i;
}
state.allFilesCached = ImmutableList.copyOf(newAllFiles);
this.state = state;
debugDumpState("Files loaded");
} | 3.68 |
hbase_RSProcedureDispatcher_sendRequest | // will be overridden in test.
protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
final ExecuteProceduresRequest request) throws IOException {
return FutureUtils.get(getRsAdmin().executeProcedures(request));
} | 3.68 |
querydsl_JTSGeometryExpressions_fromText | /**
* Return a specified ST_Geometry value from Well-Known Text representation (WKT).
*
* @param text WKT form
* @return geometry
*/
public static JTSGeometryExpression<?> fromText(Expression<String> text) {
return geometryOperation(SpatialOps.GEOM_FROM_TEXT, text);
} | 3.68 |
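A hedged sketch of calling `fromText` with a WKT literal wrapped as an `Expression<String>`; the WKT value and the Querydsl import paths are assumptions.

```java
// Sketch only; the WKT literal is arbitrary and the import paths are assumed.
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.spatial.jts.JTSGeometryExpression;
import com.querydsl.spatial.jts.JTSGeometryExpressions;

public class FromTextExample {
  public static JTSGeometryExpression<?> pointFromWkt() {
    // Wrap the WKT string as an Expression<String> and build the geometry expression.
    return JTSGeometryExpressions.fromText(Expressions.constant("POINT(30 10)"));
  }
}
```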
hbase_HFileCorruptionChecker_checkColFamDir | /**
* Check all files in a column family dir.
* @param cfDir column family directory
*/
protected void checkColFamDir(Path cfDir) throws IOException {
FileStatus[] statuses = null;
try {
statuses = fs.listStatus(cfDir); // use same filter as scanner.
} catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn("Colfam Directory " + cfDir
+ " does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(cfDir);
return;
}
List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (hfs.isEmpty() && !fs.exists(cfDir)) {
LOG.warn("Colfam Directory " + cfDir
+ " does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(cfDir);
return;
}
LOG.info("Checking Column Family Directory {}. Number of entries = {}", cfDir, hfs.size());
for (FileStatus hfFs : hfs) {
Path hf = hfFs.getPath();
checkHFile(hf);
}
} | 3.68 |
zxing_Detector_sampleLine | /**
* Samples a line.
*
* @param p1 start point (inclusive)
* @param p2 end point (exclusive)
* @param size number of bits
* @return the array of bits as an int (first bit is high-order bit of result)
*/
private int sampleLine(ResultPoint p1, ResultPoint p2, int size) {
int result = 0;
float d = distance(p1, p2);
float moduleSize = d / size;
float px = p1.getX();
float py = p1.getY();
float dx = moduleSize * (p2.getX() - p1.getX()) / d;
float dy = moduleSize * (p2.getY() - p1.getY()) / d;
for (int i = 0; i < size; i++) {
if (image.get(MathUtils.round(px + i * dx), MathUtils.round(py + i * dy))) {
result |= 1 << (size - i - 1);
}
}
return result;
} | 3.68 |
framework_AbstractComponent_setResponsive | /**
* Toggles responsiveness of this component.
*
* @since 7.5.0
* @param responsive
* true enables responsiveness, false disables it
*/
public void setResponsive(boolean responsive) {
if (responsive) {
// make responsive if necessary
if (!isResponsive()) {
Responsive.makeResponsive(this);
}
} else {
// remove responsive extensions
List<Extension> extensions = new ArrayList<>(getExtensions());
for (Extension e : extensions) {
if (e instanceof Responsive) {
removeExtension(e);
}
}
}
} | 3.68 |
hbase_RestoreTablesClient_restore | /**
* Restore operation. Stage 2: resolve Backup Image dependencies.
* @param backupManifestMap : tableName, Manifest
* @param sTableArray The array of tables to be restored
* @param tTableArray The array of mapping tables to restore to
* @throws IOException exception
*/
private void restore(HashMap<TableName, BackupManifest> backupManifestMap,
TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
TreeSet<BackupImage> restoreImageSet = new TreeSet<>();
for (int i = 0; i < sTableArray.length; i++) {
TableName table = sTableArray[i];
BackupManifest manifest = backupManifestMap.get(table);
// Get the image list of this backup for restore in time order from old
// to new.
List<BackupImage> list = new ArrayList<>();
list.add(manifest.getBackupImage());
TreeSet<BackupImage> set = new TreeSet<>(list);
List<BackupImage> depList = manifest.getDependentListByTable(table);
set.addAll(depList);
BackupImage[] arr = new BackupImage[set.size()];
set.toArray(arr);
restoreImages(arr, table, tTableArray[i], isOverwrite);
restoreImageSet.addAll(list);
if (restoreImageSet != null && !restoreImageSet.isEmpty()) {
LOG.info("Restore includes the following image(s):");
for (BackupImage image : restoreImageSet) {
LOG.info("Backup: " + image.getBackupId() + " "
+ HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
}
}
}
LOG.debug("restoreStage finished");
} | 3.68 |
framework_CompositeErrorMessage_toString | /**
* Returns a comma separated list of the error messages.
*
* @return String, comma separated list of error messages.
*/
@Override
public String toString() {
String retval = "[";
int pos = 0;
for (ErrorMessage errorMessage : getCauses()) {
if (pos > 0) {
retval += ",";
}
pos++;
retval += errorMessage.toString();
}
retval += "]";
return retval;
} | 3.68 |
flink_CastRuleProvider_canFail | /**
* Resolves the rule and returns the result of {@link CastRule#canFail(LogicalType,
* LogicalType)}. Fails with {@link NullPointerException} if the rule cannot be resolved.
*/
public static boolean canFail(LogicalType inputType, LogicalType targetType) {
return Preconditions.checkNotNull(
resolve(inputType, targetType), "Cast rule cannot be resolved")
.canFail(inputType, targetType);
} | 3.68 |
hbase_RegionSizeReportingChore_getInitialDelay | /**
* Extracts the initial delay for the chore from the configuration.
* @param conf The configuration object.
* @return The configured chore initial delay or the default value.
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(REGION_SIZE_REPORTING_CHORE_DELAY_KEY,
REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT);
} | 3.68 |
hbase_HRegionFileSystem_deleteDir | /**
* Deletes a directory. Assumes the user has already checked for this directory existence.
* @return true if the directory is deleted.
*/
boolean deleteDir(Path dir) throws IOException {
IOException lastIOE = null;
int i = 0;
do {
try {
return fs.delete(dir, true);
} catch (IOException ioe) {
lastIOE = ioe;
if (!fs.exists(dir)) return true;
// dir is there, retry deleting after some time.
try {
sleepBeforeRetry("Delete Directory", i + 1);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
}
} while (++i <= hdfsClientRetriesNumber);
throw new IOException("Exception in DeleteDir", lastIOE);
} | 3.68 |
flink_ExceptionUtils_findThrowableWithMessage | /**
* Checks whether a throwable chain contains a specific error message and returns the
* corresponding throwable.
*
* @param throwable the throwable chain to check.
* @param searchMessage the error message to search for in the chain.
* @return Optional throwable containing the search message if available, otherwise empty
*/
public static Optional<Throwable> findThrowableWithMessage(
Throwable throwable, String searchMessage) {
if (throwable == null || searchMessage == null) {
return Optional.empty();
}
Throwable t = throwable;
while (t != null) {
if (t.getMessage() != null && t.getMessage().contains(searchMessage)) {
return Optional.of(t);
} else {
t = t.getCause();
}
}
return Optional.empty();
} | 3.68 |
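A small usage sketch for `findThrowableWithMessage`: search a cause chain for a known message fragment. The exception messages are invented; only the method under discussion is taken from the row above.

```java
// The wrapped exceptions and their messages are invented for illustration.
import java.util.Optional;
import org.apache.flink.util.ExceptionUtils;

public class FindThrowableExample {
  public static void main(String[] args) {
    Exception root = new IllegalStateException("buffer pool is destroyed");
    Exception wrapped = new RuntimeException("task failed", root);

    Optional<Throwable> hit = ExceptionUtils.findThrowableWithMessage(wrapped, "buffer pool");
    System.out.println(hit.isPresent());                            // true, matched the nested cause
    System.out.println(hit.map(Throwable::getMessage).orElse("-")); // buffer pool is destroyed
  }
}
```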
hbase_TableInputFormat_createScanFromConfiguration | /**
* Sets up a {@link Scan} instance, applying settings from the configuration property constants
* defined in {@code TableInputFormat}. This allows specifying things such as:
* <ul>
* <li>start and stop rows</li>
* <li>column qualifiers or families</li>
* <li>timestamps or timerange</li>
* <li>scanner caching and batch size</li>
* </ul>
*/
public static Scan createScanFromConfiguration(Configuration conf) throws IOException {
Scan scan = new Scan();
if (conf.get(SCAN_ROW_START) != null) {
scan.withStartRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_START)));
}
if (conf.get(SCAN_ROW_STOP) != null) {
scan.withStopRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_STOP)));
}
if (conf.get(SCAN_COLUMNS) != null) {
addColumns(scan, conf.get(SCAN_COLUMNS));
}
for (String columnFamily : conf.getTrimmedStrings(SCAN_COLUMN_FAMILY)) {
scan.addFamily(Bytes.toBytes(columnFamily));
}
if (conf.get(SCAN_TIMESTAMP) != null) {
scan.setTimestamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
}
if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) {
scan.setTimeRange(Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
}
if (conf.get(SCAN_MAXVERSIONS) != null) {
scan.readVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
}
if (conf.get(SCAN_CACHEDROWS) != null) {
scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
}
if (conf.get(SCAN_BATCHSIZE) != null) {
scan.setBatch(Integer.parseInt(conf.get(SCAN_BATCHSIZE)));
}
// false by default, full table scans generate too much BC churn
scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
return scan;
} | 3.68 |
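A hedged sketch of driving `createScanFromConfiguration` purely through configuration keys; the row keys, column family, and caching value are hypothetical, and the `SCAN_*` constants are assumed to be the public `TableInputFormat` constants the method reads.

```java
// Hypothetical values; the SCAN_* constants are assumed to be public on TableInputFormat.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

public class ScanFromConfExample {
  public static Scan build() throws Exception {
    Configuration conf = new Configuration();
    conf.set(TableInputFormat.SCAN_ROW_START, "row-0000");
    conf.set(TableInputFormat.SCAN_ROW_STOP, "row-9999");
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf");
    conf.set(TableInputFormat.SCAN_CACHEDROWS, "500");
    return TableInputFormat.createScanFromConfiguration(conf);
  }
}
```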
flink_ResourceManager_closeTaskManagerConnection | /**
* This method should be called by the framework once it detects that a currently registered
* task executor has failed.
*
* @param resourceID Id of the TaskManager that has failed.
* @param cause The exception which caused the TaskManager to fail.
* @return The {@link WorkerType} of the closed connection, or empty if already removed.
*/
protected Optional<WorkerType> closeTaskManagerConnection(
final ResourceID resourceID, final Exception cause) {
taskManagerHeartbeatManager.unmonitorTarget(resourceID);
WorkerRegistration<WorkerType> workerRegistration = taskExecutors.remove(resourceID);
if (workerRegistration != null) {
log.info(
"Closing TaskExecutor connection {} because: {}",
resourceID.getStringWithMetadata(),
cause.getMessage(),
ExceptionUtils.returnExceptionIfUnexpected(cause.getCause()));
ExceptionUtils.logExceptionIfExcepted(cause.getCause(), log);
// TODO :: suggest failed task executor to stop itself
slotManager.unregisterTaskManager(workerRegistration.getInstanceID(), cause);
clusterPartitionTracker.processTaskExecutorShutdown(resourceID);
workerRegistration.getTaskExecutorGateway().disconnectResourceManager(cause);
} else {
log.debug(
"No open TaskExecutor connection {}. Ignoring close TaskExecutor connection. Closing reason was: {}",
resourceID.getStringWithMetadata(),
cause.getMessage());
}
return Optional.ofNullable(workerRegistration).map(WorkerRegistration::getWorker);
} | 3.68 |
hadoop_RouterResolver_updateSubclusterMapping | /**
* Update <NamespaceId, Subcluster Info> mapping info periodically.
*/
private synchronized void updateSubclusterMapping() {
if (subclusterMapping == null
|| (monotonicNow() - lastUpdated) > minUpdateTime) {
// Fetch the mapping asynchronously
Thread updater = new Thread(new Runnable() {
@Override
public void run() {
final MembershipStore membershipStore = getMembershipStore();
if (membershipStore == null) {
LOG.error("Cannot access the Membership store.");
return;
}
subclusterMapping = getSubclusterInfo(membershipStore);
lastUpdated = monotonicNow();
}
});
updater.start();
// Wait until initialized
if (subclusterMapping == null) {
try {
LOG.debug("Wait to get the mapping for the first time");
updater.join();
} catch (InterruptedException e) {
LOG.error("Cannot wait for the updater to finish");
}
}
}
} | 3.68 |
flink_HiveParserQB_getSkewedColumnNames | /** Retrieve skewed column name for a table. */
public List<String> getSkewedColumnNames(String alias) {
// currently, skew column means nothing for flink, so we just return an empty list.
return Collections.emptyList();
} | 3.68 |
morf_MySqlDialect_indexDeploymentStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#indexDeploymentStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Index)
*/
@Override
protected Collection<String> indexDeploymentStatements(Table table, Index index) {
StringBuilder statement = new StringBuilder();
statement.append("ALTER TABLE `");
statement.append(table.getName());
statement.append("` ADD ");
if (index.isUnique()) {
statement.append("UNIQUE ");
}
statement.append("INDEX `")
.append(index.getName())
.append("` (`")
.append(Joiner.on("`, `").join(index.columnNames()))
.append("`)");
return Collections.singletonList(statement.toString());
} | 3.68 |
AreaShop_FileManager_getRegionSettings | /**
* Get the default region settings as provided by the user (default.yml).
* @return YamlConfiguration with the settings (might miss settings, which should be filled in with {@link #getFallbackRegionSettings()})
*/
public YamlConfiguration getRegionSettings() {
return defaultConfig;
} | 3.68 |
flink_GeneratedClass_compile | /**
* Compiles the generated code, the compiled class will be cached in the {@link GeneratedClass}.
*/
public Class<T> compile(ClassLoader classLoader) {
if (compiledClass == null) {
// cache the compiled class
try {
// first try to compile the split code
compiledClass = CompileUtils.compile(classLoader, className, splitCode);
} catch (Throwable t) {
// compile the original code as fallback
LOG.warn("Failed to compile split code, falling back to original code", t);
compiledClass = CompileUtils.compile(classLoader, className, code);
}
}
return compiledClass;
} | 3.68 |
flink_NFACompiler_getIgnoreCondition | /**
* @return The {@link IterativeCondition condition} for the {@code IGNORE} edge that
* corresponds to the specified {@link Pattern} and extended with stop(until) condition
* if necessary. For more on strategy see {@link Quantifier}
*/
@SuppressWarnings("unchecked")
private IterativeCondition<T> getIgnoreCondition(Pattern<T, ?> pattern) {
Quantifier.ConsumingStrategy consumingStrategy =
pattern.getQuantifier().getConsumingStrategy();
if (headOfGroup(pattern)) {
// for the head pattern of a group pattern, we should consider the inner consume
// strategy
// of the group pattern if the group pattern is not the head of the TIMES/LOOPING
// quantifier;
// otherwise, we should consider the consume strategy of the group pattern
if (isCurrentGroupPatternFirstOfLoop()) {
consumingStrategy = currentGroupPattern.getQuantifier().getConsumingStrategy();
} else {
consumingStrategy =
currentGroupPattern.getQuantifier().getInnerConsumingStrategy();
}
}
IterativeCondition<T> ignoreCondition = null;
switch (consumingStrategy) {
case STRICT:
ignoreCondition = null;
break;
case SKIP_TILL_NEXT:
ignoreCondition =
new RichNotCondition<>((IterativeCondition<T>) pattern.getCondition());
break;
case SKIP_TILL_ANY:
ignoreCondition = BooleanConditions.trueFunction();
break;
}
if (currentGroupPattern != null && currentGroupPattern.getUntilCondition() != null) {
ignoreCondition =
extendWithUntilCondition(
ignoreCondition,
(IterativeCondition<T>) currentGroupPattern.getUntilCondition(),
false);
}
return ignoreCondition;
} | 3.68 |
hadoop_OBSDataBlocks_closeBlock | /**
* Close the block. This will delete the block's buffer file if the block
* has not previously been closed.
*/
void closeBlock() {
LOG.debug("block[{}]: closeBlock()", getIndex());
if (!closed.getAndSet(true)) {
if (!bufferFile.delete() && bufferFile.exists()) {
LOG.warn("delete({}) returned false",
bufferFile.getAbsoluteFile());
}
} else {
LOG.debug("block[{}]: skipping re-entrant closeBlock()",
getIndex());
}
} | 3.68 |
flink_BroadcastVariableManager_materializeBroadcastVariable | /**
* Materializes the broadcast variable for the given name, scoped to the given task and its
* iteration superstep. An existing materialization created by another parallel subtask may be
* returned, if it hasn't expired yet.
*/
public <T> BroadcastVariableMaterialization<T, ?> materializeBroadcastVariable(
String name,
int superstep,
BatchTask<?, ?> holder,
MutableReader<?> reader,
TypeSerializerFactory<T> serializerFactory)
throws IOException {
final BroadcastVariableKey key =
new BroadcastVariableKey(holder.getEnvironment().getJobVertexId(), name, superstep);
while (true) {
final BroadcastVariableMaterialization<T, Object> newMat =
new BroadcastVariableMaterialization<T, Object>(key);
final BroadcastVariableMaterialization<?, ?> previous =
variables.putIfAbsent(key, newMat);
@SuppressWarnings("unchecked")
final BroadcastVariableMaterialization<T, ?> materialization =
(previous == null) ? newMat : (BroadcastVariableMaterialization<T, ?>) previous;
try {
materialization.materializeVariable(reader, serializerFactory, holder);
return materialization;
} catch (MaterializationExpiredException e) {
// concurrent release. as an optimization, try to replace the previous one with our
// version. otherwise we might spin for a while
// until the releaser removes the variable
// NOTE: This would also catch a bug prevented an expired materialization from ever
// being removed, so it acts as a future safeguard
boolean replaceSuccessful = false;
try {
replaceSuccessful = variables.replace(key, materialization, newMat);
} catch (Throwable t) {
}
if (replaceSuccessful) {
try {
newMat.materializeVariable(reader, serializerFactory, holder);
return newMat;
} catch (MaterializationExpiredException ee) {
// can still happen in cases of extreme races and fast tasks
// fall through the loop;
}
}
// else fall through the loop
}
}
} | 3.68 |
morf_MySqlDialect_getSqlForAddMonths | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForAddMonths(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForAddMonths(Function function) {
return String.format(
"DATE_ADD(%s, INTERVAL %s MONTH)",
getSqlFrom(function.getArguments().get(0)),
getSqlFrom(function.getArguments().get(1))
);
} | 3.68 |
hudi_Source_fetchNext | /**
* Main API called by Hoodie Streamer to fetch records.
*
* @param lastCkptStr Last Checkpoint
* @param sourceLimit Source Limit
* @return
*/
public final InputBatch<T> fetchNext(Option<String> lastCkptStr, long sourceLimit) {
InputBatch<T> batch = fetchNewData(lastCkptStr, sourceLimit);
// If overriddenSchemaProvider is passed in CLI, use it
return overriddenSchemaProvider == null ? batch
: new InputBatch<>(batch.getBatch(), batch.getCheckpointForNextBatch(), overriddenSchemaProvider);
} | 3.68 |
morf_Operator_toString | /**
* @see java.lang.Enum#toString()
*/
@Override
public String toString() {
return stringRepresentation;
} | 3.68 |
flink_CollectionUtil_newLinkedHashSetWithExpectedSize | /**
* Creates a new {@link LinkedHashSet} of the expected size, i.e. a hash set that will not
* rehash if expectedSize many unique elements are inserted, considering the load factor.
*
* @param expectedSize the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <E> the type of elements stored by this set.
*/
public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize) {
return new LinkedHashSet<>(
computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR),
HASH_MAP_DEFAULT_LOAD_FACTOR);
} | 3.68 |
hbase_StoreUtils_getChecksumType | /**
* Returns the configured checksum algorithm.
* @param conf The configuration
* @return The checksum algorithm that is set in the configuration
*/
public static ChecksumType getChecksumType(Configuration conf) {
return ChecksumType.nameToType(
conf.get(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.getDefaultChecksumType().getName()));
} | 3.68 |
framework_SelectAllConstantViewport_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 13341;
} | 3.68 |
framework_VFilterSelect_showSuggestions | /**
* Shows the popup where the user can see the filtered options.
*
* @param currentSuggestions
* The filtered suggestions
* @param currentPage
* The current page number
* @param totalSuggestions
* The total amount of suggestions
*/
public void showSuggestions(
final Collection<FilterSelectSuggestion> currentSuggestions,
final int currentPage, final int totalSuggestions) {
debug("VFS.SP: showSuggestions(" + currentSuggestions + ", "
+ currentPage + ", " + totalSuggestions + ")");
/*
* We need to defer the opening of the popup so that the parent DOM
* has stabilized so we can calculate an absolute top and left
* correctly. This issue manifests when a Combobox is placed in
* another popupView which also needs to calculate the absoluteTop()
* to position itself. #9768
*
* After deferring the showSuggestions method, a problem with
* navigating in the combo box occurs. Because of that the method
* navigateItemAfterPageChange in ComboBoxConnector class, which
* navigates to the exact item after page was changed also was
* marked as deferred. #11333
*/
final SuggestionPopup popup = this;
Scheduler.get().scheduleDeferred(new ScheduledCommand() {
@Override
public void execute() {
// Add TT anchor point
getElement().setId("VAADIN_COMBOBOX_OPTIONLIST");
menu.setSuggestions(currentSuggestions);
leftPosition = getDesiredLeftPosition();
topPosition = getDesiredTopPosition();
setPopupPosition(leftPosition, topPosition);
int nullOffset = (nullSelectionAllowed
&& "".equals(lastFilter) ? 1 : 0);
boolean firstPage = (currentPage == 0);
final int first = currentPage * pageLength + 1
- (firstPage ? 0 : nullOffset);
final int last = first + currentSuggestions.size() - 1
- (firstPage && "".equals(lastFilter) ? nullOffset
: 0);
final int matches = totalSuggestions - nullOffset;
if (last > 0) {
// nullsel not counted, as requested by user
status.setInnerText((matches == 0 ? 0 : first) + "-"
+ last + "/" + matches);
} else {
status.setInnerText("");
}
// We don't need to show arrows or statusbar if there is
// only one page
if (totalSuggestions <= pageLength || pageLength == 0) {
setPagingEnabled(false);
} else {
setPagingEnabled(true);
}
setPrevButtonActive(first > 1);
setNextButtonActive(last < matches);
// clear previously fixed width
menu.setWidth("");
menu.getElement().getFirstChildElement().getStyle()
.clearWidth();
setPopupPositionAndShow(popup);
// Fix for #14173
// IE9 and IE10 have a bug, when resize an a element with
// box-shadow.
// IE9 and IE10 need explicit update to remove extra
// box-shadows
if (BrowserInfo.get().isIE9()
|| BrowserInfo.get().isIE10()) {
forceReflow();
}
}
});
} | 3.68 |
hadoop_TimelineDomain_setReaders | /**
* Set the reader (and/or reader group) list string
*
* @param readers the reader (and/or reader group) list string
*/
public void setReaders(String readers) {
this.readers = readers;
} | 3.68 |
flink_MetricStore_isRepresentativeAttempt | // Returns whether the attempt is the representative one. It's also true if the current
// execution attempt number for the subtask is not present in the currentExecutionAttempts,
// which means there should be only one execution
private boolean isRepresentativeAttempt(
String jobID, String vertexID, int subtaskIndex, int attemptNumber) {
return Optional.of(representativeAttempts)
.map(m -> m.get(jobID))
.map(m -> m.get(vertexID))
.map(m -> m.get(subtaskIndex))
.orElse(attemptNumber)
== attemptNumber;
} | 3.68 |
hudi_DateTimeUtils_parseDuration | /**
* Parse the given string to a java {@link Duration}. The string is in format "{length
* value}{time unit label}", e.g. "123ms", "321 s". If no time unit label is specified, it will
* be considered as milliseconds.
*
* <p>Supported time unit labels are:
*
* <ul>
* <li>DAYS: "d", "day"
* <li>HOURS: "h", "hour"
* <li>MINUTES: "min", "minute"
* <li>SECONDS: "s", "sec", "second"
* <li>MILLISECONDS: "ms", "milli", "millisecond"
* <li>MICROSECONDS: "µs", "micro", "microsecond"
* <li>NANOSECONDS: "ns", "nano", "nanosecond"
* </ul>
*
* @param text string to parse.
*/
public static Duration parseDuration(String text) {
ValidationUtils.checkArgument(!StringUtils.isNullOrEmpty(text));
final String trimmed = text.trim();
ValidationUtils.checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
final int len = trimmed.length();
int pos = 0;
char current;
while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') {
pos++;
}
final String number = trimmed.substring(0, pos);
final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);
if (number.isEmpty()) {
throw new NumberFormatException("text does not start with a number");
}
final long value;
try {
value = Long.parseLong(number); // this throws a NumberFormatException on overflow
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"The value '"
+ number
+ "' cannot be re represented as 64bit number (numeric overflow).");
}
if (unitLabel.isEmpty()) {
return Duration.of(value, ChronoUnit.MILLIS);
}
ChronoUnit unit = LABEL_TO_UNIT_MAP.get(unitLabel);
if (unit != null) {
return Duration.of(value, unit);
} else {
throw new IllegalArgumentException(
"Time interval unit label '"
+ unitLabel
+ "' does not match any of the recognized units: "
+ TimeUnit.getAllUnits());
}
} | 3.68 |
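Expected results for a few `parseDuration` inputs, following the contract documented above; the import path for Hudi's `DateTimeUtils` is an assumption.

```java
// The org.apache.hudi.common.util package for DateTimeUtils is an assumption.
import java.time.Duration;
import org.apache.hudi.common.util.DateTimeUtils;

public class ParseDurationExample {
  public static void main(String[] args) {
    System.out.println(DateTimeUtils.parseDuration("123ms").equals(Duration.ofMillis(123))); // true
    System.out.println(DateTimeUtils.parseDuration("2 min").equals(Duration.ofMinutes(2)));  // true
    System.out.println(DateTimeUtils.parseDuration("300").equals(Duration.ofMillis(300)));   // true, no unit defaults to millis
  }
}
```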
hadoop_OBSFileSystem_listLocatedStatus | /**
* List a directory. The returned results include the block location if the entry is
* a file. The results are filtered by the given path filter.
*
* @param f a path
* @param filter a path filter
* @return an iterator that traverses statuses of the files/directories in the
* given path
* @throws FileNotFoundException if <code>f</code> does not exist
* @throws IOException if any I/O error occurred
*/
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
final PathFilter filter)
throws FileNotFoundException, IOException {
Path path = OBSCommonUtils.qualify(this, f);
LOG.debug("listLocatedStatus({}, {}", path, filter);
try {
// lookup dir triggers existence check
final FileStatus fileStatus = getFileStatus(path);
if (fileStatus.isFile()) {
// simple case: File
LOG.debug("Path is a file");
return new OBSListing.SingleStatusRemoteIterator(
filter.accept(path) ? OBSCommonUtils.toLocatedFileStatus(
this, fileStatus) : null);
} else {
// directory: trigger a lookup
String key = OBSCommonUtils.maybeAddTrailingSlash(
OBSCommonUtils.pathToKey(this, path));
return obsListing.createLocatedFileStatusIterator(
obsListing.createFileStatusListingIterator(
path,
OBSCommonUtils.createListObjectsRequest(this, key, "/"),
filter,
new OBSListing.AcceptAllButSelfAndS3nDirs(path)));
}
} catch (ObsException e) {
throw OBSCommonUtils.translateException("listLocatedStatus", path,
e);
}
} | 3.68 |
flink_AbstractWritableVector_setDictionary | /** Update the dictionary. */
@Override
public void setDictionary(Dictionary dictionary) {
this.dictionary = dictionary;
} | 3.68 |
hbase_WALKeyImpl_setSequenceId | // For deserialization. DO NOT USE. See setWriteEntry below.
@InterfaceAudience.Private
protected void setSequenceId(long sequenceId) {
this.sequenceId = sequenceId;
} | 3.68 |
flink_AsyncSinkWriterStateSerializer_serialize | /**
     * Serializes state in the form
     * [DATA_IDENTIFIER, NUM_OF_ELEMENTS, SIZE1, REQUEST1, SIZE2, REQUEST2, ...].
*/
@Override
public byte[] serialize(BufferedRequestState<RequestEntryT> obj) throws IOException {
Collection<RequestEntryWrapper<RequestEntryT>> bufferState =
obj.getBufferedRequestEntries();
try (final ByteArrayOutputStream baos = new ByteArrayOutputStream();
final DataOutputStream out = new DataOutputStream(baos)) {
out.writeLong(DATA_IDENTIFIER);
out.writeInt(bufferState.size());
for (RequestEntryWrapper<RequestEntryT> wrapper : bufferState) {
out.writeLong(wrapper.getSize());
serializeRequestToStream(wrapper.getRequestEntry(), out);
}
return baos.toByteArray();
}
} | 3.68 |
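The layout above implies a symmetric read path. The following is only a sketch of what a matching deserializer could do; the element-reading hook and the RequestEntryWrapper constructor used here are assumptions, not the serializer's confirmed API.

// Sketch: read back [DATA_IDENTIFIER, NUM_OF_ELEMENTS, SIZE1, REQUEST1, SIZE2, REQUEST2, ...].
try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(serialized))) {
    long identifier = in.readLong();              // expected to equal DATA_IDENTIFIER
    int count = in.readInt();
    List<RequestEntryWrapper<RequestEntryT>> entries = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
        long size = in.readLong();
        RequestEntryT entry = readRequestFromStream(size, in); // assumed counterpart of serializeRequestToStream
        entries.add(new RequestEntryWrapper<>(entry, size));   // assumed wrapper constructor
    }
    // 'entries' would then be wrapped back into a BufferedRequestState.
}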
hbase_LeaseManager_getLeaseName | /** Returns the lease name */
public String getLeaseName() {
return leaseName;
} | 3.68 |
hudi_GenericRecordFullPayloadSizeEstimator_getNonNull | /**
* Get the nonNull Schema of a given UNION Schema.
*
   * @param schema the UNION schema to inspect
   * @return the non-null member schema of the union
*/
protected Schema getNonNull(Schema schema) {
List<Schema> types = schema.getTypes();
return types.get(0).getType().equals(Schema.Type.NULL) ? types.get(1) : types.get(0);
} | 3.68 |
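Applied to a nullable-string union built with the standard Avro API, the helper would return the STRING branch; a small sketch (the surrounding estimator instance is assumed):

// Build a ["null", "string"] union and pick the non-null branch.
Schema nullable = Schema.createUnion(Arrays.asList(
    Schema.create(Schema.Type.NULL),
    Schema.create(Schema.Type.STRING)));
Schema nonNull = getNonNull(nullable);
// nonNull.getType() == Schema.Type.STRING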
hadoop_AMRMProxyTokenSecretManager_getMasterKey | // If nextMasterKey is not Null, then return nextMasterKey
// otherwise return currentMasterKey.
@VisibleForTesting
public MasterKeyData getMasterKey() {
this.readLock.lock();
try {
return nextMasterKey == null ? currentMasterKey : nextMasterKey;
} finally {
this.readLock.unlock();
}
} | 3.68 |
pulsar_GracefulExecutorServicesShutdown_timeout | /**
* Sets the timeout for graceful shutdown.
*
* @param timeout duration for the timeout
* @return the current instance for controlling graceful shutdown
*/
public GracefulExecutorServicesShutdown timeout(Duration timeout) {
this.timeout = timeout;
return this;
} | 3.68 |
flink_PartitionTable_hasTrackedPartitions | /** Returns whether any partitions are being tracked for the given key. */
public boolean hasTrackedPartitions(K key) {
return trackedPartitionsPerKey.containsKey(key);
} | 3.68 |
pulsar_ClientCnx_checkServerError | /**
     * Check the server error and take the appropriate action.
     * <ul>
     * <li>InternalServerError: close the connection immediately</li>
     * <li>TooManyRequests: count the rejected request and close the connection once more than
     * maxNumberOfRejectedRequestPerConnection errors are received within rejectedRequestResetTimeSec</li>
* </ul>
*
* @param error
* @param errMsg
*/
private void checkServerError(ServerError error, String errMsg) {
if (ServerError.ServiceNotReady.equals(error)) {
log.error("{} Close connection because received internal-server error {}", ctx.channel(), errMsg);
ctx.close();
} else if (ServerError.TooManyRequests.equals(error)) {
incrementRejectsAndMaybeClose();
}
} | 3.68 |
zxing_WifiConfigManager_convertToQuotedString | /**
* Encloses the incoming string inside double quotes, if it isn't already quoted.
* @param s the input string
   * @return a quoted string, of the form "input". If the input string is null or empty, it
   * returns null instead.
*/
private static String convertToQuotedString(String s) {
if (s == null || s.isEmpty()) {
return null;
}
// If already quoted, return as-is
if (s.charAt(0) == '"' && s.charAt(s.length() - 1) == '"') {
return s;
}
return '\"' + s + '\"';
} | 3.68 |
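Concretely, the behaviour described above plays out as follows (hypothetical call sites):

convertToQuotedString("mynetwork");     // -> "\"mynetwork\""
convertToQuotedString("\"mynetwork\""); // -> "\"mynetwork\"" (already quoted, returned as-is)
convertToQuotedString("");              // -> null
convertToQuotedString(null);            // -> null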
flink_ArrowUtils_readFully | /** Fills a buffer with data read from the channel. */
private static void readFully(ReadableByteChannel channel, ByteBuffer dst) throws IOException {
int expected = dst.remaining();
while (dst.hasRemaining()) {
if (channel.read(dst) < 0) {
throw new EOFException(
String.format("Not enough bytes in channel (expected %d).", expected));
}
}
} | 3.68 |
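A typical call pattern for such a helper is reading a fixed-size length prefix before the payload; the call site below is hypothetical.

// Read a 4-byte length prefix, then exactly that many payload bytes.
ByteBuffer header = ByteBuffer.allocate(4);
readFully(channel, header);
header.flip();
int payloadLength = header.getInt();
ByteBuffer payload = ByteBuffer.allocate(payloadLength);
readFully(channel, payload);
payload.flip();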
pulsar_MongoSourceConfig_setSyncType | /**
* @param syncTypeStr Sync type string.
*/
private void setSyncType(String syncTypeStr) {
// if syncType is not set, the default sync type is used
if (StringUtils.isEmpty(syncTypeStr)) {
this.syncType = DEFAULT_SYNC_TYPE;
return;
}
// if syncType is set but not correct, an exception will be thrown
try {
this.syncType = SyncType.valueOf(syncTypeStr.toUpperCase());
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("The value of the syncType field is incorrect.");
}
} | 3.68 |
hbase_HMaster_getTableDescriptors | /**
   * Return a list of table descriptors after applying any provided filter parameters. Note
* that the user-facing description of this filter logic is presented on the class-level javadoc
* of {@link NormalizeTableFilterParams}.
*/
private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
final String namespace, final String regex, final List<TableName> tableNameList,
final boolean includeSysTables) throws IOException {
if (tableNameList == null || tableNameList.isEmpty()) {
// request for all TableDescriptors
Collection<TableDescriptor> allHtds;
if (namespace != null && namespace.length() > 0) {
// Do a check on the namespace existence. Will fail if does not exist.
this.clusterSchemaService.getNamespace(namespace);
allHtds = tableDescriptors.getByNamespace(namespace).values();
} else {
allHtds = tableDescriptors.getAll().values();
}
for (TableDescriptor desc : allHtds) {
if (
tableStateManager.isTablePresent(desc.getTableName())
&& (includeSysTables || !desc.getTableName().isSystemTable())
) {
htds.add(desc);
}
}
} else {
for (TableName s : tableNameList) {
if (tableStateManager.isTablePresent(s)) {
TableDescriptor desc = tableDescriptors.get(s);
if (desc != null) {
htds.add(desc);
}
}
}
}
// Retains only those matched by regular expression.
if (regex != null) filterTablesByRegex(htds, Pattern.compile(regex));
return htds;
} | 3.68 |
hudi_BloomFilterUtils_getBitSize | /**
* @return the bitsize given the total number of entries and error rate.
*/
static int getBitSize(int numEntries, double errorRate) {
return (int) Math.ceil(numEntries * (-Math.log(errorRate) / LOG2_SQUARED));
} | 3.68 |
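Assuming LOG2_SQUARED is (ln 2)^2 ≈ 0.4805, the standard Bloom-filter constant, a quick worked check of the formula m = ceil(n * -ln(p) / (ln 2)^2):

// Worked example of the bit-size formula, assuming LOG2_SQUARED = Math.log(2) * Math.log(2).
int numEntries = 1000;
double errorRate = 0.01;                          // 1% false-positive rate
double log2Squared = Math.log(2) * Math.log(2);   // ~0.4805
int bits = (int) Math.ceil(numEntries * (-Math.log(errorRate) / log2Squared));
// bits ~= 9586, i.e. roughly 9.6 bits per entry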
flink_ExtendedSqlCollectionTypeNameSpec_createCollectionType | /**
* Create collection data type.
*
* @param elementType Type of the collection element
* @param typeFactory Type factory
* @return The collection data type, or throw exception if the collection type name does not
* belong to {@code SqlTypeName} enumerations
*/
private RelDataType createCollectionType(
RelDataType elementType, RelDataTypeFactory typeFactory) {
switch (collectionTypeName) {
case MULTISET:
return typeFactory.createMultisetType(elementType, -1);
case ARRAY:
return typeFactory.createArrayType(elementType, -1);
default:
throw Util.unexpected(collectionTypeName);
}
} | 3.68 |
hbase_HBaseCommonTestingUtility_waitFor | /**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, long interval, boolean failIfTimeout,
Predicate<E> predicate) throws E {
return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
} | 3.68 |
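A hypothetical call, passing the Waiter predicate as a lambda; the util instance and the condition are made up for illustration.

// Wait up to 30 seconds, polling every 250 ms, and fail on timeout.
long elapsedMillis = util.waitFor(30_000, 250, true,
    () -> admin.isTableAvailable(tableName));  // hypothetical condition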
graphhopper_GraphHopper_cleanUp | /**
* Internal method to clean up the graph.
*/
protected void cleanUp() {
PrepareRoutingSubnetworks preparation = new PrepareRoutingSubnetworks(baseGraph.getBaseGraph(), buildSubnetworkRemovalJobs());
preparation.setMinNetworkSize(minNetworkSize);
preparation.setThreads(subnetworksThreads);
preparation.doWork();
properties.put("profiles", getProfilesString());
logger.info("nodes: " + Helper.nf(baseGraph.getNodes()) + ", edges: " + Helper.nf(baseGraph.getEdges()));
} | 3.68 |
hbase_TableInputFormat_getSplits | /**
* Calculates the splits that will serve as input for the map tasks. The number of splits matches
* the number of regions in a table. Splits are shuffled if required.
* @param context The current job context.
* @return The list of input splits.
* @throws IOException When creating the list of splits fails.
   * @see org.apache.hadoop.mapreduce.InputFormat#getSplits(org.apache.hadoop.mapreduce.JobContext)
*/
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {
List<InputSplit> splits = super.getSplits(context);
if (
(conf.get(SHUFFLE_MAPS) != null)
&& "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))
) {
Collections.shuffle(splits);
}
return splits;
} | 3.68 |
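To opt into the shuffling, the job setup would set the SHUFFLE_MAPS key on the configuration before submission; the wiring below is a sketch, not taken from the source.

// Enable shuffling of the region-based splits for this job.
Configuration conf = job.getConfiguration();
conf.set(TableInputFormat.SHUFFLE_MAPS, "true");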
shardingsphere-elasticjob_JobScheduleController_rescheduleJob | /**
* Reschedule OneOff job.
*/
public synchronized void rescheduleJob() {
try {
SimpleTrigger trigger = (SimpleTrigger) scheduler.getTrigger(TriggerKey.triggerKey(triggerIdentity));
if (!scheduler.isShutdown() && null != trigger) {
scheduler.rescheduleJob(TriggerKey.triggerKey(triggerIdentity), createOneOffTrigger());
}
} catch (final SchedulerException ex) {
throw new JobSystemException(ex);
}
} | 3.68 |
hadoop_AbstractTask_toString | /**
* ToString.
* @return String representation of Task
*/
@Override
public final String toString() {
return "TaskId: " + this.taskID.toString() + ", TaskType: " + this.taskType
+ ", cmd: '" + taskCmd + "'";
} | 3.68 |
hbase_ExportSnapshot_getBalancedSplits | /**
* Given a list of file paths and sizes, create around ngroups in as balanced a way as possible.
* The groups created will have similar amounts of bytes.
* <p>
   * The algorithm used is pretty straightforward; the file list is sorted by size, and then each
   * group in turn takes the biggest file still available, iterating through the groups and
   * alternating the direction.
*/
static List<List<Pair<SnapshotFileInfo, Long>>>
getBalancedSplits(final List<Pair<SnapshotFileInfo, Long>> files, final int ngroups) {
// Sort files by size, from small to big
Collections.sort(files, new Comparator<Pair<SnapshotFileInfo, Long>>() {
public int compare(Pair<SnapshotFileInfo, Long> a, Pair<SnapshotFileInfo, Long> b) {
long r = a.getSecond() - b.getSecond();
return (r < 0) ? -1 : ((r > 0) ? 1 : 0);
}
});
// create balanced groups
List<List<Pair<SnapshotFileInfo, Long>>> fileGroups = new LinkedList<>();
long[] sizeGroups = new long[ngroups];
int hi = files.size() - 1;
int lo = 0;
List<Pair<SnapshotFileInfo, Long>> group;
int dir = 1;
int g = 0;
while (hi >= lo) {
if (g == fileGroups.size()) {
group = new LinkedList<>();
fileGroups.add(group);
} else {
group = fileGroups.get(g);
}
Pair<SnapshotFileInfo, Long> fileInfo = files.get(hi--);
// add the hi one
sizeGroups[g] += fileInfo.getSecond();
group.add(fileInfo);
// change direction when at the end or the beginning
g += dir;
if (g == ngroups) {
dir = -1;
g = ngroups - 1;
} else if (g < 0) {
dir = 1;
g = 0;
}
}
if (LOG.isDebugEnabled()) {
for (int i = 0; i < sizeGroups.length; ++i) {
LOG.debug("export split=" + i + " size=" + StringUtils.humanReadableInt(sizeGroups[i]));
}
}
return fileGroups;
} | 3.68 |
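To make the zig-zag assignment concrete: with sizes [10, 9, 7, 3, 1] and ngroups = 2, the groups fill in snake order (group 0 takes 10, group 1 takes 9 and then 7, group 0 takes 3 and then 1), ending with totals 14 and 16. A simplified standalone sketch over raw sizes, for illustration only and not the HBase types:

// Simplified version of the same balancing idea, operating on plain long sizes.
static List<List<Long>> balance(long[] sizes, int ngroups) {
  long[] sorted = sizes.clone();
  Arrays.sort(sorted);                                  // ascending, like the comparator above
  List<List<Long>> groups = new ArrayList<>();
  for (int i = 0; i < ngroups; i++) {
    groups.add(new ArrayList<>());
  }
  int hi = sorted.length - 1;
  int g = 0;
  int dir = 1;
  while (hi >= 0) {
    groups.get(g).add(sorted[hi--]);                    // hand out the biggest remaining size
    g += dir;
    if (g == ngroups) { dir = -1; g = ngroups - 1; }    // bounce off the last group
    else if (g < 0)   { dir = 1;  g = 0; }              // bounce off the first group
  }
  return groups;
}
// balance(new long[] {1, 3, 7, 9, 10}, 2) -> [[10, 3, 1], [9, 7]]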
framework_VCustomField_setFocusDelegate | /**
* Sets the focusable widget to focus instead of this custom field.
*
* @param focusDelegate
* the widget to delegate focus to
*/
public void setFocusDelegate(
final com.google.gwt.user.client.ui.Focusable focusDelegate) {
this.focusDelegate = () -> focusDelegate.setFocus(true);
} | 3.68 |
framework_Window_setAssistiveRole | /**
   * Sets the WAI-ARIA role of the window.
*
* This role defines how an assistive device handles a window. Available
* roles are alertdialog and dialog (@see
* <a href="http://www.w3.org/TR/2011/CR-wai-aria-20110118/roles">Roles
* Model</a>).
*
* The default role is dialog.
*
* @param role
* WAI-ARIA role to set for the window
*/
public void setAssistiveRole(WindowRole role) {
getState().role = role;
} | 3.68 |
hbase_BinaryComponentComparator_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof BinaryComponentComparator)) {
return false;
}
return super.areSerializedFieldsEqual(other);
} | 3.68 |