name stringlengths 12–178 | code_snippet stringlengths 8–36.5k | score float64 3.26–3.68 |
---|---|---|
hadoop_IrqHandler_handle | /**
* Handler for the JVM API for signal handling.
* @param s signal raised
*/
@Override
public void handle(Signal s) {
signalCount.incrementAndGet();
InterruptData data = new InterruptData(s.getName(), s.getNumber());
LOG.info("Interrupted: {}", data);
handler.interrupted(data);
} | 3.68 |
hadoop_OBSCommonUtils_propagateBucketOptions | /**
* Propagates bucket-specific settings into generic OBS configuration keys.
* This is done by propagating the values of the form {@code
* fs.obs.bucket.${bucket}.key} to {@code fs.obs.key}, for all values of "key"
* other than a small set of unmodifiable values.
*
* <p>The source of the updated property is set to the key name of the
* bucket property, to aid in diagnostics of where things came from.
*
* <p>Returns a new configuration. Why the clone? You can use the same conf
* for different filesystems, and the original values are not updated.
*
* <p>The {@code fs.obs.impl} property cannot be set, nor can any with the
* prefix {@code fs.obs.bucket}.
*
* <p>This method does not propagate security provider path information
* from the OBS property into the Hadoop common provider: callers must call
* {@link #patchSecurityCredentialProviders(Configuration)} explicitly.
*
* @param source Source Configuration object.
* @param bucket bucket name. Must not be empty.
* @return a (potentially) patched clone of the original.
*/
static Configuration propagateBucketOptions(final Configuration source,
final String bucket) {
Preconditions.checkArgument(StringUtils.isNotEmpty(bucket), "bucket");
final String bucketPrefix = OBSConstants.FS_OBS_BUCKET_PREFIX + bucket
+ '.';
LOG.debug("Propagating entries under {}", bucketPrefix);
final Configuration dest = new Configuration(source);
for (Map.Entry<String, String> entry : source) {
final String key = entry.getKey();
// get the (unexpanded) value.
final String value = entry.getValue();
if (!key.startsWith(bucketPrefix) || bucketPrefix.equals(key)) {
continue;
}
// there's a bucket prefix, so strip it
final String stripped = key.substring(bucketPrefix.length());
if (stripped.startsWith("bucket.") || "impl".equals(stripped)) {
// tell user off
LOG.debug("Ignoring bucket option {}", key);
} else {
// propagate the value, building a new origin field.
// to track overwrites, the generic key is overwritten even if
// it already matches the new one.
final String generic = OBSConstants.FS_OBS_PREFIX + stripped;
LOG.debug("Updating {}", generic);
dest.set(generic, value, key);
}
}
return dest;
} | 3.68 |
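To make the key rewriting in propagateBucketOptions above concrete, here is a minimal sketch of the same propagation over a plain java.util.Map instead of a Hadoop Configuration. The prefix values ("fs.obs." and "fs.obs.bucket.") and the example keys are assumptions for illustration only; the real method also records the property origin, which a plain Map cannot do.

import java.util.HashMap;
import java.util.Map;

public class BucketOptionPropagationSketch {
    // Assumed values of the OBSConstants prefixes used by propagateBucketOptions.
    static final String FS_OBS_PREFIX = "fs.obs.";
    static final String FS_OBS_BUCKET_PREFIX = "fs.obs.bucket.";

    public static void main(String[] args) {
        Map<String, String> source = new HashMap<>();
        source.put("fs.obs.bucket.logs.endpoint", "obs.eu-west-0.example.com");
        source.put("fs.obs.bucket.logs.impl", "x");          // ignored: "impl" is unmodifiable
        source.put("fs.obs.endpoint", "obs.default.example.com");

        String bucketPrefix = FS_OBS_BUCKET_PREFIX + "logs" + '.';
        Map<String, String> dest = new HashMap<>(source);    // work on a clone, as the real method does
        for (Map.Entry<String, String> e : source.entrySet()) {
            String key = e.getKey();
            if (!key.startsWith(bucketPrefix) || bucketPrefix.equals(key)) {
                continue;
            }
            String stripped = key.substring(bucketPrefix.length());
            if (stripped.startsWith("bucket.") || "impl".equals(stripped)) {
                continue;                                     // skip unmodifiable keys
            }
            dest.put(FS_OBS_PREFIX + stripped, e.getValue()); // fs.obs.bucket.logs.endpoint -> fs.obs.endpoint
        }
        System.out.println(dest.get("fs.obs.endpoint"));      // obs.eu-west-0.example.com
    }
}
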
hbase_FileIOEngine_write | /**
* Transfers data from the given byte buffer to the file
* @param srcBuffer the given byte buffer from which bytes are to be read
* @param offset The offset in the file where the first byte is to be written
*/
@Override
public void write(ByteBuffer srcBuffer, long offset) throws IOException {
write(ByteBuff.wrap(srcBuffer), offset);
} | 3.68 |
flink_ConnectedStreams_getType1 | /**
* Gets the type of the first input.
*
* @return The type of the first input
*/
public TypeInformation<IN1> getType1() {
return inputStream1.getType();
} | 3.68 |
hadoop_ByteArray_size | /**
* @return the size of the byte array.
*/
@Override
public int size() {
return len;
} | 3.68 |
AreaShop_FileManager_getBuyNames | /**
* Get a list of names of all buy regions.
* @return A String list with all the names
*/
public List<String> getBuyNames() {
ArrayList<String> result = new ArrayList<>();
for(BuyRegion region : getBuys()) {
result.add(region.getName());
}
return result;
} | 3.68 |
framework_FlyweightCell_getColumn | /**
* Returns the column index of the cell.
*
* @return the column index
*/
public int getColumn() {
assertSetup();
return column;
} | 3.68 |
hadoop_ProtobufHelper_getByteString | /**
* Get the byte string of a non-null byte array.
* If the array is 0 bytes long, return a singleton to reduce object allocation.
* @param bytes bytes to convert.
* @return the ByteString of the byte array (a shared singleton if the array is empty)
*/
public static ByteString getByteString(byte[] bytes) {
// return singleton to reduce object allocation
return ShadedProtobufHelper.getByteString(bytes);
} | 3.68 |
hbase_CellUtil_compareColumns | /**
* Compares the cell's column (family and qualifier) with the given byte[]
* @param left the cell for which the column has to be compared
* @param right the byte[] having the column
* @param rfoffset the offset of the family
* @param rflength the length of the family
* @param rqoffset the offset of the qualifier
* @param rqlength the length of the qualifier
* @return greater than 0 if left cell's column is bigger than byte[], less than 0 if left
* cell's column is smaller than byte[], and 0 otherwise
*/
public final static int compareColumns(Cell left, byte[] right, int rfoffset, int rflength,
int rqoffset, int rqlength) {
int diff = compareFamilies(left, right, rfoffset, rflength);
if (diff != 0) return diff;
return compareQualifiers(left, right, rqoffset, rqlength);
} | 3.68 |
flink_PlannerTypeInferenceUtilImpl_getValidationErrorMessage | /**
* Return the validation error message of this {@link PlannerExpression}, or the validation
* error message of its children if it passes validation itself. Returns empty if all
* validation succeeded.
*/
private Optional<String> getValidationErrorMessage(PlannerExpression plannerCall) {
ValidationResult validationResult = plannerCall.validateInput();
if (validationResult instanceof ValidationFailure) {
return Optional.of(((ValidationFailure) validationResult).message());
} else {
for (Expression plannerExpression : plannerCall.getChildren()) {
Optional<String> errorMessage =
getValidationErrorMessage((PlannerExpression) plannerExpression);
if (errorMessage.isPresent()) {
return errorMessage;
}
}
}
return Optional.empty();
} | 3.68 |
morf_ChangeColumn_reverse | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
return applyChange(schema, toColumn, fromColumn);
} | 3.68 |
hudi_Triple_of | /**
* <p>
* Obtains an immutable triple from three objects, inferring the generic types.
* </p>
*
* <p>
* This factory allows the triple to be created using inference to obtain the generic types.
* </p>
*
* @param <L> the left element type
* @param <M> the middle element type
* @param <R> the right element type
* @param left the left element, may be null
* @param middle the middle element, may be null
* @param right the right element, may be null
* @return a triple formed from the three parameters, not null
*/
public static <L, M, R> Triple<L, M, R> of(final L left, final M middle, final R right) {
return new ImmutableTriple<L, M, R>(left, middle, right);
} | 3.68 |
hadoop_ClusterMetrics_getTotalJobSubmissions | /**
* Get the total number of job submissions in the cluster.
*
* @return total number of job submissions
*/
public int getTotalJobSubmissions() {
return totalJobSubmissions;
} | 3.68 |
hmily_Binder_setSource | /**
* Sets source.
*
* @param source the source
* @param value the value
* @return the value obtained from the supplier
*/
Object setSource(final ConfigPropertySource source, final Supplier<?> value) {
this.source = source;
return value.get();
} | 3.68 |
hadoop_AuthenticationToken_parse | /**
* Parses a string into an authentication token.
*
* @param tokenStr string representation of a token.
*
* @return the parsed authentication token.
*
* @throws AuthenticationException thrown if the string representation could not be parsed into
* an authentication token.
*/
public static AuthenticationToken parse(String tokenStr) throws AuthenticationException {
return new AuthenticationToken(AuthToken.parse(tokenStr));
} | 3.68 |
morf_NamedParameterPreparedStatement_executeBatch | /**
* @see PreparedStatement#executeBatch()
* @return an array of update counts containing one element for each
* command in the batch. The elements of the array are ordered according
* to the order in which commands were added to the batch.
* @exception SQLException if a database access error occurs,
* this method is called on a closed <code>Statement</code> or the
* driver does not support batch statements.
* @throws SQLTimeoutException when the driver has determined that the
* timeout value that was specified by the {@code setQueryTimeout}
* method has been exceeded and has at least attempted to cancel
* the currently running {@code Statement}
*/
public int[] executeBatch() throws SQLException {
return statement.executeBatch();
} | 3.68 |
hadoop_RollingFileSystemSink_createLogFile | /**
* Create a new log file and return the {@link FSDataOutputStream}. If a
* file with the specified path already exists, add a suffix, starting with 1
* and try again. Keep incrementing the suffix until a nonexistent target
* path is found.
*
* Once the file is open, {@link #currentFSOutStream},
* {@link #currentOutStream}, and {@link #currentFilePath} are set
* appropriately.
*
* @param initial the target path
* @throws IOException thrown if the call to see if the file exists fails
*/
private void createLogFile(Path initial) throws IOException {
Path currentAttempt = initial;
// Start at 0 so that if the base filename exists, we start with the suffix
// ".1".
int id = 0;
while (true) {
// First try blindly creating the file. If we fail, it either means
// the file exists, or the operation actually failed. We do it this way
// because if we check whether the file exists, it might still be created
// by the time we try to create it. Creating first works like a
// test-and-set.
try {
currentFSOutStream = fileSystem.create(currentAttempt, false);
currentOutStream = new PrintStream(currentFSOutStream, true,
StandardCharsets.UTF_8.name());
currentFilePath = currentAttempt;
break;
} catch (IOException ex) {
// Now we can check to see if the file exists to know why we failed
if (fileSystem.exists(currentAttempt)) {
id = getNextIdToTry(initial, id);
currentAttempt = new Path(initial.toString() + "." + id);
} else {
throw ex;
}
}
}
} | 3.68 |
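The create-then-check pattern in createLogFile above (create first, treat failure as "name taken", then retry with an incrementing suffix) can be sketched with plain java.nio.file instead of the Hadoop FileSystem API. This is only an illustration: it collapses the exists() check into catching FileAlreadyExistsException, and the file name is hypothetical.

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class CreateWithSuffixSketch {
    /** Creates "base", or "base.1", "base.2", ... until creation succeeds. */
    static Path createUnique(Path base) throws IOException {
        Path attempt = base;
        int id = 0;
        while (true) {
            try {
                // Create first; failure tells us the name is already taken (test-and-set).
                return Files.createFile(attempt);
            } catch (FileAlreadyExistsException ex) {
                id++;
                attempt = base.resolveSibling(base.getFileName() + "." + id);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path p = createUnique(Paths.get("metrics.log"));   // hypothetical file name
        System.out.println("created " + p);
    }
}
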
zxing_ResultPoint_orderBestPatterns | /**
* Orders an array of three ResultPoints in an order [A,B,C] such that AB is less than AC
* and BC is less than AC, and the angle between BC and BA is less than 180 degrees.
*
* @param patterns array of three {@code ResultPoint} to order
*/
public static void orderBestPatterns(ResultPoint[] patterns) {
// Find distances between pattern centers
float zeroOneDistance = distance(patterns[0], patterns[1]);
float oneTwoDistance = distance(patterns[1], patterns[2]);
float zeroTwoDistance = distance(patterns[0], patterns[2]);
ResultPoint pointA;
ResultPoint pointB;
ResultPoint pointC;
// Assume one closest to other two is B; A and C will just be guesses at first
if (oneTwoDistance >= zeroOneDistance && oneTwoDistance >= zeroTwoDistance) {
pointB = patterns[0];
pointA = patterns[1];
pointC = patterns[2];
} else if (zeroTwoDistance >= oneTwoDistance && zeroTwoDistance >= zeroOneDistance) {
pointB = patterns[1];
pointA = patterns[0];
pointC = patterns[2];
} else {
pointB = patterns[2];
pointA = patterns[0];
pointC = patterns[1];
}
// Use cross product to figure out whether A and C are correct or flipped.
// This asks whether BC x BA has a positive z component, which is the arrangement
// we want for A, B, C. If it's negative, then we've got it flipped around and
// should swap A and C.
if (crossProductZ(pointA, pointB, pointC) < 0.0f) {
ResultPoint temp = pointA;
pointA = pointC;
pointC = temp;
}
patterns[0] = pointA;
patterns[1] = pointB;
patterns[2] = pointC;
} | 3.68 |
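The crossProductZ helper used by orderBestPatterns is not shown in the snippet; the following standalone sketch shows the z-component test the comment describes (BC x BA), with made-up coordinates. The exact helper in zxing may differ in form.

public class CrossProductSketch {
    // z-component of (C - B) x (A - B); positive means the A, B, C arrangement is kept.
    static float crossProductZ(float ax, float ay, float bx, float by, float cx, float cy) {
        return (cx - bx) * (ay - by) - (cy - by) * (ax - bx);
    }

    public static void main(String[] args) {
        // B at the corner of an L-shape, A above it, C to its right.
        System.out.println(crossProductZ(0, 1, 0, 0, 1, 0)); // positive: order is kept
        System.out.println(crossProductZ(1, 0, 0, 0, 0, 1)); // negative: A and C would be swapped
    }
}
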
hbase_BucketCache_finalize | /**
* Needed mostly for UTs that might run in the same VM and create different BucketCache instances
* on different UT methods.
*/
@Override
protected void finalize() {
if (cachePersister != null && !cachePersister.isInterrupted()) {
cachePersister.interrupt();
}
} | 3.68 |
hadoop_OBSInputStream_seekInStream | /**
* Adjust the stream to a specific position.
*
* @param targetPos target seek position
* @throws IOException on any failure to seek
*/
private void seekInStream(final long targetPos) throws IOException {
checkNotClosed();
if (wrappedStream == null) {
return;
}
// compute how much more to skip
long diff = targetPos - streamCurrentPos;
if (diff > 0) {
// forward seek -this is where data can be skipped
int available = wrappedStream.available();
// always seek at least as far as what is available
long forwardSeekRange = Math.max(readAheadRange, available);
// work out how much is actually left in the stream
// then choose whichever comes first: the range or the EOF
long remainingInCurrentRequest = remainingInCurrentRequest();
long forwardSeekLimit = Math.min(remainingInCurrentRequest,
forwardSeekRange);
boolean skipForward = remainingInCurrentRequest > 0
&& diff <= forwardSeekLimit;
if (skipForward) {
// the forward seek range is within the limits
LOG.debug("Forward seek on {}, of {} bytes", uri, diff);
long skippedOnce = wrappedStream.skip(diff);
while (diff > 0 && skippedOnce > 0) {
streamCurrentPos += skippedOnce;
diff -= skippedOnce;
incrementBytesRead(skippedOnce);
skippedOnce = wrappedStream.skip(diff);
}
if (streamCurrentPos == targetPos) {
// all is well
return;
} else {
// log a warning; continue to attempt to re-open
LOG.info("Failed to seek on {} to {}. Current position {}",
uri, targetPos, streamCurrentPos);
}
}
} else if (diff == 0 && remainingInCurrentRequest() > 0) {
// targetPos == streamCurrentPos
// if there is data left in the stream, keep going
return;
}
// if the code reaches here, the stream needs to be reopened.
// close the stream; if read the object will be opened at the
// new streamCurrentPos
closeStream("seekInStream()", this.contentRangeFinish);
streamCurrentPos = targetPos;
} | 3.68 |
hbase_RegionNormalizerManager_getSkippedCount | /**
* Retrieve a count of the number of times plans of type {@code type} were submitted but skipped.
* @param type type of plan for which skipped count is to be returned
*/
public long getSkippedCount(NormalizationPlan.PlanType type) {
// TODO: this appears to be used only for testing.
return worker == null ? 0 : worker.getSkippedCount(type);
} | 3.68 |
flink_KeyedStream_min | /**
* Applies an aggregation that gives the current minimum of the data stream at the given field
* expression by the given key. An independent aggregate is kept per key. A field expression is
* either the name of a public field or a getter method with parentheses of the {@link
* DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code
* "field1.fieldxy" }.
*
* @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public)
* field on which to perform the aggregation. Additionally, a dot can be used to drill down
* into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in
* case of a basic type (which is considered as having only one field).
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> min(String field) {
return aggregate(
new ComparableAggregator<>(
field,
getType(),
AggregationFunction.AggregationType.MIN,
false,
getExecutionConfig()));
} | 3.68 |
hbase_CreateStoreFileWriterParams_compression | /**
* Set the compression algorithm to use
*/
public CreateStoreFileWriterParams compression(Compression.Algorithm compression) {
this.compression = compression;
return this;
} | 3.68 |
hadoop_StagingCommitter_commitTaskInternal | /**
* Commit the task by uploading all created files and then
* writing a pending entry for them.
* @param context task context
* @param taskOutput list of files from the output
* @param commitContext commit context
* @return number of uploads committed.
* @throws IOException IO Failures.
*/
protected int commitTaskInternal(final TaskAttemptContext context,
List<? extends FileStatus> taskOutput,
CommitContext commitContext)
throws IOException {
LOG.debug("{}: commitTaskInternal", getRole());
Configuration conf = context.getConfiguration();
final Path attemptPath = getTaskAttemptPath(context);
FileSystem attemptFS = getTaskAttemptFilesystem(context);
LOG.debug("{}: attempt path is {}", getRole(), attemptPath);
// add the commits file to the wrapped committer's task attempt location.
Path commitsAttemptPath = wrappedCommitter.getTaskAttemptPath(context);
FileSystem commitsFS = commitsAttemptPath.getFileSystem(conf);
// keep track of unfinished commits in case one fails. if something fails,
// we will try to abort the ones that had already succeeded.
int commitCount = taskOutput.size();
final Queue<SinglePendingCommit> commits = new ConcurrentLinkedQueue<>();
LOG.info("{}: uploading from staging directory to S3 {}", getRole(),
attemptPath);
LOG.info("{}: Saving pending data information to {}",
getRole(), commitsAttemptPath);
if (taskOutput.isEmpty()) {
// there is nothing to write. needsTaskCommit() should have caught
// this, so warn that there is some kind of problem in the protocol.
LOG.warn("{}: No files to commit", getRole());
} else {
boolean threw = true;
// before the uploads, report some progress
context.progress();
PendingSet pendingCommits = new PendingSet(commitCount);
pendingCommits.putExtraData(TASK_ATTEMPT_ID,
context.getTaskAttemptID().toString());
try {
TaskPool.foreach(taskOutput)
.stopOnFailure()
.suppressExceptions(false)
.executeWith(commitContext.getOuterSubmitter())
.run(stat -> {
Path path = stat.getPath();
File localFile = new File(path.toUri().getPath());
String relative = Paths.getRelativePath(attemptPath, path);
String partition = Paths.getPartition(relative);
String key = getFinalKey(relative, context);
Path destPath = getDestS3AFS().keyToQualifiedPath(key);
SinglePendingCommit commit = getCommitOperations()
.uploadFileToPendingCommit(
localFile,
destPath,
partition,
uploadPartSize,
context);
LOG.debug("{}: adding pending commit {}", getRole(), commit);
commits.add(commit);
});
for (SinglePendingCommit commit : commits) {
pendingCommits.add(commit);
}
// maybe add in the IOStatistics of the thread
if (commitContext.isCollectIOStatistics()) {
pendingCommits.getIOStatistics().aggregate(
commitContext.getIOStatisticsContext()
.getIOStatistics());
}
// save the data
// overwrite any existing file, so whichever task attempt
// committed last wins.
LOG.debug("Saving {} pending commit(s)) to file {}",
pendingCommits.size(),
commitsAttemptPath);
pendingCommits.save(commitsFS, commitsAttemptPath,
commitContext.getPendingSetSerializer());
threw = false;
} finally {
if (threw) {
LOG.error(
"{}: Exception during commit process, aborting {} commit(s)",
getRole(), commits.size());
try(DurationInfo ignored = new DurationInfo(LOG,
"Aborting %s uploads", commits.size())) {
TaskPool.foreach(commits)
.suppressExceptions()
.executeWith(commitContext.getOuterSubmitter())
.run(commitContext::abortSingleCommit);
}
deleteTaskAttemptPathQuietly(context);
}
}
// always purge attempt information at this point.
Paths.clearTempFolderInfo(context.getTaskAttemptID());
}
LOG.debug("Committing wrapped task");
wrappedCommitter.commitTask(context);
LOG.debug("Cleaning up attempt dir {}", attemptPath);
attemptFS.delete(attemptPath, true);
return commits.size();
} | 3.68 |
framework_GenericFontIcon_getFontFamily | /*
* (non-Javadoc)
*
* @see com.vaadin.server.FontIcon#getFontFamily()
*/
@Override
public String getFontFamily() {
return fontFamily;
} | 3.68 |
flink_ExecutionConfig_enableAutoGeneratedUIDs | /**
* Enables the Flink runtime to auto-generate UID's for operators.
*
* @see #disableAutoGeneratedUIDs()
*/
public void enableAutoGeneratedUIDs() {
setAutoGeneratedUids(true);
} | 3.68 |
hudi_MiniBatchHandle_finalizeWrite | /**
* Finalize the write of one mini-batch. Usually these mini-batches
* come from one checkpoint interval. The file handle may roll over to a new name
* if the name conflicts, giving a chance to clean up the intermediate file.
*/
default void finalizeWrite() {
} | 3.68 |
flink_ResourceCounter_empty | /**
* Creates an empty resource counter.
*
* @return empty resource counter
*/
public static ResourceCounter empty() {
return new ResourceCounter(Collections.emptyMap());
} | 3.68 |
flink_BlobKey_addToMessageDigest | /**
* Adds the BLOB key to the given {@link MessageDigest}.
*
* @param md the message digest to add the BLOB key to
*/
public void addToMessageDigest(MessageDigest md) {
md.update(this.key);
} | 3.68 |
hadoop_TaskAttemptContextImpl_getStatus | /**
* Get the last set status message.
* @return the current status message
*/
public String getStatus() {
return status;
} | 3.68 |
pulsar_BrokerService_unloadDeletedReplNamespace | /**
* Unloads the namespace bundles if the local cluster is not part of the namespace's replication-cluster list.
* So, a broker that owns the bundle but doesn't receive the zk-watch will still unload the namespace.
* @param data
* @param namespace
*/
private void unloadDeletedReplNamespace(Policies data, NamespaceName namespace) {
if (!namespace.isGlobal()) {
return;
}
final String localCluster = this.pulsar.getConfiguration().getClusterName();
if (!data.replication_clusters.contains(localCluster)) {
pulsar().getNamespaceService().getNamespaceBundleFactory()
.getBundlesAsync(namespace).thenAccept(bundles -> {
bundles.getBundles().forEach(bundle -> {
pulsar.getNamespaceService().isNamespaceBundleOwned(bundle).thenAccept(isExist -> {
if (isExist) {
this.pulsar().getExecutor().execute(() -> {
try {
pulsar().getAdminClient().namespaces().unloadNamespaceBundle(namespace.toString(),
bundle.getBundleRange());
} catch (Exception e) {
log.error("Failed to unload namespace-bundle {}-{} that not owned by {}, {}",
namespace.toString(), bundle.toString(), localCluster, e.getMessage());
}
});
}
});
});
});
}
} | 3.68 |
hbase_MemoryBoundedLogMessageBuffer_add | /**
* Append the given message to this buffer, automatically evicting older messages until the
* desired memory limit is achieved.
*/
public synchronized void add(String messageText) {
LogMessage message = new LogMessage(messageText, EnvironmentEdgeManager.currentTime());
usage += message.estimateHeapUsage();
messages.add(message);
while (usage > maxSizeBytes) {
LogMessage removed = messages.remove();
usage -= removed.estimateHeapUsage();
assert usage >= 0;
}
} | 3.68 |
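The eviction loop in MemoryBoundedLogMessageBuffer.add above can be illustrated with a self-contained sketch that uses an ArrayDeque and a crude size estimate (about two bytes per char) in place of LogMessage.estimateHeapUsage(); the buffer size and messages are made up.

import java.util.ArrayDeque;
import java.util.Deque;

public class BoundedMessageBufferSketch {
    private final Deque<String> messages = new ArrayDeque<>();
    private long usage;
    private final long maxSizeBytes;

    BoundedMessageBufferSketch(long maxSizeBytes) {
        this.maxSizeBytes = maxSizeBytes;
    }

    // Rough stand-in for estimateHeapUsage(): ~2 bytes per char, ignoring object headers.
    private static long estimate(String s) {
        return 2L * s.length();
    }

    synchronized void add(String messageText) {
        usage += estimate(messageText);
        messages.addLast(messageText);
        while (usage > maxSizeBytes) {          // evict oldest entries until under budget
            usage -= estimate(messages.removeFirst());
        }
    }

    public static void main(String[] args) {
        BoundedMessageBufferSketch buf = new BoundedMessageBufferSketch(40);
        for (int i = 0; i < 10; i++) {
            buf.add("message-" + i);            // older messages fall off as the limit is hit
        }
        System.out.println(buf.messages);       // only the most recent few remain
    }
}
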
flink_HiveParserStorageFormat_fillStorageFormat | /**
* Returns true if the passed token was a storage format token and thus was processed
* accordingly.
*/
public boolean fillStorageFormat(HiveParserASTNode child) throws SemanticException {
switch (child.getToken().getType()) {
case HiveASTParser.TOK_TABLEFILEFORMAT:
if (child.getChildCount() < 2) {
throw new SemanticException(
"Incomplete specification of File Format. "
+ "You must provide InputFormat, OutputFormat.");
}
inputFormat =
HiveParserBaseSemanticAnalyzer.unescapeSQLString(
child.getChild(0).getText());
outputFormat =
HiveParserBaseSemanticAnalyzer.unescapeSQLString(
child.getChild(1).getText());
if (child.getChildCount() == 3) {
serde =
HiveParserBaseSemanticAnalyzer.unescapeSQLString(
child.getChild(2).getText());
}
break;
case HiveASTParser.TOK_STORAGEHANDLER:
storageHandler =
HiveParserBaseSemanticAnalyzer.unescapeSQLString(
child.getChild(0).getText());
if (child.getChildCount() == 2) {
HiveParserBaseSemanticAnalyzer.readProps(
(HiveParserASTNode) (child.getChild(1).getChild(0)), serdeProps);
}
break;
case HiveASTParser.TOK_FILEFORMAT_GENERIC:
HiveParserASTNode grandChild = (HiveParserASTNode) child.getChild(0);
genericName = (grandChild == null ? "" : grandChild.getText()).trim().toUpperCase();
processStorageFormat(genericName);
break;
default:
// token was not a storage format token
return false;
}
return true;
} | 3.68 |
incubator-hugegraph-toolchain_HugeGraphLoader_stopThenShutdown | /**
* TODO: How to distinguish load task finished normally or abnormally
*/
private synchronized void stopThenShutdown() {
if (this.context.closed()) {
return;
}
LOG.info("Stop loading then shutdown HugeGraphLoader");
try {
this.context.stopLoading();
if (this.manager != null) {
// Wait all insert tasks stopped before exit
this.manager.waitFinished();
this.manager.shutdown();
}
} finally {
try {
this.context.unsetLoadingMode();
} finally {
this.context.close();
}
}
} | 3.68 |
hibernate-validator_ScriptEngineScriptEvaluator_engineAllowsParallelAccessFromMultipleThreads | /**
* Checks whether the given engine is thread-safe or not.
*
* @return true if the given engine is thread-safe, false otherwise.
*/
private boolean engineAllowsParallelAccessFromMultipleThreads() {
String threadingType = (String) engine.getFactory().getParameter( "THREADING" );
return "THREAD-ISOLATED".equals( threadingType ) || "STATELESS".equals( threadingType );
} | 3.68 |
zxing_ByteMatrix_getArray | /**
* @return an internal representation as bytes, in row-major order. array[y][x] represents point (x,y)
*/
public byte[][] getArray() {
return bytes;
} | 3.68 |
rocketmq-connect_BrokerBasedLog_prepare | /**
* Preparation before startup
*/
private void prepare() {
Set<String> consumerGroupSet = ConnectUtil.fetchAllConsumerGroupList(workerConfig);
if (!consumerGroupSet.contains(groupName)) {
log.info("Try to create group: {}!", groupName);
ConnectUtil.createSubGroup(workerConfig, groupName);
}
if (!ConnectUtil.isTopicExist(workerConfig, topicName)) {
log.info("Try to create store topic: {}!", topicName);
TopicConfig topicConfig = new TopicConfig(topicName, 1, 1, PermName.PERM_READ | PermName.PERM_WRITE);
ConnectUtil.createTopic(workerConfig, topicConfig);
}
} | 3.68 |
framework_DesignContext_getComponentByCaption | /**
* Returns a component having the specified caption. If no component is
* found, returns null.
*
* @param caption
* The caption of the component
* @return a component whose caption equals the caption given as a parameter
*/
public Component getComponentByCaption(String caption) {
return captionToComponent.get(caption);
} | 3.68 |
pulsar_BundleData_update | /**
* Update the historical data for this bundle.
*
* @param newSample
* The bundle stats to update this data with.
*/
public void update(final NamespaceBundleStats newSample) {
shortTermData.update(newSample);
longTermData.update(newSample);
this.topics = (int) newSample.topics;
} | 3.68 |
flink_IterationHeadTask_initBackChannel | /**
* The iteration head prepares the backchannel: it allocates memory, instantiates a {@link
* BlockingBackChannel} and hands it to the iteration tail via a {@link Broker} singleton.
*/
private BlockingBackChannel initBackChannel() throws Exception {
/* get the size of the memory available to the backchannel */
int backChannelMemoryPages =
getMemoryManager().computeNumberOfPages(this.config.getRelativeBackChannelMemory());
/* allocate the memory available to the backchannel */
List<MemorySegment> segments = new ArrayList<MemorySegment>();
int segmentSize = getMemoryManager().getPageSize();
getMemoryManager().allocatePages(this, segments, backChannelMemoryPages);
/* instantiate the backchannel */
BlockingBackChannel backChannel =
new BlockingBackChannel(
new SerializedUpdateBuffer(segments, segmentSize, getIOManager()));
/* hand the backchannel over to the iteration tail */
Broker<BlockingBackChannel> broker = BlockingBackChannelBroker.instance();
broker.handIn(brokerKey(), backChannel);
return backChannel;
} | 3.68 |
hadoop_MountTableRefresherThread_run | /**
* Refresh mount table cache of local and remote routers. Local and remote
* routers will be refreshed differently. Let's understand what are the
* local and remote routers and refresh will be done differently on these
* routers. Suppose there are three routers R1, R2 and R3, and a user wants to add
* a new mount table entry. The user connects to only one router, not all of them.
* Suppose the user connects to R1 and adds the mount table entry through the
* API or CLI. In this context R1 is the local router, and R2 and R3 are remote
* routers. Because the add is invoked on R1, R1 updates its
* cache locally and needs no RPC call, but it makes RPC calls to
* update the cache on R2 and R3.
*/
@Override
public void run() {
try {
SecurityUtil.doAsLoginUser(() -> {
if (UserGroupInformation.isSecurityEnabled()) {
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
}
RefreshMountTableEntriesResponse refreshMountTableEntries = manager
.refreshMountTableEntries(
RefreshMountTableEntriesRequest.newInstance());
success = refreshMountTableEntries.getResult();
return true;
});
} catch (IOException e) {
LOG.error("Failed to refresh mount table entries cache at router {}",
adminAddress, e);
} finally {
countDownLatch.countDown();
}
} | 3.68 |
streampipes_PipelineManager_startPipeline | /**
* Starts all processing elements of the pipeline with the pipelineId
*
* @param pipelineId of pipeline to be started
* @return pipeline status of the start operation
*/
public static PipelineOperationStatus startPipeline(String pipelineId) {
Pipeline pipeline = getPipeline(pipelineId);
return Operations.startPipeline(pipeline);
} | 3.68 |
hadoop_LocalCacheCleaner_addResources | /**
* Adds resources from the passed LocalResourceTracker that are candidates for
* deletion from the cache.
*
* @param newTracker add all resources being tracked by the passed
* LocalResourcesTracker to the LocalCacheCleaner.
*/
public void addResources(LocalResourcesTracker newTracker) {
for (LocalizedResource resource : newTracker) {
currentSize += resource.getSize();
if (resource.getRefCount() > 0) {
// Do not delete resources that are still in use
continue;
}
resourceMap.put(resource, newTracker);
}
} | 3.68 |
framework_TabSheet_areTabsHidden | /**
* Are the tab selection parts ("tabs") hidden?
*
* @return true if the tabs are hidden in the UI
* @deprecated as of 7.5, use {@link #isTabsVisible()} instead
*/
@Deprecated
public boolean areTabsHidden() {
return !isTabsVisible();
} | 3.68 |
hbase_MasterProcedureScheduler_wakeNamespaceExclusiveLock | /**
* Wake the procedures waiting for the specified namespace
* @see #waitNamespaceExclusiveLock(Procedure,String)
* @param procedure the procedure releasing the lock
* @param namespace the namespace that has the exclusive lock
*/
public void wakeNamespaceExclusiveLock(final Procedure<?> procedure, final String namespace) {
schedLock();
try {
final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace);
final LockAndQueue systemNamespaceTableLock =
locking.getTableLock(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME);
int waitingCount = 0;
if (namespaceLock.releaseExclusiveLock(procedure)) {
waitingCount += wakeWaitingProcedures(namespaceLock);
}
if (systemNamespaceTableLock.releaseSharedLock()) {
addToRunQueue(tableRunQueue,
getTableQueue(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME),
() -> procedure + " released namespace exclusive lock");
waitingCount += wakeWaitingProcedures(systemNamespaceTableLock);
}
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.68 |
hbase_MasterObserver_preMergeRegions | /**
* Called before merge regions request.
* @param ctx coprocessor environment
* @param regionsToMerge regions to be merged
*/
default void preMergeRegions(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo[] regionsToMerge) throws IOException {
} | 3.68 |
AreaShop_Value_get | /**
* Get the stored content.
* @return The stored content
*/
public T get() {
return content;
} | 3.68 |
hudi_HoodieIndexUtils_filterKeysFromFile | /**
* Given a list of row keys and one file, return only row keys existing in that file.
*
* @param filePath - File to filter keys from
* @param candidateRecordKeys - Candidate keys to filter
* @return List of pairs of candidate keys and positions that are available in the file
*/
public static List<Pair<String, Long>> filterKeysFromFile(Path filePath, List<String> candidateRecordKeys,
Configuration configuration) throws HoodieIndexException {
ValidationUtils.checkArgument(FSUtils.isBaseFile(filePath));
List<Pair<String, Long>> foundRecordKeys = new ArrayList<>();
try (HoodieFileReader fileReader = HoodieFileReaderFactory.getReaderFactory(HoodieRecordType.AVRO)
.getFileReader(configuration, filePath)) {
// Load all rowKeys from the file, to double-confirm
if (!candidateRecordKeys.isEmpty()) {
HoodieTimer timer = HoodieTimer.start();
Set<Pair<String, Long>> fileRowKeys = fileReader.filterRowKeys(candidateRecordKeys.stream().collect(Collectors.toSet()));
foundRecordKeys.addAll(fileRowKeys);
LOG.info(String.format("Checked keys against file %s, in %d ms. #candidates (%d) #found (%d)", filePath,
timer.endTimer(), candidateRecordKeys.size(), foundRecordKeys.size()));
if (LOG.isDebugEnabled()) {
LOG.debug("Keys matching for file " + filePath + " => " + foundRecordKeys);
}
}
} catch (Exception e) {
throw new HoodieIndexException("Error checking candidate keys against file.", e);
}
return foundRecordKeys;
} | 3.68 |
dubbo_ProtobufTypeBuilder_isSimplePropertySettingMethod | /**
* judge custom type or primitive type property<br/>
* 1. proto3 grammar ex: string name = 1 <br/>
* 2. proto3 grammar ex: optional string name =1 <br/>
* generated setting method ex: setNameValue(String name);
*
* @param method
* @return
*/
private boolean isSimplePropertySettingMethod(Method method) {
String methodName = method.getName();
Class<?>[] types = method.getParameterTypes();
if (!methodName.startsWith("set") || types.length != 1) {
return false;
}
// filter general setting method
// 1. - setUnknownFields( com.google.protobuf.UnknownFieldSet unknownFields)
// 2. - setField(com.google.protobuf.Descriptors.FieldDescriptor field,java.lang.Object value)
// 3. - setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field,int index,java.lang.Object value)
if ("setField".equals(methodName) && types[0].equals(Descriptors.FieldDescriptor.class)
|| "setUnknownFields".equals(methodName) && types[0].equals(UnknownFieldSet.class)
|| "setRepeatedField".equals(methodName) && types[0].equals(Descriptors.FieldDescriptor.class)) {
return false;
}
// String property has two setting method.
// skip setXXXBytes(com.google.protobuf.ByteString value)
// parse setXXX(String string)
if (methodName.endsWith("Bytes") && types[0].equals(ByteString.class)) {
return false;
}
// Protobuf property has two setting method.
// skip setXXX(com.google.protobuf.Builder value)
// parse setXXX(com.google.protobuf.Message value)
if (GeneratedMessageV3.Builder.class.isAssignableFrom(types[0])) {
return false;
}
// Enum property has two setting method.
// skip setXXXValue(int value)
// parse setXXX(SomeEnum value)
return !methodName.endsWith("Value") || types[0] != int.class;
} | 3.68 |
zxing_MatrixUtil_buildMatrix | // Build 2D matrix of QR Code from "dataBits" with "ecLevel", "version" and "getMaskPattern". On
// success, store the result in "matrix"; on failure, throw WriterException.
static void buildMatrix(BitArray dataBits,
ErrorCorrectionLevel ecLevel,
Version version,
int maskPattern,
ByteMatrix matrix) throws WriterException {
clearMatrix(matrix);
embedBasicPatterns(version, matrix);
// Type information appear with any version.
embedTypeInfo(ecLevel, maskPattern, matrix);
// Version info appear if version >= 7.
maybeEmbedVersionInfo(version, matrix);
// Data should be embedded at end.
embedDataBits(dataBits, maskPattern, matrix);
} | 3.68 |
flink_ConfluentRegistryAvroDeserializationSchema_forSpecific | /**
* Creates {@link AvroDeserializationSchema} that produces classes that were generated from Avro
* schema and looks up the writer schema in the Confluent Schema Registry.
*
* @param tClass class of record to be produced
* @param url URL of schema registry to connect
* @param identityMapCapacity maximum number of cached schema versions
* @param registryConfigs map with additional schema registry configs (for example SSL
* properties)
* @return deserialization schema that produces instances of the given record class
*/
public static <T extends SpecificRecord>
ConfluentRegistryAvroDeserializationSchema<T> forSpecific(
Class<T> tClass,
String url,
int identityMapCapacity,
@Nullable Map<String, ?> registryConfigs) {
return new ConfluentRegistryAvroDeserializationSchema<>(
tClass,
null,
new CachedSchemaCoderProvider(null, url, identityMapCapacity, registryConfigs));
} | 3.68 |
flink_CliFrontend_validateAndGetActiveCommandLine | /**
* Gets the custom command-line for the arguments.
*
* @param commandLine The input to the command-line.
* @return custom command-line which is active (may only be one at a time)
*/
public CustomCommandLine validateAndGetActiveCommandLine(CommandLine commandLine) {
LOG.debug("Custom commandlines: {}", customCommandLines);
for (CustomCommandLine cli : customCommandLines) {
LOG.debug(
"Checking custom commandline {}, isActive: {}", cli, cli.isActive(commandLine));
if (cli.isActive(commandLine)) {
return cli;
}
}
throw new IllegalStateException("No valid command-line found.");
} | 3.68 |
hbase_Bytes_putAsShort | /**
* Put an int value out as a short at the specified byte array position. Only the lower 2 bytes of
* the int will be put into the array. The caller of the API needs to make sure they will not
* lose the value by doing so. This is useful to store an unsigned short which is represented as
* an int in other parts.
* @param bytes the byte array
* @param offset position in the array
* @param val value to write out
* @return incremented offset
* @throws IllegalArgumentException if the byte array given doesn't have enough room at the offset
* specified.
*/
public static int putAsShort(byte[] bytes, int offset, int val) {
if (bytes.length - offset < SIZEOF_SHORT) {
throw new IllegalArgumentException("Not enough room to put a short at" + " offset " + offset
+ " in a " + bytes.length + " byte array");
}
bytes[offset + 1] = (byte) val;
val >>= 8;
bytes[offset] = (byte) val;
return offset + SIZEOF_SHORT;
} | 3.68 |
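A tiny standalone sketch of the same big-endian, two-byte layout that putAsShort writes, recomputed inline rather than calling the HBase Bytes class; the offset and value are arbitrary.

public class PutAsShortSketch {
    public static void main(String[] args) {
        byte[] bytes = new byte[4];
        int offset = 1;
        int val = 0xFFF1;                     // an "unsigned short" held in an int
        // Same big-endian layout as putAsShort: high byte first, low byte second.
        bytes[offset] = (byte) (val >> 8);    // 0xFF
        bytes[offset + 1] = (byte) val;       // 0xF1
        // Reading it back as an unsigned value recovers 0xFFF1.
        int back = ((bytes[offset] & 0xFF) << 8) | (bytes[offset + 1] & 0xFF);
        System.out.printf("0x%04X%n", back);  // 0xFFF1
    }
}
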
flink_HadoopMapFunction_writeObject | /**
* Custom serialization methods.
*
* @see <a
* href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
*/
private void writeObject(final ObjectOutputStream out) throws IOException {
out.writeObject(mapper.getClass());
jobConf.write(out);
} | 3.68 |
hadoop_YarnWebServiceUtils_getNodeInfoFromRMWebService | /**
* Utility function to get NodeInfo by calling RM WebService.
* @param conf the configuration
* @param nodeId the nodeId
* @return a JSONObject which contains the NodeInfo
* @throws ClientHandlerException if there is an error
* processing the response.
* @throws UniformInterfaceException if the response status
* is 204 (No Content).
*/
public static JSONObject getNodeInfoFromRMWebService(Configuration conf,
String nodeId) throws ClientHandlerException,
UniformInterfaceException {
try {
return WebAppUtils.execOnActiveRM(conf,
YarnWebServiceUtils::getNodeInfoFromRM, nodeId);
} catch (Exception e) {
if (e instanceof ClientHandlerException) {
throw ((ClientHandlerException) e);
} else if (e instanceof UniformInterfaceException) {
throw ((UniformInterfaceException) e);
} else {
throw new RuntimeException(e);
}
}
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withAtomicLongGauge | /**
* Add a gauge statistic to dynamically return the
* latest value of the source.
* @param key key of this statistic
* @param source atomic long gauge
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicLongGauge(String key,
AtomicLong source) {
withLongFunctionGauge(key, s -> source.get());
return this;
} | 3.68 |
hadoop_FederationStateStoreFacade_deregisterSubCluster | /**
* Deregister subCluster, updating the subCluster state to
* SC_LOST, SC_DECOMMISSIONED, etc.
*
* @param subClusterId subClusterId.
* @param subClusterState The state of the subCluster to be updated.
* @throws YarnException yarn exception.
* @return If Deregister subCluster is successful, return true, otherwise, return false.
*/
public boolean deregisterSubCluster(SubClusterId subClusterId,
SubClusterState subClusterState) throws YarnException {
SubClusterDeregisterRequest deregisterRequest =
SubClusterDeregisterRequest.newInstance(subClusterId, subClusterState);
SubClusterDeregisterResponse response = stateStore.deregisterSubCluster(deregisterRequest);
// If the response is not empty, deregisterSubCluster is successful.
if (response != null) {
return true;
}
return false;
} | 3.68 |
flink_PushWatermarkIntoTableSourceScanRuleBase_getNewScan | /**
* It uses the input watermark expression to generate the {@link WatermarkGeneratorSupplier}.
* After the {@link WatermarkStrategy} is pushed into the scan, it will build a new scan.
* However, when {@link FlinkLogicalWatermarkAssigner} is the parent of the {@link
* FlinkLogicalTableSourceScan}, it should modify the rowtime type to keep the type of the plan
* consistent. In other cases, it just keeps the data type of the scan the same as before and
* leaves the work to the rewriting of the projection.
*
* <p>NOTES: the row type of the scan is not always the same as the watermark assigner's, because
* the scan will not add the rowtime column into the row when pushing the watermark assigner
* into the scan. In some cases, a query may have computed columns defined on the rowtime column. If
* the type of the rowtime (with time attribute) is modified, it will also influence the type of
* the computed column. Therefore, if the watermark assigner is not the parent of the scan, the
* type of the scan is set as before and the work is left to the projection.
*/
protected FlinkLogicalTableSourceScan getNewScan(
FlinkLogicalWatermarkAssigner watermarkAssigner,
RexNode watermarkExpr,
FlinkLogicalTableSourceScan scan,
TableConfig tableConfig,
boolean useWatermarkAssignerRowType) {
final TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
final DynamicTableSource newDynamicTableSource = tableSourceTable.tableSource().copy();
final boolean isSourceWatermark =
newDynamicTableSource instanceof SupportsSourceWatermark
&& hasSourceWatermarkDeclaration(watermarkExpr);
final RelDataType newType;
if (useWatermarkAssignerRowType) {
// project is trivial and set rowtime type in scan
newType = watermarkAssigner.getRowType();
} else {
// project add/delete columns and set the rowtime column type in project
newType = scan.getRowType();
}
final RowType producedType = (RowType) FlinkTypeFactory.toLogicalType(newType);
final SourceAbilityContext abilityContext = SourceAbilityContext.from(scan);
final SourceAbilitySpec abilitySpec;
if (isSourceWatermark) {
final SourceWatermarkSpec sourceWatermarkSpec =
new SourceWatermarkSpec(true, producedType);
sourceWatermarkSpec.apply(newDynamicTableSource, abilityContext);
abilitySpec = sourceWatermarkSpec;
} else {
final Duration globalIdleTimeout =
tableConfig.get(ExecutionConfigOptions.TABLE_EXEC_SOURCE_IDLE_TIMEOUT);
final long globalIdleTimeoutMillis;
if (!globalIdleTimeout.isZero() && !globalIdleTimeout.isNegative()) {
globalIdleTimeoutMillis = globalIdleTimeout.toMillis();
} else {
globalIdleTimeoutMillis = -1L;
}
Optional<RelHint> optionsHintOptional =
scan.getHints().stream()
.filter(
relHint ->
relHint.hintName.equalsIgnoreCase(
FlinkHints.HINT_NAME_OPTIONS))
.findFirst();
Configuration hintOptions =
optionsHintOptional
.map(relHint -> Configuration.fromMap(relHint.kvOptions))
.orElseGet(Configuration::new);
RelOptTable table = scan.getTable();
Configuration tableOptions =
Optional.of(table)
.filter(TableSourceTable.class::isInstance)
.map(
t -> {
Map<String, String> tableConfigs =
((TableSourceTable) t)
.contextResolvedTable()
.getResolvedTable()
.getOptions();
return Configuration.fromMap(tableConfigs);
})
.orElseGet(Configuration::new);
WatermarkParams watermarkParams = parseWatermarkParams(hintOptions, tableOptions);
final WatermarkPushDownSpec watermarkPushDownSpec =
new WatermarkPushDownSpec(
watermarkExpr, globalIdleTimeoutMillis, producedType, watermarkParams);
watermarkPushDownSpec.apply(newDynamicTableSource, abilityContext);
abilitySpec = watermarkPushDownSpec;
}
TableSourceTable newTableSourceTable =
tableSourceTable.copy(
newDynamicTableSource, newType, new SourceAbilitySpec[] {abilitySpec});
return FlinkLogicalTableSourceScan.create(
scan.getCluster(), scan.getHints(), newTableSourceTable);
} | 3.68 |
hbase_ClientIdGenerator_getIpAddressBytes | /**
* Returns some IPv4/IPv6 address available on the current machine that is up, not virtual and not
* a loopback address. Returns an empty array if none can be found or an error occurred.
*/
public static byte[] getIpAddressBytes() {
try {
return Addressing.getIpAddress().getAddress();
} catch (IOException ex) {
LOG.warn("Failed to get IP address bytes", ex);
}
return new byte[0];
} | 3.68 |
hbase_MobUtils_isMobFileExpired | /**
* Checks if the mob file is expired.
* @param column The descriptor of the current column family.
* @param current The current time.
* @param fileDate The date string parsed from the mob file name.
* @return True if the mob file is expired.
*/
public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current,
String fileDate) {
if (column.getMinVersions() > 0) {
return false;
}
long timeToLive = column.getTimeToLive();
if (Integer.MAX_VALUE == timeToLive) {
return false;
}
Date expireDate = new Date(current - timeToLive * 1000);
expireDate = new Date(expireDate.getYear(), expireDate.getMonth(), expireDate.getDate());
try {
Date date = parseDate(fileDate);
if (date.getTime() < expireDate.getTime()) {
return true;
}
} catch (ParseException e) {
LOG.warn("Failed to parse the date " + fileDate, e);
return false;
}
return false;
} | 3.68 |
framework_DragSourceExtension_getEffectAllowed | /**
* Returns the allowed effects for the current drag source element. Used to
* set client side {@code DataTransfer.effectAllowed} parameter for the drag
* event.
* <p>
* You can use different types of data to support dragging to different
* targets. Accepted types depend on the drop target and those can be
* platform specific. See
* https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API/Recommended_drag_types
* for examples on different types.
* <p>
* <em>NOTE: IE11 only supports type ' text', which can be set using
* {@link #setDataTransferText(String data)}</em>
*
* @return Effects that are allowed for this draggable element.
*/
public EffectAllowed getEffectAllowed() {
return getState(false).effectAllowed;
} | 3.68 |
flink_ProjectOperator_projectTuple6 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5>
ProjectOperator<T, Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType =
new TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes);
return new ProjectOperator<T, Tuple6<T0, T1, T2, T3, T4, T5>>(
this.ds, this.fieldIndexes, tType);
} | 3.68 |
flink_DefaultVertexParallelismAndInputInfosDecider_decideParallelismAndEvenlyDistributeSubpartitions | /**
* Decide parallelism and input infos, which will make the subpartitions be evenly distributed
* to downstream subtasks, such that different downstream subtasks consume roughly the same
* number of subpartitions.
*
* @param jobVertexId The job vertex id
* @param consumedResults The information of consumed blocking results
* @param initialParallelism The initial parallelism of the job vertex
* @param minParallelism the min parallelism
* @param maxParallelism the max parallelism
* @return the parallelism and vertex input infos
*/
private ParallelismAndInputInfos decideParallelismAndEvenlyDistributeSubpartitions(
JobVertexID jobVertexId,
List<BlockingResultInfo> consumedResults,
int initialParallelism,
int minParallelism,
int maxParallelism) {
checkArgument(!consumedResults.isEmpty());
int parallelism =
initialParallelism > 0
? initialParallelism
: decideParallelism(
jobVertexId, consumedResults, minParallelism, maxParallelism);
return new ParallelismAndInputInfos(
parallelism,
VertexInputInfoComputationUtils.computeVertexInputInfos(
parallelism, consumedResults, true));
} | 3.68 |
flink_MultipleParameterTool_getNumberOfParameters | /** Returns number of parameters in {@link ParameterTool}. */
@Override
public int getNumberOfParameters() {
return data.size();
} | 3.68 |
framework_LoadingIndicatorConfiguration_setFirstDelay | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.LoadingIndicator#setFirstDelay(int)
*/
@Override
public void setFirstDelay(int firstDelay) {
getState().firstDelay = firstDelay;
} | 3.68 |
hbase_ZNodeClearer_readMyEphemeralNodeOnDisk | /**
* read the content of znode file, expects a single line.
*/
public static String readMyEphemeralNodeOnDisk() throws IOException {
String fileName = getMyEphemeralNodeFileName();
if (fileName == null) {
throw new FileNotFoundException("No filename; set environment variable HBASE_ZNODE_FILE");
}
FileReader znodeFile = new FileReader(fileName);
BufferedReader br = null;
try {
br = new BufferedReader(znodeFile);
String file_content = br.readLine();
return file_content;
} finally {
if (br != null) br.close();
}
} | 3.68 |
flink_CsvReaderFormat_withIgnoreParseErrors | /**
* Returns a new {@code CsvReaderFormat} configured to ignore all parsing errors. All the other
* options are carried over directly from the subject of the method call.
*/
public CsvReaderFormat<T> withIgnoreParseErrors() {
return new CsvReaderFormat<>(
this.mapperFactory,
this.schemaGenerator,
this.rootType,
this.converter,
this.typeInformation,
true);
} | 3.68 |
pulsar_SSLContextValidatorEngine_validate | /**
* Validates TLS handshake up to TLSv1.2.
* TLSv1.3 has differences in the TLS handshake, as described in https://stackoverflow.com/a/62465859
*/
public static void validate(SSLEngineProvider clientSslEngineSupplier, SSLEngineProvider serverSslEngineSupplier)
throws SSLException {
SSLContextValidatorEngine clientEngine = new SSLContextValidatorEngine(clientSslEngineSupplier);
if (Arrays.stream(clientEngine.sslEngine.getEnabledProtocols()).anyMatch(s -> s.equals("TLSv1.3"))) {
throw new IllegalStateException("This validator doesn't support TLSv1.3");
}
SSLContextValidatorEngine serverEngine = new SSLContextValidatorEngine(serverSslEngineSupplier);
try {
clientEngine.beginHandshake();
serverEngine.beginHandshake();
while (!serverEngine.complete() || !clientEngine.complete()) {
clientEngine.handshake(serverEngine);
serverEngine.handshake(clientEngine);
}
} finally {
clientEngine.close();
serverEngine.close();
}
} | 3.68 |
framework_DefaultSQLGenerator_generateLimits | /**
* Generates the LIMIT and OFFSET clause.
*
* @param sb
* StringBuffer to which the clause is appended.
* @param offset
* Value for offset.
* @param pagelength
* Value for pagelength.
* @return StringBuffer with LIMIT and OFFSET clause added.
*/
protected StringBuffer generateLimits(StringBuffer sb, int offset,
int pagelength) {
sb.append(" LIMIT ").append(pagelength).append(" OFFSET ")
.append(offset);
return sb;
} | 3.68 |
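For example, with offset 100 and page length 25 the clause appended by generateLimits is " LIMIT 25 OFFSET 100". A standalone sketch of the same string building (the base query is hypothetical):

public class LimitClauseSketch {
    public static void main(String[] args) {
        StringBuffer sb = new StringBuffer("SELECT * FROM \"ITEM\"");  // hypothetical base query
        int offset = 100;
        int pagelength = 25;
        sb.append(" LIMIT ").append(pagelength).append(" OFFSET ").append(offset);
        System.out.println(sb);  // SELECT * FROM "ITEM" LIMIT 25 OFFSET 100
    }
}
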
flink_WindowedStream_evictor | /**
* Sets the {@code Evictor} that should be used to evict elements from a window before emission.
*
* <p>Note: When using an evictor window performance will degrade significantly, since
* incremental aggregation of window results cannot be used.
*/
@PublicEvolving
public WindowedStream<T, K, W> evictor(Evictor<? super T, ? super W> evictor) {
builder.evictor(evictor);
return this;
} | 3.68 |
shardingsphere-elasticjob_ShardingNode_getRunningNode | /**
* Get job running node.
*
* @param item sharding item
* @return job running node
*/
public static String getRunningNode(final int item) {
return String.format(RUNNING, item);
} | 3.68 |
flink_FlinkDatabaseMetaData_supportsMixedCaseQuotedIdentifiers | /** Flink SQL treats mixed-case quoted identifiers as case sensitive. */
@Override
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
return true;
} | 3.68 |
zxing_OneDReader_recordPattern | /**
* Records the size of successive runs of white and black pixels in a row, starting at a given point.
* The values are recorded in the given array, and the number of runs recorded is equal to the size
* of the array. If the row starts on a white pixel at the given start point, then the first count
* recorded is the run of white pixels starting from that point; likewise it is the count of a run
* of black pixels if the row begins on a black pixel at that point.
*
* @param row row to count from
* @param start offset into row to start at
* @param counters array into which to record counts
* @throws NotFoundException if counters cannot be filled entirely from row before running out
* of pixels
*/
protected static void recordPattern(BitArray row,
int start,
int[] counters) throws NotFoundException {
int numCounters = counters.length;
Arrays.fill(counters, 0, numCounters, 0);
int end = row.getSize();
if (start >= end) {
throw NotFoundException.getNotFoundInstance();
}
boolean isWhite = !row.get(start);
int counterPosition = 0;
int i = start;
while (i < end) {
if (row.get(i) != isWhite) {
counters[counterPosition]++;
} else {
if (++counterPosition == numCounters) {
break;
} else {
counters[counterPosition] = 1;
isWhite = !isWhite;
}
}
i++;
}
// If we read fully the last section of pixels and filled up our counters -- or filled
// the last counter but ran off the side of the image, OK. Otherwise, a problem.
if (!(counterPosition == numCounters || (counterPosition == numCounters - 1 && i == end))) {
throw NotFoundException.getNotFoundInstance();
}
} | 3.68 |
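The run-length loop in recordPattern above can be sketched over a plain boolean[] (true standing in for a black pixel) with a made-up row; the error handling for running past the end of the row is omitted here.

import java.util.Arrays;

public class RecordPatternSketch {
    // Records run lengths of alternating "white"/"black" values starting at start.
    static void recordPattern(boolean[] row, int start, int[] counters) {
        Arrays.fill(counters, 0);
        boolean isWhite = !row[start];
        int counterPosition = 0;
        for (int i = start; i < row.length; i++) {
            if (row[i] != isWhite) {
                counters[counterPosition]++;       // still inside the current run
            } else {
                if (++counterPosition == counters.length) {
                    break;                          // recorded as many runs as requested
                }
                counters[counterPosition] = 1;      // start a new run
                isWhite = !isWhite;
            }
        }
    }

    public static void main(String[] args) {
        // false = white, true = black: 2 white, 3 black, 1 white, 2 black
        boolean[] row = {false, false, true, true, true, false, true, true};
        int[] counters = new int[4];
        recordPattern(row, 0, counters);
        System.out.println(Arrays.toString(counters)); // [2, 3, 1, 2]
    }
}
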
flink_Tuple16_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
* @param f13 The value for field 13
* @param f14 The value for field 14
* @param f15 The value for field 15
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
} | 3.68 |
zxing_Detector_sampleGrid | /**
* Creates a BitMatrix by sampling the provided image.
* topLeft, topRight, bottomRight, and bottomLeft are the centers of the squares on the
* diagonal just outside the bull's eye.
*/
private BitMatrix sampleGrid(BitMatrix image,
ResultPoint topLeft,
ResultPoint topRight,
ResultPoint bottomRight,
ResultPoint bottomLeft) throws NotFoundException {
GridSampler sampler = GridSampler.getInstance();
int dimension = getDimension();
float low = dimension / 2.0f - nbCenterLayers;
float high = dimension / 2.0f + nbCenterLayers;
return sampler.sampleGrid(image,
dimension,
dimension,
low, low, // topleft
high, low, // topright
high, high, // bottomright
low, high, // bottomleft
topLeft.getX(), topLeft.getY(),
topRight.getX(), topRight.getY(),
bottomRight.getX(), bottomRight.getY(),
bottomLeft.getX(), bottomLeft.getY());
} | 3.68 |
hmily_HmilyRepositoryNode_getHmilyParticipantRootPath | /**
* Get hmily participant root path.
*
* @return hmily participant root path
*/
public String getHmilyParticipantRootPath() {
return Joiner.on("/").join("", ROOT_PATH_PREFIX, appName, HMILY_TRANSACTION_PARTICIPANT);
} | 3.68 |
morf_SqlDialect_getSqlForOrderByFieldNullValueHandling | /**
* Get the SQL expression for NULL values handling.
* @param orderByField The order by clause
* @return The resulting SQL String
*
*/
protected String getSqlForOrderByFieldNullValueHandling(FieldReference orderByField) {
if (orderByField.getNullValueHandling().isPresent()) {
switch (orderByField.getNullValueHandling().get()) {
case FIRST:
return " NULLS FIRST";
case LAST:
return " NULLS LAST";
case NONE:
default:
return "";
}
} else {
return " " + defaultNullOrder();
}
} | 3.68 |
hudi_SourceFormatAdapter_fetchNewDataInRowFormat | /**
* Fetch new data in row format. If the source provides data in a different format, it is translated to Row format.
*/
public InputBatch<Dataset<Row>> fetchNewDataInRowFormat(Option<String> lastCkptStr, long sourceLimit) {
switch (source.getSourceType()) {
case ROW:
//we do the sanitizing here if enabled
InputBatch<Dataset<Row>> datasetInputBatch = ((Source<Dataset<Row>>) source).fetchNext(lastCkptStr, sourceLimit);
return new InputBatch<>(processErrorEvents(datasetInputBatch.getBatch(),
ErrorEvent.ErrorReason.JSON_ROW_DESERIALIZATION_FAILURE),
datasetInputBatch.getCheckpointForNextBatch(), datasetInputBatch.getSchemaProvider());
case AVRO: {
//don't need to sanitize because it's already avro
InputBatch<JavaRDD<GenericRecord>> r = ((Source<JavaRDD<GenericRecord>>) source).fetchNext(lastCkptStr, sourceLimit);
return avroDataInRowFormat(r);
}
case JSON: {
if (isFieldNameSanitizingEnabled()) {
//leverage the json -> avro sanitizing. TODO([HUDI-5829]) Optimize by sanitizing during direct conversion
InputBatch<JavaRDD<GenericRecord>> r = fetchNewDataInAvroFormat(lastCkptStr, sourceLimit);
return avroDataInRowFormat(r);
}
InputBatch<JavaRDD<String>> r = ((Source<JavaRDD<String>>) source).fetchNext(lastCkptStr, sourceLimit);
Schema sourceSchema = r.getSchemaProvider().getSourceSchema();
if (errorTableWriter.isPresent()) {
// if error table writer is enabled, during spark read `columnNameOfCorruptRecord` option is configured.
// Any records which spark is unable to read successfully are transferred to the column
// configured via this option. The column is then used to trigger error events.
StructType dataType = AvroConversionUtils.convertAvroSchemaToStructType(sourceSchema)
.add(new StructField(ERROR_TABLE_CURRUPT_RECORD_COL_NAME, DataTypes.StringType, true, Metadata.empty()));
StructType nullableStruct = dataType.asNullable();
Option<Dataset<Row>> dataset = r.getBatch().map(rdd -> source.getSparkSession().read()
.option("columnNameOfCorruptRecord", ERROR_TABLE_CURRUPT_RECORD_COL_NAME)
.schema(nullableStruct)
.option("mode", "PERMISSIVE")
.json(rdd));
Option<Dataset<Row>> eventsDataset = processErrorEvents(dataset,
ErrorEvent.ErrorReason.JSON_ROW_DESERIALIZATION_FAILURE);
return new InputBatch<>(
eventsDataset,
r.getCheckpointForNextBatch(), r.getSchemaProvider());
} else {
StructType dataType = AvroConversionUtils.convertAvroSchemaToStructType(sourceSchema);
return new InputBatch<>(
Option.ofNullable(
r.getBatch().map(rdd -> source.getSparkSession().read().schema(dataType).json(rdd)).orElse(null)),
r.getCheckpointForNextBatch(), r.getSchemaProvider());
}
}
case PROTO: {
//TODO([HUDI-5830]) implement field name sanitization
InputBatch<JavaRDD<Message>> r = ((Source<JavaRDD<Message>>) source).fetchNext(lastCkptStr, sourceLimit);
Schema sourceSchema = r.getSchemaProvider().getSourceSchema();
AvroConvertor convertor = new AvroConvertor(r.getSchemaProvider().getSourceSchema());
return new InputBatch<>(
Option
.ofNullable(
r.getBatch()
.map(rdd -> rdd.map(convertor::fromProtoMessage))
.map(rdd -> AvroConversionUtils.createDataFrame(JavaRDD.toRDD(rdd), sourceSchema.toString(),
source.getSparkSession())
)
.orElse(null)),
r.getCheckpointForNextBatch(), r.getSchemaProvider());
}
default:
throw new IllegalArgumentException("Unknown source type (" + source.getSourceType() + ")");
}
} | 3.68 |
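A standalone sketch of the PERMISSIVE read with a corrupt-record column used in the JSON branch above; it assumes a local SparkSession named spark, and the schema and column name are made up for illustration:
    StructType schema = new StructType()
        .add("id", DataTypes.LongType, true)
        .add("_corrupt_record", DataTypes.StringType, true);
    Dataset<Row> rows = spark.read()
        .option("mode", "PERMISSIVE")
        .option("columnNameOfCorruptRecord", "_corrupt_record")
        .schema(schema)
        .json(spark.createDataset(
            Arrays.asList("{\"id\": 1}", "{not valid json"), Encoders.STRING()));
    // Well-formed rows parse normally; malformed input lands in _corrupt_record instead of failing the read.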
hadoop_StartupProgress_setCount | /**
   * Sets the counter to the specified value.
   *
   * @param phase the phase containing the counter
   * @param step the step whose counter is being set
   * @param count the new counter value
*/
public void setCount(Phase phase, Step step, long count) {
lazyInitStep(phase, step).count.set(count);
} | 3.68 |
hmily_HmilyRepositoryFacade_findHmilyParticipant | /**
* Find hmily participant list.
*
* @param participantId the participant id
* @return the list
*/
public List<HmilyParticipant> findHmilyParticipant(final Long participantId) {
return hmilyRepository.findHmilyParticipant(participantId);
} | 3.68 |
hadoop_DiskBalancerWorkItem_toJson | /**
* returns a serialized json string.
*
* @return String - json
   * @throws IOException if the JSON serialization fails
*/
public String toJson() throws IOException {
return MAPPER.writeValueAsString(this);
} | 3.68 |
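A minimal Jackson sketch of the same serialization pattern; the bean content is illustrative rather than the actual DiskBalancerWorkItem fields, and it assumes Jackson's ObjectMapper and java.util.Map are available:
    ObjectMapper mapper = new ObjectMapper();
    String json = mapper.writeValueAsString(Map.of("bytesToCopy", 1024L, "bytesCopied", 512L));
    // json -> {"bytesToCopy":1024,"bytesCopied":512} (key order may vary for a Map)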
Activiti_ExecutionTree_leafsFirstIterator | /**
   * Uses an {@link ExecutionTreeBfsIterator}, but returns the leaves first (i.e. the reverse of the BFS order).
*/
public ExecutionTreeBfsIterator leafsFirstIterator() {
return new ExecutionTreeBfsIterator(this.getRoot(), true);
} | 3.68 |
zxing_GenericGFPoly_evaluateAt | /**
* @return evaluation of this polynomial at a given point
*/
int evaluateAt(int a) {
if (a == 0) {
// Just return the x^0 coefficient
return getCoefficient(0);
}
if (a == 1) {
// Just the sum of the coefficients
int result = 0;
for (int coefficient : coefficients) {
result = GenericGF.addOrSubtract(result, coefficient);
}
return result;
}
int result = coefficients[0];
int size = coefficients.length;
for (int i = 1; i < size; i++) {
result = GenericGF.addOrSubtract(field.multiply(a, result), coefficients[i]);
}
return result;
} | 3.68 |
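The loop above is Horner's rule specialized to Galois-field add/multiply; the same evaluation scheme over plain integers looks like this (illustrative helper, not part of zxing):
    // Evaluates c[0]*x^(n-1) + c[1]*x^(n-2) + ... + c[n-1] with n-1 multiplications.
    static int evaluatePlain(int[] coefficients, int x) {
        int result = coefficients[0];
        for (int i = 1; i < coefficients.length; i++) {
            result = result * x + coefficients[i];
        }
        return result;
    }
    // evaluatePlain(new int[]{2, 3, 5}, 10) -> 235, i.e. 2*100 + 3*10 + 5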
flink_HeaderlessChannelWriterOutputView_close | /**
     * Closes this OutputView and the underlying writer, and returns the number of bytes
     * written in the last memory segment.
*/
@Override
public int close() throws IOException {
if (!writer.isClosed()) {
int currentPositionInSegment = getCurrentPositionInSegment();
// write last segment
writer.writeBlock(getCurrentSegment());
clear();
writer.getReturnQueue().clear();
this.writer.close();
return currentPositionInSegment;
}
return -1;
} | 3.68 |
flink_ModuleManager_listFullModules | /**
     * Get all loaded modules with their use status. Modules that are in use are listed
     * first, in resolution order.
*
* @return a list of module entries with module name and use status
*/
public List<ModuleEntry> listFullModules() {
// keep the order for used modules
List<ModuleEntry> moduleEntries =
usedModules.stream()
.map(name -> new ModuleEntry(name, true))
.collect(Collectors.toList());
loadedModules.keySet().stream()
.filter(name -> !usedModules.contains(name))
.forEach(name -> moduleEntries.add(new ModuleEntry(name, false)));
return moduleEntries;
} | 3.68 |
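A standalone illustration of the "used first, then the remaining loaded modules" ordering, using plain java.util collections rather than the Flink API:
    List<String> usedModules = List.of("core", "hive");
    Set<String> loadedModules = new LinkedHashSet<>(List.of("core", "hive", "dummy"));
    List<String> ordered = new ArrayList<>(usedModules);
    loadedModules.stream()
        .filter(name -> !usedModules.contains(name))
        .forEach(ordered::add);
    // ordered -> [core, hive, dummy]: used modules first, then the loaded-but-unused one.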
framework_AbstractSplitPanel_setSplitPosition | /**
* Moves the position of the splitter with given position and unit.
*
* @param pos
* the new size of the first region. Fractions are only allowed
* when unit is percentage.
* @param unit
* the unit (from {@link Sizeable}) in which the size is given.
     * @param reverse
     *            if set to true the splitter position is measured from the
     *            second region, otherwise it is measured from the first region
*
*/
public void setSplitPosition(float pos, Unit unit, boolean reverse) {
if (unit != Unit.PERCENTAGE && unit != Unit.PIXELS) {
throw new IllegalArgumentException(
"Only percentage and pixel units are allowed");
}
if (unit != Unit.PERCENTAGE) {
pos = Math.round(pos);
}
float oldPosition = getSplitPosition();
Unit oldUnit = getSplitPositionUnit();
SplitterState splitterState = getSplitterState();
splitterState.position = pos;
splitterState.positionUnit = unit.getSymbol();
splitterState.positionReversed = reverse;
posUnit = unit;
fireEvent(new SplitPositionChangeEvent(AbstractSplitPanel.this, false,
oldPosition, oldUnit, pos, posUnit));
} | 3.68 |
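A hypothetical usage sketch; it assumes Vaadin's HorizontalSplitPanel, an AbstractSplitPanel subclass:
    HorizontalSplitPanel panel = new HorizontalSplitPanel();
    panel.setSplitPosition(30, Sizeable.Unit.PERCENTAGE, false); // first region takes 30 %
    panel.setSplitPosition(200, Sizeable.Unit.PIXELS, true);     // second region fixed at 200 px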
flink_SharedResourceHolder_getInternal | /**
* Visible to unit tests.
*
* @see #get(Resource)
*/
@SuppressWarnings("unchecked")
synchronized <T> T getInternal(Resource<T> resource) {
Instance instance = instances.get(resource);
if (instance == null) {
instance = new Instance(resource.create());
instances.put(resource, instance);
}
if (instance.destroyTask != null) {
instance.destroyTask.cancel(false);
instance.destroyTask = null;
}
instance.refcount++;
return (T) instance.payload;
} | 3.68 |
hadoop_LoadManifestsStage_getIOStatistics | /**
* Get the IOStatistics.
* @return aggregate IOStatistics
*/
@Override
public IOStatisticsSnapshot getIOStatistics() {
return iostatistics;
} | 3.68 |
hadoop_HeaderProcessing_extractXAttrLongValue | /**
* Convert an XAttr byte array to a long.
   * Public for testability.
* @param data data to parse
* @return either a length or none
*/
public static Optional<Long> extractXAttrLongValue(byte[] data) {
String xAttr;
xAttr = HeaderProcessing.decodeBytes(data);
if (StringUtils.isNotEmpty(xAttr)) {
try {
long l = Long.parseLong(xAttr);
if (l >= 0) {
return Optional.of(l);
}
} catch (NumberFormatException ex) {
LOG.warn("Not a number: {}", xAttr, ex);
}
}
// missing/empty header or parse failure.
return Optional.empty();
} | 3.68 |
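A hypothetical check of the behaviour described above; it assumes decodeBytes treats the bytes as UTF-8 text:
    byte[] good = "12345".getBytes(StandardCharsets.UTF_8);
    byte[] bad = "not-a-number".getBytes(StandardCharsets.UTF_8);
    Optional<Long> length = HeaderProcessing.extractXAttrLongValue(good); // Optional[12345]
    Optional<Long> none = HeaderProcessing.extractXAttrLongValue(bad);    // Optional.empty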
hadoop_MoveStep_setBandwidth | /**
* Sets the maximum disk bandwidth per sec to use for this step.
   * @param bandwidth - Long, MB/sec of data to be moved between the
   *                  source and destination volume.
*/
@Override
public void setBandwidth(long bandwidth) {
this.bandwidth = bandwidth;
} | 3.68 |
framework_ShortcutActionHandler_shakeTarget | /**
     * We try to fire a value change in the component in which the key combination
     * was typed. E.g. a TextField may contain newly typed text that is expected to
     * be sent to the server before the shortcut action is triggered. This is done
     * by removing focus and then immediately returning it to the target element.
* <p>
* This is a hack copied over from V7 in order to keep the compatibility
* classes working. Main V8 classes don't require shaking.
*/
private static void shakeTarget(final Element e) {
blur(e);
focus(e);
} | 3.68 |
framework_AbstractDateField_setParseErrorMessage | /**
     * Sets the default error message used if the DateField cannot parse the
     * text entered by the user into a date. Note that if the
* {@link #handleUnparsableDateString(String)} method is overridden, the
* localized message from its exception is used.
*
* @param parsingErrorMessage
* the default parsing error message
*
* @see #getParseErrorMessage()
* @see #handleUnparsableDateString(String)
*/
public void setParseErrorMessage(String parsingErrorMessage) {
defaultParseErrorMessage = parsingErrorMessage;
} | 3.68 |
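A hypothetical usage sketch; it assumes Vaadin's DateField, an AbstractDateField subclass:
    DateField dueDate = new DateField("Due date");
    dueDate.setParseErrorMessage("Please enter the date as dd/MM/yyyy");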
flink_FlinkPreparingTableBase_getStatistic | /** Returns the statistic of this table. */
public FlinkStatistic getStatistic() {
return this.statistic;
} | 3.68 |
dubbo_PathMatcher_httpMethodMatch | /**
     * The HTTP method is only compared when both PathMatchers require it; if either side
     * does not require the comparison, the methods are treated as matching.
     *
     * @param that the other PathMatcher to compare against
     * @return true if the HTTP methods match or no comparison is required
*/
private boolean httpMethodMatch(PathMatcher that) {
return !that.needCompareHttpMethod || !this.needCompareHttpMethod
? true
: Objects.equals(this.httpMethod, that.httpMethod);
} | 3.68 |
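A behavior-preserving sketch of the same rule written as a standalone helper; the names are illustrative:
    static boolean httpMethodsMatch(boolean thisNeedsCompare, boolean thatNeedsCompare,
                                    String thisMethod, String thatMethod) {
        // Compare only when both sides require it; otherwise treat the methods as matching.
        boolean compareBoth = thisNeedsCompare && thatNeedsCompare;
        return !compareBoth || Objects.equals(thisMethod, thatMethod);
    }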
hudi_HoodieAvroDataBlock_getBlock | /**
   * This method is retained to provide backwards compatibility with HoodieArchivedLogs that were written using
   * HoodieLogFormat V1.
*/
@Deprecated
public static HoodieAvroDataBlock getBlock(byte[] content, Schema readerSchema, InternalSchema internalSchema) throws IOException {
SizeAwareDataInputStream dis = new SizeAwareDataInputStream(new DataInputStream(new ByteArrayInputStream(content)));
// 1. Read the schema written out
int schemaLength = dis.readInt();
byte[] compressedSchema = new byte[schemaLength];
dis.readFully(compressedSchema, 0, schemaLength);
Schema writerSchema = new Schema.Parser().parse(decompress(compressedSchema));
if (readerSchema == null) {
readerSchema = writerSchema;
}
if (!internalSchema.isEmptySchema()) {
readerSchema = writerSchema;
}
GenericDatumReader<IndexedRecord> reader = new GenericDatumReader<>(writerSchema, readerSchema);
// 2. Get the total records
int totalRecords = dis.readInt();
List<HoodieRecord> records = new ArrayList<>(totalRecords);
// 3. Read the content
for (int i = 0; i < totalRecords; i++) {
int recordLength = dis.readInt();
Decoder decoder = DecoderFactory.get().binaryDecoder(content, dis.getNumberOfBytesRead(), recordLength, null);
IndexedRecord record = reader.read(null, decoder);
records.add(new HoodieAvroIndexedRecord(record));
dis.skipBytes(recordLength);
}
dis.close();
return new HoodieAvroDataBlock(records, readerSchema);
} | 3.68 |
hbase_HBaseTestingUtility_createTableDescriptor | /**
* Create a table of name <code>name</code>.
* @param name Name to give table.
   * @return Table descriptor.
*/
public TableDescriptor createTableDescriptor(final TableName name) {
return createTableDescriptor(name, ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS,
MAXVERSIONS, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED);
} | 3.68 |
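A hypothetical usage in a test; it assumes an HBaseTestingUtility instance named util:
    TableDescriptor desc = util.createTableDescriptor(TableName.valueOf("testTable"));
    // desc uses the default min/max versions, TTL and keep-deleted settings shown above.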
dubbo_DubboCertManager_generateCsr | /**
* Generate CSR (Certificate Sign Request)
*
* @param keyPair key pair to request
* @return csr
     * @throws IOException if the certificate request cannot be encoded
*/
private String generateCsr(KeyPair keyPair) throws IOException {
PKCS10CertificationRequest request = new JcaPKCS10CertificationRequestBuilder(
new X500Name("O=" + "cluster.domain"), keyPair.getPublicKey())
.build(keyPair.getSigner());
String csr = generatePemKey("CERTIFICATE REQUEST", request.getEncoded());
if (logger.isDebugEnabled()) {
logger.debug("CSR Request to Dubbo Certificate Authorization. \n" + csr);
}
return csr;
} | 3.68 |
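A standalone BouncyCastle sketch of building a CSR from a plain JCA key pair; the subject and signature algorithm are illustrative, and the Dubbo code itself goes through its own KeyPair wrapper and generatePemKey:
    java.security.KeyPair jcaKeyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
    ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA").build(jcaKeyPair.getPrivate());
    PKCS10CertificationRequest request = new JcaPKCS10CertificationRequestBuilder(
            new X500Name("O=cluster.domain"), jcaKeyPair.getPublic()).build(signer);
    byte[] derEncodedCsr = request.getEncoded(); // DER bytes; the code above wraps these in a PEM block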
hbase_HFileArchiveUtil_getStoreArchivePathForRootDir | /**
* Gets the archive directory under specified root dir. One scenario where this is useful is when
* WAL and root dir are configured under different file systems, i.e. root dir on S3 and WALs on
* HDFS. This is mostly useful for archiving recovered edits, when
* <b>hbase.region.archive.recovered.edits</b> is enabled.
* @param rootDir {@link Path} the root dir under which archive path should be created.
* @param region parent region information under which the store currently lives
* @param family name of the family in the store
* @return {@link Path} to the WAL FS directory to archive the given store or <tt>null</tt> if it
* should not be archived
*/
public static Path getStoreArchivePathForRootDir(Path rootDir, RegionInfo region, byte[] family) {
Path tableArchiveDir = getTableArchivePath(rootDir, region.getTable());
return HRegionFileSystem.getStoreHomedir(tableArchiveDir, region, family);
} | 3.68 |
hudi_ParquetUtils_readSchema | /**
* Get the schema of the given parquet file.
*/
public MessageType readSchema(Configuration configuration, Path parquetFilePath) {
return readMetadata(configuration, parquetFilePath).getFileMetaData().getSchema();
} | 3.68 |
hadoop_FederationStateStoreUtils_logAndThrowInvalidInputException | /**
   * Throws a <code>FederationStateStoreInvalidInputException</code> due to an
* error in <code>FederationStateStore</code>.
*
* @param log the logger interface
* @param errMsg the error message
* @throws YarnException on failure
*/
public static void logAndThrowInvalidInputException(Logger log, String errMsg)
throws YarnException {
log.error(errMsg);
throw new FederationStateStoreInvalidInputException(errMsg);
} | 3.68 |