name | code_snippet | score |
---|---|---|
morf_AbstractSqlDialectTest_expectedPower | /**
* @return The expected SQL for the POWER function
*/
private Object expectedPower() {
return "SELECT POWER(floatField, intField) FROM " + tableName(TEST_TABLE);
} | 3.68 |
flink_RowDataVectorizer_convert | /**
* Converts ArrayData to RowData for calling {@link RowDataVectorizer#setColumn(int,
* ColumnVector, LogicalType, RowData, int)} recursively with arrays.
*
* @param arrayData input ArrayData.
* @param arrayFieldType LogicalType of input ArrayData.
* @return RowData.
*/
private static RowData convert(ArrayData arrayData, LogicalType arrayFieldType) {
GenericRowData rowData = new GenericRowData(arrayData.size());
ArrayData.ElementGetter elementGetter = ArrayData.createElementGetter(arrayFieldType);
for (int i = 0; i < arrayData.size(); i++) {
rowData.setField(i, elementGetter.getElementOrNull(arrayData, i));
}
return rowData;
} | 3.68 |
starts_Writer_getJarToChecksumMapping | /**
* Compute the checksum of the given jar and return the jar
* and the checksum as a pair of strings.
*
* @param jar The jar whose checksum we need to compute.
* @return a pair of the jar path and its checksum; the checksum is "-1" if it could not be computed.
*/
public static Pair<String, String> getJarToChecksumMapping(String jar) {
Pair<String, String> pair = new Pair<>(jar, "-1");
byte[] bytes;
int bufSize = 65536 * 2;
try {
MessageDigest md = MessageDigest.getInstance("MD5");
// use try-with-resources so the stream is always closed
try (InputStream is = Files.newInputStream(Paths.get(jar))) {
bytes = new byte[bufSize];
int size = is.read(bytes, 0, bufSize);
while (size >= 0) {
md.update(bytes, 0, size);
size = is.read(bytes, 0, bufSize);
}
}
pair.setValue(Hex.encodeHexString(md.digest()));
} catch (IOException ioe) {
ioe.printStackTrace();
} catch (NoSuchAlgorithmException nsae) {
nsae.printStackTrace();
}
return pair;
} | 3.68 |
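A minimal, self-contained sketch of the same digest-and-hex pattern shown above, written with try-with-resources so the stream is always closed; the file name "example.jar" and the manual hex encoding are illustrative stand-ins, not part of the STARTS Writer class.

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class ChecksumDemo {
    public static void main(String[] args) throws Exception {
        String jar = "example.jar";   // placeholder path; point it at any local file
        MessageDigest md = MessageDigest.getInstance("MD5");
        try (InputStream is = Files.newInputStream(Paths.get(jar))) {
            byte[] buf = new byte[128 * 1024];
            int read;
            while ((read = is.read(buf)) >= 0) {
                md.update(buf, 0, read);   // feed the digest chunk by chunk
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
            hex.append(String.format("%02x", b & 0xff));
        }
        System.out.println(jar + " -> " + hex);
    }
}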
hadoop_DeletionService_isTerminated | /**
* Determine if the service has completely stopped.
* Used only by unit tests
* @return true if service has completely stopped
*/
@Private
public boolean isTerminated() {
return getServiceState() == STATE.STOPPED && sched.isTerminated();
} | 3.68 |
hadoop_AWSRequestAnalyzer_analyze | /**
* Given an AWS request, try to analyze it to operation,
* read/write and path.
* @param request request.
* @return information about the request.
*/
public RequestInfo analyze(SdkRequest request) {
// this is where Scala's case statement would massively
// simplify life.
// Please Keep in Alphabetical Order.
if (request instanceof AbortMultipartUploadRequest) {
return writing(MULTIPART_UPLOAD_ABORTED,
((AbortMultipartUploadRequest) request).key(),
0);
} else if (request instanceof CompleteMultipartUploadRequest) {
CompleteMultipartUploadRequest r
= (CompleteMultipartUploadRequest) request;
return writing(MULTIPART_UPLOAD_COMPLETED,
r.key(),
r.multipartUpload().parts().size());
} else if (request instanceof CreateMultipartUploadRequest) {
return writing(MULTIPART_UPLOAD_STARTED,
((CreateMultipartUploadRequest) request).key(),
0);
} else if (request instanceof DeleteObjectRequest) {
// DeleteObject: single object
return writing(OBJECT_DELETE_REQUEST,
((DeleteObjectRequest) request).key(),
1);
} else if (request instanceof DeleteObjectsRequest) {
// DeleteObjects: bulk delete
// use first key as the path
DeleteObjectsRequest r = (DeleteObjectsRequest) request;
List<ObjectIdentifier> objectIdentifiers
= r.delete().objects();
return writing(OBJECT_BULK_DELETE_REQUEST,
objectIdentifiers.isEmpty() ? null : objectIdentifiers.get(0).key(),
objectIdentifiers.size());
} else if (request instanceof GetBucketLocationRequest) {
GetBucketLocationRequest r = (GetBucketLocationRequest) request;
return reading(STORE_EXISTS_PROBE,
r.bucket(),
0);
} else if (request instanceof GetObjectRequest) {
GetObjectRequest r = (GetObjectRequest) request;
return reading(ACTION_HTTP_GET_REQUEST,
r.key(),
sizeFromRangeHeader(r.range()));
} else if (request instanceof HeadObjectRequest) {
return reading(ACTION_HTTP_HEAD_REQUEST,
((HeadObjectRequest) request).key(), 0);
} else if (request instanceof ListMultipartUploadsRequest) {
ListMultipartUploadsRequest r
= (ListMultipartUploadsRequest) request;
return reading(MULTIPART_UPLOAD_LIST,
r.prefix(),
r.maxUploads());
} else if (request instanceof ListObjectsRequest) {
ListObjectsRequest r = (ListObjectsRequest) request;
return reading(OBJECT_LIST_REQUEST,
r.prefix(),
r.maxKeys());
} else if (request instanceof ListObjectsV2Request) {
ListObjectsV2Request r = (ListObjectsV2Request) request;
return reading(OBJECT_LIST_REQUEST,
r.prefix(),
r.maxKeys());
} else if (request instanceof PutObjectRequest) {
PutObjectRequest r = (PutObjectRequest) request;
return writing(OBJECT_PUT_REQUEST,
r.key(),
0);
} else if (request instanceof SelectObjectContentRequest) {
SelectObjectContentRequest r =
(SelectObjectContentRequest) request;
return reading(OBJECT_SELECT_REQUESTS,
r.key(),
1);
} else if (request instanceof UploadPartRequest) {
UploadPartRequest r = (UploadPartRequest) request;
return writing(MULTIPART_UPLOAD_PART_PUT,
r.key(),
r.contentLength());
}
// no explicit support, return classname
return writing(request.getClass().getName(), null, 0);
} | 3.68 |
flink_ExceptionUtils_rethrowIOException | /**
* Re-throws the given {@code Throwable} in scenarios where the signature allows only
* IOExceptions (and RuntimeException and Error).
*
* <p>Throws this exception directly, if it is an IOException, a RuntimeException, or an Error.
* Otherwise it wraps it in an IOException and throws it.
*
* @param t The Throwable to be thrown.
*/
public static void rethrowIOException(Throwable t) throws IOException {
if (t instanceof IOException) {
throw (IOException) t;
} else if (t instanceof RuntimeException) {
throw (RuntimeException) t;
} else if (t instanceof Error) {
throw (Error) t;
} else {
throw new IOException(t.getMessage(), t);
}
} | 3.68 |
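A short, self-contained illustration of how a caller observes this wrapping behaviour; the helper below repeats the same branching locally so the example compiles on its own and is not the Flink ExceptionUtils class itself.

import java.io.IOException;

public class RethrowDemo {
    // Same branching as above: only IOException, RuntimeException and Error escape unwrapped.
    static void rethrowIOException(Throwable t) throws IOException {
        if (t instanceof IOException) {
            throw (IOException) t;
        } else if (t instanceof RuntimeException) {
            throw (RuntimeException) t;
        } else if (t instanceof Error) {
            throw (Error) t;
        } else {
            throw new IOException(t.getMessage(), t);
        }
    }

    public static void main(String[] args) {
        try {
            rethrowIOException(new InterruptedException("checked, but not an IOException"));
        } catch (IOException e) {
            // The InterruptedException arrives wrapped as the IOException's cause.
            System.out.println("caught " + e + " with cause " + e.getCause());
        }
    }
}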
framework_VAbstractCalendarPanel_getCloseKey | /**
* Returns the key that closes the popup window if this is a VPopupCalendar;
* otherwise this does nothing. By default this is the Escape key, but you can
* change the key to whatever you want by overriding this method.
*
* @return the closing key
*/
protected int getCloseKey() {
return KeyCodes.KEY_ESCAPE;
} | 3.68 |
morf_MySqlDialect_getSqlForYYYYMMDDToDate | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForYYYYMMDDToDate(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForYYYYMMDDToDate(Function function) {
return "DATE(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
hadoop_JsonSerDeser_fromInstance | /**
* Clone by converting to JSON and back again.
* This is much less efficient than any Java clone process.
* @param instance instance to duplicate
* @return a new instance
* @throws IOException problems.
*/
public T fromInstance(T instance) throws IOException {
return fromJson(toJson(instance));
} | 3.68 |
hbase_ReusableStreamGzipCodec_writeTrailer | /** Re-implemented because the corresponding method in the JDK is not visible. */
private void writeTrailer(byte[] paramArrayOfByte, int paramInt) throws IOException {
writeInt((int) this.crc.getValue(), paramArrayOfByte, paramInt);
writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
} | 3.68 |
flink_FileCatalogStore_open | /**
* Opens the catalog store and initializes the catalog file map.
*
* @throws CatalogException if the catalog store directory does not exist, not a directory, or
* if there is an error reading the directory
*/
@Override
public void open() throws CatalogException {
try {
FileSystem fs = catalogStorePath.getFileSystem();
if (!fs.exists(catalogStorePath)) {
fs.mkdirs(catalogStorePath);
}
if (!fs.getFileStatus(catalogStorePath).isDir()) {
throw new CatalogException(
String.format(
"Failed to open catalog store. The given catalog store path %s is not a directory.",
catalogStorePath));
}
} catch (CatalogException e) {
throw e;
} catch (Exception e) {
throw new CatalogException(
String.format(
"Failed to open file catalog store directory %s.", catalogStorePath),
e);
}
super.open();
} | 3.68 |
flink_Execution_triggerCheckpoint | /**
* Trigger a new checkpoint on the task of this execution.
*
* @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
* @return Future acknowledge which is returned once the checkpoint has been triggered
*/
public CompletableFuture<Acknowledge> triggerCheckpoint(
long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
return triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions);
} | 3.68 |
hadoop_InstantiationIOException_unavailable | /**
* Class is unavailable for some reason, probably a missing dependency.
* @param uri URI of filesystem
* @param classname classname.
* @param key configuration key
* @param text text to include
* @return an exception.
*/
public static InstantiationIOException unavailable(
@Nullable URI uri,
@Nullable String classname,
@Nullable String key,
String text) {
return new InstantiationIOException(Kind.Unavailable,
uri, classname, key, text, null);
} | 3.68 |
hadoop_MDCFilter_init | /**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param config filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
} | 3.68 |
framework_SizeWithUnit_getUnit | /**
* Returns the unit stored in this object.
*
* @return the unit of this (value, unit) pair
*/
public Unit getUnit() {
return unit;
} | 3.68 |
hadoop_DomainRowKey_getRowKeyAsString | /**
* Constructs a row key for the domain table as follows:
* <p>
* {@code clusterId!domainId}.
* </p>
* @return String representation of row key.
*/
public String getRowKeyAsString() {
return domainIdKeyConverter.encodeAsString(this);
} | 3.68 |
flink_CheckpointStatsSnapshot_getSummaryStats | /**
* Returns the snapshotted completed checkpoint summary stats.
*
* @return Snapshotted completed checkpoint summary stats.
*/
public CompletedCheckpointStatsSummarySnapshot getSummaryStats() {
return summary;
} | 3.68 |
flink_AbstractParameterTool_getByte | /**
* Returns the Byte value for the given key. If the key does not exist, it will return the
* given default value. The method fails if the value is not a Byte.
*/
public byte getByte(String key, byte defaultValue) {
addToDefaults(key, Byte.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Byte.valueOf(value);
}
} | 3.68 |
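A small sketch of the default-versus-parse behaviour, using a plain HashMap as a stand-in for the parameter tool's backing data (the real class also records the default via addToDefaults, which is omitted here).

import java.util.HashMap;
import java.util.Map;

public class GetByteDemo {
    static final Map<String, String> PARAMS = new HashMap<>();

    static byte getByte(String key, byte defaultValue) {
        String value = PARAMS.get(key);
        if (value == null) {
            return defaultValue;          // missing key: fall back to the default
        }
        return Byte.valueOf(value);       // throws NumberFormatException if not a valid byte
    }

    public static void main(String[] args) {
        PARAMS.put("retries", "3");
        System.out.println(getByte("retries", (byte) 1));   // 3, parsed from the map
        System.out.println(getByte("missing", (byte) 1));   // 1, the supplied default
    }
}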
rocketmq-connect_MemoryClusterManagementServiceImpl_configure | /**
* Configure class with the given key-value pairs
*
* @param config can be DistributedConfig or StandaloneConfig
*/
@Override
public void configure(WorkerConfig config) {
this.config = (StandaloneConfig) config;
} | 3.68 |
hbase_MasterRpcServices_getTableNames | /**
* Get list of userspace table names
* @param controller Unused (set to null).
* @param req GetTableNamesRequest
*/
@Override
public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
throws ServiceException {
try {
server.checkServiceStarted();
final String regex = req.hasRegex() ? req.getRegex() : null;
final String namespace = req.hasNamespace() ? req.getNamespace() : null;
List<TableName> tableNames =
server.listTableNames(namespace, regex, req.getIncludeSysTables());
GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
if (tableNames != null && tableNames.size() > 0) {
// Add the table names to the response
for (TableName table : tableNames) {
builder.addTableNames(ProtobufUtil.toProtoTableName(table));
}
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
hadoop_TaskAttemptScanDirectoryStage_scanDirectoryTree | /**
* Recursively scan a directory tree.
* The manifest will contain all files to rename
* (source and dest) and directories to create.
* All files are processed before any of the subdirs are.
* This helps in statistics gathering.
* There are some optimizations which could be done with async
* fetching of the iterators of those subdirs, but as this
* is generally off the critical path, that "enhancement"
* can be postponed until data suggests it is needed.
* @param manifest manifest to update
* @param srcDir dir to scan
* @param destDir destination directory
* @param depth depth from the task attempt dir.
* @param parentDirExists does the parent dir exist?
* @return the maximum depth of child directories
* @throws IOException IO failure.
*/
private int scanDirectoryTree(
TaskManifest manifest,
Path srcDir,
Path destDir,
int depth,
boolean parentDirExists) throws IOException {
// generate some task progress in case directory scanning is very slow.
progress();
int maxDepth = 0;
int files = 0;
boolean dirExists = parentDirExists;
List<FileStatus> subdirs = new ArrayList<>();
try (DurationInfo ignored = new DurationInfo(LOG, false,
"Task Attempt %s source dir %s, dest dir %s",
getTaskAttemptId(), srcDir, destDir)) {
// list the directory. This may block until the listing is complete,
// or, if the FS does incremental or asynchronous fetching,
// then the next()/hasNext() call will block for the results.
// Unless turned off, ABFS does this asynchronously.
final RemoteIterator<FileStatus> listing = listStatusIterator(srcDir);
// when the FS (especially ABFS) does an async fetch of the listing,
// we can probe for the status of the destination dir while that
// page is being fetched.
// probe for and add the dest dir entry for all but
// the base dir
if (depth > 0) {
final EntryStatus status;
if (parentDirExists) {
final FileStatus destDirStatus = getFileStatusOrNull(destDir);
status = EntryStatus.toEntryStatus(destDirStatus);
dirExists = destDirStatus != null;
} else {
// if there is no parent dir, then there is no need to look
// for this directory; report it as missing automatically.
status = EntryStatus.not_found;
}
manifest.addDirectory(DirEntry.dirEntry(
destDir,
status,
depth));
}
// process the listing; this is where abfs will block
// to wait for the result of the list call.
while (listing.hasNext()) {
final FileStatus st = listing.next();
if (st.isFile()) {
// this is a file, so add to the list of files to commit.
files++;
final FileEntry entry = fileEntry(st, destDir);
manifest.addFileToCommit(entry);
LOG.debug("To rename: {}", entry);
} else {
if (st.isDirectory()) {
// will need to scan this directory too.
subdirs.add(st);
} else {
// some other object. ignoring
LOG.info("Ignoring FS object {}", st);
}
}
}
// add any statistics provided by the listing.
maybeAddIOStatistics(getIOStatistics(), listing);
}
// now scan the subdirectories
LOG.debug("{}: Number of subdirectories under {} found: {}; file count {}",
getName(), srcDir, subdirs.size(), files);
for (FileStatus st : subdirs) {
Path destSubDir = new Path(destDir, st.getPath().getName());
final int d = scanDirectoryTree(manifest,
st.getPath(),
destSubDir,
depth + 1,
dirExists);
maxDepth = Math.max(maxDepth, d);
}
return 1 + maxDepth;
} | 3.68 |
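A compact sketch of the same recursive shape using only java.nio: all files of a directory are handled before any of its subdirectories, and the return value is 1 plus the deepest child, as in the method above. It is illustrative only and omits the manifest, destination probing and statistics of the real stage; the starting directory "." is a placeholder.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

public class ScanDemo {
    static int scan(Path dir, int depth) throws IOException {
        int maxChildDepth = 0;
        List<Path> subdirs = new ArrayList<>();
        try (DirectoryStream<Path> listing = Files.newDirectoryStream(dir)) {
            for (Path entry : listing) {
                if (Files.isRegularFile(entry)) {
                    System.out.println("file at depth " + depth + ": " + entry);
                } else if (Files.isDirectory(entry)) {
                    subdirs.add(entry);            // scanned only after all files, as above
                }
            }
        }
        for (Path subdir : subdirs) {
            maxChildDepth = Math.max(maxChildDepth, scan(subdir, depth + 1));
        }
        return 1 + maxChildDepth;
    }

    public static void main(String[] args) throws IOException {
        System.out.println("max depth = " + scan(Paths.get("."), 0));
    }
}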
hbase_CompactionRequestImpl_getPriority | /** Gets the priority for the request */
@Override
public int getPriority() {
return priority;
} | 3.68 |
pulsar_ConcurrentLongHashMap_keys | /**
* @return a new list of all keys (makes a copy)
*/
public List<Long> keys() {
List<Long> keys = Lists.newArrayListWithExpectedSize((int) size());
forEach((key, value) -> keys.add(key));
return keys;
} | 3.68 |
streampipes_ConnectWorkerDescriptionProvider_getRegisteredAdapters | /**
* This is a helper method to mock the Declarer Singleton in unit tests
* @return the registered adapters from the DeclarerSingleton
*/
public Collection<StreamPipesAdapter> getRegisteredAdapters() {
return DeclarersSingleton.getInstance().getAdapters();
} | 3.68 |
MagicPlugin_PreLoadEvent_registerCastPermissionManager | /**
* Register a CastPermissionManager, for controlling whether or not players can cast spells in
* specific regions.
*
* @param manager The manager to add.
*/
public void registerCastPermissionManager(CastPermissionManager manager) {
castManagers.add(manager);
} | 3.68 |
dubbo_Environment_loadMigrationRule | /**
* @deprecated MigrationRule will be removed in 3.1
*/
@Deprecated
private void loadMigrationRule() {
if (Boolean.parseBoolean(System.getProperty(CommonConstants.DUBBO_MIGRATION_FILE_ENABLE, "false"))) {
String path = System.getProperty(CommonConstants.DUBBO_MIGRATION_KEY);
if (StringUtils.isEmpty(path)) {
path = System.getenv(CommonConstants.DUBBO_MIGRATION_KEY);
if (StringUtils.isEmpty(path)) {
path = CommonConstants.DEFAULT_DUBBO_MIGRATION_FILE;
}
}
this.localMigrationRule = ConfigUtils.loadMigrationRule(scopeModel.getClassLoaders(), path);
} else {
this.localMigrationRule = null;
}
} | 3.68 |
flink_QueryableStateClient_setExecutionConfig | /**
* Replaces the existing {@link ExecutionConfig} (possibly {@code null}) with the provided one.
*
* @param config The new {@code configuration}.
* @return The old configuration, or {@code null} if none was specified.
*/
public ExecutionConfig setExecutionConfig(ExecutionConfig config) {
ExecutionConfig prev = executionConfig;
this.executionConfig = config;
return prev;
} | 3.68 |
flink_RequestedGlobalProperties_isTrivial | /** Checks if the properties in this object are trivial, i.e. only standard values. */
public boolean isTrivial() {
return this.partitioning == null
|| this.partitioning == PartitioningProperty.RANDOM_PARTITIONED;
} | 3.68 |
framework_ConnectorHelper_getDebugInformation | /**
* Creates a string containing debug info for the connector.
*
* @since 7.1
* @param connector
* The connector to print debug info about
* @return A string with debug information
*/
public static String getDebugInformation(ClientConnector connector) {
StringBuilder sb = new StringBuilder();
sb.append("*** Debug details of a connector: *** \n");
sb.append("Type: ");
sb.append(connector.getClass().getName());
sb.append("\nId:");
sb.append(connector.getConnectorId());
if (connector instanceof Component) {
Component component = (Component) connector;
if (component.getCaption() != null) {
sb.append("\nCaption:");
sb.append(component.getCaption());
}
}
writeHierarchyInformation(connector, sb);
return sb.toString();
} | 3.68 |
hudi_TimelineUtils_getWrittenPartitions | /**
* Returns partitions that have new data strictly after commitTime.
* Does not include internal operations such as clean in the timeline.
*/
public static List<String> getWrittenPartitions(HoodieTimeline timeline) {
HoodieTimeline timelineToSync = timeline.getWriteTimeline();
return getAffectedPartitions(timelineToSync);
} | 3.68 |
flink_StreamExecutionEnvironment_fromSource | /**
* Adds a data {@link Source} to the environment to get a {@link DataStream}.
*
* <p>The result will be either a bounded data stream (that can be processed in a batch way) or
* an unbounded data stream (that must be processed in a streaming way), based on the
* boundedness property of the source, as defined by {@link Source#getBoundedness()}.
*
* <p>This method takes an explicit type information for the produced data stream, so that
* callers can define directly what type/serializer will be used for the produced stream. For
* sources that describe their produced type, the method {@link #fromSource(Source,
* WatermarkStrategy, String)} can be used to avoid specifying the produced type redundantly.
*
* @param source the user defined source
* @param sourceName Name of the data source
* @param <OUT> type of the returned stream
* @param typeInfo the user defined type information for the stream
* @return the data stream constructed
*/
@Experimental
public <OUT> DataStreamSource<OUT> fromSource(
Source<OUT, ?, ?> source,
WatermarkStrategy<OUT> timestampsAndWatermarks,
String sourceName,
TypeInformation<OUT> typeInfo) {
final TypeInformation<OUT> resolvedTypeInfo =
getTypeInfo(source, sourceName, Source.class, typeInfo);
return new DataStreamSource<>(
this,
checkNotNull(source, "source"),
checkNotNull(timestampsAndWatermarks, "timestampsAndWatermarks"),
checkNotNull(resolvedTypeInfo),
checkNotNull(sourceName));
} | 3.68 |
hadoop_AccessTokenTimer_getNextRefreshMSSinceEpoch | /**
* Get next time we should refresh the token.
*
* @return Next time since epoch we'll need to refresh the token.
*/
public long getNextRefreshMSSinceEpoch() {
return nextRefreshMSSinceEpoch;
} | 3.68 |
flink_Operator_setParallelism | /**
* Sets the parallelism for this operator. The parallelism must be 1 or more.
*
* @param parallelism The parallelism for this operator. A value equal to {@link
* ExecutionConfig#PARALLELISM_DEFAULT} will use the system default.
* @return The operator with set parallelism.
*/
public O setParallelism(int parallelism) {
OperatorValidationUtils.validateParallelism(parallelism);
this.parallelism = parallelism;
@SuppressWarnings("unchecked")
O returnType = (O) this;
return returnType;
} | 3.68 |
streampipes_BoilerpipeHTMLContentHandler_ignorableWhitespace | // @Override
public void ignorableWhitespace(char[] ch, int start, int length) throws SAXException {
if (!sbLastWasWhitespace) {
textBuffer.append(' ');
tokenBuffer.append(' ');
}
sbLastWasWhitespace = true;
} | 3.68 |
hadoop_BatchedRequests_getPlacementAttempt | /**
* Get placement attempt.
* @return the placement attempt.
*/
public int getPlacementAttempt() {
return placementAttempt;
} | 3.68 |
hadoop_SpillCallBackInjector_getAndSet | /**
* Sets the global SpillFilesCBInjector to the new value, returning the old
* value.
*
* @param spillInjector the new implementation for the spill injector.
* @return the previous implementation.
*/
public static SpillCallBackInjector getAndSet(
SpillCallBackInjector spillInjector) {
SpillCallBackInjector prev = instance;
instance = spillInjector;
return prev;
} | 3.68 |
framework_AutoScroller_doScrollAreaChecks | /**
* This method checks whether the first pointer event started in an area
* that would start scrolling immediately, and does some actions
* accordingly.
* <p>
* If so, that scroll area will be offset "beyond" the pointer (above
* if pointer is towards the top/left, otherwise below/right).
*/
private void doScrollAreaChecks(int pageCordinate) {
/*
* The first run makes sure that neither scroll position is
* underneath the finger, but offset to either direction from
* underneath the pointer.
*/
if (startBound == -1) {
startBound = Math.min(finalStartBound, pageCordinate);
endBound = Math.max(finalEndBound, pageCordinate);
} else {
/*
* Subsequent runs make sure that the scroll area grows (but
* doesn't shrink) with the finger, but no further than the
* final bound.
*/
int oldTopBound = startBound;
if (startBound < finalStartBound) {
startBound = Math.max(startBound,
Math.min(finalStartBound, pageCordinate));
}
int oldBottomBound = endBound;
if (endBound > finalEndBound) {
endBound = Math.min(endBound,
Math.max(finalEndBound, pageCordinate));
}
final boolean startDidNotMove = oldTopBound == startBound;
final boolean endDidNotMove = oldBottomBound == endBound;
final boolean wasMovement = pageCordinate != scrollingAxisPageCoordinate;
scrollAreaShouldRebound = (startDidNotMove && endDidNotMove
&& wasMovement);
}
} | 3.68 |
framework_VAbstractCalendarPanel_handleNavigation | /**
* Handles the keyboard navigation.
*
* @param keycode
* The key code that was pressed
* @param ctrl
* Was the ctrl key pressed
* @param shift
* Was the shift key pressed
* @return Return true if key press was handled by the component, else
* return false
*/
protected boolean handleNavigation(int keycode, boolean ctrl,
boolean shift) {
if (!isEnabled() || isReadonly()) {
return false;
} else if (isYear(getResolution())) {
return handleNavigationYearMode(keycode, ctrl, shift);
} else if (isMonth(getResolution())) {
return handleNavigationMonthMode(keycode, ctrl, shift);
} else if (isDay(getResolution())) {
return handleNavigationDayMode(keycode, ctrl, shift);
} else {
return handleNavigationDayMode(keycode, ctrl, shift);
}
} | 3.68 |
hadoop_CrcComposer_update | /**
* Updates with a single additional CRC which corresponds to an underlying
* data size of {@code bytesPerCrc}.
*
* @param crcB crcB.
* @param bytesPerCrc bytesPerCrc.
* @throws IOException raised on errors performing I/O.
*/
public void update(int crcB, long bytesPerCrc) throws IOException {
if (curCompositeCrc == 0) {
curCompositeCrc = crcB;
} else if (bytesPerCrc == bytesPerCrcHint) {
curCompositeCrc = CrcUtil.composeWithMonomial(
curCompositeCrc, crcB, precomputedMonomialForHint, crcPolynomial);
} else {
curCompositeCrc = CrcUtil.compose(
curCompositeCrc, crcB, bytesPerCrc, crcPolynomial);
}
curPositionInStripe += bytesPerCrc;
if (curPositionInStripe > stripeLength) {
throw new IOException(String.format(
"Current position in stripe '%d' after advancing by bytesPerCrc '%d' "
+ "exceeds stripeLength '%d' without stripe alignment.",
curPositionInStripe, bytesPerCrc, stripeLength));
} else if (curPositionInStripe == stripeLength) {
// Hit a stripe boundary; flush the curCompositeCrc and reset for next
// stripe.
digestOut.write(CrcUtil.intToBytes(curCompositeCrc), 0, CRC_SIZE_BYTES);
curCompositeCrc = 0;
curPositionInStripe = 0;
}
} | 3.68 |
hbase_SnapshotManifest_readDataManifest | /*
* Read the SnapshotDataManifest file
*/
private SnapshotDataManifest readDataManifest() throws IOException {
try (FSDataInputStream in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME))) {
CodedInputStream cin = CodedInputStream.newInstance(in);
cin.setSizeLimit(manifestSizeLimit);
return SnapshotDataManifest.parseFrom(cin);
} catch (FileNotFoundException e) {
return null;
} catch (InvalidProtocolBufferException e) {
throw new CorruptedSnapshotException("unable to parse data manifest " + e.getMessage(), e);
}
} | 3.68 |
hmily_HmilyLock_getLockId | /**
* Get lock id.
*
* @return lock id
*/
public String getLockId() {
return Joiner.on(";;").join(resourceId, targetTableName, targetTablePk);
} | 3.68 |
hadoop_EditLogOutputStream_flush | /**
* Flush data to persistent store.
* Collect sync metrics.
*/
public void flush() throws IOException {
flush(true);
} | 3.68 |
hbase_ChecksumUtil_numChunks | /**
* Returns the number of checksum chunks needed to store the checksums for a specified data size
* @param datasize number of bytes of data
* @param bytesPerChecksum number of bytes in a checksum chunk
* @return The number of checksum chunks
*/
static long numChunks(long datasize, int bytesPerChecksum) {
long numChunks = datasize / bytesPerChecksum;
if (datasize % bytesPerChecksum != 0) {
numChunks++;
}
return numChunks;
} | 3.68 |
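A tiny worked example of the ceiling division above; the sizes are arbitrary.

public class NumChunksDemo {
    static long numChunks(long datasize, int bytesPerChecksum) {
        long numChunks = datasize / bytesPerChecksum;
        if (datasize % bytesPerChecksum != 0) {
            numChunks++;                  // a partial trailing chunk still needs a checksum
        }
        return numChunks;
    }

    public static void main(String[] args) {
        System.out.println(numChunks(16384, 512));   // 32: exactly 32 full chunks
        System.out.println(numChunks(16385, 512));   // 33: one extra byte adds a partial chunk
    }
}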
graphhopper_CustomModel_merge | /**
* A new CustomModel is created from the baseModel merged with the specified queryModel. Returns the baseModel if
* queryModel is null.
*/
public static CustomModel merge(CustomModel baseModel, CustomModel queryModel) {
if (queryModel == null) return baseModel;
// avoid changing the specified CustomModel via deep copy otherwise the server-side CustomModel would be
// modified (same problem if queryModel would be used as target)
CustomModel mergedCM = new CustomModel(baseModel);
if (queryModel.getDistanceInfluence() != null)
mergedCM.distanceInfluence = queryModel.distanceInfluence;
if (queryModel.getHeadingPenalty() != null)
mergedCM.headingPenalty = queryModel.headingPenalty;
mergedCM.speedStatements.addAll(queryModel.getSpeed());
mergedCM.priorityStatements.addAll(queryModel.getPriority());
mergedCM.addAreas(queryModel.getAreas());
return mergedCM;
} | 3.68 |
hbase_MetaTableAccessor_makePutFromTableState | /**
* Construct PUT for given state
* @param state new state
*/
public static Put makePutFromTableState(TableState state, long ts) {
Put put = new Put(state.getTableName().getName(), ts);
put.addColumn(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER,
state.convert().toByteArray());
return put;
} | 3.68 |
hbase_ServerManager_getLoad | /** Returns ServerMetrics if serverName is known else null */
public ServerMetrics getLoad(final ServerName serverName) {
return this.onlineServers.get(serverName);
} | 3.68 |
hudi_DFSPathSelector_listEligibleFiles | /**
* List files recursively, filter out illegible files/directories while doing so.
*/
protected List<FileStatus> listEligibleFiles(FileSystem fs, Path path, long lastCheckpointTime) throws IOException {
// skip files/dirs whose names start with (_, ., etc)
FileStatus[] statuses = fs.listStatus(path, file ->
IGNORE_FILEPREFIX_LIST.stream().noneMatch(pfx -> file.getName().startsWith(pfx)));
List<FileStatus> res = new ArrayList<>();
for (FileStatus status: statuses) {
if (status.isDirectory()) {
// avoid infinite loop
if (!status.isSymlink()) {
res.addAll(listEligibleFiles(fs, status.getPath(), lastCheckpointTime));
}
} else if (status.getModificationTime() > lastCheckpointTime && status.getLen() > 0) {
res.add(status);
}
}
return res;
} | 3.68 |
hadoop_CachingBlockManager_requestCaching | /**
* Requests that the given block should be copied to the local cache.
* The block must not be accessed by the caller after calling this method
* because it will be released asynchronously relative to the caller.
*
* @throws IllegalArgumentException if data is null.
*/
@Override
public void requestCaching(BufferData data) {
if (closed) {
return;
}
if (cachingDisabled.get()) {
data.setDone();
return;
}
Validate.checkNotNull(data, "data");
// Opportunistic check without locking.
if (!data.stateEqualsOneOf(EXPECTED_STATE_AT_CACHING)) {
return;
}
synchronized (data) {
// Reconfirm state after locking.
if (!data.stateEqualsOneOf(EXPECTED_STATE_AT_CACHING)) {
return;
}
if (cache.containsBlock(data.getBlockNumber())) {
data.setDone();
return;
}
BufferData.State state = data.getState();
BlockOperations.Operation op = ops.requestCaching(data.getBlockNumber());
Future<Void> blockFuture;
if (state == BufferData.State.PREFETCHING) {
blockFuture = data.getActionFuture();
} else {
CompletableFuture<Void> cf = new CompletableFuture<>();
cf.complete(null);
blockFuture = cf;
}
CachePutTask task =
new CachePutTask(data, blockFuture, this, Instant.now());
Future<Void> actionFuture = futurePool.executeFunction(task);
data.setCaching(actionFuture);
ops.end(op);
}
} | 3.68 |
flink_CatalogSourceTable_createAnonymous | /**
* Create a {@link CatalogSourceTable} from an anonymous {@link ContextResolvedTable}. This is
* required to manually create a preparing table skipping the calcite catalog resolution.
*/
public static CatalogSourceTable createAnonymous(
FlinkRelBuilder relBuilder,
ContextResolvedTable contextResolvedTable,
boolean isBatchMode) {
Preconditions.checkArgument(
contextResolvedTable.isAnonymous(), "ContextResolvedTable must be anonymous");
// Statistics are unknown for anonymous tables
// Look at DatabaseCalciteSchema#getStatistic for more details
FlinkStatistic flinkStatistic =
FlinkStatistic.unknown(contextResolvedTable.getResolvedSchema()).build();
CatalogSchemaTable catalogSchemaTable =
new CatalogSchemaTable(contextResolvedTable, flinkStatistic, !isBatchMode);
return new CatalogSourceTable(
relBuilder.getRelOptSchema(),
contextResolvedTable.getIdentifier().toList(),
catalogSchemaTable.getRowType(relBuilder.getTypeFactory()),
catalogSchemaTable);
} | 3.68 |
flink_JobExecutionResult_fromJobSubmissionResult | /**
* Returns a dummy object for wrapping a JobSubmissionResult.
*
* @param result The SubmissionResult
* @return a JobExecutionResult
* @deprecated Will be removed in future versions.
*/
@Deprecated
public static JobExecutionResult fromJobSubmissionResult(JobSubmissionResult result) {
return new JobExecutionResult(result.getJobID(), -1, null);
} | 3.68 |
hadoop_ZKSignerSecretProvider_generateZKData | /**
* Serialize the data to attempt to push into ZooKeeper. The format is this:
* <p>
* [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, previousSecretLength, previousSecret, nextRolloverDate]
* <p>
* Only previousSecret can be null, in which case the format looks like this:
* <p>
* [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, 0, nextRolloverDate]
* <p>
* @param newSecret The new secret to use
* @param currentSecret The current secret
* @param previousSecret The previous secret
* @return The serialized data for ZooKeeper
*/
private synchronized byte[] generateZKData(byte[] newSecret,
byte[] currentSecret, byte[] previousSecret) {
int newSecretLength = newSecret.length;
int currentSecretLength = currentSecret.length;
int previousSecretLength = 0;
if (previousSecret != null) {
previousSecretLength = previousSecret.length;
}
ByteBuffer bb = ByteBuffer.allocate(INT_BYTES + INT_BYTES + newSecretLength
+ INT_BYTES + currentSecretLength + INT_BYTES + previousSecretLength
+ LONG_BYTES);
bb.putInt(DATA_VERSION);
bb.putInt(newSecretLength);
bb.put(newSecret);
bb.putInt(currentSecretLength);
bb.put(currentSecret);
bb.putInt(previousSecretLength);
if (previousSecretLength > 0) {
bb.put(previousSecret);
}
bb.putLong(nextRolloverDate);
return bb.array();
} | 3.68 |
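A self-contained sketch that builds a buffer in the layout described above and reads it back; the secrets, version constant and rollover date are made-up values, and the class is not part of Hadoop.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ZKDataLayoutDemo {
    public static void main(String[] args) {
        byte[] newSecret = "new".getBytes(StandardCharsets.UTF_8);
        byte[] currentSecret = "cur".getBytes(StandardCharsets.UTF_8);
        long nextRolloverDate = 1_700_000_000_000L;   // arbitrary epoch millis
        int dataVersion = 1;                          // stand-in for DATA_VERSION

        ByteBuffer out = ByteBuffer.allocate(4 + 4 + newSecret.length
            + 4 + currentSecret.length + 4 + 8);
        out.putInt(dataVersion);
        out.putInt(newSecret.length).put(newSecret);
        out.putInt(currentSecret.length).put(currentSecret);
        out.putInt(0);                                // previous secret is null, so length 0
        out.putLong(nextRolloverDate);

        ByteBuffer in = ByteBuffer.wrap(out.array());
        System.out.println("version=" + in.getInt());
        byte[] n = new byte[in.getInt()];
        in.get(n);
        byte[] c = new byte[in.getInt()];
        in.get(c);
        int previousLength = in.getInt();             // 0 signals "no previous secret"
        System.out.println("new=" + new String(n, StandardCharsets.UTF_8)
            + " current=" + new String(c, StandardCharsets.UTF_8)
            + " previousLength=" + previousLength
            + " rollover=" + in.getLong());
    }
}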
morf_AbstractSqlDialectTest_expectedLeftPad | /**
* @return the expected SQL for Left pad
*/
protected String expectedLeftPad() {
return "SELECT LPAD(stringField, 10, 'j') FROM " + tableName(TEST_TABLE);
} | 3.68 |
flink_ThriftObjectConversions_toTTypeQualifiers | /**
* Create {@link TTypeQualifiers} from {@link LogicalType}. The logic is almost same in the
* {@code org.apache.hive.service.cli#toTTypeQualifiers}.
*/
private static TTypeQualifiers toTTypeQualifiers(LogicalType type) {
Map<String, TTypeQualifierValue> qualifiers = new HashMap<>();
switch (type.getTypeRoot()) {
case DECIMAL:
qualifiers.put(
TCLIServiceConstants.PRECISION,
TTypeQualifierValue.i32Value(((DecimalType) type).getPrecision()));
qualifiers.put(
TCLIServiceConstants.SCALE,
TTypeQualifierValue.i32Value(((DecimalType) type).getScale()));
break;
case VARCHAR:
qualifiers.put(
TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH,
TTypeQualifierValue.i32Value(((VarCharType) type).getLength()));
break;
case CHAR:
qualifiers.put(
TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH,
TTypeQualifierValue.i32Value(((CharType) type).getLength()));
break;
}
return new TTypeQualifiers(qualifiers);
} | 3.68 |
framework_PropertysetItem_addPropertySetChangeListener | /**
* Registers a new property set change listener for this Item.
*
* @param listener
* the new Listener to be registered.
*/
@Override
public void addPropertySetChangeListener(
Item.PropertySetChangeListener listener) {
if (propertySetChangeListeners == null) {
propertySetChangeListeners = new LinkedList<PropertySetChangeListener>();
}
propertySetChangeListeners.add(listener);
} | 3.68 |
hadoop_RollingFileSystemSink_getNonNegative | /**
* Return the property value if it's non-negative and throw an exception if
* it's not.
*
* @param key the property key
* @param defaultValue the default value
*/
private long getNonNegative(String key, int defaultValue) {
int flushOffsetIntervalMillis = properties.getInt(key, defaultValue);
if (flushOffsetIntervalMillis < 0) {
throw new MetricsException("The " + key + " property must be "
+ "non-negative. Value was " + flushOffsetIntervalMillis);
}
return flushOffsetIntervalMillis;
} | 3.68 |
framework_CalendarConnector_getPaintableId | /*
* (non-Javadoc)
*
* @see com.vaadin.terminal.gwt.client.ui.ActionOwner#getPaintableId()
*/
@Override
public String getPaintableId() {
return getConnectorId();
} | 3.68 |
hadoop_ServiceLauncher_startupShutdownMessage | /**
* Build a log message for starting up and shutting down.
* @param classname the class of the server
* @param args arguments
* @return the startup/shutdown message
*/
protected static String startupShutdownMessage(String classname,
List<String> args) {
final String hostname = NetUtils.getHostname();
return StringUtils.createStartupShutdownMessage(classname, hostname,
args.toArray(new String[args.size()]));
} | 3.68 |
pulsar_LoadSimulationController_handleSimulate | // Handle the command line arguments associated with the simulate command.
private void handleSimulate(final ShellArguments arguments) throws Exception {
final List<String> commandArguments = arguments.commandArguments;
checkAppArgs(commandArguments.size() - 1, 1);
final ZooKeeper zkClient = new ZooKeeper(commandArguments.get(1), 5000, null);
// Make a map for each thread to speed up the ZooKeeper writing process.
final Map<String, ResourceQuota>[] threadLocalMaps = new Map[clients.length];
for (int i = 0; i < clients.length; ++i) {
threadLocalMaps[i] = new HashMap<>();
}
getResourceQuotas(QUOTA_ROOT, zkClient, threadLocalMaps);
final List<Future> futures = new ArrayList<>(clients.length);
int i = 0;
log.info("Simulating...");
for (final Map<String, ResourceQuota> bundleToQuota : threadLocalMaps) {
final int j = i;
futures.add(threadPool.submit(() -> {
for (final Map.Entry<String, ResourceQuota> entry : bundleToQuota.entrySet()) {
final String bundle = entry.getKey();
final String newAPIPath = bundle.replace(QUOTA_ROOT, BUNDLE_DATA_BASE_PATH);
final ResourceQuota quota = entry.getValue();
final int tenantStart = QUOTA_ROOT.length() + 1;
final String topic = String.format("persistent://%s/t", bundle.substring(tenantStart));
final BundleData bundleData = initializeBundleData(quota, arguments);
// Put the bundle data in the new ZooKeeper.
try {
ZkUtils.createFullPathOptimistic(zkClient, newAPIPath,
ObjectMapperFactory.getMapper().writer().writeValueAsBytes(bundleData),
ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
} catch (KeeperException.NodeExistsException e) {
try {
zkClient.setData(newAPIPath,
ObjectMapperFactory.getMapper().writer().writeValueAsBytes(bundleData), -1);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
try {
trade(arguments, topic, j);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}));
++i;
}
for (final Future future : futures) {
future.get();
}
zkClient.close();
} | 3.68 |
flink_NetUtils_acceptWithoutTimeout | /**
* Calls {@link ServerSocket#accept()} on the provided server socket, suppressing any thrown
* {@link SocketTimeoutException}s. This is a workaround for the underlying JDK-8237858 bug in
* JDK 11 that can cause errant SocketTimeoutExceptions to be thrown at unexpected times.
*
* <p>This method expects the provided ServerSocket has no timeout set (SO_TIMEOUT of 0),
* indicating an infinite timeout. It will suppress all SocketTimeoutExceptions, even if a
* ServerSocket with a non-zero timeout is passed in.
*
* @param serverSocket a ServerSocket with {@link SocketOptions#SO_TIMEOUT SO_TIMEOUT} set to 0;
* if SO_TIMEOUT is greater than 0, then this method will suppress SocketTimeoutException;
* must not be null; SO_TIMEOUT option must be set to 0
* @return the new Socket
* @throws IOException see {@link ServerSocket#accept()}
* @see <a href="https://bugs.openjdk.java.net/browse/JDK-8237858">JDK-8237858</a>
*/
public static Socket acceptWithoutTimeout(ServerSocket serverSocket) throws IOException {
Preconditions.checkArgument(
serverSocket.getSoTimeout() == 0, "serverSocket SO_TIMEOUT option must be 0");
while (true) {
try {
return serverSocket.accept();
} catch (SocketTimeoutException exception) {
// This should be impossible given that the socket timeout is set to zero
// which indicates an infinite timeout. This is due to the underlying JDK-8237858
// bug. We retry the accept call indefinitely to replicate the expected behavior.
}
}
} | 3.68 |
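A minimal usage sketch of the precondition: a freshly created ServerSocket has SO_TIMEOUT 0 (infinite), which is exactly what the check above expects. The commented-out accept call assumes Flink's NetUtils is on the classpath.

import java.io.IOException;
import java.net.ServerSocket;

public class AcceptDemo {
    public static void main(String[] args) throws IOException {
        // Port 0 lets the OS pick a free port; SO_TIMEOUT defaults to 0 (infinite).
        try (ServerSocket server = new ServerSocket(0)) {
            System.out.println("listening on port " + server.getLocalPort()
                + ", SO_TIMEOUT=" + server.getSoTimeout());
            // In real use the next line would block until a client connects:
            // Socket client = NetUtils.acceptWithoutTimeout(server);
        }
    }
}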
dubbo_NettyChannel_send | /**
* Send a message via Netty, optionally waiting for the completion of the send.
*
* @param message message that need send.
* @param sent whether to wait for the completion of the asynchronous send
* @throws RemotingException throw RemotingException if wait until timeout or any exception thrown by method body that surrounded by try-catch.
*/
@Override
public void send(Object message, boolean sent) throws RemotingException {
// whether the channel is closed
super.send(message, sent);
boolean success = true;
int timeout = 0;
try {
Object outputMessage = message;
if (!encodeInIOThread) {
ByteBuf buf = channel.alloc().buffer();
ChannelBuffer buffer = new NettyBackedChannelBuffer(buf);
codec.encode(this, buffer, message);
outputMessage = buf;
}
ChannelFuture future = writeQueue.enqueue(outputMessage).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (!(message instanceof Request)) {
return;
}
ChannelHandler handler = getChannelHandler();
if (future.isSuccess()) {
handler.sent(NettyChannel.this, message);
} else {
Throwable t = future.cause();
if (t == null) {
return;
}
Response response = buildErrorResponse((Request) message, t);
handler.received(NettyChannel.this, response);
}
}
});
if (sent) {
// wait timeout ms
timeout = getUrl().getPositiveParameter(TIMEOUT_KEY, DEFAULT_TIMEOUT);
success = future.await(timeout);
}
Throwable cause = future.cause();
if (cause != null) {
throw cause;
}
} catch (Throwable e) {
removeChannelIfDisconnected(channel);
throw new RemotingException(
this,
"Failed to send message " + PayloadDropper.getRequestWithoutData(message) + " to "
+ getRemoteAddress() + ", cause: " + e.getMessage(),
e);
}
if (!success) {
throw new RemotingException(
this,
"Failed to send message " + PayloadDropper.getRequestWithoutData(message) + " to "
+ getRemoteAddress() + " in timeout(" + timeout + "ms) limit");
}
} | 3.68 |
hadoop_HdfsDataInputStream_getCurrentBlock | /**
* Get the block containing the target position.
*/
public ExtendedBlock getCurrentBlock() {
return getDFSInputStream().getCurrentBlock();
} | 3.68 |
graphhopper_PMap_read | /**
* Reads a PMap from a string array consisting of key=value pairs
*/
public static PMap read(String[] args) {
PMap map = new PMap();
for (String arg : args) {
int index = arg.indexOf("=");
if (index <= 0) {
continue;
}
String key = arg.substring(0, index);
if (key.startsWith("-")) {
key = key.substring(1);
}
if (key.startsWith("-")) {
key = key.substring(1);
}
String value = arg.substring(index + 1);
Object old = map.map.put(Helper.camelCaseToUnderScore(key), Helper.toObject(value));
if (old != null)
throw new IllegalArgumentException("Pair '" + Helper.camelCaseToUnderScore(key) + "'='" + value + "' not possible to " +
"add to the PMap-object as the key already exists with '" + old + "'");
}
return map;
} | 3.68 |
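A simplified, self-contained sketch of the same key=value convention against a plain HashMap; unlike the real method it strips any number of leading dashes and skips the camelCase/under_score conversion, value typing and duplicate-key check.

import java.util.HashMap;
import java.util.Map;

public class ArgsParseDemo {
    public static void main(String[] args) {
        String[] input = {"-graph.location=maps", "--profiles=car", "ignored-without-equals"};
        Map<String, String> map = new HashMap<>();
        for (String arg : input) {
            int index = arg.indexOf("=");
            if (index <= 0) {
                continue;                      // skip entries without key=value
            }
            String key = arg.substring(0, index);
            while (key.startsWith("-")) {
                key = key.substring(1);        // strip leading dashes
            }
            map.put(key, arg.substring(index + 1));
        }
        System.out.println(map);               // e.g. {graph.location=maps, profiles=car}
    }
}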
hadoop_MDCFilter_doFilter | /**
* Sets the slf4j <code>MDC</code> and delegates the request to the chain.
*
* @param request servlet request.
* @param response servlet response.
* @param chain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
try {
MDC.clear();
String hostname = HostnameFilter.get();
if (hostname != null) {
MDC.put("hostname", HostnameFilter.get());
}
Principal principal = ((HttpServletRequest) request).getUserPrincipal();
String user = (principal != null) ? principal.getName() : null;
if (user != null) {
MDC.put("user", user);
}
MDC.put("method", ((HttpServletRequest) request).getMethod());
if (((HttpServletRequest) request).getPathInfo() != null) {
MDC.put("path", ((HttpServletRequest) request).getPathInfo());
}
chain.doFilter(request, response);
} finally {
MDC.clear();
}
} | 3.68 |
hibernate-validator_MethodValidationConfiguration_isAllowMultipleCascadedValidationOnReturnValues | /**
* @return {@code true} if more than one return value within a class hierarchy can be marked for cascaded
* validation, {@code false} otherwise.
*/
public boolean isAllowMultipleCascadedValidationOnReturnValues() {
return this.allowMultipleCascadedValidationOnReturnValues;
} | 3.68 |
morf_XmlDataSetProducer_columns | /**
* @see org.alfasoftware.morf.metadata.Table#columns()
*/
@Override
public List<Column> columns() {
return columns;
} | 3.68 |
hmily_SerializeEnum_acquire | /**
* Acquire the serialize protocol enum for the given serialize protocol name.
*
* @param serialize the serialize protocol
* @return the serialize protocol enum
*/
public static SerializeEnum acquire(final String serialize) {
Optional<SerializeEnum> serializeEnum =
Arrays.stream(SerializeEnum.values())
.filter(v -> Objects.equals(v.getSerialize(), serialize))
.findFirst();
return serializeEnum.orElse(SerializeEnum.KRYO);
} | 3.68 |
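A self-contained sketch of the same lookup-with-fallback shape using a made-up enum; SerializeEnum itself matches on a per-constant serialize string rather than the constant name, so this only illustrates the Optional.orElse pattern.

import java.util.Arrays;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;

public class EnumLookupDemo {
    enum Codec { KRYO, HESSIAN, PROTOSTUFF }

    static Codec acquire(String name) {
        Optional<Codec> match = Arrays.stream(Codec.values())
            .filter(v -> Objects.equals(v.name().toLowerCase(Locale.ROOT), name))
            .findFirst();
        return match.orElse(Codec.KRYO);       // unknown names fall back to KRYO
    }

    public static void main(String[] args) {
        System.out.println(acquire("hessian"));   // HESSIAN
        System.out.println(acquire("unknown"));   // KRYO (default)
    }
}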
framework_DropTargetExtension_registerDropTargetRpc | /**
* Registers the server side RPC methods invoked from client side on
* <code>drop</code> event.
* <p>
* Override this method if you need to have a custom RPC interface for
* transmitting the drop event with more data. If you just need to do additional
* things before firing the drop event, then you should override
* {@link #onDrop(List, Map, DropEffect, MouseEventDetails)} instead.
*/
protected void registerDropTargetRpc() {
registerRpc((DropTargetRpc) (types, data, dropEffect,
mouseEventDetails) -> onDrop(types, data,
DropEffect.valueOf(dropEffect.toUpperCase(Locale.ROOT)),
mouseEventDetails));
} | 3.68 |
framework_GridElement_toggleColumnHidden | /**
* Toggles the column visibility. Column is identified by its hiding toggle
* caption.
*
* @param toggleCaption
* @since 8.0.6
*/
public void toggleColumnHidden(String toggleCaption) {
if (!isElementPresent(By.className("v-grid-sidebar-content"))) {
// Open sidebar menu
WebElement sidebarButton = findElement(
By.className("v-grid-sidebar"))
.findElement(By.tagName("button"));
sidebarButton.click();
}
Optional<WebElement> toggleButton = getDriver()
.findElement(By.className("v-grid-sidebar-content"))
.findElements(By.className("column-hiding-toggle")).stream()
.filter(element -> element.getText().equals(toggleCaption))
.findAny();
if (toggleButton.isPresent()) {
toggleButton.ifPresent(element -> element.click());
} else {
throw new IllegalArgumentException(
"No column hiding toggle with caption '" + toggleCaption
+ "'");
}
} | 3.68 |
flink_DynamicSinkUtils_getPhysicalColumnIndices | /** Return the indices from {@code colIndexes} that belong to physical columns. */
private static int[] getPhysicalColumnIndices(List<Integer> colIndexes, ResolvedSchema schema) {
return colIndexes.stream()
.filter(i -> schema.getColumns().get(i).isPhysical())
.mapToInt(i -> i)
.toArray();
} | 3.68 |
hbase_SequenceIdAccounting_updateStore | /**
* Update the store sequence id, e.g., upon executing in-memory compaction
*/
void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceId,
boolean onlyIfGreater) {
if (sequenceId == null) {
return;
}
Long highest = this.highestSequenceIds.get(encodedRegionName);
if (highest == null || sequenceId > highest) {
this.highestSequenceIds.put(encodedRegionName, sequenceId);
}
ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap(familyName);
synchronized (this.tieLock) {
ConcurrentMap<ImmutableByteArray, Long> m = getOrCreateLowestSequenceIds(encodedRegionName);
boolean replaced = false;
while (!replaced) {
Long oldSeqId = m.get(familyNameWrapper);
if (oldSeqId == null) {
m.put(familyNameWrapper, sequenceId);
replaced = true;
} else if (onlyIfGreater) {
if (sequenceId > oldSeqId) {
replaced = m.replace(familyNameWrapper, oldSeqId, sequenceId);
} else {
return;
}
} else { // replace even if sequence id is not greater than oldSeqId
m.put(familyNameWrapper, sequenceId);
return;
}
}
}
} | 3.68 |
querydsl_GuavaGroupByBuilder_asMultimap | /**
* Get the results as a multimap.
*
* @param expression value expression
* @param <V> Value type
* @return new result transformer
*/
public <V> ResultTransformer<Multimap<K, V>> asMultimap(Expression<V> expression) {
final Expression<V> lookup = getLookup(expression);
return new GroupByMultimap<K, V, Multimap<K, V>>(key, expression) {
@Override
protected Multimap<K, V> transform(Multimap<K, Group> groups) {
Multimap<K, V> results = LinkedHashMultimap.create();
for (Map.Entry<K, Group> entry : groups.entries()) {
results.put(entry.getKey(), entry.getValue().getOne(lookup));
}
return results;
}
};
} | 3.68 |
hudi_BaseHoodieQueueBasedExecutor_startProducingAsync | /**
* Start producing
*/
public final CompletableFuture<Void> startProducingAsync() {
return allOf(producers.stream()
.map(producer -> CompletableFuture.supplyAsync(() -> {
doProduce(queue, producer);
return (Void) null;
}, producerExecutorService))
.collect(Collectors.toList())
)
.thenApply(ignored -> (Void) null)
.whenComplete((result, throwable) -> {
// Regardless of how producing has completed, we have to close producers
// to make sure resources are properly cleaned up
producers.forEach(HoodieProducer::close);
// Mark production as done so that consumer will be able to exit
queue.seal();
});
} | 3.68 |
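A generic sketch of the same allOf/whenComplete pattern, with plain strings standing in for the producers and a JDK thread pool standing in for the producer executor; the queue sealing and producer close calls of the real executor are represented only by a print statement, and the Hudi allOf(List) helper is replaced by CompletableFuture.allOf.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AllOfDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        List<String> producers = List.of("p1", "p2", "p3");

        CompletableFuture<Void> all = CompletableFuture.allOf(producers.stream()
                .map(p -> CompletableFuture.runAsync(
                    () -> System.out.println(p + " producing on "
                        + Thread.currentThread().getName()), pool))
                .toArray(CompletableFuture[]::new))
            .whenComplete((result, throwable) ->
                // runs whether production succeeded or failed, like the close/seal step above
                System.out.println("all producers finished, sealing queue"));

        all.join();
        pool.shutdown();
    }
}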
hmily_HmilyTimer_getTime | /**
* get time.
*
* @return long. time
*/
public Long getTime() {
return time;
} | 3.68 |
morf_DataSetConnectorMultiThreaded_connect | /**
* Transmits all data from the producer to the consumer.
*/
public void connect() {
CloseState closeState = CloseState.INCOMPLETE;
ExecutorService executor = Executors.newFixedThreadPool(threadCount);
try {
consumer.open();
producer.open();
try {
for (String tableName : producer.getSchema().tableNames()) {
try {
executor.execute(new DataSetConnectorRunnable(producer, consumer, tableName));
} catch (Exception e) {
executor.shutdownNow();
throw new RuntimeException("Error connecting table [" + tableName + "]", e);
}
}
executor.shutdown();
executor.awaitTermination(60, TimeUnit.MINUTES);
// once we've read all the tables without exception, we're complete
closeState = CloseState.COMPLETE;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
producer.close();
}
} finally {
executor.shutdownNow();
consumer.close(closeState);
}
} | 3.68 |
framework_PushAtmosphereHandler_onMessage | /**
* Called when the client sends a message through the push channel.
*
* @param resource the Atmosphere resource the message was received on
*/
private void onMessage(AtmosphereResource resource) {
pushHandler.onMessage(resource);
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateReturnAndInvocation | /**
* generate method invocation statement and return it if necessary
*/
private String generateReturnAndInvocation(Method method) {
String returnStatement = method.getReturnType().equals(void.class) ? "" : "return ";
String args = IntStream.range(0, method.getParameters().length)
.mapToObj(i -> String.format(CODE_EXTENSION_METHOD_INVOKE_ARGUMENT, i))
.collect(Collectors.joining(", "));
return returnStatement + String.format("extension.%s(%s);\n", method.getName(), args);
} | 3.68 |
hbase_Procedure_childrenCountDown | /**
* Called by the ProcedureExecutor to notify that one of the sub-procedures has completed.
*/
private synchronized boolean childrenCountDown() {
assert childrenLatch > 0 : this;
boolean b = --childrenLatch == 0;
if (LOG.isTraceEnabled()) {
LOG.trace("CHILD LATCH DECREMENT " + childrenLatch, new Throwable(this.toString()));
}
return b;
} | 3.68 |
dubbo_AbstractDependencyFilterMojo_getFilters | /**
* Return artifact filters configured for this MOJO.
* @param additionalFilters optional additional filters to apply
* @return the filters
*/
private FilterArtifacts getFilters(ArtifactsFilter... additionalFilters) {
FilterArtifacts filters = new FilterArtifacts();
for (ArtifactsFilter additionalFilter : additionalFilters) {
filters.addFilter(additionalFilter);
}
filters.addFilter(new MatchingGroupIdFilter(cleanFilterConfig(this.excludeGroupIds)));
if (this.includes != null && !this.includes.isEmpty()) {
filters.addFilter(new IncludeFilter(this.includes));
}
if (this.excludes != null && !this.excludes.isEmpty()) {
filters.addFilter(new ExcludeFilter(this.excludes));
}
return filters;
} | 3.68 |
hbase_WALActionsListener_preLogRoll | /**
* The WAL is going to be rolled. The oldPath can be null if this is the first log file from the
* regionserver.
* @param oldPath the path to the old wal
* @param newPath the path to the new wal
*/
default void preLogRoll(Path oldPath, Path newPath) throws IOException {
} | 3.68 |
pulsar_FunctionRuntimeManager_processAssignment | /**
* Process an assignment update from the assignment topic.
* @param newAssignment the assignment
*/
public synchronized void processAssignment(Assignment newAssignment) {
boolean exists = false;
for (Map<String, Assignment> entry : this.workerIdToAssignments.values()) {
if (entry.containsKey(FunctionCommon.getFullyQualifiedInstanceId(newAssignment.getInstance()))) {
exists = true;
}
}
if (exists) {
updateAssignment(newAssignment);
} else {
addAssignment(newAssignment);
}
} | 3.68 |
hadoop_MawoConfiguration_getWorkerConcurrentTasksLimit | /**
* Get number of tasks a worker can run in parallel.
* @return value of worker.num.tasks
*/
public int getWorkerConcurrentTasksLimit() {
return Integer.parseInt(configsMap.get(WORKER_NUM_TASKS));
} | 3.68 |
flink_ExecNodeMetadataUtil_latestAnnotation | /**
* Returns the {@link ExecNodeMetadata} annotation of the class with the highest (most recent)
* {@link ExecNodeMetadata#version()}.
*/
public static <T extends ExecNode<?>> ExecNodeMetadata latestAnnotation(
Class<T> execNodeClass) {
List<ExecNodeMetadata> sortedAnnotations = extractMetadataFromAnnotation(execNodeClass);
if (sortedAnnotations.isEmpty()) {
return null;
}
sortedAnnotations.sort(Comparator.comparingInt(ExecNodeMetadata::version));
return sortedAnnotations.get(sortedAnnotations.size() - 1);
} | 3.68 |
shardingsphere-elasticjob_GuaranteeService_registerStart | /**
* Register start.
*
* @param shardingItems to be registered sharding items
*/
public void registerStart(final Collection<Integer> shardingItems) {
for (int each : shardingItems) {
jobNodeStorage.createJobNodeIfNeeded(GuaranteeNode.getStartedNode(each));
}
} | 3.68 |
hbase_ThriftUtilities_deleteFromThrift | /**
* Creates a {@link Delete} (HBase) from a {@link TDelete} (Thrift).
* @param in the <code>TDelete</code> to convert
* @return converted <code>Delete</code>
*/
public static Delete deleteFromThrift(TDelete in) {
Delete out;
if (in.isSetColumns()) {
out = new Delete(in.getRow());
for (TColumn column : in.getColumns()) {
if (in.isSetDeleteType()) {
switch (in.getDeleteType()) {
case DELETE_COLUMN:
if (column.isSetTimestamp()) {
out.addColumn(column.getFamily(), column.getQualifier(), column.getTimestamp());
} else {
out.addColumn(column.getFamily(), column.getQualifier());
}
break;
case DELETE_COLUMNS:
if (column.isSetTimestamp()) {
out.addColumns(column.getFamily(), column.getQualifier(), column.getTimestamp());
} else {
out.addColumns(column.getFamily(), column.getQualifier());
}
break;
case DELETE_FAMILY:
if (column.isSetTimestamp()) {
out.addFamily(column.getFamily(), column.getTimestamp());
} else {
out.addFamily(column.getFamily());
}
break;
case DELETE_FAMILY_VERSION:
if (column.isSetTimestamp()) {
out.addFamilyVersion(column.getFamily(), column.getTimestamp());
} else {
throw new IllegalArgumentException(
"Timestamp is required for TDelete with DeleteFamilyVersion type");
}
break;
default:
throw new IllegalArgumentException("DeleteType is required for TDelete");
}
} else {
throw new IllegalArgumentException("DeleteType is required for TDelete");
}
}
} else {
if (in.isSetTimestamp()) {
out = new Delete(in.getRow(), in.getTimestamp());
} else {
out = new Delete(in.getRow());
}
}
if (in.isSetAttributes()) {
addAttributes(out, in.getAttributes());
}
if (in.isSetDurability()) {
out.setDurability(durabilityFromThrift(in.getDurability()));
}
return out;
} | 3.68 |
hbase_EncryptionUtil_unwrapKey | /**
* Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the
* configured master and alternative keys, rather than having to specify a key type to unwrap
* with. The configuration must be set up correctly for key alias resolution.
* @param conf the current configuration
* @param keyBytes the key encrypted by master (or alternative) to unwrap
* @return the key bytes, decrypted
* @throws IOException if the key cannot be unwrapped
*/
public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOException {
Key key;
String masterKeyName =
conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName());
try {
// First try the master key
key = unwrapKey(conf, masterKeyName, keyBytes);
} catch (KeyException e) {
// If the current master key fails to unwrap, try the alternate, if
// one is configured
if (LOG.isDebugEnabled()) {
LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'");
}
String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY);
if (alternateKeyName != null) {
try {
key = unwrapKey(conf, alternateKeyName, keyBytes);
} catch (KeyException ex) {
throw new IOException(ex);
}
} else {
throw new IOException(e);
}
}
return key;
} | 3.68 |
framework_SingleSelectionModelImpl_isKeySelected | /**
* Returns whether the given key maps to the currently selected item.
*
* @param key
* the key to test or {@code null} to test whether nothing is
* selected
* @return {@code true} if the key equals the key of the currently selected
* item (or {@code null} if no selection), {@code false} otherwise.
*/
protected boolean isKeySelected(String key) {
return isSelected(getData(key));
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterColumnRenamingAndChangingNullability | /**
   * Test renaming a column and changing it from not nullable to nullable.
*/
@Test
public void testAlterColumnRenamingAndChangingNullability() {
testAlterTableColumn(OTHER_TABLE, AlterationType.ALTER, getColumn(OTHER_TABLE, FLOAT_FIELD), column("blahField", DataType.DECIMAL, 20, 3).nullable(), expectedAlterColumnRenamingAndChangingNullability());
} | 3.68 |
hadoop_BoundedResourcePool_tryAcquire | /**
   * Acquires a resource if one is immediately available; otherwise returns null without blocking.
*/
@Override
public T tryAcquire() {
return this.acquireHelper(false);
} | 3.68 |
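A hedged sketch contrasting tryAcquire() with the blocking acquire(). It assumes the Hadoop prefetch-style pool API in which the constructor takes the pool size, createNew() is the abstract factory, and release() returns an item to the pool; the class and package names (org.apache.hadoop.fs.impl.prefetch.BoundedResourcePool) are assumptions that may differ by Hadoop version.

import java.nio.ByteBuffer;
import org.apache.hadoop.fs.impl.prefetch.BoundedResourcePool;

public class TryAcquireSketch {

  // A pool of at most two reusable 8 KB buffers.
  static class BufferPool extends BoundedResourcePool<ByteBuffer> {
    BufferPool(int size) {
      super(size);
    }

    @Override
    protected ByteBuffer createNew() {
      return ByteBuffer.allocate(8 * 1024);
    }
  }

  public static void main(String[] args) {
    BufferPool pool = new BufferPool(2);
    ByteBuffer first = pool.acquire();       // blocks until a buffer is available
    ByteBuffer second = pool.tryAcquire();   // returns immediately
    ByteBuffer third = pool.tryAcquire();    // pool exhausted, so this is null
    System.out.println(third == null);       // true
    pool.release(first);
    pool.release(second);
  }
}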
flink_PartitionLoader_loadEmptyPartition | /**
     * The Flink job does not write data to the partition, but the corresponding partition needs to
* be created or updated.
*
* <p>The partition does not exist, create it.
*
* <p>The partition exists:
*
* <pre>
* if overwrite is true, delete the path, then create it;
* if overwrite is false, do nothing;
* </pre>
*/
public void loadEmptyPartition(LinkedHashMap<String, String> partSpec) throws Exception {
Optional<Path> pathFromMeta = metaStore.getPartition(partSpec);
if (pathFromMeta.isPresent() && !overwrite) {
commitPartition(partSpec, pathFromMeta.get());
return;
}
Path path = new Path(metaStore.getLocationPath(), generatePartitionPath(partSpec));
if (pathFromMeta.isPresent()) {
fs.delete(pathFromMeta.get(), true);
fs.mkdirs(path);
}
commitPartition(partSpec, path);
} | 3.68 |
flink_ResourceInformationReflector_setResourceInformation | /** Adds the given resourceName and amount to the {@link Resource}. */
void setResourceInformation(Resource resource, String resourceName, long amount) {
setResourceInformationUnSafe(resource, resourceName, amount);
} | 3.68 |
hbase_MunkresAssignment_testIsDone | /**
* Test whether the algorithm is done, i.e. we have the optimal assignment. This occurs when there
* is exactly one starred zero in each row.
* @return true if the algorithm is done
*/
private boolean testIsDone() {
// Cover all columns containing a starred zero. There can be at most one
// starred zero per column. Therefore, a covered column has an optimal
// assignment.
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == STAR) {
colsCovered[c] = true;
}
}
}
// Count the total number of covered columns.
int coveredCols = 0;
for (int c = 0; c < cols; c++) {
coveredCols += colsCovered[c] ? 1 : 0;
}
    // Apply any row and column adjustments that are pending.
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
cost[r][c] += rowAdjust[r];
cost[r][c] += colAdjust[c];
}
}
// Clear the pending row and column adjustments.
Arrays.fill(rowAdjust, 0);
Arrays.fill(colAdjust, 0);
    // The covers on columns and rows may have been reset, so recompute the
    // least value for each row.
for (int r = 0; r < rows; r++) {
leastInRow[r] = Float.POSITIVE_INFINITY;
for (int c = 0; c < cols; c++) {
if (!rowsCovered[r] && !colsCovered[c] && cost[r][c] < leastInRow[r]) {
leastInRow[r] = cost[r][c];
leastInRowIndex[r] = c;
}
}
}
// If all columns are covered, then we are done. Since there may be more
// columns than rows, we are also done if the number of covered columns is
// at least as great as the number of rows.
return (coveredCols == cols || coveredCols >= rows);
} | 3.68 |
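To make the termination condition concrete, here is a hedged usage sketch of the surrounding class. It assumes MunkresAssignment exposes a float[][] cost-matrix constructor and a solve() method returning, for each row, the assigned column index; those signatures are inferred, not quoted from the source.

import org.apache.hadoop.hbase.util.MunkresAssignment;

public class MunkresSketch {
  public static void main(String[] args) {
    // cost[r][c] is the cost of assigning row r to column c.
    float[][] cost = {
        { 4f, 1f, 3f },
        { 2f, 0f, 5f },
        { 3f, 2f, 2f },
    };
    int[] assignment = new MunkresAssignment(cost).solve();
    // Once testIsDone() reports completion, each row has exactly one starred
    // zero, i.e. exactly one assigned column.
    for (int r = 0; r < assignment.length; r++) {
      System.out.println("row " + r + " -> col " + assignment[r]);
    }
  }
}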
hadoop_ProducerConsumer_getWorkCnt | /**
   * Returns the number of pending ProducerConsumer items (submitted to the input
   * queue for processing via put() but not yet consumed by take() or
   * blockingTake()).
*
* @return Number of items in ProducerConsumer (either pending for
* processing or waiting to be consumed).
*/
public int getWorkCnt() {
return workCnt.get();
} | 3.68 |
hadoop_HdfsFileStatus_convert | /**
* Set redundant flags for compatibility with existing applications.
*/
static FsPermission convert(boolean isdir, boolean symlink,
FsPermission p, Set<Flags> f) {
if (p instanceof FsPermissionExtension) {
// verify flags are set consistently
assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
return p;
}
if (null == p) {
if (isdir) {
p = FsPermission.getDirDefault();
} else if (symlink) {
p = FsPermission.getDefault();
} else {
p = FsPermission.getFileDefault();
}
}
return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL),
f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
} | 3.68 |
hadoop_JsonSerialization_fromJson | /**
* Convert from JSON.
*
* @param json input
* @return the parsed JSON
* @throws IOException IO problems
* @throws JsonParseException If the input is not well-formatted
* @throws JsonMappingException failure to map from the JSON to this class
*/
@SuppressWarnings("unchecked")
public synchronized T fromJson(String json)
throws IOException, JsonParseException, JsonMappingException {
if (json.isEmpty()) {
throw new EOFException("No data");
}
try {
return mapper.readValue(json, classType);
} catch (IOException e) {
LOG.error("Exception while parsing json : {}\n{}", e, json, e);
throw e;
}
} | 3.68 |
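A small round-trip sketch of the class this method belongs to. The three-argument constructor (class, failOnUnknownProperties, pretty) and the toJson counterpart are assumptions about the surrounding API rather than quotes from it, and the POJO is hypothetical.

import org.apache.hadoop.util.JsonSerialization;

public class JsonRoundTripSketch {

  // A trivial Jackson-friendly POJO: default no-arg constructor plus bean accessors.
  public static class Counters {
    private long records;

    public long getRecords() {
      return records;
    }

    public void setRecords(long records) {
      this.records = records;
    }
  }

  public static void main(String[] args) throws Exception {
    // Constructor signature assumed: (classType, failOnUnknownProperties, pretty).
    JsonSerialization<Counters> serDeser =
        new JsonSerialization<>(Counters.class, false, true);

    Counters in = new Counters();
    in.setRecords(42L);

    String json = serDeser.toJson(in);
    Counters out = serDeser.fromJson(json);   // the method shown above
    System.out.println(out.getRecords());     // 42
  }
}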
framework_AbstractInMemoryContainer_sortContainer | /**
* Sort base implementation to be used to implement {@link Sortable}.
*
* Subclasses should call this from a public
* {@link #sort(Object[], boolean[])} method when implementing Sortable.
*
* @see Container.Sortable#sort(java.lang.Object[], boolean[])
*/
protected void sortContainer(Object[] propertyId, boolean[] ascending) {
if (!(this instanceof Sortable)) {
throw new UnsupportedOperationException(
"Cannot sort a Container that does not implement Sortable");
}
// Set up the item sorter for the sort operation
getItemSorter().setSortProperties((Sortable) this, propertyId,
ascending);
// Perform the actual sort
doSort();
// Post sort updates
if (isFiltered()) {
filterAll();
} else {
fireItemSetChange();
}
} | 3.68 |
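For context, a hedged sketch of the public Sortable path that ends up in sortContainer(), using BeanItemContainer (a concrete AbstractInMemoryContainer subclass). The package name assumes the Vaadin 8 compatibility layer; in Vaadin 7 it is com.vaadin.data.util.BeanItemContainer.

import com.vaadin.v7.data.util.BeanItemContainer;

public class SortContainerSketch {

  public static class Person {
    private final String name;
    private final int age;

    public Person(String name, int age) {
      this.name = name;
      this.age = age;
    }

    public String getName() {
      return name;
    }

    public int getAge() {
      return age;
    }
  }

  public static void main(String[] args) {
    BeanItemContainer<Person> container = new BeanItemContainer<>(Person.class);
    container.addBean(new Person("Alice", 30));
    container.addBean(new Person("Bob", 25));

    // Public Sortable entry point; internally it delegates to sortContainer().
    container.sort(new Object[] { "age" }, new boolean[] { true });
    System.out.println(container.getIdByIndex(0).getName()); // Bob
  }
}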
hbase_Table_getReadRpcTimeout | /**
* Get timeout of each rpc read request in this Table instance.
* @param unit the unit of time the timeout to be represented in
* @return read rpc timeout in the specified time unit
*/
default long getReadRpcTimeout(TimeUnit unit) {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
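A short usage sketch against a real Table instance. It assumes an HBase client configuration on the classpath and a table named "demo_table" (both hypothetical here), and otherwise uses only the standard ConnectionFactory/Connection API.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ReadRpcTimeoutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      long readTimeoutMs = table.getReadRpcTimeout(TimeUnit.MILLISECONDS);
      System.out.println("read RPC timeout: " + readTimeoutMs + " ms");
    }
  }
}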
morf_SqlScriptExecutor_executionStart | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#executionStart()
*/
@Override
public void executionStart() {
// Defaults to no-op
} | 3.68 |
framework_AbstractTextField_setCursorPosition | /**
* Sets the cursor position in the field. As a side effect the field will
* become focused.
*
* @param pos
* the position for the cursor
*/
public void setCursorPosition(int pos) {
setSelection(pos, 0);
} | 3.68 |
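A minimal sketch of the caret-placement behaviour described above, using a plain TextField (a concrete AbstractTextField); the field label and value are made up.

import com.vaadin.ui.TextField;

public class CursorPositionSketch {
  public static void main(String[] args) {
    TextField amount = new TextField("Amount");
    amount.setValue("1000");
    // Place the caret after the last character; per the javadoc above this
    // also focuses the field once it is attached to a UI.
    amount.setCursorPosition(amount.getValue().length());
  }
}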
framework_VSlider_setReadOnly | /**
* Sets the read-only status of this slider. Users cannot interact with a
* read-only widget, but the default styles don't show it grayed out unless
* it's also disabled. The slider is not read-only by default.
*
* @param readonly
* a boolean value specifying whether the slider should be in
* read-only mode or not
* @see #setDisabled(boolean)
*/
public void setReadOnly(boolean readonly) {
this.readonly = readonly;
} | 3.68 |
framework_AbstractExtension_getSupportedParentType | /**
* Gets a type that the parent must be an instance of. Override this if the
     * extension only supports certain targets, e.g. if only TextFields can be
* extended.
*
* @return a type that the parent must be an instance of
*/
protected Class<? extends ClientConnector> getSupportedParentType() {
return ClientConnector.class;
} | 3.68 |
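A hedged sketch of the override described above: a made-up extension (TextFieldHighlighter is hypothetical) that restricts its target to TextField and exposes the conventional static extend-style entry point.

import com.vaadin.server.AbstractExtension;
import com.vaadin.server.ClientConnector;
import com.vaadin.ui.TextField;

public class TextFieldHighlighter extends AbstractExtension {

  // Conventional entry point: attach the extension to a specific text field.
  public static TextFieldHighlighter highlight(TextField target) {
    TextFieldHighlighter highlighter = new TextFieldHighlighter();
    highlighter.extend(target);
    return highlighter;
  }

  @Override
  protected Class<? extends ClientConnector> getSupportedParentType() {
    // Only TextFields may be extended; other parents are rejected at attach time.
    return TextField.class;
  }
}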