name | code_snippet | score |
---|---|---|
hudi_BitCaskDiskMap_iterator | /**
* Custom iterator to iterate over values written to disk.
*/
@Override
public Iterator<R> iterator() {
ClosableIterator<R> iterator = new LazyFileIterable(filePath, valueMetadataMap, isCompressionEnabled).iterator();
this.iterators.add(iterator);
return iterator;
} | 3.68 |
dubbo_StringUtils_translate | /**
* Translates {@code src}: each character found in {@code from} is replaced by the character at the same index in {@code to}; characters whose index exceeds the length of {@code to} are dropped.
*
* @param src source string.
* @param from src char table.
* @param to target char table.
* @return String.
*/
public static String translate(String src, String from, String to) {
if (isEmpty(src)) {
return src;
}
StringBuilder sb = null;
int ix;
char c;
for (int i = 0, len = src.length(); i < len; i++) {
c = src.charAt(i);
ix = from.indexOf(c);
if (ix == -1) {
if (sb != null) {
sb.append(c);
}
} else {
if (sb == null) {
sb = new StringBuilder(len);
sb.append(src, 0, i);
}
if (ix < to.length()) {
sb.append(to.charAt(ix));
}
}
}
return sb == null ? src : sb.toString();
} | 3.68 |
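A minimal usage sketch for the translate method above; the import path and the class context are assumptions based on Dubbo's StringUtils, and the expected outputs are derived from the implementation shown, not from library documentation.

```java
import org.apache.dubbo.common.utils.StringUtils; // assumed package; older Dubbo uses com.alibaba.dubbo.common.utils

public class TranslateExample {
    public static void main(String[] args) {
        // Every character found in "123" is replaced by the character at the same index in "xyz".
        System.out.println(StringUtils.translate("a1b2c3", "123", "xyz")); // axbycz
        // '2' and '3' map beyond the end of "x", so they are silently dropped.
        System.out.println(StringUtils.translate("a1b2c3", "123", "x"));   // axbc
    }
}
```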
flink_BatchTask_initOutputs | /**
* Creates a writer for each output. Creates an OutputCollector which forwards its input to all
* writers. The output collector applies the configured shipping strategy.
*/
@SuppressWarnings("unchecked")
public static <T> Collector<T> initOutputs(
AbstractInvokable containingTask,
UserCodeClassLoader cl,
TaskConfig config,
List<ChainedDriver<?, ?>> chainedTasksTarget,
List<RecordWriter<?>> eventualOutputs,
ExecutionConfig executionConfig,
Map<String, Accumulator<?, ?>> accumulatorMap)
throws Exception {
final int numOutputs = config.getNumOutputs();
// check whether we got any chained tasks
final int numChained = config.getNumberOfChainedStubs();
if (numChained > 0) {
// got chained stubs. that means that this one may only have a single forward connection
if (numOutputs != 1 || config.getOutputShipStrategy(0) != ShipStrategyType.FORWARD) {
throw new RuntimeException(
"Plan Generation Bug: Found a chained stub that is not connected via an only forward connection.");
}
// instantiate each task
@SuppressWarnings("rawtypes")
Collector previous = null;
for (int i = numChained - 1; i >= 0; --i) {
// get the task first
final ChainedDriver<?, ?> ct;
try {
Class<? extends ChainedDriver<?, ?>> ctc = config.getChainedTask(i);
ct = ctc.newInstance();
} catch (Exception ex) {
throw new RuntimeException("Could not instantiate chained task driver.", ex);
}
// get the configuration for the task
final TaskConfig chainedStubConf = config.getChainedStubConfig(i);
final String taskName = config.getChainedTaskName(i);
if (i == numChained - 1) {
// last in chain, instantiate the output collector for this task
previous =
getOutputCollector(
containingTask,
chainedStubConf,
cl.asClassLoader(),
eventualOutputs,
0,
chainedStubConf.getNumOutputs());
}
ct.setup(
chainedStubConf,
taskName,
previous,
containingTask,
cl,
executionConfig,
accumulatorMap);
chainedTasksTarget.add(0, ct);
if (i == numChained - 1) {
ct.getIOMetrics().reuseOutputMetricsForTask();
}
previous = ct;
}
// the collector of the first in the chain is the collector for the task
return (Collector<T>) previous;
}
// else
// instantiate the output collector the default way from this configuration
return getOutputCollector(
containingTask, config, cl.asClassLoader(), eventualOutputs, 0, numOutputs);
} | 3.68 |
hbase_HRegion_attachRegionReplicationToFlushOpSeqIdMVCCEntry | /**
* Create {@link WALEdit} for {@link FlushDescriptor} and attach {@link RegionReplicationSink#add}
* to the flushOpSeqIdMVCCEntry.
*/
private void attachRegionReplicationToFlushOpSeqIdMVCCEntry(WriteEntry flushOpSeqIdMVCCEntry,
FlushDescriptor desc, RegionReplicationSink sink) {
assert !flushOpSeqIdMVCCEntry.getCompletionAction().isPresent();
WALEdit flushMarkerWALEdit = WALEdit.createFlushWALEdit(getRegionInfo(), desc);
WALKeyImpl walKey =
WALUtil.createWALKey(getRegionInfo(), mvcc, this.getReplicationScope(), null);
walKey.setWriteEntry(flushOpSeqIdMVCCEntry);
/**
* Here the {@link ServerCall} is null for {@link RegionReplicationSink#add} because the
* flushMarkerWALEdit is created by ourselves, not from rpc.
*/
flushOpSeqIdMVCCEntry.attachCompletionAction(() -> sink.add(walKey, flushMarkerWALEdit, null));
} | 3.68 |
hbase_MasterObserver_postListDecommissionedRegionServers | /**
* Called after listing decommissioned region servers.
*/
default void postListDecommissionedRegionServers(
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
} | 3.68 |
AreaShop_Utils_getWorldEditRegionsInSelection | /**
* Get all WorldGuard regions intersecting with a WorldEdit selection.
* @param selection The selection to check
* @return A list with all the WorldGuard regions intersecting with the selection
*/
public static List<ProtectedRegion> getWorldEditRegionsInSelection(WorldEditSelection selection) {
// Get all regions inside or intersecting with the WorldEdit selection of the player
World world = selection.getWorld();
RegionManager regionManager = AreaShop.getInstance().getRegionManager(world);
ArrayList<ProtectedRegion> result = new ArrayList<>();
Location selectionMin = selection.getMinimumLocation();
Location selectionMax = selection.getMaximumLocation();
for(ProtectedRegion region : regionManager.getRegions().values()) {
Vector regionMin = AreaShop.getInstance().getWorldGuardHandler().getMinimumPoint(region);
Vector regionMax = AreaShop.getInstance().getWorldGuardHandler().getMaximumPoint(region);
if(
( // x part, resolves to true if the selection and region overlap anywhere on the x-axis
(regionMin.getBlockX() <= selectionMax.getBlockX() && regionMin.getBlockX() >= selectionMin.getBlockX())
|| (regionMax.getBlockX() <= selectionMax.getBlockX() && regionMax.getBlockX() >= selectionMin.getBlockX())
|| (selectionMin.getBlockX() >= regionMin.getBlockX() && selectionMin.getBlockX() <= regionMax.getBlockX())
|| (selectionMax.getBlockX() >= regionMin.getBlockX() && selectionMax.getBlockX() <= regionMax.getBlockX())
) && ( // Y part, resolves to true if the selection and region overlap anywhere on the y-axis
(regionMin.getBlockY() <= selectionMax.getBlockY() && regionMin.getBlockY() >= selectionMin.getBlockY())
|| (regionMax.getBlockY() <= selectionMax.getBlockY() && regionMax.getBlockY() >= selectionMin.getBlockY())
|| (selectionMin.getBlockY() >= regionMin.getBlockY() && selectionMin.getBlockY() <= regionMax.getBlockY())
|| (selectionMax.getBlockY() >= regionMin.getBlockY() && selectionMax.getBlockY() <= regionMax.getBlockY())
) && ( // Z part, resolves to true if the selection and region overlap anywhere on the z-axis
(regionMin.getBlockZ() <= selectionMax.getBlockZ() && regionMin.getBlockZ() >= selectionMin.getBlockZ())
|| (regionMax.getBlockZ() <= selectionMax.getBlockZ() && regionMax.getBlockZ() >= selectionMin.getBlockZ())
|| (selectionMin.getBlockZ() >= regionMin.getBlockZ() && selectionMin.getBlockZ() <= regionMax.getBlockZ())
|| (selectionMax.getBlockZ() >= regionMin.getBlockZ() && selectionMax.getBlockZ() <= regionMax.getBlockZ())
)
) {
result.add(region);
}
}
return result;
} | 3.68 |
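The per-axis checks in the snippet above reduce to the standard closed-interval overlap test; a small self-contained sketch (not part of AreaShop) showing the equivalent condition:

```java
// Illustrative helper, not part of AreaShop: two closed integer intervals overlap
// exactly when each one starts no later than the other one ends.
public class IntervalOverlap {
    static boolean overlaps(int aMin, int aMax, int bMin, int bMax) {
        return aMin <= bMax && bMin <= aMax;
    }

    public static void main(String[] args) {
        // A cuboid region and a cuboid selection intersect when they overlap on x, y and z.
        System.out.println(overlaps(0, 10, 5, 20));  // true: [0,10] and [5,20] share [5,10]
        System.out.println(overlaps(0, 10, 11, 20)); // false: disjoint
    }
}
```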
querydsl_QueryModifiers_isRestricting | /**
* Checks if is restricting.
*
* @return true, if is restricting
*/
public boolean isRestricting() {
return limit != null || offset != null;
} | 3.68 |
morf_GraphBasedUpgradeNode_getReads | /**
* @return all the tables which are read by this upgrade node
*/
public Set<String> getReads() {
return reads;
} | 3.68 |
zxing_ModulusPoly_evaluateAt | /**
* @return evaluation of this polynomial at a given point
*/
int evaluateAt(int a) {
if (a == 0) {
// Just return the x^0 coefficient
return getCoefficient(0);
}
if (a == 1) {
// Just the sum of the coefficients
int result = 0;
for (int coefficient : coefficients) {
result = field.add(result, coefficient);
}
return result;
}
int result = coefficients[0];
int size = coefficients.length;
for (int i = 1; i < size; i++) {
result = field.add(field.multiply(a, result), coefficients[i]);
}
return result;
} | 3.68 |
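The loop above is Horner's rule; a standalone sketch over plain integer arithmetic (ignoring the modulus field) that mirrors the same evaluation order:

```java
// Plain-integer Horner evaluation, mirroring the loop above without the modulus field.
public class HornerExample {
    // Coefficients are ordered from the highest power down to x^0, as in ModulusPoly.
    static int evaluateAt(int[] coefficients, int x) {
        int result = coefficients[0];
        for (int i = 1; i < coefficients.length; i++) {
            result = result * x + coefficients[i];
        }
        return result;
    }

    public static void main(String[] args) {
        // 2x^2 + 3x + 5 evaluated at x = 4 -> 2*16 + 3*4 + 5 = 49
        System.out.println(evaluateAt(new int[] {2, 3, 5}, 4)); // 49
    }
}
```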
hudi_ParquetUtils_getRowCount | /**
* Returns the number of records in the parquet file.
*
* @param conf Configuration
* @param parquetFilePath path of the file
*/
@Override
public long getRowCount(Configuration conf, Path parquetFilePath) {
ParquetMetadata footer;
long rowCount = 0;
footer = readMetadata(conf, parquetFilePath);
for (BlockMetaData b : footer.getBlocks()) {
rowCount += b.getRowCount();
}
return rowCount;
} | 3.68 |
framework_MultiSelectionModelImpl_updateCanSelectAll | /**
* Controls whether the select all checkbox is visible in the grid default
* header, or not.
* <p>
* This is updated as a part of {@link #beforeClientResponse(boolean)},
* since the data provider for grid can be changed on the fly.
*
* @see SelectAllCheckBoxVisibility
*/
protected void updateCanSelectAll() {
switch (selectAllCheckBoxVisibility) {
case VISIBLE:
getState(false).selectAllCheckBoxVisible = true;
break;
case HIDDEN:
getState(false).selectAllCheckBoxVisible = false;
break;
case DEFAULT:
getState(false).selectAllCheckBoxVisible = getGrid()
.getDataProvider().isInMemory();
break;
default:
break;
}
} | 3.68 |
AreaShop_Utils_getImportantRegions | /**
* Get the most important AreaShop regions.
* - Returns the highest-priority regions, preferring child over parent regions.
* @param location The location to check for regions
* @param type The type of regions to look for, null for all
* @return an empty list if no regions are found, one member if a single region has the highest priority, or more if several regions share that priority
*/
public static List<GeneralRegion> getImportantRegions(Location location, GeneralRegion.RegionType type) {
List<GeneralRegion> result = new ArrayList<>();
Set<ProtectedRegion> regions = AreaShop.getInstance().getWorldGuardHandler().getApplicableRegionsSet(location);
if(regions != null) {
List<GeneralRegion> candidates = new ArrayList<>();
for(ProtectedRegion pr : regions) {
GeneralRegion region = AreaShop.getInstance().getFileManager().getRegion(pr.getId());
if(region != null && (
(type == GeneralRegion.RegionType.RENT && region instanceof RentRegion)
|| (type == GeneralRegion.RegionType.BUY && region instanceof BuyRegion)
|| type == null)) {
candidates.add(region);
}
}
boolean first = true;
for(GeneralRegion region : candidates) {
if(region == null) {
AreaShop.debug("skipped null region");
continue;
}
if(first) {
result.add(region);
first = false;
} else {
if(region.getRegion().getPriority() > result.get(0).getRegion().getPriority()) {
result.clear();
result.add(region);
} else if(region.getRegion().getParent() != null && region.getRegion().getParent().equals(result.get(0).getRegion())) {
result.clear();
result.add(region);
} else {
result.add(region);
}
}
}
}
return new ArrayList<>(result);
} | 3.68 |
flink_QueryableStateConfiguration_numStateServerThreads | /**
* Returns the number of threads for the query server NIO event loop. These threads only process
* network events and dispatch query requests to the query threads.
*/
public int numStateServerThreads() {
return numServerThreads;
} | 3.68 |
framework_ColorPickerPopup_createRGBTab | /**
* Creates the RGB tab.
*
* @return the component
*/
private Component createRGBTab(Color color) {
VerticalLayout rgbLayout = new VerticalLayout();
rgbLayout.setMargin(new MarginInfo(false, false, true, false));
rgbLayout.addComponent(rgbPreview);
rgbLayout.setStyleName("rgbtab");
// Add the RGB color gradient
rgbGradient = new ColorPickerGradient("rgb-gradient", rgbConverter);
rgbGradient.setValue(color);
rgbGradient.addValueChangeListener(this::colorChanged);
rgbLayout.addComponent(rgbGradient);
selectors.add(rgbGradient);
// Add the RGB sliders
VerticalLayout sliders = new VerticalLayout();
sliders.setStyleName("rgb-sliders");
redSlider = createRGBSlider("Red", "red");
greenSlider = createRGBSlider("Green", "green");
blueSlider = createRGBSlider("Blue", "blue");
setRgbSliderValues(color);
redSlider.addValueChangeListener(event -> {
double red = event.getValue();
if (!updatingColors) {
Color newColor = new Color((int) red, selectedColor.getGreen(),
selectedColor.getBlue());
setValue(newColor);
}
});
sliders.addComponent(redSlider);
greenSlider.addValueChangeListener(event -> {
double green = event.getValue();
if (!updatingColors) {
Color newColor = new Color(selectedColor.getRed(), (int) green,
selectedColor.getBlue());
setValue(newColor);
}
});
sliders.addComponent(greenSlider);
blueSlider.addValueChangeListener(event -> {
double blue = event.getValue();
if (!updatingColors) {
Color newColor = new Color(selectedColor.getRed(),
selectedColor.getGreen(), (int) blue);
setValue(newColor);
}
});
sliders.addComponent(blueSlider);
rgbLayout.addComponent(sliders);
return rgbLayout;
} | 3.68 |
hbase_AsyncRegionLocatorHelper_replaceRegionLocation | /**
* Create a new {@link RegionLocations} based on the given {@code oldLocs}, and replace the
* location for the given {@code replicaId} with the given {@code loc}.
* <p/>
* All the {@link RegionLocations} in async locator related class are immutable because we want to
* access them concurrently, so here we need to create a new one, instead of calling
* {@link RegionLocations#updateLocation(HRegionLocation, boolean, boolean)}.
*/
static RegionLocations replaceRegionLocation(RegionLocations oldLocs, HRegionLocation loc) {
int replicaId = loc.getRegion().getReplicaId();
HRegionLocation[] locs = oldLocs.getRegionLocations();
locs = Arrays.copyOf(locs, Math.max(replicaId + 1, locs.length));
locs[replicaId] = loc;
return new RegionLocations(locs);
} | 3.68 |
pulsar_BrokerUsage_populateFrom | /**
* Factory method that returns an instance of this class populated from metrics. We expect the keys that we are
* looking for; there is no explicitly typed object which guarantees that we have a specific type of metrics.
*
* @param metrics metrics object containing the metrics collected per minute from mbeans
*
* @return new instance of the class populated from metrics
*/
public static BrokerUsage populateFrom(Map<String, Object> metrics) {
BrokerUsage brokerUsage = null;
if (metrics.containsKey("brk_conn_cnt")) {
brokerUsage = new BrokerUsage();
brokerUsage.connectionCount = (Long) metrics.get("brk_conn_cnt");
}
if (metrics.containsKey("brk_repl_conn_cnt")) {
if (brokerUsage == null) {
brokerUsage = new BrokerUsage();
}
brokerUsage.replicationConnectionCount = (Long) metrics.get("brk_repl_conn_cnt");
}
return brokerUsage;
} | 3.68 |
framework_DesignFormatter_format | /**
* Formats an object according to a converter suitable for a given type.
*
* @param object
* Object to format.
* @param type
* Type of the object.
* @return String representation of the object, as returned by the
* registered converter.
*/
public <T> String format(T object, Class<? extends T> type) {
if (object == null) {
return null;
} else {
Converter<String, Object> converter = findConverterFor(
object.getClass());
return converter.convertToPresentation(object, new ValueContext());
}
} | 3.68 |
hbase_BaseEnvironment_shutdown | /** Clean up the environment */
public void shutdown() {
if (state == Coprocessor.State.ACTIVE) {
state = Coprocessor.State.STOPPING;
Thread currentThread = Thread.currentThread();
ClassLoader hostClassLoader = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(this.getClassLoader());
impl.stop(this);
state = Coprocessor.State.STOPPED;
} catch (IOException ioe) {
LOG.error("Error stopping coprocessor " + impl.getClass().getName(), ioe);
} finally {
currentThread.setContextClassLoader(hostClassLoader);
}
} else {
LOG.warn("Not stopping coprocessor " + impl.getClass().getName()
+ " because not active (state=" + state.toString() + ")");
}
} | 3.68 |
framework_Calc_calculate | // Calculator "business logic" implemented here to keep the example
// minimal
private double calculate(char requestedOperation) {
if ('0' <= requestedOperation && requestedOperation <= '9') {
if (current == null) {
current = 0.0;
}
current = current * 10
+ Double.parseDouble("" + requestedOperation);
return current;
}
if (current == null) {
current = stored;
}
switch (lastOperationRequested) {
case '+':
stored += current;
break;
case '-':
stored -= current;
break;
case '/':
stored /= current;
break;
case '*':
stored *= current;
break;
default:
stored = current;
break;
}
switch (requestedOperation) {
case '+':
log.addRow(current + " +");
break;
case '-':
log.addRow(current + " -");
break;
case '/':
log.addRow(current + " /");
break;
case '*':
log.addRow(current + " x");
break;
case '=':
log.addRow(current + " =");
log.addRow("------------");
log.addRow("" + stored);
break;
}
lastOperationRequested = requestedOperation;
current = null;
if (requestedOperation == 'C') {
log.addRow("0.0");
stored = 0.0;
}
return stored;
} | 3.68 |
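A hedged, standalone illustration of the digit-accumulation arithmetic in the method above, isolated from the Vaadin UI and log; all names below are illustrative and not part of the sample.

```java
// Standalone illustration of the digit-accumulation step used by calculate().
public class DigitAccumulation {
    public static void main(String[] args) {
        Double current = null;
        for (char key : new char[] {'1', '2', '5'}) {
            if (current == null) {
                current = 0.0;
            }
            // Same arithmetic as the calculator: shift left one decimal digit, then add the new one.
            current = current * 10 + Character.getNumericValue(key);
        }
        System.out.println(current); // 125.0 -- pressing "1", "2", "5" builds the operand 125
    }
}
```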
flink_CommittableCollector_isFinished | /**
* Returns whether all {@link CheckpointCommittableManager} instances currently held by the collector are
* either committed or failed.
*
* @return state of the {@link CheckpointCommittableManager}
*/
public boolean isFinished() {
return checkpointCommittables.values().stream()
.allMatch(CheckpointCommittableManagerImpl::isFinished);
} | 3.68 |
hadoop_BCFile_getBlockCount | /**
* Get the number of data blocks.
*
* @return the number of data blocks.
*/
public int getBlockCount() {
return dataIndex.getBlockRegionList().size();
} | 3.68 |
pulsar_ClientCredentialsFlow_loadPrivateKey | /**
* Loads the private key from the given URL.
* @param privateKeyURL the URL of the private key to load
* @return the private key parsed from the given URL
* @throws IOException if the key cannot be loaded or parsed
*/
private static KeyFile loadPrivateKey(String privateKeyURL) throws IOException {
try {
URLConnection urlConnection = new org.apache.pulsar.client.api.url.URL(privateKeyURL).openConnection();
try {
String protocol = urlConnection.getURL().getProtocol();
String contentType = urlConnection.getContentType();
if ("data".equals(protocol) && !"application/json".equals(contentType)) {
throw new IllegalArgumentException(
"Unsupported media type or encoding format: " + urlConnection.getContentType());
}
KeyFile privateKey;
try (Reader r = new InputStreamReader((InputStream) urlConnection.getContent(),
StandardCharsets.UTF_8)) {
privateKey = KeyFile.fromJson(r);
}
return privateKey;
} finally {
IOUtils.close(urlConnection);
}
} catch (URISyntaxException | InstantiationException | IllegalAccessException e) {
throw new IOException("Invalid privateKey format", e);
}
} | 3.68 |
pulsar_BKCluster_startNewBookie | /**
* Helper method to start up a new bookie server with the indicated port
* number. Also starts the auto recovery process, if
* isAutoRecoveryEnabled is set to true.
* @param index Bookie index
* @throws IOException
*/
public int startNewBookie(int index)
throws Exception {
ServerConfiguration conf = newServerConfiguration(index);
bsConfs.add(conf);
log.info("Starting new bookie on port: {}", conf.getBookiePort());
LifecycleComponentStack server = startBookie(conf);
bookieComponents.add(server);
return conf.getBookiePort();
} | 3.68 |
graphhopper_NavigateResponseConverter_putLocation | /**
* Puts a location array in GeoJson format into the node
*/
private static ObjectNode putLocation(double lat, double lon, ObjectNode node) {
ArrayNode location = node.putArray("location");
// GeoJson lon,lat
location.add(Helper.round6(lon));
location.add(Helper.round6(lat));
return node;
} | 3.68 |
framework_AbstractSplitPanel_getMinSplitPositionUnit | /**
* Returns the unit of the minimum position of the splitter.
*
* @return the unit of the minimum position of the splitter
*/
public Unit getMinSplitPositionUnit() {
return posMinUnit;
} | 3.68 |
hbase_Delete_addFamilyVersion | /**
* Delete all columns of the specified family with a timestamp equal to the specified timestamp.
* @param family family name
* @param timestamp version timestamp
* @return this for invocation chaining
*/
public Delete addFamilyVersion(final byte[] family, final long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
List<Cell> list = getCellList(family);
list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion));
return this;
} | 3.68 |
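A brief usage sketch for addFamilyVersion using the standard HBase client API; the row key, family name and timestamp below are hypothetical.

```java
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class DeleteFamilyVersionExample {
    // Deletes every column of the given family that was written at exactly this timestamp.
    static void deleteFamilyAtTimestamp(Table table, long timestamp) throws IOException {
        Delete delete = new Delete(Bytes.toBytes("row-1"))       // hypothetical row key
            .addFamilyVersion(Bytes.toBytes("cf"), timestamp);   // family name is illustrative
        table.delete(delete);
    }
}
```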
flink_FileRegionWriteReadUtils_writeFixedSizeRegionToFile | /**
* Write {@link FixedSizeRegion} to {@link FileChannel}.
*
* <p>Note that this type of region's length is fixed.
*
* @param channel the file's channel to write.
* @param regionBuffer the buffer to write {@link FixedSizeRegion}'s header.
* @param region the region to be written to channel.
*/
public static void writeFixedSizeRegionToFile(
FileChannel channel, ByteBuffer regionBuffer, FileDataIndexRegionHelper.Region region)
throws IOException {
regionBuffer.clear();
regionBuffer.putInt(region.getFirstBufferIndex());
regionBuffer.putInt(region.getNumBuffers());
regionBuffer.putLong(region.getRegionStartOffset());
regionBuffer.putLong(region.getRegionEndOffset());
regionBuffer.flip();
BufferReaderWriterUtil.writeBuffers(channel, regionBuffer.capacity(), regionBuffer);
} | 3.68 |
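The method above writes two ints and two longs per region, so the header buffer must hold at least 24 bytes; a hedged sketch of sizing such a buffer (the constant name and allocation choice are assumptions, not Flink's):

```java
import java.nio.ByteBuffer;

// Sketch only: the fixed region header is 2 ints (4 bytes each) + 2 longs (8 bytes each) = 24 bytes.
public class FixedSizeRegionBufferExample {
    static final int FIXED_REGION_HEADER_SIZE = Integer.BYTES * 2 + Long.BYTES * 2; // 24

    static ByteBuffer allocateRegionHeaderBuffer() {
        // allocateDirect is a choice, not a requirement; a heap buffer works as well.
        return ByteBuffer.allocateDirect(FIXED_REGION_HEADER_SIZE);
    }
}
```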
hadoop_MoveStep_setBytesToMove | /**
* Sets bytes to move.
*
* @param bytesToMove - number of bytes
*/
public void setBytesToMove(long bytesToMove) {
this.bytesToMove = bytesToMove;
} | 3.68 |
morf_OracleDialect_getSqlForInsertInto | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForInsertInto(org.alfasoftware.morf.sql.InsertStatement)
*/
@Override
protected String getSqlForInsertInto(InsertStatement insertStatement) {
return "INSERT " + insertStatementPreIntoDirectives(insertStatement) + "INTO ";
} | 3.68 |
hbase_ByteBuff_wrap | // Make this private because we don't want to expose the refCnt related wrap method to upstream.
private static ByteBuff wrap(ByteBuffer buffer, RefCnt refCnt) {
return new SingleByteBuff(refCnt, buffer);
} | 3.68 |
flink_RocksDBNativeMetricOptions_isColumnFamilyAsVariable | /**
* Whether {@link RocksDBNativeMetricMonitor} exposes the column family as a variable.
*
* @return true if the column family is exposed as a variable, false otherwise.
*/
public boolean isColumnFamilyAsVariable() {
return this.columnFamilyAsVariable;
} | 3.68 |
hadoop_AbstractS3ACommitter_requiresDelayedCommitOutputInFileSystem | /**
* Flag to indicate whether or not the destination filesystem needs
* to be configured to support magic paths where the output isn't immediately
* visible. If the committer returns true, then committer setup will
* fail if the FS doesn't have the capability.
* Base implementation returns false.
* @return what the requirements of the committer are of the filesystem.
*/
protected boolean requiresDelayedCommitOutputInFileSystem() {
return false;
} | 3.68 |
hbase_BatchScanResultCache_regroupResults | // Add new result to the partial list and return a batched Result if caching size exceed batching
// limit. As the RS will also respect the scan.getBatch, we can make sure that we will get only
// one Result back at most(or null, which means we do not have enough cells).
private Result regroupResults(Result result) {
partialResults.addLast(result);
numCellsOfPartialResults += result.size();
if (numCellsOfPartialResults < batch) {
return null;
}
Cell[] cells = new Cell[batch];
int cellCount = 0;
boolean stale = false;
for (;;) {
Result r = partialResults.pollFirst();
stale = stale || r.isStale();
int newCellCount = cellCount + r.size();
if (newCellCount > batch) {
// We have more cells than expected, so split the current result
int len = batch - cellCount;
System.arraycopy(r.rawCells(), 0, cells, cellCount, len);
Cell[] remainingCells = new Cell[r.size() - len];
System.arraycopy(r.rawCells(), len, remainingCells, 0, r.size() - len);
partialResults.addFirst(
Result.create(remainingCells, r.getExists(), r.isStale(), r.mayHaveMoreCellsInRow()));
break;
}
System.arraycopy(r.rawCells(), 0, cells, cellCount, r.size());
if (newCellCount == batch) {
break;
}
cellCount = newCellCount;
}
numCellsOfPartialResults -= batch;
return Result.create(cells, null, stale,
result.mayHaveMoreCellsInRow() || !partialResults.isEmpty());
} | 3.68 |
hadoop_BlockStorageMovementNeeded_add | /**
* Add the itemInfo to tracking list for which storage movement expected if
* necessary.
*
* @param itemInfo
* - child in the directory
* @param scanCompleted
* -Indicates whether the ItemInfo start id directory has no more
* elements to scan.
*/
@VisibleForTesting
public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
storageMovementNeeded.add(itemInfo);
// The SPS start path is the file itself, so there is no need to update pending dir
// stats.
if (itemInfo.getStartPath() == itemInfo.getFile()) {
return;
}
updatePendingDirScanStats(itemInfo.getStartPath(), 1, scanCompleted);
} | 3.68 |
rocketmq-connect_JsonSerializer_serialize | /**
* Convert {@code data} into a byte array.
*
* @param topic topic associated with data
* @param data typed data
* @return serialized bytes
*/
@Override
public byte[] serialize(String topic, Object data) {
if (Objects.isNull(data)) {
return null;
}
try {
return JSON.toJSONString(data, SerializerFeature.DisableCircularReferenceDetect, SerializerFeature.WriteMapNullValue).getBytes(StandardCharsets.UTF_8);
} catch (Exception e) {
throw new ConnectException("Error serializing JSON message", e);
}
} | 3.68 |
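A hedged round-trip sketch using the same fastjson calls as the serializer above; the read-back side is an assumption about how the bytes would typically be consumed, not part of the connector.

```java
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class JsonRoundTripExample {
    public static void main(String[] args) {
        Map<String, Object> payload = new HashMap<>();
        payload.put("id", 42);
        payload.put("note", null); // WriteMapNullValue keeps null-valued keys in the output

        // Mirror of the serialize path: object -> JSON string -> UTF-8 bytes.
        byte[] bytes = JSON.toJSONString(payload, SerializerFeature.WriteMapNullValue)
                .getBytes(StandardCharsets.UTF_8);

        // Hypothetical read-back of the same bytes.
        Map<?, ?> restored = JSON.parseObject(new String(bytes, StandardCharsets.UTF_8), Map.class);
        System.out.println(restored.get("id")); // 42
    }
}
```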
morf_JdbcUrlElements_getDatabaseType | /**
* @return the database type identifier
*
* @see DatabaseType#identifier()
* @see DatabaseType.Registry#findByIdentifier(String)
*/
public String getDatabaseType() {
return databaseType;
} | 3.68 |
framework_FilesystemContainer_getContainerPropertyIds | /**
* Gets the collection of available file properties.
*
* @return Unmodifiable collection containing all available file properties.
*/
@Override
public Collection<String> getContainerPropertyIds() {
return FILE_PROPERTIES;
} | 3.68 |
querydsl_NumberExpression_goeAny | /**
* Create a {@code this >= any right} expression
*
* @param right
* @return this >= any right
*/
public BooleanExpression goeAny(CollectionExpression<?, ? super T> right) {
return goe(ExpressionUtils.<T> any(right));
} | 3.68 |
morf_ExecuteStatement_isApplied | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
return true;
} | 3.68 |
hudi_HoodieRecordPayload_getInsertValue | /**
* Generates an avro record out of the given HoodieRecordPayload, to be written out to storage. Called when writing a new value for the given
* HoodieKey, wherein there is no existing record in storage to be combined against (i.e. insert). Return EMPTY to skip writing this record.
* Implementations can leverage properties if required.
* @param schema Schema used for record
* @param properties Payload related properties. For example pass the ordering field(s) name to extract from value in storage.
* @return the {@link IndexedRecord} to be inserted.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
default Option<IndexedRecord> getInsertValue(Schema schema, Properties properties) throws IOException {
return getInsertValue(schema);
} | 3.68 |
morf_SelectStatement_asField | /**
* @see org.alfasoftware.morf.sql.AbstractSelectStatement#asField()
*/
@Override
public AliasedField asField() {
return new FieldFromSelect(this);
} | 3.68 |
flink_AbstractCollectResultBuffer_revert | /** Revert the buffer back to the result whose offset is `checkpointedOffset`. */
protected void revert(long checkpointedOffset) {
while (offset > checkpointedOffset) {
buffer.removeLast();
offset--;
}
} | 3.68 |
hadoop_Chain_getChainElementConf | /**
* Creates a {@link Configuration} for the Map or Reduce in the chain.
*
* <p>
* It creates a new Configuration using the chain job's Configuration as base
* and adds to it the configuration properties for the chain element. The keys
* of the chain element Configuration have precedence over the given
* Configuration.
* </p>
*
* @param jobConf
* the chain job's Configuration.
* @param confKey
* the key for chain element configuration serialized in the chain
* job's Configuration.
* @return a new Configuration aggregating the chain job's Configuration with
* the chain element configuration properties.
*/
protected static Configuration getChainElementConf(Configuration jobConf,
String confKey) {
Configuration conf = null;
try (Stringifier<Configuration> stringifier =
new DefaultStringifier<Configuration>(jobConf, Configuration.class);) {
String confString = jobConf.get(confKey, null);
if (confString != null) {
conf = stringifier.fromString(jobConf.get(confKey, null));
}
} catch (IOException ioex) {
throw new RuntimeException(ioex);
}
// we have to do this because the Writable deserialization clears all
// values set in the conf, making it impossible to do a
// new Configuration(jobConf) in the creation of the conf above
jobConf = new Configuration(jobConf);
if (conf != null) {
for (Map.Entry<String, String> entry : conf) {
jobConf.set(entry.getKey(), entry.getValue());
}
}
return jobConf;
} | 3.68 |
hbase_SegmentScanner_shipped | /**
* Called after a batch of rows scanned (RPC) and set to be returned to client. Any in between
* cleanup can be done here. Nothing to be done for MutableCellSetSegmentScanner.
*/
@Override
public void shipped() throws IOException {
// do nothing
} | 3.68 |
hbase_HFileBlock_toStringHeader | /**
* Convert the contents of the block header into a human readable string. This is mostly helpful
* for debugging. This assumes that the block has minor version > 0.
*/
static String toStringHeader(ByteBuff buf) throws IOException {
byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)];
buf.get(magicBuf);
BlockType bt = BlockType.parse(magicBuf, 0, BlockType.MAGIC_LENGTH);
int compressedBlockSizeNoHeader = buf.getInt();
int uncompressedBlockSizeNoHeader = buf.getInt();
long prevBlockOffset = buf.getLong();
byte cksumtype = buf.get();
long bytesPerChecksum = buf.getInt();
long onDiskDataSizeWithHeader = buf.getInt();
return " Header dump: magic: " + Bytes.toString(magicBuf) + " blockType " + bt
+ " compressedBlockSizeNoHeader " + compressedBlockSizeNoHeader
+ " uncompressedBlockSizeNoHeader " + uncompressedBlockSizeNoHeader + " prevBlockOffset "
+ prevBlockOffset + " checksumType " + ChecksumType.codeToType(cksumtype)
+ " bytesPerChecksum " + bytesPerChecksum + " onDiskDataSizeWithHeader "
+ onDiskDataSizeWithHeader;
} | 3.68 |
hbase_IdentityTableReducer_reduce | /**
* Writes each given record, consisting of the row key and the given values, to the configured
* {@link org.apache.hadoop.mapreduce.OutputFormat}. It is emitting the row key and each
* {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete
* Delete} as separate pairs.
* @param key The current row key.
* @param values The {@link org.apache.hadoop.hbase.client.Put Put} or
* {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given row.
* @param context The context of the reduce.
* @throws IOException When writing the record fails.
* @throws InterruptedException When the job gets interrupted.
*/
@Override
public void reduce(Writable key, Iterable<Mutation> values, Context context)
throws IOException, InterruptedException {
for (Mutation putOrDelete : values) {
context.write(key, putOrDelete);
}
} | 3.68 |
hbase_HBaseServerBase_getWALRootDir | /** Returns the walRootDir. */
public Path getWALRootDir() {
return walRootDir;
} | 3.68 |
hmily_HmilyExecuteTemplate_rollback | /**
* Rollback.
*
* @param connection connection
*/
public void rollback(final Connection connection) {
if (check()) {
return;
}
List<HmilyUndoContext> contexts = HmilyUndoContextCacheManager.INSTANCE.get();
List<HmilyLock> locks = new LinkedList<>();
for (HmilyUndoContext context : contexts) {
locks.addAll(context.getHmilyLocks());
}
HmilyLockManager.INSTANCE.releaseLocks(locks);
clean(connection);
} | 3.68 |
flink_SliceAssigners_hopping | /**
* Creates a hopping window {@link SliceAssigner} that assigns elements to slices of hopping
* windows.
*
* @param rowtimeIndex the index of the rowtime field in the input row, {@code -1} if based on
* processing time.
* @param shiftTimeZone The shift timezone of the window, if the proctime or rowtime type is
* TIMESTAMP_LTZ, the shift timezone is the timezone user configured in TableConfig, other
* cases the timezone is UTC which means never shift when assigning windows.
* @param slide the slide interval of the generated windows.
*/
public static HoppingSliceAssigner hopping(
int rowtimeIndex, ZoneId shiftTimeZone, Duration size, Duration slide) {
return new HoppingSliceAssigner(
rowtimeIndex, shiftTimeZone, size.toMillis(), slide.toMillis(), 0);
} | 3.68 |
hbase_MemorySizeUtil_getBlockCacheHeapPercent | /**
* Retrieve configured size for on heap block cache as percentage of total heap.
*/
public static float getBlockCacheHeapPercent(final Configuration conf) {
// L1 block cache is always on heap
float l1CachePercent = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
return l1CachePercent;
} | 3.68 |
hbase_ShutdownHook_suppressHdfsShutdownHook | /*
* So, HDFS keeps a static map of all FS instances. In order to make sure things are cleaned up on
* our way out, it also creates a shutdown hook so that all filesystems can be closed when the
* process is terminated; it calls FileSystem.closeAll. This inconveniently runs concurrently with
* our own shutdown handler, and therefore causes all the filesystems to be closed before the
* server can do all its necessary cleanup. <p>The dirty reflection in this method sneaks into the
* FileSystem class and grabs the shutdown hook, removes it from the list of active shutdown
* hooks, and returns the hook for the caller to run at its convenience. <p>This seems quite
* fragile and susceptible to breaking if Hadoop changes anything about the way this cleanup is
* managed. Keep an eye on things.
* @return The fs shutdown hook
* @throws RuntimeException if we fail to find or grap the shutdown hook.
*/
private static Runnable suppressHdfsShutdownHook(final FileSystem fs) {
try {
// This introspection has been updated to work for hadoop 0.20, 0.21 and for
// cloudera 0.20. 0.21 and cloudera 0.20 both have hadoop-4829. With the
// latter in place, things are a little messy in that there are now two
// instances of the data member clientFinalizer; an uninstalled one in
// FileSystem and one in the inner class named Cache that actually gets
// registered as a shutdown hook. If the latter is present, then we are
// on 0.21 or cloudera patched 0.20.
Runnable hdfsClientFinalizer = null;
// Look into the FileSystem#Cache class for clientFinalizer
Class<?>[] classes = FileSystem.class.getDeclaredClasses();
Class<?> cache = null;
for (Class<?> c : classes) {
if (c.getSimpleName().equals("Cache")) {
cache = c;
break;
}
}
if (cache == null) {
throw new RuntimeException(
"This should not happen. Could not find the cache class in FileSystem.");
}
Field field = null;
try {
field = cache.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
} catch (NoSuchFieldException e) {
// We can get here if the Cache class does not have a clientFinalizer
// instance: i.e. we're running on straight 0.20 w/o hadoop-4829.
}
if (field != null) {
field.setAccessible(true);
Field cacheField = FileSystem.class.getDeclaredField("CACHE");
cacheField.setAccessible(true);
Object cacheInstance = cacheField.get(fs);
hdfsClientFinalizer = (Runnable) field.get(cacheInstance);
} else {
// Then we didn't find clientFinalizer in Cache. Presume clean 0.20 hadoop.
field = FileSystem.class.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
field.setAccessible(true);
hdfsClientFinalizer = (Runnable) field.get(null);
}
if (hdfsClientFinalizer == null) {
throw new RuntimeException("Client finalizer is null, can't suppress!");
}
synchronized (fsShutdownHooks) {
boolean isFSCacheDisabled = fs.getConf().getBoolean("fs.hdfs.impl.disable.cache", false);
if (
!isFSCacheDisabled && !fsShutdownHooks.containsKey(hdfsClientFinalizer)
&& !ShutdownHookManager.deleteShutdownHook(hdfsClientFinalizer)
) {
throw new RuntimeException(
"Failed suppression of fs shutdown hook: " + hdfsClientFinalizer);
}
Integer refs = fsShutdownHooks.get(hdfsClientFinalizer);
fsShutdownHooks.put(hdfsClientFinalizer, refs == null ? 1 : refs + 1);
}
return hdfsClientFinalizer;
} catch (NoSuchFieldException nsfe) {
LOG.error(HBaseMarkers.FATAL, "Couldn't find field 'clientFinalizer' in FileSystem!", nsfe);
throw new RuntimeException("Failed to suppress HDFS shutdown hook");
} catch (IllegalAccessException iae) {
LOG.error(HBaseMarkers.FATAL, "Couldn't access field 'clientFinalizer' in FileSystem!", iae);
throw new RuntimeException("Failed to suppress HDFS shutdown hook");
}
} | 3.68 |
graphhopper_BaseGraph_init | /**
* Similar to {@link #init(int edgeId, int adjNode)}, but here we retrieve the edge in a certain direction
* directly using an edge key.
*/
final void init(int edgeKey) {
if (edgeKey < 0)
throw new IllegalArgumentException("edge keys must not be negative, given: " + edgeKey);
this.edgeId = GHUtility.getEdgeFromEdgeKey(edgeKey);
edgePointer = store.toEdgePointer(edgeId);
baseNode = store.getNodeA(edgePointer);
adjNode = store.getNodeB(edgePointer);
if (edgeKey % 2 == 0) {
reverse = false;
} else {
reverse = true;
int tmp = baseNode;
baseNode = adjNode;
adjNode = tmp;
}
} | 3.68 |
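A small standalone sketch of the edge-key convention that init relies on: the low bit selects the direction and the remaining bits are the edge id. The arithmetic below is the plain-integer equivalent of what the GHUtility helper recovers; the class and method names here are illustrative only.

```java
// Standalone illustration of the edge-key convention used by init(int edgeKey).
public class EdgeKeyExample {
    static int edgeKey(int edgeId, boolean reverse) {
        return edgeId * 2 + (reverse ? 1 : 0);
    }

    public static void main(String[] args) {
        int key = edgeKey(7, true);           // 15
        int edgeId = key / 2;                 // 7  -- what GHUtility.getEdgeFromEdgeKey recovers
        boolean reverse = key % 2 != 0;       // true -> base and adjacent node are swapped
        System.out.println(key + " " + edgeId + " " + reverse);
    }
}
```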
hadoop_AzureFileSystemInstrumentation_updateBytesWrittenInLastSecond | /**
* Sets the current gauge value for how many bytes were written in the last
* second.
* @param currentBytesWritten The number of bytes.
*/
public void updateBytesWrittenInLastSecond(long currentBytesWritten) {
bytesWrittenInLastSecond.set(currentBytesWritten);
} | 3.68 |
framework_BeanUtil_getBeanPropertyDescriptors | /**
* Returns the property descriptors of a class or an interface.
*
* For an interface, superinterfaces are also iterated as Introspector does
* not take them into account (Oracle Java bug 4275879), but in that case,
* both the setter and the getter for a property must be in the same
* interface and should not be overridden in subinterfaces for the discovery
* to work correctly.
* <p>
* NOTE : This utility method relies on introspection (and returns
* PropertyDescriptor) which is a part of java.beans package. The latter
* package could require bigger JDK in the future (with Java 9+). So it may
* be changed in the future.
* <p>
* For interfaces, the iteration is depth first and the properties of
* superinterfaces are returned before those of their subinterfaces.
*
* @param beanType
* the type whose properties to query
* @return a list of property descriptors of the given type
* @throws IntrospectionException
* if the introspection fails
*/
public static List<PropertyDescriptor> getBeanPropertyDescriptors(
final Class<?> beanType) throws IntrospectionException {
// Oracle bug 4275879: Introspector does not consider superinterfaces of
// an interface
if (beanType.isInterface()) {
List<PropertyDescriptor> propertyDescriptors = new ArrayList<>();
for (Class<?> cls : beanType.getInterfaces()) {
propertyDescriptors.addAll(getBeanPropertyDescriptors(cls));
}
BeanInfo info = Introspector.getBeanInfo(beanType);
propertyDescriptors.addAll(getPropertyDescriptors(info));
return propertyDescriptors;
} else {
BeanInfo info = Introspector.getBeanInfo(beanType);
return getPropertyDescriptors(info);
}
} | 3.68 |
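A hedged usage sketch of the java.beans introspection that the method above builds on; the Person bean is hypothetical and exists only for illustration.

```java
import java.beans.BeanInfo;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;

public class IntrospectionExample {
    // Hypothetical bean used only for illustration.
    public static class Person {
        private String name;
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }

    public static void main(String[] args) throws IntrospectionException {
        BeanInfo info = Introspector.getBeanInfo(Person.class);
        for (PropertyDescriptor pd : info.getPropertyDescriptors()) {
            // Prints "class" (inherited from Object#getClass) and "name".
            System.out.println(pd.getName());
        }
    }
}
```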
hadoop_MultipartUploaderBuilderImpl_append | /**
* Append to an existing file (optional operation).
*/
@Override
public B append() {
flags.add(CreateFlag.APPEND);
return getThisBuilder();
} | 3.68 |
hadoop_JsonSerDeser_writeJsonAsBytes | /**
* Write the JSON as bytes, then close the file.
* @param dataOutputStream an output stream that will always be closed
* @throws IOException on any failure
*/
private void writeJsonAsBytes(T instance,
OutputStream dataOutputStream) throws IOException {
try {
String json = toJson(instance);
byte[] b = json.getBytes(StandardCharsets.UTF_8);
dataOutputStream.write(b);
dataOutputStream.flush();
dataOutputStream.close();
} finally {
IOUtils.closeStream(dataOutputStream);
}
} | 3.68 |
pulsar_ConsumerConfiguration_getMaxTotalReceiverQueueSizeAcrossPartitions | /**
* @return the configured max total receiver queue size across partitions
*/
public int getMaxTotalReceiverQueueSizeAcrossPartitions() {
return conf.getMaxTotalReceiverQueueSizeAcrossPartitions();
} | 3.68 |
shardingsphere-elasticjob_Handler_execute | /**
* Execute handle method with required arguments.
*
* @param args Required arguments
* @return Method invoke result
* @throws InvocationTargetException Wraps exception thrown by invoked method
* @throws IllegalAccessException Handle method is not accessible
*/
public Object execute(final Object... args) throws InvocationTargetException, IllegalAccessException {
return handleMethod.invoke(instance, args);
} | 3.68 |
hadoop_AbstractMultipartUploader_checkPutArguments | /**
* Check all the arguments to the
* {@link MultipartUploader#putPart(UploadHandle, int, Path, InputStream, long)}
* operation.
* @param filePath Target path for upload (as {@link #startUpload(Path)}).
* @param inputStream Data for this part. Implementations MUST close this
* stream after reading in the data.
* @param partNumber Index of the part relative to others.
* @param uploadId Identifier from {@link #startUpload(Path)}.
* @param lengthInBytes Target length to read from the stream.
* @throws IllegalArgumentException invalid argument
*/
protected void checkPutArguments(Path filePath,
InputStream inputStream,
int partNumber,
UploadHandle uploadId,
long lengthInBytes) throws IllegalArgumentException {
checkPath(filePath);
checkArgument(inputStream != null, "null inputStream");
checkArgument(partNumber > 0, "Invalid part number: %d", partNumber);
checkArgument(uploadId != null, "null uploadId");
checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes);
} | 3.68 |
flink_TableOperatorWrapper_isClosed | /**
* Checks if the wrapped operator has been closed.
*
* <p>Note that this method must be called in the task thread.
*/
public boolean isClosed() {
return closed;
} | 3.68 |
framework_Link_getTargetWidth | /**
* Returns the target window width or -1 if not set.
*
* @return the target window width.
*/
public int getTargetWidth() {
return getState(false).targetWidth < 0 ? -1
: getState(false).targetWidth;
} | 3.68 |
hbase_ChecksumUtil_validateChecksum | /**
* Validates that the data in the specified HFileBlock matches the checksum. Generates the
* checksums for the data and then validate that it matches those stored in the end of the data.
* @param buf Contains the data in following order: HFileBlock header, data, checksums.
* @param pathName Path of the HFile to which the {@code data} belongs. Only used for logging.
* @param offset offset of the data being validated. Only used for logging.
* @param hdrSize Size of the block header in {@code data}. Only used for logging.
* @return True if checksum matches, else false.
*/
static boolean validateChecksum(ByteBuff buf, String pathName, long offset, int hdrSize) {
ChecksumType ctype = ChecksumType.codeToType(buf.get(HFileBlock.Header.CHECKSUM_TYPE_INDEX));
if (ctype == ChecksumType.NULL) {
return true;// No checksum validations needed for this block.
}
// read in the stored value of the checksum size from the header.
int bytesPerChecksum = buf.getInt(HFileBlock.Header.BYTES_PER_CHECKSUM_INDEX);
DataChecksum dataChecksum =
DataChecksum.newDataChecksum(ctype.getDataChecksumType(), bytesPerChecksum);
assert dataChecksum != null;
int onDiskDataSizeWithHeader =
buf.getInt(HFileBlock.Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
LOG.trace(
"dataLength={}, sizeWithHeader={}, checksumType={}, file={}, "
+ "offset={}, headerSize={}, bytesPerChecksum={}",
buf.capacity(), onDiskDataSizeWithHeader, ctype.getName(), pathName, offset, hdrSize,
bytesPerChecksum);
ByteBuff data = buf.duplicate().position(0).limit(onDiskDataSizeWithHeader);
ByteBuff checksums = buf.duplicate().position(onDiskDataSizeWithHeader).limit(buf.limit());
return verifyChunkedSums(dataChecksum, data, checksums, pathName);
} | 3.68 |
framework_AbstractMultiSelect_deselect | /**
* Removes the given items. Any item that is not currently selected, is
* ignored. If none of the items are selected, does nothing.
*
* @param items
* the items to deselect, not {@code null}
* @param userOriginated
* {@code true} if this was user originated, {@code false} if not
*/
protected void deselect(Set<T> items, boolean userOriginated) {
Objects.requireNonNull(items);
if (items.stream().noneMatch(i -> isSelected(i))) {
return;
}
updateSelection(set -> set.removeAll(items), userOriginated);
} | 3.68 |
hadoop_StagingCommitter_deleteTaskWorkingPathQuietly | /**
* Delete the working path of a task; no-op if there is none, that
* is: this is a job.
* @param context job/task context
*/
protected void deleteTaskWorkingPathQuietly(JobContext context) {
ignoreIOExceptions(LOG, "Delete working path", "",
() -> {
Path path = buildWorkPath(context, getUUID());
if (path != null) {
deleteQuietly(path.getFileSystem(getConf()), path, true);
}
});
} | 3.68 |
hadoop_StageExecutionIntervalByDemand_calcWeight | // Weight = total memory consumption of stage
protected double calcWeight(ReservationRequest stage) {
return (stage.getDuration() * stage.getCapability().getMemorySize())
* (stage.getNumContainers());
} | 3.68 |
flink_JobGraphGenerator_connectJobVertices | /**
* NOTE: The channel for global and local strategies are different if we connect a union. The
* global strategy channel is then the channel into the union node, the local strategy channel
* the one from the union to the actual target operator.
*
* @throws CompilerException
*/
private DistributionPattern connectJobVertices(
Channel channel,
int inputNumber,
final JobVertex sourceVertex,
final TaskConfig sourceConfig,
final JobVertex targetVertex,
final TaskConfig targetConfig,
boolean isBroadcast)
throws CompilerException {
// ------------ connect the vertices to the job graph --------------
final DistributionPattern distributionPattern;
switch (channel.getShipStrategy()) {
case FORWARD:
distributionPattern = DistributionPattern.POINTWISE;
break;
case PARTITION_RANDOM:
case BROADCAST:
case PARTITION_HASH:
case PARTITION_CUSTOM:
case PARTITION_RANGE:
case PARTITION_FORCED_REBALANCE:
distributionPattern = DistributionPattern.ALL_TO_ALL;
break;
default:
throw new RuntimeException(
"Unknown runtime ship strategy: " + channel.getShipStrategy());
}
final ResultPartitionType resultType;
switch (channel.getDataExchangeMode()) {
case PIPELINED:
resultType = ResultPartitionType.PIPELINED;
break;
case BATCH:
// BLOCKING results are currently not supported in closed loop iterations
//
// See https://issues.apache.org/jira/browse/FLINK-1713 for details
resultType =
channel.getSource().isOnDynamicPath()
? ResultPartitionType.PIPELINED
: ResultPartitionType.BLOCKING;
break;
case PIPELINE_WITH_BATCH_FALLBACK:
throw new UnsupportedOperationException(
"Data exchange mode "
+ channel.getDataExchangeMode()
+ " currently not supported.");
default:
throw new UnsupportedOperationException("Unknown data exchange mode.");
}
JobEdge edge =
targetVertex.connectNewDataSetAsInput(
sourceVertex, distributionPattern, resultType, isBroadcast);
// -------------- configure the source task's ship strategy strategies in task config
// --------------
final int outputIndex = sourceConfig.getNumOutputs();
sourceConfig.addOutputShipStrategy(channel.getShipStrategy());
if (outputIndex == 0) {
sourceConfig.setOutputSerializer(channel.getSerializer());
}
if (channel.getShipStrategyComparator() != null) {
sourceConfig.setOutputComparator(channel.getShipStrategyComparator(), outputIndex);
}
if (channel.getShipStrategy() == ShipStrategyType.PARTITION_RANGE) {
final DataDistribution dataDistribution = channel.getDataDistribution();
if (dataDistribution != null) {
sourceConfig.setOutputDataDistribution(dataDistribution, outputIndex);
} else {
throw new RuntimeException("Range partitioning requires data distribution.");
}
}
if (channel.getShipStrategy() == ShipStrategyType.PARTITION_CUSTOM) {
if (channel.getPartitioner() != null) {
sourceConfig.setOutputPartitioner(channel.getPartitioner(), outputIndex);
} else {
throw new CompilerException(
"The ship strategy was set to custom partitioning, but no partitioner was set.");
}
}
// ---------------- configure the receiver -------------------
if (isBroadcast) {
targetConfig.addBroadcastInputToGroup(inputNumber);
} else {
targetConfig.addInputToGroup(inputNumber);
}
// ---------------- attach the additional infos to the job edge -------------------
String shipStrategy = JsonMapper.getShipStrategyString(channel.getShipStrategy());
if (channel.getShipStrategyKeys() != null && channel.getShipStrategyKeys().size() > 0) {
shipStrategy +=
" on "
+ (channel.getShipStrategySortOrder() == null
? channel.getShipStrategyKeys().toString()
: Utils.createOrdering(
channel.getShipStrategyKeys(),
channel.getShipStrategySortOrder())
.toString());
}
String localStrategy;
if (channel.getLocalStrategy() == null
|| channel.getLocalStrategy() == LocalStrategy.NONE) {
localStrategy = null;
} else {
localStrategy = JsonMapper.getLocalStrategyString(channel.getLocalStrategy());
if (localStrategy != null
&& channel.getLocalStrategyKeys() != null
&& channel.getLocalStrategyKeys().size() > 0) {
localStrategy +=
" on "
+ (channel.getLocalStrategySortOrder() == null
? channel.getLocalStrategyKeys().toString()
: Utils.createOrdering(
channel.getLocalStrategyKeys(),
channel.getLocalStrategySortOrder())
.toString());
}
}
String caching =
channel.getTempMode() == TempMode.NONE ? null : channel.getTempMode().toString();
edge.setShipStrategyName(shipStrategy);
edge.setForward(channel.getShipStrategy() == ShipStrategyType.FORWARD);
edge.setPreProcessingOperationName(localStrategy);
edge.setOperatorLevelCachingDescription(caching);
return distributionPattern;
} | 3.68 |
flink_DataStream_iterate | /**
* Initiates an iterative part of the program that feeds back data streams. The iterative part
* needs to be closed by calling {@link IterativeStream#closeWith(DataStream)}. The
* transformation of this IterativeStream will be the iteration head. The data stream given to
* the {@link IterativeStream#closeWith(DataStream)} method is the data stream that will be fed
* back and used as the input for the iteration head. The user can also use different feedback
* type than the input of the iteration and treat the input and feedback streams as a {@link
* ConnectedStreams} by calling {@link IterativeStream#withFeedbackType(TypeInformation)}
*
* <p>A common usage pattern for streaming iterations is to use output splitting to send a part
* of the closing data stream to the head. Refer to {@link
* ProcessFunction.Context#output(OutputTag, Object)} for more information.
*
* <p>The iteration edge will be partitioned the same way as the first input of the iteration
* head unless it is changed in the {@link IterativeStream#closeWith(DataStream)} call.
*
* <p>By default a DataStream with iteration will never terminate, but the user can use the
* maxWaitTime parameter to set a max waiting time for the iteration head. If no data received
* in the set time, the stream terminates.
*
* @param maxWaitTimeMillis Number of milliseconds to wait between inputs before shutting down
* @return The iterative data stream created.
* @deprecated This method is deprecated since Flink 1.19. The only known use case of this
* Iteration API comes from Flink ML, which already has its own implementation of iteration
* and no longer uses this API. If there's any use cases other than Flink ML that needs
* iteration support, please reach out to dev@flink.apache.org and we can consider making
* the Flink ML iteration implementation a separate common library.
* @see <a
* href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-357%3A+Deprecate+Iteration+API+of+DataStream">
* FLIP-357: Deprecate Iteration API of DataStream </a>
* @see <a href="https://nightlies.apache.org/flink/flink-ml-docs-stable/">Flink ML </a>
*/
@Deprecated
public IterativeStream<T> iterate(long maxWaitTimeMillis) {
return new IterativeStream<>(this, maxWaitTimeMillis);
} | 3.68 |
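A hedged sketch of the (now deprecated) iteration pattern described above. It assumes an existing StreamExecutionEnvironment named env, imports are omitted, and the map/filter logic is a placeholder for real feedback and termination conditions.

```java
// Sketch only: decrement values and feed positive ones back into the loop.
DataStream<Long> someIntegers = env.fromSequence(0, 1000);

IterativeStream<Long> iteration = someIntegers.iterate(5000); // stop after 5s without feedback data

DataStream<Long> minusOne = iteration.map(new MapFunction<Long, Long>() {
    @Override
    public Long map(Long value) {
        return value - 1;
    }
});

// Values still above zero are fed back to the iteration head ...
iteration.closeWith(minusOne.filter(value -> value > 0));

// ... everything else leaves the loop and continues downstream.
DataStream<Long> done = minusOne.filter(value -> value <= 0);
```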
framework_Window_bringToFront | /**
* If there are currently several windows visible, calling this method makes
* this window topmost.
* <p>
* This method can only be called if this window is connected to a UI. Else an
* illegal state exception is thrown. Also, if there are modal windows and
* this window is not modal, an illegal state exception is thrown.
* <p>
*/
public void bringToFront() {
UI uI = getUI();
if (uI == null) {
throw new IllegalStateException(
"Window must be attached to parent before calling bringToFront method.");
}
int maxBringToFront = -1;
for (Window w : uI.getWindows()) {
if (!isModal() && w.isModal()) {
throw new IllegalStateException(
"The UI contains modal windows, non-modal window cannot be brought to front.");
}
if (w.bringToFront != null) {
maxBringToFront = Math.max(maxBringToFront,
w.bringToFront.intValue());
}
}
bringToFront = Integer.valueOf(maxBringToFront + 1);
markAsDirty();
} | 3.68 |
graphhopper_TurnCostStorage_getAllTurnCosts | /**
* Returns an iterator over all entries.
*
* @return an iterator over all entries.
*/
public Iterator getAllTurnCosts() {
return new Itr();
} | 3.68 |
hbase_TableInputFormatBase_getSplits | /**
* Calculates the splits that will serve as input for the map tasks.
* <p/>
* Splits are created in number equal to the smallest between numSplits and the number of
* {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. If the number of splits is
* smaller than the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits
* are spanned across multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s and are
* grouped the most evenly possible. In the case splits are uneven the bigger splits are placed
* first in the {@link InputSplit} array.
* @param job the map task {@link JobConf}
* @param numSplits a hint to calculate the number of splits (mapred.map.tasks).
* @return the input splits
* @see InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
*/
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
if (this.table == null) {
initialize(job);
}
// null check in case our child overrides getTable to not throw.
try {
if (getTable() == null) {
// initialize() must not have been implemented in the subclass.
throw new IOException(INITIALIZATION_ERROR);
}
} catch (IllegalStateException exception) {
throw new IOException(INITIALIZATION_ERROR, exception);
}
byte[][] startKeys = this.regionLocator.getStartKeys();
if (startKeys == null || startKeys.length == 0) {
throw new IOException("Expecting at least one region");
}
if (this.inputColumns == null || this.inputColumns.length == 0) {
throw new IOException("Expecting at least one column");
}
int realNumSplits = numSplits > startKeys.length ? startKeys.length : numSplits;
InputSplit[] splits = new InputSplit[realNumSplits];
int middle = startKeys.length / realNumSplits;
int startPos = 0;
for (int i = 0; i < realNumSplits; i++) {
int lastPos = startPos + middle;
lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).getHostname();
splits[i] = new TableSplit(this.table.getName(), startKeys[startPos],
((i + 1) < realNumSplits) ? startKeys[lastPos] : HConstants.EMPTY_START_ROW,
regionLocation);
LOG.info("split: " + i + "->" + splits[i]);
startPos = lastPos;
}
return splits;
} | 3.68 |
framework_Range_withLength | /**
* Creates a range from a start point, with a given length.
*
* @param start
* the first integer to include in the range
* @param length
* the length of the resulting range
* @return a range starting from <code>start</code>, with
* <code>length</code> number of integers following
* @throws IllegalArgumentException
* if length < 0
*/
public static Range withLength(final int start, final int length)
throws IllegalArgumentException {
if (length < 0) {
/*
* The constructor of Range will throw an exception if start >
* start+length (i.e. if length is negative). We're throwing the
* same exception type, just with a more descriptive message.
*/
throw new IllegalArgumentException("length must not be negative");
}
return new Range(start, start + length);
} | 3.68 |
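A quick hedged illustration of the factory above; the half-open semantics and the exception message are taken from the constructor call and check shown in the snippet.

```java
// withLength(start, length) covers the half-open range [start, start + length).
Range r = Range.withLength(10, 5);        // covers the integers 10, 11, 12, 13, 14

// A zero length is allowed and yields an empty range; a negative length throws.
Range empty = Range.withLength(10, 0);
Range invalid = Range.withLength(10, -1); // IllegalArgumentException: "length must not be negative"
```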
druid_TableStat_setDataType | /**
* @since 1.0.20
*/
public void setDataType(String dataType) {
this.dataType = dataType;
} | 3.68 |
flink_ForwardHashExchangeProcessor_addExchangeAndReconnectEdge | // TODO This implementation should be updated once FLINK-21224 is finished.
private ExecEdge addExchangeAndReconnectEdge(
ReadableConfig tableConfig,
ExecEdge edge,
InputProperty inputProperty,
boolean strict,
boolean visitChild) {
ExecNode<?> target = edge.getTarget();
ExecNode<?> source = edge.getSource();
if (source instanceof CommonExecExchange) {
return edge;
}
// only Calc, Correlate and Sort can propagate sort property and distribution property
if (visitChild
&& (source instanceof BatchExecCalc
|| source instanceof BatchExecPythonCalc
|| source instanceof BatchExecSort
|| source instanceof BatchExecCorrelate
|| source instanceof BatchExecPythonCorrelate)) {
ExecEdge newEdge =
addExchangeAndReconnectEdge(
tableConfig,
source.getInputEdges().get(0),
inputProperty,
strict,
true);
source.setInputEdges(Collections.singletonList(newEdge));
}
BatchExecExchange exchange =
createExchangeWithKeepInputAsIsDistribution(
tableConfig, inputProperty, strict, (RowType) edge.getOutputType());
ExecEdge newEdge =
new ExecEdge(source, exchange, edge.getShuffle(), edge.getExchangeMode());
exchange.setInputEdges(Collections.singletonList(newEdge));
return new ExecEdge(exchange, target, edge.getShuffle(), edge.getExchangeMode());
} | 3.68 |
hbase_FutureUtils_addListener | /**
* Almost the same with {@link #addListener(CompletableFuture, BiConsumer)} method above, the only
* exception is that we will call
* {@link CompletableFuture#whenCompleteAsync(BiConsumer, Executor)}.
* @see #addListener(CompletableFuture, BiConsumer)
*/
@SuppressWarnings("FutureReturnValueIgnored")
public static <T> void addListener(CompletableFuture<T> future,
BiConsumer<? super T, ? super Throwable> action, Executor executor) {
future.whenCompleteAsync((resp, error) -> {
try {
action.accept(resp, unwrapCompletionException(error));
} catch (Throwable t) {
LOG.error("Unexpected error caught when processing CompletableFuture", t);
}
}, executor);
} | 3.68 |
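A hypothetical usage sketch; fetchRowAsync and LOG are stand-ins for an asynchronous client call and a logger, not part of the snippet above:

ExecutorService pool = Executors.newSingleThreadExecutor();
CompletableFuture<String> future = fetchRowAsync(); // assumed async call returning a future
FutureUtils.addListener(future, (result, error) -> {
  // error has already been unwrapped from CompletionException by the helper
  if (error != null) {
    LOG.warn("fetch failed", error);
  } else {
    LOG.info("fetched {}", result);
  }
}, pool); // the callback runs on pool, not on the thread that completed the future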
flink_StreamExecutionEnvironment_generateStreamGraph | /**
* Generates a {@link StreamGraph} that consists of the given {@link Transformation
* transformations} and is configured with the configuration of this environment.
*
* <p>This method does not access or clear the previously registered transformations.
*
* @param transformations list of transformations that the graph should contain
* @return The stream graph representing the transformations
*/
@Internal
public StreamGraph generateStreamGraph(List<Transformation<?>> transformations) {
return getStreamGraphGenerator(transformations).generate();
} | 3.68 |
zxing_MatrixUtil_makeVersionInfoBits	// Make bit vector of version information. On success, store the result in "bits"; on failure a WriterException is thrown.
// See 8.10 of JISX0510:2004 (p.45) for details.
static void makeVersionInfoBits(Version version, BitArray bits) throws WriterException {
bits.appendBits(version.getVersionNumber(), 6);
int bchCode = calculateBCHCode(version.getVersionNumber(), VERSION_INFO_POLY);
bits.appendBits(bchCode, 12);
if (bits.getSize() != 18) { // Just in case.
throw new WriterException("should not happen but we got: " + bits.getSize());
}
} | 3.68 |
Activiti_BpmnDeploymentHelper_updateTimersAndEvents | /**
* Updates all timers and events for the process definition. This removes obsolete message and signal
* subscriptions and timers, and adds new ones.
*/
public void updateTimersAndEvents(ProcessDefinitionEntity processDefinition,
ProcessDefinitionEntity previousProcessDefinition, ParsedDeployment parsedDeployment) {
Process process = parsedDeployment.getProcessModelForProcessDefinition(processDefinition);
BpmnModel bpmnModel = parsedDeployment.getBpmnModelForProcessDefinition(processDefinition);
eventSubscriptionManager.removeObsoleteMessageEventSubscriptions(previousProcessDefinition);
eventSubscriptionManager.addMessageEventSubscriptions(processDefinition, process, bpmnModel);
eventSubscriptionManager.removeObsoleteSignalEventSubScription(previousProcessDefinition);
eventSubscriptionManager.addSignalEventSubscriptions(Context.getCommandContext(), processDefinition, process, bpmnModel);
timerManager.removeObsoleteTimers(processDefinition);
timerManager.scheduleTimers(processDefinition, process);
} | 3.68 |
hbase_Scan_setScanMetricsEnabled | /**
* Enable collection of {@link ScanMetrics}. For advanced users.
* @param enabled Set to true to enable accumulating scan metrics
*/
public Scan setScanMetricsEnabled(final boolean enabled) {
setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
return this;
} | 3.68 |
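A hedged usage sketch; table is an existing HBase Table instance, and the accessor for reading the metrics back differs between client versions, so it is only described in a comment:

Scan scan = new Scan();
scan.setScanMetricsEnabled(true); // sets the SCAN_ATTRIBUTES_METRICS_ENABLE attribute
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result r : scanner) {
    // process rows; scan metrics accumulate while iterating
  }
  // Afterwards the accumulated ScanMetrics can be read back from the scan/scanner
  // (the exact accessor varies across HBase client versions).
}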
dubbo_AbstractZookeeperTransporter_getURLBackupAddress | /**
* get all zookeeper urls (such as zookeeper://127.0.0.1:2181?backup=127.0.0.1:8989,127.0.0.1:9999)
*
* @param url such as zookeeper://127.0.0.1:2181?backup=127.0.0.1:8989,127.0.0.1:9999
* @return such as 127.0.0.1:2181,127.0.0.1:8989,127.0.0.1:9999
*/
public List<String> getURLBackupAddress(URL url) {
List<String> addressList = new ArrayList<>();
addressList.add(url.getAddress());
addressList.addAll(url.getParameter(RemotingConstants.BACKUP_KEY, Collections.emptyList()));
String authPrefix = null;
if (StringUtils.isNotEmpty(url.getUsername())) {
StringBuilder buf = new StringBuilder();
buf.append(url.getUsername());
if (StringUtils.isNotEmpty(url.getPassword())) {
buf.append(':');
buf.append(url.getPassword());
}
buf.append('@');
authPrefix = buf.toString();
}
if (StringUtils.isNotEmpty(authPrefix)) {
List<String> authedAddressList = new ArrayList<>(addressList.size());
for (String addr : addressList) {
authedAddressList.add(authPrefix + addr);
}
return authedAddressList;
}
return addressList;
} | 3.68 |
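An illustrative call using the address format from the Javadoc above; transporter stands for an instance of a concrete AbstractZookeeperTransporter subclass:

URL url = URL.valueOf("zookeeper://127.0.0.1:2181?backup=127.0.0.1:8989,127.0.0.1:9999");
// Expected: [127.0.0.1:2181, 127.0.0.1:8989, 127.0.0.1:9999]
List<String> addresses = transporter.getURLBackupAddress(url);

// With credentials on the URL, every address is prefixed with "user:password@".
URL authed = URL.valueOf("zookeeper://admin:secret@127.0.0.1:2181");
// Expected: [admin:secret@127.0.0.1:2181]
List<String> authedAddresses = transporter.getURLBackupAddress(authed);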
hbase_InfoServer_buildAdminAcl | /**
* Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which
* are meant only for administrators.
*/
AccessControlList buildAdminAcl(Configuration conf) {
final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null);
final String adminGroups =
conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null);
if (userGroups == null && adminGroups == null) {
// Backwards compatibility - if the user doesn't have anything set, allow all users in.
return new AccessControlList("*", null);
}
return new AccessControlList(userGroups, adminGroups);
} | 3.68 |
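A sketch of the two configuration cases the method distinguishes; infoServer stands for an InfoServer instance, and because the method is package-private the calls are assumed to live in the same package (for example a unit test):

Configuration conf = new Configuration();
// Neither admin users nor admin groups configured: everyone is allowed ("*").
AccessControlList openAcl = infoServer.buildAdminAcl(conf);

// Restrict the admin-only endpoints to an explicit user list.
conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, "hbase_admin");
AccessControlList restrictedAcl = infoServer.buildAdminAcl(conf);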
zxing_MatrixUtil_embedPositionDetectionPatternsAndSeparators | // Embed position detection patterns and surrounding vertical/horizontal separators.
private static void embedPositionDetectionPatternsAndSeparators(ByteMatrix matrix) throws WriterException {
// Embed three big squares at corners.
int pdpWidth = POSITION_DETECTION_PATTERN[0].length;
// Left top corner.
embedPositionDetectionPattern(0, 0, matrix);
// Right top corner.
embedPositionDetectionPattern(matrix.getWidth() - pdpWidth, 0, matrix);
// Left bottom corner.
embedPositionDetectionPattern(0, matrix.getWidth() - pdpWidth, matrix);
// Embed horizontal separation patterns around the squares.
int hspWidth = 8;
// Left top corner.
embedHorizontalSeparationPattern(0, hspWidth - 1, matrix);
// Right top corner.
embedHorizontalSeparationPattern(matrix.getWidth() - hspWidth,
hspWidth - 1, matrix);
// Left bottom corner.
embedHorizontalSeparationPattern(0, matrix.getWidth() - hspWidth, matrix);
// Embed vertical separation patterns around the squares.
int vspSize = 7;
// Left top corner.
embedVerticalSeparationPattern(vspSize, 0, matrix);
// Right top corner.
embedVerticalSeparationPattern(matrix.getHeight() - vspSize - 1, 0, matrix);
// Left bottom corner.
embedVerticalSeparationPattern(vspSize, matrix.getHeight() - vspSize,
matrix);
} | 3.68 |
flink_PartitionOperator_getCustomPartitioner | /**
* Gets the custom partitioner from this partitioning.
*
* @return The custom partitioner.
*/
@Internal
public Partitioner<?> getCustomPartitioner() {
return customPartitioner;
} | 3.68 |
hbase_BackupManager_getBackupInfo | /**
* Returns backup info
*/
protected BackupInfo getBackupInfo() {
return backupInfo;
} | 3.68 |
rocketmq-connect_ExpressionBuilder_appendColumnName | /**
 * Append the specified column identifier to this builder's expression, optionally surrounded by
 * leading and trailing quotes depending on {@link #setQuoteIdentifiers(QuoteMethod)}.
*
* @param name the name to be appended
* @param quote whether to quote the column name; may not be null
* @return this builder to enable methods to be chained; never null
*/
public ExpressionBuilder appendColumnName(String name, QuoteMethod quote) {
appendLeadingQuote(quote);
sb.append(name);
appendTrailingQuote(quote);
return this;
} | 3.68 |
AreaShop_RentedRegionEvent_hasExtended | /**
* Check if the region was extended or rented for the first time.
* @return true if the region was extended, false when rented for the first time
*/
public boolean hasExtended() {
return extended;
} | 3.68 |
rocketmq-connect_Serdes_serdeFrom | /**
* Construct a serde object from separate serializer and deserializer
*
* @param serializer must not be null.
* @param deserializer must not be null.
*/
static public <T> Serde<T> serdeFrom(final Serializer<T> serializer, final Deserializer<T> deserializer) {
if (serializer == null) {
throw new IllegalArgumentException("serializer must not be null");
}
if (deserializer == null) {
throw new IllegalArgumentException("deserializer must not be null");
}
return new WrapperSerde<>(serializer, deserializer);
} | 3.68 |
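A hypothetical pairing of an existing serializer and deserializer; StringSerializer and StringDeserializer are assumed implementations of the two interfaces and are not part of the snippet:

Serde<String> stringSerde = Serdes.serdeFrom(new StringSerializer(), new StringDeserializer());

// Passing null for either argument fails fast with IllegalArgumentException:
// Serdes.serdeFrom(null, new StringDeserializer()); // would throw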
flink_ClusterEntrypointUtils_generateJobManagerWorkingDirectoryFile | /**
* Generates the working directory {@link File} for the JobManager process. This method does not
* ensure that the working directory exists.
*
* @param configuration to extract the required settings from
* @param resourceId identifying the JobManager process
* @return working directory file
*/
@VisibleForTesting
public static File generateJobManagerWorkingDirectoryFile(
Configuration configuration, ResourceID resourceId) {
return generateWorkingDirectoryFile(
configuration,
Optional.of(ClusterOptions.JOB_MANAGER_PROCESS_WORKING_DIR_BASE),
"jm_" + resourceId);
} | 3.68 |
hadoop_JavaCommandLineBuilder_addPrefixedConfOptions | /**
* Add all configuration options which match the prefix
* @param conf configuration
* @param prefix prefix, e.g {@code "slider."}
* @return the number of entries copied
*/
public int addPrefixedConfOptions(Configuration conf, String prefix) {
int copied = 0;
for (Map.Entry<String, String> entry : conf) {
if (entry.getKey().startsWith(prefix)) {
define(entry.getKey(), entry.getValue());
copied++;
}
}
return copied;
} | 3.68 |
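A hedged usage sketch; builder is an existing JavaCommandLineBuilder, and the option names are invented purely to show which entries match the prefix:

Configuration conf = new Configuration();
conf.set("slider.am.memory", "1024");                 // matches the prefix, gets copied
conf.set("yarn.resourcemanager.address", "rm:8032");  // different prefix, ignored
int copied = builder.addPrefixedConfOptions(conf, "slider.");
// copied == 1, and the builder now defines slider.am.memory=1024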
framework_AbstractMultiSelectConnector_isEnabled | /**
* Returns whether the given item is enabled or not.
* <p>
* Disabling items is not supported by all multiselects.
*
* @param item
* the item, not {@code null}
* @return {@code true} enabled, {@code false} if not
*/
static boolean isEnabled(JsonObject item) {
return !(item.hasKey(ListingJsonConstants.JSONKEY_ITEM_DISABLED)
&& item.getBoolean(
ListingJsonConstants.JSONKEY_ITEM_DISABLED));
} | 3.68 |
hadoop_OBSInputStream_calculateRequestLimit | /**
 * Calculate the limit for a get request, based on the requested range and the
 * state of the object.
*
* @param targetPos position of the read
* @param length length of bytes requested; if less than zero
* "unknown"
* @param contentLength total length of file
* @param readahead current readahead value
* @return the absolute value of the limit of the request.
*/
static long calculateRequestLimit(
final long targetPos, final long length, final long contentLength,
final long readahead) {
// cannot read past the end of the object
return Math.min(contentLength, length < 0 ? contentLength
: targetPos + Math.max(readahead, length));
} | 3.68 |
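Two worked examples of the limit arithmetic with made-up numbers; since the method is package-private, the calls are assumed to live in the same package (for example a unit test):

// Known read length: limit = min(contentLength, targetPos + max(readahead, length))
long limit = OBSInputStream.calculateRequestLimit(1_000L, 100L, 1_000_000L, 65_536L);
// -> min(1_000_000, 1_000 + max(65_536, 100)) = 66_536

// Unknown length (negative): read up to the end of the object.
long limitUnknown = OBSInputStream.calculateRequestLimit(1_000L, -1L, 1_000_000L, 65_536L);
// -> min(1_000_000, 1_000_000) = 1_000_000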
hadoop_ShortWritable_hashCode | /** hash code */
@Override
public int hashCode() {
return value;
} | 3.68 |
hmily_HmilyUpdateStatement_getWhere | /**
* Get where.
*
* @return where segment
*/
public Optional<HmilyWhereSegment> getWhere() {
return Optional.ofNullable(where);
} | 3.68 |
flink_ArchivedExecutionGraph_createFrom | /**
* Create a {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}.
*
* @param executionGraph to create the ArchivedExecutionGraph from
* @param statusOverride optionally overrides the JobStatus of the ExecutionGraph with a
* non-globally-terminal state and clears timestamps of globally-terminal states
* @return ArchivedExecutionGraph created from the given ExecutionGraph
*/
public static ArchivedExecutionGraph createFrom(
ExecutionGraph executionGraph, @Nullable JobStatus statusOverride) {
Preconditions.checkArgument(
statusOverride == null || !statusOverride.isGloballyTerminalState(),
"Status override is only allowed for non-globally-terminal states.");
Map<JobVertexID, ArchivedExecutionJobVertex> archivedTasks = new HashMap<>();
List<ArchivedExecutionJobVertex> archivedVerticesInCreationOrder = new ArrayList<>();
for (ExecutionJobVertex task : executionGraph.getVerticesTopologically()) {
ArchivedExecutionJobVertex archivedTask = task.archive();
archivedVerticesInCreationOrder.add(archivedTask);
archivedTasks.put(task.getJobVertexId(), archivedTask);
}
final Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators =
executionGraph.getAccumulatorsSerialized();
final long[] timestamps = new long[JobStatus.values().length];
// if the state is overridden with a non-globally-terminal state then we need to erase
// traces of globally-terminal states for consistency
final boolean clearGloballyTerminalStateTimestamps = statusOverride != null;
for (JobStatus jobStatus : JobStatus.values()) {
final int ordinal = jobStatus.ordinal();
if (!(clearGloballyTerminalStateTimestamps && jobStatus.isGloballyTerminalState())) {
timestamps[ordinal] = executionGraph.getStatusTimestamp(jobStatus);
}
}
return new ArchivedExecutionGraph(
executionGraph.getJobID(),
executionGraph.getJobName(),
archivedTasks,
archivedVerticesInCreationOrder,
timestamps,
statusOverride == null ? executionGraph.getState() : statusOverride,
executionGraph.getFailureInfo(),
executionGraph.getJsonPlan(),
executionGraph.getAccumulatorResultsStringified(),
serializedUserAccumulators,
executionGraph.getArchivedExecutionConfig(),
executionGraph.isStoppable(),
executionGraph.getCheckpointCoordinatorConfiguration(),
executionGraph.getCheckpointStatsSnapshot(),
executionGraph.getStateBackendName().orElse(null),
executionGraph.getCheckpointStorageName().orElse(null),
executionGraph.isChangelogStateBackendEnabled(),
executionGraph.getChangelogStorageName().orElse(null));
} | 3.68 |
morf_GraphBasedUpgradeScriptGenerator_generatePostUpgradeStatements | /**
* @return post-upgrade statements to be executed after the Graph Based Upgrade
*/
public List<String> generatePostUpgradeStatements() {
ImmutableList.Builder<String> statements = ImmutableList.builder();
// temp table drop
statements.addAll(connectionResources.sqlDialect().truncateTableStatements(idTable));
statements.addAll(connectionResources.sqlDialect().dropStatements(idTable));
statements.addAll(UpgradeHelper.postSchemaUpgrade(upgradeSchemas,
viewChanges,
viewChangesDeploymentHelperFactory.create(connectionResources)));
// Since Oracle is not able to re-map schema references in trigger code, we need to rebuild all triggers
// for id column autonumbering when exporting and importing data between environments.
// We will drop-and-recreate triggers whenever there are upgrade steps to execute. Ideally we'd want to do
// this step once, however there's no easy way to do that with our upgrade framework.
AtomicBoolean first = new AtomicBoolean(true);
upgradeSchemas.getTargetSchema().tables().stream()
.map(t -> connectionResources.sqlDialect().rebuildTriggers(t))
.filter(triggerSql -> !triggerSql.isEmpty())
.peek(triggerSql -> {
if (first.compareAndSet(true, false)) {
statements.addAll(ImmutableList.of(
connectionResources.sqlDialect().convertCommentToSQL("Upgrades executed. Rebuilding all triggers to account for potential changes to autonumbered columns")
));
}
})
.forEach(statements::addAll);
// upgrade script additions (if any)
upgradeScriptAdditions.stream()
.map(add -> Lists.newArrayList(add.sql(connectionResources)))
.forEach(statements::addAll);
// status table
statements.addAll(upgradeStatusTableService.updateTableScript(UpgradeStatus.IN_PROGRESS, UpgradeStatus.COMPLETED));
return statements.build();
} | 3.68 |
querydsl_MapExpressionBase_contains | /**
* Create a {@code (key, value) in this} expression
*
* @param key key of entry
* @param value value of entry
* @return expression
*/
@SuppressWarnings("unchecked")
public final BooleanExpression contains(Expression<K> key, Expression<V> value) {
return get(key).eq((Expression) value);
} | 3.68 |
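A hypothetical Querydsl usage; customer is assumed to be a generated query type with a Map-valued attributes property, and Expressions.constant wraps the literal key and value:

query.where(customer.attributes.contains(
    Expressions.constant("tier"),
    Expressions.constant("gold")));
// Renders roughly as: attributes['tier'] = 'gold'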
framework_ApplicationConnection_getThemeUri | /**
* Gets the URI for the current theme. Can be used to reference theme
* resources.
*
* @return URI to the current theme
*/
public String getThemeUri() {
return configuration.getVaadinDirUrl() + "themes/"
+ getUIConnector().getActiveTheme();
} | 3.68 |
hbase_LockManager_release | /**
* Release the lock. No-op if the lock was never acquired.
*/
public void release() {
if (proc != null) {
proc.unlock(master.getMasterProcedureExecutor().getEnvironment());
}
proc = null;
} | 3.68 |
hbase_LocalHBaseCluster_getConfiguration | /** Returns the Configuration used by this LocalHBaseCluster */
public Configuration getConfiguration() {
return this.conf;
} | 3.68 |
framework_Table_setText | /**
* Pass one String if spanColumns is used, one String for each visible
* column otherwise.
*/
public void setText(String... text) {
if (text == null || (text.length == 1 && text[0] == null)) {
text = new String[] { "" };
}
this.text = text;
} | 3.68 |
hudi_ParquetUtils_fetchRecordKeysWithPositions | /**
* Fetch {@link HoodieKey}s with row positions from the given parquet file.
*
* @param configuration configuration to build fs object
* @param filePath The parquet file path.
* @param keyGeneratorOpt instance of KeyGenerator.
* @return {@link List} of pairs of {@link HoodieKey} and row position fetched from the parquet file
*/
@Override
public List<Pair<HoodieKey, Long>> fetchRecordKeysWithPositions(Configuration configuration, Path filePath, Option<BaseKeyGenerator> keyGeneratorOpt) {
List<Pair<HoodieKey, Long>> hoodieKeysAndPositions = new ArrayList<>();
long position = 0;
try (ClosableIterator<HoodieKey> iterator = getHoodieKeyIterator(configuration, filePath, keyGeneratorOpt)) {
while (iterator.hasNext()) {
hoodieKeysAndPositions.add(Pair.of(iterator.next(), position));
position++;
}
return hoodieKeysAndPositions;
}
} | 3.68 |