name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
framework_ApplicationConnection_flushActiveConnector | /**
* Calls {@link ComponentConnector#flush()} on the active connector. Does
* nothing if there is no active (focused) connector.
*/
public void flushActiveConnector() {
ComponentConnector activeConnector = getActiveConnector();
if (activeConnector == null) {
return;
}
activeConnector.flush();
} | 3.68 |
hadoop_DirectBufferPool_countBuffersOfSize | /**
* Return the number of available buffers of a given size.
* This is used only for tests.
*/
@VisibleForTesting
int countBuffersOfSize(int size) {
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
return 0;
}
return list.size();
} | 3.68 |
flink_SqlJsonUtils_createArrayNode | /** Returns a new {@link ArrayNode}. */
public static ArrayNode createArrayNode() {
return MAPPER.createArrayNode();
} | 3.68 |
flink_PythonFunction_takesRowAsInput | /** Returns whether the Python function takes a whole row as input instead of each column of the row. */
default boolean takesRowAsInput() {
return false;
} | 3.68 |
hbase_RecoverableZooKeeper_setAcl | /**
* setAcl is an idempotent operation. Retries before throwing an exception.
* @return list of ACLs
*/
public Stat setAcl(String path, List<ACL> acls, int version)
throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.setAcl");
try (Scope ignored = span.makeCurrent()) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
span.setStatus(StatusCode.OK);
return checkZk().setACL(path, acls, version);
} catch (KeeperException e) {
switch (e.code()) {
case CONNECTIONLOSS:
case OPERATIONTIMEOUT:
TraceUtil.setError(span, e);
retryOrThrow(retryCounter, e, "setAcl");
break;
default:
TraceUtil.setError(span, e);
throw e;
}
}
retryCounter.sleepUntilNextRetry();
}
} finally {
span.end();
}
} | 3.68 |
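The snippet above illustrates a common retry-on-transient-error pattern: only CONNECTIONLOSS and OPERATIONTIMEOUT are retried, everything else is rethrown. Below is a minimal, framework-free Java sketch of the same loop; the RETRIES and BACKOFF_MS constants and the isTransient check are illustrative stand-ins, not HBase API.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

public final class RetryLoopSketch {

    static final int RETRIES = 4;
    static final long BACKOFF_MS = 200;

    interface Call<T> {
        T run() throws Exception;
    }

    // Stand-in for the CONNECTIONLOSS / OPERATIONTIMEOUT classification above.
    static boolean isTransient(Exception e) {
        return e instanceof IOException;
    }

    static <T> T withRetries(Call<T> call) throws Exception {
        int attempt = 0;
        while (true) {
            try {
                return call.run();
            } catch (Exception e) {
                if (!isTransient(e) || attempt++ >= RETRIES) {
                    throw e; // non-retriable error, or retries exhausted
                }
                TimeUnit.MILLISECONDS.sleep(BACKOFF_MS * attempt); // simple linear back-off
            }
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(withRetries(() -> "ok"));
    }
}
```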
framework_CheckBoxGroupElement_getOptionElements | /**
* Gets the list of option elements for this check box group.
*
* @return list of option elements
*/
public List<WebElement> getOptionElements() {
return findElements(bySelectOption);
} | 3.68 |
framework_WindowConnector_setWindowOrderAndPosition | /**
* Gives the WindowConnector an order number. As a side effect, moves the
* window according to its order number so the windows are stacked. This
* method should be called for each window in the order they should appear.
*/
public void setWindowOrderAndPosition() {
getWidget().setWindowOrderAndPosition();
} | 3.68 |
flink_StreamGraphHasherV2_traverseStreamGraphAndGenerateHashes | /**
* Returns a map with a hash for each {@link StreamNode} of the {@link StreamGraph}. The hash is
* used as the {@link JobVertexID} in order to identify nodes across job submissions if they
* didn't change.
*
* <p>The complete {@link StreamGraph} is traversed. The hash is either computed from the
* transformation's user-specified id (see {@link Transformation#getUid()}) or generated in a
* deterministic way.
*
* <p>The generated hash is deterministic with respect to:
*
* <ul>
* <li>node-local properties (node ID),
* <li>chained output nodes, and
* <li>input nodes hashes
* </ul>
*
* @return A map from {@link StreamNode#id} to hash as 16-byte array.
*/
@Override
public Map<Integer, byte[]> traverseStreamGraphAndGenerateHashes(StreamGraph streamGraph) {
// The hash function used to generate the hash
final HashFunction hashFunction = Hashing.murmur3_128(0);
final Map<Integer, byte[]> hashes = new HashMap<>();
Set<Integer> visited = new HashSet<>();
Queue<StreamNode> remaining = new ArrayDeque<>();
// We need to make the source order deterministic. The source IDs are
// not returned in the same order, which means that submitting the same
// program twice might result in different traversal, which breaks the
// deterministic hash assignment.
List<Integer> sources = new ArrayList<>();
for (Integer sourceNodeId : streamGraph.getSourceIDs()) {
sources.add(sourceNodeId);
}
Collections.sort(sources);
//
// Traverse the graph in a breadth-first manner. Keep in mind that
// the graph is not a tree and multiple paths to nodes can exist.
//
// Start with source nodes
for (Integer sourceNodeId : sources) {
remaining.add(streamGraph.getStreamNode(sourceNodeId));
visited.add(sourceNodeId);
}
StreamNode currentNode;
while ((currentNode = remaining.poll()) != null) {
// Generate the hash code. Because multiple paths exist to each
// node, we might not have all required inputs available to
// generate the hash code.
if (generateNodeHash(
currentNode,
hashFunction,
hashes,
streamGraph.isChainingEnabled(),
streamGraph)) {
// Add the child nodes
for (StreamEdge outEdge : currentNode.getOutEdges()) {
StreamNode child = streamGraph.getTargetVertex(outEdge);
if (!visited.contains(child.getId())) {
remaining.add(child);
visited.add(child.getId());
}
}
} else {
// We will revisit this later.
visited.remove(currentNode.getId());
}
}
return hashes;
} | 3.68 |
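A framework-free sketch of the traversal strategy described above: sort the source IDs for determinism, walk the graph breadth-first, and put a node back into the "unvisited" state when not all of its inputs have been processed yet. The tiny graph model here is illustrative, not Flink's StreamGraph.

```java
import java.util.*;

public final class DeterministicBfsSketch {
    public static void main(String[] args) {
        // adjacency: node -> children
        Map<Integer, List<Integer>> children = Map.of(
                1, List.of(3),
                2, List.of(3),
                3, List.of());
        // inputs: node -> parents (a node can only be "hashed" once all parents were)
        Map<Integer, List<Integer>> parents = Map.of(
                1, List.of(),
                2, List.of(),
                3, List.of(1, 2));

        List<Integer> sources = new ArrayList<>(List.of(2, 1)); // unordered source IDs
        Collections.sort(sources);                              // make the traversal deterministic

        Set<Integer> visited = new HashSet<>(sources);
        Deque<Integer> remaining = new ArrayDeque<>(sources);
        List<Integer> order = new ArrayList<>();

        Integer current;
        while ((current = remaining.poll()) != null) {
            if (order.containsAll(parents.get(current))) {
                order.add(current);                              // "generate the hash"
                for (Integer child : children.get(current)) {
                    if (visited.add(child)) {
                        remaining.add(child);
                    }
                }
            } else {
                visited.remove(current);                         // revisit later via another path
            }
        }
        System.out.println(order); // [1, 2, 3]
    }
}
```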
graphhopper_BBox_createInverse | /**
* Pre-fills the BBox with inverted extreme values so that any subsequent update can only expand it.
*/
public static BBox createInverse(boolean elevation) {
if (elevation) {
return new BBox(Double.MAX_VALUE, -Double.MAX_VALUE, Double.MAX_VALUE, -Double.MAX_VALUE,
Double.MAX_VALUE, -Double.MAX_VALUE, true);
} else {
return new BBox(Double.MAX_VALUE, -Double.MAX_VALUE, Double.MAX_VALUE, -Double.MAX_VALUE,
Double.NaN, Double.NaN, false);
}
} | 3.68 |
morf_CaseStatement_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(getWhenConditions())
.dispatch(getDefaultValue());
} | 3.68 |
hbase_RegionPlan_setDestination | /**
* Set the destination server for the plan for this region.
*/
public void setDestination(ServerName dest) {
this.dest = dest;
} | 3.68 |
querydsl_DateExpression_currentDate | /**
* Create an expression representing the current date as a {@code DateExpression} instance
*
* @param cl type of expression
* @return current date
*/
public static <T extends Comparable> DateExpression<T> currentDate(Class<T> cl) {
return Expressions.dateOperation(cl, Ops.DateTimeOps.CURRENT_DATE);
} | 3.68 |
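A hedged usage sketch, assuming Querydsl's core types are on the classpath; in practice the expression is embedded in a larger query and rendered by the backend-specific serializer rather than printed.

```java
import com.querydsl.core.types.dsl.DateExpression;

public final class CurrentDateSketch {
    public static void main(String[] args) {
        // Build a dialect-independent CURRENT_DATE expression typed as java.sql.Date.
        DateExpression<java.sql.Date> today = DateExpression.currentDate(java.sql.Date.class);
        System.out.println(today);
    }
}
```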
graphhopper_SubnetworkStorage_setSubnetwork | /**
* This method sets the subnetwork id of the specified nodeId. The default is 0, which means
* the subnetwork was too small to be worth storing.
*/
public void setSubnetwork(int nodeId, int subnetwork) {
if (subnetwork > 127)
throw new IllegalArgumentException("Number of subnetworks is currently limited to 127 but requested " + subnetwork);
da.setByte(nodeId, (byte) subnetwork);
} | 3.68 |
hadoop_GangliaMetricVisitor_getType | /**
* @return the type of a visited metric
*/
String getType() {
return type;
} | 3.68 |
framework_CssLayoutConnector_makeCamelCase | /**
* Converts a css property string to CamelCase
*
* @param cssProperty
* The property string
* @return A string converted to camelcase
*/
private static final String makeCamelCase(String cssProperty) {
cssProperty = SharedUtil.dashSeparatedToCamelCase(cssProperty);
if ("float".equals(cssProperty)) {
if (BrowserInfo.get().isIE()) {
return "styleFloat";
} else {
return "cssFloat";
}
}
return cssProperty;
} | 3.68 |
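A standalone sketch of the dash-to-camelCase step that the helper above delegates to SharedUtil; this re-implementation is illustrative and not Vaadin's actual SharedUtil code.

```java
public final class CamelCaseSketch {

    // Convert e.g. "border-top-color" to "borderTopColor".
    static String dashSeparatedToCamelCase(String s) {
        StringBuilder out = new StringBuilder();
        boolean upperNext = false;
        for (char c : s.toCharArray()) {
            if (c == '-') {
                upperNext = true;
                continue;
            }
            out.append(upperNext ? Character.toUpperCase(c) : c);
            upperNext = false;
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(dashSeparatedToCamelCase("border-top-color")); // borderTopColor
        System.out.println(dashSeparatedToCamelCase("float"));            // float (unchanged)
    }
}
```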
hibernate-validator_TraversableResolvers_wrapWithCachingForSingleValidation | /**
* Potentially wrap the {@link TraversableResolver} into a caching one.
* <p>
* If {@code traversableResolver} is {@code TraverseAllTraversableResolver.INSTANCE}, we don't wrap it and it is
* returned directly. Same if the caching is explicitly disabled.
* <p>
* If {@code traversableResolver} is an instance of our {@code JPATraversableResolver}, we wrap it with a caching
* wrapper specially tailored for the requirements of the spec. It is a very common case as it is used as soon as we
* have a JPA implementation in the classpath so optimizing this case is worth it.
* <p>
* In all the other cases, we wrap the resolver for caching.
* <p>
* Note that, if the {@code TraversableResolver} is wrapped, a new instance is returned each time and it should be
* used only for the duration of a validation call.
*
* @return The resolver for the duration of a validation call.
*/
public static TraversableResolver wrapWithCachingForSingleValidation(TraversableResolver traversableResolver,
boolean traversableResolverResultCacheEnabled) {
if ( TraverseAllTraversableResolver.class.equals( traversableResolver.getClass() ) || !traversableResolverResultCacheEnabled ) {
return traversableResolver;
}
else if ( JPA_AWARE_TRAVERSABLE_RESOLVER_CLASS_NAME.equals( traversableResolver.getClass().getName() ) ) {
return new CachingJPATraversableResolverForSingleValidation( traversableResolver );
}
else {
return new CachingTraversableResolverForSingleValidation( traversableResolver );
}
} | 3.68 |
hadoop_TypedBytesInput_readLong | /**
* Reads the long following a <code>Type.LONG</code> code.
* @return the obtained long
* @throws IOException
*/
public long readLong() throws IOException {
return in.readLong();
} | 3.68 |
hbase_HRegionServer_buildRegionSpaceUseReportRequest | /**
* Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
* @param regionSizes The size in bytes of regions
* @return The corresponding protocol buffer message.
*/
RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(RegionSizeStore regionSizes) {
RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
for (Entry<RegionInfo, RegionSize> entry : regionSizes) {
request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue().getSize()));
}
return request.build();
} | 3.68 |
pulsar_BaseContext_getStateStore | /**
* Get the state store with the provided store name.
*
* @param tenant the state tenant name
* @param ns the state namespace name
* @param name the state store name
* @param <X> the type of interface of the store to return
* @return the state store instance.
*
* @throws ClassCastException if the return type isn't a type
* or interface of the actual returned store.
*/
default <X extends StateStore> X getStateStore(String tenant, String ns, String name) {
throw new UnsupportedOperationException("Component cannot get state store");
} | 3.68 |
hadoop_AbstractS3ACommitter_loadAndAbort | /**
* Load a pendingset file and abort all of its contents.
* Invoked within a parallel run; the commitContext thread
* pool is already busy/possibly full, so do not
* execute work through the same submitter.
* @param commitContext context to commit through
* @param activeCommit commit state
* @param status status of file to load
* @param suppressExceptions should exceptions be suppressed rather than rethrown?
* @param deleteRemoteFiles should remote files be deleted?
* @throws IOException failure
*/
private void loadAndAbort(
final CommitContext commitContext,
final ActiveCommit activeCommit,
final FileStatus status,
final boolean suppressExceptions,
final boolean deleteRemoteFiles) throws IOException {
final Path path = status.getPath();
commitContext.switchToIOStatisticsContext();
try (DurationInfo ignored =
new DurationInfo(LOG, false, "Aborting %s", path)) {
PendingSet pendingSet = PersistentCommitData.load(
activeCommit.getSourceFS(),
status,
commitContext.getPendingSetSerializer());
FileSystem fs = getDestFS();
TaskPool.foreach(pendingSet.getCommits())
.executeWith(commitContext.getInnerSubmitter())
.suppressExceptions(suppressExceptions)
.run(commit -> {
try {
commitContext.abortSingleCommit(commit);
} catch (FileNotFoundException e) {
// Commit ID was not known; file may exist.
// delete it if instructed to do so.
if (deleteRemoteFiles) {
fs.delete(commit.destinationPath(), false);
}
}
});
}
} | 3.68 |
hbase_HFileWriterImpl_checkBlockBoundary | /**
* At a block boundary, writes all the inline blocks and opens a new block.
*/
protected void checkBlockBoundary() throws IOException {
boolean shouldFinishBlock = false;
// This means hbase.writer.unified.encoded.blocksize.ratio was set to something different from 0
// and we should use the encoding ratio
if (encodedBlockSizeLimit > 0) {
shouldFinishBlock = blockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit;
} else {
shouldFinishBlock = blockWriter.encodedBlockSizeWritten() >= hFileContext.getBlocksize()
|| blockWriter.blockSizeWritten() >= hFileContext.getBlocksize();
}
shouldFinishBlock &= blockWriter.checkBoundariesWithPredicate();
if (shouldFinishBlock) {
finishBlock();
writeInlineBlocks(false);
newBlock();
}
} | 3.68 |
flink_ViewUpdater_notifyOfAddedView | /**
* Notifies this ViewUpdater of a new metric that should be regularly updated.
*
* @param view metric that should be regularly updated
*/
public void notifyOfAddedView(View view) {
synchronized (lock) {
toAdd.add(view);
}
} | 3.68 |
flink_TableFactoryService_filterByFactoryClass | /** Filters factories with matching context by factory class. */
@SuppressWarnings("unchecked")
private static <T> List<T> filterByFactoryClass(
Class<T> factoryClass,
Map<String, String> properties,
List<TableFactory> foundFactories) {
List<TableFactory> classFactories =
foundFactories.stream()
.filter(p -> factoryClass.isAssignableFrom(p.getClass()))
.collect(Collectors.toList());
if (classFactories.isEmpty()) {
throw new NoMatchingTableFactoryException(
String.format("No factory implements '%s'.", factoryClass.getCanonicalName()),
factoryClass,
foundFactories,
properties);
}
return (List<T>) classFactories;
} | 3.68 |
flink_ExecutionConfig_disableForceKryo | /** Disable use of Kryo serializer for all POJOs. */
public void disableForceKryo() {
setForceKryo(false);
} | 3.68 |
zxing_CameraManager_stopPreview | /**
* Tells the camera to stop drawing preview frames.
*/
public synchronized void stopPreview() {
if (autoFocusManager != null) {
autoFocusManager.stop();
autoFocusManager = null;
}
if (camera != null && previewing) {
camera.getCamera().stopPreview();
previewCallback.setHandler(null, 0);
previewing = false;
}
} | 3.68 |
zxing_ProductResultParser_parse | // Treat all UPC and EAN variants as UPCs, in the sense that they are all product barcodes.
@Override
public ProductParsedResult parse(Result result) {
BarcodeFormat format = result.getBarcodeFormat();
if (!(format == BarcodeFormat.UPC_A || format == BarcodeFormat.UPC_E ||
format == BarcodeFormat.EAN_8 || format == BarcodeFormat.EAN_13)) {
return null;
}
String rawText = getMassagedText(result);
if (!isStringOfDigits(rawText, rawText.length())) {
return null;
}
// Not actually checking the checksum again here
String normalizedProductID;
// Expand UPC-E for purposes of searching
if (format == BarcodeFormat.UPC_E && rawText.length() == 8) {
normalizedProductID = UPCEReader.convertUPCEtoUPCA(rawText);
} else {
normalizedProductID = rawText;
}
return new ProductParsedResult(rawText, normalizedProductID);
} | 3.68 |
framework_Flash_removeParameter | /**
* Removes an object parameter from the list.
*
* @param name
* the name of the parameter to remove.
*/
public void removeParameter(String name) {
if (getState().embedParams == null) {
return;
}
getState().embedParams.remove(name);
requestRepaint();
} | 3.68 |
hbase_ActiveMasterManager_handleMasterNodeChange | /**
* Handle a change in the master node. Doesn't matter whether this was called from a nodeCreated
* or nodeDeleted event because there are no guarantees that the current state of the master node
* matches the event at the time of our next ZK request.
* <p>
* Uses the watchAndCheckExists method which watches the master address node regardless of whether
* it exists or not. If it does exist (there is an active master), it returns true. Otherwise it
* returns false.
* <p>
* A watcher is set which guarantees that this method will get called again if there is another
* change in the master node.
*/
private void handleMasterNodeChange() {
// Watch the node and check if it exists.
try {
synchronized (clusterHasActiveMaster) {
if (ZKUtil.watchAndCheckExists(watcher, watcher.getZNodePaths().masterAddressZNode)) {
// A master node exists, there is an active master
LOG.trace("A master is now available");
clusterHasActiveMaster.set(true);
} else {
// Node is no longer there, cluster does not have an active master
LOG.debug("No master available. Notifying waiting threads");
clusterHasActiveMaster.set(false);
// Notify any thread waiting to become the active master
clusterHasActiveMaster.notifyAll();
}
// Reset the active master sn. Will be re-fetched later if needed.
// We don't want to make a synchronous RPC under a monitor.
activeMasterServerName = null;
}
} catch (KeeperException ke) {
master.abort("Received an unexpected KeeperException, aborting", ke);
}
} | 3.68 |
flink_TieredStorageMemoryManagerImpl_recycleBuffer | /** Note that this method may be called by the netty thread. */
private void recycleBuffer(Object owner, MemorySegment buffer) {
bufferPool.recycle(buffer);
decNumRequestedBuffer(owner);
} | 3.68 |
open-banking-gateway_PaymentAccessFactory_paymentForPsuAndAspsp | /**
* Create a {@code PaymentAccess} object that, similar to a consent, faces the PSU/Fintech user and ASPSP pair.
* @param psu Payee/authorizer of this payment
* @param aspsp ASPSP/Bank that is going to perform the payment
* @param session Session that identifies the payment.
* @return Payment context to authorize
*/
public PaymentAccess paymentForPsuAndAspsp(Psu psu, Bank aspsp, ServiceSession session) {
PsuAspspPrvKey prvKey = prvKeyRepository.findByPsuIdAndAspspId(psu.getId(), aspsp.getId())
.orElseThrow(() -> new IllegalStateException("No public key for: " + psu.getId()));
return new PsuPaymentAccess(psu, aspsp, psuEncryption.forPublicKey(prvKey.getId(), prvKey.getPubKey().getKey()), session, paymentRepository);
} | 3.68 |
morf_Function_countDistinct | /**
* Helper method to create an instance of the "count(distinct)" SQL function.
*
* @param field the field to evaluate in the count function.
*
* @return an instance of a count function
*/
public static Function countDistinct(AliasedField field) {
return new Function(FunctionType.COUNT_DISTINCT, field);
} | 3.68 |
framework_InMemoryDataProviderHelpers_filteringByCaseInsensitiveString | /**
* Wraps a given data provider so that its filter tests the given predicate
* with the lower case string provided by the given value provider.
*
* @param dataProvider
* the data provider to wrap
* @param valueProvider
* the value provider for providing string values to filter
* @param predicate
* the predicate to use for comparing the resulting lower case
* strings
* @param localeSupplier
* the locale to use when converting strings to lower case
* @return the wrapped data provider
*/
public static <T> DataProvider<T, String> filteringByCaseInsensitiveString(
InMemoryDataProvider<T> dataProvider,
ValueProvider<T, String> valueProvider,
SerializableBiPredicate<String, String> predicate,
SerializableSupplier<Locale> localeSupplier) {
// Only assert since these are only passed from our own code
assert predicate != null;
assert localeSupplier != null;
return filteringByIgnoreNull(dataProvider, valueProvider,
(itemString, filterString) -> {
Locale locale = localeSupplier.get();
assert locale != null;
return predicate.test(itemString.toLowerCase(locale),
filterString.toLowerCase(locale));
});
} | 3.68 |
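The same idea without Vaadin's data-provider machinery: lower-case both sides in a caller-supplied locale before applying the predicate. The Person record and the "contains" predicate below are illustrative.

```java
import java.util.List;
import java.util.Locale;
import java.util.function.BiPredicate;
import java.util.stream.Collectors;

public final class CaseInsensitiveFilterSketch {
    record Person(String name) {}

    static List<Person> filter(List<Person> items, String filter,
                               BiPredicate<String, String> predicate, Locale locale) {
        return items.stream()
                .filter(p -> predicate.test(p.name().toLowerCase(locale), filter.toLowerCase(locale)))
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<Person> people = List.of(new Person("Ada"), new Person("Grace"));
        // "contains" predicate, matching what ComboBox-style filtering typically does
        System.out.println(filter(people, "GR", String::contains, Locale.ROOT)); // [Person[name=Grace]]
    }
}
```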
framework_FocusableComplexPanel_addKeyDownHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasKeyDownHandlers#addKeyDownHandler(
* com.google.gwt.event.dom.client.KeyDownHandler)
*/
@Override
public HandlerRegistration addKeyDownHandler(KeyDownHandler handler) {
return addDomHandler(handler, KeyDownEvent.getType());
} | 3.68 |
hbase_RegionCoprocessorHost_preMemStoreCompactionCompactScannerOpen | /**
* Invoked before creating the StoreScanner for in-memory compaction.
*/
public ScanInfo preMemStoreCompactionCompactScannerOpen(HStore store) throws IOException {
CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo());
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preMemStoreCompactionCompactScannerOpen(this, store, builder);
}
});
return builder.build();
} | 3.68 |
framework_AbstractSelect_isNewItemsAllowed | /**
* Checks whether the select allows the user to add new options. If true, the new
* options can be added to the Container. The text entered by the user is
* used as the item id. Note that the data source must allow adding new items.
*
* @return True if additions are allowed.
*/
public boolean isNewItemsAllowed() {
return allowNewOptions;
} | 3.68 |
zxing_OpenCameraInterface_open | /**
* Opens the requested camera with {@link Camera#open(int)}, if one exists.
*
* @param cameraId camera ID of the camera to use. A negative value
* or {@link #NO_REQUESTED_CAMERA} means "no preference", in which case a rear-facing
* camera is returned if possible or else any camera
* @return handle to {@link OpenCamera} that was opened
*/
public static OpenCamera open(int cameraId) {
int numCameras = Camera.getNumberOfCameras();
if (numCameras == 0) {
Log.w(TAG, "No cameras!");
return null;
}
if (cameraId >= numCameras) {
Log.w(TAG, "Requested camera does not exist: " + cameraId);
return null;
}
if (cameraId <= NO_REQUESTED_CAMERA) {
cameraId = 0;
while (cameraId < numCameras) {
Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
Camera.getCameraInfo(cameraId, cameraInfo);
if (CameraFacing.values()[cameraInfo.facing] == CameraFacing.BACK) {
break;
}
cameraId++;
}
if (cameraId == numCameras) {
Log.i(TAG, "No camera facing " + CameraFacing.BACK + "; returning camera #0");
cameraId = 0;
}
}
Log.i(TAG, "Opening camera #" + cameraId);
Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
Camera.getCameraInfo(cameraId, cameraInfo);
Camera camera = Camera.open(cameraId);
if (camera == null) {
return null;
}
return new OpenCamera(cameraId,
camera,
CameraFacing.values()[cameraInfo.facing],
cameraInfo.orientation);
} | 3.68 |
cron-utils_CronDefinitionBuilder_quartz | /**
* Creates CronDefinition instance matching Quartz specification.
*
* <p>The cron expression is expected to be a string comprised of 6 or 7
* fields separated by white space. Fields can contain any of the allowed
* values, along with various combinations of the allowed special characters
* for that field. The fields are as follows:
*
* <table style="width:100%">
* <tr>
* <th>Field Name</th>
* <th>Mandatory</th>
* <th>Allowed Values</th>
* <th>Allowed Special Characters</th>
* </tr>
* <tr>
* <td>Seconds</td>
* <td>YES</td>
* <td>0-59</td>
* <td>* , - /</td>
* </tr>
* <tr>
* <td>Minutes</td>
* <td>YES</td>
* <td>0-59</td>
* <td>* , - /</td>
* </tr>
* <tr>
* <td>Hours</td>
* <td>YES</td>
* <td>0-23</td>
* <td>* , - /</td>
* </tr>
* <tr>
* <td>Day of month</td>
* <td>YES</td>
* <td>1-31</td>
* <td>* ? , - / L W</td>
* </tr>
* <tr>
* <td>Month</td>
* <td>YES</td>
* <td>1-12 or JAN-DEC</td>
* <td>* , -</td>
* </tr>
* <tr>
* <td>Day of week</td>
* <td>YES</td>
* <td>1-7 or SUN-SAT</td>
* <td>* ? , - / L #</td>
* </tr>
* <tr>
* <td>Year</td>
* <td>NO</td>
* <td>empty, 1970-2099</td>
* <td>* , - /</td>
* </tr>
* </table>
*
* <p>Thus in general Quartz cron expressions are as follows:
*
* <p>S M H DoM M DoW [Y]
*
* @return {@link CronDefinition} instance, never {@code null}
*/
private static CronDefinition quartz() {
return CronDefinitionBuilder.defineCron()
.withSeconds().withValidRange(0, 59).and()
.withMinutes().withValidRange(0, 59).and()
.withHours().withValidRange(0, 23).and()
.withDayOfMonth().withValidRange(1, 31).supportsL().supportsW().supportsLW().supportsQuestionMark().and()
.withMonth().withValidRange(1, 12).and()
.withDayOfWeek().withValidRange(1, 7).withMondayDoWValue(2).supportsHash().supportsL().supportsQuestionMark().and()
.withYear().withValidRange(1970, 2099).withStrictRange().optional().and()
.withCronValidation(CronConstraintsFactory.ensureEitherDayOfWeekOrDayOfMonth())
.instance();
} | 3.68 |
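A usage sketch against the public cron-utils entry points (assumed here to be CronDefinitionBuilder.instanceDefinitionFor and CronParser); the expression means "at 12:00:00 every day".

```java
import com.cronutils.model.CronType;
import com.cronutils.model.definition.CronDefinition;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;

public final class QuartzCronSketch {
    public static void main(String[] args) {
        // Obtain the Quartz definition built by the private quartz() factory above.
        CronDefinition quartz = CronDefinitionBuilder.instanceDefinitionFor(CronType.QUARTZ);
        CronParser parser = new CronParser(quartz);
        // S M H DoM M DoW [Y] -- fire at 12:00:00 every day.
        System.out.println(parser.parse("0 0 12 * * ?").validate().asString());
    }
}
```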
pulsar_PulsarClientException_getPreviousExceptions | /**
* Get the collection of previous exceptions which have caused retries
* for this operation.
*
* @return a collection of exception, ordered as they occurred
*/
public Collection<Throwable> getPreviousExceptions() {
return this.previous;
} | 3.68 |
hbase_MetricSampleQuantiles_compress | /**
* Try to remove extraneous items from the set of sampled items. This checks if an item is
* unnecessary based on the desired error bounds, and merges it with the adjacent item if it is.
*/
private void compress() {
if (samples.size() < 2) {
return;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem prev = null;
SampleItem next = it.next();
while (it.hasNext()) {
prev = next;
next = it.next();
if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) {
next.g += prev.g;
// Remove prev. it.remove() kills the last thing returned.
it.previous();
it.previous();
it.remove();
// it.next() is now equal to next, skip it back forward again
it.next();
}
}
} | 3.68 |
hbase_StoreFileTrackerValidationUtils_checkForCreateTable | /**
* Pre check when creating a new table.
* <p/>
* For now, only make sure that we do not use {@link Trackers#MIGRATION} for newly created tables.
* @throws IOException when there are check errors, the upper layer should fail the
* {@code CreateTableProcedure}.
*/
public static void checkForCreateTable(Configuration conf, TableDescriptor table)
throws IOException {
for (ColumnFamilyDescriptor family : table.getColumnFamilies()) {
checkForNewFamily(conf, table, family);
}
} | 3.68 |
flink_AsyncWaitOperator_timerTriggered | /** Rewrite the timeout process to deal with retry state. */
private void timerTriggered() throws Exception {
if (!resultHandler.completed.get()) {
// cancel delayed retry timer first
cancelRetryTimer();
// force reset retryAwaiting to prevent the handler to trigger retry unnecessarily
retryAwaiting.set(false);
userFunction.timeout(resultHandler.inputRecord.getValue(), this);
}
} | 3.68 |
hadoop_CacheStats_roundUpPageSize | /**
* Round up to the OS page size.
*/
long roundUpPageSize(long count) {
return usedBytesCount.rounder.roundUp(count);
} | 3.68 |
hbase_ClusterStatusPublisher_getDeadServers | /**
* Get the servers which died since a given timestamp. Protected so that it can be overridden by
* tests.
*/
protected List<Pair<ServerName, Long>> getDeadServers(long since) {
if (master.getServerManager() == null) {
return Collections.emptyList();
}
return master.getServerManager().getDeadServers().copyDeadServersSince(since);
} | 3.68 |
flink_NetUtils_getAvailablePort | /**
* Find a non-occupied port.
*
* @return A non-occupied port.
*/
public static Port getAvailablePort() {
for (int i = 0; i < 50; i++) {
try (ServerSocket serverSocket = new ServerSocket(0)) {
int port = serverSocket.getLocalPort();
if (port != 0) {
FileLock fileLock = new FileLock(NetUtils.class.getName() + port);
if (fileLock.tryLock()) {
return new Port(port, fileLock);
} else {
fileLock.unlockAndDestroy();
}
}
} catch (IOException ignored) {
}
}
throw new RuntimeException("Could not find a free permitted port on the machine.");
} | 3.68 |
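The core trick, stripped of Flink's FileLock coordination: binding a ServerSocket to port 0 asks the OS for any free ephemeral port. A minimal sketch:

```java
import java.io.IOException;
import java.net.ServerSocket;

public final class FreePortSketch {
    static int findFreePort() throws IOException {
        try (ServerSocket socket = new ServerSocket(0)) {
            return socket.getLocalPort(); // the ephemeral port the OS handed out
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println("free port: " + findFreePort());
    }
}
```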
hadoop_EmptyIOStatisticsContextImpl_snapshot | /**
* Create a new empty snapshot.
* A new one is always created for isolation.
*
* @return a statistics snapshot
*/
@Override
public IOStatisticsSnapshot snapshot() {
return new IOStatisticsSnapshot();
} | 3.68 |
flink_SortMergeResultPartitionReadScheduler_release | /**
* Releases this read scheduler and returns a {@link CompletableFuture} which will be completed
* when all resources are released.
*/
CompletableFuture<?> release() {
List<SortMergeSubpartitionReader> pendingReaders;
synchronized (lock) {
if (isReleased) {
return releaseFuture;
}
isReleased = true;
failedReaders.addAll(allReaders);
pendingReaders = new ArrayList<>(allReaders);
mayNotifyReleased();
}
failSubpartitionReaders(
pendingReaders,
new IllegalStateException("Result partition has been already released."));
return releaseFuture;
} | 3.68 |
rocketmq-connect_Base64Util_base64Decode | /**
* Decodes a Base64-encoded string.
*
* @param in the Base64-encoded input
* @return the decoded bytes, or {@code null} if the input is empty
*/
public static byte[] base64Decode(String in) {
if (StringUtils.isEmpty(in)) {
return null;
}
return Base64.getDecoder().decode(in);
} | 3.68 |
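A round-trip check using only java.util.Base64, which is what the helper wraps; the string literal is arbitrary.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public final class Base64Sketch {
    public static void main(String[] args) {
        String encoded = Base64.getEncoder().encodeToString("hello".getBytes(StandardCharsets.UTF_8));
        byte[] decoded = Base64.getDecoder().decode(encoded);
        System.out.println(new String(decoded, StandardCharsets.UTF_8)); // hello
    }
}
```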
hadoop_TimelineEntity_getStartTime | /**
* Get the start time of the entity
*
* @return the start time of the entity
*/
@XmlElement(name = "starttime")
public Long getStartTime() {
return startTime;
} | 3.68 |
hadoop_TimelineEntity_setRelatedEntities | /**
* Set the related entity map to the given map of related entities
*
* @param relatedEntities
* a map of related entities
*/
public void setRelatedEntities(
Map<String, Set<String>> relatedEntities) {
this.relatedEntities = TimelineServiceHelper.mapCastToHashMap(
relatedEntities);
} | 3.68 |
flink_MetricRegistryConfiguration_fromConfiguration | /**
* Create a metric registry configuration object from the given {@link Configuration}.
*
* @param configuration to generate the metric registry configuration from
* @param maximumFrameSize the maximum message size that the RPC system supports
* @return Metric registry configuration generated from the configuration
*/
public static MetricRegistryConfiguration fromConfiguration(
Configuration configuration, long maximumFrameSize) {
ScopeFormats scopeFormats;
try {
scopeFormats = ScopeFormats.fromConfig(configuration);
} catch (Exception e) {
LOG.warn("Failed to parse scope format, using default scope formats", e);
scopeFormats = ScopeFormats.fromConfig(new Configuration());
}
char delim;
try {
delim = configuration.getString(MetricOptions.SCOPE_DELIMITER).charAt(0);
} catch (Exception e) {
LOG.warn("Failed to parse delimiter, using default delimiter.", e);
delim = '.';
}
// padding to account for serialization overhead
final long messageSizeLimitPadding = 256;
return new MetricRegistryConfiguration(
scopeFormats, delim, maximumFrameSize - messageSizeLimitPadding);
} | 3.68 |
druid_DruidAbstractDataSource_setInitExceptionThrow | /**
* @since 1.1.11
*/
public void setInitExceptionThrow(boolean initExceptionThrow) {
this.initExceptionThrow = initExceptionThrow;
} | 3.68 |
framework_TabSheet_addSelectedTabChangeListener | /**
* Adds a tab selection listener.
*
* @see Registration
*
* @param listener
* the Listener to be added, not null
* @return a registration object for removing the listener
* @since 8.0
*/
public Registration addSelectedTabChangeListener(
SelectedTabChangeListener listener) {
return addListener(SelectedTabChangeEvent.class, listener,
SELECTED_TAB_CHANGE_METHOD);
} | 3.68 |
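A hedged Vaadin 8 usage sketch: register a lambda listener and detach it later through the returned Registration. The surrounding UI wiring is omitted and the notification text is illustrative.

```java
import com.vaadin.shared.Registration;
import com.vaadin.ui.Label;
import com.vaadin.ui.Notification;
import com.vaadin.ui.TabSheet;

public final class TabListenerSketch {
    static Registration wire(TabSheet tabs) {
        tabs.addTab(new Label("One"), "Tab 1");
        tabs.addTab(new Label("Two"), "Tab 2");
        // SelectedTabChangeListener is a single-method interface, so a lambda works.
        return tabs.addSelectedTabChangeListener(
                event -> Notification.show("Selected: " + tabs.getSelectedTab()));
    }

    static void unwire(Registration registration) {
        registration.remove(); // stop listening once no longer needed
    }
}
```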
pulsar_NonPersistentSubscription_delete | /**
* Deletes the subscription by closing and deleting its managed cursor. Handles the unsubscribe call from the admin layer.
*
* @param closeIfConsumersConnected
*            Flag indicating whether to explicitly close connected consumers before trying to delete the
*            subscription. If any consumer is connected to it and this flag is disabled, the operation fails.
* @return CompletableFuture indicating the completion of delete operation
*/
private CompletableFuture<Void> delete(boolean closeIfConsumersConnected) {
CompletableFuture<Void> deleteFuture = new CompletableFuture<>();
log.info("[{}][{}] Unsubscribing", topicName, subName);
CompletableFuture<Void> closeSubscriptionFuture = new CompletableFuture<>();
if (closeIfConsumersConnected) {
this.disconnect().thenRun(() -> {
closeSubscriptionFuture.complete(null);
}).exceptionally(ex -> {
log.error("[{}][{}] Error disconnecting and closing subscription", topicName, subName, ex);
closeSubscriptionFuture.completeExceptionally(ex);
return null;
});
} else {
this.close().thenRun(() -> {
closeSubscriptionFuture.complete(null);
}).exceptionally(exception -> {
log.error("[{}][{}] Error closing subscription", topicName, subName, exception);
closeSubscriptionFuture.completeExceptionally(exception);
return null;
});
}
// cursor close handles pending delete (ack) operations
closeSubscriptionFuture.thenCompose(v -> topic.unsubscribe(subName)).thenAccept(v -> {
synchronized (this) {
(dispatcher != null ? dispatcher.close() : CompletableFuture.completedFuture(null)).thenRun(() -> {
log.info("[{}][{}] Successfully deleted subscription", topicName, subName);
deleteFuture.complete(null);
}).exceptionally(ex -> {
IS_FENCED_UPDATER.set(this, FALSE);
if (dispatcher != null) {
dispatcher.reset();
}
log.error("[{}][{}] Error deleting subscription", topicName, subName, ex);
deleteFuture.completeExceptionally(ex);
return null;
});
}
}).exceptionally(exception -> {
IS_FENCED_UPDATER.set(this, FALSE);
log.error("[{}][{}] Error deleting subscription", topicName, subName, exception);
deleteFuture.completeExceptionally(exception);
return null;
});
return deleteFuture;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_getRecordIndexUpdates | /**
* Return records that represent updates to the record index due to a write operation on the dataset.
*
* @param writeStatuses {@code WriteStatus} from the write operation
*/
private HoodieData<HoodieRecord> getRecordIndexUpdates(HoodieData<WriteStatus> writeStatuses) {
HoodiePairData<String, HoodieRecordDelegate> recordKeyDelegatePairs = null;
// if update partition path is true, chances that we might get two records (1 delete in older partition and 1 insert to new partition)
// and hence we might have to do reduce By key before ingesting to RLI partition.
if (dataWriteConfig.getRecordIndexUpdatePartitionPath()) {
recordKeyDelegatePairs = writeStatuses.map(writeStatus -> writeStatus.getWrittenRecordDelegates().stream()
.map(recordDelegate -> Pair.of(recordDelegate.getRecordKey(), recordDelegate)))
.flatMapToPair(Stream::iterator)
.reduceByKey((recordDelegate1, recordDelegate2) -> {
if (recordDelegate1.getRecordKey().equals(recordDelegate2.getRecordKey())) {
if (!recordDelegate1.getNewLocation().isPresent() && !recordDelegate2.getNewLocation().isPresent()) {
throw new HoodieIOException("Both version of records do not have location set. Record V1 " + recordDelegate1.toString()
+ ", Record V2 " + recordDelegate2.toString());
}
if (recordDelegate1.getNewLocation().isPresent()) {
return recordDelegate1;
} else {
// if record delegate 1 does not have location set, record delegate 2 should have location set.
return recordDelegate2;
}
} else {
return recordDelegate1;
}
}, Math.max(1, writeStatuses.getNumPartitions()));
} else {
// if update partition path = false, we should get only one entry per record key.
recordKeyDelegatePairs = writeStatuses.flatMapToPair(
(SerializableFunction<WriteStatus, Iterator<? extends Pair<String, HoodieRecordDelegate>>>) writeStatus
-> writeStatus.getWrittenRecordDelegates().stream().map(rec -> Pair.of(rec.getRecordKey(), rec)).iterator());
}
return recordKeyDelegatePairs
.map(writeStatusRecordDelegate -> {
HoodieRecordDelegate recordDelegate = writeStatusRecordDelegate.getValue();
HoodieRecord hoodieRecord = null;
Option<HoodieRecordLocation> newLocation = recordDelegate.getNewLocation();
if (newLocation.isPresent()) {
if (recordDelegate.getCurrentLocation().isPresent()) {
// This is an update, no need to update index if the location has not changed
// newLocation should have the same fileID as currentLocation. The instantTimes differ as newLocation's
// instantTime refers to the current commit which was completed.
if (!recordDelegate.getCurrentLocation().get().getFileId().equals(newLocation.get().getFileId())) {
final String msg = String.format("Detected update in location of record with key %s from %s "
+ " to %s. The fileID should not change.",
recordDelegate, recordDelegate.getCurrentLocation().get(), newLocation.get());
LOG.error(msg);
throw new HoodieMetadataException(msg);
}
// for updates, we can skip updating RLI partition in MDT
} else {
hoodieRecord = HoodieMetadataPayload.createRecordIndexUpdate(
recordDelegate.getRecordKey(), recordDelegate.getPartitionPath(),
newLocation.get().getFileId(), newLocation.get().getInstantTime(), dataWriteConfig.getWritesFileIdEncoding());
}
} else {
// Delete existing index for a deleted record
hoodieRecord = HoodieMetadataPayload.createRecordIndexDelete(recordDelegate.getRecordKey());
}
return hoodieRecord;
})
.filter(Objects::nonNull);
} | 3.68 |
morf_DatabaseMetaDataProvider_createIndexFrom | /**
* Creates an index from the given info.
*
* @param indexName The name of the index.
* @param isUnique Whether to mark this index as unique.
* @param columnNames The column names for the index.
* @return An {@link IndexBuilder} for the index.
*/
protected static Index createIndexFrom(RealName indexName, boolean isUnique, List<RealName> columnNames) {
List<String> realColumnNames = columnNames.stream().map(RealName::getRealName).collect(Collectors.toList());
IndexBuilder index = SchemaUtils.index(indexName.getRealName()).columns(realColumnNames);
return isUnique ? index.unique() : index;
} | 3.68 |
flink_FlinkContainersSettings_getLogProperties | /**
* Gets logging properties.
*
* @return The logging properties.
*/
public Properties getLogProperties() {
return logProperties;
} | 3.68 |
hudi_HoodieSyncClient_getPartitionEvents | /**
* Iterate over the storage partitions and find if there are any new partitions that need to be added or updated.
* Generate a list of PartitionEvent based on the changes required.
*/
public List<PartitionEvent> getPartitionEvents(List<Partition> partitionsInMetastore,
List<String> writtenPartitionsOnStorage,
Set<String> droppedPartitionsOnStorage) {
Map<String, String> paths = getPartitionValuesToPathMapping(partitionsInMetastore);
List<PartitionEvent> events = new ArrayList<>();
for (String storagePartition : writtenPartitionsOnStorage) {
Path storagePartitionPath = FSUtils.getPartitionPath(config.getString(META_SYNC_BASE_PATH), storagePartition);
String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
// Check if the partition values or if hdfs path is the same
List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
if (droppedPartitionsOnStorage.contains(storagePartition)) {
events.add(PartitionEvent.newPartitionDropEvent(storagePartition));
} else {
if (!storagePartitionValues.isEmpty()) {
String storageValue = String.join(", ", storagePartitionValues);
if (!paths.containsKey(storageValue)) {
events.add(PartitionEvent.newPartitionAddEvent(storagePartition));
} else if (!paths.get(storageValue).equals(fullStoragePartitionPath)) {
events.add(PartitionEvent.newPartitionUpdateEvent(storagePartition));
}
}
}
}
return events;
} | 3.68 |
hbase_MasterObserver_postSnapshot | /**
* Called after the snapshot operation has been requested. Called as part of snapshot RPC call.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor for the snapshot
* @param tableDescriptor the TableDescriptor of the table to snapshot
*/
default void postSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException {
} | 3.68 |
morf_ExistingViewHashLoader_loadViewHashes | /**
* Loads the hashes for the deployed views, or empty if the hashes cannot be loaded
* (e.g. if the deployed views table does not exist in the existing schema).
*
* @param schema The existing database schema.
* @return The deployed view hashes.
*/
Optional<Map<String, String>> loadViewHashes(Schema schema) {
if (!schema.tableExists(DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME)) {
return Optional.empty();
}
Map<String, String> result = Maps.newHashMap();
// Query the database to load DeployedViews
SelectStatement upgradeAuditSelect = select(field("name"), field("hash")).from(tableRef(DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME));
String sql = dialect.convertStatementToSQL(upgradeAuditSelect);
if (log.isDebugEnabled()) log.debug("Loading " + DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME + " with SQL [" + sql + "]");
try (Connection connection = dataSource.getConnection();
java.sql.Statement statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql)) {
while (resultSet.next()) {
// There was previously a bug in Deployment which wrote records to
// the DeployedViews table without upper-casing them first. Subsequent
// Upgrades would write records in upper case but the original records
// remained and could potentially be picked up here depending on
// DB ordering. We make sure we ignore records where there are
// duplicates and one of them is not uppercased.
String dbViewName = resultSet.getString(1);
String viewName = dbViewName.toUpperCase();
if (!result.containsKey(viewName) || dbViewName.equals(viewName)) {
result.put(viewName, resultSet.getString(2));
}
}
} catch (SQLException e) {
throw new RuntimeSqlException("Failed to load deployed views. SQL: [" + sql + "]", e);
}
return Optional.of(Collections.unmodifiableMap(result));
} | 3.68 |
flink_FileInputFormat_initDefaultsFromConfiguration | /**
* Initialize defaults for input format. Needs to be a static method because it is configured
* for local cluster execution.
*
* @param configuration The configuration to load defaults from
*/
private static void initDefaultsFromConfiguration(Configuration configuration) {
final long to =
configuration.getLong(
ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY,
ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
if (to < 0) {
LOG.error(
"Invalid timeout value for filesystem stream opening: "
+ to
+ ". Using default value of "
+ ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
DEFAULT_OPENING_TIMEOUT = ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT;
} else if (to == 0) {
DEFAULT_OPENING_TIMEOUT = 300000; // 5 minutes
} else {
DEFAULT_OPENING_TIMEOUT = to;
}
} | 3.68 |
dubbo_SimpleReferenceCache_get | /**
* Check and return existing ReferenceConfig and its corresponding proxy instance.
*
* @param type service interface class
* @param <T> service interface type
* @return the existing proxy instance of the same interface definition
*/
@Override
@SuppressWarnings("unchecked")
public <T> T get(Class<T> type) {
List<ReferenceConfigBase<?>> referenceConfigBases = referenceTypeMap.get(type);
if (CollectionUtils.isNotEmpty(referenceConfigBases)) {
return (T) referenceConfigBases.get(0).get();
}
return null;
} | 3.68 |
flink_SinkModifyOperation_getTargetColumns | /** Returns null when no column list is specified. */
@Nullable
public int[][] getTargetColumns() {
return targetColumns;
} | 3.68 |
flink_KubernetesUtils_resolveDNSPolicy | /**
* Resolves the effective DNS policy value. Returns DNS_POLICY_HOSTNETWORK if host network is
* enabled. Otherwise, checks whether a DNS policy is overridden in the pod template.
*
* @param dnsPolicy DNS policy defined in pod template spec
* @param hostNetworkEnabled Host network enabled or not
* @return the resolved value
*/
public static String resolveDNSPolicy(String dnsPolicy, boolean hostNetworkEnabled) {
if (hostNetworkEnabled) {
return DNS_POLICY_HOSTNETWORK;
}
if (!StringUtils.isNullOrWhitespaceOnly(dnsPolicy)) {
return dnsPolicy;
}
return DNS_POLICY_DEFAULT;
} | 3.68 |
flink_BlobUtils_getStorageLocationPath | /**
* Returns the path for the given blob key.
*
* <p>The returned path can be used with the (local or HA) BLOB store file system back-end for
* recovery purposes and follows the same scheme as {@link #getStorageLocation(File, JobID,
* BlobKey)}.
*
* @param storageDir storage directory used be the BLOB service
* @param key the key identifying the BLOB
* @param jobId ID of the job for the incoming files
* @return the path to the given BLOB
*/
static String getStorageLocationPath(String storageDir, @Nullable JobID jobId, BlobKey key) {
if (jobId == null) {
// format: $base/no_job/blob_$key
return String.format(
"%s/%s/%s%s", storageDir, NO_JOB_DIR_PREFIX, BLOB_FILE_PREFIX, key.toString());
} else {
// format: $base/job_$jobId/blob_$key
return String.format(
"%s/%s%s/%s%s",
storageDir, JOB_DIR_PREFIX, jobId.toString(), BLOB_FILE_PREFIX, key.toString());
}
} | 3.68 |
flink_BoundedBlockingSubpartitionReader_notifyDataAvailable | /**
* This method is actually only meaningful for the {@link BoundedBlockingSubpartitionType#FILE}.
*
* <p>For the other types the {@link #nextBuffer} can never be set to null, so there is no need
* to notify availability via this method. The implementation is nevertheless compatible with the
* other types even if it is called by mistake.
*/
@Override
public void notifyDataAvailable() {
if (nextBuffer == null) {
assert dataReader != null;
try {
nextBuffer = dataReader.nextBuffer();
} catch (IOException ex) {
// this exception wrapper is only for avoiding throwing IOException explicitly
// in relevant interface methods
throw new IllegalStateException("No data available while reading", ex);
}
// next buffer is null indicates the end of partition
if (nextBuffer != null) {
availabilityListener.notifyDataAvailable();
}
}
} | 3.68 |
hadoop_NativeTaskOutputFiles_getSpillIndexFile | /**
* Return a local map spill index file created earlier
*
* @param spillNumber the number
*/
public Path getSpillIndexFile(int spillNumber) throws IOException {
String path = String
.format(SPILL_INDEX_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.68 |
flink_CheckpointProperties_discardOnSubsumed | /**
* Returns whether the checkpoint should be discarded when it is subsumed.
*
* <p>A checkpoint is subsumed when the maximum number of retained checkpoints is reached and a
* more recent checkpoint completes.
*
* @return <code>true</code> if the checkpoint should be discarded when it is subsumed; <code>
* false</code> otherwise.
* @see CompletedCheckpointStore
*/
boolean discardOnSubsumed() {
return discardSubsumed;
} | 3.68 |
hbase_ZKTableArchiveClient_getArchiveZNode | /**
* @param conf conf to read for the base archive node
* @param zooKeeper zookeeper to used for building the full path
* @return the znode for long-term archival of a table
*/
public static String getArchiveZNode(Configuration conf, ZKWatcher zooKeeper) {
return ZNodePaths.joinZNode(zooKeeper.getZNodePaths().baseZNode, conf
.get(ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT));
} | 3.68 |
framework_DragAndDropWrapper_setHTML5DataFlavor | /**
* Sets the data flavors available when the DragAndDropWrapper is used to start an
* HTML5-style drag. Most commonly the "Text" flavor should be set.
* Multiple data types can be set.
*
* @param type
* the string identifier of the drag "payload". E.g. "Text" or
* "text/html"
* @param value
* the value
*/
public void setHTML5DataFlavor(String type, Object value) {
html5DataFlavors.put(type, value);
markAsDirty();
} | 3.68 |
flink_SupportsRowLevelUpdate_getRowLevelUpdateMode | /**
* Planner will rewrite the update statement to a query based on the {@link
* RowLevelUpdateMode}, keeping the query of the update unchanged by default (in `UPDATED_ROWS`
* mode), or changing the query to union the updated rows and the other rows (in `ALL_ROWS`
* mode).
*
* <p>Take the following SQL as an example:
*
* <pre>{@code
* UPDATE t SET x = 1 WHERE y = 2;
* }</pre>
*
* <p>If returns {@link RowLevelUpdateMode#UPDATED_ROWS}, the sink will get the update after
* rows which match the filter [y = 2].
*
* <p>If returns {@link RowLevelUpdateMode#ALL_ROWS}, the sink will get both the update
* after rows which match the filter [y = 2] and the other rows that don't match the filter
* [y = 2].
*
* <p>Note: All rows will have RowKind#UPDATE_AFTER when RowLevelUpdateMode is UPDATED_ROWS,
* and RowKind#INSERT when RowLevelUpdateMode is ALL_ROWS.
*/
default RowLevelUpdateMode getRowLevelUpdateMode() {
return RowLevelUpdateMode.UPDATED_ROWS;
} | 3.68 |
framework_VCalendar_areDatesEqualToSecond | /**
* Checks whether the dates are equal (using second resolution).
*
* @param date1
*            The first date to compare
* @param date2
*            The second date to compare
* @return true if the dates are considered equal
*/
@SuppressWarnings("deprecation")
public static boolean areDatesEqualToSecond(Date date1, Date date2) {
return date1.getYear() == date2.getYear()
&& date1.getMonth() == date2.getMonth()
&& date1.getDay() == date2.getDay()
&& date1.getHours() == date2.getHours()
&& date1.getSeconds() == date2.getSeconds();
} | 3.68 |
morf_ConcatenatedField_toString | /**
*
* @see org.alfasoftware.morf.sql.element.AliasedField#toString()
*/
@Override
public String toString() {
return "CONCAT(" + StringUtils.join(fields, ", ") + ")" + super.toString();
} | 3.68 |
hbase_ZKMainServer_hasCommandLineArguments | /**
* @param args the arguments to check for command-line arguments
* @return True if command-line arguments were passed.
*/
private static boolean hasCommandLineArguments(final String[] args) {
if (hasServer(args)) {
if (args.length < 2) {
throw new IllegalStateException("-server param but no value");
}
return args.length > 2;
}
return args.length > 0;
} | 3.68 |
framework_Flash_getStandby | /**
* Returns standby.
*
* @since 7.4.1
* @return Standby string.
*/
public String getStandby() {
return getState(false).standby;
} | 3.68 |
hadoop_OBSFileSystem_isReadTransformEnabled | /**
* Gets the state of the read transform switch.
*
* @return is read transform enabled
*/
boolean isReadTransformEnabled() {
return readTransformEnable;
} | 3.68 |
hudi_HoodieFlinkCopyOnWriteTable_handleUpdate | // -------------------------------------------------------------------------
// Used for compaction
// -------------------------------------------------------------------------
@Override
public Iterator<List<WriteStatus>> handleUpdate(
String instantTime, String partitionPath, String fileId,
Map<String, HoodieRecord<T>> keyToNewRecords, HoodieBaseFile oldDataFile) throws IOException {
// these are updates
HoodieMergeHandle upsertHandle = getUpdateHandle(instantTime, partitionPath, fileId, keyToNewRecords, oldDataFile);
return handleUpdateInternal(upsertHandle, instantTime, fileId);
} | 3.68 |
flink_DelimitedInputFormat_configure | /**
* Configures this input format by reading the path to the file from the configuration and the
* string that defines the record delimiter.
*
* @param parameters The configuration object to read the parameters from.
*/
@Override
public void configure(Configuration parameters) {
super.configure(parameters);
// the if() clauses are to prevent the configure() method from
// overwriting the values set by the setters
if (Arrays.equals(delimiter, new byte[] {'\n'})) {
String delimString = parameters.getString(RECORD_DELIMITER, null);
if (delimString != null) {
setDelimiter(delimString);
}
}
// set the number of samples
if (numLineSamples == NUM_SAMPLES_UNDEFINED) {
String samplesString = parameters.getString(NUM_STATISTICS_SAMPLES, null);
if (samplesString != null) {
try {
setNumLineSamples(Integer.parseInt(samplesString));
} catch (NumberFormatException e) {
if (LOG.isWarnEnabled()) {
LOG.warn(
"Invalid value for number of samples to take: "
+ samplesString
+ ". Skipping sampling.");
}
setNumLineSamples(0);
}
}
}
} | 3.68 |
hbase_HBaseCommonTestingUtility_randomPort | /**
* Returns a random port. These ports cannot be registered with IANA and are intended for
* dynamic allocation (see http://bit.ly/dynports).
*/
private int randomPort() {
return MIN_RANDOM_PORT
+ ThreadLocalRandom.current().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
} | 3.68 |
hadoop_JsonSerDeser_save | /**
* Save an instance to a file
* @param instance instance to save
* @param file file
* @throws IOException
*/
public void save(T instance, File file) throws
IOException {
writeJsonAsBytes(instance, new FileOutputStream(file.getAbsoluteFile()));
} | 3.68 |
morf_SqlServerDialect_getColumnRepresentation | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getColumnRepresentation(org.alfasoftware.morf.metadata.DataType,
* int, int)
*/
@Override
protected String getColumnRepresentation(DataType dataType, int width, int scale) {
if (needsCollation(dataType)) {
return String.format("%s %s", getInternalColumnRepresentation(dataType, width, scale), COLLATE);
}
return getInternalColumnRepresentation(dataType, width, scale);
} | 3.68 |
flink_IntervalJoinOperator_processElement1 | /**
* Process a {@link StreamRecord} from the left stream. Whenever an {@link StreamRecord} arrives
* at the left stream, it will get added to the left buffer. Possible join candidates for that
* element will be looked up from the right buffer and if the pair lies within the user defined
* boundaries, it gets passed to the {@link ProcessJoinFunction}.
*
* @param record An incoming record to be joined
* @throws Exception Can throw an Exception during state access
*/
@Override
public void processElement1(StreamRecord<T1> record) throws Exception {
processElement(record, leftBuffer, rightBuffer, lowerBound, upperBound, true);
} | 3.68 |
framework_AbstractSplitPanelConnector_handleSingleComponentMove | /**
* Handles the case when there is only one child component and that
* component is moved between first <-> second. This does not trigger a
* hierarchy change event as the list of children contains the same
* component in both cases.
*/
private void handleSingleComponentMove() {
if (getChildComponents().size() == 1) {
Widget stateFirstChild = null;
Widget stateSecondChild = null;
if (getState().firstChild != null) {
stateFirstChild = ((ComponentConnector) getState().firstChild)
.getWidget();
}
if (getState().secondChild != null) {
stateSecondChild = ((ComponentConnector) getState().secondChild)
.getWidget();
}
if (stateFirstChild == getWidget().getSecondWidget()
|| stateSecondChild == getWidget().getFirstWidget()) {
handleHierarchyChange();
}
}
} | 3.68 |
framework_LayoutDemo_fillLayout | /**
* Adds multiple demo components to the given layout.
*
* @param layout
* where components are added
* @param numberOfComponents
* to add
*/
private void fillLayout(Layout layout, int numberOfComponents) {
for (int i = 1; i <= numberOfComponents; i++) {
layout.addComponent(getExampleComponent(Integer.toString(i)));
}
} | 3.68 |
streampipes_Protocols_mqtt | /**
* Defines the transport protocol MQTT used by a data stream at runtime.
*
* @param mqttHost The hostname of any MQTT broker
* @param mqttPort The port of any MQTT broker
* @param topic The topic identifier
* @return The {@link org.apache.streampipes.model.grounding.MqttTransportProtocol}
* containing URL and topic where data arrives.
*/
public static MqttTransportProtocol mqtt(String mqttHost, Integer mqttPort, String topic) {
return new MqttTransportProtocol(mqttHost, mqttPort, topic);
} | 3.68 |
hbase_MergeTableRegionsProcedure_preMergeRegions | /**
* Pre merge region action
* @param env MasterProcedureEnv
**/
private void preMergeRegions(final MasterProcedureEnv env) throws IOException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preMergeRegionsAction(regionsToMerge, getUser());
}
// TODO: Clean up split and merge. Currently all over the place.
try {
env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion);
} catch (QuotaExceededException e) {
// TODO: why is this here? merge requests can be submitted by actors other than the normalizer
env.getMasterServices().getRegionNormalizerManager()
.planSkipped(NormalizationPlan.PlanType.MERGE);
throw e;
}
} | 3.68 |
hbase_CatalogJanitor_getLastReport | /** Returns the last published Report produced by the last successful scan of hbase:meta. */
public CatalogJanitorReport getLastReport() {
return this.lastReport;
} | 3.68 |
hbase_MetaTableAccessor_makeDeleteFromRegionInfo | /**
* Generates and returns a Delete containing the region info for the catalog table
*/
public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
if (regionInfo == null) {
throw new IllegalArgumentException("Can't make a delete for null region");
}
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException(
"Can't make delete for a replica region. Operate on the primary");
}
Delete delete = new Delete(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo));
delete.addFamily(HConstants.CATALOG_FAMILY, ts);
return delete;
} | 3.68 |
hbase_RestCsrfPreventionFilter_getFilterParams | /**
* Constructs a mapping of configuration properties to be used for filter initialization. The
* mapping includes all properties that start with the specified configuration prefix. Property
* names in the mapping are trimmed to remove the configuration prefix.
* @param conf configuration to read
* @param confPrefix configuration prefix
* @return mapping of configuration properties to be used for filter initialization
*/
public static Map<String, String> getFilterParams(Configuration conf, String confPrefix) {
Map<String, String> filterConfigMap = new HashMap<>();
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(confPrefix)) {
String value = conf.get(name);
name = name.substring(confPrefix.length());
filterConfigMap.put(name, value);
}
}
return filterConfigMap;
} | 3.68 |
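The same prefix-stripping idea over a plain Map, without Hadoop's Configuration; the property names below are made up for the example.

```java
import java.util.HashMap;
import java.util.Map;

public final class FilterParamsSketch {
    static Map<String, String> filterParams(Map<String, String> conf, String prefix) {
        Map<String, String> out = new HashMap<>();
        for (Map.Entry<String, String> e : conf.entrySet()) {
            if (e.getKey().startsWith(prefix)) {
                // trim the prefix, keep the value
                out.put(e.getKey().substring(prefix.length()), e.getValue());
            }
        }
        return out;
    }

    public static void main(String[] args) {
        Map<String, String> conf = Map.of(
                "hbase.rest.csrf.custom-header", "X-XSRF-HEADER",
                "hbase.rest.port", "8080");
        System.out.println(filterParams(conf, "hbase.rest.csrf.")); // {custom-header=X-XSRF-HEADER}
    }
}
```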
flink_DataViewUtils_createDistinctViewDataType | /** Creates a special {@link DataType} for DISTINCT aggregates. */
public static DataType createDistinctViewDataType(
DataType keyDataType, int filterArgs, int filterArgsLimit) {
final DataType valueDataType;
if (filterArgs <= filterArgsLimit) {
valueDataType = DataTypes.BIGINT().notNull();
} else {
valueDataType = DataTypes.ARRAY(DataTypes.BIGINT().notNull()).bridgedTo(long[].class);
}
return MapView.newMapViewDataType(keyDataType, valueDataType);
} | 3.68 |
flink_SegmentsUtil_hash | /**
* Hashes the given segments to an int.
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param numBytes the number bytes to hash.
*/
public static int hash(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
return MurmurHashUtil.hashBytes(segments[0], offset, numBytes);
} else {
return hashMultiSeg(segments, offset, numBytes);
}
} | 3.68 |
hadoop_StartupProgressServlet_writeNumberFieldIfDefined | /**
* Writes a JSON number field only if the value is defined.
*
* @param json JsonGenerator to receive output
* @param key String key to put
* @param value long value to put
* @throws IOException if there is an I/O error
*/
private static void writeNumberFieldIfDefined(JsonGenerator json, String key,
long value) throws IOException {
if (value != Long.MIN_VALUE) {
json.writeNumberField(key, value);
}
} | 3.68 |
graphhopper_GraphHopper_setAllowWrites | /**
* Specifies if it is allowed for GraphHopper to write. E.g. for read only filesystems it is not
* possible to create a lock file and so we can avoid write locks.
*/
public GraphHopper setAllowWrites(boolean allowWrites) {
this.allowWrites = allowWrites;
return this;
} | 3.68 |
hbase_ServerManager_getVersionNumber | /**
* May return 0 when server is not online.
*/
public int getVersionNumber(ServerName serverName) {
ServerMetrics serverMetrics = onlineServers.get(serverName);
return serverMetrics != null ? serverMetrics.getVersionNumber() : 0;
} | 3.68 |
flink_OperationUtils_indent | /**
* Increases indentation for description of string of child {@link Operation}. The input can
* already contain indentation. This will increase all the indentations by one level.
*
* @param item result of {@link Operation#asSummaryString()}
* @return string with increased indentation
*/
static String indent(String item) {
return "\n"
+ OPERATION_INDENT
+ item.replace("\n" + OPERATION_INDENT, "\n" + OPERATION_INDENT + OPERATION_INDENT);
} | 3.68 |
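A standalone check of the indentation rule, assuming four spaces for OPERATION_INDENT (the real constant lives in OperationUtils): a child summary that already carries one level of indentation gains one more.

```java
public final class IndentSketch {
    static final String OPERATION_INDENT = "    "; // assumed value for this sketch

    static String indent(String item) {
        return "\n" + OPERATION_INDENT
                + item.replace("\n" + OPERATION_INDENT, "\n" + OPERATION_INDENT + OPERATION_INDENT);
    }

    public static void main(String[] args) {
        // The nested "+- Scan" line moves from one indentation level to two.
        System.out.println(indent("Project:\n    +- Scan"));
    }
}
```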
hmily_HmilyRepositoryFacade_removeHmilyParticipantUndo | /**
* Remove hmily participant undo.
*
* @param undoId the undo id
*/
public void removeHmilyParticipantUndo(final Long undoId) {
if (hmilyConfig.isPhyDeleted()) {
checkRows(hmilyRepository.removeHmilyParticipantUndo(undoId));
} else {
updateHmilyParticipantUndoStatus(undoId, HmilyActionEnum.DELETE.getCode());
}
} | 3.68 |
flink_RestServerEndpointConfiguration_getMaxContentLength | /**
* Returns the max content length that the REST server endpoint could handle.
*
* @return max content length that the REST server endpoint could handle
*/
public int getMaxContentLength() {
return maxContentLength;
} | 3.68 |
hudi_HoodieWriteHandle_doWrite | /**
* Perform the actual writing of the given record into the backing file.
*/
protected void doWrite(HoodieRecord record, Schema schema, TypedProperties props) {
// NO_OP
} | 3.68 |
flink_WindowedStream_trigger | /** Sets the {@code Trigger} that should be used to trigger window emission. */
@PublicEvolving
public WindowedStream<T, K, W> trigger(Trigger<? super T, ? super W> trigger) {
builder.trigger(trigger);
return this;
} | 3.68 |
flink_DateTimeUtils_parseFraction | /**
* Parses a fraction of up to three significant digits, multiplying the first character by 100,
* the second character by 10 and the third character by 1, then rounding the result up if the
* following character, when present, is {@code '5'} or greater.
*
* <p>For example, {@code parseFraction("1234")} yields {@code 123}.
*/
private static int parseFraction(String v) {
int multiplier = 100;
int r = 0;
for (int i = 0; i < v.length(); i++) {
char c = v.charAt(i);
int x = c < '0' || c > '9' ? 0 : (c - '0');
r += multiplier * x;
if (multiplier < 10) {
// We're at the last digit. Check for rounding.
if (i + 1 < v.length() && v.charAt(i + 1) >= '5') {
++r;
}
break;
}
multiplier /= 10;
}
return r;
} | 3.68 |
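To make the rounding arithmetic concrete, here is the same logic copied into a runnable sketch with two worked inputs: "1234" parses to 123 (the dropped '4' does not round up), while "9999" parses to 999 and then rounds up to 1000 because the dropped '9' is at least '5'.

```java
public final class FractionSketch {
    // Copy of the private helper above, reproduced only for demonstration.
    static int parseFraction(String v) {
        int multiplier = 100;
        int r = 0;
        for (int i = 0; i < v.length(); i++) {
            char c = v.charAt(i);
            int x = c < '0' || c > '9' ? 0 : (c - '0');
            r += multiplier * x;
            if (multiplier < 10) {
                // Last kept digit: round up if the next digit is '5' or greater.
                if (i + 1 < v.length() && v.charAt(i + 1) >= '5') {
                    ++r;
                }
                break;
            }
            multiplier /= 10;
        }
        return r;
    }

    public static void main(String[] args) {
        System.out.println(parseFraction("1234")); // 123
        System.out.println(parseFraction("9999")); // 1000 (rounded up)
    }
}
```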
pulsar_Topics_deletePartitionedTopic | /**
* @see Topics#deletePartitionedTopic(String, boolean, boolean)
* IMPORTANT NOTICE: the application is not able to connect to the topic (delete then re-create with the same name)
* again if schema auto-uploading is disabled. Besides, users should use the truncate method instead of the delete
* method to clean up the data of the topic if they intend to continue using this topic later.
*/
default void deletePartitionedTopic(String topic, boolean force) throws PulsarAdminException {
deletePartitionedTopic(topic, force, true);
} | 3.68 |