name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
hbase_HFileBlock_writeHeaderAndData | /**
* Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records the offset of this
* block so that it can be referenced in the next block of the same type.
*/
void writeHeaderAndData(FSDataOutputStream out) throws IOException {
long offset = out.getPos();
if (startOffset != UNSET && offset != startOffset) {
throw new IOException("A " + blockType + " block written to a "
+ "stream twice, first at offset " + startOffset + ", then at " + offset);
}
startOffset = offset;
finishBlockAndWriteHeaderAndData(out);
} | 3.68 |
flink_RowDataLocalTimeZoneConverter_getSessionTimeZone | /** Get time zone from the given session config. */
private static ZoneId getSessionTimeZone(ReadableConfig sessionConfig) {
final String zone = sessionConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
return TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
? ZoneId.systemDefault()
: ZoneId.of(zone);
} | 3.68 |
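As a usage note for the snippet above: the resolution logic falls back to the system zone when the session option is left at its default. A minimal self-contained sketch of the same check, where the `"default"` sentinel stands in for `TableConfigOptions.LOCAL_TIME_ZONE.defaultValue()` (an assumption, not the real constant):

```java
import java.time.ZoneId;

public class SessionZoneSketch {
    // Assumed stand-in for TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().
    private static final String DEFAULT_ZONE_VALUE = "default";

    // Mirrors getSessionTimeZone: use the system zone unless an explicit zone id was configured.
    static ZoneId resolveZone(String configuredZone) {
        return DEFAULT_ZONE_VALUE.equals(configuredZone)
                ? ZoneId.systemDefault()
                : ZoneId.of(configuredZone);
    }

    public static void main(String[] args) {
        System.out.println(resolveZone("default"));       // the JVM's default zone
        System.out.println(resolveZone("Europe/Berlin")); // Europe/Berlin
    }
}
```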
flink_BinaryStringData_byteAt | /**
* Returns the {@code byte} value at the specified index. An index ranges from {@code 0} to
* {@code binarySection.sizeInBytes - 1}.
*
* @param index the index of the {@code byte} value.
* @return the {@code byte} value at the specified index of this UTF-8 bytes.
* @exception IndexOutOfBoundsException if the {@code index} argument is negative or not less
* than the length of this UTF-8 bytes.
*/
public byte byteAt(int index) {
ensureMaterialized();
int globalOffset = binarySection.offset + index;
int size = binarySection.segments[0].size();
if (globalOffset < size) {
return binarySection.segments[0].get(globalOffset);
} else {
return binarySection.segments[globalOffset / size].get(globalOffset % size);
}
} | 3.68 |
hadoop_ShadedProtobufHelper_ipc | /**
* Evaluate a protobuf call, converting any ServiceException to an IOException.
* @param call invocation to make
* @return the result of the call
* @param <T> type of the result
* @throws IOException any translated protobuf exception
*/
public static <T> T ipc(IpcCall<T> call) throws IOException {
try {
return call.call();
} catch (ServiceException e) {
throw getRemoteException(e);
}
} | 3.68 |
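The snippet is a thin translation boundary: run the protobuf call, rethrow `ServiceException` as `IOException`. A hedged, self-contained sketch of the same pattern with placeholder types (the real `IpcCall` interface and `getRemoteException` helper are Hadoop internals not shown here):

```java
import java.io.IOException;

public class IpcWrapperSketch {
    /** Placeholder for the shaded protobuf ServiceException. */
    static class ServiceException extends Exception {
        ServiceException(String msg) { super(msg); }
    }

    /** Placeholder for ShadedProtobufHelper.IpcCall. */
    @FunctionalInterface
    interface IpcCall<T> {
        T call() throws ServiceException;
    }

    /** Same shape as ipc(): run the call, translating ServiceException to IOException. */
    static <T> T ipc(IpcCall<T> call) throws IOException {
        try {
            return call.call();
        } catch (ServiceException e) {
            // stands in for getRemoteException(e)
            throw new IOException(e.getMessage(), e);
        }
    }

    public static void main(String[] args) throws IOException {
        String reply = ipc(() -> "ok"); // the lambda plays the role of a protobuf stub call
        System.out.println(reply);
    }
}
```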
flink_AbstractRowTimeUnboundedPrecedingOver_processElement | /**
* Puts an element from the input stream into state if it is not late. Registers a timer for the
* next watermark.
*
* @param input The input value.
* @param ctx A {@link Context} that allows querying the timestamp of the element and getting
* TimerService for registering timers and querying the time. The context is only valid
* during the invocation of this method, do not store it.
* @param out The collector for returning result values.
* @throws Exception
*/
@Override
public void processElement(
RowData input,
KeyedProcessFunction<K, RowData, RowData>.Context ctx,
Collector<RowData> out)
throws Exception {
// register state-cleanup timer
registerProcessingCleanupTimer(ctx, ctx.timerService().currentProcessingTime());
long timestamp = input.getLong(rowTimeIdx);
long curWatermark = ctx.timerService().currentWatermark();
if (timestamp > curWatermark) {
// ensure every key just registers one timer
// the default watermark is Long.MIN_VALUE; to avoid overflow we use zero when the watermark < 0
long triggerTs = curWatermark < 0 ? 0 : curWatermark + 1;
ctx.timerService().registerEventTimeTimer(triggerTs);
// put row into state
List<RowData> rowList = inputState.get(timestamp);
if (rowList == null) {
rowList = new ArrayList<RowData>();
}
rowList.add(input);
inputState.put(timestamp, rowList);
} else {
// discard late record
numLateRecordsDropped.inc();
}
} | 3.68 |
flink_ClusterEntryPointExceptionUtils_tryEnrichClusterEntryPointError | /**
* Tries to enrich the passed exception or its causes with additional information.
*
* <p>This method improves error messages for direct and metaspace {@link OutOfMemoryError}. It
* adds descriptions about possible causes and ways of resolution.
*
* @param root The Throwable of which the cause tree shall be traversed.
*/
public static void tryEnrichClusterEntryPointError(@Nullable Throwable root) {
tryEnrichOutOfMemoryError(
root,
JM_METASPACE_OOM_ERROR_MESSAGE,
JM_DIRECT_OOM_ERROR_MESSAGE,
JM_HEAP_SPACE_OOM_ERROR_MESSAGE);
} | 3.68 |
hbase_HRegionServer_createRegionServerStatusStub | /**
* Get the current master from ZooKeeper and open the RPC connection to it. To get a fresh
* connection, the current rssStub must be null. Method will block until a master is available.
* You can break from this block by requesting the server stop.
* @param refresh If true then master address will be read from ZK, otherwise use cached data
* @return master + port, or null if server has been stopped
*/
@InterfaceAudience.Private
protected synchronized ServerName createRegionServerStatusStub(boolean refresh) {
if (rssStub != null) {
return masterAddressTracker.getMasterAddress();
}
ServerName sn = null;
long previousLogTime = 0;
RegionServerStatusService.BlockingInterface intRssStub = null;
LockService.BlockingInterface intLockStub = null;
boolean interrupted = false;
try {
while (keepLooping()) {
sn = this.masterAddressTracker.getMasterAddress(refresh);
if (sn == null) {
if (!keepLooping()) {
// give up with no connection.
LOG.debug("No master found and cluster is stopped; bailing out");
return null;
}
if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
LOG.debug("No master found; retry");
previousLogTime = EnvironmentEdgeManager.currentTime();
}
refresh = true; // let's try pull it from ZK directly
if (sleepInterrupted(200)) {
interrupted = true;
}
continue;
}
try {
BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn,
userProvider.getCurrent(), shortOperationTimeout);
intRssStub = RegionServerStatusService.newBlockingStub(channel);
intLockStub = LockService.newBlockingStub(channel);
break;
} catch (IOException e) {
if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
if (e instanceof ServerNotRunningYetException) {
LOG.info("Master isn't available yet, retrying");
} else {
LOG.warn("Unable to connect to master. Retrying. Error was:", e);
}
previousLogTime = EnvironmentEdgeManager.currentTime();
}
if (sleepInterrupted(200)) {
interrupted = true;
}
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
this.rssStub = intRssStub;
this.lockStub = intLockStub;
return sn;
} | 3.68 |
flink_HashPartition_spillPartition | /**
* Spills this partition to disk and sets it up such that it continues spilling records that are
* added to it. The spilling process must free at least one buffer, either in the partition's
* record buffers, or in the memory segments for overflow buckets. The partition immediately
* takes back one buffer to use it for further spilling.
*
* @param target The list to which memory segments from overflow buckets are added.
* @param ioAccess The I/O manager to be used to create a writer to disk.
* @param targetChannel The id of the target channel for this partition.
* @return The number of buffers that were freed by spilling this partition.
* @throws IOException Thrown, if the writing failed.
*/
public int spillPartition(
List<MemorySegment> target,
IOManager ioAccess,
FileIOChannel.ID targetChannel,
LinkedBlockingQueue<MemorySegment> bufferReturnQueue)
throws IOException {
// sanity checks
if (!isInMemory()) {
throw new RuntimeException(
"Bug in Hybrid Hash Join: "
+ "Request to spill a partition that has already been spilled.");
}
if (getNumOccupiedMemorySegments() < 2) {
throw new RuntimeException(
"Bug in Hybrid Hash Join: "
+ "Request to spill a partition with less than two buffers.");
}
// return the memory from the overflow segments
for (int i = 0; i < this.numOverflowSegments; i++) {
target.add(this.overflowSegments[i]);
}
this.overflowSegments = null;
this.numOverflowSegments = 0;
this.nextOverflowBucket = 0;
// create the channel block writer and spill the current buffers
// that hold the build side's current block, as it is most likely not full yet;
// we return the number of blocks that become available
this.buildSideChannel = ioAccess.createBlockChannelWriter(targetChannel, bufferReturnQueue);
return this.buildSideWriteBuffer.spill(this.buildSideChannel);
} | 3.68 |
flink_SlideWithSize_every | /**
* Specifies the window's slide as time or row-count interval.
*
* <p>The slide determines the interval in which windows are started. Hence, sliding windows can
* overlap if the slide is smaller than the size of the window.
*
* <p>For example, you could have windows of size 15 minutes that slide by 3 minutes. With this,
* 15 minutes' worth of elements are grouped every 3 minutes, and each row contributes to 5
* windows.
*
* @param slide the slide of the window either as time or row-count interval.
* @return a sliding window
*/
public SlideWithSizeAndSlide every(Expression slide) {
return new SlideWithSizeAndSlide(size, slide);
} | 3.68 |
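For orientation, this builder step is normally reached through the Table API's `Slide` entry point. A hedged sketch of the 15-minute/3-minute example from the Javadoc, assuming a `rowtime` time attribute on the table:

```java
import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.lit;

import org.apache.flink.table.api.GroupWindow;
import org.apache.flink.table.api.Slide;

public class SlideEverySketch {
    public static void main(String[] args) {
        // 15-minute windows sliding every 3 minutes, evaluated on an assumed "rowtime" attribute.
        GroupWindow window =
                Slide.over(lit(15).minutes())
                        .every(lit(3).minutes())
                        .on($("rowtime"))
                        .as("w");
        // The descriptor would then be used as table.window(window).groupBy($("w"), ...).
        System.out.println("built sliding window aliased as w");
    }
}
```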
hbase_OrderedFloat64_decodeDouble | /**
* Read a {@code double} value from the buffer {@code src}.
* @param src the {@link PositionedByteRange} to read the {@code double} from
* @return the {@code double} floating-point value with the same bit pattern
*/
public double decodeDouble(PositionedByteRange src) {
return OrderedBytes.decodeFloat64(src);
} | 3.68 |
hadoop_NameValuePair_getName | /**
* Get the name.
* @return The name.
*/
public String getName() {
return name;
} | 3.68 |
hbase_StoreFileWriter_getUniqueFile | /**
* @param dir Directory to create file in.
* @return random filename inside passed <code>dir</code>
*/
public static Path getUniqueFile(final FileSystem fs, final Path dir) throws IOException {
if (!fs.getFileStatus(dir).isDirectory()) {
throw new IOException("Expecting " + dir.toString() + " to be a directory");
}
return new Path(dir, dash.matcher(UUID.randomUUID().toString()).replaceAll(""));
} | 3.68 |
hbase_ProcedureStoreTracker_setDeletedIfModifiedInBoth | /**
* Similar to {@link #setDeletedIfModified(long...)}, but here the {@code procId}s are given by
* the {@code tracker}. If a procedure is modified by us, and also by the given {@code tracker},
* then we mark it as deleted.
* @see #setDeletedIfModified(long...)
*/
public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) {
setDeleteIf(tracker, (node, procId) -> node != null && node.isModified(procId));
} | 3.68 |
cron-utils_CronDefinitionBuilder_matchDayOfWeekAndDayOfMonth | /**
* Sets matchDayOfWeekAndDayOfMonth value to true.
*
* @return this CronDefinitionBuilder instance
*/
public CronDefinitionBuilder matchDayOfWeekAndDayOfMonth() {
matchDayOfWeekAndDayOfMonth = true;
return this;
} | 3.68 |
hadoop_DiskBalancerWorkStatus_parseJson | /**
* Returns a DiskBalancerWorkStatus object from the given JSON string.
* @param json - json String
* @return DiskBalancerWorkStatus
* @throws IOException
*/
public static DiskBalancerWorkStatus parseJson(String json) throws
IOException {
return READER_WORKSTATUS.readValue(json);
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithNestedConcatenations | /**
* Tests concatenation in a select with nested concatenations.
*/
@Test
public void testSelectWithNestedConcatenations() {
SelectStatement stmt = new SelectStatement(new ConcatenatedField(new FieldReference("field1"), new ConcatenatedField(
new FieldReference("field2"), new FieldLiteral("XYZ"))).as("test")).from(new TableReference("schedule"));
String result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedNestedConcatenations(), result);
} | 3.68 |
graphhopper_LandmarkStorage__getInternalDA | /**
* For testing only
*/
DataAccess _getInternalDA() {
return landmarkWeightDA;
} | 3.68 |
flink_TypeInferenceUtil_createInvalidInputException | /** Returns an exception for invalid input arguments. */
public static ValidationException createInvalidInputException(
TypeInference typeInference, CallContext callContext, ValidationException cause) {
return new ValidationException(
String.format(
"Invalid input arguments. Expected signatures are:\n%s",
generateSignature(
typeInference,
callContext.getName(),
callContext.getFunctionDefinition())),
cause);
} | 3.68 |
framework_TreeAction_execute | /**
* Sends a message to the server that this action has been fired. Messages are
* "standard" Vaadin messages whose value is a comma-separated pair of the
* targetKey (row, tree node, ...) and the action's id.
*
* The variable name is always "action".
*
* Actions are always sent immediately to the server.
*/
@Override
public void execute() {
owner.getClient().updateVariable(owner.getPaintableId(), "action",
targetKey + "," + actionKey, true);
owner.getClient().getContextMenu().hide();
} | 3.68 |
framework_LayoutManager_setNeedsVerticalLayout | /**
* Marks that a ManagedLayout should be laid out vertically in the next
* layout phase even if none of the elements managed by the layout have been
* resized vertically.
* <p>
* For SimpleManagedLayout, which is always laid out in both directions, this
* has the same effect as {@link #setNeedsLayout(ManagedLayout)}.
* <p>
* This method should not be invoked during a layout phase since it only
* controls what will happen in the beginning of the next phase. If you want
* to explicitly cause some layout to be considered in an ongoing layout
* phase, you should use {@link #setNeedsMeasure(ComponentConnector)}
* instead.
*
* @param layout
* the managed layout that should be laid out
*/
public final void setNeedsVerticalLayout(ManagedLayout layout) {
if (isLayoutRunning()) {
getLogger().warning(
"setNeedsVerticalLayout should not be run while a layout phase is in progress.");
}
needsVerticalLayout.add(layout.getConnectorId());
} | 3.68 |
hbase_AvlUtil_readPrev | /**
* Return the predecessor of the current node
* @param node the current node
* @return the predecessor of the current node
*/
public static <TNode extends AvlLinkedNode> TNode readPrev(TNode node) {
return (TNode) node.iterPrev;
} | 3.68 |
open-banking-gateway_ProcessResultEventHandler_handleEvent | /**
* Spring event-bus listener to listen for BPMN process result.
*
* @param result BPMN process message to notify with the subscribers.
*/
@TransactionalEventListener
public void handleEvent(InternalProcessResult result) {
Consumer<InternalProcessResult> consumer;
synchronized (lock) {
InternalProcessResult handledResult = result;
if (handledResult instanceof ProcessError) {
handledResult = replaceErrorProcessIdWithParentProcessIdIfNeeded((ProcessError) handledResult);
}
consumer = subscribers.remove(handledResult.getProcessId());
if (null == consumer) {
deadLetterQueue.put(handledResult.getProcessId(), result);
return;
}
}
consumer.accept(result);
} | 3.68 |
framework_BasicEventResizeHandler_eventResize | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.ui.CalendarComponentEvents.EventResizeHandler
* #eventResize
* (com.vaadin.addon.calendar.ui.CalendarComponentEvents.EventResize)
*/
@Override
public void eventResize(EventResize event) {
CalendarEvent calendarEvent = event.getCalendarEvent();
if (calendarEvent instanceof EditableCalendarEvent) {
Date newStartTime = event.getNewStart();
Date newEndTime = event.getNewEnd();
EditableCalendarEvent editableEvent = (EditableCalendarEvent) calendarEvent;
setDates(editableEvent, newStartTime, newEndTime);
}
} | 3.68 |
flink_HiveCatalog_getOrderedFullPartitionValues | /**
* Get a list of ordered partition values by re-arranging them based on the given list of
* partition keys. If the partition value is null, it will be converted into the default partition
* name.
*
* @param partitionSpec a partition spec.
* @param partitionKeys a list of partition keys.
* @param tablePath path of the table to which the partition belongs.
* @return A list of partition values ordered according to partitionKeys.
* @throws PartitionSpecInvalidException thrown if partitionSpec and partitionKeys have
* different sizes, or any key in partitionKeys doesn't exist in partitionSpec.
*/
private List<String> getOrderedFullPartitionValues(
CatalogPartitionSpec partitionSpec, List<String> partitionKeys, ObjectPath tablePath)
throws PartitionSpecInvalidException {
Map<String, String> spec = partitionSpec.getPartitionSpec();
if (spec.size() != partitionKeys.size()) {
throw new PartitionSpecInvalidException(
getName(), partitionKeys, tablePath, partitionSpec);
}
List<String> values = new ArrayList<>(spec.size());
for (String key : partitionKeys) {
if (!spec.containsKey(key)) {
throw new PartitionSpecInvalidException(
getName(), partitionKeys, tablePath, partitionSpec);
} else {
String value = spec.get(key);
if (value == null) {
value = getHiveConf().getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
}
values.add(value);
}
}
return values;
} | 3.68 |
framework_VaadinService_setClassLoader | /**
* Sets the class loader to use for loading classes loaded by name, e.g.
* custom UI classes. Invokers of this method should be careful to not break
* any existing class loader hierarchy, e.g. by ensuring that a class loader
* set for this service delegates to the previously set class loader if the
* class is not found.
*
* @param classLoader
* the new class loader to set, not <code>null</code>.
*
* @see #getClassLoader()
*/
public void setClassLoader(ClassLoader classLoader) {
if (classLoader == null) {
throw new IllegalArgumentException(
"Can not set class loader to null");
}
this.classLoader = classLoader;
} | 3.68 |
hadoop_AbfsRestOperation_createNewTracingContext | /**
* Creates a new Tracing context before entering the retry loop of a rest operation.
* This will ensure all rest operations have a unique
* tracing context that will be used for all the retries.
* @param tracingContext original tracingContext.
* @return tracingContext new tracingContext object created from original one.
*/
@VisibleForTesting
public TracingContext createNewTracingContext(final TracingContext tracingContext) {
return new TracingContext(tracingContext);
} | 3.68 |
hmily_XidImpl_newBranchId | /**
* Creates a new branch xid.
*
* @return the new branch xid
*/
public XidImpl newBranchId() {
return new XidImpl(this);
} | 3.68 |
hudi_CompactionUtil_rollbackEarliestCompaction | /**
* Rolls back the earliest compaction if one exists.
*
* <p>Keeps the strategy conservative: first check whether there are inflight compaction instants,
* and roll back the first inflight instant only if it has timed out. That means that if there are
* multiple timed-out instants on the timeline, we only roll back the first one at a time.
*/
public static void rollbackEarliestCompaction(HoodieFlinkTable<?> table, Configuration conf) {
Option<HoodieInstant> earliestInflight = table.getActiveTimeline()
.filterPendingCompactionTimeline()
.filter(instant ->
instant.getState() == HoodieInstant.State.INFLIGHT).firstInstant();
if (earliestInflight.isPresent()) {
HoodieInstant instant = earliestInflight.get();
String currentTime = table.getMetaClient().createNewInstantTime();
int timeout = conf.getInteger(FlinkOptions.COMPACTION_TIMEOUT_SECONDS);
if (StreamerUtil.instantTimeDiffSeconds(currentTime, instant.getTimestamp()) >= timeout) {
LOG.info("Rollback the inflight compaction instant: " + instant + " for timeout(" + timeout + "s)");
table.rollbackInflightCompaction(instant);
table.getMetaClient().reloadActiveTimeline();
}
}
} | 3.68 |
framework_TooltipOnRequiredIndicator_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
VerticalLayout layout = new VerticalLayout();
TextField inVertical = new TextField();
inVertical.setRequired(true);
inVertical.setRequiredError("Vertical layout tooltip");
inVertical.setCaption("Vertical layout caption");
inVertical.setId("verticalField");
layout.addComponent(inVertical);
addComponent(layout);
HorizontalLayout horizontalLayout = new HorizontalLayout();
TextField inHorizontal = new TextField();
inHorizontal.setRequired(true);
inHorizontal.setRequiredError("Horizontal layout tooltip");
inHorizontal.setCaption("Horizontal layout caption");
inHorizontal.setId("horizontalField");
horizontalLayout.addComponent(inHorizontal);
layout.addComponent(horizontalLayout);
} | 3.68 |
pulsar_ResourceUnitRanking_removeLoadedServiceUnit | /**
* Remove a service unit from the loaded bundle list.
*/
public void removeLoadedServiceUnit(String suName, ResourceQuota quota) {
if (this.loadedBundles.remove(suName)) {
this.allocatedQuota.substract(quota);
estimateLoadPercentage();
}
} | 3.68 |
framework_VaadinSession_getBrowser | /**
* Get the web browser associated with this session.
*
* @return the web browser object
*
* @deprecated As of 7.0, use {@link Page#getWebBrowser()} instead.
*/
@Deprecated
public WebBrowser getBrowser() {
assert hasLock();
return browser;
} | 3.68 |
hadoop_CommitContext_getOuterSubmitter | /**
* Return a submitter.
* If created with 0 threads, this returns null so
* TaskPool knows to run it in the current thread.
* @return a submitter or null
*/
public synchronized TaskPool.Submitter getOuterSubmitter() {
return outerSubmitter;
} | 3.68 |
hadoop_ManifestSuccessData_getCommitter | /**
* @return committer name.
*/
public String getCommitter() {
return committer;
} | 3.68 |
hadoop_Validate_checkPositiveInteger | /**
* Validates that the given integer argument is not zero or negative.
* @param value the argument value to validate
* @param argName the name of the argument being validated.
*/
public static void checkPositiveInteger(long value, String argName) {
checkArgument(value > 0, "'%s' must be a positive integer.", argName);
} | 3.68 |
pulsar_MetadataStore_getMetadataCache | /**
* Create a metadata cache that uses a particular serde object.
*
* @param <T>
* @param serde
* the custom serialization/deserialization object
* @return the metadata cache object
*/
default <T> MetadataCache<T> getMetadataCache(MetadataSerde<T> serde) {
return getMetadataCache(serde, getDefaultMetadataCacheConfig());
} | 3.68 |
framework_AbstractField_setReadOnly | /**
* {@inheritDoc}
* <p>
* The server ignores (potentially forged) value change requests from the
* client to fields that are read-only. Programmatically changing the field
* value via {@link #setValue(T)} is still possible.
* <p>
* The read-only mode is distinct from the
* {@linkplain Component#setEnabled(boolean) disabled} state. When disabled,
* a component cannot be interacted with at all, and its content should be
* considered irrelevant or not applicable. In contrast, the user should
* still be able to read the content and otherwise interact with a read-only
* field even though changing the value is disallowed.
*
* @param readOnly
* {@code true} to set read-only mode, {@code false} otherwise.
*/
@Override
public void setReadOnly(boolean readOnly) {
super.setReadOnly(readOnly);
} | 3.68 |
flink_Schema_fromResolvedSchema | /** Adopts all members from the given resolved schema. */
public Builder fromResolvedSchema(ResolvedSchema resolvedSchema) {
addResolvedColumns(resolvedSchema.getColumns());
addResolvedWatermarkSpec(resolvedSchema.getWatermarkSpecs());
resolvedSchema.getPrimaryKey().ifPresent(this::addResolvedConstraint);
return this;
} | 3.68 |
flink_EncodingUtils_repeat | /**
* Returns padding using the specified delimiter repeated to a given length.
*
* <pre>
* StringUtils.repeat('e', 0) = ""
* StringUtils.repeat('e', 3) = "eee"
* StringUtils.repeat('e', -2) = ""
* </pre>
*
* <p>Note: this method does not support padding with <a
* href="http://www.unicode.org/glossary/#supplementary_character">Unicode Supplementary
* Characters</a> as they require a pair of {@code char}s to be represented. If you are needing
* to support full I18N of your applications consider using {@link #repeat(String, int)}
* instead.
*
* @param ch character to repeat
* @param repeat number of times to repeat char, negative treated as zero
* @return String with repeated character
* @see #repeat(String, int)
*/
public static String repeat(final char ch, final int repeat) {
final char[] buf = new char[repeat];
for (int i = repeat - 1; i >= 0; i--) {
buf[i] = ch;
}
return new String(buf);
} | 3.68 |
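One caveat worth noting: the Javadoc promises an empty string for a negative count, but the body as shown would pass that negative value to `new char[repeat]`, which throws `NegativeArraySizeException`. A self-contained sketch that adds the documented guard and replays the listed cases:

```java
public class RepeatSketch {
    // Local copy of repeat(char, int) with the guard the Javadoc describes (negative treated as zero).
    static String repeat(final char ch, final int repeat) {
        if (repeat <= 0) {
            return "";
        }
        final char[] buf = new char[repeat];
        for (int i = repeat - 1; i >= 0; i--) {
            buf[i] = ch;
        }
        return new String(buf);
    }

    public static void main(String[] args) {
        System.out.println(repeat('e', 0).isEmpty());  // true
        System.out.println(repeat('e', 3));            // eee
        System.out.println(repeat('e', -2).isEmpty()); // true
    }
}
```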
hadoop_Check_validIdentifier | /**
* Verifies a value is a valid identifier,
* <code>[a-zA-Z_][a-zA-Z0-9_\-]*</code>, up to a maximum length.
*
* @param value string to check if it is a valid identifier.
* @param maxLen maximum length.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the string is not a valid identifier.
*/
public static String validIdentifier(String value, int maxLen, String name) {
Check.notEmpty(value, name);
if (value.length() > maxLen) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen));
}
if (!IDENTIFIER_PATTERN.matcher(value).find()) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] must be \"{2}\"", name, value, IDENTIFIER_PATTERN_STR));
}
return value;
} | 3.68 |
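A self-contained approximation of the same validation, useful for seeing the error messages. The anchored regex is an assumption about `IDENTIFIER_PATTERN_STR`, and the empty-value check stands in for `Check.notEmpty`:

```java
import java.text.MessageFormat;
import java.util.regex.Pattern;

public class ValidIdentifierSketch {
    // Assumed pattern, taken from the Javadoc: [a-zA-Z_][a-zA-Z0-9_\-]*
    private static final String IDENTIFIER_PATTERN_STR = "[a-zA-Z_][a-zA-Z0-9_\\-]*";
    private static final Pattern IDENTIFIER_PATTERN =
            Pattern.compile("^" + IDENTIFIER_PATTERN_STR + "$");

    static String validIdentifier(String value, int maxLen, String name) {
        if (value == null || value.isEmpty()) { // stands in for Check.notEmpty
            throw new IllegalArgumentException(MessageFormat.format("[{0}] cannot be empty", name));
        }
        if (value.length() > maxLen) {
            throw new IllegalArgumentException(
                    MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen));
        }
        if (!IDENTIFIER_PATTERN.matcher(value).find()) {
            throw new IllegalArgumentException(
                    MessageFormat.format("[{0}] = [{1}] must be \"{2}\"", name, value, IDENTIFIER_PATTERN_STR));
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println(validIdentifier("job_1-a", 32, "jobName")); // valid, echoed back
        try {
            validIdentifier("1bad", 32, "jobName");                    // starts with a digit
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```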
hbase_AsyncAdmin_balance | /**
* Invoke the balancer. Will run the balancer and, if there are regions to move, it will go ahead and do
* the reassignments. If there are regions in transition, a force parameter of true will still run the
* balancer. It can still decline to run for other reasons; check the logs.
* @param forcible whether we should force balance even if there is region in transition.
* @return True if balancer ran, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}.
* @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)}
* instead.
*/
default CompletableFuture<Boolean> balance(boolean forcible) {
return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(forcible).build())
.thenApply(BalanceResponse::isBalancerRan);
} | 3.68 |
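The deprecation note points at the `BalanceRequest` overload; a hedged sketch of the replacement call, with the admin instance left to the caller (package names assumed from hbase-client 2.5+):

```java
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;

public class BalanceSketch {
    // Mirrors what the deprecated default method delegates to internally.
    static CompletableFuture<Boolean> forceBalance(AsyncAdmin admin) {
        BalanceRequest request =
                BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build();
        return admin.balance(request).thenApply(BalanceResponse::isBalancerRan);
    }
}
```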
shardingsphere-elasticjob_GuaranteeService_executeInLeaderForLastCompleted | /**
* Invoke doAfterJobExecutedAtLastCompleted method once after last completed.
*
* @param listener AbstractDistributeOnceElasticJobListener instance
* @param shardingContexts sharding contexts
*/
public void executeInLeaderForLastCompleted(final AbstractDistributeOnceElasticJobListener listener,
final ShardingContexts shardingContexts) {
jobNodeStorage.executeInLeader(GuaranteeNode.COMPLETED_LATCH_ROOT,
new LeaderExecutionCallbackForLastCompleted(listener, shardingContexts));
} | 3.68 |
flink_PythonEnvUtils_getLibFiles | /**
* Gets pyflink dependent libs in specified directory.
*
* @param libDir The lib directory
*/
private static List<java.nio.file.Path> getLibFiles(String libDir) {
final List<java.nio.file.Path> libFiles = new ArrayList<>();
SimpleFileVisitor<java.nio.file.Path> finder =
new SimpleFileVisitor<java.nio.file.Path>() {
@Override
public FileVisitResult visitFile(
java.nio.file.Path file, BasicFileAttributes attrs) throws IOException {
// only include zip file
if (file.toString().endsWith(".zip")) {
libFiles.add(file);
}
return FileVisitResult.CONTINUE;
}
};
try {
Files.walkFileTree(FileSystems.getDefault().getPath(libDir), finder);
} catch (Throwable t) {
// ignore, this may occur when executing `flink run` using the PyFlink Python package.
}
return libFiles;
} | 3.68 |
querydsl_StringExpression_startsWithIgnoreCase | /**
* Create a {@code this.startsWithIgnoreCase(str)} expression
*
* @param str string
* @return this.startsWithIgnoreCase(str)
*/
public BooleanExpression startsWithIgnoreCase(String str) {
return startsWithIgnoreCase(ConstantImpl.create(str));
} | 3.68 |
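A hedged usage sketch using an ad-hoc string path; in a real Querydsl project the same call would normally be made on a generated Q-type property:

```java
import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringPath;

public class StartsWithIgnoreCaseSketch {
    public static void main(String[] args) {
        // "name" is an illustrative path; generated metamodels expose the same StringExpression API.
        StringPath name = Expressions.stringPath("name");
        BooleanExpression predicate = name.startsWithIgnoreCase("jo");
        System.out.println(predicate); // a textual rendering of the predicate
    }
}
```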
framework_DataCommunicator_removeDataGenerator | /**
* Removes a data generator from this data communicator. If there is no such
* data generator, does nothing.
*
* @param generator
* the data generator to remove, not null
*/
public void removeDataGenerator(DataGenerator<T> generator) {
Objects.requireNonNull(generator, "generator cannot be null");
generators.remove(generator);
} | 3.68 |
querydsl_JDOExpressions_selectFrom | /**
* Create a new detached {@link JDOQuery} instance with the given projection
*
* @param expr projection and source
* @param <T>
* @return select(expr).from(expr)
*/
public static <T> JDOQuery<T> selectFrom(EntityPath<T> expr) {
return select(expr).from(expr);
} | 3.68 |
hadoop_StoreContext_getOwner | /**
* Get the owner of the filesystem.
* @return the user who created this filesystem.
*/
public UserGroupInformation getOwner() {
return owner;
} | 3.68 |
framework_Form_setReadOnly | /**
* Sets the component's to read-only mode to the specified state.
*
* @see Component#setReadOnly(boolean)
*/
@Override
public void setReadOnly(boolean readOnly) {
super.setReadOnly(readOnly);
for (final Object id : propertyIds) {
fields.get(id).setReadOnly(readOnly);
}
} | 3.68 |
hbase_MasterObserver_preTruncateTable | /**
* Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part
* of truncate table RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void preTruncateTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
} | 3.68 |
hadoop_InstrumentedReadLock_startLockTiming | /**
* Starts timing for the instrumented read lock.
* It records the time to ThreadLocal.
*/
@Override
protected void startLockTiming() {
if (readWriteLock.getReadHoldCount() == 1) {
readLockHeldTimeStamp.set(getTimer().monotonicNow());
}
} | 3.68 |
flink_TypeExtractionUtils_checkAndExtractLambda | /**
* Checks if the given function has been implemented using a Java 8 lambda. If yes, a
* LambdaExecutable is returned describing the method/constructor. Otherwise null.
*
* @throws TypeExtractionException lambda extraction is pretty hacky, it might fail for unknown
* JVM issues.
*/
public static LambdaExecutable checkAndExtractLambda(Function function)
throws TypeExtractionException {
try {
// get serialized lambda
SerializedLambda serializedLambda = null;
for (Class<?> clazz = function.getClass();
clazz != null;
clazz = clazz.getSuperclass()) {
try {
Method replaceMethod = clazz.getDeclaredMethod("writeReplace");
replaceMethod.setAccessible(true);
Object serialVersion = replaceMethod.invoke(function);
// check if class is a lambda function
if (serialVersion != null
&& serialVersion.getClass() == SerializedLambda.class) {
serializedLambda = (SerializedLambda) serialVersion;
break;
}
} catch (NoSuchMethodException e) {
// thrown if the method is not there. fall through the loop
}
}
// not a lambda method -> return null
if (serializedLambda == null) {
return null;
}
// find lambda method
String className = serializedLambda.getImplClass();
String methodName = serializedLambda.getImplMethodName();
String methodSig = serializedLambda.getImplMethodSignature();
Class<?> implClass =
Class.forName(
className.replace('/', '.'),
true,
Thread.currentThread().getContextClassLoader());
// find constructor
if (methodName.equals("<init>")) {
Constructor<?>[] constructors = implClass.getDeclaredConstructors();
for (Constructor<?> constructor : constructors) {
if (getConstructorDescriptor(constructor).equals(methodSig)) {
return new LambdaExecutable(constructor);
}
}
}
// find method
else {
List<Method> methods = getAllDeclaredMethods(implClass);
for (Method method : methods) {
if (method.getName().equals(methodName)
&& getMethodDescriptor(method).equals(methodSig)) {
return new LambdaExecutable(method);
}
}
}
throw new TypeExtractionException("No lambda method found.");
} catch (Exception e) {
throw new TypeExtractionException(
"Could not extract lambda method out of function: "
+ e.getClass().getSimpleName()
+ " - "
+ e.getMessage(),
e);
}
} | 3.68 |
flink_TemporalTableJoinUtil_isEventTimeTemporalJoin | /**
* Check if the given join condition is an initial temporal join condition or a rewritten join
* condition on event time.
*/
public static boolean isEventTimeTemporalJoin(@Nonnull RexNode joinCondition) {
RexVisitor<Void> temporalConditionFinder =
new RexVisitorImpl<Void>(true) {
@Override
public Void visitCall(RexCall call) {
if ((call.getOperator()
== TemporalJoinUtil
.INITIAL_TEMPORAL_JOIN_CONDITION()
&& TemporalJoinUtil.isInitialRowTimeTemporalTableJoin(call))
|| isRowTimeTemporalTableJoinCondition(call)) {
// has an initial temporal join condition or a rewritten row-time temporal join condition
throw new Util.FoundOne(call);
}
return super.visitCall(call);
}
};
try {
joinCondition.accept(temporalConditionFinder);
} catch (Util.FoundOne found) {
return true;
}
return false;
} | 3.68 |
hudi_QuickstartUtils_generateUniqueUpdates | /**
* Generates new updates, one for each of the keys in the existing keys list.
*
* @param n Number of updates (must be no more than number of existing keys)
* @return list of hoodie record updates
*/
public List<HoodieRecord> generateUniqueUpdates(Integer n) {
if (numExistingKeys < n) {
throw new HoodieException("Data must have been written before performing the update operation");
}
List<Integer> keys = IntStream.range(0, numExistingKeys).boxed()
.collect(Collectors.toCollection(ArrayList::new));
Collections.shuffle(keys);
String randomString = generateRandomString();
return IntStream.range(0, n).boxed().map(x -> {
try {
return generateUpdateRecord(existingKeys.get(keys.get(x)), randomString);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}).collect(Collectors.toList());
} | 3.68 |
hadoop_LocatedFileStatus_setBlockLocations | /**
* Hook for subclasses to lazily set block locations. The {@link #locations}
* field should be null before this is called.
* @param locations Block locations for this instance.
*/
protected void setBlockLocations(BlockLocation[] locations) {
this.locations = locations;
} | 3.68 |
framework_VComboBox_onKeyUp | /**
* Triggered when a key was depressed.
*
* @param event
* The KeyUpEvent of the key depressed
*/
@Override
public void onKeyUp(KeyUpEvent event) {
if (enableDebug) {
debug("VComboBox: onKeyUp(" + event.getNativeKeyCode() + ")");
}
if (enabled && !readonly) {
switch (event.getNativeKeyCode()) {
case KeyCodes.KEY_ENTER:
case KeyCodes.KEY_TAB:
case KeyCodes.KEY_SHIFT:
case KeyCodes.KEY_CTRL:
case KeyCodes.KEY_ALT:
case KeyCodes.KEY_DOWN:
case KeyCodes.KEY_UP:
case KeyCodes.KEY_RIGHT:
case KeyCodes.KEY_LEFT:
case KeyCodes.KEY_PAGEDOWN:
case KeyCodes.KEY_PAGEUP:
case KeyCodes.KEY_ESCAPE:
case KeyCodes.KEY_HOME:
case KeyCodes.KEY_END:
// NOP
break;
default:
if (textInputEnabled && !noKeyDownEvents) {
// when filtering, we always want to see the results on the
// first page first.
filterOptions(0);
}
break;
}
}
} | 3.68 |
hadoop_MappingRuleResult_getQueue | /**
* This method returns the result queue. Currently only makes sense when
* result == PLACE.
* @return the queue this result is about
*/
public String getQueue() {
return queue;
} | 3.68 |
hadoop_OBSFileSystem_getCannedACL | /**
* Get the bucket acl of user setting.
*
* @return bucket acl {@link AccessControlList}
*/
AccessControlList getCannedACL() {
return cannedACL;
} | 3.68 |
flink_SourceCoordinatorSerdeUtils_readAndVerifyCoordinatorSerdeVersion | /** Read and verify the serde version. */
static int readAndVerifyCoordinatorSerdeVersion(DataInputStream in) throws IOException {
int version = in.readInt();
if (version > CURRENT_VERSION) {
throw new IOException("Unsupported source coordinator serde version " + version);
}
return version;
} | 3.68 |
morf_BaseDataSetReader_availableStreamNames | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlInputStreamProvider#availableStreamNames()
*/
@Override
public Collection<String> availableStreamNames() {
return Lists.newArrayList(tableNames); // return a copy, as we are about to clear the list
} | 3.68 |
hadoop_DefaultOBSClientFactory_initConnectionSettings | /**
* Initializes all OBS SDK settings related to connection management.
*
* @param conf Hadoop configuration
* @param obsConf OBS SDK configuration
*/
@SuppressWarnings("deprecation")
private static void initConnectionSettings(final Configuration conf,
final ExtObsConfiguration obsConf) {
obsConf.setMaxConnections(
OBSCommonUtils.intOption(conf, OBSConstants.MAXIMUM_CONNECTIONS,
OBSConstants.DEFAULT_MAXIMUM_CONNECTIONS,
1));
boolean secureConnections = conf.getBoolean(
OBSConstants.SECURE_CONNECTIONS,
OBSConstants.DEFAULT_SECURE_CONNECTIONS);
obsConf.setHttpsOnly(secureConnections);
obsConf.setMaxErrorRetry(
OBSCommonUtils.intOption(conf, OBSConstants.MAX_ERROR_RETRIES,
OBSConstants.DEFAULT_MAX_ERROR_RETRIES, 0));
obsConf.setConnectionTimeout(
OBSCommonUtils.intOption(conf, OBSConstants.ESTABLISH_TIMEOUT,
OBSConstants.DEFAULT_ESTABLISH_TIMEOUT, 0));
obsConf.setSocketTimeout(
OBSCommonUtils.intOption(conf, OBSConstants.SOCKET_TIMEOUT,
OBSConstants.DEFAULT_SOCKET_TIMEOUT, 0));
obsConf.setIdleConnectionTime(
OBSCommonUtils.intOption(conf, OBSConstants.IDLE_CONNECTION_TIME,
OBSConstants.DEFAULT_IDLE_CONNECTION_TIME,
1));
obsConf.setMaxIdleConnections(
OBSCommonUtils.intOption(conf, OBSConstants.MAX_IDLE_CONNECTIONS,
OBSConstants.DEFAULT_MAX_IDLE_CONNECTIONS,
1));
obsConf.setReadBufferSize(
OBSCommonUtils.intOption(conf, OBSConstants.READ_BUFFER_SIZE,
OBSConstants.DEFAULT_READ_BUFFER_SIZE,
-1)); // to be modified
obsConf.setWriteBufferSize(
OBSCommonUtils.intOption(conf, OBSConstants.WRITE_BUFFER_SIZE,
OBSConstants.DEFAULT_WRITE_BUFFER_SIZE,
-1)); // to be modified
obsConf.setUploadStreamRetryBufferSize(
OBSCommonUtils.intOption(conf,
OBSConstants.UPLOAD_STREAM_RETRY_SIZE,
OBSConstants.DEFAULT_UPLOAD_STREAM_RETRY_SIZE, 1));
obsConf.setSocketReadBufferSize(
OBSCommonUtils.intOption(conf, OBSConstants.SOCKET_RECV_BUFFER,
OBSConstants.DEFAULT_SOCKET_RECV_BUFFER, -1));
obsConf.setSocketWriteBufferSize(
OBSCommonUtils.intOption(conf, OBSConstants.SOCKET_SEND_BUFFER,
OBSConstants.DEFAULT_SOCKET_SEND_BUFFER, -1));
obsConf.setKeepAlive(conf.getBoolean(OBSConstants.KEEP_ALIVE,
OBSConstants.DEFAULT_KEEP_ALIVE));
obsConf.setValidateCertificate(
conf.getBoolean(OBSConstants.VALIDATE_CERTIFICATE,
OBSConstants.DEFAULT_VALIDATE_CERTIFICATE));
obsConf.setVerifyResponseContentType(
conf.getBoolean(OBSConstants.VERIFY_RESPONSE_CONTENT_TYPE,
OBSConstants.DEFAULT_VERIFY_RESPONSE_CONTENT_TYPE));
obsConf.setCname(
conf.getBoolean(OBSConstants.CNAME, OBSConstants.DEFAULT_CNAME));
obsConf.setIsStrictHostnameVerification(
conf.getBoolean(OBSConstants.STRICT_HOSTNAME_VERIFICATION,
OBSConstants.DEFAULT_STRICT_HOSTNAME_VERIFICATION));
// sdk auth type negotiation enable
obsConf.setAuthTypeNegotiation(
conf.getBoolean(OBSConstants.SDK_AUTH_TYPE_NEGOTIATION_ENABLE,
OBSConstants.DEFAULT_SDK_AUTH_TYPE_NEGOTIATION_ENABLE));
// set SDK AUTH TYPE to OBS when auth type negotiation unenabled
if (!obsConf.isAuthTypeNegotiation()) {
obsConf.setAuthType(AuthTypeEnum.OBS);
}
// okhttp retryOnConnectionFailure switch, default set to true
obsConf.retryOnConnectionFailureInOkhttp(
conf.getBoolean(OBSConstants.SDK_RETRY_ON_CONNECTION_FAILURE_ENABLE,
OBSConstants.DEFAULT_SDK_RETRY_ON_CONNECTION_FAILURE_ENABLE));
// sdk max retry times on unexpected end of stream exception,
// default: -1 don't retry
int retryTime = conf.getInt(
OBSConstants.SDK_RETRY_TIMES_ON_UNEXPECTED_END_EXCEPTION,
OBSConstants.DEFAULT_SDK_RETRY_TIMES_ON_UNEXPECTED_END_EXCEPTION);
if (retryTime > 0
&& retryTime < OBSConstants.DEFAULT_MAX_SDK_CONNECTION_RETRY_TIMES
|| !obsConf.isRetryOnConnectionFailureInOkhttp() && retryTime < 0) {
retryTime = OBSConstants.DEFAULT_MAX_SDK_CONNECTION_RETRY_TIMES;
}
obsConf.setMaxRetryOnUnexpectedEndException(retryTime);
} | 3.68 |
hadoop_ManifestSuccessData_getTimestamp | /** @return timestamp of creation. */
public long getTimestamp() {
return timestamp;
} | 3.68 |
dubbo_HttpHeaderUtil_isRestAttachHeader | /**
* Determines whether the given header is a REST header or REST attachment header.
*
* @param header the header name
* @return true if the header is a REST attach header
*/
public static boolean isRestAttachHeader(String header) {
if (StringUtils.isEmpty(header) || !header.startsWith(RestHeaderEnum.REST_HEADER_PREFIX.getHeader())) {
return false;
}
return true;
} | 3.68 |
hudi_HDFSParquetImporter_load | /**
* Imports records to Hoodie table.
*
* @param client Hoodie Client
* @param instantTime Instant Time
* @param hoodieRecords Hoodie Records
* @param <T> Type
*/
protected <T extends HoodieRecordPayload> JavaRDD<WriteStatus> load(SparkRDDWriteClient<T> client, String instantTime,
JavaRDD<HoodieRecord<T>> hoodieRecords) {
switch (cfg.command.toLowerCase()) {
case "upsert": {
return client.upsert(hoodieRecords, instantTime);
}
case "bulkinsert": {
return client.bulkInsert(hoodieRecords, instantTime);
}
default: {
return client.insert(hoodieRecords, instantTime);
}
}
} | 3.68 |
hbase_FilterBase_hasFilterRow | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc}
*/
@Override
public boolean hasFilterRow() {
return false;
} | 3.68 |
hbase_ZKClusterId_getUUIDForCluster | /**
* Get the UUID for the provided ZK watcher. Doesn't handle any ZK exceptions
* @param zkw watcher connected to an ensemble
* @return the UUID read from zookeeper
* @throws KeeperException if a ZooKeeper operation fails
*/
public static UUID getUUIDForCluster(ZKWatcher zkw) throws KeeperException {
String uuid = readClusterIdZNode(zkw);
return uuid == null ? null : UUID.fromString(uuid);
} | 3.68 |
flink_CollectSink_open | /**
* Initialize the connection with the Socket in the server.
*
* @param openContext the context.
*/
@Override
public void open(OpenContext openContext) throws Exception {
try {
client = new Socket(hostIp, port);
outputStream = client.getOutputStream();
streamWriter = new DataOutputViewStreamWrapper(outputStream);
} catch (IOException e) {
throw new IOException(
"Cannot get back the stream while opening connection to client at "
+ hostIp.toString()
+ ":"
+ port,
e);
}
} | 3.68 |
hibernate-validator_ConstraintHelper_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
hbase_WALEdit_isMetaEditFamily | /**
* Replaying WALs can read a Cell at a time, so this method is needed in those cases.
*/
public static boolean isMetaEditFamily(Cell cell) {
return CellUtil.matchingFamily(cell, METAFAMILY);
} | 3.68 |
aws-saas-boost_UpdateAction_addTarget | /**
* Adds a new target to this <code>UpdateAction</code>.
*
* e.g. if this is a <code>SERVICE</code>, the list of targets may be
* <code>[ "onboarding", "tenant" ]</code>.
*
* @param target the target to add
*/
public void addTarget(String target) {
targets.add(target);
} | 3.68 |
pulsar_ProxyConnection_handleLookup | /**
* Handles a discovery request from the client and sends the next active broker address.
*/
@Override
protected void handleLookup(CommandLookupTopic lookup) {
checkArgument(state == State.ProxyLookupRequests);
lookupProxyHandler.handleLookup(lookup);
} | 3.68 |
hbase_AsyncAdmin_getCompactionState | /**
* Get the current compaction state of a table. It could be in a major compaction, a minor
* compaction, both, or none.
* @param tableName table to examine
* @return the current compaction state wrapped by a {@link CompletableFuture}
*/
default CompletableFuture<CompactionState> getCompactionState(TableName tableName) {
return getCompactionState(tableName, CompactType.NORMAL);
} | 3.68 |
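A hedged usage sketch, assuming an already-built `AsyncAdmin` and an illustrative table name:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class CompactionStateSketch {
    // Logs the compaction state of an illustrative table without blocking the caller.
    static void logCompactionState(AsyncAdmin admin) {
        admin.getCompactionState(TableName.valueOf("my_table"))
                .thenAccept(state -> System.out.println("compaction state: " + state));
    }
}
```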
flink_DataSource_withParameters | /**
* Pass a configuration to the InputFormat.
*
* @param parameters Configuration parameters
*/
public DataSource<OUT> withParameters(Configuration parameters) {
this.parameters = parameters;
return this;
} | 3.68 |
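A hedged usage sketch with the legacy DataSet API; `readTextFile` returns a `DataSource`, so `withParameters` can be chained directly. The option key shown is the recursive-enumeration flag documented for file input formats, used here purely as an illustration:

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.configuration.Configuration;

public class WithParametersSketch {
    public static void main(String[] args) {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Illustrative parameter handed to the input format via withParameters().
        Configuration parameters = new Configuration();
        parameters.setBoolean("recursive.file.enumeration", true);

        DataSet<String> lines = env.readTextFile("file:///tmp/input")
                .withParameters(parameters);
        // A sink such as lines.print() would trigger execution here.
    }
}
```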
hadoop_OBSObjectBucketUtils_getObjectMetadata | /**
* Request object metadata; increments counters in the process.
*
* @param owner OBS File System instance
* @param key key
* @return the metadata
*/
protected static ObjectMetadata getObjectMetadata(final OBSFileSystem owner,
final String key) {
GetObjectMetadataRequest request = new GetObjectMetadataRequest();
request.setBucketName(owner.getBucket());
request.setObjectKey(key);
if (owner.getSse().isSseCEnable()) {
request.setSseCHeader(owner.getSse().getSseCHeader());
}
ObjectMetadata meta = owner.getObsClient().getObjectMetadata(request);
owner.getSchemeStatistics().incrementReadOps(1);
return meta;
} | 3.68 |
framework_SelectorPath_getLegacyLocatorQuery | /**
* Generates a legacy locator for SelectorPath.
*
* @return String containing Java code for element search and assignment
*/
private String getLegacyLocatorQuery() {
String name;
if (!path.isEmpty()) {
String[] frags = path.split("/");
name = getComponentName(frags[frags.length - 1]).substring(1);
} else {
name = "root";
}
if (legacyNames.containsKey(name)) {
name = legacyNames.get(name);
}
name = getNameWithCount(name);
// Use direct path and elementX naming style.
return "WebElement " + name.substring(0, 1).toLowerCase(Locale.ROOT)
+ name.substring(1) + " = getDriver().findElement(By.vaadin(\""
+ path + "\"));";
} | 3.68 |
hudi_OptionsResolver_allOptions | /**
* Returns all the config options with the given class {@code clazz}.
*/
public static List<ConfigOption<?>> allOptions(Class<?> clazz) {
Field[] declaredFields = clazz.getDeclaredFields();
List<ConfigOption<?>> options = new ArrayList<>();
for (Field field : declaredFields) {
if (java.lang.reflect.Modifier.isStatic(field.getModifiers())
&& field.getType().equals(ConfigOption.class)) {
try {
options.add((ConfigOption<?>) field.get(ConfigOption.class));
} catch (IllegalAccessException e) {
throw new HoodieException("Error while fetching static config option", e);
}
}
}
return options;
} | 3.68 |
hadoop_FedBalanceContext_setUseMountReadOnly | /**
* Use mount point readonly to disable write.
* @param value true if disabling write by setting mount point readonly.
* @return the builder.
*/
public Builder setUseMountReadOnly(boolean value) {
this.useMountReadOnly = value;
return this;
} | 3.68 |
framework_FlyweightRow_unattached | /**
* Creates a new iterator of unattached flyweight cells. A cell is
* unattached if it does not have a corresponding
* {@link FlyweightCell#getElement() DOM element} attached to the row
* element.
*
* @param cells
* the collection of cells to iterate
*/
public static CellIterator unattached(
final Collection<FlyweightCell> cells) {
return new CellIterator(cells, false);
} | 3.68 |
hudi_HoodieLogBlock_tryReadContent | /**
* Reads or skips the content of a log block in the log file, depending on whether lazy reading is
* enabled in {@link HoodieMergedLogRecordScanner}.
*/
public static Option<byte[]> tryReadContent(FSDataInputStream inputStream, Integer contentLength, boolean readLazily)
throws IOException {
if (readLazily) {
// Seek to the end of the content block
inputStream.seek(inputStream.getPos() + contentLength);
return Option.empty();
}
// TODO re-use buffer if stream is backed by buffer
// Read the contents in memory
byte[] content = new byte[contentLength];
inputStream.readFully(content, 0, contentLength);
return Option.of(content);
} | 3.68 |
hadoop_JobTokenIdentifier_readFields | /** {@inheritDoc} */
@Override
public void readFields(DataInput in) throws IOException {
jobid.readFields(in);
} | 3.68 |
hbase_KeyLocker_acquireLock | /**
* Return a lock for the given key. The lock is already locked.
*/
public ReentrantLock acquireLock(K key) {
if (key == null) throw new IllegalArgumentException("key must not be null");
lockPool.purge();
ReentrantLock lock = lockPool.get(key);
lock.lock();
return lock;
} | 3.68 |
pulsar_RocksdbMetadataStore_serialize | /**
* Note: we can only add new fields, but not change or remove existing fields.
*/
public byte[] serialize() {
byte[] result = new byte[HEADER_SIZE + data.length];
ByteBuffer buffer = ByteBuffer.wrap(result);
buffer.putInt(HEADER_SIZE);
buffer.putInt(FORMAT_VERSION_V1);
buffer.putLong(version);
buffer.putLong(owner);
buffer.putLong(createdTimestamp);
buffer.putLong(modifiedTimestamp);
buffer.put((byte) (ephemeral ? 1 : 0));
buffer.put(data);
return result;
} | 3.68 |
morf_Function_yyyymmddToDate | /**
* Helper method to create an instance of the "YYYYMMDDToDate" SQL function.
* {@code expression} must result in a string.
*
* @see Cast
* @param expression the expression to evaluate
* @return an instance of the YYYYMMDDToDate function
*/
public static Function yyyymmddToDate(AliasedField expression) {
return new Function(FunctionType.YYYYMMDD_TO_DATE, expression);
} | 3.68 |
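A hedged usage sketch building a select that converts an assumed `dateOfBirth` column stored as YYYYMMDD text; the Morf classes and package locations are taken from the other snippets in this set and from the Morf codebase:

```java
import org.alfasoftware.morf.sql.SelectStatement;
import org.alfasoftware.morf.sql.element.FieldReference;
import org.alfasoftware.morf.sql.element.Function;
import org.alfasoftware.morf.sql.element.TableReference;

public class YyyymmddToDateSketch {
    // Builds the equivalent of: SELECT YYYYMMDDToDate(dateOfBirth) AS dob FROM person
    static SelectStatement selectDateOfBirth() {
        return new SelectStatement(
                Function.yyyymmddToDate(new FieldReference("dateOfBirth")).as("dob"))
                .from(new TableReference("person"));
    }
}
```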
hudi_InternalSchemaChangeApplier_applyColumnCommentChange | /**
* Update col comment for hudi table.
*
* @param colName col name to be changed. if we want to change col from a nested filed, the fullName should be specify
* @param doc .
*/
public InternalSchema applyColumnCommentChange(String colName, String doc) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
updateChange.updateColumnComment(colName, doc);
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
} | 3.68 |
flink_CrossOperator_projectTuple20 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>
ProjectCross<
I1,
I2,
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>
projectTuple20() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>
tType =
new TupleTypeInfo<
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>(fTypes);
return new ProjectCross<
I1,
I2,
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hudi_HoodieSparkQuickstart_incrementalQuery | /**
* Hudi also provides the capability to obtain a stream of records that changed since a given commit timestamp.
* This can be achieved using Hudi’s incremental view and providing a begin time from which changes need to be streamed.
* We do not need to specify endTime if we want all changes after the given commit (as is the common case).
*/
public static void incrementalQuery(SparkSession spark, String tablePath, String tableName) {
List<String> commits =
spark.sql("select distinct(_hoodie_commit_time) as commitTime from hudi_ro_table order by commitTime")
.toJavaRDD()
.map((Function<Row, String>) row -> row.getString(0))
.take(50);
String beginTime = commits.get(commits.size() - 1); // commit time we are interested in
// incrementally query data
Dataset<Row> incViewDF = spark
.read()
.format("hudi")
.option("hoodie.datasource.query.type", "incremental")
.option("hoodie.datasource.read.begin.instanttime", beginTime)
.load(tablePath);
incViewDF.createOrReplaceTempView("hudi_incr_table");
spark.sql("select `_hoodie_commit_time`, fare, begin_lon, begin_lat, ts from hudi_incr_table where fare > 20.0")
.show();
} | 3.68 |
dubbo_ApplicationModel_setServiceRepository | /**
* @deprecated only for unit tests
*/
@Deprecated
public void setServiceRepository(ServiceRepository serviceRepository) {
this.serviceRepository = serviceRepository;
} | 3.68 |
hadoop_TFile_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = prime + blockIndex;
result = (int) (prime * result + recordIndex);
return result;
} | 3.68 |
hbase_KeyValue_shallowCopy | /**
* Creates a shallow copy of this KeyValue, reusing the data byte buffer.
* http://en.wikipedia.org/wiki/Object_copy
* @return Shallow copy of this KeyValue
*/
public KeyValue shallowCopy() {
KeyValue shallowCopy = new KeyValue(this.bytes, this.offset, this.length);
shallowCopy.setSequenceId(this.seqId);
return shallowCopy;
} | 3.68 |
hbase_WALKeyImpl_getClusterIds | /** Returns the set of cluster Ids that have consumed the change */
public List<UUID> getClusterIds() {
return clusterIds;
} | 3.68 |
hmily_HmilyHashLoadBalance_refresh | /**
* Refresh local invoker.
*
* @param invokers invokers
*/
@Override
public void refresh(final Collection<Invoker<T>> invokers) {
LOGGER.info("{} try to refresh RoundRobinLoadBalance's invoker cache, size= {} ", config.getSimpleObjectName(), CollectionUtils.isEmpty(invokers) ? 0 : invokers.size());
if (invokers == null || invokers.isEmpty()) {
sortedInvokersCache = null;
staticWeightInvokersCache = null;
return;
}
List<Invoker<T>> sortedInvokersTmp = new ArrayList<Invoker<T>>(invokers);
Collections.sort(sortedInvokersTmp, comparator);
sortedInvokersCache = sortedInvokersTmp;
staticWeightInvokersCache = LoadBalanceHelper.buildStaticWeightList(sortedInvokersTmp, config);
LOGGER.info(config.getSimpleObjectName() + " refresh HashLoadBalance's invoker cache done, staticWeightInvokersCache size="
+ (staticWeightInvokersCache == null || staticWeightInvokersCache.isEmpty() ? 0 : staticWeightInvokersCache.size())
+ ", sortedInvokersCache size="
+ (sortedInvokersCache == null || sortedInvokersCache.isEmpty() ? 0 : sortedInvokersCache.size()));
} | 3.68 |
hbase_MasterObserver_preListDecommissionedRegionServers | /**
* Called before listing decommissioned region servers.
*/
default void preListDecommissionedRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
hbase_RemoteProcedureDispatcher_addOperationToNode | /**
* Add a remote rpc.
* @param key the node identifier
*/
public void addOperationToNode(final TRemote key, RemoteProcedure rp)
throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException {
if (key == null) {
throw new NullTargetServerDispatchException(rp.toString());
}
BufferNode node = nodeMap.get(key);
if (node == null) {
// If null here, it means node has been removed because it crashed. This happens when server
// is expired in ServerManager. ServerCrashProcedure may or may not have run.
throw new NoServerDispatchException(key.toString() + "; " + rp.toString());
}
node.add(rp);
// Check our node still in the map; could have been removed by #removeNode.
if (!nodeMap.containsValue(node)) {
throw new NoNodeDispatchException(key.toString() + "; " + rp.toString());
}
} | 3.68 |
flink_TaskManagerServicesConfiguration_fromConfiguration | /**
* Utility method to extract TaskManager config parameters from the configuration and to sanity
* check them.
*
* @param configuration The configuration.
* @param resourceID resource ID of the task manager
* @param externalAddress identifying the IP address under which the TaskManager will be
* accessible
* @param localCommunicationOnly True if only local communication is possible. Use only in cases
* where only one task manager runs.
* @param taskExecutorResourceSpec resource specification of the TaskManager to start
* @param workingDirectory working directory of the TaskManager
* @return configuration of task manager services used to create them
*/
public static TaskManagerServicesConfiguration fromConfiguration(
Configuration configuration,
ResourceID resourceID,
String externalAddress,
boolean localCommunicationOnly,
TaskExecutorResourceSpec taskExecutorResourceSpec,
WorkingDirectory workingDirectory)
throws Exception {
String[] localStateRootDirs = ConfigurationUtils.parseLocalStateDirectories(configuration);
final Reference<File[]> localStateDirs;
if (localStateRootDirs.length == 0) {
localStateDirs =
Reference.borrowed(new File[] {workingDirectory.getLocalStateDirectory()});
} else {
File[] createdLocalStateDirs = new File[localStateRootDirs.length];
final String localStateDirectoryName = LOCAL_STATE_SUB_DIRECTORY_ROOT + resourceID;
for (int i = 0; i < localStateRootDirs.length; i++) {
createdLocalStateDirs[i] = new File(localStateRootDirs[i], localStateDirectoryName);
}
localStateDirs = Reference.owned(createdLocalStateDirs);
}
boolean localRecoveryMode = configuration.getBoolean(CheckpointingOptions.LOCAL_RECOVERY);
final QueryableStateConfiguration queryableStateConfig =
QueryableStateConfiguration.fromConfiguration(configuration);
long timerServiceShutdownTimeout =
configuration.get(AkkaOptions.ASK_TIMEOUT_DURATION).toMillis();
final RetryingRegistrationConfiguration retryingRegistrationConfiguration =
RetryingRegistrationConfiguration.fromConfiguration(configuration);
final int externalDataPort =
configuration.getInteger(NettyShuffleEnvironmentOptions.DATA_PORT);
String bindAddr =
configuration.getString(
TaskManagerOptions.BIND_HOST, NetUtils.getWildcardIPAddress());
InetAddress bindAddress = InetAddress.getByName(bindAddr);
final String classLoaderResolveOrder =
configuration.getString(CoreOptions.CLASSLOADER_RESOLVE_ORDER);
final String[] alwaysParentFirstLoaderPatterns =
CoreOptions.getParentFirstLoaderPatterns(configuration);
final int numIoThreads = ClusterEntrypointUtils.getPoolSize(configuration);
final String[] tmpDirs = ConfigurationUtils.parseTempDirectories(configuration);
// If TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID is not set, use the external address
// as the node id.
final String nodeId =
configuration
.getOptional(TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID)
.orElse(externalAddress);
return new TaskManagerServicesConfiguration(
configuration,
resourceID,
externalAddress,
bindAddress,
externalDataPort,
localCommunicationOnly,
tmpDirs,
localStateDirs,
localRecoveryMode,
queryableStateConfig,
ConfigurationParserUtils.getSlot(configuration),
ConfigurationParserUtils.getPageSize(configuration),
taskExecutorResourceSpec,
timerServiceShutdownTimeout,
retryingRegistrationConfiguration,
ConfigurationUtils.getSystemResourceMetricsProbingInterval(configuration),
FlinkUserCodeClassLoaders.ResolveOrder.fromString(classLoaderResolveOrder),
alwaysParentFirstLoaderPatterns,
numIoThreads,
nodeId);
} | 3.68 |
framework_VFilterSelect_updateStyleNames | /**
* Updates style names in suggestion popup to help theme building.
*
* @param uidl
* UIDL for the whole combo box
* @param componentState
* shared state of the combo box
*/
public void updateStyleNames(UIDL uidl,
AbstractComponentState componentState) {
debug("VFS.SP: updateStyleNames()");
setStyleName(
VFilterSelect.this.getStylePrimaryName() + "-suggestpopup");
menu.setStyleName(
VFilterSelect.this.getStylePrimaryName() + "-suggestmenu");
status.setClassName(
VFilterSelect.this.getStylePrimaryName() + "-status");
if (ComponentStateUtil.hasStyles(componentState)) {
for (String style : componentState.styles) {
if (!"".equals(style)) {
addStyleDependentName(style);
}
}
}
} | 3.68 |
hadoop_MultipartUploaderBuilderImpl_checksumOpt | /**
* Set checksum opt.
*/
@Override
public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) {
checkNotNull(chksumOpt);
checksumOpt = chksumOpt;
return getThisBuilder();
} | 3.68 |
hbase_QuotaTableUtil_createGetNamespaceSnapshotSize | /**
* Creates a {@code Get} to fetch the namespace's total snapshot size.
*/
static Get createGetNamespaceSnapshotSize(String namespace) {
Get g = new Get(getNamespaceRowKey(namespace));
g.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER);
return g;
} | 3.68 |
framework_TreeTable_isCollapsed | /**
* Checks if Item with given identifier is collapsed in the UI.
*
* <p>
*
* @param itemId
* the identifier of the checked Item
* @return true if the Item with given id is collapsed
* @see Collapsible#isCollapsed(Object)
*/
public boolean isCollapsed(Object itemId) {
return !getContainerStrategy().isNodeOpen(itemId);
} | 3.68 |
hibernate-validator_ConstraintDescriptorImpl_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <P> P run(PrivilegedAction<P> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
flink_StreamExchangeModeUtils_getGlobalStreamExchangeMode | /**
* The {@link GlobalStreamExchangeMode} should be determined by the {@link StreamGraphGenerator}
* in the future.
*/
@Deprecated
static Optional<GlobalStreamExchangeMode> getGlobalStreamExchangeMode(ReadableConfig config) {
return config.getOptional(ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE)
.map(
value -> {
try {
return GlobalStreamExchangeMode.valueOf(
convertLegacyShuffleMode(value).toUpperCase());
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
String.format(
"Unsupported value %s for config %s.",
value,
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE
.key()));
}
});
} | 3.68 |
morf_OracleDialect_createSequenceStartingFromExistingData | /**
* Returns a SQL statement to create a sequence for a table's autonumber column, where
* the sequence should start from the greater of either the autonumber column's start value
* or the maximum value for that column existing in the table.
*
* @param table Table for which the sequence should be created.
* @param onColumn The autonumber column.
* @return SQL string.
*/
private String createSequenceStartingFromExistingData(Table table, Column onColumn) {
String tableName = schemaNamePrefix() + truncatedTableName(table.getName());
String sequenceName = schemaNamePrefix() + sequenceName(table.getName());
return new StringBuilder("DECLARE query CHAR(255); \n")
.append("BEGIN \n")
.append(" SELECT 'CREATE SEQUENCE ").append(sequenceName).append(" START WITH ' || TO_CHAR(GREATEST(").append(onColumn.getAutoNumberStart()).append(", MAX(id)+1)) || ' CACHE 2000' INTO QUERY FROM \n")
.append(" (SELECT MAX(").append(onColumn.getName()).append(") AS id FROM ").append(tableName).append(" UNION SELECT 0 AS id FROM SYS.DUAL); \n")
.append(" EXECUTE IMMEDIATE query; \n")
.append("END;")
.toString();
} | 3.68 |
hbase_MiniHBaseCluster_startRegionServer | /**
* Starts a region server thread running
* @return New RegionServerThread
*/
public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException {
final Configuration newConf = HBaseConfiguration.create(conf);
return startRegionServer(newConf);
} | 3.68 |