name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
flink_JobGraph_getCheckpointingSettings | /**
* Gets the settings for asynchronous snapshots. This method returns null when checkpointing is
* not enabled.
*
* @return The snapshot settings
*/
public JobCheckpointingSettings getCheckpointingSettings() {
return snapshotSettings;
} | 3.68 |
hmily_HmilyMetaDataManager_get | /**
* Get data source meta data.
*
* @param resourceId the resource id
* @return data source metadata
*/
public static DataSourceMetaData get(final String resourceId) {
return DATASOURCE_META_CACHE.get(resourceId);
} | 3.68 |
hbase_HRegion_getWAL | /** Returns WAL in use for this region */
public WAL getWAL() {
return this.wal;
} | 3.68 |
querydsl_Expressions_timeTemplate | /**
* Create a new Template expression
*
* @param cl type of expression
* @param template template
* @param args template parameters
* @return template expression
*/
public static <T extends Comparable<?>> TimeTemplate<T> timeTemplate(Class<? extends T> cl, Template template, List<?> args) {
return new TimeTemplate<T>(cl, template, args);
} | 3.68 |
shardingsphere-elasticjob_DataSourceRegistry_getDataSource | /**
* Get {@link DataSource} by {@link RDBTracingStorageConfiguration}.
*
* @param dataSourceConfig data source configuration
* @return instance of {@link DataSource}
*/
public DataSource getDataSource(final RDBTracingStorageConfiguration dataSourceConfig) {
return dataSources.computeIfAbsent(dataSourceConfig, RDBTracingStorageConfiguration::createDataSource);
} | 3.68 |
flink_RpcUtils_getHostname | /**
* Returns the hostname onto which the given {@link RpcService} has been bound. If the {@link
* RpcService} has been started in local mode, then the hostname is {@code "localhost"}.
*
* @param rpcService to retrieve the hostname for
* @return hostname onto which the given {@link RpcService} has been bound or localhost
*/
public static String getHostname(RpcService rpcService) {
final String rpcServiceAddress = rpcService.getAddress();
return rpcServiceAddress != null && rpcServiceAddress.isEmpty()
? "localhost"
: rpcServiceAddress;
} | 3.68 |
flink_SharedBuffer_upsertEvent | /**
* Inserts or updates an event in cache.
*
* @param eventId id of the event
* @param event event body
*/
void upsertEvent(EventId eventId, Lockable<V> event) {
this.eventsBufferCache.put(eventId, event);
} | 3.68 |
flink_NettyMessageDecoder_onNewMessageReceived | /**
* Notifies that a new message is to be decoded.
*
* @param msgId The type of the message to be decoded.
* @param messageLength The length of the message to be decoded.
*/
void onNewMessageReceived(int msgId, int messageLength) {
this.msgId = msgId;
this.messageLength = messageLength;
} | 3.68 |
framework_DefaultErrorHandler_findRelevantThrowable | /**
* Vaadin wraps exceptions in its own types, and due to reflection usage there
* might also be other irrelevant exceptions that make no sense for Vaadin
* users (~ developers using Vaadin). This method tries to choose the
* relevant one to be reported.
*
* @since 7.2
* @param t
* a throwable passed to ErrorHandler
* @return the throwable that is relevant for Vaadin users
*/
public static Throwable findRelevantThrowable(Throwable t) {
try {
if ((t instanceof RpcInvocationException)
&& (t.getCause() instanceof InvocationTargetException)) {
/*
* RpcInvocationException (that always wraps irrelevant
* java.lang.reflect.InvocationTargetException) might only be
* relevant for core Vaadin developers.
*/
return findRelevantThrowable(t.getCause().getCause());
} else if (t instanceof MethodException) {
/*
* Method exception might only be relevant for core Vaadin
* developers.
*/
return t.getCause();
}
} catch (Exception e) {
// NOP, just return the original one
}
return t;
} | 3.68 |
flink_TimeWindowUtil_toUtcTimestampMills | /**
* Convert an epoch mills to timestamp mills which can describe a local date time.
*
* <p>For example: The timestamp string of epoch mills 5 in GMT+08:00 is 1970-01-01 08:00:05,
* the timestamp mills is 8 * 60 * 60 * 1000 + 5.
*
* @param epochMills the epoch mills.
* @param shiftTimeZone the timezone that the given timestamp mills has been shifted.
* @return the mills which can describe the local timestamp string in given timezone.
*/
public static long toUtcTimestampMills(long epochMills, ZoneId shiftTimeZone) {
// Long.MAX_VALUE is a flag of max watermark, directly return it
if (UTC_ZONE_ID.equals(shiftTimeZone) || Long.MAX_VALUE == epochMills) {
return epochMills;
}
LocalDateTime localDateTime =
LocalDateTime.ofInstant(Instant.ofEpochMilli(epochMills), shiftTimeZone);
return localDateTime.atZone(UTC_ZONE_ID).toInstant().toEpochMilli();
} | 3.68 |
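The shift described in the Javadoc above can be reproduced with plain `java.time`. The following is a standalone sketch, not the Flink class itself; it uses `ZoneOffset.UTC` in place of Flink's `UTC_ZONE_ID` constant and verifies the GMT+08:00 example from the comment.

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;

public class ShiftExample {
    // Re-interpret the wall-clock time of the given zone as if it were UTC.
    static long toUtcTimestampMills(long epochMills, ZoneId shiftTimeZone) {
        LocalDateTime localDateTime =
                LocalDateTime.ofInstant(Instant.ofEpochMilli(epochMills), shiftTimeZone);
        return localDateTime.atZone(ZoneOffset.UTC).toInstant().toEpochMilli();
    }

    public static void main(String[] args) {
        long shifted = toUtcTimestampMills(5L, ZoneId.of("GMT+08:00"));
        // Epoch milli 5 is 1970-01-01T08:00:00.005 in GMT+08:00,
        // so the shifted value is 8 * 60 * 60 * 1000 + 5 = 28800005.
        System.out.println(shifted == 28_800_005L); // true
    }
}
```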
AreaShop_GeneralRegion_limitGroupsOfSameCategory | /**
* Checks if two limitGroups are of the same category (same groups and worlds lists).
* @param firstGroup The first group
* @param secondGroup The second group
* @return true if the groups and worlds lists are the same, otherwise false
*/
private boolean limitGroupsOfSameCategory(String firstGroup, String secondGroup) {
List<String> firstGroups = plugin.getConfig().getStringList("limitGroups." + firstGroup + ".groups");
List<String> secondGroups = plugin.getConfig().getStringList("limitGroups." + secondGroup + ".groups");
if(!firstGroups.containsAll(secondGroups) || !secondGroups.containsAll(firstGroups)) {
return false;
}
List<String> firstWorlds = plugin.getConfig().getStringList("limitGroups." + firstGroup + ".worlds");
List<String> secondWorlds = plugin.getConfig().getStringList("limitGroups." + secondGroup + ".worlds");
return !(!firstWorlds.containsAll(secondWorlds) || !secondWorlds.containsAll(firstWorlds));
} | 3.68 |
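The mutual containsAll check above is an order-insensitive comparison of the two lists. A minimal standalone illustration with assumed group names ("vip" and "default" are not from the plugin's config):

```java
import java.util.List;

public class SameCategoryCheck {
    public static void main(String[] args) {
        List<String> firstGroups = List.of("vip", "default");
        List<String> secondGroups = List.of("default", "vip");
        // Equal as sets, even though the ordering differs.
        boolean sameCategory = firstGroups.containsAll(secondGroups)
                && secondGroups.containsAll(firstGroups);
        System.out.println(sameCategory); // true
    }
}
```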
flink_WindowValueState_value | /** Returns the current value for the state under current key and the given window. */
public RowData value(W window) throws IOException {
windowState.setCurrentNamespace(window);
return windowState.value();
} | 3.68 |
hbase_MasterProcedureScheduler_wakeTableExclusiveLock | /**
* Wake the procedures waiting for the specified table
* @param procedure the procedure releasing the lock
* @param table the name of the table that has the exclusive lock
*/
public void wakeTableExclusiveLock(final Procedure<?> procedure, final TableName table) {
schedLock();
try {
final LockAndQueue namespaceLock = locking.getNamespaceLock(table.getNamespaceAsString());
final LockAndQueue tableLock = locking.getTableLock(table);
int waitingCount = 0;
if (tableLock.releaseExclusiveLock(procedure)) {
waitingCount += wakeWaitingProcedures(tableLock);
}
if (namespaceLock.releaseSharedLock()) {
waitingCount += wakeWaitingProcedures(namespaceLock);
}
addToRunQueue(tableRunQueue, getTableQueue(table),
() -> procedure + " released the exclusive lock");
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.68 |
hbase_BusyRegionSplitPolicy_updateRate | /**
* Update the blocked request rate based on the number of blocked and total write requests in the
* last aggregation window, or since the last call to this method, whichever is farther back in
* time. Uses a weighted rate calculation based on the previous rate and new data.
* @return Updated blocked request rate.
*/
private synchronized float updateRate() {
float aggBlockedRate;
long curTime = EnvironmentEdgeManager.currentTime();
long newBlockedReqs = region.getBlockedRequestsCount();
long newWriteReqs = region.getWriteRequestsCount();
aggBlockedRate =
(newBlockedReqs - blockedRequestCount) / (newWriteReqs - writeRequestCount + 0.00001f);
if (curTime - prevTime >= aggregationWindow) {
blockedRate = aggBlockedRate;
prevTime = curTime;
blockedRequestCount = newBlockedReqs;
writeRequestCount = newWriteReqs;
} else if (curTime - startTime >= aggregationWindow) {
// Calculate the aggregate blocked rate as the weighted sum of
// previous window's average blocked rate and blocked rate in this window so far.
float timeSlice = (curTime - prevTime) / (aggregationWindow + 0.0f);
aggBlockedRate = (1 - timeSlice) * blockedRate + timeSlice * aggBlockedRate;
} else {
aggBlockedRate = 0.0f;
}
return aggBlockedRate;
} | 3.68 |
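In the mid-window branch above, the blended rate is a linear interpolation between the previous window's rate and the rate observed so far. A worked example with assumed numbers (none of these values come from HBase):

```java
public class BlockedRateBlend {
    public static void main(String[] args) {
        float previousWindowRate = 0.20f;   // blockedRate from the last full window
        float currentWindowRate = 0.50f;    // blocked/write ratio so far in this window
        float timeSlice = 1_000f / 5_000f;  // (curTime - prevTime) / aggregationWindow
        float blended = (1 - timeSlice) * previousWindowRate + timeSlice * currentWindowRate;
        System.out.println(blended);        // (1 - 0.2) * 0.20 + 0.2 * 0.50, about 0.26
    }
}
```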
framework_ButtonRenderer_isHtmlContentAllowed | /**
* Gets whether the data should be rendered as HTML (instead of text).
* <p>
* By default everything is rendered as text.
*
* @return <code>true</code> if the renderer renders HTML,
* <code>false</code> if the content is rendered as text
*
* @since 8.0.3
*/
public boolean isHtmlContentAllowed() {
return getState(false).htmlContentAllowed;
} | 3.68 |
hudi_DynamoTableUtils_waitUntilActive | /**
* Waits up to a specified amount of time for a specified DynamoDB table to
* move into the <code>ACTIVE</code> state. If the table does not exist or
* does not transition to the <code>ACTIVE</code> state after this time,
* then a SdkClientException is thrown.
*
* @param dynamo
* The DynamoDB client to use to make requests.
* @param tableName
* The name of the table whose status is being checked.
* @param timeout
* The maximum number of milliseconds to wait.
* @param interval
* The poll interval in milliseconds.
*
* @throws TableNeverTransitionedToStateException
* If the specified table does not exist or does not transition
* into the <code>ACTIVE</code> state before this method times
* out and stops polling.
* @throws InterruptedException
* If the thread is interrupted while waiting for the table to
* transition into the <code>ACTIVE</code> state.
*/
public static void waitUntilActive(final DynamoDbClient dynamo, final String tableName, final int timeout,
final int interval) throws InterruptedException, TableNeverTransitionedToStateException {
TableDescription table = waitForTableDescription(dynamo, tableName, TableStatus.ACTIVE, timeout, interval);
if (table == null || !table.tableStatus().equals(TableStatus.ACTIVE)) {
throw new TableNeverTransitionedToStateException(tableName, TableStatus.ACTIVE);
}
} | 3.68 |
framework_Slot_onAttach | /*
* (non-Javadoc)
*
* @see com.google.gwt.user.client.ui.Widget#onAttach()
*/
@Override
protected void onAttach() {
super.onAttach();
if (spacer != null) {
getElement().getParentElement().insertBefore(spacer, getElement());
}
} | 3.68 |
flink_AbstractFsCheckpointStorageAccess_decodePathFromReference | /**
* Decodes the given reference into a path. This method validates that the reference bytes start
* with the correct magic number (as written by {@link #encodePathAsReference(Path)}) and
* converts the remaining bytes back to a proper path.
*
* @param reference The bytes representing the reference.
* @return The path decoded from the reference.
* @throws IllegalArgumentException Thrown, if the bytes do not represent a proper reference.
*/
public static Path decodePathFromReference(CheckpointStorageLocationReference reference) {
if (reference.isDefaultReference()) {
throw new IllegalArgumentException("Cannot decode default reference");
}
final byte[] bytes = reference.getReferenceBytes();
final int headerLen = REFERENCE_MAGIC_NUMBER.length;
if (bytes.length > headerLen) {
// compare magic number
for (int i = 0; i < headerLen; i++) {
if (bytes[i] != REFERENCE_MAGIC_NUMBER[i]) {
throw new IllegalArgumentException(
"Reference starts with the wrong magic number");
}
}
// convert to string and path
try {
return new Path(
new String(
bytes,
headerLen,
bytes.length - headerLen,
StandardCharsets.UTF_8));
} catch (Exception e) {
throw new IllegalArgumentException("Reference cannot be decoded to a path", e);
}
} else {
throw new IllegalArgumentException("Reference too short.");
}
} | 3.68 |
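The decoder above expects a byte layout of a fixed magic-number prefix followed by the UTF-8 bytes of the path. Below is a standalone sketch of that round trip; the magic-number value and class name are made up for illustration and are not Flink's actual constants or API.

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class ReferenceLayoutSketch {
    static final byte[] MAGIC = {0x05, 0x5F, 0x3F, 0x18}; // illustrative magic number

    static byte[] encode(String path) {
        byte[] pathBytes = path.getBytes(StandardCharsets.UTF_8);
        byte[] out = new byte[MAGIC.length + pathBytes.length];
        System.arraycopy(MAGIC, 0, out, 0, MAGIC.length);
        System.arraycopy(pathBytes, 0, out, MAGIC.length, pathBytes.length);
        return out;
    }

    static String decode(byte[] bytes) {
        if (bytes.length <= MAGIC.length
                || !Arrays.equals(Arrays.copyOfRange(bytes, 0, MAGIC.length), MAGIC)) {
            throw new IllegalArgumentException("Not a valid reference");
        }
        return new String(bytes, MAGIC.length, bytes.length - MAGIC.length, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        byte[] ref = encode("s3://bucket/checkpoints/chk-42");
        System.out.println(decode(ref)); // s3://bucket/checkpoints/chk-42
    }
}
```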
framework_DefaultConnectionStateHandler_stopDialogTimer | /**
* Ensures the reconnect dialog does not pop up some time from now.
*/
private void stopDialogTimer() {
if (dialogShowTimer.isRunning()) {
dialogShowTimer.cancel();
}
} | 3.68 |
hudi_GenericRecordFullPayloadGenerator_typeConvert | /**
* Generate a random value according to the field's type.
*/
private Object typeConvert(Schema.Field field) {
Schema fieldSchema = field.schema();
if (isOption(fieldSchema)) {
fieldSchema = getNonNull(fieldSchema);
}
if (fieldSchema.getName().equals(DEFAULT_HOODIE_IS_DELETED_COL)) {
return false;
}
switch (fieldSchema.getType()) {
case BOOLEAN:
return random.nextBoolean();
case DOUBLE:
return random.nextDouble();
case FLOAT:
return random.nextFloat();
case INT:
return random.nextInt();
case LONG:
return random.nextLong();
case STRING:
return UUID.randomUUID().toString();
case ENUM:
List<String> enumSymbols = fieldSchema.getEnumSymbols();
return new GenericData.EnumSymbol(fieldSchema, enumSymbols.get(random.nextInt(enumSymbols.size())));
case RECORD:
return getNewPayload(fieldSchema);
case ARRAY:
Schema.Field elementField = new Schema.Field(field.name(), fieldSchema.getElementType(), "", null);
List listRes = new ArrayList();
int numEntriesToAdd = extraEntriesMap.getOrDefault(field.name(), 1);
while (numEntriesToAdd-- > 0) {
listRes.add(typeConvert(elementField));
}
return listRes;
case MAP:
Schema.Field valueField = new Schema.Field(field.name(), fieldSchema.getValueType(), "", null);
Map<String, Object> mapRes = new HashMap<String, Object>();
numEntriesToAdd = extraEntriesMap.getOrDefault(field.name(), 1);
while (numEntriesToAdd > 0) {
mapRes.put(UUID.randomUUID().toString(), typeConvert(valueField));
numEntriesToAdd--;
}
return mapRes;
case BYTES:
return ByteBuffer.wrap(UUID.randomUUID().toString().getBytes(Charset.defaultCharset()));
case FIXED:
return generateFixedType(fieldSchema);
default:
throw new IllegalArgumentException("Cannot handle type: " + fieldSchema.getType());
}
} | 3.68 |
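In the ENUM branch, `Random.nextInt(bound)` excludes its bound, so passing the full symbol count makes every symbol reachable. A minimal standalone illustration with assumed symbol names:

```java
import java.util.List;
import java.util.Random;

public class EnumPick {
    public static void main(String[] args) {
        // Assumed example symbols; any Avro enum's symbol list works the same way.
        List<String> enumSymbols = List.of("INSERT", "UPDATE", "DELETE");
        Random random = new Random();
        // nextInt(n) returns a value in [0, n), so every index 0..size-1 can be chosen.
        String chosen = enumSymbols.get(random.nextInt(enumSymbols.size()));
        System.out.println(chosen);
    }
}
```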
morf_XmlDataSetProducer_getName | /**
* @see org.alfasoftware.morf.metadata.Index#getName()
*/
@Override
public String getName() {
return indexName;
} | 3.68 |
graphhopper_TarjanSCC_getComponents | /**
* A list of arrays each containing the nodes of a strongly connected component. Components with only a single
* node are not included here, but need to be obtained using {@link #getSingleNodeComponents()}.
*/
public List<IntArrayList> getComponents() {
return components;
} | 3.68 |
framework_SerializableFunction_identity | /**
* Returns a function that always returns its input argument.
*
* @param <T>
* the type of the input and output objects to the function
* @return a function that always returns its input argument
*/
static <T> SerializableFunction<T, T> identity() {
return t -> t;
} | 3.68 |
pulsar_LinuxInfoUtils_getUsablePhysicalNICs | /**
* Get the paths of all usable physical NICs.
* @return All usable physical NIC paths.
*/
public static List<String> getUsablePhysicalNICs() {
try (Stream<Path> stream = Files.list(Paths.get(NIC_PATH))) {
return stream.filter(LinuxInfoUtils::isPhysicalNic)
.filter(LinuxInfoUtils::isUsable)
.map(path -> path.getFileName().toString())
.collect(Collectors.toList());
} catch (IOException e) {
log.error("[LinuxInfo] Failed to find NICs", e);
return Collections.emptyList();
}
} | 3.68 |
flink_EncodingUtils_escapeJava | /**
* Escapes the characters in a <code>String</code> using Java String rules.
*
* <p>Deals correctly with quotes and control-chars (tab, backslash, cr, ff, etc.)
*
* <p>So a tab becomes the characters <code>'\\'</code> and <code>'t'</code>.
*
* <p>The only difference between Java strings and JavaScript strings is that in JavaScript, a
* single quote must be escaped.
*
* <p>Example:
*
* <pre>
* input string: He didn't say, "Stop!"
* output string: He didn't say, \"Stop!\"
* </pre>
*
* @param str String to escape values in, may be null
* @return String with escaped values, <code>null</code> if null string input
*/
public static String escapeJava(String str) {
return escapeJavaStyleString(str, false);
} | 3.68 |
hbase_InfoServer_getPort | /**
* @return the port of the info server
* @deprecated Since 0.99.0
*/
@Deprecated
public int getPort() {
return this.httpServer.getPort();
} | 3.68 |
MagicPlugin_Mage_castMessage | /**
* Send a message to this Mage when a spell is cast.
*
* @param message The message to send
*/
@Override
public void castMessage(String message) {
if (!controller.showCastMessages()) return;
sendMessage(controller.getCastMessagePrefix(), message);
} | 3.68 |
hudi_HoodieAvroUtils_needsRewriteToString | /**
* Helper for recordNeedsRewriteForExtendedAvroSchemaEvolution. Returns true if the schema type is
* int, long, float, double, or bytes, because Avro doesn't support evolution from those types to
* string, so some intervention is needed.
*/
private static boolean needsRewriteToString(Schema schema) {
switch (schema.getType()) {
case INT:
case LONG:
case FLOAT:
case DOUBLE:
case BYTES:
return true;
default:
return false;
}
} | 3.68 |
framework_VScrollTable_getMinWidth | /**
* Returns the smallest possible cell width in pixels.
*
* @param includeIndent
* - width should include hierarchy column indent if
* applicable (VTreeTable only)
* @param includeCellExtraWidth
* - width should include paddings etc.
* @return the smallest possible cell width in pixels
*/
private int getMinWidth(boolean includeIndent,
boolean includeCellExtraWidth) {
int minWidth = sortIndicator.getOffsetWidth();
if (scrollBody != null) {
// check the need for indent before adding paddings etc.
if (includeIndent && isHierarchyColumn()) {
int maxIndent = scrollBody.getMaxIndent();
if (minWidth < maxIndent) {
minWidth = maxIndent;
}
}
if (includeCellExtraWidth) {
minWidth += scrollBody.getCellExtraWidth();
}
}
return minWidth;
} | 3.68 |
hbase_MultipleColumnPrefixFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof MultipleColumnPrefixFilter)) {
return false;
}
MultipleColumnPrefixFilter other = (MultipleColumnPrefixFilter) o;
return this.sortedPrefixes.equals(other.sortedPrefixes);
} | 3.68 |
flink_AbstractHeapPriorityQueue_clear | /** Clears the queue. */
public void clear() {
final int arrayOffset = getHeadElementIndex();
Arrays.fill(queue, arrayOffset, arrayOffset + size, null);
size = 0;
} | 3.68 |
framework_VAbstractCalendarPanel_getSubmitListener | /**
* Returns the submit listener that listens to selection made from the
* panel.
*
* @return The listener or NULL if no listener has been set
*/
public SubmitListener getSubmitListener() {
return submitListener;
} | 3.68 |
graphhopper_WaySegmentParser_setRelationProcessor | /**
* @param relationProcessor callback function that receives OSM relations during the second pass
*/
public Builder setRelationProcessor(RelationProcessor relationProcessor) {
waySegmentParser.relationProcessor = relationProcessor;
return this;
} | 3.68 |
morf_DatabaseUpgradeTableContribution_deployedViewsTable | /**
* @return The Table descriptor of DeployedViews
*/
public static TableBuilder deployedViewsTable() {
return table(DEPLOYED_VIEWS_NAME)
.columns(
column("name", DataType.STRING, 30).primaryKey(),
column("hash", DataType.STRING, 64),
column("sqlDefinition", DataType.CLOB).nullable()
);
} | 3.68 |
flink_AllWindowedStream_evictor | /**
* Sets the {@code Evictor} that should be used to evict elements from a window before emission.
*
* <p>Note: When using an evictor, window performance will degrade significantly, since
* incremental aggregation of window results cannot be used.
*/
@PublicEvolving
public AllWindowedStream<T, W> evictor(Evictor<? super T, ? super W> evictor) {
this.evictor = evictor;
return this;
} | 3.68 |
pulsar_BasicKubernetesManifestCustomizer_partialDeepClone | /**
* A clone where the maps and lists are properly cloned. The k8s resources themselves are shallow clones.
*/
public RuntimeOpts partialDeepClone() {
return new RuntimeOpts(
jobNamespace,
jobName,
extraLabels != null ? new HashMap<>(extraLabels) : null,
extraAnnotations != null ? new HashMap<>(extraAnnotations) : null,
nodeSelectorLabels != null ? new HashMap<>(nodeSelectorLabels) : null,
resourceRequirements,
tolerations != null ? new ArrayList<>(tolerations) : null
);
} | 3.68 |
framework_PropertyFormatter_getValue | /**
* Get the formatted value.
*
* @return If the datasource returns null, this is null. Otherwise this is
* the String given by format().
*/
@Override
public String getValue() {
T value = dataSource == null ? null : dataSource.getValue();
if (value == null) {
return null;
}
return format(value);
} | 3.68 |
framework_VDebugWindow_formatDuration | /**
* Formats the given milliseconds as hours, minutes, seconds and
* milliseconds.
*
* @param ms the duration in milliseconds
* @return the formatted duration string
*/
static String formatDuration(int ms) {
NumberFormat fmt = NumberFormat.getFormat("00");
String seconds = fmt.format((ms / 1000) % 60);
String minutes = fmt.format((ms / (1000 * 60)) % 60);
String hours = fmt.format((ms / (1000 * 60 * 60)) % 24);
String millis = NumberFormat.getFormat("000").format(ms % 1000);
return hours + "h " + minutes + "m " + seconds + "s " + millis + "ms";
} | 3.68 |
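GWT's NumberFormat is not available outside a GWT client, so the same arithmetic is easiest to verify with a plain-Java sketch using String.format (an equivalent, not the Vaadin code):

```java
public class DurationFormat {
    static String formatDuration(int ms) {
        return String.format("%02dh %02dm %02ds %03dms",
                (ms / (1000 * 60 * 60)) % 24,
                (ms / (1000 * 60)) % 60,
                (ms / 1000) % 60,
                ms % 1000);
    }

    public static void main(String[] args) {
        System.out.println(formatDuration(3_723_456)); // 01h 02m 03s 456ms
    }
}
```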
hbase_TableRecordReader_setTable | /**
* @param table the {@link Table} to scan.
*/
public void setTable(Table table) {
this.recordReaderImpl.setHTable(table);
} | 3.68 |
flink_StandardDeCompressors_getDecompressorForExtension | /**
* Gets the decompressor for a file extension. Returns null if there is no decompressor for this
* file extension.
*/
@Nullable
public static InflaterInputStreamFactory<?> getDecompressorForExtension(String extension) {
return DECOMPRESSORS.get(extension);
} | 3.68 |
flink_DeltaIteration_getInitialWorkset | /**
* Gets the initial workset. This is the data set passed to the method that starts the delta
* iteration.
*
* <p>Consider the following example:
*
* <pre>{@code
* DataSet<MyType> solutionSetData = ...;
* DataSet<AnotherType> worksetData = ...;
*
* DeltaIteration<MyType, AnotherType> iteration = solutionSetData.iteratorDelta(worksetData, 10, ...);
* }</pre>
*
* <p>The <tt>worksetData</tt> would be the data set returned by {@code
* iteration.getInitialWorkset();}.
*
* @return The data set that forms the initial workset.
*/
public DataSet<WT> getInitialWorkset() {
return initialWorkset;
} | 3.68 |
flink_HadoopDelegationTokenConverter_deserialize | /** Deserializes delegation tokens. */
public static Credentials deserialize(byte[] credentialsBytes) throws IOException {
try (DataInputStream dis =
new DataInputStream(new ByteArrayInputStream(credentialsBytes))) {
Credentials credentials = new Credentials();
credentials.readTokenStorageStream(dis);
return credentials;
}
} | 3.68 |
hudi_BufferedRandomAccessFile_read | /**
* Read specified number of bytes into given array starting at given offset.
* @param b - byte array
* @param off - start offset
* @param len - length of bytes to be read
* @return - number of bytes read.
* @throws IOException
*/
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (endOfBufferReached()) {
if (!loadNewBlockToBuffer()) {
return -1;
}
}
// copy data from buffer
len = Math.min(len, (int) (this.validLastPosition - this.currentPosition));
int buffOff = (int) (this.currentPosition - this.startPosition);
System.arraycopy(this.dataBuffer.array(), buffOff, b, off, len);
this.currentPosition += len;
return len;
} | 3.68 |
flink_CatalogManager_getDataTypeFactory | /** Returns a factory for creating fully resolved data types that can be used for planning. */
public DataTypeFactory getDataTypeFactory() {
return typeFactory;
} | 3.68 |
rocketmq-connect_AbstractKafkaSourceConnector_start | /**
* Start the component
*
* @param config the component configuration
*/
@Override
public void start(KeyValue config) {
this.configValue = new ConnectKeyValue();
config.keySet().forEach(key -> {
this.configValue.put(key, config.getString(key));
});
setConnectorClass(configValue);
taskConfig = new HashMap<>(configValue.config());
// get the source class name from config and create source task from reflection
try {
sourceConnector = Class.forName(taskConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG))
.asSubclass(org.apache.kafka.connect.source.SourceConnector.class)
.getDeclaredConstructor()
.newInstance();
} catch (Exception e) {
throw new ConnectException("Load task class failed, " + taskConfig.get(TaskConfig.TASK_CLASS_CONFIG));
}
} | 3.68 |
flink_SegmentsUtil_setDouble | /**
* set double from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setDouble(MemorySegment[] segments, int offset, double value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putDouble(offset, value);
} else {
setDoubleMultiSegments(segments, offset, value);
}
} | 3.68 |
hbase_HBaseTestingUtility_createLocalHRegion | /**
* Return a region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
* when done.
*/
public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
throws IOException {
return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
durability, wal, null, families);
} | 3.68 |
hbase_CatalogFamilyFormat_getMergeRegionsWithName | /**
* Returns Deserialized values of <qualifier,regioninfo> pairs taken from column values that
* match the regex 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
if (cells == null) {
return null;
}
Map<String, RegionInfo> regionsToMerge = null;
for (Cell cell : cells) {
if (!isMergeQualifierPrefix(cell)) {
continue;
}
// Ok. This cell is that of a info:merge* column.
RegionInfo ri = RegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength());
if (ri != null) {
if (regionsToMerge == null) {
regionsToMerge = new LinkedHashMap<>();
}
regionsToMerge.put(Bytes.toString(CellUtil.cloneQualifier(cell)), ri);
}
}
return regionsToMerge;
} | 3.68 |
AreaShop_FileManager_updateAllRegions | /**
* Update all regions.
* @param confirmationReceiver Optional CommandSender that should receive progress messages
*/
public void updateAllRegions(CommandSender confirmationReceiver) {
updateRegions(getRegions(), confirmationReceiver);
} | 3.68 |
framework_RowReference_set | /**
* Sets the identifying information for this row.
*
* @param rowIndex
* the index of the row
* @param row
* the row object
* @param element
* the element of the row
*/
public void set(int rowIndex, T row, TableRowElement element) {
this.rowIndex = rowIndex;
this.row = row;
this.element = element;
} | 3.68 |
hudi_HoodieMergeHandle_write | /**
* Go through an old record. Here if we detect a newer version shows up, we write the new one to the file.
*/
public void write(HoodieRecord<T> oldRecord) {
Schema oldSchema = config.populateMetaFields() ? writeSchemaWithMetaFields : writeSchema;
Schema newSchema = useWriterSchemaForCompaction ? writeSchemaWithMetaFields : writeSchema;
boolean copyOldRecord = true;
String key = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt);
TypedProperties props = config.getPayloadConfig().getProps();
if (keyToNewRecords.containsKey(key)) {
// If we have duplicate records that we are updating, then the hoodie record will be deflated after
// writing the first record. So make a copy of the record to be merged
HoodieRecord<T> newRecord = keyToNewRecords.get(key).newInstance();
try {
Option<Pair<HoodieRecord, Schema>> mergeResult = recordMerger.merge(oldRecord, oldSchema, newRecord, newSchema, props);
Schema combineRecordSchema = mergeResult.map(Pair::getRight).orElse(null);
Option<HoodieRecord> combinedRecord = mergeResult.map(Pair::getLeft);
if (combinedRecord.isPresent() && combinedRecord.get().shouldIgnore(combineRecordSchema, props)) {
// If it is an IGNORE_RECORD, just copy the old record, and do not update the new record.
copyOldRecord = true;
} else if (writeUpdateRecord(newRecord, oldRecord, combinedRecord, combineRecordSchema)) {
/*
* ONLY WHEN 1) we have an update for this key AND 2) We are able to successfully
* write the combined new value
*
* We no longer need to copy the old record over.
*/
copyOldRecord = false;
}
writtenRecordKeys.add(key);
} catch (Exception e) {
throw new HoodieUpsertException("Failed to combine/merge new record with old value in storage, for new record {"
+ keyToNewRecords.get(key) + "}, old value {" + oldRecord + "}", e);
}
}
if (copyOldRecord) {
try {
// NOTE: We're enforcing preservation of the record metadata to keep existing semantic
writeToFile(new HoodieKey(key, partitionPath), oldRecord, oldSchema, props, true);
} catch (IOException | RuntimeException e) {
String errMsg = String.format("Failed to merge old record into new file for key %s from old file %s to new file %s with writerSchema %s",
key, getOldFilePath(), newFilePath, writeSchemaWithMetaFields.toString(true));
LOG.debug("Old record is " + oldRecord);
throw new HoodieUpsertException(errMsg, e);
}
recordsWritten++;
}
} | 3.68 |
dubbo_NetUtils_isMulticastAddress | /**
* is multicast address or not
*
* @param host ipv4 address
* @return {@code true} if is multicast address
*/
public static boolean isMulticastAddress(String host) {
int i = host.indexOf('.');
if (i > 0) {
String prefix = host.substring(0, i);
if (StringUtils.isNumber(prefix)) {
int p = Integer.parseInt(prefix);
return p >= 224 && p <= 239;
}
}
return false;
} | 3.68 |
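The check above relies on IPv4 multicast addresses occupying 224.0.0.0/4, i.e. a first octet between 224 and 239. A self-contained version of the same check (with `StringUtils.isNumber` replaced by a plain digit test) behaves as follows:

```java
public class MulticastCheck {
    static boolean isMulticastAddress(String host) {
        int i = host.indexOf('.');
        if (i > 0) {
            String prefix = host.substring(0, i);
            if (prefix.chars().allMatch(Character::isDigit)) {
                int p = Integer.parseInt(prefix);
                return p >= 224 && p <= 239;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isMulticastAddress("239.255.255.250")); // true (SSDP multicast)
        System.out.println(isMulticastAddress("192.168.1.10"));    // false
    }
}
```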
streampipes_ExpandTitleToContentFilter_getInstance | /**
* Returns the singleton instance for ExpandTitleToContentFilter.
*/
public static ExpandTitleToContentFilter getInstance() {
return INSTANCE;
} | 3.68 |
flink_StreamElementQueueEntry_completeExceptionally | /** Not supported. Exceptions must be handled in the AsyncWaitOperator. */
@Override
default void completeExceptionally(Throwable error) {
throw new UnsupportedOperationException(
"This result future should only be used to set completed results.");
} | 3.68 |
hadoop_ContainerUpdates_getPromotionRequests | /**
* Returns Container Promotion Requests.
* @return Container Promotion Requests.
*/
public List<UpdateContainerRequest> getPromotionRequests() {
return promotionRequests;
} | 3.68 |
flink_FlinkMatchers_futureWillCompleteExceptionally | /**
* Checks whether a {@link CompletableFuture} will complete exceptionally within a certain time.
*/
public static <T> FutureWillFailMatcher<T> futureWillCompleteExceptionally(Duration timeout) {
return futureWillCompleteExceptionally(Throwable.class, timeout);
} | 3.68 |
morf_XmlDataSetProducer_tables | /**
* @see org.alfasoftware.morf.metadata.Schema#tables()
*/
@Override
public Collection<Table> tables() {
List<Table> tables = new ArrayList<>();
for (String tableName : tableNames()) {
tables.add(getTable(tableName));
}
return tables;
} | 3.68 |
framework_SuperDevMode_recompileIfNeeded | /**
* Triggers a widgetset recompile via the given code server, unless we are
* already running in SuperDevMode.
*
* @param serverUrl
*            The URL of the code server, or null/empty to use the default
*            (http://localhost:9876/)
* @return true if recompile started, false if we are running in
*         SuperDevMode
*/
protected static boolean recompileIfNeeded(String serverUrl) {
if (serverUrl == null || serverUrl.isEmpty()) {
serverUrl = "http://localhost:9876/";
} else {
if (serverUrl.contains(":")) {
serverUrl = "http://" + serverUrl + "/";
} else {
serverUrl = "http://" + serverUrl + ":9876/";
}
}
if (hasSession(SKIP_RECOMPILE)) {
getLogger().info("Running in SuperDevMode");
// When we get here, we are running in super dev mode
// Remove the flag so next reload will recompile
removeSession(SKIP_RECOMPILE);
// Remove the gwt flag so we will not end up in dev mode if we
// remove the url parameter manually
removeSession(getSuperDevModeHookKey());
return false;
}
recompileWidgetsetAndStartInDevMode(serverUrl);
return true;
} | 3.68 |
incubator-hugegraph-toolchain_DataTypeUtil_checkDataType | /**
* Check whether the type of the value is valid
*/
private static boolean checkDataType(String key, Object value,
DataType dataType) {
if (value instanceof Number && dataType.isNumber()) {
return parseNumber(key, value, dataType) != null;
}
return dataType.clazz().isInstance(value);
} | 3.68 |
hbase_AbstractMemStore_timeOfOldestEdit | /** Returns Oldest timestamp of all the Cells in the MemStore */
@Override
public long timeOfOldestEdit() {
return timeOfOldestEdit;
} | 3.68 |
querydsl_PolygonExpression_numInteriorRing | /**
* Returns the number of interior rings in this Polygon.
*
* @return number of interior rings
*/
public NumberExpression<Integer> numInteriorRing() {
if (numInteriorRing == null) {
numInteriorRing = Expressions.numberOperation(Integer.class, SpatialOps.NUM_INTERIOR_RING, mixin);
}
return numInteriorRing;
} | 3.68 |
hbase_BlockType_expectSpecific | /**
* Throws an exception if the block category passed is the special category meaning "all
* categories".
*/
public void expectSpecific() {
if (this == ALL_CATEGORIES) {
throw new IllegalArgumentException(
"Expected a specific block " + "category but got " + this);
}
} | 3.68 |
flink_YarnTaskExecutorRunner_main | /**
* The entry point for the YARN task executor runner.
*
* @param args The command line arguments.
*/
public static void main(String[] args) {
EnvironmentInformation.logEnvironmentInfo(LOG, "YARN TaskExecutor runner", args);
SignalHandler.register(LOG);
JvmShutdownSafeguard.installAsShutdownHook(LOG);
runTaskManagerSecurely(args);
} | 3.68 |
hadoop_NamenodeStatusReport_getNumOfBlocksPendingDeletion | /**
* Get the number of pending deletion blocks.
*
* @return Number of pending deletion blocks.
*/
public long getNumOfBlocksPendingDeletion() {
return this.numOfBlocksPendingDeletion;
} | 3.68 |
flink_RocksDBMemoryControllerUtils_calculateWriteBufferManagerCapacity | /**
* Calculate the actual memory capacity of write buffer manager, which would be shared among
* rocksDB instance(s). The formula to use here could refer to the doc of {@link
* #calculateActualCacheCapacity(long, double)}.
*
* @param totalMemorySize Total off-heap memory size reserved for RocksDB instance(s).
* @param writeBufferRatio The ratio of total memory size which would be reserved for write
* buffer manager and its over-capacity part.
* @return The actual calculated write buffer manager capacity.
*/
@VisibleForTesting
static long calculateWriteBufferManagerCapacity(long totalMemorySize, double writeBufferRatio) {
return (long) (2 * totalMemorySize * writeBufferRatio / 3);
} | 3.68 |
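A quick worked example of the formula above with assumed inputs (512 MB reserved off-heap and a write-buffer ratio of 0.5), under which the write buffer manager gets one third of the total:

```java
public class WriteBufferCapacity {
    public static void main(String[] args) {
        long totalMemorySize = 512L * 1024 * 1024;   // 536870912 bytes of off-heap memory
        double writeBufferRatio = 0.5;
        long capacity = (long) (2 * totalMemorySize * writeBufferRatio / 3);
        System.out.println(capacity);                // 178956970, i.e. roughly totalMemorySize / 3
    }
}
```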
hadoop_RegistryTypeUtils_validateEndpoint | /**
* Validate the endpoint by checking for null fields and other invalid
* conditions
* @param path path for exceptions
* @param endpoint endpoint to validate. May be null
* @throws InvalidRecordException on invalid entries
*/
public static void validateEndpoint(String path, Endpoint endpoint)
throws InvalidRecordException {
if (endpoint == null) {
throw new InvalidRecordException(path, "Null endpoint");
}
try {
endpoint.validate();
} catch (RuntimeException e) {
throw new InvalidRecordException(path, e.toString());
}
} | 3.68 |
hadoop_OBSCommonUtils_dateToLong | /**
* Date to long conversion. Handles null Dates that can be returned by OBS by
* returning 0
*
* @param date date from OBS query
* @return timestamp of the object
*/
public static long dateToLong(final Date date) {
if (date == null) {
return 0L;
}
return date.getTime() / OBSConstants.SEC2MILLISEC_FACTOR
* OBSConstants.SEC2MILLISEC_FACTOR;
} | 3.68 |
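The division-then-multiplication above truncates the timestamp to whole seconds. Assuming `OBSConstants.SEC2MILLISEC_FACTOR` is 1000 (an assumption, the constant's value is not shown here), the effect is:

```java
import java.util.Date;

public class DateTruncation {
    public static void main(String[] args) {
        long raw = new Date(1_234_567L).getTime();   // 1234567 ms since the epoch
        long truncated = raw / 1000 * 1000;          // integer division drops the 567 ms remainder
        System.out.println(truncated);               // 1234000
    }
}
```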
hbase_TableSplit_getLength | /**
* Returns the length of the split.
* @return The length of the split.
* @see org.apache.hadoop.mapreduce.InputSplit#getLength()
*/
@Override
public long getLength() {
return length;
} | 3.68 |
hbase_TableMapReduceUtil_initTableMapperJob | /**
* Use this before submitting a Multi TableMap job. It will appropriately set up the job.
* @param scans The list of {@link Scan} objects to read from.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is carrying all
* necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
* the distributed cache (tmpjars).
* @param initCredentials whether to initialize hbase auth credentials for the job
* @throws IOException When setting up the details fails.
*/
public static void initTableMapperJob(List<Scan> scans, Class<? extends TableMapper> mapper,
Class<?> outputKeyClass, Class<?> outputValueClass, Job job, boolean addDependencyJars,
boolean initCredentials) throws IOException {
job.setInputFormatClass(MultiTableInputFormat.class);
if (outputValueClass != null) {
job.setMapOutputValueClass(outputValueClass);
}
if (outputKeyClass != null) {
job.setMapOutputKeyClass(outputKeyClass);
}
job.setMapperClass(mapper);
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
List<String> scanStrings = new ArrayList<>();
for (Scan scan : scans) {
scanStrings.add(convertScanToString(scan));
}
job.getConfiguration().setStrings(MultiTableInputFormat.SCANS,
scanStrings.toArray(new String[scanStrings.size()]));
if (addDependencyJars) {
addDependencyJars(job);
}
if (initCredentials) {
initCredentials(job);
}
} | 3.68 |
hadoop_AMRMProxyService_getInterceptorClassNames | /**
* Returns the comma separated interceptor class names from the configuration.
*
* @param conf configuration
* @return the interceptor class names as an instance of ArrayList
*/
private List<String> getInterceptorClassNames(Configuration conf) {
String configuredInterceptorClassNames =
conf.get(
YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
YarnConfiguration.DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE);
List<String> interceptorClassNames = new ArrayList<>();
Collection<String> tempList =
StringUtils.getStringCollection(configuredInterceptorClassNames);
for (String item : tempList) {
interceptorClassNames.add(item.trim());
}
// Make sure DistributedScheduler is present at the beginning of the chain.
if (this.nmContext.isDistributedSchedulingEnabled()) {
interceptorClassNames.add(0, DistributedScheduler.class.getName());
}
return interceptorClassNames;
} | 3.68 |
zilla_HpackContext_staticIndex4 | // Index in static table for the given name of length 4
private static int staticIndex4(DirectBuffer name)
{
switch (name.getByte(3))
{
case 'e':
if (STATIC_TABLE[33].name.equals(name)) // date
{
return 33;
}
break;
case 'g':
if (STATIC_TABLE[34].name.equals(name)) // etag
{
return 34;
}
break;
case 'k':
if (STATIC_TABLE[45].name.equals(name)) // link
{
return 45;
}
break;
case 'm':
if (STATIC_TABLE[37].name.equals(name)) // from
{
return 37;
}
break;
case 't':
if (STATIC_TABLE[38].name.equals(name)) // host
{
return 38;
}
break;
case 'y':
if (STATIC_TABLE[59].name.equals(name)) // vary
{
return 59;
}
break;
}
return -1;
} | 3.68 |
hbase_HFileBlockIndex_writeSingleLevelIndex | /**
* Writes the block index data as a single level only. Does not do any block framing.
* @param out the buffered output stream to write the index to. Typically a stream
* writing into an {@link HFile} block.
* @param description a short description of the index being written. Used in a log message.
*/
public void writeSingleLevelIndex(DataOutput out, String description) throws IOException {
expectNumLevels(1);
if (!singleLevelOnly) throw new IOException("Single-level mode is turned off");
if (rootChunk.getNumEntries() > 0)
throw new IOException("Root-level entries already added in " + "single-level mode");
rootChunk = curInlineChunk;
curInlineChunk = new BlockIndexChunkImpl();
if (LOG.isTraceEnabled()) {
LOG.trace("Wrote a single-level " + description + " index with " + rootChunk.getNumEntries()
+ " entries, " + rootChunk.getRootSize() + " bytes");
}
indexBlockEncoder.encode(rootChunk, true, out);
} | 3.68 |
hudi_ConsistencyGuard_waitTill | /**
* Wait till the target visibility is reached.
*
* @param dirPath Directory Path
* @param files Files
* @param targetVisibility Target Visibility
* @throws IOException
* @throws TimeoutException
*/
default void waitTill(String dirPath, List<String> files, FileVisibility targetVisibility)
throws IOException, TimeoutException {
switch (targetVisibility) {
case APPEAR: {
waitTillAllFilesAppear(dirPath, files);
break;
}
case DISAPPEAR: {
waitTillAllFilesDisappear(dirPath, files);
break;
}
default:
throw new IllegalStateException("Unknown File Visibility");
}
} | 3.68 |
hbase_MetricsConnection_getRunnerStats | /** runnerStats metric */
public RunnerStats getRunnerStats() {
return runnerStats;
} | 3.68 |
flink_SourceTestSuiteBase_testSourceSingleSplit | /**
* Test connector source with only one split in the external system.
*
* <p>This test will create one split in the external system, write test data into it, and
* consume back via a Flink job with 1 parallelism.
*
* <p>The number and order of records consumed by Flink need to be identical to the test data
* written to the external system in order to pass this test.
*
* <p>A bounded source is required for this test.
*/
@TestTemplate
@DisplayName("Test source with single split")
public void testSourceSingleSplit(
TestEnvironment testEnv,
DataStreamSourceExternalContext<T> externalContext,
CheckpointingMode semantic)
throws Exception {
// Step 1: Preparation
TestingSourceSettings sourceSettings =
TestingSourceSettings.builder()
.setBoundedness(Boundedness.BOUNDED)
.setCheckpointingMode(semantic)
.build();
TestEnvironmentSettings envSettings =
TestEnvironmentSettings.builder()
.setConnectorJarPaths(externalContext.getConnectorJarPaths())
.build();
Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
// Step 2: Write test data to external system
List<T> testRecords = generateAndWriteTestData(0, externalContext, sourceSettings);
// Step 3: Build and execute Flink job
StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envSettings);
DataStreamSource<T> stream =
execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
.setParallelism(1);
CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
JobClient jobClient = submitJob(execEnv, "Source Single Split Test");
// Step 4: Validate test data
try (CollectResultIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
// Check test result
LOG.info("Checking test results");
checkResultWithSemantic(resultIterator, singletonList(testRecords), semantic, null);
}
// Step 5: Clean up
waitForJobStatus(jobClient, singletonList(JobStatus.FINISHED));
} | 3.68 |
dubbo_InternalThreadLocal_get | /**
* Returns the current value for the current thread
*/
@SuppressWarnings("unchecked")
@Override
public final V get() {
InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.get();
Object v = threadLocalMap.indexedVariable(index);
if (v != InternalThreadLocalMap.UNSET) {
return (V) v;
}
return initialize(threadLocalMap);
} | 3.68 |
hbase_TableDescriptorBuilder_setMaxFileSize | /**
* Sets the maximum size up to which a region can grow, after which a region split is
* triggered. The region size is represented by the size of the biggest store file in that
* region, i.e. If the biggest store file grows beyond the maxFileSize, then the region split is
* triggered. This defaults to a value of 256 MB.
* <p>
* This is not an absolute value and might vary. Assume that a single row exceeds the
* maxFileSize then the storeFileSize will be greater than maxFileSize since a single row cannot
* be split across multiple regions
* </p>
* @param maxFileSize The maximum file size that a store file can grow to before a split is
* triggered.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
} | 3.68 |
flink_StreamGraphHasherV2_generateDeterministicHash | /** Generates a deterministic hash from node-local properties and input and output edges. */
private byte[] generateDeterministicHash(
StreamNode node,
Hasher hasher,
Map<Integer, byte[]> hashes,
boolean isChainingEnabled,
StreamGraph streamGraph) {
// Include stream node to hash. We use the current size of the computed
// hashes as the ID. We cannot use the node's ID, because it is
// assigned from a static counter. This will result in two identical
// programs having different hashes.
generateNodeLocalHash(hasher, hashes.size());
// Include chained nodes to hash
for (StreamEdge outEdge : node.getOutEdges()) {
if (isChainable(outEdge, isChainingEnabled, streamGraph)) {
// Use the hash size again, because the nodes are chained to
// this node. This does not add a hash for the chained nodes.
generateNodeLocalHash(hasher, hashes.size());
}
}
byte[] hash = hasher.hash().asBytes();
// Make sure that all input nodes have their hash set before entering
// this loop (calling this method).
for (StreamEdge inEdge : node.getInEdges()) {
byte[] otherHash = hashes.get(inEdge.getSourceId());
// Sanity check
if (otherHash == null) {
throw new IllegalStateException(
"Missing hash for input node "
+ streamGraph.getSourceVertex(inEdge)
+ ". Cannot generate hash for "
+ node
+ ".");
}
for (int j = 0; j < hash.length; j++) {
hash[j] = (byte) (hash[j] * 37 ^ otherHash[j]);
}
}
if (LOG.isDebugEnabled()) {
String udfClassName = "";
if (node.getOperatorFactory() instanceof UdfStreamOperatorFactory) {
udfClassName =
((UdfStreamOperatorFactory) node.getOperatorFactory())
.getUserFunctionClassName();
}
LOG.debug(
"Generated hash '"
+ byteToHexString(hash)
+ "' for node "
+ "'"
+ node.toString()
+ "' {id: "
+ node.getId()
+ ", "
+ "parallelism: "
+ node.getParallelism()
+ ", "
+ "user function: "
+ udfClassName
+ "}");
}
return hash;
} | 3.68 |
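The input hashes are folded in byte-wise with a multiply-and-XOR step. A tiny standalone illustration with made-up byte values (not real node hashes):

```java
import java.util.Arrays;

public class HashCombine {
    public static void main(String[] args) {
        byte[] hash = {0x01, 0x02, 0x03, 0x04};       // node-local hash (illustrative values)
        byte[] otherHash = {0x10, 0x20, 0x30, 0x40};  // hash of one input node (illustrative)
        for (int j = 0; j < hash.length; j++) {
            hash[j] = (byte) (hash[j] * 37 ^ otherHash[j]);
        }
        System.out.println(Arrays.toString(hash));    // [53, 106, 95, -44]
    }
}
```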
hbase_MapReduceHFileSplitterJob_usage | /**
* Print usage
* @param errorMsg Error message. Can be null.
*/
private void usage(final String errorMsg) {
if (errorMsg != null && errorMsg.length() > 0) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: " + NAME + " [options] <HFile inputdir(s)> <table>");
System.err.println("Read all HFile's for <table> and split them to <table> region boundaries.");
System.err.println("<table> table to load.\n");
System.err.println("To generate HFiles for a bulk data load, pass the option:");
System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
System.err.println("Other options:");
System.err.println(" -D " + JOB_NAME_CONF_KEY
+ "=jobName - use the specified mapreduce job name for the HFile splitter");
System.err.println("For performance also consider the following options:\n"
+ " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false");
} | 3.68 |
hbase_StripeStoreFileManager_getCandidateFilesForRowKeyBefore | /**
* See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} for details on this
* method.
*/
@Override
public Iterator<HStoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
KeyBeforeConcatenatedLists result = new KeyBeforeConcatenatedLists();
// Order matters for this call.
result.addSublist(state.level0Files);
if (!state.stripeFiles.isEmpty()) {
int lastStripeIndex = findStripeForRow(CellUtil.cloneRow(targetKey), false);
for (int stripeIndex = lastStripeIndex; stripeIndex >= 0; --stripeIndex) {
result.addSublist(state.stripeFiles.get(stripeIndex));
}
}
return result.iterator();
} | 3.68 |
flink_BufferBuilder_trim | /**
* The resulting capacity cannot be greater than the capacity of the allocated memory segment. It
* also cannot be less than the amount of data already written.
*/
public void trim(int newSize) {
maxCapacity =
Math.min(Math.max(newSize, positionMarker.getCached()), buffer.getMaxCapacity());
} | 3.68 |
flink_StreamArrowPythonGroupWindowAggregateFunctionOperator_registerCleanupTimer | /**
* Registers a timer to cleanup the content of the window.
*
* @param window the window whose state to discard
*/
private void registerCleanupTimer(W window) {
long cleanupTime = toEpochMillsForTimer(cleanupTime(window), shiftTimeZone);
if (cleanupTime == Long.MAX_VALUE) {
// don't set a GC timer for "end of time"
return;
}
if (windowAssigner.isEventTime()) {
triggerContext.registerEventTimeTimer(cleanupTime);
} else {
triggerContext.registerProcessingTimeTimer(cleanupTime);
}
} | 3.68 |
querydsl_AbstractHibernateQuery_setFetchSize | /**
* Set a fetch size for the underlying JDBC query.
* @param fetchSize the fetch size
* @return the current object
*/
@SuppressWarnings("unchecked")
public Q setFetchSize(int fetchSize) {
this.fetchSize = fetchSize;
return (Q) this;
} | 3.68 |
framework_VSlider_setMinValue | /**
* Sets the minimum value for slider.
*
* @param value
* the minimum value to use
*/
public void setMinValue(double value) {
min = value;
} | 3.68 |
framework_ComboBox_sanitizeList | /**
* Makes correct sublist of given list of options.
* <p>
* If paint is not an option request (affected by page or filter change),
* page will be the one where possible selection exists.
* <p>
* Detects proper first and last item in list to return right page of
* options. Also, if the current page is beyond the end of the list, it will
* be adjusted.
* <p>
* Package private only for testing purposes.
*
* @param options
* @param needNullSelectOption
* flag to indicate if nullselect option needs to be taken into
* consideration
*/
List<?> sanitizeList(List<?> options, boolean needNullSelectOption) {
int totalRows = options.size() + (needNullSelectOption ? 1 : 0);
if (pageLength != 0 && totalRows > pageLength) {
// options will not fit on one page
int indexToEnsureInView = -1;
// if not an option request (item list when user changes page), go
// to page with the selected item after filtering if accepted by
// filter
Object selection = getValue();
if (isScrollToSelectedItem() && !optionRequest
&& selection != null) {
// ensure proper page
indexToEnsureInView = options.indexOf(selection);
}
int size = options.size();
currentPage = adjustCurrentPage(currentPage, needNullSelectOption,
indexToEnsureInView, size);
int first = getFirstItemIndexOnCurrentPage(needNullSelectOption);
int last = getLastItemIndexOnCurrentPage(needNullSelectOption, size,
first);
return options.subList(first, last + 1);
} else {
return options;
}
} | 3.68 |
hadoop_ECBlockGroup_getParityBlocks | /**
* Get parity blocks
* @return parity blocks
*/
public ECBlock[] getParityBlocks() {
return parityBlocks;
} | 3.68 |
flink_SessionContext_close | /** Close resources, e.g. catalogs. */
public void close() {
operationManager.close();
for (String name : sessionState.catalogManager.listCatalogs()) {
try {
sessionState.catalogManager.getCatalog(name).ifPresent(Catalog::close);
} catch (Throwable t) {
LOG.error(
String.format(
"Failed to close catalog %s for the session %s.", name, sessionId),
t);
}
}
try {
userClassloader.close();
} catch (IOException e) {
LOG.error(
String.format(
"Error while closing class loader for the session %s.", sessionId),
e);
}
try {
sessionState.resourceManager.close();
} catch (IOException e) {
LOG.error(
String.format(
"Failed to close the resource manager for the session %s.", sessionId),
e);
}
} | 3.68 |
flink_RestClusterClient_updateJobResourceRequirements | /**
* Update {@link JobResourceRequirements} of a given job.
*
* @param jobId jobId specifies the job for which to change the resource requirements
* @param jobResourceRequirements new resource requirements for the provided job
* @return Future which is completed upon successful operation.
*/
public CompletableFuture<Acknowledge> updateJobResourceRequirements(
JobID jobId, JobResourceRequirements jobResourceRequirements) {
final JobMessageParameters params = new JobMessageParameters();
params.jobPathParameter.resolve(jobId);
return sendRequest(
JobResourcesRequirementsUpdateHeaders.INSTANCE,
params,
new JobResourceRequirementsBody(jobResourceRequirements))
.thenApply(ignored -> Acknowledge.get());
} | 3.68 |
hbase_WALFactory_createStreamReader | /**
* Create a one-way stream reader for a given path.
*/
public static WALStreamReader createStreamReader(FileSystem fs, Path path, Configuration conf,
long startPosition) throws IOException {
return getInstance(conf).createStreamReader(fs, path, (CancelableProgressable) null,
startPosition);
} | 3.68 |
hadoop_ApplicationRowKey_getRowKey | /**
* Constructs a row key for the application table as follows:
* {@code clusterId!userName!flowName!flowRunId!AppId}.
*
* @return byte array with the row key
*/
public byte[] getRowKey() {
return appRowKeyConverter.encode(this);
} | 3.68 |
flink_AbstractKeyedStateBackend_setCurrentKey | /** @see KeyedStateBackend */
@Override
public void setCurrentKey(K newKey) {
notifyKeySelected(newKey);
this.keyContext.setCurrentKey(newKey);
this.keyContext.setCurrentKeyGroupIndex(
KeyGroupRangeAssignment.assignToKeyGroup(newKey, numberOfKeyGroups));
} | 3.68 |
framework_VAbsoluteLayout_setPosition | /**
* Set the position for the wrapper in the layout.
*
* @param position
* The position string
*/
public void setPosition(String position) {
if (css == null || !css.equals(position)) {
css = position;
top = right = bottom = left = zIndex = null;
if (!css.isEmpty()) {
for (String property : css.split(";")) {
String[] keyValue = property.split(":");
if (keyValue[0].equals("left")) {
left = keyValue[1];
} else if (keyValue[0].equals("top")) {
top = keyValue[1];
} else if (keyValue[0].equals("right")) {
right = keyValue[1];
} else if (keyValue[0].equals("bottom")) {
bottom = keyValue[1];
} else if (keyValue[0].equals("z-index")) {
zIndex = keyValue[1];
}
}
}
// ensure new values
Style style = getElement().getStyle();
style.setProperty("zIndex", zIndex);
style.setProperty("top", top);
style.setProperty("left", left);
style.setProperty("right", right);
style.setProperty("bottom", bottom);
}
updateCaptionPosition();
} | 3.68 |
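The parsing loop above simply splits the CSS string on `;` and `:`. For an assumed position string (not taken from Vaadin), the extracted values look like this:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class PositionParse {
    public static void main(String[] args) {
        String css = "top:10px;left:20px;z-index:5";   // assumed example position string
        Map<String, String> values = new LinkedHashMap<>();
        for (String property : css.split(";")) {
            String[] keyValue = property.split(":");
            values.put(keyValue[0], keyValue[1]);
        }
        System.out.println(values); // {top=10px, left=20px, z-index=5}
    }
}
```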
hadoop_ListResultEntrySchema_isDirectory | /**
* Get the isDirectory value.
*
* @return the isDirectory value
*/
public Boolean isDirectory() {
return isDirectory;
} | 3.68 |
hadoop_Summarizer_toString | /**
* Summarizes the current {@link Gridmix} run and the cluster used.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(executionSummarizer.toString());
builder.append(clusterSummarizer.toString());
return builder.toString();
} | 3.68 |
framework_TestSampler_registerComponent | /**
* Register a component to the TestSampler for style name changes/additions.
*
* @param component the component to register
*/
public void registerComponent(Component component) {
components.add(component);
} | 3.68 |
hadoop_JsonSerDeser_load | /**
* Load from a Hadoop filesystem
* @param fs filesystem
* @param path path
* @return a loaded CD
* @throws IOException IO problems
* @throws JsonParseException parse problems
* @throws JsonMappingException O/J mapping problems
*/
public T load(FileSystem fs, Path path) throws IOException {
FSDataInputStream dataInputStream = fs.open(path);
return fromStream(dataInputStream);
} | 3.68 |
framework_VUI_getTheme | /**
* @return the name of the theme in use by this UI.
* @deprecated as of 7.3. Use {@link UIConnector#getActiveTheme()} instead.
*/
@Deprecated
public String getTheme() {
return ((UIConnector) ConnectorMap.get(connection).getConnector(this))
.getActiveTheme();
} | 3.68 |
hadoop_StringValueMin_reset | /**
* reset the aggregator
*/
public void reset() {
minVal = null;
} | 3.68 |
framework_Label_setConverter | /**
* Sets the converter used to convert the label value to the property data
* source type. The converter must have a presentation type of String.
*
* @param converter
* The new converter to use.
*/
public void setConverter(Converter<String, ?> converter) {
this.converter = (Converter<String, Object>) converter;
markAsDirty();
} | 3.68 |
pulsar_ManagedCursorContainer_add | /**
* Add a cursor to the container. The cursor will be optionally tracked for the slowest reader when
* a position is passed as the second argument. It is expected that the position is updated with
* {@link #cursorUpdated(ManagedCursor, Position)} method when the position changes.
*
* @param cursor cursor to add
* @param position position of the cursor to use for ordering, pass null if the cursor's position shouldn't be
* tracked for the slowest reader.
*/
public void add(ManagedCursor cursor, Position position) {
long stamp = rwLock.writeLock();
try {
Item item = new Item(cursor, (PositionImpl) position, position != null ? heap.size() : -1);
cursors.put(cursor.getName(), item);
if (position != null) {
heap.add(item);
if (heap.size() > 1) {
siftUp(item);
}
}
if (cursor.isDurable()) {
durableCursorCount++;
}
} finally {
rwLock.unlockWrite(stamp);
}
} | 3.68 |