name | code_snippet | score |
---|---|---|
flink_KvStateSerializer_deserializeValue | /**
* Deserializes the value with the given serializer.
*
* @param serializedValue Serialized value of type T
* @param serializer Serializer for T
* @param <T> Type of the value
* @return Deserialized value or <code>null</code> if the serialized value is <code>null</code>
* @throws IOException On failure during deserialization
*/
public static <T> T deserializeValue(byte[] serializedValue, TypeSerializer<T> serializer)
throws IOException {
if (serializedValue == null) {
return null;
} else {
final DataInputDeserializer deser =
new DataInputDeserializer(serializedValue, 0, serializedValue.length);
final T value = serializer.deserialize(deser);
if (deser.available() > 0) {
throw new IOException(
"Unconsumed bytes in the deserialized value. "
+ "This indicates a mismatch in the value serializers "
+ "used by the KvState instance and this access.");
}
return value;
}
} | 3.68 |
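A minimal round-trip sketch of how this helper might be exercised; the `serializeValue` counterpart and the class/package locations are assumptions based on Flink's queryable-state client module.

```java
import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.queryablestate.client.state.serialization.KvStateSerializer;

public class KvStateRoundTripDemo {
    public static void main(String[] args) throws Exception {
        // Serialize a value and read it back with the same serializer.
        byte[] bytes = KvStateSerializer.serializeValue("hello", StringSerializer.INSTANCE);
        String restored = KvStateSerializer.deserializeValue(bytes, StringSerializer.INSTANCE);
        // A null byte array simply yields null instead of an exception.
        String nothing = KvStateSerializer.deserializeValue(null, StringSerializer.INSTANCE);
        System.out.println(restored + " / " + nothing); // hello / null
    }
}
```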
flink_MapViewSerializer_transformLegacySerializerSnapshot | /**
* We need to override this as a {@link LegacySerializerSnapshotTransformer} because in Flink
* 1.6.x and below, this serializer was incorrectly returning directly the snapshot of the
* nested map serializer as its own snapshot.
*
* <p>This method transforms the incorrect map serializer snapshot to be a proper {@link
* MapViewSerializerSnapshot}.
*/
@Override
public <U> TypeSerializerSnapshot<MapView<K, V>> transformLegacySerializerSnapshot(
TypeSerializerSnapshot<U> legacySnapshot) {
if (legacySnapshot instanceof MapViewSerializerSnapshot) {
return (TypeSerializerSnapshot<MapView<K, V>>) legacySnapshot;
} else {
throw new UnsupportedOperationException(
legacySnapshot.getClass().getCanonicalName() + " is not supported.");
}
} | 3.68 |
hadoop_YarnClientUtils_getYarnConfWithRmHaId | /**
* Returns a {@link YarnConfiguration} built from the {@code conf} parameter
* that is guaranteed to have the {@link YarnConfiguration#RM_HA_ID}
* property set.
*
* @param conf the base configuration
* @return a {@link YarnConfiguration} built from the base
* {@link Configuration}
* @throws IOException thrown if the {@code conf} parameter contains
* inconsistent properties
*/
@VisibleForTesting
static YarnConfiguration getYarnConfWithRmHaId(Configuration conf)
throws IOException {
YarnConfiguration yarnConf = new YarnConfiguration(conf);
if (yarnConf.get(YarnConfiguration.RM_HA_ID) == null) {
// If RM_HA_ID is not configured, use the first of RM_HA_IDS.
// Any valid RM HA ID should work.
String[] rmIds = yarnConf.getStrings(YarnConfiguration.RM_HA_IDS);
if ((rmIds != null) && (rmIds.length > 0)) {
yarnConf.set(YarnConfiguration.RM_HA_ID, rmIds[0]);
} else {
throw new IOException("RM_HA_IDS property is not set for HA resource "
+ "manager");
}
}
return yarnConf;
} | 3.68 |
graphhopper_PointList_parse2DJSON | /**
 * Takes the string from a JSON array such as [lon1,lat1], [lon2,lat2], ... and fills the list
 * from it.
*/
public void parse2DJSON(String str) {
for (String latlon : str.split("\\[")) {
if (latlon.trim().length() == 0)
continue;
String[] ll = latlon.split(",");
String lat = ll[1].replace("]", "").trim();
add(Double.parseDouble(lat), Double.parseDouble(ll[0].trim()), Double.NaN);
}
} | 3.68 |
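A short usage sketch, assuming GraphHopper's `PointList`; the constructor signature matches the one used in `calcPoints` further down, while the `getLat`/`getLon` accessors are assumptions that may vary across versions.

```java
import com.graphhopper.util.PointList;

public class Parse2DJsonDemo {
    public static void main(String[] args) {
        PointList points = new PointList(4, false); // 2D list with initial capacity 4
        // Input pairs are [lon,lat]; the parser swaps them so the list stores (lat, lon).
        points.parse2DJSON("[13.4050,52.5200],[13.4105,52.5243]");
        System.out.println(points.getLat(0) + "," + points.getLon(0)); // 52.52,13.405
    }
}
```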
hadoop_CachingBlockManager_cancelPrefetches | /**
* Requests cancellation of any previously issued prefetch requests.
*/
@Override
public void cancelPrefetches() {
BlockOperations.Operation op = ops.cancelPrefetches();
for (BufferData data : bufferPool.getAll()) {
// We add blocks being prefetched to the local cache so that the prefetch is not wasted.
if (data.stateEqualsOneOf(BufferData.State.PREFETCHING, BufferData.State.READY)) {
requestCaching(data);
}
}
ops.end(op);
} | 3.68 |
framework_Range_splitAtFromStart | /**
* Split the range into two after a certain number of integers into the
* range.
* <p>
* Calling this method is equivalent to calling
* <code>{@link #splitAt(int) splitAt}({@link #getStart()}+length);</code>
* <p>
* <em>Example:</em>
* <code>[5..10[.splitAtFromStart(2) == [5..7[, [7..10[</code>
*
* @param length
* the length at which to split this range into two
* @return an array of two ranges, having the <code>length</code>-first
* elements of this range, and the second range having the rest. If
* <code>length</code> ≤ 0, the first element will be empty, and
* the second element will be this range. If <code>length</code>
* ≥ {@link #length()}, the first element will be this range,
* and the second element will be empty.
*/
public Range[] splitAtFromStart(final int length) {
return splitAt(getStart() + length);
} | 3.68 |
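A tiny sketch mirroring the Javadoc example, assuming the `Range.between(start, end)` factory of the same class:

```java
Range range = Range.between(5, 10);         // [5..10[
Range[] parts = range.splitAtFromStart(2);  // parts[0] == [5..7[, parts[1] == [7..10[
```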
framework_GridDragSourceConnector_getRowDragData | /**
* Gets drag data provided by the generator functions.
*
* @param row
* The row data.
* @return The generated drag data type mapped to the corresponding drag
* data. If there are no generator functions, returns an empty map.
*/
private Map<String, String> getRowDragData(JsonObject row) {
// Collect a map of data types and data that is provided by the
// generator functions set for this drag source
if (row.hasKey(GridDragSourceState.JSONKEY_DRAG_DATA)) {
JsonObject dragData = row
.getObject(GridDragSourceState.JSONKEY_DRAG_DATA);
return Arrays.stream(dragData.keys()).collect(
Collectors.toMap(Function.identity(), dragData::get));
}
// Otherwise return empty map
return Collections.emptyMap();
} | 3.68 |
hadoop_ManifestCommitter_getJobUniqueId | /**
* Get the unique ID of this job.
* @return job ID (yarn, spark)
*/
public String getJobUniqueId() {
return baseConfig.getJobUniqueId();
} | 3.68 |
flink_SavepointMetadataV2_getExistingOperators | /** @return List of {@link OperatorState} that already exist within the savepoint. */
public List<OperatorState> getExistingOperators() {
return operatorStateIndex.values().stream()
.filter(OperatorStateSpecV2::isExistingState)
.map(OperatorStateSpecV2::asExistingState)
.collect(Collectors.toList());
} | 3.68 |
framework_ResourceLoader_getResourceLoader | /**
* Gets the resource loader that has fired this event.
*
* @return the resource loader
*/
public ResourceLoader getResourceLoader() {
return loader;
} | 3.68 |
framework_TreeTable_setCollapsed | /**
* Sets the Item specified by given identifier as collapsed or expanded. If
* the Item is collapsed, its children are not displayed to the user.
*
* @param itemId
* the identifier of the Item
* @param collapsed
* true if the Item should be collapsed, false if expanded
*/
public void setCollapsed(Object itemId, boolean collapsed) {
if (isCollapsed(itemId) != collapsed) {
if (null == toggledItemId && !isRowCacheInvalidated()
&& getVisibleItemIds().contains(itemId)) {
// optimization: partial refresh if only one item is
// collapsed/expanded
toggledItemId = itemId;
toggleChildVisibility(itemId, false);
} else {
// make sure a full refresh takes place - otherwise neither
// partial nor full repaint of table content is performed
toggledItemId = null;
toggleChildVisibility(itemId, true);
}
}
} | 3.68 |
graphhopper_Path_calcPoints | /**
 * This method calculates a list of points for this path
* <p>
*
* @return the geometry of this path
*/
public PointList calcPoints() {
final PointList points = new PointList(edgeIds.size() + 1, nodeAccess.is3D());
if (edgeIds.isEmpty()) {
if (isFound()) {
points.add(nodeAccess, endNode);
}
return points;
}
int tmpNode = getFromNode();
points.add(nodeAccess, tmpNode);
forEveryEdge(new EdgeVisitor() {
@Override
public void next(EdgeIteratorState eb, int index, int prevEdgeId) {
PointList pl = eb.fetchWayGeometry(FetchMode.PILLAR_AND_ADJ);
for (int j = 0; j < pl.size(); j++) {
points.add(pl, j);
}
}
@Override
public void finish() {
}
});
return points;
} | 3.68 |
rocketmq-connect_SourceOffsetCompute_initOffset | /**
 * Initializes and computes the source offsets.
 *
 * @return the computed initial offsets, keyed by table (or query) partition
*/
public static Map<String, Map<String, Object>> initOffset(
JdbcSourceTaskConfig config,
SourceTaskContext context,
DatabaseDialect dialect,
CachedConnectionProvider cachedConnectionProvider
) {
List<String> tables = config.getTables();
String query = config.getQuery();
TableLoadMode mode = TableLoadMode.findTableLoadModeByName(config.getMode());
QueryMode queryMode = !StringUtils.isEmpty(query) ? QueryMode.QUERY : QueryMode.TABLE;
        // step 1 ----- compute partitions
Map<String, RecordPartition> partitionsByTableFqn = buildTablePartitions(mode, queryMode, tables, dialect, config.getOffsetSuffix(), config.getTopicPrefix());
// step 2 ----- get last time offset
Map<RecordPartition, RecordOffset> offsets = null;
if (partitionsByTableFqn != null) {
offsets = context.offsetStorageReader().readOffsets(partitionsByTableFqn.values());
}
// step 3 ----- compute offset init value
List<String> tablesOrQuery = queryMode == QueryMode.QUERY ? Collections.singletonList(query) : tables;
return initOffsetValues(
cachedConnectionProvider,
dialect, queryMode,
partitionsByTableFqn,
offsets,
config,
tablesOrQuery
);
} | 3.68 |
flink_SegmentsUtil_find | /**
 * Finds equal segments2 in segments1.
 *
 * @param segments1 the segments to search in.
 * @param segments2 the segments to search for.
 * @return the offset at which segments2 was found, or -1 if not found.
*/
public static int find(
MemorySegment[] segments1,
int offset1,
int numBytes1,
MemorySegment[] segments2,
int offset2,
int numBytes2) {
if (numBytes2 == 0) { // quick way 1.
return offset1;
}
if (inFirstSegment(segments1, offset1, numBytes1)
&& inFirstSegment(segments2, offset2, numBytes2)) {
byte first = segments2[0].get(offset2);
int end = numBytes1 - numBytes2 + offset1;
for (int i = offset1; i <= end; i++) {
// quick way 2: equal first byte.
if (segments1[0].get(i) == first
&& segments1[0].equalTo(segments2[0], i, offset2, numBytes2)) {
return i;
}
}
return -1;
} else {
return findInMultiSegments(
segments1, offset1, numBytes1, segments2, offset2, numBytes2);
}
} | 3.68 |
hmily_InventoryServiceImpl_cancelMethod | /**
 * Cancel method for the inventory deduction.
*
* @param inventoryDTO the inventory dto
* @return the boolean
*/
public Boolean cancelMethod(InventoryDTO inventoryDTO) {
LOGGER.info("==========调用扣减库存取消方法===========");
inventoryMapper.cancel(inventoryDTO);
return true;
} | 3.68 |
flink_StateTtlConfig_cleanupIncrementally | /**
 * Cleanup expired state incrementally by cleaning up local state.
*
* <p>Upon every state access this cleanup strategy checks a bunch of state keys for
* expiration and cleans up expired ones. It keeps a lazy iterator through all keys with
* relaxed consistency if backend supports it. This way all keys should be regularly checked
* and cleaned eventually over time if any state is constantly being accessed.
*
* <p>Additionally to the incremental cleanup upon state access, it can also run per every
* record. Caution: if there are a lot of registered states using this option, they all will
* be iterated for every record to check if there is something to cleanup.
*
* <p>Note: if no access happens to this state or no records are processed in case of {@code
* runCleanupForEveryRecord}, expired state will persist.
*
* <p>Note: Time spent for the incremental cleanup increases record processing latency.
*
* <p>Note: At the moment incremental cleanup is implemented only for Heap state backend.
* Setting it for RocksDB will have no effect.
*
* <p>Note: If heap state backend is used with synchronous snapshotting, the global iterator
* keeps a copy of all keys while iterating because of its specific implementation which
 * does not support concurrent modifications. Enabling this feature will therefore increase
 * memory consumption. Asynchronous snapshotting does not have this problem.
*
* @param cleanupSize max number of keys pulled from queue for clean up upon state touch for
* any key
* @param runCleanupForEveryRecord run incremental cleanup per each processed record
*/
@Nonnull
public Builder cleanupIncrementally(
@Nonnegative int cleanupSize, boolean runCleanupForEveryRecord) {
strategies.put(
CleanupStrategies.Strategies.INCREMENTAL_CLEANUP,
new IncrementalCleanupStrategy(cleanupSize, runCleanupForEveryRecord));
return this;
} | 3.68 |
framework_BrowserInfo_getBrowserMinorVersion | /**
 * Returns the browser minor version, e.g., 5 for Firefox 3.5.
*
* @see #getBrowserMajorVersion()
*
* @return The minor version of the browser, or -1 if not known/parsed.
*/
public int getBrowserMinorVersion() {
return browserDetails.getBrowserMinorVersion();
} | 3.68 |
hbase_AbstractByteRange_isEmpty | /** Returns true when {@code range} is of zero length, false otherwise. */
public static boolean isEmpty(ByteRange range) {
return range == null || range.getLength() == 0;
} | 3.68 |
hbase_Table_incrementColumnValue | /**
* Atomically increments a column value. If the column value already exists and is not a
* big-endian long, this could throw an exception. If the column value does not yet exist it is
* initialized to <code>amount</code> and written to the specified column.
* <p>
* Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose
* any increments that have not been flushed.
* @param row The row that contains the cell to increment.
* @param family The column family of the cell to increment.
* @param qualifier The column qualifier of the cell to increment.
* @param amount The amount to increment the cell with (or decrement, if the amount is
* negative).
* @param durability The persistence guarantee for this increment.
* @return The new value, post increment.
* @throws IOException if a remote or network exception occurs.
*/
default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
Durability durability) throws IOException {
Increment increment =
new Increment(row).addColumn(family, qualifier, amount).setDurability(durability);
Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.68 |
dubbo_AbstractServiceBuilder_preferSerialization | /**
 * The preferred serialization type
*
* @param preferSerialization prefer serialization type
* @return {@link B}
*/
public B preferSerialization(String preferSerialization) {
this.preferSerialization = preferSerialization;
return getThis();
} | 3.68 |
cron-utils_CronDefinitionBuilder_withYear | /**
* Adds definition for year field.
*
* @return new FieldDefinitionBuilder instance
*/
public FieldDefinitionBuilder withYear() {
return new FieldDefinitionBuilder(this, CronFieldName.YEAR);
} | 3.68 |
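For orientation, a hedged sketch of where `withYear()` usually sits in a cron-utils definition chain; the surrounding builder calls (`defineCron`, `optional`, `instanceDefinition`, etc.) are assumptions based on the library's builder API.

```java
CronDefinition definition = CronDefinitionBuilder.defineCron()
        .withMinutes().and()
        .withHours().and()
        .withDayOfMonth().and()
        .withMonth().and()
        .withDayOfWeek().and()
        .withYear().optional().and()   // the field contributed by the method above
        .instanceDefinition();
```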
framework_GridElement_getDetails | /**
* Gets the element that contains the details of a row.
*
* @since 8.0
* @param rowIndex
* the index of the row for the details
* @return the element that contains the details of a row. <code>null</code>
* if no widget is defined for the details row
* @throws NoSuchElementException
* if the given details row is currently not open
*/
public TestBenchElement getDetails(int rowIndex)
throws NoSuchElementException {
return getSubPart("#details[" + rowIndex + "]");
} | 3.68 |
hudi_DagScheduler_execute | /**
* Method to start executing the nodes in workflow DAGs.
*
* @param service ExecutorService
* @param workflowDag instance of workflow dag that needs to be executed
 * @throws Exception will be thrown if any error occurs
*/
private void execute(ExecutorService service, WorkflowDag workflowDag) throws Exception {
// Nodes at the same level are executed in parallel
log.info("Running workloads");
List<DagNode> nodes = workflowDag.getNodeList();
int curRound = 1;
do {
log.warn("===================================================================");
log.warn("Running workloads for round num " + curRound);
log.warn("===================================================================");
Queue<DagNode> queue = new PriorityQueue<>();
for (DagNode dagNode : nodes) {
queue.add(dagNode.clone());
}
do {
List<Future> futures = new ArrayList<>();
Set<DagNode> childNodes = new HashSet<>();
while (queue.size() > 0) {
DagNode nodeToExecute = queue.poll();
log.warn("Executing node \"" + nodeToExecute.getConfig().getOtherConfigs().get(CONFIG_NAME) + "\" :: " + nodeToExecute.getConfig());
int finalCurRound = curRound;
futures.add(service.submit(() -> executeNode(nodeToExecute, finalCurRound)));
if (nodeToExecute.getChildNodes().size() > 0) {
childNodes.addAll(nodeToExecute.getChildNodes());
}
}
queue.addAll(childNodes);
childNodes.clear();
for (Future future : futures) {
future.get(1, TimeUnit.HOURS);
}
} while (queue.size() > 0);
log.info("Finished workloads for round num " + curRound);
if (curRound < workflowDag.getRounds()) {
new DelayNode(workflowDag.getIntermittentDelayMins()).execute(executionContext, curRound);
}
} while (curRound++ < workflowDag.getRounds());
log.info("Finished workloads");
} | 3.68 |
Activiti_TablePage_getTotal | /**
 * @return the total row count of the table of which this page is only a subset.
*/
public long getTotal() {
return total;
} | 3.68 |
hadoop_RMWebAppUtil_createCredentials | /**
* Generate a Credentials object from the information in the CredentialsInfo
* object.
*
* @param credentials the CredentialsInfo provided by the user.
 * @return the {@link Credentials} populated with the tokens and secrets from the request
*/
private static Credentials createCredentials(CredentialsInfo credentials) {
Credentials ret = new Credentials();
try {
for (Map.Entry<String, String> entry : credentials.getTokens()
.entrySet()) {
Text alias = new Text(entry.getKey());
Token<TokenIdentifier> token = new Token<TokenIdentifier>();
token.decodeFromUrlString(entry.getValue());
ret.addToken(alias, token);
}
for (Map.Entry<String, String> entry : credentials.getSecrets()
.entrySet()) {
Text alias = new Text(entry.getKey());
Base64 decoder = new Base64(0, null, true);
byte[] secret = decoder.decode(entry.getValue());
ret.addSecretKey(alias, secret);
}
} catch (IOException ie) {
throw new BadRequestException(
"Could not parse credentials data; exception message = "
+ ie.getMessage());
}
return ret;
} | 3.68 |
graphhopper_InstructionsOutgoingEdges_getVisibleTurns | /**
* This method calculates the number of all outgoing edges, which could be considered the number of roads you see
* at the intersection. This excludes the road you are coming from and also inaccessible roads.
*/
public int getVisibleTurns() {
return 1 + visibleAlternativeTurns.size();
} | 3.68 |
hudi_HoodieTableMetadataUtil_isFilesPartitionAvailable | /**
* Returns whether the files partition of metadata table is ready for read.
*
* @param metaClient {@link HoodieTableMetaClient} instance.
* @return true if the files partition of metadata table is ready for read,
* based on the table config; false otherwise.
*/
public static boolean isFilesPartitionAvailable(HoodieTableMetaClient metaClient) {
return metaClient.getTableConfig().getMetadataPartitions()
.contains(HoodieTableMetadataUtil.PARTITION_NAME_FILES);
} | 3.68 |
pulsar_BrokerInterceptor_consumerClosed | /**
* Called by the broker when a consumer is closed.
*
* @param cnx client Connection
* @param consumer Consumer object
* @param metadata A map of metadata
*/
default void consumerClosed(ServerCnx cnx,
Consumer consumer,
Map<String, String> metadata) {
} | 3.68 |
hadoop_BCFile_getCompressedSize | /**
* Get the compressed size of the block.
*
* @return compressed size of the block.
*/
public long getCompressedSize() {
return rBlkState.getBlockRegion().getCompressedSize();
} | 3.68 |
druid_DruidAbstractDataSource_setQueryTimeout | /**
* Sets the number of seconds the driver will wait for a <code>Statement</code> object to execute to the given
* number of seconds. If the limit is exceeded, an <code>SQLException</code> is thrown. A JDBC driver must apply
* this limit to the <code>execute</code>, <code>executeQuery</code> and <code>executeUpdate</code> methods. JDBC
* driver implementations may also apply this limit to <code>ResultSet</code> methods (consult your driver vendor
* documentation for details).
*
* @param seconds the new query timeout limit in seconds; zero means there is no limit
* @see #getQueryTimeout
*/
public void setQueryTimeout(int seconds) {
this.queryTimeout = seconds;
} | 3.68 |
pulsar_CmdProduce_run | /**
* Run the producer.
*
* @return 0 for success, < 0 otherwise
* @throws Exception
*/
public int run() throws PulsarClientException {
if (mainOptions.size() != 1) {
throw (new ParameterException("Please provide one and only one topic name."));
}
if (this.numTimesProduce <= 0) {
throw (new ParameterException("Number of times need to be positive number."));
}
if (messages.size() > 0){
messages = messages.stream().map(str -> str.split(separator)).flatMap(Stream::of).toList();
}
if (messages.size() == 0 && messageFileNames.size() == 0) {
throw (new ParameterException("Please supply message content with either --messages or --files"));
}
if (keyValueEncodingType == null) {
keyValueEncodingType = KEY_VALUE_ENCODING_TYPE_NOT_SET;
} else {
switch (keyValueEncodingType) {
case KEY_VALUE_ENCODING_TYPE_SEPARATED:
case KEY_VALUE_ENCODING_TYPE_INLINE:
break;
default:
throw (new ParameterException("--key-value-encoding-type "
+ keyValueEncodingType + " is not valid, only 'separated' or 'inline'"));
}
}
int totalMessages = (messages.size() + messageFileNames.size()) * numTimesProduce;
if (totalMessages > MAX_MESSAGES) {
String msg = "Attempting to send " + totalMessages + " messages. Please do not send more than "
+ MAX_MESSAGES + " messages";
throw new ParameterException(msg);
}
String topic = this.mainOptions.get(0);
if (this.serviceURL.startsWith("ws")) {
return publishToWebSocket(topic);
} else {
return publish(topic);
}
} | 3.68 |
flink_AbstractBlockResettableIterator_close | /**
* This method closes the iterator and releases all resources. This method works both as a
* regular shutdown and as a canceling method. The method may be called multiple times and will
* not produce an error.
*/
public void close() {
synchronized (this) {
if (this.closed) {
return;
}
this.closed = true;
}
this.numRecordsInBuffer = 0;
this.numRecordsReturned = 0;
// add the full segments to the empty ones
for (int i = this.fullSegments.size() - 1; i >= 0; i--) {
this.emptySegments.add(this.fullSegments.remove(i));
}
// release the memory segment
this.memoryManager.release(this.emptySegments);
this.emptySegments.clear();
if (LOG.isDebugEnabled()) {
LOG.debug("Block Resettable Iterator closed.");
}
} | 3.68 |
querydsl_GeometryExpression_distanceSpheroid | // TODO maybe move out
public NumberExpression<Double> distanceSpheroid(Expression<? extends Geometry> geometry) {
return Expressions.numberOperation(Double.class, SpatialOps.DISTANCE_SPHEROID, mixin, geometry);
} | 3.68 |
flink_DiskCacheManager_release | /**
 * Release this {@link DiskCacheManager}, which means all memory taken by this class will be recycled.
*/
void release() {
Arrays.stream(subpartitionCacheManagers).forEach(SubpartitionDiskCacheManager::release);
partitionFileWriter.release();
} | 3.68 |
hadoop_S3AReadOpContext_withReadahead | /**
* Set builder value.
* @param value new value
* @return the builder
*/
public S3AReadOpContext withReadahead(final long value) {
readahead = value;
return this;
} | 3.68 |
flink_TimestampStringUtils_toLocalDateTime | /** Convert a calcite's {@link TimestampString} to a {@link LocalDateTime}. */
public static LocalDateTime toLocalDateTime(TimestampString timestampString) {
final String v = timestampString.toString();
final int year = Integer.parseInt(v.substring(0, 4));
final int month = Integer.parseInt(v.substring(5, 7));
final int day = Integer.parseInt(v.substring(8, 10));
final int h = Integer.parseInt(v.substring(11, 13));
final int m = Integer.parseInt(v.substring(14, 16));
final int s = Integer.parseInt(v.substring(17, 19));
final int nano = getNanosInSecond(v);
return LocalDateTime.of(year, month, day, h, m, s, nano);
} | 3.68 |
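A quick conversion sketch; the string-based `TimestampString` constructor from Calcite (`org.apache.calcite.util.TimestampString`) is an assumption about the caller's side.

```java
import java.time.LocalDateTime;
import org.apache.calcite.util.TimestampString;

TimestampString ts = new TimestampString("2024-03-15 10:20:30.123");
LocalDateTime ldt = TimestampStringUtils.toLocalDateTime(ts);
// ldt == LocalDateTime.of(2024, 3, 15, 10, 20, 30, 123_000_000)
```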
dubbo_StringUtils_encodeParameters | /**
* Encode parameters map to string, like '[{a:b},{c:d}]'
*
 * @param params the parameters map to encode
 * @return the encoded string, or {@code null} if the map is null or empty
*/
public static String encodeParameters(Map<String, String> params) {
if (params == null || params.isEmpty()) {
return null;
}
StringBuilder sb = new StringBuilder();
sb.append('[');
params.forEach((key, value) -> {
// {key:value},
if (hasText(value)) {
sb.append('{').append(key).append(':').append(value).append("},");
}
});
// delete last separator ','
if (sb.charAt(sb.length() - 1) == ',') {
sb.deleteCharAt(sb.length() - 1);
}
sb.append(']');
return sb.toString();
} | 3.68 |
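A brief sketch of the encoding behaviour described above; note that blank values are skipped by the `hasText` check, and a null or empty map yields `null`.

```java
import java.util.LinkedHashMap;
import java.util.Map;

Map<String, String> params = new LinkedHashMap<>();
params.put("a", "b");
params.put("c", "d");
params.put("empty", "");                                // skipped: value has no text
String encoded = StringUtils.encodeParameters(params);  // "[{a:b},{c:d}]"
String none = StringUtils.encodeParameters(null);       // null
```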
hadoop_AzureBlobFileSystem_createResilientCommitSupport | /**
* Private method to create resilient commit support.
* @return a new instance
* @param path destination path
* @throws IOException problem probing store capabilities
* @throws UnsupportedOperationException if the store lacks this support
*/
@InterfaceAudience.Private
public ResilientCommitByRename createResilientCommitSupport(final Path path)
throws IOException {
if (!hasPathCapability(path,
CommonPathCapabilities.ETAGS_PRESERVED_IN_RENAME)) {
throw new UnsupportedOperationException(
"Resilient commit support not available for " + path);
}
return new ResilientCommitByRenameImpl();
} | 3.68 |
flink_KubernetesUtils_resolveUserDefinedValue | /**
* Resolve the user defined value with the precedence. First an explicit config option value is
 * taken, then the value in the pod template, and finally the default value of the config option if
* nothing is specified.
*
* @param flinkConfig flink configuration
* @param configOption the config option to define the Kubernetes fields
* @param valueOfConfigOptionOrDefault the value defined by explicit config option or default
* @param valueOfPodTemplate the value defined in the pod template
* @param fieldDescription Kubernetes fields description
* @param <T> The type of value associated with the configuration option.
* @return the resolved value
*/
public static <T> String resolveUserDefinedValue(
Configuration flinkConfig,
ConfigOption<T> configOption,
String valueOfConfigOptionOrDefault,
@Nullable String valueOfPodTemplate,
String fieldDescription) {
final String resolvedValue;
if (valueOfPodTemplate != null) {
// The config option is explicitly set.
if (flinkConfig.contains(configOption)) {
resolvedValue = valueOfConfigOptionOrDefault;
LOG.info(
"The {} configured in pod template will be overwritten to '{}' "
+ "because of explicitly configured options.",
fieldDescription,
resolvedValue);
} else {
resolvedValue = valueOfPodTemplate;
}
} else {
resolvedValue = valueOfConfigOptionOrDefault;
}
return resolvedValue;
} | 3.68 |
hbase_CheckAndMutate_ifEquals | /**
* Check for equality
* @param family family to check
* @param qualifier qualifier to check
* @param value the expected value
* @return the CheckAndMutate object
*/
public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) {
return ifMatches(family, qualifier, CompareOperator.EQUAL, value);
} | 3.68 |
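A hedged sketch of how this builder method is commonly combined with a mutation; `CheckAndMutate.newBuilder`, `Put`, and `Bytes` are assumed from the standard HBase client API.

```java
byte[] row = Bytes.toBytes("row1");
byte[] family = Bytes.toBytes("cf");
byte[] qualifier = Bytes.toBytes("q");

CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
        .ifEquals(family, qualifier, Bytes.toBytes("expected"))
        .build(new Put(row).addColumn(family, qualifier, Bytes.toBytes("newValue")));
// table.checkAndMutate(checkAndMutate) applies the Put only if the current value matches.
```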
hbase_AbstractClientScanner_getScanMetrics | /**
 * Used internally for accumulating metrics on a scan. To enable collection of metrics on a Scanner,
* call {@link Scan#setScanMetricsEnabled(boolean)}.
* @return Returns the running {@link ScanMetrics} instance or null if scan metrics not enabled.
*/
@Override
public ScanMetrics getScanMetrics() {
return scanMetrics;
} | 3.68 |
hadoop_XMLUtils_newSecureSAXTransformerFactory | /**
* This method should be used if you need a {@link SAXTransformerFactory}. Use this method
* instead of {@link SAXTransformerFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link SAXTransformerFactory} with secure configuration enabled
* @throws TransformerConfigurationException if the {@code JAXP} transformer does not
* support the secure configuration
*/
public static SAXTransformerFactory newSecureSAXTransformerFactory()
throws TransformerConfigurationException {
SAXTransformerFactory trfactory = (SAXTransformerFactory) SAXTransformerFactory.newInstance();
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
setOptionalSecureTransformerAttributes(trfactory);
return trfactory;
} | 3.68 |
graphhopper_RAMDataAccess_store | /**
* @param store true if in-memory data should be saved when calling flush
*/
public RAMDataAccess store(boolean store) {
this.store = store;
return this;
} | 3.68 |
hbase_MutableRegionInfo_hashCode | /**
* @see Object#hashCode()
*/
@Override
public int hashCode() {
return this.hashCode;
} | 3.68 |
hbase_RequestConverter_buildGetTableDescriptorsRequest | /**
* Creates a protocol buffer GetTableDescriptorsRequest for a single table
* @param tableName the table name
* @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest
buildGetTableDescriptorsRequest(final TableName tableName) {
return GetTableDescriptorsRequest.newBuilder()
.addTableNames(ProtobufUtil.toProtoTableName(tableName)).build();
} | 3.68 |
hadoop_Duration_close | /**
* The close operation relays to {@link #finish()}.
* Implementing it allows Duration instances to be automatically
 * finish()'d in Java 7 try-with-resources blocks when used for measuring durations.
*/
@Override
public final void close() {
finish();
} | 3.68 |
flink_DataStream_collectAsync | /**
* Sets up the collection of the elements in this {@link DataStream}, which can be retrieved
* later via the given {@link Collector}.
*
* <p>Caution: When multiple streams are being collected it is recommended to consume all
* streams in parallel to not back-pressure the job.
*
* <p>Caution: Closing the iterator from the collector cancels the job! It is recommended to
* close all iterators once you are no longer interested in any of the collected streams.
*
* <p>This method is functionally equivalent to {@link #collectAsync()}.
*
* <p>This method is meant to support use-cases where the application of a sink is done via a
* {@code Consumer<DataStream<T>>}, where it wouldn't be possible (or inconvenient) to return an
* iterator.
*
* @param collector a collector that can be used to retrieve the elements
*/
@Experimental
public void collectAsync(Collector<T> collector) {
TypeSerializer<T> serializer =
getType().createSerializer(getExecutionEnvironment().getConfig());
String accumulatorName = "dataStreamCollect_" + UUID.randomUUID().toString();
StreamExecutionEnvironment env = getExecutionEnvironment();
CollectSinkOperatorFactory<T> factory =
new CollectSinkOperatorFactory<>(serializer, accumulatorName);
CollectSinkOperator<T> operator = (CollectSinkOperator<T>) factory.getOperator();
long resultFetchTimeout =
env.getConfiguration().get(AkkaOptions.ASK_TIMEOUT_DURATION).toMillis();
CollectResultIterator<T> iterator =
new CollectResultIterator<>(
operator.getOperatorIdFuture(),
serializer,
accumulatorName,
env.getCheckpointConfig(),
resultFetchTimeout);
CollectStreamSink<T> sink = new CollectStreamSink<>(this, factory);
sink.name("Data stream collect sink");
env.addOperator(sink.getTransformation());
env.registerCollectIterator(iterator);
collector.setIterator(iterator);
} | 3.68 |
framework_AbstractClickEventHandler_shouldFireEvent | /**
* Called before firing a click event. Allows sub classes to decide if this
* in an event that should cause an event or not.
*
* @param event
* The user event
* @return true if the event should be fired, false otherwise
*/
protected boolean shouldFireEvent(DomEvent<?> event) {
return true;
} | 3.68 |
flink_MasterTriggerRestoreHook_close | /**
* Tear-down method for the hook.
*
* @throws Exception Exceptions encountered when calling close will be logged.
*/
default void close() throws Exception {} | 3.68 |
flink_ResolvedSchema_getPrimaryKeyIndexes | /** Returns the primary key indexes, if any, otherwise returns an empty array. */
public int[] getPrimaryKeyIndexes() {
final List<String> columns = getColumnNames();
return getPrimaryKey()
.map(UniqueConstraint::getColumns)
.map(pkColumns -> pkColumns.stream().mapToInt(columns::indexOf).toArray())
.orElseGet(() -> new int[] {});
} | 3.68 |
framework_LayoutManager_getBorderLeft | /**
* Gets the left border of the given element, provided that it has been
* measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured left border of the element in pixels.
*/
public int getBorderLeft(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getBorderLeft();
} | 3.68 |
morf_HumanReadableStatementProducer_addColumn | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#addColumn(java.lang.String, org.alfasoftware.morf.metadata.Column)
*/
@Override
public void addColumn(String tableName, Column definition) {
consumer.schemaChange(HumanReadableStatementHelper.generateAddColumnString(tableName, definition));
} | 3.68 |
flink_RocksDBOperationUtils_createStateInfo | /**
* Creates a state info from a new meta info to use with a k/v state.
*
* <p>Creates the column family for the state. Sets TTL compaction filter if {@code
* ttlCompactFiltersManager} is not {@code null}.
*/
public static RocksDBKeyedStateBackend.RocksDbKvStateInfo createStateInfo(
RegisteredStateMetaInfoBase metaInfoBase,
RocksDB db,
Function<String, ColumnFamilyOptions> columnFamilyOptionsFactory,
@Nullable RocksDbTtlCompactFiltersManager ttlCompactFiltersManager,
@Nullable Long writeBufferManagerCapacity) {
ColumnFamilyDescriptor columnFamilyDescriptor =
createColumnFamilyDescriptor(
metaInfoBase,
columnFamilyOptionsFactory,
ttlCompactFiltersManager,
writeBufferManagerCapacity);
return new RocksDBKeyedStateBackend.RocksDbKvStateInfo(
createColumnFamily(columnFamilyDescriptor, db), metaInfoBase);
} | 3.68 |
flink_DataStream_executeAndCollect | /**
 * Triggers the distributed execution of the streaming dataflow and returns up to the given
 * number of elements of the given DataStream as a list.
*
* <p>The DataStream application is executed in the regular distributed manner on the target
* environment, and the events from the stream are polled back to this application process and
* thread through Flink's REST API.
*/
public List<T> executeAndCollect(String jobExecutionName, int limit) throws Exception {
Preconditions.checkState(limit > 0, "Limit must be greater than 0");
try (ClientAndIterator<T> clientAndIterator =
executeAndCollectWithClient(jobExecutionName)) {
List<T> results = new ArrayList<>(limit);
while (limit > 0 && clientAndIterator.iterator.hasNext()) {
results.add(clientAndIterator.iterator.next());
limit--;
}
return results;
}
} | 3.68 |
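A hedged sketch of pulling back a bounded sample with this method; `fromSequence` and the environment setup are assumed from the regular DataStream API.

```java
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
List<Long> firstTen = env.fromSequence(1, 1_000)
        .executeAndCollect("collect-sample", 10);  // at most 10 elements are fetched back
```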
hadoop_TypedBytesInput_readString | /**
* Reads the string following a <code>Type.STRING</code> code.
* @return the obtained string
* @throws IOException
*/
public String readString() throws IOException {
return WritableUtils.readString(in);
} | 3.68 |
framework_ContainerEventProvider_setContainerDataSource | /**
* Set the container data source.
*
* @param container
* The container to use as datasource
*
*/
public void setContainerDataSource(Container.Indexed container) {
// Detach the previous container
detachContainerDataSource();
this.container = container;
listenToContainerEvents();
} | 3.68 |
morf_SqlServerDialect_getSqlForDateToYyyymmdd | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmdd(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForDateToYyyymmdd(Function function) {
return String.format("CONVERT(VARCHAR(8),%s, 112)", getSqlFrom(function.getArguments().get(0)));
} | 3.68 |
hbase_TagUtil_readVIntValuePart | /**
* Reads an int value stored as a VInt at tag's given offset.
* @param tag The Tag
* @param offset The offset where VInt bytes begin
* @return A pair of the int value and number of bytes taken to store VInt
* @throws IOException When varint is malformed and not able to be read correctly
*/
public static Pair<Integer, Integer> readVIntValuePart(Tag tag, int offset) throws IOException {
if (tag.hasArray()) {
return StreamUtils.readRawVarint32(tag.getValueArray(), offset);
}
return StreamUtils.readRawVarint32(tag.getValueByteBuffer(), offset);
} | 3.68 |
hadoop_MountTableStore_updateCacheAllRouters | /**
* Update mount table cache of this router as well as all other routers.
*/
protected void updateCacheAllRouters() {
if (refreshService != null) {
try {
refreshService.refresh();
} catch (StateStoreUnavailableException e) {
LOG.error("Cannot refresh mount table: state store not available", e);
}
}
} | 3.68 |
pulsar_FunctionMetaDataManager_acquireExclusiveWrite | /**
 * Acquires an exclusive producer. This method cannot return null. It can only return a valid exclusive producer
* or throw NotLeaderAnymore exception.
* @param isLeader if the worker is still the leader
* @return A valid exclusive producer
* @throws WorkerUtils.NotLeaderAnymore if the worker is no longer the leader.
*/
public Producer<byte[]> acquireExclusiveWrite(Supplier<Boolean> isLeader) throws WorkerUtils.NotLeaderAnymore {
// creates exclusive producer for metadata topic
return WorkerUtils.createExclusiveProducerWithRetry(
pulsarClient,
workerConfig.getFunctionMetadataTopic(),
workerConfig.getWorkerId() + "-leader",
isLeader, 1000);
} | 3.68 |
flink_ResourceManager_registerJobMasterInternal | /**
* Registers a new JobMaster.
*
* @param jobMasterGateway to communicate with the registering JobMaster
* @param jobId of the job for which the JobMaster is responsible
* @param jobManagerAddress address of the JobMaster
* @param jobManagerResourceId ResourceID of the JobMaster
* @return RegistrationResponse
*/
private RegistrationResponse registerJobMasterInternal(
final JobMasterGateway jobMasterGateway,
JobID jobId,
String jobManagerAddress,
ResourceID jobManagerResourceId) {
if (jobManagerRegistrations.containsKey(jobId)) {
JobManagerRegistration oldJobManagerRegistration = jobManagerRegistrations.get(jobId);
if (Objects.equals(
oldJobManagerRegistration.getJobMasterId(),
jobMasterGateway.getFencingToken())) {
// same registration
log.debug(
"Job manager {}@{} was already registered.",
jobMasterGateway.getFencingToken(),
jobManagerAddress);
} else {
                // tell the old job manager that it is no longer the job leader
closeJobManagerConnection(
oldJobManagerRegistration.getJobID(),
ResourceRequirementHandling.RETAIN,
new Exception("New job leader for job " + jobId + " found."));
JobManagerRegistration jobManagerRegistration =
new JobManagerRegistration(jobId, jobManagerResourceId, jobMasterGateway);
jobManagerRegistrations.put(jobId, jobManagerRegistration);
jmResourceIdRegistrations.put(jobManagerResourceId, jobManagerRegistration);
blocklistHandler.registerBlocklistListener(jobMasterGateway);
}
} else {
// new registration for the job
JobManagerRegistration jobManagerRegistration =
new JobManagerRegistration(jobId, jobManagerResourceId, jobMasterGateway);
jobManagerRegistrations.put(jobId, jobManagerRegistration);
jmResourceIdRegistrations.put(jobManagerResourceId, jobManagerRegistration);
blocklistHandler.registerBlocklistListener(jobMasterGateway);
}
log.info(
"Registered job manager {}@{} for job {}.",
jobMasterGateway.getFencingToken(),
jobManagerAddress,
jobId);
jobManagerHeartbeatManager.monitorTarget(
jobManagerResourceId, new JobMasterHeartbeatSender(jobMasterGateway));
return new JobMasterRegistrationSuccess(getFencingToken(), resourceId);
} | 3.68 |
framework_VaadinService_criticalNotification | /**
* @deprecated As of 7.0. Will likely change or be removed in a future
* version
*/
@Deprecated
public void criticalNotification(VaadinRequest request,
VaadinResponse response, String caption, String message,
String details, String url) throws IOException {
writeUncachedStringResponse(response, JsonConstants.JSON_CONTENT_TYPE,
createCriticalNotificationJSON(caption, message, details, url));
} | 3.68 |
flink_Tumble_over | /**
* Creates a tumbling window. Tumbling windows are fixed-size, consecutive, non-overlapping
* windows of a specified fixed length. For example, a tumbling window of 5 minutes size groups
* elements in 5 minutes intervals.
*
* @param size the size of the window as time or row-count interval.
* @return a partially defined tumbling window
*/
public static TumbleWithSize over(Expression size) {
return new TumbleWithSize(size);
} | 3.68 |
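A hedged Table API sketch showing `Tumble.over` combined with `on` and `as`; the expression helpers `lit` and `$` are assumed to be statically imported from `org.apache.flink.table.api.Expressions`.

```java
Table result = orders
        .window(Tumble.over(lit(5).minutes()).on($("rowtime")).as("w"))
        .groupBy($("w"), $("user"))
        .select($("user"), $("amount").sum().as("total"));
```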
flink_Costs_setHeuristicDiskCost | /**
* Sets the heuristic costs for disk for this Costs object.
*
* @param cost The heuristic disk cost to set.
*/
public void setHeuristicDiskCost(double cost) {
if (cost <= 0) {
throw new IllegalArgumentException("Heuristic costs must be positive.");
}
this.heuristicDiskCost = cost;
} | 3.68 |
flink_DataSourceTask_initOutputs | /**
* Creates a writer for each output. Creates an OutputCollector which forwards its input to all
* writers. The output collector applies the configured shipping strategy.
*/
private void initOutputs(UserCodeClassLoader cl) throws Exception {
this.chainedTasks = new ArrayList<ChainedDriver<?, ?>>();
this.eventualOutputs = new ArrayList<RecordWriter<?>>();
this.output =
BatchTask.initOutputs(
this,
cl,
this.config,
this.chainedTasks,
this.eventualOutputs,
getExecutionConfig(),
getEnvironment().getAccumulatorRegistry().getUserMap());
} | 3.68 |
MagicPlugin_ModernMythicMobManager_getActiveMob | // Not in the API...
@SuppressWarnings("unchecked")
public Optional<ActiveMob> getActiveMob(UUID id) {
try {
MobManager manager = api.getMobManager();
Method getActiveMobMethod = manager.getClass().getMethod("getActiveMob", UUID.class);
if (getActiveMobMethod != null) {
return (Optional<ActiveMob>)getActiveMobMethod.invoke(manager, id);
} else {
controller.getLogger().warning("MythicMobs integration has gone wrong, disabling");
api = null;
}
} catch (Exception ex) {
controller.getLogger().warning("MythicMobs integration has gone wrong, disabling");
api = null;
}
return Optional.empty();
} | 3.68 |
hmily_HmilyRepositoryFacade_updateHmilyParticipantUndoStatus | /**
* Update hmily participant undo status.
*
* @param undoId the undo id
* @param status the status
*/
public void updateHmilyParticipantUndoStatus(final Long undoId, final Integer status) {
checkRows(hmilyRepository.updateHmilyParticipantUndoStatus(undoId, status));
} | 3.68 |
framework_WebBrowser_isMacOSX | /**
* Tests whether the user is using Mac OS X.
*
* @return true if the user is using Mac OS X, false if the user is not
* using Mac OS X or if no information on the browser is present
*/
public boolean isMacOSX() {
return browserDetails.isMacOSX();
} | 3.68 |
framework_VUIDLBrowser_getConnector | /**
* Returns the Connector associated with this state change.
*/
protected ServerConnector getConnector() {
return client.getConnectorMap().getConnector(getConnectorId());
} | 3.68 |
hadoop_ReplicaInfo_getFileIoProvider | /**
* Get the {@link FileIoProvider} for disk IO operations.
*/
public FileIoProvider getFileIoProvider() {
// In tests and when invoked via FsDatasetUtil#computeChecksum, the
// target volume for this replica may be unknown and hence null.
// Use the DEFAULT_FILE_IO_PROVIDER with no-op hooks.
return (volume != null) ? volume.getFileIoProvider()
: DEFAULT_FILE_IO_PROVIDER;
} | 3.68 |
hadoop_RecoverPausedContainerLaunch_call | /**
* Cleanup the paused container by issuing a kill on it.
*/
@SuppressWarnings("unchecked")
@Override
public Integer call() {
int retCode = ContainerExecutor.ExitCode.LOST.getExitCode();
ContainerId containerId = container.getContainerId();
String appIdStr =
containerId.getApplicationAttemptId().getApplicationId().toString();
String containerIdStr = containerId.toString();
dispatcher.getEventHandler().handle(new ContainerEvent(containerId,
ContainerEventType.RECOVER_PAUSED_CONTAINER));
boolean interrupted = false;
try {
File pidFile = locatePidFile(appIdStr, containerIdStr);
if (pidFile != null) {
String pidPathStr = pidFile.getPath();
pidFilePath = new Path(pidPathStr);
exec.activateContainer(containerId, pidFilePath);
retCode = exec.reacquireContainer(
new ContainerReacquisitionContext.Builder()
.setContainer(container)
.setUser(container.getUser())
.setContainerId(containerId)
.build());
} else {
LOG.warn("Unable to locate pid file for container " + containerIdStr);
}
} catch (InterruptedException | InterruptedIOException e) {
LOG.warn("Interrupted while waiting for exit code from " + containerId);
interrupted = true;
} catch (IOException e) {
LOG.error("Unable to kill the paused container " + containerIdStr, e);
} finally {
if (!interrupted) {
this.completed.set(true);
exec.deactivateContainer(containerId);
try {
getContext().getNMStateStore()
.storeContainerCompleted(containerId, retCode);
} catch (IOException e) {
LOG.error("Unable to set exit code for container " + containerId);
}
}
}
if (retCode != 0) {
LOG.warn("Recovered container exited with a non-zero exit code "
+ retCode);
this.dispatcher.getEventHandler().handle(new ContainerExitEvent(
containerId,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, retCode,
"Container exited with a non-zero exit code " + retCode));
return retCode;
}
LOG.info("Recovered container " + containerId + " succeeded");
dispatcher.getEventHandler().handle(
new ContainerEvent(containerId,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS));
return 0;
} | 3.68 |
hbase_TableOverAsyncTable_getKeysAndRegionsInRange | /**
* Get the corresponding start keys and regions for an arbitrary range of keys.
* <p>
* @param startKey Starting row in range, inclusive
* @param endKey Ending row in range
* @param includeEndKey true if endRow is inclusive, false if exclusive
* @param reload true to reload information or false to use cached information
* @return A pair of list of start keys and list of HRegionLocations that contain the specified
* range
* @throws IOException if a remote or network exception occurs
*/
private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(final byte[] startKey,
final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException {
final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW);
if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
throw new IllegalArgumentException(
"Invalid range: " + Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey));
}
List<byte[]> keysInRange = new ArrayList<>();
List<HRegionLocation> regionsInRange = new ArrayList<>();
byte[] currentKey = startKey;
do {
HRegionLocation regionLocation =
FutureUtils.get(conn.getRegionLocator(getName()).getRegionLocation(currentKey, reload));
keysInRange.add(currentKey);
regionsInRange.add(regionLocation);
currentKey = regionLocation.getRegion().getEndKey();
} while (
!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW)
&& (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0
|| (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0))
);
return new Pair<>(keysInRange, regionsInRange);
} | 3.68 |
hbase_TableName_isLegalNamespaceName | /**
* Valid namespace characters are alphabetic characters, numbers, and underscores.
*/
public static void isLegalNamespaceName(final byte[] namespaceName, final int start,
final int end) {
if (end - start < 1) {
throw new IllegalArgumentException("Namespace name must not be empty");
}
String nsString = new String(namespaceName, start, (end - start), StandardCharsets.UTF_8);
if (nsString.equals(DISALLOWED_TABLE_NAME)) {
// Per https://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkDataModel
// A znode named "zookeeper" is disallowed by zookeeper.
throw new IllegalArgumentException("Tables may not be named '" + DISALLOWED_TABLE_NAME + "'");
}
for (int i = 0; i < nsString.length(); i++) {
// Treat the string as a char-array as some characters may be multi-byte
char c = nsString.charAt(i);
// ZooKeeper also has limitations, but Character.isAlphabetic omits those all
// See https://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkDataModel
if (Character.isAlphabetic(c) || Character.isDigit(c) || c == '_') {
continue;
}
throw new IllegalArgumentException(
"Illegal character <" + c + "> at " + i + ". Namespaces may only contain "
+ "'alphanumeric characters' from any language and digits: " + nsString);
}
} | 3.68 |
flink_RecordsBySplits_addAll | /**
* Add multiple records from the given source split.
*
* @param split the source split the records were from.
* @param records the records to add.
*/
public void addAll(SourceSplit split, Collection<E> records) {
addAll(split.splitId(), records);
} | 3.68 |
hudi_HoodieAppendHandle_appendDataAndDeleteBlocks | /**
* Appends data and delete blocks. When appendDeleteBlocks value is false, only data blocks are appended.
* This is done so that all the data blocks are created first and then a single delete block is added.
 * Otherwise, multiple small delete blocks can end up being added after each data block.
*/
protected void appendDataAndDeleteBlocks(Map<HeaderMetadataType, String> header, boolean appendDeleteBlocks) {
try {
header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, instantTime);
header.put(HoodieLogBlock.HeaderMetadataType.SCHEMA, writeSchemaWithMetaFields.toString());
List<HoodieLogBlock> blocks = new ArrayList<>(2);
if (recordList.size() > 0) {
String keyField = config.populateMetaFields()
? HoodieRecord.RECORD_KEY_METADATA_FIELD
: hoodieTable.getMetaClient().getTableConfig().getRecordKeyFieldProp();
blocks.add(getBlock(config, pickLogDataBlockFormat(), recordList, shouldWriteRecordPositions,
getUpdatedHeader(header, blockSequenceNumber++, attemptNumber, config,
addBlockIdentifier()), keyField));
}
if (appendDeleteBlocks && recordsToDeleteWithPositions.size() > 0) {
blocks.add(new HoodieDeleteBlock(recordsToDeleteWithPositions, shouldWriteRecordPositions,
getUpdatedHeader(header, blockSequenceNumber++, attemptNumber, config,
addBlockIdentifier())));
}
if (blocks.size() > 0) {
AppendResult appendResult = writer.appendBlocks(blocks);
processAppendResult(appendResult, recordList);
recordList.clear();
if (appendDeleteBlocks) {
recordsToDeleteWithPositions.clear();
}
}
} catch (Exception e) {
throw new HoodieAppendException("Failed while appending records to " + writer.getLogFile().getPath(), e);
}
} | 3.68 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_getFileSlicesEligibleForClustering | /**
* Generate candidate clustering file slices of the given partition.
 * If there is an inflight / requested clustering operation working on the partition, then return an empty list
 * to ensure serialized updates to the hashing metadata.
*
* @return candidate file slices to be clustered (i.e., sort, bucket split or merge)
*/
@Override
protected Stream<FileSlice> getFileSlicesEligibleForClustering(String partition) {
TableFileSystemView fileSystemView = getHoodieTable().getFileSystemView();
boolean isPartitionInClustering = fileSystemView.getFileGroupsInPendingClustering().anyMatch(p -> p.getLeft().getPartitionPath().equals(partition));
if (isPartitionInClustering) {
LOG.info("Partition {} is already in clustering, skip.", partition);
return Stream.empty();
}
return super.getFileSlicesEligibleForClustering(partition);
} | 3.68 |
framework_TabSheetElement_getContent | /**
* Gets TabSheet content and wraps it in given class.
*
* @param clazz
* Components element class
* @return TabSheet content wrapped in given class
*/
public <T extends AbstractElement> T getContent(Class<T> clazz) {
return TestBench.createElement(clazz,
$$(AbstractComponentElement.class).first().getWrappedElement(),
getCommandExecutor());
} | 3.68 |
hbase_MultiRowRangeFilter_flipAndReverseRanges | /**
* Rebuilds the sorted ranges (by startKey) into an equivalent sorted list of ranges, only by
* stopKey instead. Descending order and the ReversedRowRange compareTo implementation make sure
* that we can use Collections.binarySearch().
*/
static List<ReversedRowRange> flipAndReverseRanges(List<RowRange> ranges) {
List<ReversedRowRange> flippedRanges = new ArrayList<>(ranges.size());
for (int i = ranges.size() - 1; i >= 0; i--) {
RowRange origRange = ranges.get(i);
ReversedRowRange newRowRange = new ReversedRowRange(origRange.startRow,
origRange.startRowInclusive, origRange.stopRow, origRange.isStopRowInclusive());
flippedRanges.add(newRowRange);
}
return flippedRanges;
} | 3.68 |
flink_AbstractMergeIterator_crossSecond1withNValues | /**
* Crosses a single value from the second side with N values, all sharing a common key.
* Effectively realizes a <i>N:1</i> join.
*
* @param val1 The value form the <i>1</i> side.
* @param firstValN The first of the values from the <i>N</i> side.
* @param valsN Iterator over remaining <i>N</i> side values.
* @throws Exception Forwards all exceptions thrown by the stub.
*/
private void crossSecond1withNValues(
T2 val1,
T1 firstValN,
Iterator<T1> valsN,
FlatJoinFunction<T1, T2, O> joinFunction,
Collector<O> collector)
throws Exception {
T2 copy2 = createCopy(serializer2, val1, this.copy2);
joinFunction.join(firstValN, copy2, collector);
// set copy and join first element
boolean more = true;
do {
final T1 nRec = valsN.next();
if (valsN.hasNext()) {
copy2 = createCopy(serializer2, val1, this.copy2);
joinFunction.join(nRec, copy2, collector);
} else {
joinFunction.join(nRec, val1, collector);
more = false;
}
} while (more);
} | 3.68 |
flink_JarHandlerUtils_tokenizeArguments | /**
* Takes program arguments as a single string, and splits them into a list of string.
*
* <pre>
* tokenizeArguments("--foo bar") = ["--foo" "bar"]
* tokenizeArguments("--foo \"bar baz\"") = ["--foo" "bar baz"]
* tokenizeArguments("--foo 'bar baz'") = ["--foo" "bar baz"]
* tokenizeArguments(null) = []
* </pre>
*
* <strong>WARNING: </strong>This method does not respect escaped quotes.
*/
@VisibleForTesting
static List<String> tokenizeArguments(@Nullable final String args) {
if (args == null) {
return Collections.emptyList();
}
final Matcher matcher = ARGUMENTS_TOKENIZE_PATTERN.matcher(args);
final List<String> tokens = new ArrayList<>();
while (matcher.find()) {
tokens.add(matcher.group().trim().replace("\"", "").replace("\'", ""));
}
return tokens;
} | 3.68 |
hadoop_NodeType_getIndex | /**
* @return the index of the node type
*/
public int getIndex() {
return index;
} | 3.68 |
hbase_MasterObserver_postCreateTable | /**
* Called after the createTable operation has been requested. Called as part of create table RPC
* call.
* @param ctx the environment to interact with the framework and master
* @param desc the TableDescriptor for the table
* @param regions the initial regions created for the table
*/
default void postCreateTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
TableDescriptor desc, RegionInfo[] regions) throws IOException {
} | 3.68 |
hadoop_TFile_getComparator | /**
* Get an instance of the RawComparator that is constructed based on the
* string comparator representation.
*
* @return a Comparator that can compare RawComparable's.
*/
public Comparator<RawComparable> getComparator() {
return comparator;
} | 3.68 |
hbase_MasterObserver_preBalanceSwitch | /**
* Called prior to modifying the flag used to enable/disable region balancing.
* @param ctx the coprocessor instance's environment
*/
default void preBalanceSwitch(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue) throws IOException {
} | 3.68 |
graphhopper_TileBasedElevationProvider_setInterpolate | /**
* Configuration option to use bilinear interpolation to find the elevation at a point from the
* surrounding elevation points. Has only an effect if called before the first getEle call.
* Turned off by default.
*/
public TileBasedElevationProvider setInterpolate(boolean interpolate) {
this.interpolate = interpolate;
return this;
} | 3.68 |
hbase_StorageClusterStatusModel_getCurrentCompactedKVs | /** Returns The number of current compacted key-values */
@XmlAttribute
public long getCurrentCompactedKVs() {
return currentCompactedKVs;
} | 3.68 |
flink_SecurityOptions_isRestSSLEnabled | /** Checks whether SSL for the external REST endpoint is enabled. */
public static boolean isRestSSLEnabled(Configuration sslConfig) {
@SuppressWarnings("deprecation")
final boolean fallbackFlag = sslConfig.getBoolean(SSL_ENABLED);
return sslConfig.getBoolean(SSL_REST_ENABLED, fallbackFlag);
} | 3.68 |
hbase_LogEventHandler_onEvent | /**
 * Called when a publisher has published an event to the {@link RingBuffer}. This is a generic
 * consumer of the disruptor ring buffer; for each new namedQueue that we add, we should also
* provide specific consumer logic here.
* @param event published to the {@link RingBuffer}
* @param sequence of the event being processed
* @param endOfBatch flag to indicate if this is the last event in a batch from the
* {@link RingBuffer}
*/
@Override
public void onEvent(RingBufferEnvelope event, long sequence, boolean endOfBatch) {
final NamedQueuePayload namedQueuePayload = event.getPayload();
// consume ringbuffer payload based on event type
namedQueueServices.get(namedQueuePayload.getNamedQueueEvent())
.consumeEventFromDisruptor(namedQueuePayload);
} | 3.68 |
framework_Button_getClientY | /**
* Returns the mouse position (y coordinate) when the click took place.
* The position is relative to the browser client area.
*
* @return The mouse cursor y position or -1 if unknown
*/
public int getClientY() {
if (null != details) {
return details.getClientY();
} else {
return -1;
}
} | 3.68 |
hadoop_Query_matches | /**
* Check if a record matches the primary keys or the partial record.
*
* @param other Record to check.
* @return If the record matches. Don't match if there is no partial.
*/
public boolean matches(T other) {
if (this.partial == null) {
return false;
}
return this.partial.like(other);
} | 3.68 |
framework_VTabsheet_getFocusedTab | /**
* Returns the tab that has the focus currently.
*
* @return the focused tab or {@code null} if one doesn't exist
*/
private Tab getFocusedTab() {
return focusedTab;
} | 3.68 |
hudi_BoundedInMemoryQueue_markAsFailed | /**
* API to allow producers and consumer to communicate termination due to failure.
*/
@Override
public void markAsFailed(Throwable e) {
this.hasFailed.set(e);
// release the permits so that if the queueing thread is waiting for permits then it will
// get it.
this.rateLimiter.release(RECORD_CACHING_LIMIT + 1);
} | 3.68 |
flink_IterativeStream_withFeedbackType | /**
* Changes the feedback type of the iteration and allows the user to apply co-transformations on
* the input and feedback stream, as in a {@link ConnectedStreams}.
*
* <p>For type safety the user needs to define the feedback type
*
* @param feedbackType The type information of the feedback stream.
* @return A {@link ConnectedIterativeStreams}.
*/
public <F> ConnectedIterativeStreams<T, F> withFeedbackType(TypeInformation<F> feedbackType) {
return new ConnectedIterativeStreams<>(originalInput, feedbackType, maxWaitTime);
} | 3.68 |
hadoop_MetricsCache_metricsEntrySet | /**
* @return entry set of metrics
*/
public Set<Map.Entry<String, AbstractMetric>> metricsEntrySet() {
return metrics.entrySet();
} | 3.68 |
framework_UIDL_getStringArrayVariable | /**
* Gets the value of the named variable.
*
* @param name
* the name of the variable
* @return the value of the variable
*/
public String[] getStringArrayVariable(String name) {
return var().getStringArray(name);
} | 3.68 |
flink_SharedBuffer_getEvent | /**
 * It always returns the event, either from the state or from the cache.
*
* @param eventId id of the event
* @return event
*/
Lockable<V> getEvent(EventId eventId) {
try {
Lockable<V> lockableFromCache = eventsBufferCache.getIfPresent(eventId);
if (Objects.nonNull(lockableFromCache)) {
return lockableFromCache;
} else {
Lockable<V> lockableFromState = eventsBuffer.get(eventId);
if (Objects.nonNull(lockableFromState)) {
eventsBufferCache.put(eventId, lockableFromState);
}
return lockableFromState;
}
} catch (Exception ex) {
throw new WrappingRuntimeException(ex);
}
} | 3.68 |
framework_ListSelectElement_getOptions | /**
* Gets a list of the texts shown for all options.
*
* @return a list of option texts
*/
public List<String> getOptions() {
List<String> options = new ArrayList<String>();
for (WebElement webElement : select.getOptions()) {
options.add(webElement.getText());
}
return options;
} | 3.68 |
framework_AbstractComponent_setHeightFull | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Sizeable#setHeightFull()
*/
@Override
public void setHeightFull() {
setHeight(100, Unit.PERCENTAGE);
} | 3.68 |
flink_BinaryExternalSorter_isRunning | /**
* Checks whether this thread is still alive.
*
* @return true, if the thread is alive, false otherwise.
*/
public boolean isRunning() {
return this.alive;
} | 3.68 |
framework_Calendar_getEndDate | /**
* Gets the calendar's end date.
*
* @return Last visible date.
*/
public Date getEndDate() {
if (endDate == null) {
currentCalendar.set(java.util.Calendar.MILLISECOND, 0);
currentCalendar.set(java.util.Calendar.SECOND, 59);
currentCalendar.set(java.util.Calendar.MINUTE, 59);
currentCalendar.set(java.util.Calendar.HOUR_OF_DAY, 23);
currentCalendar.set(java.util.Calendar.DAY_OF_WEEK,
currentCalendar.getFirstDayOfWeek() + 6);
return currentCalendar.getTime();
}
return endDate;
} | 3.68 |