name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---
flink_TextValueInputFormat_getCharsetName_rdh | // --------------------------------------------------------------------------------------------
public String getCharsetName() {
return charsetName;
} | 3.26 |
flink_TextValueInputFormat_readRecord_rdh | // --------------------------------------------------------------------------------------------
@Override
public StringValue readRecord(StringValue reuse, byte[] bytes, int offset, int numBytes) {
    if (this.ascii) {
        reuse.setValueAscii(bytes, offset, numBytes);
        return reuse;
    } else {
        ByteBuffer byteWrapper = this.byteWrapper;
        if (bytes != byteWrapper.array()) {
            byteWrapper = ByteBuffer.wrap(bytes, 0, bytes.length);
            this.byteWrapper = byteWrapper;
        }
        byteWrapper.limit(offset + numBytes);
        byteWrapper.position(offset);
        try {
            CharBuffer result = this.decoder.decode(byteWrapper);
            reuse.setValue(result);
            return reuse;
        } catch (CharacterCodingException e) {
            if (skipInvalidLines) {
                return null;
            } else {
                byte[] copy = new byte[numBytes];
                System.arraycopy(bytes, offset, copy, 0, numBytes);
                throw new RuntimeException("Line could not be encoded: " + Arrays.toString(copy), e);
            }
        }
    }
} | 3.26 |
flink_TextValueInputFormat_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
    return "TextValueInputFormat (" + Arrays.toString(getFilePaths()) + ") - " + this.charsetName + (this.skipInvalidLines ? "(skipping invalid lines)" : "");
} | 3.26 |
flink_WorksetIterationNode_getOperatorName_rdh | // --------------------------------------------------------------------------------------------
@Override
public String getOperatorName() {
return "Workset Iteration";
} | 3.26 |
flink_WorksetIterationNode_acceptForStepFunction_rdh | // --------------------------------------------------------------------------------------------
// Iteration Specific Traversals
// --------------------------------------------------------------------------------------------
public void acceptForStepFunction(Visitor<OptimizerNode> visitor) {
this.singleRoot.accept(visitor);
} | 3.26 |
flink_WorksetIterationNode_getIterationContract_rdh | // --------------------------------------------------------------------------------------------
public DeltaIterationBase<?, ?> getIterationContract() {
    return (DeltaIterationBase<?, ?>) getOperator();
} | 3.26 |
flink_WorksetIterationNode_getPossibleProperties_rdh | // --------------------------------------------------------------------------------------------
// Properties and Optimization
// --------------------------------------------------------------------------------------------
@Override
protected List<OperatorDescriptorDual> getPossibleProperties() {
    return this.dataProperties;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_releaseAllResource_rdh | /**
 * Release all resources used by the map.
 */
private void releaseAllResource() {
    long node = levelIndexHeader.getNextNode(0);
    while (node != NIL_NODE) {
        long nextNode = helpGetNextNode(node, 0);
        long valuePointer = SkipListUtils.helpGetValuePointer(node, spaceAllocator);
        spaceAllocator.free(node);
        SkipListUtils.removeAllValues(valuePointer, spaceAllocator);
        node = nextNode;
    }
    totalSize = 0;
    logicallyRemovedNodes.clear();
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_totalSize_rdh | /**
* Returns total size of this map, including logically removed state.
*/
int totalSize() {
return totalSize;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_putValue_rdh | /**
* Update or insert the value for the given node.
*
* @param currentNode
* the node to put value for.
* @param value
* the value to put.
* @param returnOldState
* whether to return the old state.
* @return the old state if it exists and {@code returnOldState} is true, or else null.
*/
private S putValue(long currentNode, byte[] value, boolean returnOldState) {
int version = SkipListUtils.helpGetNodeLatestVersion(currentNode, spaceAllocator);
boolean needCopyOnWrite = version < highestRequiredSnapshotVersionPlusOne;
long oldValuePointer;
    if (needCopyOnWrite) {
        oldValuePointer = updateValueWithCopyOnWrite(currentNode, value);
    } else {
        oldValuePointer = updateValueWithReplace(currentNode, value);
    }
    NodeStatus oldStatus = helpSetNodeStatus(currentNode, NodeStatus.PUT);
    if (oldStatus == NodeStatus.REMOVE) {
        logicallyRemovedNodes.remove(currentNode);
    }
    S oldState = null;
    if (returnOldState) {
        oldState = helpGetState(oldValuePointer);
    }
    // for the replace case, the old value space needs to be freed
    if (!needCopyOnWrite) {
        spaceAllocator.free(oldValuePointer);
    }
return oldState;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpGetStateEntry_rdh | /**
* Returns the state entry of the node.
 */
private StateEntry<K, N, S> helpGetStateEntry(long node) {
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
int level = SkipListUtils.getLevel(segment, offsetInSegment);
int keyDataLen = SkipListUtils.getKeyLen(segment, offsetInSegment);
    int keyDataOffset = offsetInSegment + SkipListUtils.getKeyDataOffset(level);
    K key = skipListKeySerializer.deserializeKey(segment, keyDataOffset, keyDataLen);
    N namespace = skipListKeySerializer.deserializeNamespace(segment, keyDataOffset, keyDataLen);
long valuePointer = SkipListUtils.getValuePointer(segment, offsetInSegment);
S state = helpGetState(valuePointer);
    return new StateEntry.SimpleStateEntry<>(key, namespace, state);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_deleteNodeMeta_rdh | /**
 * Physically delete the meta data of the node, including the node level index and the node key,
 * and reduce the total size of the skip list.
*
* @param node
* node to remove.
* @param prevNode
* previous node at the level 0.
* @param nextNode
* next node at the level 0.
* @return value pointer of the node.
 */
private long deleteNodeMeta(long node, long prevNode, long nextNode) {
// set next node of prevNode at level 0 to nextNode
    helpSetNextNode(prevNode, nextNode, 0);
// remove the level index for the node
SkipListUtils.removeLevelIndex(node, spaceAllocator, levelIndexHeader);
// free space used by key
long valuePointer = SkipListUtils.helpGetValuePointer(node, spaceAllocator);
this.spaceAllocator.free(node);
// reduce total size of the skip list
// note that we regard the node to be removed once its meta is deleted
totalSize--;
return valuePointer;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_doPhysicalRemoveAndGetValue_rdh | /**
 * Removes the node physically, and returns the newest-version value pointer. Space used by the
 * key and the older values will be freed here, but the space of the newest-version value will
 * not be freed; the caller is responsible for freeing it.
*
* @param node
* node to remove.
* @param prevNode
* previous node at the level 0.
* @param nextNode
* next node at the level 0.
* @return newest-version value pointer.
*/
private long doPhysicalRemoveAndGetValue(long node, long prevNode, long nextNode) {
// free space used by key and level index
long valuePointer = deleteNodeMeta(node, prevNode, nextNode);
// free space used by values except for the newest-version
long nextValuePointer = SkipListUtils.helpGetNextValuePointer(valuePointer, spaceAllocator);
SkipListUtils.removeAllValues(nextValuePointer, spaceAllocator);
return valuePointer;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_doWriteValue_rdh | /**
* Write the meta and data for the value to the space where the value pointer points.
*
* @param valuePointer
 * 		pointer to the space where the meta and data are written.
* @param value
* data of the value.
* @param version
* version of this value.
* @param keyPointer
* pointer to the key.
* @param nextValuePointer
* pointer to the next value.
*/
private void doWriteValue(long valuePointer, byte[] value, int version, long keyPointer, long nextValuePointer) {
Node node = getNodeSegmentAndOffset(valuePointer);
MemorySegment segment = node.nodeSegment;
int offsetInSegment = node.nodeOffset;
SkipListUtils.putValueVersion(segment, offsetInSegment, version);
SkipListUtils.putKeyPointer(segment, offsetInSegment, keyPointer);
SkipListUtils.putNextValuePointer(segment, offsetInSegment, nextValuePointer);
SkipListUtils.putValueLen(segment, offsetInSegment, value == null ? 0 : value.length);
if (value != null) {
SkipListUtils.putValueData(segment, offsetInSegment, value);
}
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_isNodeRemoved_rdh | // Help methods ---------------------------------------------------------------
/**
* Whether the node has been logically removed.
*/
private boolean isNodeRemoved(long node) {
    return SkipListUtils.isNodeRemoved(node, spaceAllocator);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_doWriteKey_rdh | /**
* Write the meta and data for the key to the given node.
*
* @param node
* the node for the key to write.
* @param level
* level of this node.
* @param keySegment
* memory segment storing the key.
* @param keyOffset
* offset of key in memory segment.
* @param keyLen
* length of the key.
* @param valuePointer
* pointer to value.
* @param nextNode
* next node on level 0.
*/
private void doWriteKey(long node, int level, MemorySegment keySegment, int keyOffset, int keyLen, long valuePointer, long nextNode) {
    Node nodeStorage = getNodeSegmentAndOffset(node);
    MemorySegment segment = nodeStorage.nodeSegment;
    int offsetInSegment = nodeStorage.nodeOffset;
    SkipListUtils.putLevelAndNodeStatus(segment, offsetInSegment, level, NodeStatus.PUT);
    SkipListUtils.putKeyLen(segment, offsetInSegment, keyLen);
    SkipListUtils.putValuePointer(segment, offsetInSegment, valuePointer);
    SkipListUtils.putNextKeyPointer(segment, offsetInSegment, nextNode);
    SkipListUtils.putKeyData(segment, offsetInSegment, keySegment, keyOffset, keyLen, level);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpSetNextNode_rdh | /**
* Set the next node of the given node at the given level.
 */
private void helpSetNextNode(long node, long nextNode, int level) {
SkipListUtils.helpSetNextNode(node, nextNode, level, levelIndexHeader, spaceAllocator);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_doPhysicalRemove_rdh | /**
* Removes the node physically, and free all space used by the key and value.
*
* @param node
* node to remove.
* @param prevNode
* previous node at the level 0.
* @param nextNode
* next node at the level 0.
 */
private void doPhysicalRemove(long node, long prevNode, long nextNode) {
    // free space used by key and level index
    long valuePointer = deleteNodeMeta(node, prevNode, nextNode);
    // free space used by value
    SkipListUtils.removeAllValues(valuePointer, spaceAllocator);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_findPredecessor_rdh | /**
 * Find the predecessor node for the given key at the given level. The key is stored in the
 * memory segment starting at the given offset.
*
* @param keySegment
* memory segment which contains the key.
* @param keyOffset
* offset of the key in the memory segment.
* @param level
* the level.
* @return node id before the key at the given level.
*/
private long findPredecessor(MemorySegment keySegment, int keyOffset, int level) {
    return SkipListUtils.findPredecessor(keySegment, keyOffset, level, levelIndexHeader, spaceAllocator);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_iterateAndProcess_rdh | /**
 * Iterate the skip list and apply the given function.
*
* @param keySegment
* memory segment storing the key.
* @param keyOffset
* offset of the key.
* @param keyLen
* length of the key.
* @param function
* the function to apply when the skip list contains the given key, which
* accepts two parameters: an encapsulation of [previous_node, current_node, next_node] and
* a boolean indicating whether the node with same key has been logically removed, and
* returns a state.
 * @return the result of the iteration and processing
*/
private SkipListIterateAndProcessResult iterateAndProcess(MemorySegment keySegment, int keyOffset, int keyLen, BiFunction<SkipListNodePointers, Boolean, S> function) {
int deleteCount = 0;
long prevNode = findPredecessor(keySegment, keyOffset, 1);
long currentNode = helpGetNextNode(prevNode, 0);
long nextNode;
int c;
while (currentNode != NIL_NODE) {
        nextNode = helpGetNextNode(currentNode, 0);
        // Check whether the current node is already logically removed, to save some key
        // comparisons, at the cost of an additional remove-then-add operation if the
        // to-be-removed node has the same key as the to-be-put one.
boolean isRemoved = isNodeRemoved(currentNode);
if ((isRemoved && (highestRequiredSnapshotVersionPlusOne == 0)) && (deleteCount < numKeysToDeleteOneTime)) {
doPhysicalRemove(currentNode, prevNode, nextNode);
                logicallyRemovedNodes.remove(currentNode);
                currentNode = nextNode;
deleteCount++;
continue;
}
c = compareSegmentAndNode(keySegment, keyOffset, keyLen, currentNode);
if (c < 0) {
// The given key is less than the current node, break the loop
break;
} else if (c > 0) {
// The given key is larger than the current node, continue
prevNode = currentNode;
currentNode = nextNode;
} else {
// The given key is equal to the current node, apply the function
S state = function.apply(new SkipListNodePointers(prevNode, currentNode, nextNode), isRemoved);
return new SkipListIterateAndProcessResult(prevNode, currentNode, true, state);
}
    }
    return new SkipListIterateAndProcessResult(prevNode, currentNode, false, null);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpSetNodeStatus_rdh | /**
* Set node status to the given new status, and return old status.
*/
private NodeStatus helpSetNodeStatus(long node, NodeStatus newStatus) {
    Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
NodeStatus oldStatus = SkipListUtils.getNodeStatus(segment, offsetInSegment);
if (oldStatus != newStatus) {
int level = SkipListUtils.getLevel(segment, offsetInSegment);
SkipListUtils.putLevelAndNodeStatus(segment, offsetInSegment, level, newStatus);
}
return oldStatus;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_tryToDeleteNodesPhysically_rdh | /**
 * Try to delete some nodes that have been logically removed.
*/
private void tryToDeleteNodesPhysically() {
if (highestRequiredSnapshotVersionPlusOne != 0) {
return;
}
int threshold = ((int) (totalSize * logicalRemovedKeysRatio));
int size = logicallyRemovedNodes.size();
if (size > threshold) {
        deleteLogicallyRemovedNodes(size - threshold);
}
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_updateStat_rdh | /**
* Update some statistics.
*/
private void updateStat() {
requestCount++;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_getNode_rdh | /**
* Find the node containing the given key.
*
* @param keySegment
* memory segment storing the key.
* @param keyOffset
* offset of the key.
* @param keyLen
* length of the key.
* @return the state. Null will be returned if key does not exist.
*/
@VisibleForTesting
@Nullable
S getNode(MemorySegment keySegment, int keyOffset, int keyLen) {
    SkipListIterateAndProcessResult result = iterateAndProcess(keySegment, keyOffset, keyLen,
        (pointers, isRemoved) -> {
            long currentNode = pointers.currentNode;
            return isRemoved ? null : getNodeStateHelper(currentNode);
        });
    return result.isKeyFound ? result.state : null;
}
/**
* Put the key into the skip list. If the key does not exist before, a new node will be created.
* If the key exists before, return the old state or null depending on {@code returnOldState} | 3.26 |
flink_CopyOnWriteSkipListStateMap_updateValueWithCopyOnWrite_rdh | /**
 * Update the value of the node with copy-on-write mode. The old value will be linked after the
 * new value, and can still be accessed.
*
* @param node
* the node to update.
* @param value
* the value.
* @return the old value pointer.
*/
private long updateValueWithCopyOnWrite(long node, byte[] value) {
// a null value indicates this is a removed node
int valueSize = (value == null) ? 0 : value.length;
int totalValueLen = SkipListUtils.getValueMetaLen() + valueSize;
long valuePointer = allocateSpace(totalValueLen);
    Node nodeStorage = getNodeSegmentAndOffset(node);
    MemorySegment nodeSegment = nodeStorage.nodeSegment;
    int offsetInNodeSegment = nodeStorage.nodeOffset;
    long oldValuePointer = SkipListUtils.getValuePointer(nodeSegment, offsetInNodeSegment);
    doWriteValue(valuePointer, value, stateMapVersion, node, oldValuePointer);
    // update the value pointer in the node only after the new value points to the older
    // value, so that the old value can still be accessed concurrently
    SkipListUtils.putValuePointer(nodeSegment, offsetInNodeSegment, valuePointer);
    return oldValuePointer;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_getRandomIndexLevel_rdh | /**
* Return a random level for new node.
*
* <p>The implementation refers to the {@code randomLevel} method of JDK7's
* ConcurrentSkipListMap. See
* https://github.com/openjdk-mirror/jdk7u-jdk/blob/master/src/share/classes/java/util/concurrent/ConcurrentSkipListMap.java#L899
*/
private int getRandomIndexLevel() {
int x = randomSeed;
x ^= x << 13;
x ^= x >>> 17;
x ^= x << 5;
randomSeed = x;
// test highest and lowest bits
if ((x & 0x8001) != 0) {
return 0;
}
int level = 1;
int curMax = levelIndexHeader.getLevel();
x >>>= 1;
    while ((x & 1) != 0) {
        ++level;
        x >>>= 1;
        // the level can only increase one step beyond the current max
        if (level > curMax) {
            break;
        }
}
return level;
} | 3.26 |
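A minimal standalone sketch of the level-drawing scheme in the row above (the seed value and max level here are hypothetical; the real map keeps `randomSeed` as a field and caps growth at `levelIndexHeader.getLevel()`): about three quarters of draws, whenever bit 0 or bit 15 is set, stay at level 0, and higher levels become geometrically rarer.

```java
import java.util.Arrays;

public class RandomLevelDemo {
    private static int randomSeed = 0x0badc0de; // hypothetical non-zero seed

    static int nextLevel(int curMax) {
        int x = randomSeed;
        x ^= x << 13; // xorshift step, as in ConcurrentSkipListMap
        x ^= x >>> 17;
        x ^= x << 5;
        randomSeed = x;
        if ((x & 0x8001) != 0) { // ~3/4 of draws stay at level 0
            return 0;
        }
        int level = 1;
        x >>>= 1;
        while ((x & 1) != 0) {
            ++level;
            x >>>= 1;
            if (level > curMax) { // grow at most one level past the current max
                break;
            }
        }
        return level;
    }

    public static void main(String[] args) {
        int[] histogram = new int[12];
        for (int i = 0; i < 100_000; i++) {
            histogram[Math.min(nextLevel(10), 11)]++;
        }
        System.out.println(Arrays.toString(histogram)); // roughly geometric decay
    }
}
```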
flink_CopyOnWriteSkipListStateMap_getKeys_rdh | // ----------------------------------------------------------------------------------
@Override
public Stream<K> getKeys(N namespace) {
updateStat();
MemorySegment namespaceSegment = skipListKeySerializer.serializeNamespaceToSegment(namespace);
Iterator<Long> nodeIter = new NamespaceNodeIterator(namespaceSegment, 0, namespaceSegment.size());
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(nodeIter, 0), false).map(this::helpGetKey);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_updateValueWithReplace_rdh | /**
 * Update the value of the node with replace mode. The old value will be unlinked and replaced
 * by the new value, and cannot be accessed afterwards. Note that the space of the old value is
 * not freed here; the caller of this method is responsible for the space management.
*
* @param node
* the node whose value will be replaced.
* @param value
* the value.
* @return the old value pointer.
*/
private long updateValueWithReplace(long node, byte[] value) {
// a null value indicates this is a removed node
int valueSize = (value == null) ? 0 : value.length;
int totalValueLen = SkipListUtils.getValueMetaLen() + valueSize;
long valuePointer = allocateSpace(totalValueLen);
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment nodeSegment = nodeStorage.nodeSegment;
int offsetInNodeSegment = nodeStorage.nodeOffset;
long oldValuePointer = SkipListUtils.getValuePointer(nodeSegment, offsetInNodeSegment);
    long nextValuePointer = SkipListUtils.helpGetNextValuePointer(oldValuePointer, spaceAllocator);
    doWriteValue(valuePointer, value, stateMapVersion, node, nextValuePointer);
    // update the value pointer in the node only after the new value points to the older
    // value, so that the old value can still be accessed concurrently
    SkipListUtils.putValuePointer(nodeSegment, offsetInNodeSegment, valuePointer);
    return oldValuePointer;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_getNodeStateHelper_rdh | /**
 * Return the state of the node. Null will be returned if the node is removed.
 */
private S getNodeStateHelper(long node) {
    Node nodeStorage = getNodeSegmentAndOffset(node);
    MemorySegment segment = nodeStorage.nodeSegment;
    int offsetInSegment = nodeStorage.nodeOffset;
    long valuePointer = SkipListUtils.getValuePointer(segment, offsetInSegment);
    return helpGetState(valuePointer);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_getFirstNodeWithNamespace_rdh | /**
* Find the first node with the given namespace at level 0.
*
* @param namespaceSegment
* memory segment storing the namespace.
* @param namespaceOffset
* offset of the namespace.
* @param namespaceLen
* length of the namespace.
 * @return the first node with the given namespace. NIL_NODE will be returned if none exists.
*/
private long getFirstNodeWithNamespace(MemorySegment namespaceSegment, int namespaceOffset, int namespaceLen) {
int currentLevel = levelIndexHeader.getLevel();
long prevNode = HEAD_NODE;
long currentNode = helpGetNextNode(prevNode, currentLevel);
int c;
// find the predecessor node at level 0.
    for (;;) {
        if (currentNode != NIL_NODE) {
            c = compareNamespaceAndNode(namespaceSegment, namespaceOffset, namespaceLen, currentNode);
            if (c > 0) {
                prevNode = currentNode;
                currentNode = helpGetNextNode(prevNode, currentLevel);
                continue;
            }
}
currentLevel--;
if (currentLevel < 0) {
break;
}
currentNode = helpGetNextNode(prevNode, currentLevel);
}
// find the first node that has not been logically removed
while (currentNode != NIL_NODE) {
if (isNodeRemoved(currentNode)) {
currentNode = helpGetNextNode(currentNode, 0);
continue;
}
c = compareNamespaceAndNode(namespaceSegment, namespaceOffset, namespaceLen, currentNode);
if (c == 0) {
return currentNode;
}
if (c < 0) {
break;
}
}
return NIL_NODE;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpGetNextNode_rdh | /**
* Return the next of the given node at the given level.
*/
long helpGetNextNode(long node, int level) {
return SkipListUtils.helpGetNextNode(node, level, this.levelIndexHeader, this.spaceAllocator);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_removeNode_rdh | /**
* Remove the given node indicated by {@link SkipListNodePointers#currentNode}.
*
* @param pointers
* pointers of the node to remove and its prev/next node.
* @param isLogicallyRemoved
* whether the node to remove is already logically removed.
* @param returnOldState
* whether to return the old state after removal.
* @return the old state if {@code returnOldState} is true, or else return null.
*/
private S removeNode(SkipListNodePointers pointers, Boolean isLogicallyRemoved, boolean returnOldState) {
long prevNode = pointers.prevNode;
long currentNode = pointers.currentNode;
long nextNode = pointers.nextNode;
// if the node has been logically removed, and can not be physically
// removed here, just return null
if (isLogicallyRemoved && (highestRequiredSnapshotVersionPlusOne != 0)) {
return null;
}
long oldValuePointer;
boolean oldValueNeedFree;
    if (highestRequiredSnapshotVersionPlusOne == 0) {
// do physically remove only when there is no snapshot running
oldValuePointer = doPhysicalRemoveAndGetValue(currentNode, prevNode, nextNode);
// the node has been logically removed, and remove it from the set
if (isLogicallyRemoved) {
logicallyRemovedNodes.remove(currentNode);
}
oldValueNeedFree = true;
} else {
int version = SkipListUtils.helpGetNodeLatestVersion(currentNode, spaceAllocator);
if (version < highestRequiredSnapshotVersionPlusOne) {
// the newest-version value may be used by snapshots, and update it with
// copy-on-write
oldValuePointer = updateValueWithCopyOnWrite(currentNode, null);
oldValueNeedFree = false;
} else {
// replace the newest-version value.
oldValuePointer = updateValueWithReplace(currentNode, null);
oldValueNeedFree = true;
}
helpSetNodeStatus(currentNode, NodeStatus.REMOVE);
logicallyRemovedNodes.add(currentNode);
}
S oldState = null;
if (returnOldState) {
oldState = helpGetState(oldValuePointer);
}
if (oldValueNeedFree) {
spaceAllocator.free(oldValuePointer);
}
return oldState;
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpGetState_rdh | /**
 * Return the state pointed to by the given pointer. The serializer used is the
 * {@link #skipListValueSerializer}. Because the serializer is not thread-safe, this method
 * should only be called synchronously within the state map.
*/
S helpGetState(long valuePointer) {
return helpGetState(valuePointer, skipListValueSerializer);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpGetKey_rdh | /**
* Returns the key of the node.
*/
private K helpGetKey(long node) {
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
int level = SkipListUtils.getLevel(segment, offsetInSegment);
    int keyDataLen = SkipListUtils.getKeyLen(segment, offsetInSegment);
    int keyDataOffset = offsetInSegment + SkipListUtils.getKeyDataOffset(level);
return skipListKeySerializer.deserializeKey(segment, keyDataOffset, keyDataLen);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_getNodeInternal_rdh | /**
* Find the node containing the given key.
*
* @param key
* the key.
* @param namespace
* the namespace.
 * @return the state. Null will be returned if the key does not exist.
*/
private S getNodeInternal(K key, N namespace) {
MemorySegment keySegment = getKeySegment(key, namespace);
int keyLen = keySegment.size();
return getNode(keySegment, 0, keyLen);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpGetBytesForKeyAndNamespace_rdh | /**
* Returns the byte arrays of serialized key and namespace.
*
* @param node
* the node.
* @return a tuple of byte arrays of serialized key and namespace
*/
Tuple2<byte[], byte[]> helpGetBytesForKeyAndNamespace(long node) {
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
int level = SkipListUtils.getLevel(segment, offsetInSegment);
int keyDataOffset = offsetInSegment + SkipListUtils.getKeyDataOffset(level);
return skipListKeySerializer.getSerializedKeyAndNamespace(segment, keyDataOffset);
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_helpGetBytesForState_rdh | /**
* Returns the byte array of serialized state.
*
* @param valuePointer
* pointer to value.
* @return byte array of serialized value.
*/
byte[] helpGetBytesForState(long valuePointer) {
Node node = getNodeSegmentAndOffset(valuePointer);
MemorySegment segment = node.nodeSegment;
int offsetInSegment = node.nodeOffset;
int valueLen = SkipListUtils.getValueLen(segment, offsetInSegment);
MemorySegment valueSegment = MemorySegmentFactory.allocateUnpooledSegment(valueLen);
segment.copyTo(offsetInSegment + SkipListUtils.getValueMetaLen(), valueSegment, 0, valueLen);
return valueSegment.getArray();
} | 3.26 |
flink_CopyOnWriteSkipListStateMap_getKeySegment_rdh | /**
* Get the {@link MemorySegment} wrapping up the serialized key bytes.
*
* @param key
* the key.
* @param namespace
* the namespace.
* @return the {@link MemorySegment} wrapping up the serialized key bytes.
*/
private MemorySegment getKeySegment(K key, N namespace) {
return skipListKeySerializer.serializeToSegment(key, namespace);
} | 3.26 |
flink_FileCatalogStore_contains_rdh | /**
* Returns whether the specified catalog exists in the catalog store.
*
* @param catalogName
* the name of the catalog to check
* @return {@code true} if the catalog exists in the catalog store, {@code false} otherwise
* @throws CatalogException
* if the catalog store is not open or if there is an error checking
* for the catalog
*/
@Override
public boolean contains(String catalogName) throws CatalogException {
checkOpenState();
Path catalogPath = getCatalogPath(catalogName);
try {
return catalogPath.getFileSystem().exists(catalogPath);
} catch (Exception e) {
throw new CatalogException(String.format("Failed to check if catalog %s exists in the catalog store.", catalogName), e);
}
} | 3.26 |
flink_FileCatalogStore_storeCatalog_rdh | /**
* Stores the specified catalog in the catalog store.
*
* @param catalogName
* the name of the catalog
* @param catalog
* the catalog descriptor to store
* @throws CatalogException
* if the catalog store is not open or if there is an error storing the
* catalog
*/
@Override
public void storeCatalog(String catalogName, CatalogDescriptor catalog) throws CatalogException {
    checkOpenState();
    Path catalogPath = getCatalogPath(catalogName);
    try {
        FileSystem fs = catalogPath.getFileSystem();
        if (fs.exists(catalogPath)) {
            throw new CatalogException(String.format("Catalog %s's store file %s already exists.", catalogName, catalogPath));
        }
        try (FSDataOutputStream os = fs.create(catalogPath, WriteMode.NO_OVERWRITE)) {
            YAML_MAPPER.writeValue(os, catalog.getConfiguration().toMap());
        }
        LOG.info("Catalog {}'s configuration saved to file {}", catalogName, catalogPath);
    } catch (CatalogException e) {
        throw e;
    } catch (Exception e) {
        throw new CatalogException(String.format("Failed to store catalog %s's configuration to file %s.", catalogName, catalogPath), e);
}
} | 3.26 |
flink_FileCatalogStore_listCatalogs_rdh | /**
* Returns a set of all catalog names in the catalog store.
*
* @return a set of all catalog names in the catalog store
* @throws CatalogException
* if the catalog store is not open or if there is an error retrieving
* the list of catalog names
*/
@Override
public Set<String> listCatalogs() throws CatalogException {
    checkOpenState();
    try {
        FileStatus[] statusArr = catalogStorePath.getFileSystem().listStatus(catalogStorePath);
        return Arrays.stream(statusArr)
                .filter(status -> !status.isDir())
                .map(FileStatus::getPath)
                .map(Path::getName)
                .map(filename -> filename.replace(FILE_EXTENSION, ""))
                .collect(Collectors.toSet());
    } catch (Exception e) {
        throw new CatalogException(String.format("Failed to list file catalog store directory %s.", catalogStorePath), e);
    }
} | 3.26 |
flink_FileCatalogStore_open_rdh | /**
* Opens the catalog store and initializes the catalog file map.
*
* @throws CatalogException
 * 		if the catalog store directory does not exist, is not a directory, or
* if there is an error reading the directory
*/
@Override
public void open() throws CatalogException {
    try {
        FileSystem fs = catalogStorePath.getFileSystem();
        if (!fs.exists(catalogStorePath)) {
            fs.mkdirs(catalogStorePath);
        }
        if (!fs.getFileStatus(catalogStorePath).isDir()) {
            throw new CatalogException(String.format("Failed to open catalog store. The given catalog store path %s is not a directory.", catalogStorePath));
        }
    } catch (CatalogException e) {
        throw e;
    } catch (Exception e) {
        throw new CatalogException(String.format("Failed to open file catalog store directory %s.", catalogStorePath), e);
    }
    super.open();
} | 3.26 |
flink_FileCatalogStore_removeCatalog_rdh | /**
* Removes the specified catalog from the catalog store.
*
* @param catalogName
* the name of the catalog to remove
* @param ignoreIfNotExists
* whether to ignore if the catalog does not exist in the catalog store
* @throws CatalogException
* if the catalog store is not open or if there is an error removing
* the catalog
*/
@Override
public void removeCatalog(String catalogName, boolean ignoreIfNotExists) throws CatalogException {
checkOpenState();
Path catalogPath = getCatalogPath(catalogName);
try {
FileSystem fs = catalogPath.getFileSystem();
if (fs.exists(catalogPath)) {
fs.delete(catalogPath, false);
} else if (!ignoreIfNotExists) {
throw new CatalogException(String.format("Catalog %s's store file %s does not exist.", catalogName, catalogPath));
}
    } catch (CatalogException e) {
        throw e;
    } catch (Exception e) {
throw new CatalogException(String.format("Failed to remove catalog %s's store file.", catalogName), e);
}
}
/**
* Returns the catalog descriptor for the specified catalog, if it exists in the catalog store.
*
* @param catalogName
* the name of the catalog to retrieve
* @return an {@link Optional} containing the catalog descriptor, or an empty {@link Optional} | 3.26 |
flink_TumbleWithSize_on_rdh | /**
* Specifies the time attribute on which rows are grouped.
*
 * <p>For streaming tables you can specify grouping by an event-time or processing-time
 * attribute.
*
* <p>For batch tables you can specify grouping on a timestamp or long attribute.
*
* @param timeField
* time attribute for streaming and batch tables
* @return a tumbling window on event-time
*/
public TumbleWithSizeOnTime on(Expression timeField) {
    return new TumbleWithSizeOnTime(timeField, size);
} | 3.26 |
flink_ApplicationStatus_fromJobStatus_rdh | /**
* Derives the ApplicationStatus that should be used for a job that resulted in the given job
* status. If the job is not yet in a globally terminal state, this method returns {@link #UNKNOWN}.
*/
public static ApplicationStatus fromJobStatus(JobStatus jobStatus) {
return JOB_STATUS_APPLICATION_STATUS_BI_MAP.getOrDefault(jobStatus, UNKNOWN);
} | 3.26 |
flink_ApplicationStatus_processExitCode_rdh | /**
* Gets the process exit code associated with this status.
*
* @return The associated process exit code.
*/
public int processExitCode() {
return processExitCode;
} | 3.26 |
flink_ApplicationStatus_deriveJobStatus_rdh | /**
* Derives the {@link JobStatus} from the {@code ApplicationStatus}.
*
* @return The corresponding {@code JobStatus}.
* @throws UnsupportedOperationException
* for {@link #UNKNOWN}.
*/
public JobStatus deriveJobStatus() {
if (!JOB_STATUS_APPLICATION_STATUS_BI_MAP.inverse().containsKey(this)) {
throw new UnsupportedOperationException(this.name() + " cannot be mapped to a JobStatus.");
}
return JOB_STATUS_APPLICATION_STATUS_BI_MAP.inverse().get(this);
} | 3.26 |
flink_HiveMetastoreClientWrapper_getNotNullColumns_rdh | // -------- Start of shimmed methods ----------
public Set<String> getNotNullColumns(Configuration conf, String dbName, String tableName) {
return hiveShim.getNotNullColumns(client, conf, dbName, tableName);
} | 3.26 |
flink_KeyGroupPartitioner_buildHistogramByAccumulatingCounts_rdh | /**
* This method creates a histogram from the counts per key-group in {@link #counterHistogram}.
*/
private int buildHistogramByAccumulatingCounts() {
int sum = 0;
for (int i = 0; i < counterHistogram.length; ++i) {
int currentSlotValue = counterHistogram[i];
counterHistogram[i] = sum;
sum += currentSlotValue;
}
return sum;
} | 3.26 |
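To make the accumulation step concrete, here is a tiny self-contained sketch with hypothetical counts: the in-place exclusive prefix sum turns per-key-group counts into output start offsets, and the returned sum is the total number of elements.

```java
import java.util.Arrays;

public class HistogramDemo {
    public static void main(String[] args) {
        int[] counterHistogram = {2, 0, 3, 1}; // hypothetical counts per key-group
        int sum = 0;
        for (int i = 0; i < counterHistogram.length; ++i) {
            int currentSlotValue = counterHistogram[i];
            counterHistogram[i] = sum; // slot now holds the start offset of this key-group
            sum += currentSlotValue;
        }
        // prints [0, 2, 2, 5], total=6
        System.out.println(Arrays.toString(counterHistogram) + ", total=" + sum);
    }
}
```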
flink_KeyGroupPartitioner_partitionByKeyGroup_rdh | /**
* Partitions the data into key-groups and returns the result as a {@link PartitioningResult}.
*/
public PartitioningResult<T> partitionByKeyGroup() {
if (computedResult == null) {
        reportAllElementKeyGroups();
        int outputNumberOfElements = buildHistogramByAccumulatingCounts();
executePartitioning(outputNumberOfElements);
}
return computedResult;
} | 3.26 |
flink_KeyGroupPartitioner_reportAllElementKeyGroups_rdh | /**
* This method iterates over the input data and reports the key-group for each element.
*/
protected void reportAllElementKeyGroups() {
Preconditions.checkState(partitioningSource.length >= numberOfElements);
for (int i = 0; i < numberOfElements; ++i) {
int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(keyExtractorFunction.extractKeyFromElement(partitioningSource[i]), totalKeyGroups);
reportKeyGroupOfElementAtIndex(i, keyGroup);
}
} | 3.26 |
flink_StateHandleStoreUtils_deserialize_rdh | /**
* Deserializes the passed data into a {@link RetrievableStateHandle}.
*
* @param data
* The data that shall be deserialized.
* @param <T>
* The type of data handled by the deserialized {@code RetrievableStateHandle}.
* @return The {@code RetrievableStateHandle} instance.
* @throws IOException
* Any of the usual Input/Output related exceptions.
* @throws ClassNotFoundException
* If the data couldn't be deserialized into a {@code RetrievableStateHandle} referring to the expected type {@code <T>}.
*/
public static <T extends Serializable> T deserialize(byte[] data) throws IOException, ClassNotFoundException {
return InstantiationUtil.deserializeObject(data, Thread.currentThread().getContextClassLoader());
} | 3.26 |
flink_StateHandleStoreUtils_serializeOrDiscard_rdh | /**
* Serializes the passed {@link StateObject} and discards the state in case of failure.
*
* @param stateObject
* the {@code StateObject} that shall be serialized.
* @return The serialized version of the passed {@code StateObject}.
* @throws Exception
* if an error occurred during the serialization. The corresponding {@code StateObject} will be discarded in that case.
*/
public static byte[] serializeOrDiscard(StateObject stateObject) throws Exception {
    try {
        return InstantiationUtil.serializeObject(stateObject);
    } catch (Exception e) {
        try {
stateObject.discardState();
} catch (Exception discardException) {
e.addSuppressed(discardException);
}
ExceptionUtils.rethrowException(e);
}
// will never happen but is added to please the compiler
return new byte[0];
} | 3.26 |
flink_NetUtils_validateHostPortString_rdh | /**
* Validates if the given String represents a hostname:port.
*
* <p>Works also for ipv6.
*
* <p>See:
* http://stackoverflow.com/questions/2345063/java-common-way-to-validate-and-convert-hostport-to-inetsocketaddress
*
* @return URL object for accessing host and port
*/
private static URL validateHostPortString(String hostPort) {
    if (StringUtils.isNullOrWhitespaceOnly(hostPort)) {
        throw new IllegalArgumentException("hostPort should not be null or empty");
}
try {
        URL url = (hostPort.toLowerCase().startsWith("http://") || hostPort.toLowerCase().startsWith("https://")) ? new URL(hostPort) : new URL("http://" + hostPort);
        if (url.getHost() == null) {
            throw new IllegalArgumentException("The given host:port ('" + hostPort + "') doesn't contain a valid host");
        }
        if (url.getPort() == -1) {
            throw new IllegalArgumentException("The given host:port ('" + hostPort + "') doesn't contain a valid port");
        }
        return url;
} catch (MalformedURLException e) {
        throw new IllegalArgumentException("The given host:port ('" + hostPort + "') is invalid", e);
}
} | 3.26 |
flink_NetUtils_getIPv6UrlRepresentation_rdh | /**
* Creates a compressed URL style representation of an Inet6Address.
*
* <p>This method copies and adopts code from Google's Guava library. We re-implement this here
* in order to reduce dependency on Guava. The Guava library has frequently caused dependency
* conflicts in the past.
*/
private static String getIPv6UrlRepresentation(byte[] addressBytes) {
// first, convert bytes to 16 bit chunks
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
        hextets[i] = ((addressBytes[2 * i] & 0xff) << 8) | (addressBytes[(2 * i) + 1] & 0xff);
}
// now, find the sequence of zeros that should be compressed
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < (hextets.length + 1); i++) {
        if ((i < hextets.length) && (hextets[i] == 0)) {
            if (runStart < 0) {
                runStart = i;
            }
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
}
runStart = -1;
}
}
    if (bestRunLength >= 2) {
        Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
// convert into text form
StringBuilder buf = new StringBuilder(40);
buf.append('[');
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else if ((i == 0) || lastWasNumber) {
buf.append("::");
}
lastWasNumber = thisIsNumber;
}
buf.append(']');
return buf.toString();
} | 3.26 |
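A small usage sketch, assuming flink-core is on the classpath: the public `ipAddressToUrlString(...)` (shown further below in this table) delegates to `getIPv6UrlRepresentation(...)` for IPv6 addresses, yielding the bracketed, zero-compressed form.

```java
import java.net.InetAddress;
import org.apache.flink.util.NetUtils;

public class Ipv6UrlDemo {
    public static void main(String[] args) throws Exception {
        InetAddress v6 = InetAddress.getByName("2001:db8:0:0:0:0:0:1");
        // the longest run of zero hextets is compressed to "::"
        System.out.println(NetUtils.ipAddressToUrlString(v6)); // [2001:db8::1]
    }
}
```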
flink_NetUtils_ipAddressAndPortToUrlString_rdh | /**
 * Encodes an IP address and port to be included in a URL. In particular, this method makes sure
 * that IPv6 addresses have the proper formatting to be included in URLs.
*
* @param address
* The address to be included in the URL.
* @param port
* The port for the URL address.
* @return The proper URL string encoded IP address and port.
 */
public static String ipAddressAndPortToUrlString(InetAddress address, int port) {
    return ipAddressToUrlString(address) + ':' + port;
} | 3.26 |
flink_NetUtils_getHostnameFromFQDN_rdh | /**
* Turn a fully qualified domain name (fqdn) into a hostname. If the fqdn has multiple subparts
* (separated by a period '.'), it will take the first part. Otherwise it takes the entire fqdn.
*
* @param fqdn
* The fully qualified domain name.
* @return The hostname.
*/
public static String getHostnameFromFQDN(String fqdn) {
    if (fqdn == null) {
throw new IllegalArgumentException("fqdn is null");
}
int dotPos = fqdn.indexOf('.');
if (dotPos == (-1)) {
return fqdn;
} else {
return fqdn.substring(0, dotPos);
}
} | 3.26 |
flink_NetUtils_getPortRangeFromString_rdh | // ------------------------------------------------------------------------
// Port range parsing
// ------------------------------------------------------------------------
/**
* Returns an iterator over available ports defined by the range definition.
*
* @param rangeDefinition
* String describing a single port, a range of ports or multiple ranges.
* @return Set of ports from the range definition
* @throws NumberFormatException
* If an invalid string is passed.
*/
public static Iterator<Integer> getPortRangeFromString(String rangeDefinition) throws NumberFormatException {
final String[] ranges = rangeDefinition.trim().split(",");
UnionIterator<Integer> iterators = new UnionIterator<>();
    for (String rawRange : ranges) {
        Iterator<Integer> rangeIterator;
String range = rawRange.trim();
int dashIdx = range.indexOf('-');
        if (dashIdx == -1) {
            // only one port in range:
            final int port = Integer.parseInt(range);
            if (!isValidHostPort(port)) {
                throw new IllegalConfigurationException("Invalid port configuration. Port must be between 0 and 65535, but was " + port + ".");
            }
            rangeIterator = Collections.singleton(port).iterator();
} else {
// evaluate range
final int start = Integer.parseInt(range.substring(0, dashIdx));
            if (!isValidHostPort(start)) {
                throw new IllegalConfigurationException("Invalid port configuration. Port must be between 0 and 65535, but range start was " + start + ".");
            }
            final int end = Integer.parseInt(range.substring(dashIdx + 1));
            if (!isValidHostPort(end)) {
                throw new IllegalConfigurationException("Invalid port configuration. Port must be between 0 and 65535, but range end was " + end + ".");
            }
            if (start >= end) {
                throw new IllegalConfigurationException("Invalid port configuration. Port range end must be bigger than port range start."
                        + " If you wish to use a single port please provide the value directly, not as a range. Given range: " + range);
}
rangeIterator = new Iterator<Integer>() {
int i = start;
@Override
public boolean hasNext() {
return i <= end;
}
@Override
public Integer next() {
return i++;
}
@Override
public void remove() {
throw new UnsupportedOperationException("Remove not supported");
}
};
}
iterators.add(rangeIterator);
}
return iterators;
} | 3.26 |
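A usage sketch (assuming flink-core on the classpath): single ports and ranges can be mixed in one comma-separated definition, and the returned iterator walks them in order.

```java
import java.util.Iterator;
import org.apache.flink.util.NetUtils;

public class PortRangeDemo {
    public static void main(String[] args) {
        Iterator<Integer> ports = NetUtils.getPortRangeFromString("50100-50102, 50200");
        while (ports.hasNext()) {
            System.out.println(ports.next()); // 50100, 50101, 50102, 50200
        }
    }
}
```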
flink_NetUtils_createSocketFromPorts_rdh | /**
* Tries to allocate a socket from the given sets of ports.
*
* @param portsIterator
* A set of ports to choose from.
* @param factory
* A factory for creating the SocketServer
* @return null if no port was available or an allocated socket.
*/
public static ServerSocket createSocketFromPorts(Iterator<Integer> portsIterator, SocketFactory factory) {
while (portsIterator.hasNext()) {
int port = portsIterator.next();
LOG.debug("Trying to open socket on port {}", port);
try {
return factory.createSocket(port);
        } catch (IOException | IllegalArgumentException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Unable to allocate socket on port", e);
} else {
LOG.info("Unable to allocate on port {}, due to error: {}", port, e.getMessage());
}
}
}
return null;
} | 3.26 |
flink_NetUtils_m1_rdh | /**
 * Check whether the given port is in the right range when connecting to somewhere.
*
* @param port
* the port to check
 * @return true if the number is in the range 1 to 65535
*/
public static boolean m1(int port) {
return (1 <= port) && (port <= 65535);
} | 3.26 |
flink_NetUtils_ipAddressToUrlString_rdh | /**
* Encodes an IP address properly as a URL string. This method makes sure that IPv6 addresses
* have the proper formatting to be included in URLs.
*
* @param address
* The IP address to encode.
* @return The proper URL string encoded IP address.
*/
public static String ipAddressToUrlString(InetAddress address) {
if (address == null) {
throw new NullPointerException("address is null");
} else if (address instanceof Inet4Address) {
return address.getHostAddress();
} else if (address instanceof Inet6Address) {
return getIPv6UrlRepresentation(((Inet6Address) (address)));
} else {
throw new IllegalArgumentException("Unrecognized type of InetAddress: " + address);
}
} | 3.26 |
flink_NetUtils_unresolvedHostAndPortToNormalizedString_rdh | /**
* Returns a valid address for Pekko. It returns a String of format 'host:port'. When an IPv6
* address is specified, it normalizes the IPv6 address to avoid complications with the exact
* URL match policy of Pekko.
*
* @param host
* The hostname, IPv4 or IPv6 address
* @param port
* The port
* @return host:port where host will be normalized if it is an IPv6 address
*/
public static String unresolvedHostAndPortToNormalizedString(String host, int port) {
    Preconditions.checkArgument(isValidHostPort(port), "Port is not within the valid range.");
return (m0(host) + ":") + port;
} | 3.26 |
flink_NetUtils_m0_rdh | // ------------------------------------------------------------------------
// Encoding of IP addresses for URLs
// ------------------------------------------------------------------------
/**
* Returns an address in a normalized format for Pekko. When an IPv6 address is specified, it
* normalizes the IPv6 address to avoid complications with the exact URL match policy of Pekko.
*
* @param host
* The hostname, IPv4 or IPv6 address
* @return host which will be normalized if it is an IPv6 address
*/
public static String m0(String host) {
    // Return the loopback interface address if host is null.
    // This represents the behavior of {@code InetAddress.getByName} and RFC 3330.
    if (host == null) {
        host = InetAddress.getLoopbackAddress().getHostAddress();
    } else {
        host = host.trim().toLowerCase();
        if (host.startsWith("[") && host.endsWith("]")) {
            String address = host.substring(1, host.length() - 1);
if (InetAddresses.isInetAddress(address)) {
host = address;
}
}
}
    // normalize and validate the address
if (InetAddresses.isInetAddress(host)) {
InetAddress inetAddress = InetAddresses.forString(host);
if (inetAddress instanceof Inet6Address) {
byte[] ipV6Address = inetAddress.getAddress();
host = getIPv6UrlRepresentation(ipV6Address);
}
} else {
try {
// We don't allow these in hostnames
Preconditions.checkArgument(!host.startsWith("."));
Preconditions.checkArgument(!host.endsWith("."));
            Preconditions.checkArgument(!host.contains(":"));
        } catch (Exception e) {
throw new IllegalConfigurationException("The configured hostname is not valid", e);
}
}
return host;
} | 3.26 |
flink_NetUtils_parseHostPortAddress_rdh | /**
* Converts a string of the form "host:port" into an {@link InetSocketAddress}.
*
* @param hostPort
* The "host:port" string.
* @return The converted InetSocketAddress.
*/
public static InetSocketAddress parseHostPortAddress(String hostPort) {
URL url = validateHostPortString(hostPort);
return new InetSocketAddress(url.getHost(), url.getPort());
} | 3.26 |
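A usage sketch (assuming flink-core): "host:port" strings, including bracketed IPv6 literals, are validated through the URL round-trip above before the InetSocketAddress is built.

```java
import java.net.InetSocketAddress;
import org.apache.flink.util.NetUtils;

public class ParseHostPortDemo {
    public static void main(String[] args) {
        InetSocketAddress address = NetUtils.parseHostPortAddress("example.com:6123");
        System.out.println(address.getHostString() + " / " + address.getPort()); // example.com / 6123
    }
}
```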
flink_NetUtils_socketToUrl_rdh | /**
* Converts an InetSocketAddress to a URL. This method assigns the "http://" schema to the URL
* by default.
*
* @param socketAddress
* the InetSocketAddress to be converted
* @return a URL object representing the provided socket address with "http://" schema
*/
public static URL socketToUrl(InetSocketAddress socketAddress) {
String hostString = socketAddress.getHostString();
// If the hostString is an IPv6 address, it needs to be enclosed in square brackets
// at the beginning and end.
if (((socketAddress.getAddress() != null) && (socketAddress.getAddress() instanceof Inet6Address)) && hostString.equals(socketAddress.getAddress().getHostAddress())) {
        hostString = "[" + hostString + "]";
}
String hostPort = (hostString + ":") + socketAddress.getPort();
return validateHostPortString(hostPort);
} | 3.26 |
flink_NetUtils_isValidHostPort_rdh | /**
 * Check whether the given port is in the right range when getting a port from the local system.
*
* @param port
* the port to check
 * @return true if the number is in the range 0 to 65535
*/
public static boolean isValidHostPort(int port) {
return (0 <= port) && (port <= 65535);
} | 3.26 |
flink_NetUtils_getCorrectHostnamePort_rdh | /**
* Converts a string of the form "host:port" into an {@link URL}.
*
* @param hostPort
* The "host:port" string.
* @return The converted URL.
*/
public static URL getCorrectHostnamePort(String hostPort) {
return validateHostPortString(hostPort);
} | 3.26 |
flink_NetUtils_hostAndPortToUrlString_rdh | /**
* Normalizes and encodes a hostname and port to be included in URL. In particular, this method
* makes sure that IPv6 address literals have the proper formatting to be included in URLs.
*
* @param host
* The address to be included in the URL.
* @param port
* The port for the URL address.
* @return The proper URL string encoded IP address and port.
* @throws java.net.UnknownHostException
* Thrown, if the hostname cannot be translated into a
* URL.
*/
public static String hostAndPortToUrlString(String host, int port) throws UnknownHostException {
    return ipAddressAndPortToUrlString(InetAddress.getByName(host), port);
} | 3.26 |
flink_NetUtils_acceptWithoutTimeout_rdh | /**
* Calls {@link ServerSocket#accept()} on the provided server socket, suppressing any thrown
* {@link SocketTimeoutException}s. This is a workaround for the underlying JDK-8237858 bug in
* JDK 11 that can cause errant SocketTimeoutExceptions to be thrown at unexpected times.
*
* <p>This method expects the provided ServerSocket has no timeout set (SO_TIMEOUT of 0),
* indicating an infinite timeout. It will suppress all SocketTimeoutExceptions, even if a
* ServerSocket with a non-zero timeout is passed in.
*
* @param serverSocket
* a ServerSocket with {@link SocketOptions#SO_TIMEOUT SO_TIMEOUT} set to 0;
* if SO_TIMEOUT is greater than 0, then this method will suppress SocketTimeoutException;
* must not be null; SO_TIMEOUT option must be set to 0
* @return the new Socket
* @throws IOException
* see {@link ServerSocket#accept()}
* @see <a href="https://bugs.openjdk.java.net/browse/JDK-8237858">JDK-8237858</a>
*/
public static Socket acceptWithoutTimeout(ServerSocket serverSocket) throws IOException {
Preconditions.checkArgument(serverSocket.getSoTimeout() == 0, "serverSocket SO_TIMEOUT option must be 0");
    while (true) {
        try {
            return serverSocket.accept();
        } catch (SocketTimeoutException exception) {
            // This should be impossible given that the socket timeout is set to zero,
            // which indicates an infinite timeout. This is due to the underlying JDK-8237858
            // bug. We retry the accept call indefinitely to replicate the expected behavior.
        }
}
} | 3.26 |
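A usage sketch (assuming flink-core): a freshly created ServerSocket has SO_TIMEOUT 0, so it satisfies the precondition; the call blocks until a client connects, retrying internally on the spurious timeouts described above.

```java
import java.net.ServerSocket;
import java.net.Socket;
import org.apache.flink.util.NetUtils;

public class AcceptDemo {
    public static void main(String[] args) throws Exception {
        try (ServerSocket server = new ServerSocket(0)) { // SO_TIMEOUT defaults to 0
            System.out.println("Listening on port " + server.getLocalPort());
            // blocks until a client connects; never throws SocketTimeoutException
            try (Socket client = NetUtils.acceptWithoutTimeout(server)) {
                System.out.println("Accepted " + client.getRemoteSocketAddress());
            }
        }
    }
}
```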
flink_NetUtils_socketAddressToUrlString_rdh | /**
 * Encodes an IP address and port to be included in a URL. In particular, this method makes sure
 * that IPv6 addresses have the proper formatting to be included in URLs.
*
* @param address
* The socket address with the IP address and port.
* @return The proper URL string encoded IP address and port.
*/
public static String socketAddressToUrlString(InetSocketAddress address) {
if (address.isUnresolved()) {
throw new IllegalArgumentException("Address cannot be resolved: " + address.getHostString());
}
return ipAddressAndPortToUrlString(address.getAddress(), address.getPort());
} | 3.26 |
flink_NetUtils_getWildcardIPAddress_rdh | /**
* Returns the wildcard address to listen on all interfaces.
*
* @return Either 0.0.0.0 or :: depending on the IP setup.
*/
public static String getWildcardIPAddress() {
return WILDCARD_ADDRESS;
} | 3.26 |
flink_NetUtils_getAvailablePort_rdh | // ------------------------------------------------------------------------
// Lookup of to free ports
// ------------------------------------------------------------------------
/**
* Find a non-occupied port.
*
* @return A non-occupied port.
*/
public static Port getAvailablePort() {
for (int i = 0; i < 50; i++) {
try (ServerSocket serverSocket = new ServerSocket(0)) {
int port = serverSocket.getLocalPort();
if (port != 0) {
FileLock fileLock = new FileLock(NetUtils.class.getName() + port);
if (fileLock.tryLock()) {
return new Port(port, fileLock);
} else {
fileLock.unlockAndDestroy();
}
}
} catch (IOException ignored) {
}
}
throw new RuntimeException("Could not find a free permitted port on the machine.");
} | 3.26 |
flink_PlanGenerator_registerCachedFilesWithPlan_rdh | /**
 * Registers, at the given plan's cache registry, all files that were registered at this
 * execution environment's cache registry.
*
* @param p
* The plan to register files at.
* @throws IOException
* Thrown if checks for existence and sanity fail.
*/
private void registerCachedFilesWithPlan(Plan p) throws IOException {
for (Tuple2<String, DistributedCache.DistributedCacheEntry> entry : cacheFile) {
p.registerCachedFile(entry.f0, entry.f1);
}
} | 3.26 |
flink_PlanGenerator_registerGenericTypeInfoIfConfigured_rdh | /**
* Check plan for GenericTypeInfo's and register the types at the serializers.
*
* @param plan
* the generated plan.
*/
private void registerGenericTypeInfoIfConfigured(Plan plan) {
if (!config.isAutoTypeRegistrationDisabled()) {
plan.accept(new Visitor<Operator<?>>() {
private final Set<Class<?>> registeredTypes = new HashSet<>();
private final Set<Operator<?>> visitedOperators = new HashSet<>();
@Override
public boolean preVisit(Operator<?> visitable) {
if (!visitedOperators.add(visitable)) {
return false;
}
OperatorInformation<?> opInfo = visitable.getOperatorInfo();
Serializers.recursivelyRegisterType(opInfo.getOutputType(), config, registeredTypes);
return true;
}
@Override
public void postVisit(Operator<?> visitable) {
}
});
}
} | 3.26 |
flink_PlanGenerator_createPlan_rdh | /**
* Create plan.
*
* @return the generated plan.
*/
private Plan createPlan() {
final OperatorTranslation translator = new OperatorTranslation();
final Plan plan = translator.translateToPlan(sinks, jobName);
    if (defaultParallelism > 0) {
        plan.setDefaultParallelism(defaultParallelism);
    }
plan.setExecutionConfig(config);
return plan;
} | 3.26 |
flink_OrCondition_getLeft_rdh | /**
*
* @return One of the {@link IterativeCondition conditions} combined in this condition.
*/
public IterativeCondition<T> getLeft() {
return left;
} | 3.26 |
flink_OrCondition_getRight_rdh | /**
*
* @return One of the {@link IterativeCondition conditions} combined in this condition.
*/
public IterativeCondition<T> getRight() {
return right;
} | 3.26 |
flink_HyperLogLogPlusPlus_query_rdh | /**
* Compute the HyperLogLog estimate.
*
* <p>Variable names in the HLL++ paper match variable names in the code.
*/
public long query(HllBuffer buffer) {
// Compute the inverse of indicator value 'z' and count the number of zeros 'V'.
double zInverse = 0.0;
double v = 0.0;
int idx = 0;
int wordOffset = 0;
while (wordOffset < numWords) {
long word = buffer.array[wordOffset];
int i = 0;
int shift = 0;
while ((idx < m) && (i < REGISTERS_PER_WORD)) {
            long mIdx = (word >>> shift) & REGISTER_WORD_MASK;
            zInverse += 1.0 / (1 << mIdx);
            if (mIdx == 0) {
                v += 1.0;
            }
            shift += REGISTER_SIZE;
            i += 1;
            idx += 1;
        }
        wordOffset += 1;
}
// We integrate two steps from the paper:
// val Z = 1.0d / zInverse
// val E = alphaM2 * Z
double e = alphaM2 / zInverse;
    double eBiasCorrected = ((p < 19) && (e < (5.0 * m))) ? e - estimateBias(e) : e;
double estimate;
// Estimate the cardinality.
if (v > 0) {
// Use linear counting for small cardinality estimates.
double h = m * Math.log(m / v);
// HLL++ is defined only when p < 19, otherwise we need to fallback to HLL.
// The threshold `2.5 * m` is from the original HLL algorithm.
if (((p < 19) && (h <= THRESHOLDS[p - 4])) || (e <= (2.5 * m))) {
estimate = h;
} else {
estimate = eBiasCorrected;
}
} else {
estimate = eBiasCorrected;
}
// Round to the nearest long value.
return Math.round(estimate);
} | 3.26 |
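A minimal numeric sketch of the two estimators combined in query(...) above (the register contents and the alpha constant here are hypothetical and illustrative; the real class derives alphaM2 from the precision p): the raw HLL estimate alphaM2 / sum(2^-M[j]) and the linear-counting fallback m * ln(m / V) used when many registers are still zero.

```java
public class HllEstimateDemo {
    public static void main(String[] args) {
        int[] registers = {0, 3, 0, 2, 0, 0, 4, 1}; // hypothetical m = 8 registers
        int m = registers.length;
        double alphaM2 = 0.709 * m * m; // illustrative constant, not the exact alpha for m = 8
        double zInverse = 0.0;
        int zeroRegisters = 0;
        for (int r : registers) {
            zInverse += 1.0 / (1L << r); // sum of 2^-M[j]
            if (r == 0) {
                zeroRegisters++;
            }
        }
        double rawEstimate = alphaM2 / zInverse;
        double linearCounting = m * Math.log((double) m / zeroRegisters);
        System.out.printf("raw=%.2f linearCounting=%.2f%n", rawEstimate, linearCounting);
    }
}
```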
flink_HyperLogLogPlusPlus_distance_rdh | /**
 * Use the square of the difference between the current estimate and the estimate at the given
 * index as the distance metric.
 */
private double distance(double e, double[] estimates, int i) {
    double delta = e - estimates[i];
    return delta * delta;
} | 3.26 |
flink_HyperLogLogPlusPlus_trueRsd_rdh | /**
* The <code>rsd</code> of HLL++ is always equal to or better than the <code>rsd</code>
* requested. This method returns the <code>rsd</code> this instance actually guarantees.
*
* @return the actual <code>rsd</code>.
*/
public double trueRsd() {
    return 1.04 / Math.sqrt(m);
} | 3.26 |
flink_HyperLogLogPlusPlus_estimateBias_rdh | /**
* Estimate the bias using the raw estimates with their respective biases from the HLL++
* appendix. We currently use KNN interpolation to determine the bias (as suggested in the
* paper).
*/
public double estimateBias(double e) {
double[] estimates = RAW_ESTIMATE_DATA[p - 4];
int numEstimates = estimates.length;
// The estimates are sorted so we can use a binary search to find the index of the
// interpolation estimate closest to the current estimate.
int ix = Arrays.binarySearch(estimates, 0, numEstimates, e);
int nearestEstimateIndex = (ix < 0) ? -(ix + 1) : ix;
// Keep moving bounds as long as the (exclusive) high bound is closer to the estimate than
// the lower (inclusive) bound.
int low = Math.max((nearestEstimateIndex - K) + 1, 0);
    int high = Math.min(low + K, numEstimates);
    while ((high < numEstimates) && (distance(e, estimates, high) < distance(e, estimates, low))) {
low += 1;
high += 1;
}
// Calculate the sum of the biases in low-high interval.
double[] biases = BIAS_DATA[p - 4];
int i = low;
double biasSum = 0.0;
while (i < high) {
biasSum += biases[i];
i += 1;
}
// Calculate the bias.
return biasSum / (high - low);
} | 3.26 |
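The same KNN interpolation can be read in isolation. A self-contained sketch, with tiny hypothetical tables in place of the paper's RAW_ESTIMATE_DATA/BIAS_DATA appendix and a placeholder neighbor count:

    import java.util.Arrays;

    public class KnnBiasSketch {
        static final int K = 6; // placeholder for the class's neighbor count

        // Averages the K tabulated biases whose raw estimates are closest to e.
        static double estimateBias(double e, double[] estimates, double[] biases) {
            int ix = Arrays.binarySearch(estimates, e);
            int nearest = (ix < 0) ? -(ix + 1) : ix;
            int low = Math.max(nearest - K + 1, 0);
            int high = Math.min(low + K, estimates.length);
            // Slide the window while the excluded high neighbor is closer than the low one.
            while (high < estimates.length && sq(e - estimates[high]) < sq(e - estimates[low])) {
                low++;
                high++;
            }
            double sum = 0.0;
            for (int i = low; i < high; i++) {
                sum += biases[i];
            }
            return sum / (high - low);
        }

        private static double sq(double x) {
            return x * x;
        }

        public static void main(String[] args) {
            double[] estimates = {10, 20, 30, 40, 50, 60, 70, 80}; // hypothetical, sorted
            double[] biases = {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3};
            System.out.println(estimateBias(34.0, estimates, biases)); // mean of the 6 nearest biases
        }
    }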
flink_BigDecParser_parseField_rdh | /**
* Static utility to parse a field of type BigDecimal from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException
* Thrown when the value cannot be parsed because the text
* does not represent a correct number.
*/
public static final BigDecimal parseField(byte[] bytes, int startPos, int length, char delimiter) {
if (length <= 0) {
throw new NumberFormatException("Invalid input: Empty string");
}
int i = 0;
final byte delByte = ((byte) (delimiter));
while ((i < length) && (bytes[startPos + i] != delByte)) {
i++;
}
if ((i > 0) && (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[(startPos + i) - 1]))) {
throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
}
final char[] chars = new char[i];
for (int j = 0; j < i; j++) {
final byte b = bytes[startPos + j];
if (((((((b < '0') || (b > '9')) && (b != '-')) && (b != '+')) && (b != '.')) && (b != 'E')) && (b != 'e')) {
throw new NumberFormatException();
}
chars[j] = ((char) (bytes[startPos + j]));
}
return new BigDecimal(chars);
} | 3.26 |
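A hypothetical invocation (the input, offsets, and the '|' delimiter are made up; the import assumes the class's usual org.apache.flink.types.parser package):

    import java.math.BigDecimal;
    import java.nio.charset.StandardCharsets;
    import org.apache.flink.types.parser.BigDecParser;

    public class ParseFieldDemo {
        public static void main(String[] args) {
            byte[] record = "42.5|-13.07".getBytes(StandardCharsets.US_ASCII);
            // First field: scanning stops at the '|' delimiter after "42.5".
            BigDecimal a = BigDecParser.parseField(record, 0, record.length, '|');
            // Second field: starts right after the delimiter and runs to the end.
            BigDecimal b = BigDecParser.parseField(record, 5, record.length - 5, '|');
            System.out.println(a.add(b)); // 29.43
        }
    }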
flink_ResultSubpartitionView_notifyRequiredSegmentId_rdh | /**
* In tiered storage shuffle mode, only the required segments are sent, to avoid redundant
* buffer usage. The downstream notifies the upstream through this method which segments to send.
*
* @param segmentId
the id of the segment required by the downstream.
*/
default void notifyRequiredSegmentId(int segmentId) {
} | 3.26 |
flink_InFlightRequestTracker_m0_rdh | /**
* Deregisters an in-flight request.
*/
public void m0() {
phaser.arriveAndDeregister();
} | 3.26 |
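The deregistration above is one half of a java.util.concurrent.Phaser handshake. A minimal sketch of the whole pattern (a hypothetical class, not Flink's actual tracker):

    import java.util.concurrent.Phaser;

    public class InFlightSketch {
        // One party is held by the tracker itself so the phaser cannot terminate early.
        private final Phaser phaser = new Phaser(1);

        // Registers an in-flight request; register() returns a negative phase once terminated.
        public boolean registerRequest() {
            return phaser.register() >= 0;
        }

        // Deregisters an in-flight request, the counterpart of m0() above.
        public void deregisterRequest() {
            phaser.arriveAndDeregister();
        }

        // Releases the tracker's own party and blocks until all requests have finished.
        public void awaitShutdown() {
            int phase = phaser.arriveAndDeregister();
            phaser.awaitAdvance(phase);
        }
    }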
flink_OperatorInformation_getOutputType_rdh | /**
* Gets the return type of the user code function.
*/
public TypeInformation<OUT> getOutputType() {
return outputType;
} | 3.26 |
flink_PythonStreamGroupAggregateOperator_getUserDefinedFunctionsProto_rdh | /**
* Gets the proto representation of the Python user-defined aggregate functions to be executed.
*/
@Override
public UserDefinedAggregateFunctions getUserDefinedFunctionsProto() {
FlinkFnApi.UserDefinedAggregateFunctions.Builder builder = super.getUserDefinedFunctionsProto().toBuilder();
builder.setCountStarInserted(countStarInserted);
return builder.build();
} | 3.26 |
flink_EventTimeTrigger_create_rdh | /**
* Creates an event-time trigger that fires once the watermark passes the end of the window.
*
* <p>Once the trigger fires all elements are discarded. Elements that arrive late immediately
* trigger window evaluation with just this one element.
*/
public static EventTimeTrigger create() {
return new EventTimeTrigger();
} | 3.26 |
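A hypothetical job showing where the trigger plugs in; TumblingEventTimeWindows installs this trigger by default, so the explicit trigger(...) call is purely illustrative, and the timestamps and keys are made up:

    import org.apache.flink.api.common.eventtime.WatermarkStrategy;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
    import org.apache.flink.streaming.api.windowing.time.Time;
    import org.apache.flink.streaming.api.windowing.triggers.EventTimeTrigger;

    public class EventTimeTriggerDemo {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            env.fromElements(1L, 2L, 3L, 4L)
                    .assignTimestampsAndWatermarks(
                            WatermarkStrategy.<Long>forMonotonousTimestamps()
                                    .withTimestampAssigner((value, ts) -> value * 1000L))
                    .keyBy(value -> value % 2)
                    .window(TumblingEventTimeWindows.of(Time.seconds(10)))
                    .trigger(EventTimeTrigger.create()) // fires when the watermark passes window end
                    .reduce(Long::sum)
                    .print();
            env.execute("event-time-trigger-demo");
        }
    }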
flink_FromElementsFunction_m0_rdh | /**
* Gets the number of elements emitted so far.
*
* @return The number of elements emitted so far.
*/
public int m0() {
return numElementsEmitted;
} | 3.26
flink_FromElementsFunction_setOutputType_rdh | /**
* Set element type and re-serialize element if required. Should only be called before
* serialization/deserialization of this function.
*/
@Override
public void setOutputType(TypeInformation<T> outTypeInfo, ExecutionConfig executionConfig) {
Preconditions.checkState(elements != null, "The output type should've been specified before shipping the graph to the cluster");
checkIterable(elements, outTypeInfo.getTypeClass());
TypeSerializer<T> newSerializer = outTypeInfo.createSerializer(executionConfig);
if (Objects.equals(serializer, newSerializer)) {
return;
}
serializer = newSerializer;
try {
serializeElements();
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
} | 3.26 |
flink_FromElementsFunction_checkCollection_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Verifies that all elements in the collection are non-null, and are of the given class, or a
* subclass thereof.
*
* @param elements
* The collection to check.
* @param viewedAs
* The class to which the elements must be assignable to.
* @param <OUT>
* The generic type of the collection to be checked.
*/
public static <OUT> void checkCollection(Collection<OUT> elements, Class<OUT> viewedAs) {
checkIterable(elements, viewedAs);
} | 3.26 |
flink_FromElementsFunction_snapshotState_rdh | // ------------------------------------------------------------------------
// Checkpointing
// ------------------------------------------------------------------------
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
Preconditions.checkState(this.checkpointedState != null, ("The " + getClass().getSimpleName()) + " has not been properly initialized.");
this.checkpointedState.update(Collections.singletonList(this.numElementsEmitted));
} | 3.26 |
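For context, such a checkpointed list state is typically created in the matching initializeState() callback. A hedged sketch of that counterpart (field and state names are illustrative, not FromElementsFunction's actual code; assumes imports of ListStateDescriptor, IntSerializer, and FunctionInitializationContext from Flink):

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        // Hypothetical descriptor; the real state name and serializer may differ.
        ListStateDescriptor<Integer> descriptor =
                new ListStateDescriptor<>("from-elements-state", IntSerializer.INSTANCE);
        this.checkpointedState = context.getOperatorStateStore().getListState(descriptor);
        if (context.isRestored()) {
            // Operator state is a list; a single entry carries the emit position.
            for (Integer emitted : this.checkpointedState.get()) {
                this.numElementsEmitted = emitted;
            }
        }
    }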
flink_FromElementsFunction_getNumElements_rdh | /**
* Gets the number of elements produced in total by this function.
*
* @return The number of elements produced in total.
*/
public int getNumElements() {
return numElements;
} | 3.26 |
flink_DatadogHttpReporter_m0_rdh | /**
* Removes leading and trailing angle brackets.
*/
private String m0(String str) {
return str.substring(1, str.length() - 1);
} | 3.26 |
flink_DatadogHttpReporter_getTagsFromMetricGroup_rdh | /**
* Get tags from MetricGroup#getAllVariables(), excluding 'host'.
*/
private List<String> getTagsFromMetricGroup(MetricGroup metricGroup) {
List<String> tags = new ArrayList<>();
for (Map.Entry<String, String> entry : metricGroup.getAllVariables().entrySet()) {
if (!entry.getKey().equals(HOST_VARIABLE)) {
tags.add((m0(entry.getKey()) + ":") + entry.getValue());
}
}
return tags;
} | 3.26 |
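A standalone sketch of the tag conversion, using a plain Map instead of a real MetricGroup; the variable names are hypothetical:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class TagSketch {
        public static void main(String[] args) {
            Map<String, String> variables = new HashMap<>();
            variables.put("<host>", "worker-1");
            variables.put("<job_name>", "wordcount");
            List<String> tags = new ArrayList<>();
            for (Map.Entry<String, String> e : variables.entrySet()) {
                if (!e.getKey().equals("<host>")) {
                    // Strip the angle brackets, then emit "key:value".
                    String key = e.getKey().substring(1, e.getKey().length() - 1);
                    tags.add(key + ":" + e.getValue());
                }
            }
            System.out.println(tags); // [job_name:wordcount]
        }
    }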
flink_DatadogHttpReporter_getTagsFromConfig_rdh | /**
* Get config tags from config 'metrics.reporter.dghttp.tags'.
*/
private List<String> getTagsFromConfig(String str) {
return Arrays.asList(str.split(","));
} | 3.26 |
flink_CommonExecLookupJoin_validate_rdh | // ----------------------------------------------------------------------------------------
// Validation
// ----------------------------------------------------------------------------------------
private void validate(RelOptTable temporalTable) {
// validate table source and function implementation first
validateTableSource(temporalTable);
// check join on all fields of PRIMARY KEY or (UNIQUE) INDEX
if (lookupKeys.isEmpty()) {
throw new TableException(String.format("Temporal table join requires an equality condition on fields of %s.", getTableSourceDescription(temporalTable)));
}
// check type
if ((joinType != FlinkJoinType.LEFT) && (joinType != FlinkJoinType.INNER)) {
throw new TableException(String.format("Temporal table join currently only support INNER JOIN and LEFT JOIN, but was %s JOIN.", joinType.toString()));
}
// success
} | 3.26 |
flink_DistributedRuntimeUDFContext_setBroadcastVariable_rdh | // --------------------------------------------------------------------------------------------
public void setBroadcastVariable(String name, BroadcastVariableMaterialization<?, ?> value) {
this.broadcastVars.put(name, value);
} | 3.26 |
flink_JoinOperatorSetsBase_equalTo_rdh | /**
* Continues a Join transformation and defines a {@link KeySelector} function for the second
* join {@link DataSet}.
*
* <p>The KeySelector function is called for each element of the second DataSet and extracts
* a single key value on which the DataSet is joined.
*
* <p>The resulting {@link JoinFunctionAssigner} needs to be finished by providing a {@link JoinFunction} by calling {@link JoinFunctionAssigner#with(JoinFunction)}
*
* @param keySelector
* The KeySelector function which extracts the key values from the second
* DataSet on which it is joined.
* @return A JoinFunctionAssigner.
*/
public <K> JoinFunctionAssigner<I1, I2> equalTo(KeySelector<I2, K> keySelector) {
TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keySelector, input2.getType());
return createJoinFunctionAssigner(new Keys.SelectorFunctionKeys<>(keySelector, input2.getType(), keyType));
} | 3.26 |
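A hypothetical end-to-end use of the KeySelector overload (the data sets and join logic are made up; leftOuterJoin passes null for unmatched right-side records):

    import org.apache.flink.api.common.functions.JoinFunction;
    import org.apache.flink.api.java.DataSet;
    import org.apache.flink.api.java.ExecutionEnvironment;
    import org.apache.flink.api.java.tuple.Tuple2;

    public class EqualToDemo {
        public static void main(String[] args) throws Exception {
            ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
            DataSet<Tuple2<Integer, String>> users =
                    env.fromElements(Tuple2.of(1, "alice"), Tuple2.of(2, "bob"));
            DataSet<Tuple2<Integer, Long>> visits =
                    env.fromElements(Tuple2.of(1, 10L));
            users.leftOuterJoin(visits)
                    .where(u -> u.f0)   // key of the first input
                    .equalTo(v -> v.f0) // KeySelector for the second input, as documented above
                    .with((JoinFunction<Tuple2<Integer, String>, Tuple2<Integer, Long>, String>)
                            (u, v) -> u.f1 + ":" + (v == null ? 0L : v.f1))
                    .print();
        }
    }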
flink_AbstractOuterJoinDriver_setup_rdh | // ------------------------------------------------------------------------
@Override
public void setup(TaskContext<FlatJoinFunction<IT1, IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
} | 3.26 |
flink_XORShiftRandom_next_rdh | /**
* All other methods like nextInt()/nextDouble() depend on this, so we just need to
* override this one.
*
* @param bits
* Random bits
* @return The next pseudorandom value from this random number generator's sequence
*/
@Override
public int next(int bits) {
long nextSeed = seed ^ (seed << 21);
nextSeed ^= nextSeed >>> 35;
nextSeed ^= nextSeed << 4;
seed = nextSeed;
return ((int) (nextSeed & ((1L << bits) - 1)));
} | 3.26 |
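A standalone sketch of the same xorshift64 step outside java.util.Random (the zero-seed guard and the seeding constant are assumptions, not taken from Flink):

    public class XorShift64 {
        private long seed;

        public XorShift64(long init) {
            // A zero seed would keep the generator stuck at zero forever.
            this.seed = (init == 0) ? 0x9E3779B97F4A7C15L : init;
        }

        // Returns the low `bits` bits of the next xorshift64 state.
        public int next(int bits) {
            long x = seed;
            x ^= x << 21;
            x ^= x >>> 35;
            x ^= x << 4;
            seed = x;
            return (int) (x & ((1L << bits) - 1));
        }

        public static void main(String[] args) {
            XorShift64 rng = new XorShift64(42L);
            System.out.println(rng.next(31)); // deterministic for a fixed seed
        }
    }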