name | code_snippet | score |
---|---|---|
zxing_CameraManager_setManualFramingRect | /**
* Allows third party apps to specify the scanning rectangle dimensions, rather than determine
* them automatically based on screen resolution.
*
* @param width The width in pixels to scan.
* @param height The height in pixels to scan.
*/
public synchronized void setManualFramingRect(int width, int height) {
if (initialized) {
Point screenResolution = configManager.getScreenResolution();
if (width > screenResolution.x) {
width = screenResolution.x;
}
if (height > screenResolution.y) {
height = screenResolution.y;
}
int leftOffset = (screenResolution.x - width) / 2;
int topOffset = (screenResolution.y - height) / 2;
framingRect = new Rect(leftOffset, topOffset, leftOffset + width, topOffset + height);
Log.d(TAG, "Calculated manual framing rect: " + framingRect);
framingRectInPreview = null;
} else {
requestedFramingRectWidth = width;
requestedFramingRectHeight = height;
}
} | 3.68 |
hadoop_AppIdKeyConverter_decode | /*
* (non-Javadoc)
*
* Converts/decodes a 12 byte representation of app id for (row) keys to an
* app id in string format which can be returned to the client.
* For decoding, 12 bytes are interpreted as 8 bytes of inverted cluster
* timestamp(long) followed by 4 bytes of inverted sequence id(int). Calls
* ApplicationId#toString to generate string representation of app id.
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
* #decode(byte[])
*/
@Override
public String decode(byte[] appIdBytes) {
if (appIdBytes.length != getKeySize()) {
throw new IllegalArgumentException("Invalid app id in byte format");
}
long clusterTs = LongConverter.invertLong(
Bytes.toLong(appIdBytes, 0, Bytes.SIZEOF_LONG));
int seqId = HBaseTimelineSchemaUtils.invertInt(
Bytes.toInt(appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT));
return HBaseTimelineSchemaUtils.convertApplicationIdToString(
ApplicationId.newInstance(clusterTs, seqId));
} | 3.68 |
hudi_InternalSchemaUtils_searchSchema | /**
* Search target internalSchema by version number.
*
* @param versionId the internalSchema version to be searched.
* @param treeMap the collection of internalSchemas to be searched.
* @return an internalSchema.
*/
public static InternalSchema searchSchema(long versionId, TreeMap<Long, InternalSchema> treeMap) {
if (treeMap.containsKey(versionId)) {
return treeMap.get(versionId);
} else {
SortedMap<Long, InternalSchema> headMap = treeMap.headMap(versionId);
if (!headMap.isEmpty()) {
return headMap.get(headMap.lastKey());
}
}
return InternalSchema.getEmptyInternalSchema();
} | 3.68 |
hbase_SimpleMutableByteRange_putVLong | // Copied from com.google.protobuf.CodedOutputStream v2.5.0 writeRawVarint64
@Override
public int putVLong(int index, long val) {
int rPos = 0;
while (true) {
if ((val & ~0x7F) == 0) {
bytes[offset + index + rPos] = (byte) val;
break;
} else {
bytes[offset + index + rPos] = (byte) ((val & 0x7F) | 0x80);
val >>>= 7;
}
rPos++;
}
clearHashCache();
return rPos + 1;
} | 3.68 |
hmily_HmilyXaStatement_getXaConnection | /**
* Gets xa connection.
*
* @return the xa connection
*/
public synchronized XAConnection getXaConnection() {
if (this.xaConnection == null) {
throw new IllegalArgumentException("connection not implements XAConnection");
}
return xaConnection;
} | 3.68 |
flink_PrintStyle_tableauWithTypeInferredColumnWidths | /**
* Create a new {@link TableauStyle} using column widths computed from the type.
*
* @param schema the schema of the data to print
* @param converter the converter to use to convert field values to string
* @param maxColumnWidth Max column width
* @param printNullAsEmpty A flag to indicate whether null should be printed as an empty string
* rather than {@code <NULL>}
* @param printRowKind A flag to indicate whether to print row kind info.
*/
static TableauStyle tableauWithTypeInferredColumnWidths(
ResolvedSchema schema,
RowDataToStringConverter converter,
int maxColumnWidth,
boolean printNullAsEmpty,
boolean printRowKind) {
Preconditions.checkArgument(maxColumnWidth > 0, "maxColumnWidth should be greater than 0");
return new TableauStyle(
schema,
converter,
TableauStyle.columnWidthsByType(
schema.getColumns(), maxColumnWidth, printNullAsEmpty, printRowKind),
maxColumnWidth,
printNullAsEmpty,
printRowKind);
} | 3.68 |
hadoop_EntityGroupFSTimelineStoreMetrics_incrNoRefreshCacheRead | // Cache related
public void incrNoRefreshCacheRead() {
noRefreshCacheRead.incr();
} | 3.68 |
hadoop_UnmanagedApplicationManager_forceKillApplication | /**
* Force kill the UAM.
*
* @return kill response
* @throws IOException if fails to create rmProxy
* @throws YarnException if force kill fails
*/
public KillApplicationResponse forceKillApplication()
throws IOException, YarnException {
shutDownConnections();
KillApplicationRequest request =
KillApplicationRequest.newInstance(this.applicationId);
if (this.rmClient == null) {
this.rmClient = createRMProxy(ApplicationClientProtocol.class, this.conf,
UserGroupInformation.createRemoteUser(this.submitter), null);
}
return this.rmClient.forceKillApplication(request);
} | 3.68 |
hbase_AsyncTable_getScanner | /**
* Gets a scanner on the current table for the given family and qualifier.
* @param family The column family to scan.
* @param qualifier The column qualifier to scan.
* @return A scanner.
*/
default ResultScanner getScanner(byte[] family, byte[] qualifier) {
return getScanner(new Scan().addColumn(family, qualifier));
} | 3.68 |
hbase_ConcurrentMapUtils_computeIfAbsent | /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee that the supplier will
* only be executed once.
*/
public static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Supplier<V> supplier) {
return computeIfAbsent(map, key, supplier, () -> {
});
} | 3.68 |
framework_DataCommunicator_getActiveDataHandler | /**
* Returns the active data handler.
*
* @return the active data handler
* @since 8.0.6
*/
protected ActiveDataHandler getActiveDataHandler() {
return handler;
} | 3.68 |
hbase_ZNodeClearer_writeMyEphemeralNodeOnDisk | /**
* Logs the errors without failing on exception.
*/
public static void writeMyEphemeralNodeOnDisk(String fileContent) {
String fileName = ZNodeClearer.getMyEphemeralNodeFileName();
if (fileName == null) {
LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared "
+ "on crash by start scripts (Longer MTTR!)");
return;
}
FileWriter fstream;
try {
fstream = new FileWriter(fileName);
} catch (IOException e) {
LOG.warn("Can't write znode file " + fileName, e);
return;
}
BufferedWriter out = new BufferedWriter(fstream);
try {
try {
out.write(fileContent + "\n");
} finally {
try {
out.close();
} finally {
fstream.close();
}
}
} catch (IOException e) {
LOG.warn("Can't write znode file " + fileName, e);
}
} | 3.68 |
hadoop_KeyProviderCache_invalidateCache | /**
* Invalidate cache. KeyProviders in the cache will be closed by cache hook.
*/
@VisibleForTesting
synchronized void invalidateCache() {
LOG.debug("Invalidating all cached KeyProviders.");
if (cache != null) {
cache.invalidateAll();
}
} | 3.68 |
framework_DragSourceExtension_setEffectAllowed | /**
* Sets the allowed effects for the current drag source element. Used for
* setting client side {@code DataTransfer.effectAllowed} parameter for the
* drag event.
* <p>
* By default the value is {@link EffectAllowed#UNINITIALIZED} which is
* equivalent to {@link EffectAllowed#ALL}.
*
* @param effect
* Effects to allow for this draggable element. Cannot be {@code
* null}.
*/
public void setEffectAllowed(EffectAllowed effect) {
if (effect == null) {
throw new IllegalArgumentException("Allowed effect cannot be null");
}
if (!Objects.equals(getState(false).effectAllowed, effect)) {
getState().effectAllowed = effect;
}
} | 3.68 |
dubbo_DubboCertManager_signWithEcdsa | /**
* Generate key pair with ECDSA
*
* @return key pair
*/
protected static KeyPair signWithEcdsa() {
KeyPair keyPair = null;
try {
ECGenParameterSpec ecSpec = new ECGenParameterSpec("secp256r1");
KeyPairGenerator g = KeyPairGenerator.getInstance("EC");
g.initialize(ecSpec, new SecureRandom());
java.security.KeyPair keypair = g.generateKeyPair();
PublicKey publicKey = keypair.getPublic();
PrivateKey privateKey = keypair.getPrivate();
ContentSigner signer = new JcaContentSignerBuilder("SHA256withECDSA").build(privateKey);
keyPair = new KeyPair(publicKey, privateKey, signer);
} catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException | OperatorCreationException e) {
logger.error(
CONFIG_SSL_CERT_GENERATE_FAILED,
"",
"",
"Generate Key with secp256r1 algorithm failed. Please check if your system support. "
+ "Will attempt to generate with RSA2048.",
e);
}
return keyPair;
} | 3.68 |
flink_Channel_getReplicationFactor | /**
* Returns the replication factor of the connection.
*
* @return The replication factor of the connection.
*/
public int getReplicationFactor() {
return this.replicationFactor;
} | 3.68 |
hibernate-validator_InheritedMethodsHelper_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
Activiti_TreeMethodExpression_getMethodInfo | /**
* Evaluates the expression and answers information about the method
* @param context used to resolve properties (<code>base.property</code> and <code>base[property]</code>)
* @return method information or <code>null</code> for literal expressions
* @throws ELException if evaluation fails (e.g. suitable method not found)
*/
@Override
public MethodInfo getMethodInfo(ELContext context) throws ELException {
return node.getMethodInfo(bindings, context, type, types);
} | 3.68 |
flink_NFACompiler_headOfGroup | /**
* Checks if the given pattern is the head pattern of the current group pattern.
*
* @param pattern the pattern to be checked
* @return {@code true} iff the given pattern is in a group pattern and it is the head
* pattern of the group pattern, {@code false} otherwise
*/
private boolean headOfGroup(Pattern<T, ?> pattern) {
return currentGroupPattern != null && pattern.getPrevious() == null;
} | 3.68 |
shardingsphere-elasticjob_LeaderService_removeLeader | /**
* Remove leader and trigger leader election.
*/
public void removeLeader() {
jobNodeStorage.removeJobNodeIfExisted(LeaderNode.INSTANCE);
} | 3.68 |
hbase_AssignmentVerificationReport_getDispersionInformation | /**
* Return a list which contains 3 elements: average dispersion score, max dispersion score and min
* dispersion score as first, second and third elements, respectively.
*/
public List<Float> getDispersionInformation() {
List<Float> dispersion = new ArrayList<>();
dispersion.add(avgDispersionScore);
dispersion.add(maxDispersionScore);
dispersion.add(minDispersionScore);
return dispersion;
} | 3.68 |
hbase_ChaosAgent_createZNode | /**
* Function to create a PERSISTENT ZNode with the given path and data
* @param path Path at which ZNode to create
* @param data Data to put under ZNode
*/
public void createZNode(String path, byte[] data) {
zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, createZNodeCallback,
data);
} | 3.68 |
flink_ExecutionVertex_notifyStateTransition | /** Simply forward this notification. */
void notifyStateTransition(
Execution execution, ExecutionState previousState, ExecutionState newState) {
// only forward this notification if the execution is still the current execution
// otherwise we have an outdated execution
if (isCurrentExecution(execution)) {
getExecutionGraphAccessor().notifyExecutionChange(execution, previousState, newState);
}
} | 3.68 |
hadoop_ExitUtil_terminate | /**
* Terminate the current process. Note that terminate is the *only* method
* that should be used to terminate the daemon processes.
*
* @param status exit code
* @param msg message used to create the {@code ExitException}
* @throws ExitException if {@link System#exit(int)} is disabled.
*/
public static void terminate(int status, String msg) throws ExitException {
terminate(new ExitException(status, msg));
} | 3.68 |
flink_NFAState_resetStateChanged | /** Reset the changed bit checked via {@link #isStateChanged()} to {@code false}. */
public void resetStateChanged() {
this.stateChanged = false;
} | 3.68 |
hbase_ReplicationSourceManager_getFs | /**
* Get the handle on the local file system
* @return Handle on the local file system
*/
public FileSystem getFs() {
return this.fs;
} | 3.68 |
zxing_DecoderResult_getErasures | /**
* @return number of erasures corrected, or {@code null} if not applicable
*/
public Integer getErasures() {
return erasures;
} | 3.68 |
flink_CoGroupOperator_with | /**
* Finalizes a CoGroup transformation by applying a {@link
* org.apache.flink.api.common.functions.RichCoGroupFunction} to groups of elements
* with identical keys.
*
* <p>Each CoGroupFunction call returns an arbitrary number of elements.
*
* @param function The CoGroupFunction that is called for all groups of elements
* with identical keys.
* @return A CoGroupOperator that represents the co-grouped result DataSet.
* @see org.apache.flink.api.common.functions.RichCoGroupFunction
* @see DataSet
*/
public <R> CoGroupOperator<I1, I2, R> with(CoGroupFunction<I1, I2, R> function) {
if (function == null) {
throw new NullPointerException("CoGroup function must not be null.");
}
TypeInformation<R> returnType =
TypeExtractor.getCoGroupReturnTypes(
function,
input1.getType(),
input2.getType(),
Utils.getCallLocationName(),
true);
return new CoGroupOperator<>(
input1,
input2,
keys1,
keys2,
input1.clean(function),
returnType,
groupSortKeyOrderFirst,
groupSortKeyOrderSecond,
customPartitioner,
Utils.getCallLocationName());
} | 3.68 |
hbase_DefaultMetricsSystemHelper_removeSourceName | /**
* Unfortunately Hadoop tries to be too-clever and permanently keeps track of all names registered
* so far as a Source, thus preventing further re-registration of the source with the same name.
* In case of dynamic metrics tied to region-lifecycles, this becomes a problem because we would
* like to be able to re-register and remove with the same name. Otherwise, it is a resource leak.
* This ugly code manually removes the name from the UniqueNames map. TODO: May not be needed for
* Hadoop versions after YARN-5190.
*/
public void removeSourceName(String name) {
if (sourceNamesField == null || mapField == null) {
return;
}
try {
Object sourceNames = sourceNamesField.get(DefaultMetricsSystem.INSTANCE);
HashMap map = (HashMap) mapField.get(sourceNames);
synchronized (sourceNames) {
map.remove(name);
}
} catch (Exception ex) {
if (LOG.isTraceEnabled()) {
LOG.trace(
"Received exception while trying to access Hadoop Metrics classes via " + "reflection.",
ex);
}
}
} | 3.68 |
hbase_NamespaceStateManager_checkAndUpdateNamespaceRegionCount | /**
* Check and update region count for an existing table, to handle scenarios like restoring a snapshot.
* @param name name of the table for region count needs to be checked and updated
* @param incr count of regions
* @throws QuotaExceededException if quota exceeds for the number of regions allowed in a
* namespace
* @throws IOException Signals that an I/O exception has occurred.
*/
synchronized void checkAndUpdateNamespaceRegionCount(TableName name, int incr)
throws IOException {
String namespace = name.getNamespaceAsString();
NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
if (nspdesc != null) {
NamespaceTableAndRegionInfo currentStatus = getState(namespace);
int regionCountOfTable = currentStatus.getRegionCountOfTable(name);
if (
(currentStatus.getRegionCount() - regionCountOfTable + incr)
> TableNamespaceManager.getMaxRegions(nspdesc)
) {
throw new QuotaExceededException("The table " + name.getNameAsString()
+ " region count cannot be updated as it would exceed maximum number "
+ "of regions allowed in the namespace. The total number of regions permitted is "
+ TableNamespaceManager.getMaxRegions(nspdesc));
}
currentStatus.removeTable(name);
currentStatus.addTable(name, incr);
}
} | 3.68 |
hadoop_NMTokenCache_clearCache | /**
* It will remove all the NM tokens from its cache.
*/
@Private
@VisibleForTesting
public void clearCache() {
nmTokens.clear();
} | 3.68 |
framework_Tree_getVisibleItemIds | /**
* Gets the visible item ids.
*
* @see Select#getVisibleItemIds()
*/
@Override
public Collection<?> getVisibleItemIds() {
final LinkedList<Object> visible = new LinkedList<Object>();
// Iterates through the hierarchical tree using a stack of iterators
final Stack<Iterator<?>> iteratorStack = new Stack<Iterator<?>>();
final Collection<?> ids = rootItemIds();
if (ids != null) {
iteratorStack.push(ids.iterator());
}
while (!iteratorStack.isEmpty()) {
// Gets the iterator for current tree level
final Iterator<?> i = iteratorStack.peek();
// If the level is finished, back to previous tree level
if (!i.hasNext()) {
// Removes used iterator from the stack
iteratorStack.pop();
} else {
// Adds the item on current level
final Object itemId = i.next();
visible.add(itemId);
// Adds children if expanded, or close the tag
if (isExpanded(itemId) && hasChildren(itemId)) {
iteratorStack.push(getChildren(itemId).iterator());
}
}
}
return visible;
} | 3.68 |
flink_BlobUtils_readFully | /**
* Auxiliary method to read a particular number of bytes from an input stream. This method
* blocks until the requested number of bytes have been read from the stream. If the stream
* cannot offer enough data, an {@link EOFException} is thrown.
*
* @param inputStream The input stream to read the data from.
* @param buf The buffer to store the read data.
* @param off The offset inside the buffer.
* @param len The number of bytes to read from the stream.
* @param type The name of the type, to throw a good error message in case of not enough data.
* @throws IOException Thrown if I/O error occurs while reading from the stream or the stream
* cannot offer enough data.
*/
static void readFully(InputStream inputStream, byte[] buf, int off, int len, String type)
throws IOException {
int bytesRead = 0;
while (bytesRead < len) {
final int read = inputStream.read(buf, off + bytesRead, len - bytesRead);
if (read < 0) {
throw new EOFException("Received an incomplete " + type);
}
bytesRead += read;
}
} | 3.68 |
flink_CheckpointedPosition_getRecordsAfterOffset | /** Gets the records to skip after the offset. */
public long getRecordsAfterOffset() {
return recordsAfterOffset;
} | 3.68 |
flink_ReusingBuildFirstReOpenableHashJoinIterator_reopenProbe | /**
* Set new input for probe side
*
* @throws IOException
*/
public void reopenProbe(MutableObjectIterator<V2> probeInput) throws IOException {
reopenHashTable.reopenProbe(probeInput);
} | 3.68 |
hadoop_HdfsDataOutputStream_getCurrentBlockReplication | /**
* Get the actual number of replicas of the current block.
*
* This can be different from the designated replication factor of the file
* because the namenode does not maintain replication for the blocks which are
* currently being written to. Depending on the configuration, the client may
* continue to write to a block even if a few datanodes in the write pipeline
* have failed, or the client may add a new datanodes once a datanode has
* failed.
*
* @return the number of valid replicas of the current block
*/
public synchronized int getCurrentBlockReplication() throws IOException {
OutputStream wrappedStream = getWrappedStream();
if (wrappedStream instanceof CryptoOutputStream) {
wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
}
return ((DFSOutputStream) wrappedStream).getCurrentBlockReplication();
} | 3.68 |
flink_TaskSlot_generateSlotOffer | /**
* Generate the slot offer from this TaskSlot.
*
* @return The slot offer which this task slot can provide
*/
public SlotOffer generateSlotOffer() {
Preconditions.checkState(
TaskSlotState.ACTIVE == state || TaskSlotState.ALLOCATED == state,
"The task slot is not in state active or allocated.");
Preconditions.checkState(allocationId != null, "The task slot is not allocated");
return new SlotOffer(allocationId, index, resourceProfile);
} | 3.68 |
hudi_JavaExecutionStrategy_readRecordsForGroupBaseFiles | /**
* Read records from baseFiles.
*/
private List<HoodieRecord<T>> readRecordsForGroupBaseFiles(List<ClusteringOperation> clusteringOps) {
List<HoodieRecord<T>> records = new ArrayList<>();
clusteringOps.forEach(clusteringOp -> {
try (HoodieFileReader baseFileReader = HoodieFileReaderFactory.getReaderFactory(recordType).getFileReader(getHoodieTable().getHadoopConf(), new Path(clusteringOp.getDataFilePath()))) {
Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(getWriteConfig().getSchema()));
Iterator<HoodieRecord> recordIterator = baseFileReader.getRecordIterator(readerSchema);
// NOTE: Records have to be cloned here to make sure that if a record holds a low-level engine-specific
// payload pointing into a shared, mutable (underlying) buffer, we get a clean copy of
// it, since these records will be put into the records list.
recordIterator.forEachRemaining(record -> records.add(record.copy().wrapIntoHoodieRecordPayloadWithKeyGen(readerSchema, new Properties(), Option.empty())));
} catch (IOException e) {
throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
+ " and " + clusteringOp.getDeltaFilePaths(), e);
}
});
return records;
} | 3.68 |
querydsl_JTSMultiCurveExpression_isClosed | /**
* Returns 1 (TRUE) if this MultiCurve is closed [StartPoint ( ) = EndPoint ( ) for each
* Curve in this MultiCurve].
*
* @return closed
*/
public BooleanExpression isClosed() {
if (closed == null) {
closed = Expressions.booleanOperation(SpatialOps.IS_CLOSED, mixin);
}
return closed;
} | 3.68 |
hudi_HoodieTable_getCompletedCleanTimeline | /**
* Get only the completed (no-inflights) clean timeline.
*/
public HoodieTimeline getCompletedCleanTimeline() {
return getActiveTimeline().getCleanerTimeline().filterCompletedInstants();
} | 3.68 |
querydsl_AbstractSQLClause_addListener | /**
* Add a listener
*
* @param listener listener to add
*/
public void addListener(SQLListener listener) {
listeners.add(listener);
} | 3.68 |
flink_KeyGroupRangeAssignment_computeKeyGroupForKeyHash | /**
* Assigns the given key to a key-group index.
*
* @param keyHash the hash of the key to assign
* @param maxParallelism the maximum supported parallelism, aka the number of key-groups.
* @return the key-group to which the given key is assigned
*/
public static int computeKeyGroupForKeyHash(int keyHash, int maxParallelism) {
return MathUtils.murmurHash(keyHash) % maxParallelism;
} | 3.68 |
rocketmq-connect_WorkerDirectTask_assignment | /**
* Current task assignment processing partition
*
* @return the partition list
*/
@Override
public Set<RecordPartition> assignment() {
return null;
} | 3.68 |
hadoop_Hadoop20JHParser_canParse | /**
* Can this parser parse the input?
*
* @param input
* @return Whether this parser can parse the input.
* @throws IOException
*
* We will deem a stream to be a good 0.20 job history stream if the
* first line is exactly "Meta VERSION=\"1\" ."
*/
public static boolean canParse(InputStream input) throws IOException {
try {
LineReader reader = new LineReader(input);
Text buffer = new Text();
return reader.readLine(buffer) != 0
&& buffer.toString().equals("Meta VERSION=\"1\" .");
} catch (EOFException e) {
return false;
}
} | 3.68 |
hbase_ProcedureEvent_getSuspendedProcedures | /**
* Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it here
* for tests.
*/
public ProcedureDeque getSuspendedProcedures() {
return suspendedProcedures;
} | 3.68 |
morf_CompositeSchema_tableNames | /**
* @see org.alfasoftware.morf.metadata.Schema#tableNames()
*/
@Override
public Collection<String> tableNames() {
Set<String> result = Sets.newHashSet();
Set<String> seenTables = Sets.newHashSet();
for (Schema schema : delegates) {
for (Table table : schema.tables()) {
if (seenTables.add(table.getName().toUpperCase())) {
result.add(table.getName());
}
}
}
return result;
} | 3.68 |
hbase_ZKProcedureCoordinator_sendAbortToMembers | /**
* This is the abort message sent by the coordinator to a member. TODO: this code isn't actually
* used, but can be used to issue a cancellation from the coordinator.
*/
@Override
final public void sendAbortToMembers(Procedure proc, ForeignException ee) {
String procName = proc.getName();
LOG.debug("Aborting procedure '" + procName + "' in zk");
String procAbortNode = zkProc.getAbortZNode(procName);
try {
LOG.debug("Creating abort znode:" + procAbortNode);
String source = (ee.getSource() == null) ? coordName : ee.getSource();
byte[] errorInfo = ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee));
// first create the znode for the procedure
ZKUtil.createAndFailSilent(zkProc.getWatcher(), procAbortNode, errorInfo);
LOG.debug("Finished creating abort node:" + procAbortNode);
} catch (KeeperException e) {
// possible that we get this error for the procedure if we already reset the zk state, but in
// that case we should still get an error for that procedure anyways
zkProc.logZKTree(zkProc.baseZNode);
coordinator.rpcConnectionFailure(
"Failed to post zk node:" + procAbortNode + " to abort procedure '" + procName + "'",
new IOException(e));
}
} | 3.68 |
morf_AliasedField_minus | /**
* @param expression value to subtract from this field.
* @return A new expression using {@link MathsField} and {@link MathsOperator#MINUS}.
*/
public final MathsField minus(AliasedField expression) {
return new MathsField(this, MathsOperator.MINUS, potentiallyBracketExpression(expression));
} | 3.68 |
hadoop_LocalResolver_getNamenodesSubcluster | /**
* Get the Namenode mapping from the subclusters from the Membership store. As
* the Routers are usually co-located with Namenodes, we also check for the
* local address for this Router here.
*
* @return NN IP -> Subcluster.
*/
private Map<String, String> getNamenodesSubcluster(
MembershipStore membershipStore) {
// Manage requests from this hostname (127.0.0.1)
String localIp = "127.0.0.1";
String localHostname = localIp;
try {
localHostname = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
LOG.error("Cannot get local host name");
}
Map<String, String> ret = new HashMap<>();
try {
// Get the values from the store
GetNamenodeRegistrationsRequest request =
GetNamenodeRegistrationsRequest.newInstance();
GetNamenodeRegistrationsResponse response =
membershipStore.getNamenodeRegistrations(request);
final List<MembershipState> nns = response.getNamenodeMemberships();
for (MembershipState nn : nns) {
try {
String nsId = nn.getNameserviceId();
String rpcAddress = nn.getRpcAddress();
String hostname = HostAndPort.fromString(rpcAddress).getHost();
ret.put(hostname, nsId);
if (hostname.equals(localHostname)) {
ret.put(localIp, nsId);
}
InetAddress addr = InetAddress.getByName(hostname);
String ipAddr = addr.getHostAddress();
ret.put(ipAddr, nsId);
} catch (Exception e) {
LOG.error("Cannot get address for {}: {}", nn, e.getMessage());
}
}
} catch (IOException ioe) {
LOG.error("Cannot get Namenodes from the State Store", ioe);
}
return ret;
} | 3.68 |
hbase_RollingStatCalculator_getMean | /** Returns mean of the data values that are in the current list of data values */
public double getMean() {
return this.currentSum / (double) numberOfDataValues;
} | 3.68 |
framework_HierarchicalDataCommunicator_setDataProvider | /**
* Set the current hierarchical data provider for this communicator.
*
* @param dataProvider
* the data provider to set, must extend
* {@link HierarchicalDataProvider}, not <code>null</code>
* @param initialFilter
* the initial filter value to use, or <code>null</code> to not
* use any initial filter value
*
* @param <F>
* the filter type
*
* @return a consumer that accepts a new filter value to use
*/
@Override
public <F> SerializableConsumer<F> setDataProvider(
DataProvider<T, F> dataProvider, F initialFilter) {
if (dataProvider instanceof HierarchicalDataProvider) {
return setDataProvider(
(HierarchicalDataProvider<T, F>) dataProvider,
initialFilter);
}
throw new IllegalArgumentException(
"Only " + HierarchicalDataProvider.class.getName()
+ " and subtypes supported.");
} | 3.68 |
hadoop_Times_parseISO8601ToLocalTimeInMillis | /**
* Given ISO formatted string with format "yyyy-MM-dd'T'HH:mm:ss.SSSZ", return
* epoch time for local Time zone.
* @param isoString in format of "yyyy-MM-dd'T'HH:mm:ss.SSSZ".
* @return epoch time for local time zone.
* @throws ParseException if given ISO formatted string can not be parsed.
*/
public static long parseISO8601ToLocalTimeInMillis(String isoString)
throws ParseException {
if (isoString == null) {
throw new ParseException("Invalid input.", -1);
}
return Instant.from(ISO_OFFSET_DATE_TIME.parse(isoString)).toEpochMilli();
} | 3.68 |
flink_FlatMapNode_computeOperatorSpecificDefaultEstimates | /**
* Computes the estimates for the FlatMap operator. Since it un-nests, we assume a cardinality
* increase. To give the system a hint at data increase, we take a default magic number of a 5
* times increase.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
this.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords() * 5;
} | 3.68 |
flink_SharedSlot_allocateLogicalSlot | /**
* Registers an allocation request for a logical slot.
*
* <p>The logical slot request is complete once the underlying physical slot request is
* complete.
*
* @param executionVertexId {@link ExecutionVertexID} of the execution for which to allocate the
* logical slot
* @return the logical slot future
*/
CompletableFuture<LogicalSlot> allocateLogicalSlot(ExecutionVertexID executionVertexId) {
Preconditions.checkArgument(
executionSlotSharingGroup.getExecutionVertexIds().contains(executionVertexId),
"Trying to allocate a logical slot for execution %s which is not in the ExecutionSlotSharingGroup",
executionVertexId);
CompletableFuture<SingleLogicalSlot> logicalSlotFuture =
requestedLogicalSlots.getValueByKeyA(executionVertexId);
if (logicalSlotFuture != null) {
LOG.debug("Request for {} already exists", getLogicalSlotString(executionVertexId));
} else {
logicalSlotFuture = allocateNonExistentLogicalSlot(executionVertexId);
}
return logicalSlotFuture.thenApply(Function.identity());
} | 3.68 |
pulsar_ObjectMapperFactory_replaceSingletonInstances | /*
* Replaces the existing singleton ObjectMapper instances with new instances.
* This is used in tests to ensure that classloaders and class references don't leak between tests.
*/
private static void replaceSingletonInstances() {
MAPPER_REFERENCE.set(new MapperReference(createObjectMapperInstance()));
INSTANCE_WITH_INCLUDE_ALWAYS.set(new MapperReference(createObjectMapperWithIncludeAlways()));
YAML_MAPPER_REFERENCE.set(new MapperReference(createYamlInstance()));
} | 3.68 |
hadoop_SCMStore_createAppCheckerService | /**
* Create an instance of the AppChecker service via reflection based on the
* {@link YarnConfiguration#SCM_APP_CHECKER_CLASS} parameter.
*
* @param conf
* @return an instance of the AppChecker class
*/
@Private
@SuppressWarnings("unchecked")
public static AppChecker createAppCheckerService(Configuration conf) {
Class<? extends AppChecker> defaultCheckerClass;
try {
defaultCheckerClass =
(Class<? extends AppChecker>) Class
.forName(YarnConfiguration.DEFAULT_SCM_APP_CHECKER_CLASS);
} catch (Exception e) {
throw new YarnRuntimeException("Invalid default scm app checker class"
+ YarnConfiguration.DEFAULT_SCM_APP_CHECKER_CLASS, e);
}
AppChecker checker =
ReflectionUtils.newInstance(conf.getClass(
YarnConfiguration.SCM_APP_CHECKER_CLASS, defaultCheckerClass,
AppChecker.class), conf);
return checker;
} | 3.68 |
AreaShop_BuyRegion_isInResellingMode | /**
* Check if the region is being resold.
* @return true if the region is available for reselling, otherwise false
*/
public boolean isInResellingMode() {
return config.getBoolean("buy.resellMode");
} | 3.68 |
flink_YarnApplicationFileUploader_registerSingleLocalResource | /**
* Register a single local/remote resource and adds it to <tt>localResources</tt>.
*
* @param key the key to add the resource under
* @param resourcePath path of the resource to be registered
* @param relativeDstPath the relative path at the target location (this will be prefixed by the
* application-specific directory)
* @param resourceType type of the resource, which can be one of FILE, PATTERN, or ARCHIVE
* @param whetherToAddToRemotePaths whether to add the path of local resource to
* <tt>remotePaths</tt>
* @param whetherToAddToEnvShipResourceList whether to add the local resource to
* <tt>envShipResourceList</tt>
* @return the uploaded resource descriptor
*/
YarnLocalResourceDescriptor registerSingleLocalResource(
final String key,
final Path resourcePath,
final String relativeDstPath,
final LocalResourceType resourceType,
final boolean whetherToAddToRemotePaths,
final boolean whetherToAddToEnvShipResourceList)
throws IOException {
addToRemotePaths(whetherToAddToRemotePaths, resourcePath);
if (Utils.isRemotePath(resourcePath.toString())) {
final FileStatus fileStatus = fileSystem.getFileStatus(resourcePath);
LOG.debug("Using remote file {} to register local resource", fileStatus.getPath());
final YarnLocalResourceDescriptor descriptor =
YarnLocalResourceDescriptor.fromFileStatus(
key, fileStatus, LocalResourceVisibility.APPLICATION, resourceType);
addToEnvShipResourceList(whetherToAddToEnvShipResourceList, descriptor);
localResources.put(key, descriptor.toLocalResource());
return descriptor;
}
final File localFile = new File(resourcePath.toUri().getPath());
final Tuple2<Path, Long> remoteFileInfo =
uploadLocalFileToRemote(resourcePath, relativeDstPath);
final YarnLocalResourceDescriptor descriptor =
new YarnLocalResourceDescriptor(
key,
remoteFileInfo.f0,
localFile.length(),
remoteFileInfo.f1,
LocalResourceVisibility.APPLICATION,
resourceType);
addToEnvShipResourceList(whetherToAddToEnvShipResourceList, descriptor);
localResources.put(key, descriptor.toLocalResource());
return descriptor;
} | 3.68 |
MagicPlugin_BaseSpell_onPlayerDeath | /**
* Listener method, called on player death for registered spells.
*
* @param event The original entity death event
*/
public void onPlayerDeath(EntityDeathEvent event)
{
} | 3.68 |
morf_DataValueLookup_getValues | /**
* Returns all the key/value pairs stored.
*
* @return An iterable of data values.
*/
public default Iterable<? extends DataValue> getValues() {
throw new UnsupportedOperationException(
"Data value lookup type " + getClass().getName() + " currently lacks supported for getValues()");
} | 3.68 |
hadoop_CloseableTaskPoolSubmitter_close | /**
* Shut down the pool.
*/
@Override
public void close() {
if (pool != null) {
pool.shutdown();
pool = null;
}
} | 3.68 |
pulsar_PulsarClientImplementationBindingImpl_convertKeyValueDataStringToSchemaInfoSchema | /**
* Convert the key/value schema info data json bytes to key/value schema info data bytes.
*
* @param keyValueSchemaInfoDataJsonBytes the key/value schema info data json bytes
* @return the key/value schema info data bytes
*/
public byte[] convertKeyValueDataStringToSchemaInfoSchema(byte[] keyValueSchemaInfoDataJsonBytes)
throws IOException {
return SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema(keyValueSchemaInfoDataJsonBytes);
} | 3.68 |
hbase_ZkSplitLogWorkerCoordination_taskLoop | /**
* Wait for tasks to become available at /hbase/splitlog zknode. Grab a task one at a time. This
* policy puts an upper-limit on the number of simultaneous log splitting that could be happening
* in a cluster.
* <p>
* Synchronization using <code>taskReadySeq</code> ensures that it will try to grab every task
* that has been put up
*/
@Override
public void taskLoop() throws InterruptedException {
while (!shouldStop) {
int seq_start = taskReadySeq.get();
List<String> paths;
paths = getTaskList();
if (paths == null) {
LOG.warn("Could not get tasks, did someone remove " + watcher.getZNodePaths().splitLogZNode
+ " ... worker thread exiting.");
return;
}
// shuffle the paths to prevent different split log workers from starting from the same log file
// after the meta log (if any)
Collections.shuffle(paths);
// pick the meta WAL first
int offset = 0;
for (int i = 0; i < paths.size(); i++) {
if (AbstractFSWALProvider.isMetaFile(paths.get(i))) {
offset = i;
break;
}
}
int numTasks = paths.size();
boolean taskGrabbed = false;
for (int i = 0; i < numTasks; i++) {
while (!shouldStop) {
if (this.areSplittersAvailable()) {
if (LOG.isTraceEnabled()) {
LOG.trace("Current region server " + server.getServerName()
+ " is ready to take more tasks, will get task list and try grab tasks again.");
}
int idx = (i + offset) % paths.size();
// don't call ZKSplitLog.getNodeName() because that will lead to
// double encoding of the path name
taskGrabbed |=
grabTask(ZNodePaths.joinZNode(watcher.getZNodePaths().splitLogZNode, paths.get(idx)));
break;
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("Current region server " + server.getServerName() + " has "
+ this.tasksInProgress.get() + " tasks in progress and can't take more.");
}
Thread.sleep(100);
}
}
if (shouldStop) {
return;
}
}
if (!taskGrabbed && !shouldStop) {
// do not grab any tasks, sleep a little bit to reduce zk request.
Thread.sleep(1000);
}
SplitLogCounters.tot_wkr_task_grabing.increment();
synchronized (taskReadySeq) {
while (seq_start == taskReadySeq.get()) {
taskReadySeq.wait(checkInterval);
}
}
}
} | 3.68 |
pulsar_AuthorizationService_allowNamespaceOperationAsync | /**
* Grant authorization-action permission on a namespace to the given client.
*
* @param namespaceName
* @param operation
* @param role
* @param authData
* additional authdata in json for targeted authorization provider
* @return IllegalArgumentException when namespace not found
* @throws IllegalStateException
* when failed to grant permission
*/
public CompletableFuture<Boolean> allowNamespaceOperationAsync(NamespaceName namespaceName,
NamespaceOperation operation,
String role,
AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.allowNamespaceOperationAsync(namespaceName, role, operation, authData);
} | 3.68 |
hadoop_BlockManagerParameters_getMaxBlocksCount | /**
* @return The max blocks count to be kept in cache at any time.
*/
public int getMaxBlocksCount() {
return maxBlocksCount;
} | 3.68 |
streampipes_ZipFileExtractor_extractZipToMap | // TODO used by export feature - extend this to support binaries
public Map<String, byte[]> extractZipToMap() throws IOException {
byte[] buffer = new byte[1024];
Map<String, byte[]> entries = new HashMap<>();
ZipInputStream zis = new ZipInputStream(zipInputStream);
ZipEntry zipEntry = zis.getNextEntry();
while (zipEntry != null) {
ByteArrayOutputStream fos = new ByteArrayOutputStream();
int len;
while ((len = zis.read(buffer)) > 0) {
fos.write(buffer, 0, len);
}
entries.put(sanitizeName(zipEntry.getName()), fos.toByteArray());
fos.close();
zipEntry = zis.getNextEntry();
}
zis.closeEntry();
zis.close();
return entries;
} | 3.68 |
flink_CheckedThread_run | /** This method is final - thread work should go into the {@link #go()} method instead. */
@Override
public final void run() {
try {
go();
} catch (Throwable t) {
error = t;
}
} | 3.68 |
pulsar_SchemaUtils_jsonifySchemaInfoWithVersion | /**
* Jsonify the schema info with version.
*
* @param schemaInfoWithVersion the schema info
* @return the jsonified schema info with version
*/
public static String jsonifySchemaInfoWithVersion(SchemaInfoWithVersion schemaInfoWithVersion) {
GsonBuilder gsonBuilder = new GsonBuilder()
.setPrettyPrinting()
.registerTypeHierarchyAdapter(SchemaInfo.class, SCHEMAINFO_ADAPTER)
.registerTypeHierarchyAdapter(Map.class, SCHEMA_PROPERTIES_SERIALIZER);
return gsonBuilder.create().toJson(schemaInfoWithVersion);
} | 3.68 |
framework_VScrollTable_prepareRow | /**
* This method is used to instantiate new rows for this table. It
* automatically sets correct widths to row cells and assigns the correct
* client reference for child widgets.
*
* This method can be called only after table has been initialized
*
* @param uidl
*/
private VScrollTableRow prepareRow(UIDL uidl) {
final VScrollTableRow row = createRow(uidl, aligns);
row.initCellWidths();
return row;
} | 3.68 |
hmily_CreateSQLUtil_getInsertValuesClause | /**
* Get insert values clause.
*
* @param keySet key set
* @return insert values clause
*/
public static String getInsertValuesClause(final Set<String> keySet) {
Map<String, String> map = Maps.asMap(keySet, input -> "?");
return String.format("(%s) VALUES (%s)", Joiner.on(",").join(map.keySet()), Joiner.on(",").join(map.values()));
} | 3.68 |
flink_CommonTestUtils_createCopySerializable | /**
* Creates a copy of an object via Java Serialization.
*
* @param original The original object.
* @return The copied object.
*/
public static <T extends java.io.Serializable> T createCopySerializable(T original)
throws IOException {
if (original == null) {
throw new IllegalArgumentException();
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(baos);
oos.writeObject(original);
oos.close();
baos.close();
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
try (ObjectInputStream ois = new ObjectInputStream(bais)) {
@SuppressWarnings("unchecked")
T copy = (T) ois.readObject();
return copy;
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
} | 3.68 |
flink_ExternalResourceUtils_getExternalResourceConfigurationKeys | /**
* Get the external resource configuration keys map, indexed by the resource name. The
* configuration key should be used for deployment specific container request.
*
* @param config Configurations
* @param suffix suffix of config option for deployment specific configuration key
* @return external resource configuration keys map, mapping the resource name to the configuration
* key for the deployment-specific container request
*/
public static Map<String, String> getExternalResourceConfigurationKeys(
Configuration config, String suffix) {
final Set<String> resourceSet = getExternalResourceSet(config);
final Map<String, String> configKeysToResourceNameMap = new HashMap<>();
LOG.info("Enabled external resources: {}", resourceSet);
if (resourceSet.isEmpty()) {
return Collections.emptyMap();
}
final Map<String, String> externalResourceConfigs = new HashMap<>();
for (String resourceName : resourceSet) {
final ConfigOption<String> configKeyOption =
key(ExternalResourceOptions.getSystemConfigKeyConfigOptionForResource(
resourceName, suffix))
.stringType()
.noDefaultValue();
final String configKey = config.get(configKeyOption);
if (StringUtils.isNullOrWhitespaceOnly(configKey)) {
LOG.warn(
"Could not find valid {} for {}. Will ignore that resource.",
configKeyOption.key(),
resourceName);
} else {
configKeysToResourceNameMap.compute(
configKey,
(ignored, previousResource) -> {
if (previousResource != null) {
LOG.warn(
"Duplicate config key {} occurred for external resources, the one named {} will overwrite the value.",
configKey,
resourceName);
externalResourceConfigs.remove(previousResource);
}
return resourceName;
});
externalResourceConfigs.put(resourceName, configKey);
}
}
return externalResourceConfigs;
} | 3.68 |
querydsl_BeanMap_getTypeFunction | /**
* Returns a transformer for the given primitive type.
*
* @param aType the primitive type whose transformer to return
* @return a transformer that will convert strings into that type,
* or null if the given type is not a primitive type
*/
protected Function<?,?> getTypeFunction(Class<?> aType) {
return defaultFunctions.get(aType);
} | 3.68 |
dubbo_ServiceInvokeRestFilter_acceptSupportJudge | /**
* Throws UnSupportAcceptException if the accept header cannot be supported.
*
* @param requestFacade
*/
private void acceptSupportJudge(RequestFacade requestFacade, Class<?> returnType) {
try {
// media type judge
getAcceptMediaType(requestFacade, returnType);
} catch (UnSupportContentTypeException e) {
// return type judge
MediaType mediaType = HttpMessageCodecManager.typeSupport(returnType);
String accept = requestFacade.getHeader(RestHeaderEnum.ACCEPT.getHeader());
if (mediaType == null || accept == null) {
throw e;
}
if (!accept.contains(mediaType.value)) {
throw e;
}
}
} | 3.68 |
hadoop_CandidateNodeSetUtils_getSingleNode | /*
* If the {@link CandidateNodeSet} only has one entry, return it. Otherwise,
* return null.
*/
public static <N extends SchedulerNode> N getSingleNode(
CandidateNodeSet<N> candidates) {
N node = null;
if (1 == candidates.getAllNodes().size()) {
node = candidates.getAllNodes().values().iterator().next();
}
return node;
} | 3.68 |
hadoop_DataNodeVolumeMetrics_getNativeCopyIoSampleCount | // Based on nativeCopyIoRate
public long getNativeCopyIoSampleCount() {
return nativeCopyIoRate.lastStat().numSamples();
} | 3.68 |
flink_PageSizeUtil_getSystemPageSize | /**
* Tries to get the system page size. If the page size cannot be determined, this returns -1.
*
* <p>This internally relies on the presence of "unsafe" and the resolution via some Netty
* utilities.
*/
public static int getSystemPageSize() {
try {
return PageSizeUtilInternal.getSystemPageSize();
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
return PAGE_SIZE_UNKNOWN;
}
} | 3.68 |
hbase_RegionSizeCalculator_getRegionSize | /**
* Returns size of given region in bytes. Returns 0 if region was not found.
*/
public long getRegionSize(byte[] regionId) {
Long size = sizeMap.get(regionId);
if (size == null) {
LOG.debug("Unknown region:" + Arrays.toString(regionId));
return 0;
} else {
return size;
}
} | 3.68 |
hbase_ZKWatcher_interruptedExceptionNoThrow | /**
* Log the InterruptedException and interrupt current thread
* @param ie The InterruptedException to log
* @param throwLater Whether we will throw the exception later
*/
public void interruptedExceptionNoThrow(InterruptedException ie, boolean throwLater) {
LOG.debug(prefix("Received InterruptedException, will interrupt current thread"
+ (throwLater ? " and rethrow a SystemErrorException" : "")), ie);
// At least preserve interrupt.
Thread.currentThread().interrupt();
} | 3.68 |
hudi_HiveSchemaUtils_splitSchemaByPartitionKeys | /**
* Split the field schemas by given partition keys.
*
* @param fieldSchemas The Hive field schemas.
* @param partitionKeys The partition keys.
* @return The pair of (regular columns, partition columns) schema fields
*/
public static Pair<List<FieldSchema>, List<FieldSchema>> splitSchemaByPartitionKeys(
List<FieldSchema> fieldSchemas,
List<String> partitionKeys) {
List<FieldSchema> regularColumns = new ArrayList<>();
List<FieldSchema> partitionColumns = new ArrayList<>();
for (FieldSchema fieldSchema : fieldSchemas) {
if (partitionKeys.contains(fieldSchema.getName())) {
partitionColumns.add(fieldSchema);
} else {
regularColumns.add(fieldSchema);
}
}
return Pair.of(regularColumns, partitionColumns);
} | 3.68 |
flink_WindowSavepointReader_aggregate | /**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid The uid of the operator.
* @param aggregateFunction The aggregate function used to create the window.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param accType The type information of the accumulator function.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the values that are aggregated.
* @param <ACC> The type of the accumulator (intermediate aggregate state).
* @param <R> The type of the aggregated result.
* @param <OUT> The output type of the reader function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException If savepoint does not contain the specified uid.
*/
public <K, T, ACC, R, OUT> DataStream<OUT> aggregate(
String uid,
AggregateFunction<T, ACC, R> aggregateFunction,
WindowReaderFunction<R, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<ACC> accType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, R, W, OUT> operator =
WindowReaderOperator.aggregate(
aggregateFunction, readerFunction, keyType, windowSerializer, accType);
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
hadoop_AbfsConfiguration_getLong | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value, and finally tries the default value.
* @param key Account-agnostic configuration key
* @param defaultValue Value returned if none is configured
* @return value if one exists, else the default value
*/
public long getLong(String key, long defaultValue) {
return rawConfig.getLong(accountConf(key), rawConfig.getLong(key, defaultValue));
} | 3.68 |
hadoop_Time_now | /**
* Current system time. Do not use this to calculate a duration or interval
* to sleep, because it will be broken by settimeofday. Instead, use
* monotonicNow.
* @return current time in msec.
*/
public static long now() {
return System.currentTimeMillis();
} | 3.68 |
morf_UpdateStatementBuilder_where | /**
* Specifies the where criteria
*
* <blockquote><pre>
* update([table])
* .set([fields])
* .where([criteria]);</pre></blockquote>
*
* @param criterion the criteria to filter the results by
* @return this, for method chaining.
*/
public UpdateStatementBuilder where(Criterion criterion) {
if (criterion == null)
throw new IllegalArgumentException("Criterion was null in where clause");
whereCriterion = criterion;
return this;
} | 3.68 |
AreaShop_RegionSign_getProfile | /**
* Get the ConfigurationSection defining the sign layout.
* @return The sign layout config
*/
public ConfigurationSection getProfile() {
return getRegion().getConfigurationSectionSetting("general.signProfile", "signProfiles", getRegion().getConfig().get("general.signs." + key + ".profile"));
} | 3.68 |
flink_SkipListKeySerializer_deserializeKey | /**
* Deserialize the partition key from the byte buffer which stores skip list key.
*
* @param memorySegment the memory segment which stores the skip list key.
* @param offset the start position of the skip list key in the byte buffer.
* @param len length of the skip list key.
*/
K deserializeKey(MemorySegment memorySegment, int offset, int len) {
MemorySegmentInputStreamWithPos inputStream =
new MemorySegmentInputStreamWithPos(memorySegment, offset, len);
DataInputViewStreamWrapper inputView = new DataInputViewStreamWrapper(inputStream);
int namespaceLen = memorySegment.getInt(offset);
inputStream.setPosition(offset + Integer.BYTES + namespaceLen + Integer.BYTES);
try {
return keySerializer.deserialize(inputView);
} catch (IOException e) {
throw new RuntimeException("deserialize key failed", e);
}
} | 3.68 |
flink_NettyShuffleMetricFactory_registerLegacyNetworkMetrics | /**
* Registers legacy network metric groups before shuffle service refactoring.
*
* <p>Registers legacy metric groups if shuffle service implementation is original default one.
*
* @deprecated should be removed in future
*/
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public static void registerLegacyNetworkMetrics(
boolean isDetailedMetrics,
MetricGroup metricGroup,
ResultPartitionWriter[] producedPartitions,
InputGate[] inputGates) {
checkNotNull(metricGroup);
checkNotNull(producedPartitions);
checkNotNull(inputGates);
// add metrics for buffers
final MetricGroup buffersGroup = metricGroup.addGroup(METRIC_GROUP_BUFFERS_DEPRECATED);
// similar to MetricUtils.instantiateNetworkMetrics() but inside this IOMetricGroup
// (metricGroup)
final MetricGroup networkGroup = metricGroup.addGroup(METRIC_GROUP_NETWORK_DEPRECATED);
final MetricGroup outputGroup = networkGroup.addGroup(METRIC_GROUP_OUTPUT);
final MetricGroup inputGroup = networkGroup.addGroup(METRIC_GROUP_INPUT);
ResultPartition[] resultPartitions =
Arrays.copyOf(
producedPartitions, producedPartitions.length, ResultPartition[].class);
registerOutputMetrics(isDetailedMetrics, outputGroup, buffersGroup, resultPartitions);
SingleInputGate[] singleInputGates =
Arrays.copyOf(inputGates, inputGates.length, SingleInputGate[].class);
registerInputMetrics(isDetailedMetrics, inputGroup, buffersGroup, singleInputGates);
} | 3.68 |
framework_Navigator_parseParameterStringToMap | /**
* Parses the given parameter string to a map using the given separator
* string.
*
* @param parameterString
* the parameter string to parse
* @param separator
* the string (typically one character) used to separate values
* from each other
* @return The navigation state as Map<String, String>.
* @since 8.1
*/
protected Map<String, String> parseParameterStringToMap(
String parameterString, String separator) {
if (parameterString.isEmpty()) {
return Collections.emptyMap();
}
Map<String, String> parameterMap = new HashMap<>();
String[] parameters = parameterString.split(separator);
for (String parameter : parameters) {
String[] keyAndValue = parameter
.split(DEFAULT_STATE_PARAMETER_KEY_VALUE_SEPARATOR);
parameterMap.put(keyAndValue[0],
keyAndValue.length > 1 ? keyAndValue[1] : "");
}
return parameterMap;
} | 3.68 |
framework_LayoutDependencyTree_setNeedsHorizontalMeasure | /**
* @param connectorId
* the connector id of the component whose horizontal size might
* have changed
* @param needsMeasure
* {@code true} if measuring should be enabled, {@code false} if
* measuring should be disabled (disabling is only effective if
* there are no blockers)
*
* @deprecated Use
* {@link #setNeedsHorizontalMeasure(ComponentConnector, boolean)}
* for improved performance.
*/
@Deprecated
public void setNeedsHorizontalMeasure(String connectorId,
boolean needsMeasure) {
// Ensure connector exists
ComponentConnector connector = (ComponentConnector) ConnectorMap
.get(connection).getConnector(connectorId);
if (connector == null) {
return;
}
setNeedsHorizontalMeasure(connector, needsMeasure);
} | 3.68 |
hadoop_MountTableRefresherService_getClientCreator | /**
* Creates RouterClient and caches it.
*/
private CacheLoader<String, RouterClient> getClientCreator() {
return new CacheLoader<String, RouterClient>() {
public RouterClient load(String adminAddress) throws IOException {
InetSocketAddress routerSocket =
NetUtils.createSocketAddr(adminAddress);
Configuration config = getConfig();
return createRouterClient(routerSocket, config);
}
};
} | 3.68 |
hudi_JsonEncoder_configure | /**
* Reconfigures this JsonEncoder to output to the JsonGenerator provided.
* <p/>
* If the JsonGenerator provided is null, a NullPointerException is thrown.
* <p/>
* Otherwise, this JsonEncoder will flush its current output and then
* reconfigure its output to use the provided JsonGenerator.
*
* @param generator The JsonGenerator to direct output to. Cannot be null.
* @return this JsonEncoder
* @throws IOException
* @throws NullPointerException if {@code generator} is {@code null}
*/
private JsonEncoder configure(JsonGenerator generator) throws IOException {
Objects.requireNonNull(generator, "JsonGenerator cannot be null");
if (null != parser) {
flush();
}
this.out = generator;
return this;
} | 3.68 |
framework_Table_setSpanColumns | /**
* If set to true, only one string will be rendered, spanning the entire
* row.
*
* @param spanColumns
*/
public void setSpanColumns(boolean spanColumns) {
this.spanColumns = spanColumns;
} | 3.68 |
flink_DeduplicateFunctionHelper_processLastRowOnChangelog | /**
* Processes element to deduplicate on keys, sends current element as last row, retracts
* previous element if needed.
*
* <p>Note: we don't support stateless mode yet, because this is not safe for Kafka tombstone
* messages, which don't contain full content. This can be a future improvement if the
* downstream (e.g. sink) doesn't require full content for DELETE messages.
*
* @param currentRow latest row received by deduplicate function
* @param generateUpdateBefore whether need to send UPDATE_BEFORE message for updates
* @param state state of function
* @param out underlying collector
*/
static void processLastRowOnChangelog(
RowData currentRow,
boolean generateUpdateBefore,
ValueState<RowData> state,
Collector<RowData> out,
boolean isStateTtlEnabled,
RecordEqualiser equaliser)
throws Exception {
RowData preRow = state.value();
RowKind currentKind = currentRow.getRowKind();
if (currentKind == RowKind.INSERT || currentKind == RowKind.UPDATE_AFTER) {
if (preRow == null) {
// the first row, send INSERT message
currentRow.setRowKind(RowKind.INSERT);
out.collect(currentRow);
} else {
if (!isStateTtlEnabled && equaliser.equals(preRow, currentRow)) {
// currentRow is the same as preRow and state cleaning is not enabled.
// We do not emit retraction and update message.
// If state cleaning is enabled, we have to emit messages to prevent too early
// state eviction of downstream operators.
return;
} else {
if (generateUpdateBefore) {
preRow.setRowKind(RowKind.UPDATE_BEFORE);
out.collect(preRow);
}
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
}
// normalize row kind
currentRow.setRowKind(RowKind.INSERT);
// save to state
state.update(currentRow);
} else {
// DELETE or UPDATE_BEFORE
if (preRow != null) {
// always set to DELETE because this row has been removed
// even the input is UPDATE_BEFORE, there may no UPDATE_AFTER after it.
preRow.setRowKind(RowKind.DELETE);
// output the preRow instead of currentRow,
// because preRow always contains the full content.
// currentRow may only contain key parts (e.g. Kafka tombstone records).
out.collect(preRow);
// clear state as the row has been removed
state.clear();
}
// nothing to do if removing a non-existed row
}
} | 3.68 |
framework_VAbstractCalendarPanel_getResetKey | /**
* Returns the reset key which will reset the calendar to the previous
* selection. By default this is backspace but it can be overridden to
* change the key to whatever you want.
*
* @return the reset key
*/
protected int getResetKey() {
return KeyCodes.KEY_BACKSPACE;
} | 3.68 |
hudi_FailSafeConsistencyGuard_waitForFilesVisibility | /**
* Helper function to wait for all files belonging to a single directory to appear.
*
* @param dirPath Dir Path
* @param files Files to appear/disappear
* @param event Appear/Disappear
* @throws TimeoutException
*/
public void waitForFilesVisibility(String dirPath, List<String> files, FileVisibility event) throws TimeoutException {
Path dir = new Path(dirPath);
List<String> filesWithoutSchemeAndAuthority = getFilesWithoutSchemeAndAuthority(files);
retryTillSuccess(dir, filesWithoutSchemeAndAuthority, event);
} | 3.68 |
flink_StateBackend_useManagedMemory | /** Whether the state backend uses Flink's managed memory. */
default boolean useManagedMemory() {
return false;
} | 3.68 |
hbase_RestoreSnapshotHelper_hasRegionsToAdd | /** Returns true if there're new regions */
public boolean hasRegionsToAdd() {
return this.regionsToAdd != null && this.regionsToAdd.size() > 0;
} | 3.68 |
flink_TableSource_explainSource | /**
* Describes the table source.
*
* @return A String explaining the {@link TableSource}.
*/
default String explainSource() {
return TableConnectorUtils.generateRuntimeName(
getClass(), getTableSchema().getFieldNames());
} | 3.68 |
framework_AbstractTestUI_setTransport | /**
* Sets the push transport according to the transport= URL parameter if such
* is given. Supports transport=xhr (disables push), transport=websocket
* (forces websocket into use), transport=streaming (forces streaming into
* use). Using ?transport=xyz disables the fallback transport.
*
* @param request
* The UI init request
*/
protected void setTransport(VaadinRequest request) {
String transport = request.getParameter("transport");
PushConfiguration config = getPushConfiguration();
if ("xhr".equals(transport)) {
config.setPushMode(PushMode.DISABLED);
} else if ("websocket".equals(transport)) {
enablePush(Transport.WEBSOCKET);
} else if ("websocket-xhr".equals(transport)) {
enablePush(Transport.WEBSOCKET_XHR);
} else if ("streaming".equals(transport)) {
enablePush(Transport.STREAMING);
} else if ("long-polling".equals(transport)) {
enablePush(Transport.LONG_POLLING);
} else if (transport != null) {
throw new IllegalArgumentException("Unknown transport value '"
+ transport
+ "'. Supported are xhr,websocket,streaming,long-polling");
}
} | 3.68 |
hudi_DFSHoodieDatasetInputReader_iteratorLimit | /**
* Creates an iterator returning the first {@code limitSize} elements of the given iterator. If the original iterator does not contain that many elements, the returned iterator will have the same
* behavior as the original iterator. The returned iterator supports {@code remove()} if the original iterator does.
*
* @param iterator the iterator to limit
* @param limitSize the maximum number of elements in the returned iterator
* @throws IllegalArgumentException if {@code limitSize} is negative
*/
private static <T> Iterator<T> iteratorLimit(
final Iterator<T> iterator, final int limitSize) {
ValidationUtils.checkArgument(iterator != null, "iterator is null");
ValidationUtils.checkArgument(limitSize >= 0, "limit is negative");
return new Iterator<T>() {
private int count;
@Override
public boolean hasNext() {
return count < limitSize && iterator.hasNext();
}
@Override
public T next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
count++;
return iterator.next();
}
@Override
public void remove() {
iterator.remove();
}
};
} | 3.68 |