Dataset columns:
  name          string  (length 12 to 178)
  code_snippet  string  (length 8 to 36.5k)
  score         float64 (range 3.26 to 3.68)
hbase_RequestConverter_buildUnassignRegionRequest
/**
 * Creates a protocol buffer UnassignRegionRequest
 * @return an UnassignRegionRequest
 */
public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) {
  UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
  builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName));
  return builder.build();
}
3.68
hbase_CallRunner_drop
/**
 * Drops this call because the server is overloaded.
 */
public void drop() {
  try (Scope ignored = span.makeCurrent()) {
    if (call.disconnectSince() >= 0) {
      RpcServer.LOG.debug("{}: skipped {}", Thread.currentThread().getName(), call);
      span.addEvent("Client disconnect detected");
      span.setStatus(StatusCode.OK);
      return;
    }

    // Set the response
    InetSocketAddress address = rpcServer.getListenerAddress();
    call.setResponse(null, null, CALL_DROPPED_EXCEPTION, "Call dropped, server "
      + (address != null ? address : "(channel closed)") + " is overloaded, please retry.");
    TraceUtil.setError(span, CALL_DROPPED_EXCEPTION);
    call.sendResponseIfReady();
    this.rpcServer.getMetrics().exception(CALL_DROPPED_EXCEPTION);
  } catch (ClosedChannelException cce) {
    InetSocketAddress address = rpcServer.getListenerAddress();
    RpcServer.LOG.warn(
      "{}: caught a ClosedChannelException, " + "this means that the server "
        + (address != null ? address : "(channel closed)")
        + " was processing a request but the client went away. The error message was: {}",
      Thread.currentThread().getName(), cce.getMessage());
    TraceUtil.setError(span, cce);
  } catch (Exception e) {
    RpcServer.LOG.warn("{}: caught: {}", Thread.currentThread().getName(),
      StringUtils.stringifyException(e));
    TraceUtil.setError(span, e);
  } finally {
    if (!successful) {
      this.rpcServer.addCallSize(call.getSize() * -1);
    }
    cleanup();
    span.end();
  }
}
3.68
hbase_Chunk_alloc
/**
 * Try to allocate <code>size</code> bytes from the chunk. If allocation is attempted before the
 * init() call, the allocating thread busy-waits, looping until nextFreeOffset is set.
 * @return the offset of the successful allocation, or -1 to indicate not-enough-space
 */
public int alloc(int size) {
  while (true) {
    int oldOffset = nextFreeOffset.get();
    if (oldOffset == UNINITIALIZED) {
      // The chunk doesn't have its data allocated yet.
      // Since we found this in curChunk, we know that whoever
      // CAS-ed it there is allocating it right now. So spin-loop
      // shouldn't spin long!
      Thread.yield();
      continue;
    }
    if (oldOffset == OOM) {
      // doh we ran out of ram. return -1 to chuck this away.
      return -1;
    }

    if (oldOffset + size > data.capacity()) {
      return -1; // alloc doesn't fit
    }
    // TODO : If seqID is to be written add 8 bytes here for nextFreeOffset

    // Try to atomically claim this chunk
    if (nextFreeOffset.compareAndSet(oldOffset, oldOffset + size)) {
      // we got the alloc
      allocCount.incrementAndGet();
      return oldOffset;
    }
    // we raced and lost alloc, try again
  }
}
3.68
pulsar_ProducerImpl_failPendingMessages
/** * This fails and clears the pending messages with the given exception. This method should be called from within the * ProducerImpl object mutex. */ private void failPendingMessages(ClientCnx cnx, PulsarClientException ex) { if (cnx == null) { final AtomicInteger releaseCount = new AtomicInteger(); final boolean batchMessagingEnabled = isBatchMessagingEnabled(); pendingMessages.forEach(op -> { releaseCount.addAndGet(batchMessagingEnabled ? op.numMessagesInBatch : 1); try { // Need to protect ourselves from any exception being thrown in the future handler from the // application ex.setSequenceId(op.sequenceId); // if message is chunked then call callback only on last chunk if (op.totalChunks <= 1 || (op.chunkId == op.totalChunks - 1)) { // Need to protect ourselves from any exception being thrown in the future handler from the // application op.sendComplete(ex); } } catch (Throwable t) { log.warn("[{}] [{}] Got exception while completing the callback for msg {}:", topic, producerName, op.sequenceId, t); } client.getMemoryLimitController().releaseMemory(op.uncompressedSize); ReferenceCountUtil.safeRelease(op.cmd); op.recycle(); }); pendingMessages.clear(); semaphoreRelease(releaseCount.get()); if (batchMessagingEnabled) { failPendingBatchMessages(ex); } } else { // If we have a connection, we schedule the callback and recycle on the event loop thread to avoid any // race condition since we also write the message on the socket from this thread cnx.ctx().channel().eventLoop().execute(() -> { synchronized (ProducerImpl.this) { failPendingMessages(null, ex); } }); } }
3.68
hbase_MetricsREST_incrementFailedPutRequests
/**
 * @param inc How much to add to failedPutCount.
 */
public void incrementFailedPutRequests(final int inc) {
  source.incrementFailedPutRequests(inc);
}
3.68
dubbo_AbstractMethodConfig_setMock
/**
 * Set the property "mock"
 *
 * @param mock the value of mock
 * @since 2.7.6
 * @deprecated use {@link #setMock(String)} instead
 */
@Deprecated
public void setMock(Object mock) {
    if (mock == null) {
        return;
    }
    this.setMock(String.valueOf(mock));
}
3.68
framework_PopupDateField_setTextFieldEnabled
/**
 * Enables or disables the text field. By default the text field is enabled.
 * Disabling it causes only the button for date selection to be active, thus
 * preventing the user from entering invalid dates.
 *
 * See <a href="http://dev.vaadin.com/ticket/6790">issue 6790</a>.
 *
 * @param state
 *            <b>true</b> to enable text field, <b>false</b> to disable it.
 */
public void setTextFieldEnabled(boolean state) {
    getState().textFieldEnabled = state;
}
3.68
dubbo_HttpHeaderUtil_parseRequestAttribute
/**
 * parse request attribute
 * @param rpcInvocation
 * @param request
 */
public static void parseRequestAttribute(RpcInvocation rpcInvocation, RequestFacade request) {
    int localPort = request.getLocalPort();
    String localAddr = request.getLocalAddr();
    int remotePort = request.getRemotePort();
    String remoteAddr = request.getRemoteAddr();
    rpcInvocation.put(RestConstant.REMOTE_ADDR, remoteAddr);
    rpcInvocation.put(RestConstant.LOCAL_ADDR, localAddr);
    rpcInvocation.put(RestConstant.REMOTE_PORT, remotePort);
    rpcInvocation.put(RestConstant.LOCAL_PORT, localPort);
}
3.68
hadoop_MRJobConfUtil_getTaskProgressMinDeltaThreshold
/**
 * Retrieves the min delta progress required to log the task attempt current
 * progress.
 * @return the defined threshold in the conf.
 *         returns the default value if
 *         {@link #setTaskLogProgressDeltaThresholds} has not been called.
 */
public static double getTaskProgressMinDeltaThreshold() {
  if (progressMinDeltaThreshold == null) {
    return PROGRESS_MIN_DELTA_FACTOR
        * MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD_DEFAULT;
  }
  return progressMinDeltaThreshold.doubleValue();
}
3.68
hadoop_SaslInputStream_readMoreData
/** * Read more data and get them processed <br> * Entry condition: ostart = ofinish <br> * Exit condition: ostart <= ofinish <br> * * return (ofinish-ostart) (we have this many bytes for you), 0 (no data now, * but could have more later), or -1 (absolutely no more data) */ private int readMoreData() throws IOException { try { inStream.readFully(lengthBuf); int length = unsignedBytesToInt(lengthBuf); if (LOG.isDebugEnabled()) LOG.debug("Actual length is " + length); saslToken = new byte[length]; inStream.readFully(saslToken); } catch (EOFException e) { return -1; } try { if (saslServer != null) { // using saslServer obuffer = saslServer.unwrap(saslToken, 0, saslToken.length); } else { // using saslClient obuffer = saslClient.unwrap(saslToken, 0, saslToken.length); } } catch (SaslException se) { try { disposeSasl(); } catch (SaslException ignored) { } throw se; } ostart = 0; if (obuffer == null) ofinish = 0; else ofinish = obuffer.length; return ofinish; }
3.68
hudi_Triple_hashCode
/**
 * <p>
 * Returns a suitable hash code.
 * </p>
 *
 * @return the hash code
 */
@Override
public int hashCode() {
  return (getLeft() == null ? 0 : getLeft().hashCode())
      ^ (getMiddle() == null ? 0 : getMiddle().hashCode())
      ^ (getRight() == null ? 0 : getRight().hashCode());
}
3.68
hadoop_BlockReaderUtil_readFully
/* See {@link BlockReader#readFully(byte[], int, int)} */
public static void readFully(BlockReader reader, byte[] buf, int off, int len)
    throws IOException {
  int toRead = len;
  while (toRead > 0) {
    int ret = reader.read(buf, off, toRead);
    if (ret < 0) {
      throw new IOException("Premature EOF from inputStream");
    }
    toRead -= ret;
    off += ret;
  }
}
3.68
pulsar_OpAddEntry_checkAndCompleteOp
/**
 * Checks if the add-operation is completed.
 *
 * @return true if the task was not already completed, false otherwise.
 */
private boolean checkAndCompleteOp(Object ctx) {
    long addOpCount = (ctx instanceof Long) ? (long) ctx : -1;
    if (addOpCount != -1 && ADD_OP_COUNT_UPDATER.compareAndSet(this, addOpCount, -1)) {
        return true;
    }
    log.info("Add-entry already completed for {}-{}", ledger != null ? ledger.getId() : -1, entryId);
    return false;
}
3.68
hbase_ReplicationPeerConfig_isSyncReplication
/**
 * Uses the remote WAL dir to decide whether a peer is a sync replication peer.
 */
public boolean isSyncReplication() {
  return !StringUtils.isBlank(this.remoteWALDir);
}
3.68
framework_VaadinSession_addUI
/** * Adds an initialized UI to this session. * * @param ui * the initialized UI to add. */ public void addUI(UI ui) { assert hasLock(); if (ui.getUIId() == -1) { throw new IllegalArgumentException( "Can not add an UI that has not been initialized."); } if (ui.getSession() != this) { throw new IllegalArgumentException( "The UI belongs to a different session"); } Integer uiId = Integer.valueOf(ui.getUIId()); uIs.put(uiId, ui); String embedId = ui.getEmbedId(); if (embedId != null) { Integer previousUiId = embedIdMap.put(embedId, uiId); if (previousUiId != null) { UI previousUi = uIs.get(previousUiId); assert previousUi != null && embedId.equals(previousUi .getEmbedId()) : "UI id map and embed id map not in sync"; // Will fire cleanup events at the end of the request handling. previousUi.close(); } } }
3.68
hbase_LogLevel_doGetLevel
/**
 * Send HTTP request to get log level.
 * @throws HadoopIllegalArgumentException if arguments are invalid.
 * @throws Exception if unable to connect
 */
private void doGetLevel() throws Exception {
  process(protocol + "://" + hostName + "/logLevel?log=" + className);
}
3.68
flink_BinaryArrayWriter_complete
/** Finally, complete the write to set the real size on the row. */
@Override
public void complete() {
    array.pointTo(segment, 0, cursor);
}
3.68
pulsar_ManagedLedgerConfig_setMaxBacklogBetweenCursorsForCaching
/**
 * Set the maximum backlog distance between backlogged cursors to avoid caching unused entries.
 *
 * @param maxBacklogBetweenCursorsForCaching
 */
public void setMaxBacklogBetweenCursorsForCaching(int maxBacklogBetweenCursorsForCaching) {
    this.maxBacklogBetweenCursorsForCaching = maxBacklogBetweenCursorsForCaching;
}
3.68
flink_MailboxExecutor_execute
/**
 * Executes the given command at some time in the future in the mailbox thread.
 *
 * <p>An optional description can (and should) be added to ease debugging and error-reporting.
 * The description may contain placeholders that refer to the provided description arguments
 * using {@link java.util.Formatter} syntax. The actual description is only formatted on demand.
 *
 * @param command the runnable task to add to the mailbox for execution.
 * @param description the optional description for the command that is used for debugging and
 *     error-reporting.
 * @throws RejectedExecutionException if this task cannot be accepted for execution, e.g.
 *     because the mailbox is quiesced or closed.
 */
default void execute(ThrowingRunnable<? extends Exception> command, String description) {
    execute(command, description, EMPTY_ARGS);
}
3.68
flink_InPlaceMutableHashTable_freeSegmentsAfterAppendPosition
/**
 * Releases the memory segments that are after the current append position. Note: The
 * situation that there are segments after the current append position can arise from a call
 * to resetAppendPosition().
 */
public void freeSegmentsAfterAppendPosition() {
    final int appendSegmentIndex = (int) (appendPosition >>> segmentSizeBits);
    while (segments.size() > appendSegmentIndex + 1 && !closed) {
        freeMemorySegments.add(segments.get(segments.size() - 1));
        segments.remove(segments.size() - 1);
    }
}
3.68
framework_DateTimeField_setTextFieldEnabled
/**
 * Enables or disables the text field. By default the text field is enabled.
 * Disabling it causes only the button for date selection to be active, thus
 * preventing the user from entering invalid dates.
 *
 * See <a href="http://dev.vaadin.com/ticket/6790">issue 6790</a>.
 *
 * @param state
 *            <b>true</b> to enable text field, <b>false</b> to disable it.
 */
public void setTextFieldEnabled(boolean state) {
    getState().textFieldEnabled = state;
}
3.68
framework_Label_getCustomAttributes
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.ui.AbstractComponent#getCustomAttributes()
 */
@Override
protected Collection<String> getCustomAttributes() {
    Collection<String> result = super.getCustomAttributes();
    result.add("value");
    result.add("content-mode");
    result.add("plain-text");
    return result;
}
3.68
flink_RoundRobinOperatorStateRepartitioner_repartition
/** Repartition all named states. */ private List<Map<StreamStateHandle, OperatorStateHandle>> repartition( GroupByStateNameResults nameToStateByMode, int newParallelism) { // We will use this to merge w.r.t. StreamStateHandles for each parallel subtask inside the // maps List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList = new ArrayList<>(newParallelism); // Initialize for (int i = 0; i < newParallelism; ++i) { mergeMapList.add(new HashMap<>()); } // Start with the state handles we distribute round robin by splitting by offsets Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToDistributeState = nameToStateByMode.getByMode(OperatorStateHandle.Mode.SPLIT_DISTRIBUTE); repartitionSplitState(nameToDistributeState, newParallelism, mergeMapList); // Now we also add the state handles marked for union to all parallel instances Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToUnionState = nameToStateByMode.getByMode(OperatorStateHandle.Mode.UNION); repartitionUnionState(nameToUnionState, mergeMapList); // Now we also add the state handles marked for uniform broadcast to all parallel instances Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToBroadcastState = nameToStateByMode.getByMode(OperatorStateHandle.Mode.BROADCAST); repartitionBroadcastState(nameToBroadcastState, mergeMapList); return mergeMapList; }
3.68
hbase_ProcedureStoreTracker_setDeleted
/**
 * This method is used when restarting where we need to rebuild the ProcedureStoreTracker. The
 * {@link #delete(long)} method above assumes that the {@link BitSetNode} exists, but this is
 * not true on restart, as we read the wal files in reverse order so a delete may come first.
 */
public void setDeleted(long procId, boolean isDeleted) {
  BitSetNode node = getOrCreateNode(procId);
  assert node.contains(procId) : "expected procId=" + procId + " in the node=" + node;
  node.updateState(procId, isDeleted);
  trackProcIds(procId);
}
3.68
hbase_AccessControlUtil_hasPermission
/** * Validates whether specified user has permission to perform actions on the mentioned table, * column family or column qualifier. * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param tableName Table name, it shouldn't be null or empty. * @param columnFamily The column family. Optional argument, can be empty. If empty then * validation will happen at table level. * @param columnQualifier The column qualifier. Optional argument, can be empty. If empty then * validation will happen at table and column family level. columnQualifier * will not be considered if columnFamily is passed as null or empty. * @param userName User name, it shouldn't be null or empty. * @param actions Actions * @return true if access allowed, otherwise false * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead. */ @Deprecated public static boolean hasPermission(RpcController controller, AccessControlService.BlockingInterface protocol, TableName tableName, byte[] columnFamily, byte[] columnQualifier, String userName, Permission.Action[] actions) throws ServiceException { AccessControlProtos.TablePermission.Builder tablePermissionBuilder = AccessControlProtos.TablePermission.newBuilder(); tablePermissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName)); if (Bytes.len(columnFamily) > 0) { tablePermissionBuilder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); } if (Bytes.len(columnQualifier) > 0) { tablePermissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(columnQualifier)); } for (Permission.Action a : actions) { tablePermissionBuilder.addAction(toPermissionAction(a)); } AccessControlProtos.HasPermissionRequest request = AccessControlProtos.HasPermissionRequest .newBuilder().setTablePermission(tablePermissionBuilder) .setUserName(ByteString.copyFromUtf8(userName)).build(); AccessControlProtos.HasPermissionResponse response = protocol.hasPermission(controller, request); return response.getHasPermission(); }
3.68
hadoop_ResourceSet_resourceLocalized
/**
 * Called when a resource is localized.
 * @param request The original request for the localized resource
 * @param location The path where the resource is localized
 * @return The list of symlinks for the localized resources.
 */
public Set<String> resourceLocalized(LocalResourceRequest request, Path location) {
  Set<String> symlinks = pendingResources.remove(request);
  if (symlinks == null) {
    return null;
  } else {
    for (String symlink : symlinks) {
      localizedResources.put(symlink, location);
    }
    return symlinks;
  }
}
3.68
hadoop_HAServiceTarget_isAutoFailoverEnabled
/**
 * @return true if auto failover should be considered enabled
 */
public boolean isAutoFailoverEnabled() {
  return false;
}
3.68
flink_SortUtil_putStringNormalizedKey
/** UTF-8 supports bytes comparison. */
public static void putStringNormalizedKey(
        StringData value, MemorySegment target, int offset, int numBytes) {
    BinaryStringData binaryString = (BinaryStringData) value;
    final int limit = offset + numBytes;
    final int end = binaryString.getSizeInBytes();
    for (int i = 0; i < end && offset < limit; i++) {
        target.put(offset++, binaryString.byteAt(i));
    }
    for (int i = offset; i < limit; i++) {
        target.put(i, (byte) 0);
    }
}
3.68
flink_CliFrontend_getJarFile
/**
 * Gets the JAR file from the path.
 *
 * @param jarFilePath The path of JAR file
 * @return The JAR file
 * @throws FileNotFoundException The JAR file does not exist.
 */
private File getJarFile(String jarFilePath) throws FileNotFoundException {
    File jarFile = new File(jarFilePath);
    // Check if JAR file exists
    if (!jarFile.exists()) {
        throw new FileNotFoundException("JAR file does not exist: " + jarFile);
    } else if (!jarFile.isFile()) {
        throw new FileNotFoundException("JAR file is not a file: " + jarFile);
    }
    return jarFile;
}
3.68
hadoop_Validate_checkValid
/**
 * Validates that the expression (that checks a field is valid) is true.
 * @param isValid indicates whether the given argument is valid.
 * @param argName the name of the argument being validated.
 * @param validValues the list of values that are allowed.
 */
public static void checkValid(boolean isValid, String argName, String validValues) {
  checkArgument(isValid, "'%s' is invalid. Valid values are: %s.", argName, validValues);
}
3.68
graphhopper_VectorTile_getType
/**
 * <pre>
 * The type of geometry stored in this feature.
 * </pre>
 *
 * <code>optional .vector_tile.Tile.GeomType type = 3 [default = UNKNOWN];</code>
 */
public vector_tile.VectorTile.Tile.GeomType getType() {
  @SuppressWarnings("deprecation")
  vector_tile.VectorTile.Tile.GeomType result =
      vector_tile.VectorTile.Tile.GeomType.valueOf(type_);
  return result == null ? vector_tile.VectorTile.Tile.GeomType.UNKNOWN : result;
}
3.68
streampipes_TerminatingBlocksFinder_getInstance
/**
 * Returns the singleton instance for TerminatingBlocksFinder.
 */
public static TerminatingBlocksFinder getInstance() {
  return INSTANCE;
}
3.68
framework_VAbstractCalendarPanel_getForwardKey
/**
 * The key that selects the next day in the calendar. By default this is the
 * right arrow key but by overriding this method it can be changed to
 * whatever you like.
 *
 * @return the forward key
 */
protected int getForwardKey() {
    return KeyCodes.KEY_RIGHT;
}
3.68
hadoop_ContentCounts_getSymlinkCount
// Get the number of symlinks.
public long getSymlinkCount() {
  return contents.get(Content.SYMLINK);
}
3.68
streampipes_InfluxDbClient_extractEvent
// Returns null, if replaceNullValues == false and if in items is a null value // Otherwise it returns a Map containing the runtimenames and the correctly parsed values Map<String, Object> extractEvent(List<Object> items) throws SpRuntimeException { if (items.size() != columns.size()) { throw new SpRuntimeException("Converter: Item list length is not the same as column list length"); } Map<String, Object> out = new HashMap<>(); // First element is the timestamp, which will be converted to milli seconds TemporalAccessor temporalAccessor = DateTimeFormatter.ISO_INSTANT.parse((String) items.get(0)); Instant time = Instant.from(temporalAccessor); out.put("time", time.toEpochMilli()); for (int i = 1; i < items.size(); i++) { // The order of columns and items is the same, because the order in columnsString (which is used for the // query) is based on the order of columns if (items.get(i) != null) { out.put(columns.get(i).getName(), items.get(i)); } else { if (replaceNullValues) { // Replace null values with defaults switch (columns.get(i).getDatatypes()) { case String: out.put(columns.get(i).getName(), ""); break; case Integer: out.put(columns.get(i).getName(), 0); break; case Float: out.put(columns.get(i).getName(), 0.0f); break; case Boolean: out.put(columns.get(i).getName(), false); break; default: throw new SpRuntimeException("Unexpected value: " + columns.get(i).getDatatypes()); } } else { // One field == null is enough to skip this event // Or maybe throw an exception instead? return null; } } } return out; }
3.68
hadoop_ConverterUtils_toApplicationAttemptId
/*
 * This method is deprecated, use {@link ApplicationAttemptId#toString()}
 * instead.
 */
@Public
@Deprecated
public static ApplicationAttemptId toApplicationAttemptId(
    String applicationAttemptIdStr) {
  return ApplicationAttemptId.fromString(applicationAttemptIdStr);
}
3.68
hadoop_DeletedDirTracker_isInDeletedDirectory
/**
 * Probe for a path being deleted by virtue of the fact that an
 * ancestor dir has already been deleted.
 * @param path path to check
 * @return true if the parent dir is deleted.
 */
private boolean isInDeletedDirectory(Path path) {
  Preconditions.checkArgument(!path.isRoot(), "Root Dir");
  return isDirectoryOrAncestorDeleted(path.getParent());
}
3.68
flink_OperatingSystem_isSolaris
/**
 * Checks whether the operating system this JVM runs on is Solaris.
 *
 * @return <code>true</code> if the operating system this JVM runs on is Solaris,
 *     <code>false</code> otherwise
 */
public static boolean isSolaris() {
    return getCurrentOperatingSystem() == SOLARIS;
}
3.68
morf_UpgradeHelper_copySourceSchema
/**
 * Gets the source schema from the {@code database}.
 *
 * @param database the database to connect to.
 * @param dataSource the dataSource to use.
 * @param exceptionRegexes regular expressions for table exclusions.
 * @return the schema.
 */
public static Schema copySourceSchema(ConnectionResources database, DataSource dataSource,
    Collection<String> exceptionRegexes) {
  try (SchemaResource databaseSchemaResource = database.openSchemaResource(dataSource)) {
    return copy(databaseSchemaResource, exceptionRegexes);
  }
}
3.68
hadoop_NMContainerTokenSecretManager_startContainerSuccessful
/** * Container start has gone through. We need to store the containerId in order * to block future container start requests with same container token. This * container token needs to be saved till its container token expires. */ public synchronized void startContainerSuccessful( ContainerTokenIdentifier tokenId) { removeAnyContainerTokenIfExpired(); ContainerId containerId = tokenId.getContainerID(); Long expTime = tokenId.getExpiryTimeStamp(); // We might have multiple containers with same expiration time. if (!recentlyStartedContainerTracker.containsKey(expTime)) { recentlyStartedContainerTracker .put(expTime, new ArrayList<ContainerId>()); } recentlyStartedContainerTracker.get(expTime).add(containerId); try { stateStore.storeContainerToken(containerId, expTime); } catch (IOException e) { LOG.error("Unable to store token for container " + containerId, e); } }
3.68
flink_ConfigOptions_intType
/** Defines that the value of the option should be of {@link Integer} type. */
public TypedConfigOptionBuilder<Integer> intType() {
    return new TypedConfigOptionBuilder<>(key, Integer.class);
}
3.68
hbase_ProxyUserAuthenticationFilter_getDoasFromHeader
/**
 * Gets the doAs parameter of an HTTP request, case-insensitively.
 * @return the doAs parameter if it exists, or null otherwise
 */
public static String getDoasFromHeader(final HttpServletRequest request) {
  String doas = null;
  final Enumeration<String> headers = request.getHeaderNames();
  while (headers.hasMoreElements()) {
    String header = headers.nextElement();
    if (header.toLowerCase().equals("doas")) {
      doas = request.getHeader(header);
      break;
    }
  }
  return doas;
}
3.68
hadoop_EditLogInputStream_scanNextOp
/**
 * Go through the next operation from the stream storage.
 * @return the txid of the next operation.
 */
protected long scanNextOp() throws IOException {
  FSEditLogOp next = readOp();
  return next != null ? next.txid : HdfsServerConstants.INVALID_TXID;
}
3.68
MagicPlugin_SelectorAction_getSelectorOption
// This is mainly here for MagicMeta interrogation
public SelectorConfiguration getSelectorOption(ConfigurationSection section) {
    return new SelectorConfiguration(section);
}
3.68
hadoop_CachingGetSpaceUsed_getRefreshInterval
/**
 * How long in between runs of the background refresh.
 *
 * @return refresh interval.
 */
@VisibleForTesting
public long getRefreshInterval() {
  return refreshInterval;
}
3.68
flink_ZooKeeperUtils_createJobGraphs
/** * Creates a {@link DefaultJobGraphStore} instance with {@link ZooKeeperStateHandleStore}, * {@link ZooKeeperJobGraphStoreWatcher} and {@link ZooKeeperJobGraphStoreUtil}. * * @param client The {@link CuratorFramework} ZooKeeper client to use * @param configuration {@link Configuration} object * @return {@link DefaultJobGraphStore} instance * @throws Exception if the submitted job graph store cannot be created */ public static JobGraphStore createJobGraphs( CuratorFramework client, Configuration configuration) throws Exception { checkNotNull(configuration, "Configuration"); RetrievableStateStorageHelper<JobGraph> stateStorage = createFileSystemStateStorage(configuration, HA_STORAGE_SUBMITTED_JOBGRAPH_PREFIX); // ZooKeeper submitted jobs root dir String zooKeeperJobsPath = configuration.getString(HighAvailabilityOptions.HA_ZOOKEEPER_JOBGRAPHS_PATH); // Ensure that the job graphs path exists client.newNamespaceAwareEnsurePath(zooKeeperJobsPath).ensure(client.getZookeeperClient()); // All operations will have the path as root CuratorFramework facade = client.usingNamespace(client.getNamespace() + zooKeeperJobsPath); final String zooKeeperFullJobsPath = client.getNamespace() + zooKeeperJobsPath; final ZooKeeperStateHandleStore<JobGraph> zooKeeperStateHandleStore = new ZooKeeperStateHandleStore<>(facade, stateStorage); final PathChildrenCache pathCache = new PathChildrenCache(facade, "/", false); return new DefaultJobGraphStore<>( zooKeeperStateHandleStore, new ZooKeeperJobGraphStoreWatcher(pathCache), ZooKeeperJobGraphStoreUtil.INSTANCE); }
3.68
flink_PartitionRequestQueue_notifyRequiredSegmentId
/**
 * Notify the id of required segment from the consumer.
 *
 * @param receiverId The input channel id to identify the consumer.
 * @param segmentId The id of required segment.
 */
void notifyRequiredSegmentId(InputChannelID receiverId, int segmentId) {
    if (fatalError) {
        return;
    }
    NetworkSequenceViewReader reader = allReaders.get(receiverId);
    if (reader != null) {
        reader.notifyRequiredSegmentId(segmentId);
    }
}
3.68
hudi_MarkerDirState_writeMarkerTypeToFile
/** * Writes marker type, "TIMELINE_SERVER_BASED", to file. */ private void writeMarkerTypeToFile() { Path dirPath = new Path(markerDirPath); try { if (!fileSystem.exists(dirPath) || !MarkerUtils.doesMarkerTypeFileExist(fileSystem, markerDirPath)) { // There is no existing marker directory, create a new directory and write marker type fileSystem.mkdirs(dirPath); MarkerUtils.writeMarkerTypeToFile(MarkerType.TIMELINE_SERVER_BASED, fileSystem, markerDirPath); } } catch (IOException e) { throw new HoodieIOException("Failed to write marker type file in " + markerDirPath + ": " + e.getMessage(), e); } }
3.68
hbase_HRegionServer_isHealthy
/* * Verify that server is healthy */ private boolean isHealthy() { if (!dataFsOk) { // File system problem return false; } // Verify that all threads are alive boolean healthy = (this.leaseManager == null || this.leaseManager.isAlive()) && (this.cacheFlusher == null || this.cacheFlusher.isAlive()) && (this.walRoller == null || this.walRoller.isAlive()) && (this.compactionChecker == null || this.compactionChecker.isScheduled()) && (this.periodicFlusher == null || this.periodicFlusher.isScheduled()); if (!healthy) { stop("One or more threads are no longer alive -- stop"); } return healthy; }
3.68
hudi_ExpressionPredicates_bindPredicate
/**
 * Binds predicate to create a NOT predicate.
 *
 * @param predicate The predicate to negate.
 * @return A NOT predicate.
 */
public Predicate bindPredicate(Predicate predicate) {
  this.predicate = predicate;
  return this;
}
3.68
flink_CatalogManager_getTableOrError
/**
 * Like {@link #getTable(ObjectIdentifier)}, but throws an error when the table is not available
 * in any of the catalogs.
 */
public ContextResolvedTable getTableOrError(ObjectIdentifier objectIdentifier) {
    return getTable(objectIdentifier)
            .orElseThrow(
                    () ->
                            new TableException(
                                    String.format(
                                            "Cannot find table '%s' in any of the catalogs %s, nor as a temporary table.",
                                            objectIdentifier, listCatalogs())));
}
3.68
hbase_HFileContext_isCompressedOrEncrypted
/** Returns true when on-disk blocks are compressed, and/or encrypted; false otherwise. */
public boolean isCompressedOrEncrypted() {
  Compression.Algorithm compressAlgo = getCompression();
  boolean compressed = compressAlgo != null && compressAlgo != Compression.Algorithm.NONE;

  Encryption.Context cryptoContext = getEncryptionContext();
  boolean encrypted = cryptoContext != null && cryptoContext != Encryption.Context.NONE;

  return compressed || encrypted;
}
3.68
flink_HashBasedDataBuffer_append
/** * Partial data of the target record can be written if this {@link HashBasedDataBuffer} is full. * The remaining data of the target record will be written to the next data region (a new data * buffer or this data buffer after reset). */ @Override public boolean append(ByteBuffer source, int targetChannel, Buffer.DataType dataType) throws IOException { checkArgument(source.hasRemaining(), "Cannot append empty data."); checkState(!isFinished, "Sort buffer is already finished."); checkState(!isReleased, "Sort buffer is already released."); int totalBytes = source.remaining(); if (dataType.isBuffer()) { writeRecord(source, targetChannel); } else { writeEvent(source, targetChannel, dataType); } if (source.hasRemaining()) { return true; } ++numTotalRecords; numTotalBytes += totalBytes - source.remaining(); return false; }
3.68
dubbo_GovernanceRuleRepository_getRule
/**
 * Get the governance rule mapped to the given key and the given group
 *
 * @param key the key to represent a configuration
 * @param group the group where the key belongs to
 * @return target configuration mapped to the given key and the given group
 */
default String getRule(String key, String group) {
    return getRule(key, group, -1L);
}
3.68
framework_GeneratedPropertyContainer_getWrappedItem
/**
 * Returns the wrapped Item that belongs to the wrapped container.
 *
 * @return wrapped item.
 * @since 7.6.8
 */
public Item getWrappedItem() {
    return wrappedItem;
}
3.68
hadoop_LambdaUtils_eval
/** * Utility method to evaluate a callable and fill in the future * with the result or the exception raised. * Once this method returns, the future will have been evaluated to * either a return value or an exception. * @param <T> type of future * @param result future for the result. * @param call callable to invoke. * @return the future passed in */ public static <T> CompletableFuture<T> eval( final CompletableFuture<T> result, final Callable<T> call) { try { result.complete(call.call()); } catch (Throwable tx) { result.completeExceptionally(tx); } return result; }
3.68
hbase_RemoteWithExtrasException_isServerOverloaded
/** Returns True if the server was considered overloaded when the exception was thrown. */
public boolean isServerOverloaded() {
  return serverOverloaded;
}
3.68
incubator-hugegraph-toolchain_FailLogger_writeHeaderIfNeeded
/**
 * Write the header to a dedicated file; every input struct has one.
 */
private void writeHeaderIfNeeded() {
    // header() == null means no header is needed
    if (this.struct.input().header() == null) {
        return;
    }
    String header = JsonUtil.toJson(this.struct.input().header());
    /*
     * The files under failure path are like:
     * mapping/failure-data/input-1.header
     */
    String fileName = this.struct.id() + Constants.HEADER_SUFFIX;
    String filePath = Paths.get(this.file.getParent(), fileName).toString();
    File headerFile = new File(filePath);
    String charset = this.struct.input().charset();
    try {
        FileUtils.writeStringToFile(headerFile, header, charset);
    } catch (IOException e) {
        throw new LoadException("Failed to write header '%s'", e);
    }
}
3.68
hadoop_Paths_getParent
/**
 * Get the parent path of a string path: everything up to but excluding
 * the last "/" in the path.
 * @param pathStr path as a string
 * @return the parent or null if there is no parent.
 */
public static String getParent(String pathStr) {
  int lastSlash = pathStr.lastIndexOf('/');
  if (lastSlash >= 0) {
    return pathStr.substring(0, lastSlash);
  }
  return null;
}
3.68
framework_VAbstractOrderedLayout_setLayoutManager
/**
 * Set the layout manager for the layout.
 *
 * @param manager
 *            The layout manager to use
 */
public void setLayoutManager(LayoutManager manager) {
    layoutManager = manager;
}
3.68
hadoop_HSAuditLogger_addRemoteIP
/**
 * A helper api to add remote IP address
 */
static void addRemoteIP(StringBuilder b) {
  InetAddress ip = Server.getRemoteIp();
  // ip address can be null for testcases
  if (ip != null) {
    add(Keys.IP, ip.getHostAddress(), b);
  }
}
3.68
flink_WindowsGrouping_getTriggerWindow
/** @return the last triggered window. */
public TimeWindow getTriggerWindow() {
    return currentWindow;
}
3.68
hbase_Import_convertKv
// helper: create a new KeyValue based on CF rename map private static Cell convertKv(Cell kv, Map<byte[], byte[]> cfRenameMap) { if (cfRenameMap != null) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { List<Tag> tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length newCfName, // CF buffer 0, // CF offset newCfName.length, // CF length kv.getQualifierArray(), // qualifier buffer kv.getQualifierOffset(), // qualifier offset kv.getQualifierLength(), // qualifier length kv.getTimestamp(), // timestamp KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset kv.getValueLength(), // value length tags.size() == 0 ? null : tags); } } return kv; }
3.68
hudi_CleanerUtils_getCleanerMetadata
/**
 * Get Latest Version of Hoodie Cleaner Metadata - Output of cleaner operation.
 * @return Latest version of Clean metadata corresponding to clean instant
 * @throws IOException
 */
public static HoodieCleanMetadata getCleanerMetadata(HoodieTableMetaClient metaClient, byte[] details)
    throws IOException {
  CleanMetadataMigrator metadataMigrator = new CleanMetadataMigrator(metaClient);
  HoodieCleanMetadata cleanMetadata = TimelineMetadataUtils.deserializeHoodieCleanMetadata(details);
  return metadataMigrator.upgradeToLatest(cleanMetadata, cleanMetadata.getVersion());
}
3.68
AreaShop_FileManager_isSaveGroupsRequired
/**
 * Check if saving the groups file is required.
 * @return true if changes are made and saving is required, otherwise false
 */
public boolean isSaveGroupsRequired() {
    return saveGroupsRequired;
}
3.68
hudi_AvroSchemaCompatibility_compatible
/**
 * Returns a details object representing a compatible schema pair.
 *
 * @return a SchemaCompatibilityDetails object with COMPATIBLE
 *         SchemaCompatibilityType, and no other state.
 */
public static SchemaCompatibilityResult compatible() {
  return COMPATIBLE;
}
3.68
hadoop_JsonSerialization_writer
/**
 * @return an ObjectWriter which pretty-prints its output
 */
public static ObjectWriter writer() {
  return WRITER;
}
3.68
hbase_QuotaTableUtil_makeQuotaSnapshotScan
/**
 * Creates a {@link Scan} which returns only quota snapshots from the quota table.
 */
public static Scan makeQuotaSnapshotScan() {
  return makeQuotaSnapshotScanForTable(null);
}
3.68
flink_NFA_advanceTime
/** * Prunes states assuming there will be no events with timestamp <b>lower</b> than the given * one. It clears the sharedBuffer and also emits all timed out partial matches. * * @param sharedBufferAccessor the accessor to SharedBuffer object that we need to work upon * while processing * @param nfaState The NFAState object that we need to affect while processing * @param timestamp timestamp that indicates that there will be no more events with lower * timestamp * @return all pending matches and timed outed partial matches * @throws Exception Thrown if the system cannot access the state. */ public Tuple2<Collection<Map<String, List<T>>>, Collection<Tuple2<Map<String, List<T>>, Long>>> advanceTime( final SharedBufferAccessor<T> sharedBufferAccessor, final NFAState nfaState, final long timestamp, final AfterMatchSkipStrategy afterMatchSkipStrategy) throws Exception { final List<Map<String, List<T>>> result = new ArrayList<>(); final Collection<Tuple2<Map<String, List<T>>, Long>> timeoutResult = new ArrayList<>(); final PriorityQueue<ComputationState> newPartialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR); final PriorityQueue<ComputationState> potentialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR); for (ComputationState computationState : nfaState.getPartialMatches()) { String currentStateName = computationState.getCurrentStateName(); boolean isTimeoutForPreviousEvent = windowTimes.containsKey(currentStateName) && isStateTimedOut( computationState, timestamp, computationState.getPreviousTimestamp(), windowTimes.get(currentStateName)); boolean isTimeoutForFirstEvent = isStateTimedOut( computationState, timestamp, computationState.getStartTimestamp(), windowTime); if (isTimeoutForPreviousEvent || isTimeoutForFirstEvent) { nfaState.setStateChanged(); if (getState(computationState).isPending()) { // save pending states for after-match pruning, where those states will be // released potentialMatches.add(computationState); continue; } if (handleTimeout) { // extract the timed out event pattern Map<String, List<T>> timedOutPattern = sharedBufferAccessor.materializeMatch( extractCurrentMatches(sharedBufferAccessor, computationState)); timeoutResult.add( Tuple2.of( timedOutPattern, isTimeoutForPreviousEvent ? computationState.getPreviousTimestamp() + windowTimes.get( computationState.getCurrentStateName()) : computationState.getStartTimestamp() + windowTime)); } // release timeout states sharedBufferAccessor.releaseNode( computationState.getPreviousBufferEntry(), computationState.getVersion()); } else { newPartialMatches.add(computationState); } } // If a timeout partial match "frees" some completed matches // Or if completed not-followed-by matches need pruning processMatchesAccordingToSkipStrategy( sharedBufferAccessor, nfaState, afterMatchSkipStrategy, potentialMatches, newPartialMatches, result); nfaState.setNewPartialMatches(newPartialMatches); sharedBufferAccessor.advanceTime(timestamp); return Tuple2.of(result, timeoutResult); }
3.68
framework_Slot_getContainerElement
/*
 * (non-Javadoc)
 *
 * @see com.google.gwt.user.client.ui.SimplePanel#getContainerElement()
 */
@SuppressWarnings("deprecation")
@Override
protected com.google.gwt.user.client.Element getContainerElement() {
    if (captionWrap == null) {
        return getElement();
    } else {
        return DOM.asOld(captionWrap);
    }
}
3.68
framework_PanelConnector_hasCaption
/**
 * Detects if caption div should be visible.
 *
 * @return {@code true} if caption div should be shown
 */
protected boolean hasCaption() {
    return getState().caption != null && !getState().caption.isEmpty();
}
3.68
flink_ColumnOperationUtils_dropFields
/** * Creates a projection list that removes given columns. * * <p><b>NOTE:</b> Resulting expression are still unresolved. * * @param inputFields names of current columns * @param dropExpressions columns to remove * @return projection expressions */ static List<Expression> dropFields(List<String> inputFields, List<Expression> dropExpressions) { Set<String> columnsToDrop = dropExpressions.stream() .map(expr -> expr.accept(dropColumnsExtractor)) .collect(Collectors.toSet()); columnsToDrop.forEach( c -> { if (!inputFields.contains(c)) { throw new ValidationException( format("Field %s does not exist in source table", c)); } }); return inputFields.stream() .filter(oldName -> !columnsToDrop.contains(oldName)) .map(ApiExpressionUtils::unresolvedRef) .collect(Collectors.toList()); }
3.68
framework_MenuBar_getPreferredHeight
/**
 * Gets the preferred height of the menu.
 *
 * @since 7.2.6
 *
 * @return the preferred height
 */
protected int getPreferredHeight() {
    return table.getOffsetHeight();
}
3.68
flink_FlinkDatabaseMetaData_nullPlusNonNullIsNull
/** In Flink, a null value plus a non-null value yields a null result. */
@Override
public boolean nullPlusNonNullIsNull() throws SQLException {
    return true;
}
3.68
hibernate-validator_ModCheckBase_extractDigits
/** * Parses the {@link String} value as a {@link List} of {@link Integer} objects * * @param value the input string to be parsed * * @return List of {@code Integer} objects. * * @throws NumberFormatException in case any of the characters is not a digit */ private List<Integer> extractDigits(final String value) throws NumberFormatException { List<Integer> digits = new ArrayList<Integer>( value.length() ); char[] chars = value.toCharArray(); for ( char c : chars ) { digits.add( extractDigit( c ) ); } return digits; }
3.68
flink_FileChannelMemoryMappedBoundedData_createWithRegionSize
/**
 * Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given
 * path. Each mapped region (= ByteBuffer) will be of the given size.
 */
public static FileChannelMemoryMappedBoundedData createWithRegionSize(
        Path memMappedFilePath, int regionSize) throws IOException {
    checkNotNull(memMappedFilePath, "memMappedFilePath");
    checkArgument(regionSize > 0, "region size must be > 0");

    final FileChannel fileChannel =
            FileChannel.open(
                    memMappedFilePath,
                    StandardOpenOption.READ,
                    StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE_NEW);

    return new FileChannelMemoryMappedBoundedData(memMappedFilePath, fileChannel, regionSize);
}
3.68
flink_PackagingTestUtils_assertJarContainsServiceEntry
/** * Verifies that the given jar contains a service entry file for the given service. * * <p>Caution: This only checks that the file exists; the content is not verified. */ public static void assertJarContainsServiceEntry(Path jarPath, Class<?> service) throws Exception { final URI jar = jarPath.toUri(); try (final FileSystem fileSystem = FileSystems.newFileSystem( new URI("jar:file", jar.getHost(), jar.getPath(), jar.getFragment()), Collections.emptyMap())) { assertThat(fileSystem.getPath("META-INF", "services", service.getName())).exists(); } }
3.68
hadoop_MountTableProcedure_updateMountTableDestination
/** * Update the destination of the mount point to target namespace and target * path. * * @param mount the mount point. * @param dstNs the target namespace. * @param dstPath the target path * @param conf the configuration of the router. */ private static void updateMountTableDestination(String mount, String dstNs, String dstPath, Configuration conf) throws IOException { String address = conf.getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT); InetSocketAddress routerSocket = NetUtils.createSocketAddr(address); RouterClient rClient = new RouterClient(routerSocket, conf); try { MountTableManager mountTable = rClient.getMountTableManager(); MountTable originalEntry = getMountEntry(mount, mountTable); if (originalEntry == null) { throw new IOException("Mount table " + mount + " doesn't exist"); } else { RemoteLocation remoteLocation = new RemoteLocation(dstNs, dstPath, mount); originalEntry.setDestinations(Arrays.asList(remoteLocation)); UpdateMountTableEntryRequest updateRequest = UpdateMountTableEntryRequest.newInstance(originalEntry); UpdateMountTableEntryResponse response = mountTable.updateMountTableEntry(updateRequest); if (!response.getStatus()) { throw new IOException("Failed update mount table " + mount); } rClient.getMountTableManager().refreshMountTableEntries( RefreshMountTableEntriesRequest.newInstance()); } } finally { rClient.close(); } }
3.68
flink_FieldParser_delimiterNext
/**
 * Checks if the delimiter starts at the given start position of the byte array.
 *
 * <p>Attention: This method assumes that enough characters follow the start position for the
 * delimiter check!
 *
 * @param bytes The byte array that holds the value.
 * @param startPos The index of the byte array where the check for the delimiter starts.
 * @param delim The delimiter to check for.
 * @return true if a delimiter starts at the given start position, false otherwise.
 */
public static final boolean delimiterNext(byte[] bytes, int startPos, byte[] delim) {
    for (int pos = 0; pos < delim.length; pos++) {
        // check each position
        if (delim[pos] != bytes[startPos + pos]) {
            return false;
        }
    }
    return true;
}
3.68
framework_DefaultDeploymentConfiguration_checkXsrfProtection
/**
 * Log a warning if cross-site request forgery protection is disabled.
 */
private void checkXsrfProtection() {
    xsrfProtectionEnabled = !getApplicationOrSystemProperty(
            Constants.SERVLET_PARAMETER_DISABLE_XSRF_PROTECTION, "false")
                    .equals("true");
    if (!xsrfProtectionEnabled) {
        getLogger().warning(Constants.WARNING_XSRF_PROTECTION_DISABLED);
    }
}
3.68
hbase_ClientMetaTableAccessor_getTableRegionsAndLocations
/** * Used to get table regions' info and server. * @param metaTable scanner over meta table * @param tableName table we're looking for, can be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return the list of regioninfos and server. The return value will be wrapped by a * {@link CompletableFuture}. */ private static CompletableFuture<List<Pair<RegionInfo, ServerName>>> getTableRegionsAndLocations( final AsyncTable<AdvancedScanResultConsumer> metaTable, final TableName tableName, final boolean excludeOfflinedSplitParents) { CompletableFuture<List<Pair<RegionInfo, ServerName>>> future = new CompletableFuture<>(); if (TableName.META_TABLE_NAME.equals(tableName)) { future.completeExceptionally(new IOException( "This method can't be used to locate meta regions;" + " use MetaTableLocator instead")); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress CollectRegionLocationsVisitor visitor = new CollectRegionLocationsVisitor(excludeOfflinedSplitParents); addListener(scanMeta(metaTable, tableName, QueryType.REGION, visitor), (v, error) -> { if (error != null) { future.completeExceptionally(error); return; } future.complete(visitor.getResults()); }); return future; }
3.68
hadoop_TaskAttemptFailEvent_isFastFail
/**
 * Check if task should fast fail or retry
 * @return boolean value where true indicates the task should not retry
 */
public boolean isFastFail() {
  return fastFail;
}
3.68
hbase_HRegion_doWALAppend
/** Returns writeEntry associated with this append */ private WriteEntry doWALAppend(WALEdit walEdit, BatchOperation<?> batchOp, MiniBatchOperationInProgress<Mutation> miniBatchOp, long now, NonceKey nonceKey) throws IOException { Preconditions.checkArgument(walEdit != null && !walEdit.isEmpty(), "WALEdit is null or empty!"); Preconditions.checkArgument( !walEdit.isReplay() || batchOp.getOrigLogSeqNum() != SequenceId.NO_SEQUENCE_ID, "Invalid replay sequence Id for replay WALEdit!"); WALKeyImpl walKey = createWALKeyForWALAppend(walEdit.isReplay(), batchOp, now, nonceKey.getNonceGroup(), nonceKey.getNonce()); // don't call the coproc hook for writes to the WAL caused by // system lifecycle events like flushes or compactions if (this.coprocessorHost != null && !walEdit.isMetaEdit()) { this.coprocessorHost.preWALAppend(walKey, walEdit); } try { long txid = this.wal.appendData(this.getRegionInfo(), walKey, walEdit); WriteEntry writeEntry = walKey.getWriteEntry(); // Call sync on our edit. if (txid != 0) { sync(txid, batchOp.durability); } /** * If above {@link HRegion#sync} throws Exception, the RegionServer should be aborted and * following {@link BatchOperation#writeMiniBatchOperationsToMemStore} will not be executed, * so there is no need to replicate to secondary replica, for this reason here we attach the * region replication action after the {@link HRegion#sync} is successful. */ this.attachRegionReplicationInWALAppend(batchOp, miniBatchOp, walKey, walEdit, writeEntry); return writeEntry; } catch (IOException ioe) { if (walKey.getWriteEntry() != null) { mvcc.complete(walKey.getWriteEntry()); } /** * If {@link WAL#sync} get a timeout exception, the only correct way is to abort the region * server, as the design of {@link WAL#sync}, is to succeed or die, there is no 'failure'. It * is usually not a big deal is because we set a very large default value(5 minutes) for * {@link AbstractFSWAL#WAL_SYNC_TIMEOUT_MS}, usually the WAL system will abort the region * server if it can not finish the sync within 5 minutes. */ if (ioe instanceof WALSyncTimeoutIOException) { if (rsServices != null) { rsServices.abort("WAL sync timeout,forcing server shutdown", ioe); } } throw ioe; } }
3.68
flink_JobVertex_setStrictlyCoLocatedWith
/** * Tells this vertex to strictly co locate its subtasks with the subtasks of the given vertex. * Strict co-location implies that the n'th subtask of this vertex will run on the same parallel * computing instance (TaskManager) as the n'th subtask of the given vertex. * * <p>NOTE: Co-location is only possible between vertices in a slot sharing group. * * <p>NOTE: This vertex must (transitively) depend on the vertex to be co-located with. That * means that the respective vertex must be a (transitive) input of this vertex. * * @param strictlyCoLocatedWith The vertex whose subtasks to co-locate this vertex's subtasks * with. * @throws IllegalArgumentException Thrown, if this vertex and the vertex to co-locate with are * not in a common slot sharing group. * @see #setSlotSharingGroup(SlotSharingGroup) */ public void setStrictlyCoLocatedWith(JobVertex strictlyCoLocatedWith) { if (this.slotSharingGroup == null || this.slotSharingGroup != strictlyCoLocatedWith.slotSharingGroup) { throw new IllegalArgumentException( "Strict co-location requires that both vertices are in the same slot sharing group."); } CoLocationGroupImpl thisGroup = this.coLocationGroup; CoLocationGroupImpl otherGroup = strictlyCoLocatedWith.coLocationGroup; if (otherGroup == null) { if (thisGroup == null) { CoLocationGroupImpl group = new CoLocationGroupImpl(this, strictlyCoLocatedWith); this.coLocationGroup = group; strictlyCoLocatedWith.coLocationGroup = group; } else { thisGroup.addVertex(strictlyCoLocatedWith); strictlyCoLocatedWith.coLocationGroup = thisGroup; } } else { if (thisGroup == null) { otherGroup.addVertex(this); this.coLocationGroup = otherGroup; } else { // both had yet distinct groups, we need to merge them thisGroup.mergeInto(otherGroup); } } }
3.68
hbase_MasterProcedureUtil_unwrapRemoteIOException
/**
 * This is a version of unwrapRemoteIOException that can do DoNotRetryIOE. We need to throw a
 * DNRIOE to clients when a Procedure has failed, else they will keep retrying. The default
 * proc.getException().unwrapRemoteException doesn't have access to DNRIOE from the procedure2
 * module.
 */
public static IOException unwrapRemoteIOException(Procedure<?> proc) {
  Exception e = proc.getException().unwrapRemoteException();
  // Do not retry ProcedureExceptions!
  return (e instanceof ProcedureException)
    ? new DoNotRetryIOException(e)
    : proc.getException().unwrapRemoteIOException();
}
3.68
hbase_WAL_getKey
/**
 * Gets the key
 */
public WALKeyImpl getKey() {
  return key;
}
3.68
framework_PasswordFieldElement_getValue
/**
 * Return value of the password element.
 *
 * @since 8.0
 * @return value of the password element
 */
@Override
public String getValue() {
    return getAttribute("value");
}
3.68
flink_PrioritizedOperatorSubtaskState_tryComputeMixedLocalAndRemoteAlternative
/** * This method creates an alternative recovery option by replacing as much job manager state * with higher prioritized (=local) alternatives as possible. Returns empty Optional if the * JM state is empty or nothing could be replaced. * * @param jobManagerState the state that the task got assigned from the job manager (this * state lives in remote storage). * @param alternativesByPriority local alternatives to the job manager state, ordered by * priority. * @param identityExtractor function to extract an identifier from a state object. * @return A state collection where all JM state handles for which we could find local * * alternatives are replaced by the alternative with the highest priority. Empty * optional if no state could be replaced. * @param <STATE_OBJ_TYPE> the type of the state objects we process. * @param <ID_TYPE> the type of object that represents the id the state object type. */ static <STATE_OBJ_TYPE extends StateObject, ID_TYPE> Optional<StateObjectCollection<STATE_OBJ_TYPE>> tryComputeMixedLocalAndRemoteAlternative( StateObjectCollection<STATE_OBJ_TYPE> jobManagerState, List<StateObjectCollection<STATE_OBJ_TYPE>> alternativesByPriority, Function<STATE_OBJ_TYPE, ID_TYPE> identityExtractor) { List<STATE_OBJ_TYPE> result = Collections.emptyList(); // Build hash index over ids of the JM state Map<ID_TYPE, STATE_OBJ_TYPE> indexById = jobManagerState.stream() .collect(Collectors.toMap(identityExtractor, Function.identity())); // Move through all alternative in order from high to low priority for (StateObjectCollection<STATE_OBJ_TYPE> alternative : alternativesByPriority) { // Check all the state objects in the alternative if they can replace JM state for (STATE_OBJ_TYPE stateHandle : alternative) { // Remove the current state object's id from the index to check for a match if (indexById.remove(identityExtractor.apply(stateHandle)) != null) { if (result.isEmpty()) { // Lazy init result collection result = new ArrayList<>(jobManagerState.size()); } // If the id was still in the index, replace with higher prio alternative result.add(stateHandle); // If the index is empty we are already done, all JM state was replaces with // the best alternative. if (indexById.isEmpty()) { return Optional.of(new StateObjectCollection<>(result)); } } } } // Nothing useful to return if (result.isEmpty()) { return Optional.empty(); } // Add all remaining JM state objects that we could not replace from the index to the // final result result.addAll(indexById.values()); return Optional.of(new StateObjectCollection<>(result)); }
3.68
hudi_SpillableMapUtils_spillToDisk
/**
 * Write Value and other metadata necessary to disk. Each entry has the following sequence of data
 * <p>
 * |crc|timestamp|sizeOfKey|SizeOfValue|key|value|
 */
public static long spillToDisk(SizeAwareDataOutputStream outputStream, FileEntry fileEntry) throws IOException {
  return spill(outputStream, fileEntry);
}
3.68
hadoop_UserDefinedValueAggregatorDescriptor_toString
/**
 * @return the string representation of this object.
 */
public String toString() {
  return "UserDefinedValueAggregatorDescriptor with class name:" + "\t" + this.className;
}
3.68
flink_KeyGroupPartitioner_buildHistogramByAccumulatingCounts
/**
 * This method creates a histogram from the counts per key-group in {@link #counterHistogram}.
 */
private int buildHistogramByAccumulatingCounts() {
    int sum = 0;
    for (int i = 0; i < counterHistogram.length; ++i) {
        int currentSlotValue = counterHistogram[i];
        counterHistogram[i] = sum;
        sum += currentSlotValue;
    }
    return sum;
}
3.68
hudi_HoodieTableMetadataUtil_convertMetadataToFilesPartitionRecords
/** * Finds all files that were deleted as part of a clean and creates metadata table records for them. * * @param cleanMetadata * @param instantTime * @return a list of metadata table records */ public static List<HoodieRecord> convertMetadataToFilesPartitionRecords(HoodieCleanMetadata cleanMetadata, String instantTime) { List<HoodieRecord> records = new LinkedList<>(); int[] fileDeleteCount = {0}; List<String> deletedPartitions = new ArrayList<>(); cleanMetadata.getPartitionMetadata().forEach((partitionName, partitionMetadata) -> { final String partition = getPartitionIdentifier(partitionName); // Files deleted from a partition List<String> deletedFiles = partitionMetadata.getDeletePathPatterns(); HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, Collections.emptyMap(), deletedFiles); records.add(record); fileDeleteCount[0] += deletedFiles.size(); boolean isPartitionDeleted = partitionMetadata.getIsPartitionDeleted(); if (isPartitionDeleted) { deletedPartitions.add(partitionName); } }); if (!deletedPartitions.isEmpty()) { // if there are partitions to be deleted, add them to delete list records.add(HoodieMetadataPayload.createPartitionListRecord(deletedPartitions, true)); } LOG.info("Updating at " + instantTime + " from Clean. #partitions_updated=" + records.size() + ", #files_deleted=" + fileDeleteCount[0] + ", #partitions_deleted=" + deletedPartitions.size()); return records; }
3.68
flink_TimeWindowUtil_isWindowFired
/**
 * Returns whether the window should fire at the current progress.
 *
 * @param windowEnd the end of the time window.
 * @param currentProgress current progress of the window operator; it is processing time under
 *     proctime and the watermark value under rowtime.
 * @param shiftTimeZone the shifted timezone of the time window.
 */
public static boolean isWindowFired(
        long windowEnd, long currentProgress, ZoneId shiftTimeZone) {
    // Long.MAX_VALUE is a flag of min window end, directly return false
    if (windowEnd == Long.MAX_VALUE) {
        return false;
    }
    long windowTriggerTime = toEpochMillsForTimer(windowEnd - 1, shiftTimeZone);
    return currentProgress >= windowTriggerTime;
}
3.68
hbase_StoreFileInfo_getPath
/** Returns The {@link Path} of the file */
public Path getPath() {
  return initialPath;
}
3.68
flink_EncodingUtils_decodeHex
/** * Converts an array of characters representing hexadecimal values into an array of bytes of * those same values. The returned array will be half the length of the passed array, as it * takes two characters to represent any given byte. An exception is thrown if the passed char * array has an odd number of elements. * * <p>Copied from * https://github.com/apache/commons-codec/blob/master/src/main/java/org/apache/commons/codec/binary/Hex.java. * * @param str An array of characters containing hexadecimal digits * @return A byte array to contain the binary data decoded from the supplied char array. * @throws TableException Thrown if an odd number of characters or illegal characters are * supplied */ public static byte[] decodeHex(final String str) throws TableException { final int len = str.length(); if ((len & 0x01) != 0) { throw new TableException("Odd number of characters."); } final int outLen = len >> 1; final byte[] out = new byte[outLen]; // two characters form the hex value. for (int i = 0, j = 0; j < len; i++) { int f = toDigit(str.charAt(j), j) << 4; j++; f = f | toDigit(str.charAt(j), j); j++; out[i] = (byte) (f & 0xFF); } return out; }
3.68
druid_MySqlOutputVisitor_visit
/** * visit procedure create node */ @Override public boolean visit(SQLCreateProcedureStatement x) { if (x.isOrReplace()) { print0(ucase ? "CREATE OR REPLACE PROCEDURE " : "create or replace procedure "); } else { print0(ucase ? "CREATE PROCEDURE " : "create procedure "); } x.getName().accept(this); int paramSize = x.getParameters().size(); print0(" ("); if (paramSize > 0) { this.indentCount++; println(); for (int i = 0; i < paramSize; ++i) { if (i != 0) { print0(", "); println(); } SQLParameter param = x.getParameters().get(i); param.accept(this); } this.indentCount--; println(); } print(')'); if (x.getComment() != null) { println(); print(ucase ? "COMMENT " : "comment "); x.getComment().accept(this); } if (x.isDeterministic()) { println(); print(ucase ? "DETERMINISTIC" : "deterministic"); } if (x.isContainsSql()) { println(); print0(ucase ? "CONTAINS SQL" : "contains sql"); } if (x.isLanguageSql()) { println(); print0(ucase ? "LANGUAGE SQL" : "language sql"); } if (x.isNoSql()) { println(); print(ucase ? "NO SQL" : "no sql"); } if (x.isModifiesSqlData()) { println(); print(ucase ? "MODIFIES SQL DATA" : "modifies sql data"); } SQLName authid = x.getAuthid(); if (authid != null) { println(); print(ucase ? "SQL SECURITY " : "sql security "); authid.accept(this); } println(); x.getBlock().accept(this); return false; }
3.68
framework_Table_sort
/** * Sorts the table by currently selected sorting column. * * @throws UnsupportedOperationException * if the container data source does not implement * Container.Sortable */ public void sort() { if (getSortContainerPropertyId() == null) { return; } sort(new Object[] { sortContainerPropertyId }, new boolean[] { sortAscending }); } /** * Gets the container property IDs, which can be used to sort the item. * <p> * Note that the {@link #isSortEnabled()}
3.68
hbase_HFileReaderImpl_getComparator
/** Returns comparator */
@Override
public CellComparator getComparator() {
  return this.hfileContext.getCellComparator();
}
3.68
hbase_WALPrettyPrinter_setRowFilter
/**
 * Sets the row key by which output will be filtered. When not null, it serves as a filter; only
 * log entries from this row will be printed.
 */
public void setRowFilter(String row) {
  this.row = row;
}
3.68
cron-utils_FieldSpecialCharsDefinitionBuilder_withIntMapping
/**
 * Defines mapping between integer values with equivalent meaning.
 *
 * @param source - higher value
 * @param dest - lower value with equivalent meaning to source
 * @return this FieldSpecialCharsDefinitionBuilder instance
 */
@Override
public FieldSpecialCharsDefinitionBuilder withIntMapping(final int source, final int dest) {
    super.withIntMapping(source, dest);
    return this;
}
3.68