Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68).
hbase_HMaster_createServerManager
/** * <p> * Create a {@link ServerManager} instance. * </p> * <p> * Will be overridden in tests. * </p> */ @InterfaceAudience.Private protected ServerManager createServerManager(MasterServices master, RegionServerList storage) throws IOException { // We put this out here in a method so we can do a Mockito.spy and stub it out // w/ a mocked up ServerManager. setupClusterConnection(); return new ServerManager(master, storage); }
3.68
flink_ExternalSorter_newBuilder
/** Creates a builder for the {@link ExternalSorter}. */ public static <E> ExternalSorterBuilder<E> newBuilder( MemoryManager memoryManager, AbstractInvokable parentTask, TypeSerializer<E> serializer, TypeComparator<E> comparator) { return newBuilder( checkNotNull(memoryManager), checkNotNull(parentTask), checkNotNull(serializer), checkNotNull(comparator), parentTask.getExecutionConfig()); }
3.68
hbase_SaslServerAuthenticationProviders_createProviders
/** * Loads server authentication providers from the classpath and configuration, and then creates * the SaslServerAuthenticationProviders instance. */ static SaslServerAuthenticationProviders createProviders(Configuration conf) { ServiceLoader<SaslServerAuthenticationProvider> loader = ServiceLoader.load(SaslServerAuthenticationProvider.class); HashMap<Byte, SaslServerAuthenticationProvider> providers = new HashMap<>(); for (SaslServerAuthenticationProvider provider : loader) { addProviderIfNotExists(provider, providers); } addExtraProviders(conf, providers); if (LOG.isTraceEnabled()) { String loadedProviders = providers.values().stream() .map((provider) -> provider.getClass().getName()).collect(Collectors.joining(", ")); if (loadedProviders.isEmpty()) { loadedProviders = "None!"; } LOG.trace("Found SaslServerAuthenticationProviders {}", loadedProviders); } // Initialize the providers once, before we get into the RPC path. providers.forEach((b, provider) -> { try { // Give them a copy, just to make sure there is no funny-business going on. provider.init(new Configuration(conf)); } catch (IOException e) { LOG.error("Failed to initialize {}", provider.getClass(), e); throw new RuntimeException("Failed to initialize " + provider.getClass().getName(), e); } }); return new SaslServerAuthenticationProviders(conf, providers); }
3.68
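Aside: the ServiceLoader discovery used above can be shown in isolation. A minimal sketch with a hypothetical Greeter service interface standing in for SaslServerAuthenticationProvider; implementations are picked up from META-INF/services entries on the classpath:

import java.util.ServiceLoader;

public class ServiceLoaderDemo {
    // Hypothetical service interface; implementations are registered in a
    // META-INF/services file named after this interface's fully qualified name.
    public interface Greeter {
        String greet();
    }

    public static void main(String[] args) {
        ServiceLoader<Greeter> loader = ServiceLoader.load(Greeter.class);
        for (Greeter g : loader) { // iterates every provider found on the classpath
            System.out.println(g.getClass().getName() + ": " + g.greet());
        }
    }
}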
streampipes_FileManager_storeFile
/** * Stores a file in the internal file storage. * For CSV files the BOM is removed. * * @param user who created the file * @param filename name of the file * @param fileInputStream content of the file * @return metadata of the stored file */ public static FileMetadata storeFile(String user, String filename, InputStream fileInputStream) throws IOException { String filetype = filename.substring(filename.lastIndexOf(".") + 1); fileInputStream = cleanFile(fileInputStream, filetype); String internalFilename = makeInternalFilename(filetype); FileMetadata fileMetadata = makeFileMetadata(user, filename, internalFilename, filetype); new FileHandler().storeFile(internalFilename, fileInputStream); storeFileMetadata(fileMetadata); return fileMetadata; }
3.68
morf_SchemaHomology_differences
/** * @return the differences */ public List<String> differences() { return differences; }
3.68
hadoop_RequestLoggerFilter_setStatus
/** * Calls setStatus(int sc, String msg) on the wrapped * {@link HttpServletResponseWrapper} object. * * @param sc the status code * @param msg the status message * @deprecated {@link HttpServletResponseWrapper#setStatus(int, String)} is * deprecated. To set a status code use {@link #setStatus(int)}, to send an * error with a description use {@link #sendError(int, String)} */ @Override @Deprecated @SuppressWarnings("deprecation") public void setStatus(int sc, String msg) { super.setStatus(sc, msg); status = sc; message = msg; }
3.68
morf_HumanReadableStatementHelper_generateChangeColumnString
/** * Generates a human-readable "Change Column" string. * * @param tableName the name of the table on which the column currently exists * @param fromDefinition the original definition of the column * @param toDefinition the replacement definition of the column * @return a string containing the human-readable version of the action */ public static String generateChangeColumnString(final String tableName, final Column fromDefinition, final Column toDefinition) { // If this is not a rename operation if (fromDefinition.getName().equals(toDefinition.getName())) { return String.format("Change column %s on %s from %s %s to %s %s", fromDefinition.getName(), tableName, generateNullableString(fromDefinition), generateColumnDefinitionString(fromDefinition), generateNullableString(toDefinition), generateColumnDefinitionString(toDefinition)); } return String.format("Rename %s column %s [%s] on %s to %s %s [%s]", generateNullableString(fromDefinition), fromDefinition.getName(), generateColumnDefinitionString(fromDefinition), tableName, generateNullableString(toDefinition), toDefinition.getName(), generateColumnDefinitionString(toDefinition)); }
3.68
hudi_CopyOnWriteInputFormat_acceptFile
/** * A simple hook to filter files and directories from the input. * The method may be overridden. Hadoop's FileInputFormat has a similar mechanism and applies the * same filters by default. * * @param fileStatus The file status to check. * @return true, if the given file or directory is accepted */ public boolean acceptFile(FileStatus fileStatus) { final String name = fileStatus.getPath().getName(); return !name.startsWith("_") && !name.startsWith(".") && !localFilesFilter.filterPath(new Path(fileStatus.getPath().toUri())); }
3.68
hbase_QuotaSettingsFactory_unthrottleTable
/** * Remove the throttling for the specified table. * @param tableName the table * @return the quota settings */ public static QuotaSettings unthrottleTable(final TableName tableName) { return throttle(null, tableName, null, null, null, 0, null, QuotaScope.MACHINE); }
3.68
morf_OracleDialect_splitSqlStatement
/** * If the SQL statement line is greater than 2499 characters then split * it into multiple lines where each line is less than 2500 characters in * length. The split is done on a space character; if a space character * cannot be found then a warning will be logged but the statement line * will still be returned exceeding 2499 characters in length. * * @param sqlStatement the statement to split * @return the correctly formatted statement */ private String splitSqlStatement(String sqlStatement) { StringBuilder sql = new StringBuilder(); if (sqlStatement.length() >= 2500) { int splitAt = sqlStatement.lastIndexOf(' ', 2498); if (splitAt == -1) { log.warn("SQL statement greater than 2499 characters in length but unable to find white space (\" \") to split on."); sql.append(sqlStatement); } else { sql.append(sqlStatement, 0, splitAt); sql.append(System.getProperty("line.separator")); sql.append(splitSqlStatement(sqlStatement.substring(splitAt + 1))); } } else { sql.append(sqlStatement); } return sql.toString(); }
3.68
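The recursion is easier to see with the length limit made a parameter. A simplified sketch of the same idea, not Morf's actual code (the dialect hard-codes 2500):

// Split a statement into lines shorter than `limit`, breaking on spaces.
static String splitOnSpaces(String sqlStatement, int limit) {
    if (sqlStatement.length() < limit) {
        return sqlStatement;
    }
    int splitAt = sqlStatement.lastIndexOf(' ', limit - 2);
    if (splitAt == -1) {
        return sqlStatement; // no space to break on; return the over-long line
    }
    return sqlStatement.substring(0, splitAt)
        + System.lineSeparator()
        + splitOnSpaces(sqlStatement.substring(splitAt + 1), limit);
}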
hbase_HFileWriterImpl_doCacheOnWrite
/** * Caches the last written HFile block. * @param offset the offset of the block we want to cache. Used to determine the cache key. */ private void doCacheOnWrite(long offset) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf); try { cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()), cacheFormatBlock, cacheConf.isInMemory(), true); } finally { // refCnt will auto increase when block add to Cache, see RAMCache#putIfAbsent cacheFormatBlock.release(); } }); }
3.68
open-banking-gateway_BaseDatasafeDbStorageService_deduceTable
/** * Resolves objects' table name from path. * @param path Object path to resolve table from. * @return Table name that contains the object. */ protected String deduceTable(AbsoluteLocation<?> path) { return path.location().getWrapped().getHost(); }
3.68
flink_BatchTask_logAndThrowException
/** * Prints an error message and throws the given exception. If the exception is of the type * {@link ExceptionInChainedStubException} then the chain of contained exceptions is followed * until an exception of a different type is found. * * @param ex The exception to be thrown. * @param parent The parent task, whose information is included in the log message. * @throws Exception Always thrown. */ public static void logAndThrowException(Exception ex, AbstractInvokable parent) throws Exception { String taskName; if (ex instanceof ExceptionInChainedStubException) { do { ExceptionInChainedStubException cex = (ExceptionInChainedStubException) ex; taskName = cex.getTaskName(); ex = cex.getWrappedException(); } while (ex instanceof ExceptionInChainedStubException); } else { taskName = parent.getEnvironment().getTaskInfo().getTaskName(); } if (LOG.isErrorEnabled()) { LOG.error(constructLogString("Error in task code", taskName, parent), ex); } throw ex; }
3.68
dubbo_FileCacheStoreFactory_safeName
/** * Sanitizes a name into a valid file or directory name. * * @param name original file name * @return sanitized version of the name */ private static String safeName(String name) { int len = name.length(); StringBuilder sb = new StringBuilder(len); for (int i = 0; i < len; i++) { char c = name.charAt(i); if (LEGAL_CHARACTERS.contains(c)) { sb.append(c); } else { sb.append(ESCAPE_MARK); sb.append(String.format("%04x", (int) c)); } } return sb.toString(); }
3.68
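A standalone sketch of the escaping scheme above, assuming '%' as the escape mark and a small alphanumeric legal set; the actual LEGAL_CHARACTERS and ESCAPE_MARK constants in Dubbo may differ:

public class SafeNameDemo {
    private static final String ESCAPE_MARK = "%"; // assumed escape prefix

    static String safeName(String name) {
        StringBuilder sb = new StringBuilder(name.length());
        for (int i = 0; i < name.length(); i++) {
            char c = name.charAt(i);
            boolean legal = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
                    || (c >= '0' && c <= '9') || c == '.' || c == '_' || c == '-';
            if (legal) {
                sb.append(c);
            } else {
                // escape as four hex digits, e.g. '/' becomes %002f
                sb.append(ESCAPE_MARK).append(String.format("%04x", (int) c));
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(safeName("group/app:1")); // prints group%002fapp%003a1
    }
}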
shardingsphere-elasticjob_TransactionOperation_opAdd
/** * Operation add. * * @param key key * @param value value * @return TransactionOperation */ public static TransactionOperation opAdd(final String key, final String value) { return new TransactionOperation(Type.ADD, key, value); }
3.68
pulsar_PersistentSubscription_doUnsubscribe
/** * Handles the unsubscribe command from the client API. Checks with the dispatcher whether this consumer can * proceed with the unsubscribe. * * @param consumer consumer object that is initiating the unsubscribe operation * @return CompletableFuture indicating the completion of the unsubscribe operation */ @Override public CompletableFuture<Void> doUnsubscribe(Consumer consumer) { CompletableFuture<Void> future = new CompletableFuture<>(); try { if (dispatcher.canUnsubscribe(consumer)) { consumer.close(); return delete(); } future.completeExceptionally( new ServerMetadataException("Unconnected or shared consumer attempting to unsubscribe")); } catch (BrokerServiceException e) { log.warn("Error removing consumer {}", consumer); future.completeExceptionally(e); } return future; }
3.68
hadoop_PartHandle_toByteArray
/** * @return Serialized form in bytes. */ default byte[] toByteArray() { ByteBuffer bb = bytes(); byte[] ret = new byte[bb.remaining()]; bb.get(ret); return ret; }
3.68
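The remaining()/get() pair is the standard way to drain a ByteBuffer into an array; a small illustration:

import java.nio.ByteBuffer;

public class DrainDemo {
    public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.wrap(new byte[] {1, 2, 3});
        byte[] out = new byte[bb.remaining()]; // 3 bytes left between position and limit
        bb.get(out);                           // copies them and advances position to limit
        System.out.println(out.length + " bytes, remaining now " + bb.remaining()); // 3 bytes, remaining now 0
    }
}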
hbase_CopyOnWriteArrayMap_find
/** * Binary search for a given key * @param needle The key to look for in all of the entries * @return Same return value as Arrays.binarySearch. Positive numbers mean the index. Otherwise * (-1 * insertion point) - 1 */ int find(K needle) { int begin = startIndex; int end = endIndex - 1; while (begin <= end) { int mid = begin + ((end - begin) / 2); K midKey = entries[mid].key; int compareRes = keyComparator.compare(midKey, needle); // 0 means equals // We found the key. if (compareRes == 0) { return mid; } else if (compareRes < 0) { // midKey is less than needle so we need // to look farther up. begin = mid + 1; } else { // midKey is greater than needle so we // need to look down. end = mid - 1; } } return (-1 * begin) - 1; }
3.68
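find() mirrors the java.util.Arrays.binarySearch contract, so negative return values decode the same way. A quick illustration using Arrays.binarySearch itself:

import java.util.Arrays;

public class BinarySearchDemo {
    public static void main(String[] args) {
        int[] sorted = {10, 20, 30};
        System.out.println(Arrays.binarySearch(sorted, 20)); // 1: found at index 1
        int miss = Arrays.binarySearch(sorted, 25);          // -3: not found
        int insertionPoint = (-miss) - 1;                    // 2: 25 would slot in before 30
        System.out.println(insertionPoint);
    }
}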
hbase_AsyncAdmin_listTableDescriptors
/** * List all the userspace tables. * @return a list of TableDescriptors wrapped in a {@link CompletableFuture}. */ default CompletableFuture<List<TableDescriptor>> listTableDescriptors() { return listTableDescriptors(false); }
3.68
hadoop_WritableName_getClass
/** * Return the class for a name. * Default is {@link Class#forName(String)}. * * @param name input name. * @param conf input configuration. * @return class for a name. * @throws IOException raised on errors performing I/O. */ public static synchronized Class<?> getClass(String name, Configuration conf ) throws IOException { Class<?> writableClass = NAME_TO_CLASS.get(name); if (writableClass != null) return writableClass; try { return conf.getClassByName(name); } catch (ClassNotFoundException e) { IOException newE = new IOException("WritableName can't load class: " + name); newE.initCause(e); throw newE; } }
3.68
flink_NFAStateSerializer_readObject
/* * Backwards compatible deserializing of NFAStateSerializer. */ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); // the nested serializers will be null if this was read from a savepoint taken with versions // lower than Flink 1.7; in this case, we explicitly create instances for the nested // serializers. if (versionSerializer == null || nodeIdSerializer == null || eventIdSerializer == null) { this.versionSerializer = DeweyNumber.DeweyNumberSerializer.INSTANCE; this.eventIdSerializer = EventId.EventIdSerializer.INSTANCE; this.nodeIdSerializer = new NodeId.NodeIdSerializer(); } }
3.68
hudi_HoodieTableMetaClient_readObject
/** * This method is only used when this object is de-serialized in a spark executor. * * @deprecated */ private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); fs = null; // will be lazily initialized }
3.68
querydsl_NumberExpression_multiply
/** * Create a {@code this * right} expression * * <p>Get the result of the operation this * right</p> * * @param right the right operand of the multiplication * @return this * right */ public <N extends Number & Comparable<N>> NumberExpression<T> multiply(N right) { return Expressions.numberOperation(getType(), Ops.MULT, mixin, ConstantImpl.create(right)); }
3.68
framework_ContainerHierarchicalWrapper_getItem
/* * Gets the specified Item from the container. Don't add a JavaDoc comment * here, we use the default documentation from implemented interface. */ @Override public Item getItem(Object itemId) { return container.getItem(itemId); }
3.68
hadoop_FutureIOSupport_raiseInnerCause
/** * Extract the cause of a completion failure and rethrow it if an IOE * or RTE. * See {@link FutureIO#raiseInnerCause(CompletionException)}. * @param e exception. * @param <T> type of return value. * @return nothing, ever. * @throws IOException either the inner IOException, or a wrapper around * any non-Runtime-Exception * @throws RuntimeException if that is the inner cause. */ @Deprecated public static <T> T raiseInnerCause(final CompletionException e) throws IOException { return FutureIO.raiseInnerCause(e); }
3.68
framework_BindingValidationStatus_isError
/** * Gets whether the validation failed or not. * * @return {@code true} if validation failed, {@code false} if validation * passed */ public boolean isError() { return status == Status.ERROR; }
3.68
framework_FileDropTargetConnector_uploadNextFile
/** * Uploads a file from the waiting list in case there are no files being * uploaded. */ private void uploadNextFile() { Scheduler.get().scheduleDeferred(() -> { if (!uploading && !uploadUrls.isEmpty()) { uploading = true; String nextId = uploadUrls.keySet().stream().findAny().get(); String url = uploadUrls.remove(nextId); File file = filesToUpload.remove(nextId); FileUploadXHR xhr = (FileUploadXHR) FileUploadXHR.create(); xhr.setOnReadyStateChange(xmlHttpRequest -> { if (xmlHttpRequest.getReadyState() == XMLHttpRequest.DONE) { // Poll server for changes getRpcProxy(FileDropTargetRpc.class).poll(); uploading = false; uploadNextFile(); xmlHttpRequest.clearOnReadyStateChange(); } }); xhr.open("POST", getConnection().translateVaadinUri(url)); xhr.postFile(file); } }); }
3.68
dubbo_NacosDynamicConfiguration_innerReceive
/** * receive * * @param dataId data ID * @param group group * @param configInfo content */ @Override public void innerReceive(String dataId, String group, String configInfo) { String oldValue = cacheData.get(dataId); ConfigChangedEvent event = new ConfigChangedEvent(dataId, group, configInfo, getChangeType(configInfo, oldValue)); if (configInfo == null) { cacheData.remove(dataId); } else { cacheData.put(dataId, configInfo); } listeners.forEach(listener -> listener.process(event)); MetricsEventBus.publish(ConfigCenterEvent.toChangeEvent( applicationModel, event.getKey(), event.getGroup(), ConfigCenterEvent.NACOS_PROTOCOL, ConfigChangeType.ADDED.name(), SELF_INCREMENT_SIZE)); }
3.68
framework_VDragEvent_setDragImage
/** * Sets the drag image used for current drag and drop operation. Drag image * is displayed next to mouse cursor during drag and drop. * <p> * The element to be used as drag image will automatically get CSS style * name "v-drag-element". * * @param element * the dom element to be positioned next to mouse cursor * @param offsetX * the horizontal offset of drag image from mouse cursor * @param offsetY * the vertical offset of drag image from mouse cursor * * @since 7.2 */ public void setDragImage(Element element, int offsetX, int offsetY) { setDragImage(DOM.asOld(element), offsetX, offsetY); }
3.68
flink_PythonDependencyUtils_setPythonRequirements
/** * Specifies the third-party dependencies via a requirements file. The * `requirementsCachedDir` will be uploaded to support offline installation. These * dependencies will be installed by the command "pip install -r [requirements file] * --find-links [requirements cached dir]" before launching the Python UDF worker. * * @param requirementsFilePath The path of the requirements file. * @param requirementsCachedDir The path of the requirements cached directory. */ private void setPythonRequirements( Configuration pythonDependencyConfig, String requirementsFilePath, @Nullable String requirementsCachedDir) { Preconditions.checkNotNull(requirementsFilePath); if (!pythonDependencyConfig.contains(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)) { pythonDependencyConfig.set( PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO, new HashMap<>()); } pythonDependencyConfig.get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO).clear(); removeCachedFilesByPrefix(PYTHON_REQUIREMENTS_FILE_PREFIX); removeCachedFilesByPrefix(PYTHON_REQUIREMENTS_CACHE_PREFIX); String fileKey = generateUniqueFileKey(PYTHON_REQUIREMENTS_FILE_PREFIX, requirementsFilePath); registerCachedFileIfNotExist(requirementsFilePath, fileKey); pythonDependencyConfig .get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO) .put(FILE, fileKey); if (requirementsCachedDir != null) { String cacheDirKey = generateUniqueFileKey( PYTHON_REQUIREMENTS_CACHE_PREFIX, requirementsCachedDir); registerCachedFileIfNotExist(requirementsCachedDir, cacheDirKey); pythonDependencyConfig .get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO) .put(CACHE, cacheDirKey); } }
3.68
hadoop_Utils_readVInt
/** * Decoding the variable-length integer. Synonymous to * <code>(int)Utils#readVLong(in)</code>. * * @param in * input stream * @return the decoded integer * @throws IOException raised on errors performing I/O. * * @see Utils#readVLong(DataInput) */ public static int readVInt(DataInput in) throws IOException { long ret = readVLong(in); if ((ret > Integer.MAX_VALUE) || (ret < Integer.MIN_VALUE)) { throw new RuntimeException( "Number too large to be represented as Integer"); } return (int) ret; }
3.68
graphhopper_PrepareRoutingSubnetworks_doWork
/** * Finds and marks all subnetworks according to {@link #setMinNetworkSize(int)} * * @return the total number of marked edges */ public int doWork() { if (minNetworkSize <= 0) { logger.info("Skipping subnetwork search: prepare.min_network_size: " + minNetworkSize); return 0; } StopWatch sw = new StopWatch().start(); logger.info("Start marking subnetworks, prepare.min_network_size: " + minNetworkSize + ", threads: " + threads + ", nodes: " + Helper.nf(graph.getNodes()) + ", edges: " + Helper.nf(graph.getEdges()) + ", jobs: " + prepareJobs + ", " + Helper.getMemInfo()); AtomicInteger total = new AtomicInteger(0); List<BitSet> flags = Stream.generate(() -> new BitSet(graph.getEdges())).limit(prepareJobs.size()).collect(Collectors.toList()); Stream<Runnable> runnables = IntStream.range(0, prepareJobs.size()).mapToObj(i -> () -> { PrepareJob job = prepareJobs.get(i); total.addAndGet(setSubnetworks(job.weighting, job.subnetworkEnc.getName().replaceAll("_subnetwork", ""), flags.get(i))); }); GHUtility.runConcurrently(runnables, threads); AllEdgesIterator iter = graph.getAllEdges(); while (iter.next()) { for (int i = 0; i < prepareJobs.size(); i++) { PrepareJob prepareJob = prepareJobs.get(i); iter.set(prepareJob.subnetworkEnc, flags.get(i).get(iter.getEdge())); } } logger.info("Finished finding and marking subnetworks for " + prepareJobs.size() + " jobs, took: " + sw.stop().getSeconds() + "s, " + Helper.getMemInfo()); return total.get(); }
3.68
framework_VCustomLayout_getLocation
/** * Gets the location of a widget. * * @param w * the widget whose location to check * @return location name, or {@code null} if not found */ public String getLocation(Widget w) { for (final String location : locationToWidget.keySet()) { if (locationToWidget.get(location) == w) { return location; } } return null; }
3.68
hbase_ColumnFamilyDescriptorBuilder_setBlockCacheEnabled
/** * Set the blockCacheEnabled flag * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache * INDEX and BLOOM blocks; you cannot turn this off). * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { return setValue(BLOCKCACHE_BYTES, Boolean.toString(blockCacheEnabled)); }
3.68
flink_CliFrontend_parseAndRun
/** * Parses the command line arguments and starts the requested action. * * @param args command line arguments of the client. * @return The return code of the program */ public int parseAndRun(String[] args) { // check for action if (args.length < 1) { CliFrontendParser.printHelp(customCommandLines); System.out.println("Please specify an action."); return 1; } // get action String action = args[0]; // remove action from parameters final String[] params = Arrays.copyOfRange(args, 1, args.length); try { // do action switch (action) { case ACTION_RUN: run(params); return 0; case ACTION_RUN_APPLICATION: runApplication(params); return 0; case ACTION_LIST: list(params); return 0; case ACTION_INFO: info(params); return 0; case ACTION_CANCEL: cancel(params); return 0; case ACTION_STOP: stop(params); return 0; case ACTION_SAVEPOINT: savepoint(params); return 0; case "-h": case "--help": CliFrontendParser.printHelp(customCommandLines); return 0; case "-v": case "--version": String version = EnvironmentInformation.getVersion(); String commitID = EnvironmentInformation.getRevisionInformation().commitId; System.out.print("Version: " + version); System.out.println( commitID.equals(EnvironmentInformation.UNKNOWN) ? "" : ", Commit ID: " + commitID); return 0; default: System.out.printf("\"%s\" is not a valid action.\n", action); System.out.println(); System.out.println( "Valid actions are \"run\", \"run-application\", \"list\", \"info\", \"savepoint\", \"stop\", or \"cancel\"."); System.out.println(); System.out.println( "Specify the version option (-v or --version) to print Flink version."); System.out.println(); System.out.println( "Specify the help option (-h or --help) to get help on the command."); return 1; } } catch (CliArgsException ce) { return handleArgException(ce); } catch (ProgramParametrizationException ppe) { return handleParametrizationException(ppe); } catch (ProgramMissingJobException pmje) { return handleMissingJobException(); } catch (Exception e) { return handleError(e); } }
3.68
hbase_Scan_getMaxResultsPerColumnFamily
/** Returns maximum number of values to return per row per CF */ public int getMaxResultsPerColumnFamily() { return this.storeLimit; }
3.68
hadoop_XException_getCause
/** * Returns the last parameter if it is an instance of <code>Throwable</code>, * otherwise returns <code>null</code>. * * @param params parameters to look for a cause. * * @return the last parameter if it is an instance of <code>Throwable</code>, * otherwise <code>null</code>. */ private static Throwable getCause(Object... params) { Throwable throwable = null; if (params != null && params.length > 0 && params[params.length - 1] instanceof Throwable) { throwable = (Throwable) params[params.length - 1]; } return throwable; }
3.68
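The convention above, the last vararg doubling as the cause when it is a Throwable, can be exercised standalone (XException.getCause itself is private; this is an illustrative copy):

// Standalone copy of the convention, for illustration only.
static Throwable lastParamAsCause(Object... params) {
    if (params != null && params.length > 0 && params[params.length - 1] instanceof Throwable) {
        return (Throwable) params[params.length - 1];
    }
    return null;
}

// lastParamAsCause("userId", 42, new IllegalStateException("boom")) -> the exception
// lastParamAsCause("userId", 42)                                    -> null (last parameter is not a Throwable)
// lastParamAsCause()                                                -> null (no parameters)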
framework_Button_getClientX
/** * Returns the mouse position (x coordinate) when the click took place. * The position is relative to the browser client area. * * @return The mouse cursor x position or -1 if unknown */ public int getClientX() { if (null != details) { return details.getClientX(); } else { return -1; } }
3.68
dubbo_InternalThreadLocal_set
/** * Sets the value for the current thread. */ @Override public final void set(V value) { if (value == null || value == InternalThreadLocalMap.UNSET) { remove(); } else { InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.get(); if (threadLocalMap.setIndexedVariable(index, value)) { addToVariablesToRemove(threadLocalMap, this); } } }
3.68
flink_RegisterApplicationMasterResponseReflector_getContainersFromPreviousAttemptsUnsafe
/** * Same as {@link #getContainersFromPreviousAttempts(RegisterApplicationMasterResponse)} but * allows to pass objects that are not of type {@link RegisterApplicationMasterResponse}. */ @VisibleForTesting List<Container> getContainersFromPreviousAttemptsUnsafe(final Object response) { if (getContainersFromPreviousAttemptsMethod.isPresent() && response != null) { try { @SuppressWarnings("unchecked") final List<Container> containers = (List<Container>) getContainersFromPreviousAttemptsMethod.get().invoke(response); if (containers != null && !containers.isEmpty()) { return containers; } } catch (Exception t) { logger.error("Error invoking 'getContainersFromPreviousAttempts()'", t); } } return Collections.emptyList(); }
3.68
hadoop_SaslInputStream_close
/** * Closes this input stream and releases any system resources associated with * the stream. * <p> * The <code>close</code> method of <code>SASLInputStream</code> calls the * <code>close</code> method of its underlying input stream. * * @exception IOException * if an I/O error occurs. */ @Override public void close() throws IOException { disposeSasl(); ostart = 0; ofinish = 0; inStream.close(); isOpen = false; }
3.68
hadoop_HamletImpl_parseSelector
/** * Parse selector into id and classes * @param selector in the form of (#id)?(.class)* * @return a two-element array [id, "space-separated classes"]. * Either element could be null. * @throws WebAppException when both are null or on a syntax error. */ public static String[] parseSelector(String selector) { String[] result = new String[]{null, null}; Iterable<String> rs = SS.split(selector); Iterator<String> it = rs.iterator(); if (it.hasNext()) { String maybeId = it.next(); if (maybeId.charAt(0) == '#') { result[S_ID] = maybeId.substring(1); if (it.hasNext()) { result[S_CLASS] = SJ.join(Iterables.skip(rs, 1)); } } else { result[S_CLASS] = SJ.join(rs); } return result; } throw new WebAppException("Error parsing selector: "+ selector); }
3.68
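Given the documented grammar (#id)?(.class)*, and assuming SS splits on '.' and SJ joins with spaces as the Javadoc implies, the expected outputs look like:

// parseSelector("#main.btn.primary") -> {"main", "btn primary"}
// parseSelector(".btn.primary")      -> {null,   "btn primary"}
// parseSelector("#main")             -> {"main", null}
// parseSelector("")                  -> throws WebAppException (neither id nor classes)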
hudi_CompactionUtils_getAllPendingCompactionOperations
/** * Get all PartitionPath + file-ids with pending Compaction operations and their target compaction instant time. * * @param metaClient Hoodie Table Meta Client */ public static Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> getAllPendingCompactionOperations( HoodieTableMetaClient metaClient) { List<Pair<HoodieInstant, HoodieCompactionPlan>> pendingCompactionPlanWithInstants = getAllPendingCompactionPlans(metaClient); return getAllPendingCompactionOperationsInPendingCompactionPlans(pendingCompactionPlanWithInstants); }
3.68
pulsar_PulsarZooKeeperClient_syncCallWithRetries
/** * Execute a sync zookeeper operation with a given retry policy. * * @param client * ZooKeeper client. * @param proc * Synchronous zookeeper operation wrapped in a {@link Callable}. * @param retryPolicy * Retry policy to execute the synchronous operation. * @param rateLimiter * Rate limiter for zookeeper calls * @param statsLogger * Stats Logger for zookeeper client. * @return result of the zookeeper operation * @throws KeeperException any non-recoverable exception, or a recoverable exception that exhausted all retries. * @throws InterruptedException the operation is interrupted. */ public static<T> T syncCallWithRetries(PulsarZooKeeperClient client, ZooWorker.ZooCallable<T> proc, RetryPolicy retryPolicy, RateLimiter rateLimiter, OpStatsLogger statsLogger) throws KeeperException, InterruptedException { T result = null; boolean isDone = false; int attempts = 0; long startTimeNanos = MathUtils.nowInNano(); while (!isDone) { try { if (null != client) { client.waitForConnection(); } log.debug("Execute {} at {} retry attempt.", proc, attempts); if (null != rateLimiter) { rateLimiter.acquire(); } result = proc.call(); isDone = true; statsLogger.registerSuccessfulEvent(MathUtils.elapsedMicroSec(startTimeNanos), TimeUnit.MICROSECONDS); } catch (KeeperException e) { ++attempts; boolean rethrow = true; long elapsedTime = MathUtils.elapsedMSec(startTimeNanos); if (((null != client && isRecoverableException(e)) || null == client) && retryPolicy.allowRetry(attempts, elapsedTime)) { rethrow = false; } if (rethrow) { statsLogger.registerFailedEvent(MathUtils.elapsedMicroSec(startTimeNanos), TimeUnit.MICROSECONDS); log.debug("Stopped executing {} after {} attempts.", proc, attempts); throw e; } TimeUnit.MILLISECONDS.sleep(retryPolicy.nextRetryWaitTime(attempts, elapsedTime)); } } return result; }
3.68
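Stripped of the ZooKeeper specifics, the retry loop follows a common skeleton. A hypothetical generic helper, not Pulsar's API, showing the same shape:

import java.util.concurrent.Callable;

public final class RetryDemo {
    // Retry `proc` until it succeeds or maxAttempts is exhausted, sleeping between tries.
    static <T> T callWithRetries(Callable<T> proc, int maxAttempts, long backoffMillis)
            throws Exception {
        int attempts = 0;
        while (true) {
            try {
                return proc.call();
            } catch (Exception e) {
                if (++attempts >= maxAttempts) {
                    throw e; // exhausted all retries, rethrow the last failure
                }
                Thread.sleep(backoffMillis);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(callWithRetries(() -> "ok", 3, 100L)); // succeeds first try
    }
}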
framework_MessageSender_getCommunicationMethodName
/** * Returns a human readable string representation of the method used to * communicate with the server. * * @return A string representation of the current transport type */ public String getCommunicationMethodName() { String clientToServer = "XHR"; String serverToClient = "-"; if (push != null) { serverToClient = push.getTransportType(); if (push.isBidirectional()) { clientToServer = serverToClient; } } return "Client to server: " + clientToServer + ", " + "server to client: " + serverToClient; }
3.68
flink_OperationKey_getTriggerId
/** * Get the trigger id for the given operation key. * * @return trigger id */ public TriggerId getTriggerId() { return triggerId; }
3.68
hadoop_StoreContext_getStorageStatistics
/** * Get the storage statistics of this filesystem. * @return the storage statistics */ public S3AStorageStatistics getStorageStatistics() { return storageStatistics; }
3.68
morf_Criterion_lessThan
/** * Helper method to create a new "LESS THAN" expression. * * <blockquote><pre> * Criterion.lessThan(new Field("startdate"), 20091001);</pre></blockquote> * * @param field the field to evaluate in the expression (the left hand side of the expression) * @param value the value to evaluate in the expression (the right hand side) * @return a new Criterion object */ public static Criterion lessThan(AliasedField field, Object value) { return new Criterion(Operator.LT, field, value); }
3.68
hudi_SparkInternalSchemaConverter_convertFloatType
/** * Converts float type to another type. * Currently only float -> double/String/Decimal is supported. * TODO: support more types */ private static boolean convertFloatType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) { if (newType instanceof DoubleType || newType instanceof StringType || newType instanceof DecimalType) { for (int i = 0; i < len; i++) { if (oldV.isNullAt(i)) { newV.putNull(i); continue; } // float -> double/string/decimal if (newType instanceof DoubleType) { newV.putDouble(i, Double.valueOf(oldV.getFloat(i) + "")); } else if (newType instanceof StringType) { newV.putByteArray(i, getUTF8Bytes(oldV.getFloat(i) + "")); } else if (newType instanceof DecimalType) { Decimal oldDecimal = Decimal.apply(oldV.getFloat(i)); oldDecimal.changePrecision(((DecimalType) newType).precision(), ((DecimalType) newType).scale()); newV.putDecimal(i, oldDecimal, ((DecimalType) newType).precision()); } } return true; } return false; }
3.68
hbase_DefaultMobStoreCompactor_calculateMobLengthMap
/** * @param mobRefs multimap of original table name -> mob hfile */ private void calculateMobLengthMap(SetMultimap<TableName, String> mobRefs) throws IOException { FileSystem fs = store.getFileSystem(); HashMap<String, Long> map = mobLengthMap.get(); map.clear(); for (Entry<TableName, String> reference : mobRefs.entries()) { final TableName table = reference.getKey(); final String mobfile = reference.getValue(); if (MobFileName.isOldMobFileName(mobfile)) { disableIO.set(Boolean.TRUE); } List<Path> locations = mobStore.getLocations(table); for (Path p : locations) { try { FileStatus st = fs.getFileStatus(new Path(p, mobfile)); long size = st.getLen(); LOG.debug("Referenced MOB file={} size={}", mobfile, size); map.put(mobfile, size); break; } catch (FileNotFoundException exception) { LOG.debug("Mob file {} was not in location {}. May have other locations to try.", mobfile, p); } } if (!map.containsKey(mobfile)) { throw new FileNotFoundException("Could not find mob file " + mobfile + " in the list of " + "expected locations: " + locations); } } }
3.68
flink_EnvironmentInformation_getSizeOfFreeHeapMemory
/** * Gets an estimate of the size of the free heap memory. The estimate may vary, depending on the * current level of memory fragmentation and the number of dead objects. For a better (but more * heavy-weight) estimate, use {@link #getSizeOfFreeHeapMemoryWithDefrag()}. * * @return An estimate of the size of the free heap memory, in bytes. */ public static long getSizeOfFreeHeapMemory() { Runtime r = Runtime.getRuntime(); return getMaxJvmHeapMemory() - r.totalMemory() + r.freeMemory(); }
3.68
flink_ExecNodeContext_getVersion
/** The version of the ExecNode in the JSON plan. See {@link ExecNodeMetadata#version()}. */ public Integer getVersion() { return version; }
3.68
hadoop_ResourceInformation_toIndentedString
/** * Convert the given object to string with each line indented by 4 spaces * (except the first line). */ private String toIndentedString(java.lang.Object o) { if (o == null) { return "null"; } return o.toString().replace("\n", "\n "); }
3.68
flink_LogicalType_isNullable
/** Returns whether a value of this type can be {@code null}. */ public boolean isNullable() { return isNullable; }
3.68
querydsl_MathExpressions_random
/** * Return a random number expression with the given seed * * @param seed seed * @return random(seed) */ public static NumberExpression<Double> random(int seed) { return Expressions.numberOperation(Double.class, MathOps.RANDOM2, ConstantImpl.create(seed)); }
3.68
framework_FocusableHTML_addBlurHandler
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.HasBlurHandlers#addBlurHandler(com.google * .gwt.event.dom.client.BlurHandler) */ @Override public HandlerRegistration addBlurHandler(BlurHandler handler) { return addDomHandler(handler, BlurEvent.getType()); }
3.68
flink_HybridShuffleConfiguration_getSelectiveStrategySpillBufferRatio
/** The proportion of buffers to be spilled. Used by {@link HsSelectiveSpillingStrategy}. */ public float getSelectiveStrategySpillBufferRatio() { return selectiveStrategySpillBufferRatio; }
3.68
hadoop_OBSFileSystem_append
/** * Append to an existing file (optional operation). * * @param f the existing file to be appended * @param bufferSize the size of the buffer to be used * @param progress for reporting progress if it is not null * @throws IOException indicating that append is not supported */ @Override public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) throws IOException { if (!isFsBucket()) { throw new UnsupportedOperationException( "non-posix bucket. Append is not supported " + "by OBSFileSystem"); } LOG.debug("append: Append file {}.", f); String key = OBSCommonUtils.pathToKey(this, f); // get the status or throw an FNFE FileStatus status = getFileStatus(f); long objectLen = status.getLen(); // if the thread reaches here, there is something at the path if (status.isDirectory()) { // path references a directory: automatic error throw new FileAlreadyExistsException(f + " is a directory"); } return new FSDataOutputStream( new OBSBlockOutputStream( this, key, objectLen, new SemaphoredDelegatingExecutor( boundedMultipartUploadThreadPool, blockOutputActiveBlocks, true), true), null); }
3.68
hbase_OrderedInt32_decodeInt
/** * Read an {@code int} value from the buffer {@code src}. * @param src the {@link PositionedByteRange} to read the {@code int} from * @return the {@code int} read from the buffer */ public int decodeInt(PositionedByteRange src) { return OrderedBytes.decodeInt32(src); }
3.68
flink_SolutionSetUpdateBarrier_waitForSolutionSetUpdate
/** * Waits (blocking) on barrier. * * @throws InterruptedException */ public void waitForSolutionSetUpdate() throws InterruptedException { latch.await(); }
3.68
flink_TimeUtils_plural
/** * @param label the original label * @return both the singular format and plural format of the original label */ private static String[] plural(String label) { return new String[] {label, label + PLURAL_SUFFIX}; }
3.68
hadoop_InstantiationIOException_isNotInstanceOf
/** * Class does not implement the desired interface. * @param uri URI of filesystem * @param classname classname. * @param interfaceName required interface * @param key configuration key * @return an exception. */ public static InstantiationIOException isNotInstanceOf( @Nullable URI uri, String classname, String interfaceName, String key) { return new InstantiationIOException(Kind.IsNotImplementation, uri, classname, key, DOES_NOT_IMPLEMENT + " " + interfaceName, null); }
3.68
hadoop_ManifestPrinter_loadAndPrintManifest
/** * Load and print a manifest. * @param fs filesystem. * @param path path * @throws IOException failure to load * @return the manifest */ public ManifestSuccessData loadAndPrintManifest(FileSystem fs, Path path) throws IOException { // load the manifest println("Manifest file: %s", path); final ManifestSuccessData success = ManifestSuccessData.load(fs, path); printManifest(success); return success; }
3.68
hadoop_ManifestPrinter_main
/** * Entry point. */ public static void main(String[] argv) throws Exception { try { int res = ToolRunner.run(new ManifestPrinter(), argv); System.exit(res); } catch (ExitUtil.ExitException e) { ExitUtil.terminate(e); } }
3.68
framework_ServiceDestroyEvent_getSource
/* * (non-Javadoc) * * @see java.util.EventObject#getSource() */ @Override public VaadinService getSource() { return (VaadinService) super.getSource(); }
3.68
flink_CustomHeadersDecorator_getCustomHeaders
/** * Returns the custom headers added to the message. * * @return The custom headers as a collection of {@link HttpHeader}. */ @Override public Collection<HttpHeader> getCustomHeaders() { return customHeaders; }
3.68
hibernate-validator_ValidationOrderGenerator_insertInheritedGroups
/** * Recursively add inherited groups into the group chain. * * @param clazz the group interface * @param chain the group chain we are currently building */ private void insertInheritedGroups(Class<?> clazz, DefaultValidationOrder chain) { for ( Class<?> inheritedGroup : clazz.getInterfaces() ) { Group group = new Group( inheritedGroup ); chain.insertGroup( group ); insertInheritedGroups( inheritedGroup, chain ); } }
3.68
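The same traversal in isolation: a small sketch that collects every superinterface reachable from a class, with a visited set so termination is obvious even for diamond-shaped hierarchies:

import java.util.LinkedHashSet;
import java.util.Set;

public class InterfaceWalkDemo {
    // Recursively collect all superinterfaces reachable from `clazz`.
    static void collectInterfaces(Class<?> clazz, Set<Class<?>> out) {
        for (Class<?> iface : clazz.getInterfaces()) {
            if (out.add(iface)) { // add() returns false if already visited
                collectInterfaces(iface, out);
            }
        }
    }

    public static void main(String[] args) {
        Set<Class<?>> seen = new LinkedHashSet<>();
        collectInterfaces(java.util.ArrayList.class, seen);
        System.out.println(seen); // List, RandomAccess, Cloneable, Serializable, Collection, Iterable, ...
    }
}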
pulsar_FieldParser_stringToFloat
/** * Converts String to float. * * @param val * The String to be converted. * @return The converted Float value. */ public static Float stringToFloat(String val) { return Float.valueOf(trim(val)); }
3.68
morf_DatabaseType_reclassifyException
/** * Reclassifies driver-specific exceptions to standard driver agnostic exceptions, such as those * extending {@link java.sql.SQLException}. This can allow callers to extract error codes from * exceptions which are otherwise opaque. * * @param e the exception to reclassify. * @return The original exception or a reclassified exception. */ public default Exception reclassifyException(Exception e) { return e; }
3.68
flink_StreamExecutionEnvironment_close
/** * Close and clean up the execution environment. All the cached intermediate results will be * released physically. */ @Override public void close() throws Exception { for (AbstractID id : cachedTransformations.keySet()) { invalidateClusterDataset(id); } }
3.68
framework_TabsheetBaseConnector_onStateChanged
/* * (non-Javadoc) * * @see * com.vaadin.client.ui.AbstractComponentConnector#onStateChanged(com.vaadin * .client.communication.StateChangeEvent) */ @Override public void onStateChanged(StateChangeEvent stateChangeEvent) { super.onStateChanged(stateChangeEvent); VTabsheetBase widget = getWidget(); // Update member references widget.setEnabled(isEnabled()); // Widgets in the TabSheet before update (should be max 1) List<Widget> oldWidgets = new ArrayList<>(); for (Iterator<Widget> iterator = widget.getWidgetIterator(); iterator .hasNext();) { Widget child = iterator.next(); // filter out any current widgets (should be max 1) boolean found = false; for (ComponentConnector childComponent : getChildComponents()) { if (childComponent.getWidget().equals(child)) { found = true; break; } } if (!found) { oldWidgets.add(child); } } // Clear previous values widget.clearTabKeys(); int index = 0; for (TabState tab : getState().tabs) { final String key = tab.key; final boolean selected = key.equals(getState().selected); widget.addTabKey(key, !tab.enabled && tab.visible); if (selected) { widget.setActiveTabIndex(index); } widget.renderTab(tab, index); if (selected) { widget.selectTab(index); } index++; } int tabCount = widget.getTabCount(); while (tabCount-- > index) { widget.removeTab(index); } // Detach any old tab widget, should be max 1 for (Widget oldWidget : oldWidgets) { if (oldWidget.isAttached()) { oldWidget.removeFromParent(); } } }
3.68
hadoop_AzureNativeFileSystemStore_getHadoopBlockSize
/** * Returns the file block size. This is a fake value used for integration * of the Azure store with Hadoop. */ @Override public long getHadoopBlockSize() { return hadoopBlockSize; }
3.68
framework_VScrollTable_selectSelectedRows
/** For internal use only. May be removed or replaced in the future. */ public boolean selectSelectedRows(UIDL uidl) { boolean keyboardSelectionOverRowFetchInProgress = false; if (uidl.hasVariable("selected")) { final Set<String> selectedKeys = uidl .getStringArrayVariableAsSet("selected"); // Do not update focus if there is a single selected row // that is the same as the previous selection. This prevents // unwanted scrolling (#18247). boolean rowsUnSelected = removeUnselectedRowKeys(selectedKeys); boolean updateFocus = rowsUnSelected || selectedRowKeys.isEmpty() || focusedRow == null; if (scrollBody != null) { for (Widget w : scrollBody) { /* * Make the focus reflect to the server side state unless we * are currently selecting multiple rows with keyboard. */ VScrollTableRow row = (VScrollTableRow) w; boolean selected = selectedKeys.contains(row.getKey()); if (!selected && unSyncedselectionsBeforeRowFetch != null && unSyncedselectionsBeforeRowFetch .contains(row.getKey())) { selected = true; keyboardSelectionOverRowFetchInProgress = true; } if (selected && selectedKeys.size() == 1 && updateFocus) { /* * If a single item is selected, move focus to the * selected row. (#10522) */ setRowFocus(row); } if (selected != row.isSelected()) { row.toggleSelection(); if (!isSingleSelectMode() && !selected) { // Update selection range in case a row is // unselected from the middle of a range - #8076 removeRowFromUnsentSelectionRanges(row); } } } } } unSyncedselectionsBeforeRowFetch = null; return keyboardSelectionOverRowFetchInProgress; }
3.68
dubbo_AbstractStateRouter_supportContinueRoute
/** * Whether the current router's implementation supports calling * {@link AbstractStateRouter#continueRoute(BitList, URL, Invocation, boolean, Holder)} * by itself. * * @return support or not */ protected boolean supportContinueRoute() { return false; }
3.68
hbase_ClusterMetrics_getRegionCount
/** Returns the number of regions deployed on the cluster */ default int getRegionCount() { return getLiveServerMetrics().entrySet().stream() .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum(); }
3.68
flink_Costs_setNetworkCost
/** * Sets the network cost for this Costs object. * * @param bytes The network cost to set, in bytes to be transferred. */ public void setNetworkCost(double bytes) { if (bytes == UNKNOWN || bytes >= 0) { this.networkCost = bytes; } else { throw new IllegalArgumentException(); } }
3.68
dubbo_ReflectUtils_hasMethod
/** * Checks whether the target bean class has the specified method. * @param beanClass the bean class to inspect * @param methodName the name of the method to look for * @return true if the class has the method */ public static boolean hasMethod(Class<?> beanClass, String methodName) { try { BeanInfo beanInfo = Introspector.getBeanInfo(beanClass); Optional<MethodDescriptor> descriptor = Stream.of(beanInfo.getMethodDescriptors()) .filter(methodDescriptor -> methodName.equals(methodDescriptor.getName())) .findFirst(); return descriptor.isPresent(); } catch (Exception e) { } return false; }
3.68
hadoop_OBSBlockOutputStream_appendFsFile
/** * Append posix file. * * @throws IOException any problem */ private synchronized void appendFsFile() throws IOException { LOG.debug("bucket is posix, to append file. key is {}", key); final OBSDataBlocks.DataBlock block = getActiveBlock(); WriteFileRequest writeFileReq; if (block instanceof OBSDataBlocks.DiskBlock) { writeFileReq = OBSCommonUtils.newAppendFileRequest(fs, key, objectLen, (File) block.startUpload()); } else { writeFileReq = OBSCommonUtils.newAppendFileRequest(fs, key, objectLen, (InputStream) block.startUpload()); } OBSCommonUtils.appendFile(fs, writeFileReq); objectLen += block.dataSize(); }
3.68
hbase_ZKDump_getReplicationZnodesDump
/** * Returns a string with replication znodes and position of the replication log * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation * @return a string of replication znodes and log positions */ public static String getReplicationZnodesDump(ZKWatcher zkw) throws KeeperException { StringBuilder sb = new StringBuilder(); getReplicationZnodesDump(zkw, sb); return sb.toString(); }
3.68
hadoop_OBSFileSystem_getReadAheadRange
/** * Return the read ahead range used by this filesystem. * * @return read ahead range */ @VisibleForTesting long getReadAheadRange() { return readAheadRange; }
3.68
morf_OracleDialect_tableNameWithSchemaName
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#tableNameWithSchemaName(org.alfasoftware.morf.sql.element.TableReference) */ @Override protected String tableNameWithSchemaName(TableReference tableRef) { if (StringUtils.isEmpty(tableRef.getDblink())) { return super.tableNameWithSchemaName(tableRef); } else { return super.tableNameWithSchemaName(tableRef) + "@" + tableRef.getDblink(); } }
3.68
framework_Navigator_switchView
/** * Update the internal state of the navigator to reflect the actual * switching of views. * * This method should only be called by * {@link #navigateTo(View, String, String)} between showing the view and * calling {@link View#enter(ViewChangeEvent)}. If this method is * overridden, the overriding version must call the super method. * * @since 7.6 * @param event * a view change event with details of the change */ protected void switchView(ViewChangeEvent event) { currentView = event.getNewView(); }
3.68
flink_FlinkContainers_restartTaskManager
/** Restarts all TaskManager containers. */ public void restartTaskManager(RunnableWithException afterFailAction) throws Exception { taskManagers.forEach(GenericContainer::stop); afterFailAction.run(); taskManagers.forEach(GenericContainer::start); }
3.68
querydsl_DefaultEntitySerializer_wrap
// TODO move this to codegen private Type wrap(Type type) { if (type.equals(Types.BOOLEAN_P)) { return Types.BOOLEAN; } else if (type.equals(Types.BYTE_P)) { return Types.BYTE; } else if (type.equals(Types.CHAR)) { return Types.CHARACTER; } else if (type.equals(Types.DOUBLE_P)) { return Types.DOUBLE; } else if (type.equals(Types.FLOAT_P)) { return Types.FLOAT; } else if (type.equals(Types.INT)) { return Types.INTEGER; } else if (type.equals(Types.LONG_P)) { return Types.LONG; } else if (type.equals(Types.SHORT_P)) { return Types.SHORT; } else { return type; } }
3.68
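The equals() chain could equally be table-driven. A hypothetical alternative, not QueryDSL's code, using a lookup map over the same Types constants (assumes java.util.Map and java.util.HashMap imports in the enclosing class):

// Hypothetical table-driven variant of wrap(); falls back to the input type.
private static final Map<Type, Type> BOXED = new HashMap<>();
static {
    BOXED.put(Types.BOOLEAN_P, Types.BOOLEAN);
    BOXED.put(Types.BYTE_P, Types.BYTE);
    BOXED.put(Types.CHAR, Types.CHARACTER);
    BOXED.put(Types.DOUBLE_P, Types.DOUBLE);
    BOXED.put(Types.FLOAT_P, Types.FLOAT);
    BOXED.put(Types.INT, Types.INTEGER);
    BOXED.put(Types.LONG_P, Types.LONG);
    BOXED.put(Types.SHORT_P, Types.SHORT);
}

private Type wrap(Type type) {
    return BOXED.getOrDefault(type, type); // unmapped types pass through unchanged
}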
pulsar_PartialRoundRobinMessageRouterImpl_choosePartition
/** * Choose a partition based on the topic metadata. * Key hash routing isn't supported. * * @param msg message * @param metadata topic metadata * @return the partition to route the message. */ public int choosePartition(Message<?> msg, TopicMetadata metadata) { final List<Integer> newPartialList = new ArrayList<>(getOrCreatePartialList(metadata)); return newPartialList .get(signSafeMod(PARTITION_INDEX_UPDATER.getAndIncrement(this), newPartialList.size())); }
3.68
hadoop_StageConfig_withTaskManifestDir
/** * Set builder value. * @param value new value * @return the builder */ public StageConfig withTaskManifestDir(Path value) { checkOpen(); taskManifestDir = value; return this; }
3.68
hadoop_DelegatingSSLSocketFactory_getChannelMode
/** * Get the channel mode of this instance. * @return a channel mode. */ public SSLChannelMode getChannelMode() { return channelMode; }
3.68
hudi_LegacyArchivedMetaEntryReader_getRecordIterator
/** * Returns the avro record iterator with given file statuses. */ private ClosableIterator<HoodieRecord<IndexedRecord>> getRecordIterator(FileStatus[] fsStatuses) throws IOException { return new ClosableIterator<HoodieRecord<IndexedRecord>>() { final Iterator<FileStatus> fsItr = Arrays.asList(fsStatuses).iterator(); HoodieLogFormat.Reader reader; ClosableIterator<HoodieRecord<IndexedRecord>> recordItr; @Override public void close() { if (this.reader != null) { closeLogFormatReader(reader); } } @Override public boolean hasNext() { if (recordItr != null && recordItr.hasNext()) { return true; } // new reader if possible if (reader != null) { while (reader.hasNext()) { HoodieLogBlock block = reader.next(); if (block instanceof HoodieAvroDataBlock) { HoodieAvroDataBlock avroBlock = (HoodieAvroDataBlock) block; recordItr = avroBlock.getRecordIterator(HoodieRecord.HoodieRecordType.AVRO); if (recordItr.hasNext()) { return true; } } } // no records in the reader, close the reader closeLogFormatReader(reader); reader = null; } // new reader while (fsItr.hasNext()) { FileStatus fs = fsItr.next(); try { reader = HoodieLogFormat.newReader(metaClient.getFs(), new HoodieLogFile(fs.getPath()), HoodieArchivedMetaEntry.getClassSchema()); } catch (IOException ioe) { throw new HoodieIOException("Error initializing the reader for archived log: " + fs.getPath(), ioe); } while (reader.hasNext()) { HoodieLogBlock block = reader.next(); if (block instanceof HoodieAvroDataBlock) { HoodieAvroDataBlock avroBlock = (HoodieAvroDataBlock) block; recordItr = avroBlock.getRecordIterator(HoodieRecord.HoodieRecordType.AVRO); if (recordItr.hasNext()) { return true; } } } } return false; } @Override public HoodieRecord<IndexedRecord> next() { return this.recordItr.next(); } }; }
3.68
hadoop_NMClientAsync_onRollbackLastReInitialization
/** * Callback for rollback of last re-initialization. * * @param containerId the Id of the container whose last re-initialization is rolled back. */ public void onRollbackLastReInitialization(ContainerId containerId) {}
3.68
flink_RetryRule_evaluate
/** * Retry a test in case of a failure with a specific exception. * * @throws Throwable */ @Override public void evaluate() throws Throwable { for (int currentRun = 0; currentRun <= timesOnFailure; currentRun++) { try { statement.evaluate(); break; // success } catch (Throwable t) { if (expectedException != null && expectedException.isAssignableFrom(t.getClass())) { throw t; } LOG.warn( String.format( "Test run failed (%d/%d).", currentRun, timesOnFailure + 1), t); if (!exceptionClass.isAssignableFrom(t.getClass()) || currentRun >= timesOnFailure) { // Throw the failure if retried too often, or if it is the wrong exception throw t; } } } }
3.68
flink_ParameterTool_fromPropertiesFile
/** * Returns {@link ParameterTool} for the given InputStream from {@link Properties} file. * * @param inputStream InputStream from the properties file * @return A {@link ParameterTool} * @throws IOException If the file does not exist * @see Properties */ public static ParameterTool fromPropertiesFile(InputStream inputStream) throws IOException { Properties props = new Properties(); props.load(inputStream); return fromMap((Map) props); }
3.68
querydsl_ComparableExpressionBase_min
/** * Create a {@code min(this)} expression * * <p>Get the minimum value of this expression (aggregation)</p> * * @return min(this) */ public ComparableExpressionBase<T> min() { return Expressions.comparableOperation(getType(), Ops.AggOps.MIN_AGG, mixin); }
3.68
hbase_MultiRowRangeFilter_getNextRangeIndex
/** * Calculates the position where the given rowkey fits in the ranges list. * @param rowKey the row key to calculate * @return index the position of the row key */ public int getNextRangeIndex(byte[] rowKey) { BasicRowRange temp; if (reversed) { temp = new ReversedRowRange(null, true, rowKey, true); } else { temp = new RowRange(rowKey, true, null, true); } // We make sure that `ranges` has the correct natural ordering (given it contains // RowRange or ReverseRowRange objects). This keeps us from having to have two different // implementations below. final int index = Collections.binarySearch(ranges, temp); if (index < 0) { int insertionPosition = -index - 1; // check if the row key is in the range before the insertion position if (insertionPosition != 0 && ranges.get(insertionPosition - 1).contains(rowKey)) { return insertionPosition - 1; } // check if the row key is before the first range if (insertionPosition == 0 && !ranges.get(insertionPosition).contains(rowKey)) { return ROW_BEFORE_FIRST_RANGE; } if (!foundFirstRange) { foundFirstRange = true; } return insertionPosition; } // the row key equals one of the start keys, and the range excludes the start key if (ranges.get(index).isSearchRowInclusive() == false) { exclusive = true; } return index; }
3.68
hudi_HoodieFileGroup_getAllFileSlicesIncludingInflight
/** * Get all the file slices including in-flight ones as seen in underlying file system. */ public Stream<FileSlice> getAllFileSlicesIncludingInflight() { return fileSlices.values().stream(); }
3.68
flink_FactoryUtil_forwardOptions
/** * Forwards the options declared in {@link DynamicTableFactory#forwardOptions()} and * possibly {@link FormatFactory#forwardOptions()} from {@link * DynamicTableFactory.Context#getEnrichmentOptions()} to the final options, if present. */ @SuppressWarnings({"unchecked"}) private void forwardOptions() { for (ConfigOption<?> option : factory.forwardOptions()) { enrichingOptions .getOptional(option) .ifPresent(o -> allOptions.set((ConfigOption<? super Object>) option, o)); } }
3.68
hudi_HoodieHFileUtils_createHFileReader
/** * Creates HFile reader for byte array with default `primaryReplicaReader` as true. * * @param fs File system. * @param dummyPath Dummy path to file to read. * @param content Content in byte array. * @return HFile reader * @throws IOException Upon error. */ public static HFile.Reader createHFileReader( FileSystem fs, Path dummyPath, byte[] content) { // Avoid loading default configs, from the FS, since this configuration is mostly // used as a stub to initialize HFile reader Configuration conf = new Configuration(false); HoodieAvroHFileReader.SeekableByteArrayInputStream bis = new HoodieAvroHFileReader.SeekableByteArrayInputStream(content); FSDataInputStream fsdis = new FSDataInputStream(bis); FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fsdis); ReaderContext context = new ReaderContextBuilder() .withFilePath(dummyPath) .withInputStreamWrapper(stream) .withFileSize(content.length) .withFileSystem(fs) .withPrimaryReplicaReader(USE_PRIMARY_REPLICA_READER) .withReaderType(ReaderContext.ReaderType.STREAM) .build(); try { HFileInfo fileInfo = new HFileInfo(context, conf); HFile.Reader reader = HFile.createReader(context, fileInfo, new CacheConfig(conf), conf); fileInfo.initMetaAndIndex(reader); return reader; } catch (IOException e) { throw new HoodieIOException("Failed to initialize HFile reader for " + dummyPath, e); } }
3.68
hudi_ProtoConversionUtil_getMessageSchema
/** * Translates a Proto Message descriptor into an Avro Schema * @param descriptor the descriptor for the proto message * @param recursionDepths a map of the descriptor to the number of times it has been encountered in this depth first traversal of the schema. * This is used to cap the number of times we recurse on a schema. * @param path a string prefixed with the namespace of the original message being translated to avro and containing the current dot separated path tracking progress through the schema. * This value is used for a namespace when creating Avro records to avoid an error when reusing the same class name when unraveling a recursive schema. * @return an avro schema */ private Schema getMessageSchema(Descriptors.Descriptor descriptor, CopyOnWriteMap<Descriptors.Descriptor, Integer> recursionDepths, String path) { // Parquet does not handle recursive schemas so we "unravel" the proto N levels Integer currentRecursionCount = recursionDepths.getOrDefault(descriptor, 0); if (currentRecursionCount >= maxRecursionDepth) { return RECURSION_OVERFLOW_SCHEMA; } // The current path is used as a namespace to avoid record name collisions within recursive schemas Schema result = Schema.createRecord(descriptor.getName(), null, path, false); recursionDepths.put(descriptor, ++currentRecursionCount); List<Schema.Field> fields = new ArrayList<>(descriptor.getFields().size()); for (Descriptors.FieldDescriptor f : descriptor.getFields()) { // each branch of the schema traversal requires its own recursion depth tracking so copy the recursionDepths map fields.add(new Schema.Field(f.getName(), getFieldSchema(f, new CopyOnWriteMap<>(recursionDepths), path), null, getDefault(f))); } result.setFields(fields); return result; }
3.68
morf_ViewChangesDeploymentHelper_deregisterViewIfExists
/** * Creates SQL statements for removing given view from the view register. * * @param view View to be dropped. * @param updateDeployedViews Whether to update the DeployedViews table. * @return SQL statements to be run to drop the view. * @deprecated kept to ensure backwards compatibility. */ @Deprecated List<String> deregisterViewIfExists(View view, boolean updateDeployedViews) { return dropViewIfExists(view, false, updateDeployedViews); }
3.68
hbase_RegionSizeReportingChore_getPeriod
/** * Extracts the period for the chore from the configuration. * @param conf The configuration object. * @return The configured chore period or the default value. */ static int getPeriod(Configuration conf) { return conf.getInt(REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT); }
3.68
zxing_RSSExpandedReader_mayFollow
// Whether the pairs, plus another pair of the specified type, would together // form a valid finder pattern sequence, either complete or partial private static boolean mayFollow(List<ExpandedPair> pairs, int value) { if (pairs.isEmpty()) { return true; } for (int[] sequence : FINDER_PATTERN_SEQUENCES) { if (pairs.size() + 1 <= sequence.length) { // the proposed sequence (i.e. pairs + value) would fit in this allowed sequence for (int i = pairs.size(); i < sequence.length; i++) { if (sequence[i] == value) { // we found our value in this allowed sequence, check to see if the elements preceding it match our existing // pairs; note our existing pairs may not be a full sequence (e.g. if processing a row in a stacked symbol) boolean matched = true; for (int j = 0; j < pairs.size(); j++) { int allowed = sequence[i - j - 1]; int actual = pairs.get(pairs.size() - j - 1).getFinderPattern().getValue(); if (allowed != actual) { matched = false; break; } } if (matched) { return true; } } } } } // the proposed finder pattern sequence is illegal return false; }
3.68