Columns:
name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, range 3.26 to 3.68
streampipes_TerminatingBlocksFinder_startsWithNumber
/**
 * Checks whether the given text t starts with a sequence of digits, followed by one of the given
 * strings.
 *
 * @param t The text to examine
 * @param len The length of the text to examine
 * @param str Any strings that may follow the digits.
 * @return true if at least one combination matches
 */
private static boolean startsWithNumber(final String t, final int len, final String... str) {
    int j = 0;
    while (j < len && isDigit(t.charAt(j))) {
        j++;
    }
    if (j != 0) {
        for (String s : str) {
            if (t.startsWith(s, j)) {
                return true;
            }
        }
    }
    return false;
}
3.68
hadoop_PendingSet_getJobId
/** @return Job ID, if known. */
public String getJobId() {
    return jobId;
}
3.68
hbase_HFileCleaner_checkAndUpdateConfigurations
/**
 * Check new configuration and update settings if value changed
 * @param conf The new configuration
 * @return true if any configuration for HFileCleaner changes, false if no change
 */
private boolean checkAndUpdateConfigurations(Configuration conf) {
    boolean updated = false;
    int throttlePoint =
        conf.getInt(HFILE_DELETE_THROTTLE_THRESHOLD, DEFAULT_HFILE_DELETE_THROTTLE_THRESHOLD);
    if (throttlePoint != this.throttlePoint) {
        LOG.debug("Updating throttle point, from {} to {}", this.throttlePoint, throttlePoint);
        this.throttlePoint = throttlePoint;
        updated = true;
    }
    int largeQueueInitSize =
        conf.getInt(LARGE_HFILE_QUEUE_INIT_SIZE, DEFAULT_LARGE_HFILE_QUEUE_INIT_SIZE);
    if (largeQueueInitSize != this.largeQueueInitSize) {
        LOG.debug("Updating largeQueueInitSize, from {} to {}", this.largeQueueInitSize,
            largeQueueInitSize);
        this.largeQueueInitSize = largeQueueInitSize;
        updated = true;
    }
    int smallQueueInitSize =
        conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
    if (smallQueueInitSize != this.smallQueueInitSize) {
        LOG.debug("Updating smallQueueInitSize, from {} to {}", this.smallQueueInitSize,
            smallQueueInitSize);
        this.smallQueueInitSize = smallQueueInitSize;
        updated = true;
    }
    int largeFileDeleteThreadNumber =
        conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
    if (largeFileDeleteThreadNumber != this.largeFileDeleteThreadNumber) {
        LOG.debug("Updating largeFileDeleteThreadNumber, from {} to {}",
            this.largeFileDeleteThreadNumber, largeFileDeleteThreadNumber);
        this.largeFileDeleteThreadNumber = largeFileDeleteThreadNumber;
        updated = true;
    }
    int smallFileDeleteThreadNumber =
        conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
    if (smallFileDeleteThreadNumber != this.smallFileDeleteThreadNumber) {
        LOG.debug("Updating smallFileDeleteThreadNumber, from {} to {}",
            this.smallFileDeleteThreadNumber, smallFileDeleteThreadNumber);
        this.smallFileDeleteThreadNumber = smallFileDeleteThreadNumber;
        updated = true;
    }
    long cleanerThreadTimeoutMsec =
        conf.getLong(HFILE_DELETE_THREAD_TIMEOUT_MSEC, DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC);
    if (cleanerThreadTimeoutMsec != this.cleanerThreadTimeoutMsec) {
        this.cleanerThreadTimeoutMsec = cleanerThreadTimeoutMsec;
        updated = true;
    }
    long cleanerThreadCheckIntervalMsec = conf.getLong(HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
        DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC);
    if (cleanerThreadCheckIntervalMsec != this.cleanerThreadCheckIntervalMsec) {
        this.cleanerThreadCheckIntervalMsec = cleanerThreadCheckIntervalMsec;
        updated = true;
    }
    return updated;
}
3.68
flink_SqlLikeUtils_ilike
/** SQL {@code ILIKE} function with escape. */
public static boolean ilike(String s, String patternStr, String escape) {
    final String regex = sqlToRegexLike(patternStr, escape);
    Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE);
    Matcher matcher = pattern.matcher(s);
    return matcher.matches();
}
3.68
morf_AbstractSqlDialectTest_testSimpleUpdate
/**
 * Tests that a simple update with field literal works.
 */
@Test
public void testSimpleUpdate() {
    UpdateStatement stmt = new UpdateStatement(new TableReference(TEST_TABLE))
        .set(new FieldLiteral("A1001001").as(STRING_FIELD));
    String value = varCharCast("'A1001001'");
    String expectedSql =
        "UPDATE " + tableName(TEST_TABLE) + " SET stringField = " + stringLiteralPrefix() + value;
    assertEquals("Simple update", expectedSql, testDialect.convertStatementToSQL(stmt));
}
3.68
AreaShop_FileManager_loadRegionFiles
/**
 * Load all region files.
 */
public void loadRegionFiles() {
    regions.clear();
    final File file = new File(regionsPath);
    if(!file.exists()) {
        if(!file.mkdirs()) {
            AreaShop.warn("Could not create region files directory: " + file.getAbsolutePath());
            return;
        }
        plugin.setReady(true);
    } else if(file.isDirectory()) {
        loadRegionFilesNow();
    }
}
3.68
hadoop_ResourceUsage_getAMUsed
/*
 * AM-Used
 */
public Resource getAMUsed() {
    return getAMUsed(NL);
}
3.68
flink_OpaqueMemoryResource_getSize
/** Gets the size, in bytes. */
public long getSize() {
    return size;
}
3.68
flink_JoinOperator_projectTuple11
/**
 * Projects a pair of joined elements to a {@link Tuple} with the previously selected
 * fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>
        ProjectJoin<I1, I2, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> projectTuple11() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> tType =
        new TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(fTypes);
    return new ProjectJoin<I1, I2, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(
        this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes,
        this.isFieldInFirst, tType, this);
}
3.68
hbase_Bytes_readAsVLong
/**
 * Reads a zero-compressed encoded long from input buffer and returns it.
 * @param buffer Binary array
 * @param offset Offset into array at which vint begins.
 * @return deserialized long from buffer.
 */
public static long readAsVLong(final byte[] buffer, final int offset) {
    byte firstByte = buffer[offset];
    int len = WritableUtils.decodeVIntSize(firstByte);
    if (len == 1) {
        return firstByte;
    }
    long i = 0;
    for (int idx = 0; idx < len - 1; idx++) {
        byte b = buffer[offset + 1 + idx];
        i = i << 8;
        i = i | (b & 0xFF);
    }
    return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i);
}
3.68
dubbo_PathUtil_resolvePathVariable
/**
 * Generate the real path from rawPath according to argInfo and method args.
 *
 * @param rawPath the raw path template
 * @param argInfos metadata about the method arguments
 * @param args the actual method arguments
 * @return the resolved path
 */
public static String resolvePathVariable(String rawPath, List<ArgInfo> argInfos, List<Object> args) {
    String[] split = rawPath.split(SEPARATOR);
    List<String> strings = Arrays.asList(split);
    List<ArgInfo> pathArgInfos = new ArrayList<>();
    for (ArgInfo argInfo : argInfos) {
        if (ParamType.PATH.supportAnno(argInfo.getParamAnnotationType())) {
            pathArgInfos.add(argInfo);
        }
    }
    for (ArgInfo pathArgInfo : pathArgInfos) {
        strings.set(pathArgInfo.getUrlSplitIndex(), String.valueOf(args.get(pathArgInfo.getIndex())));
    }
    String pat = SEPARATOR;
    for (String string : strings) {
        if (string.length() == 0) {
            continue;
        }
        pat = pat + string + SEPARATOR;
    }
    if (pat.endsWith(SEPARATOR)) {
        pat = pat.substring(0, pat.lastIndexOf(SEPARATOR));
    }
    return pat;
}
3.68
hbase_ReplicationSink_stopReplicationSinkServices
/**
 * stop the thread pool executor. It is called when the regionserver is stopped.
 */
public void stopReplicationSinkServices() {
    try {
        if (this.sharedConn != null) {
            synchronized (sharedConnLock) {
                if (this.sharedConn != null) {
                    this.sharedConn.close();
                    this.sharedConn = null;
                }
            }
        }
    } catch (IOException e) {
        LOG.warn("IOException while closing the connection", e); // ignoring as we are closing.
    }
}
3.68
morf_UpdateStatement_getTable
/**
 * Gets the table being updated
 *
 * @return the table being updated
 */
public TableReference getTable() {
    return table;
}
3.68
hadoop_Nfs3Constant_fromValue
/**
 * Convert to NFS procedure.
 * @param value specify the index of NFS procedure
 * @return the procedure corresponding to the value.
 */
public static NFSPROC3 fromValue(int value) {
    if (value < 0 || value >= values().length) {
        return null;
    }
    return values()[value];
}
3.68
flink_CommonTestUtils_setEnv
// This code is taken slightly modified from: http://stackoverflow.com/a/7201825/568695
// it changes the environment variables of this JVM. Use only for testing purposes!
@SuppressWarnings("unchecked")
public static void setEnv(Map<String, String> newenv, boolean clearExisting) {
    try {
        Map<String, String> env = System.getenv();
        Class<?> clazz = env.getClass();
        Field field = clazz.getDeclaredField("m");
        field.setAccessible(true);
        Map<String, String> map = (Map<String, String>) field.get(env);
        if (clearExisting) {
            map.clear();
        }
        map.putAll(newenv);
        // only for Windows
        Class<?> processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment");
        try {
            Field theCaseInsensitiveEnvironmentField =
                processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment");
            theCaseInsensitiveEnvironmentField.setAccessible(true);
            Map<String, String> cienv =
                (Map<String, String>) theCaseInsensitiveEnvironmentField.get(null);
            if (clearExisting) {
                cienv.clear();
            }
            cienv.putAll(newenv);
        } catch (NoSuchFieldException ignored) {
        }
    } catch (Exception e1) {
        throw new RuntimeException(e1);
    }
}
3.68
morf_UpdateStatement_shallowCopy
/**
 * Performs a shallow copy to a builder, allowing a duplicate
 * to be created and modified.
 *
 * @return A builder, initialised as a duplicate of this statement.
 */
@Override
public UpdateStatementBuilder shallowCopy() {
    return new UpdateStatementBuilder(this);
}
3.68
flink_DefaultRollingPolicy_getInactivityInterval
/**
 * Returns time duration of allowed inactivity after which a part file will have to roll.
 *
 * @return Time duration in milliseconds
 */
public long getInactivityInterval() {
    return inactivityInterval;
}
3.68
flink_PekkoUtils_getAddressFromRpcURL
/**
 * Extracts the {@link Address} from the given pekko URL.
 *
 * @param rpcURL to extract the {@link Address} from
 * @throws MalformedURLException if the {@link Address} could not be parsed from the given pekko
 *     URL
 * @return Extracted {@link Address} from the given rpc URL
 */
@SuppressWarnings("RedundantThrows") // hidden checked exception coming from Pekko
public static Address getAddressFromRpcURL(String rpcURL) throws MalformedURLException {
    return AddressFromURIString.apply(rpcURL);
}
3.68
dubbo_Environment_reset
/**
 * Reset environment.
 * For test only.
 */
public void reset() {
    destroy();
    initialize();
}
3.68
hudi_SixToFiveDowngradeHandler_runCompaction
/**
 * Utility method to run compaction for MOR table as part of downgrade step.
 */
private void runCompaction(HoodieTable table, HoodieEngineContext context, HoodieWriteConfig config,
                           SupportsUpgradeDowngrade upgradeDowngradeHelper) {
    try {
        if (table.getMetaClient().getTableType() == HoodieTableType.MERGE_ON_READ) {
            // set required configs for scheduling compaction.
            HoodieInstantTimeGenerator.setCommitTimeZone(table.getMetaClient().getTableConfig().getTimelineTimezone());
            HoodieWriteConfig compactionConfig = HoodieWriteConfig.newBuilder().withProps(config.getProps()).build();
            compactionConfig.setValue(HoodieCompactionConfig.INLINE_COMPACT.key(), "true");
            compactionConfig.setValue(HoodieCompactionConfig.INLINE_COMPACT_NUM_DELTA_COMMITS.key(), "1");
            compactionConfig.setValue(HoodieCompactionConfig.INLINE_COMPACT_TRIGGER_STRATEGY.key(),
                CompactionTriggerStrategy.NUM_COMMITS.name());
            compactionConfig.setValue(HoodieCompactionConfig.COMPACTION_STRATEGY.key(),
                UnBoundedCompactionStrategy.class.getName());
            compactionConfig.setValue(HoodieMetadataConfig.ENABLE.key(), "false");
            try (BaseHoodieWriteClient writeClient =
                     upgradeDowngradeHelper.getWriteClient(compactionConfig, context)) {
                Option<String> compactionInstantOpt = writeClient.scheduleCompaction(Option.empty());
                if (compactionInstantOpt.isPresent()) {
                    writeClient.compact(compactionInstantOpt.get());
                }
            }
        }
    } catch (Exception e) {
        throw new HoodieException(e);
    }
}
3.68
hbase_LruAdaptiveBlockCache_getStats
/**
 * Get counter statistics for this cache.
 * <p>
 * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes.
 */
@Override
public CacheStats getStats() {
    return this.stats;
}
3.68
flink_ListStateDescriptor_getElementSerializer
/**
 * Gets the serializer for the elements contained in the list.
 *
 * @return The serializer for the elements in the list.
 */
public TypeSerializer<T> getElementSerializer() {
    // call getSerializer() here to get the initialization check and proper error message
    final TypeSerializer<List<T>> rawSerializer = getSerializer();
    if (!(rawSerializer instanceof ListSerializer)) {
        throw new IllegalStateException();
    }
    return ((ListSerializer<T>) rawSerializer).getElementSerializer();
}
3.68
hadoop_ReadStatistics_getBlockType
/**
 * @return block type of the input stream. If block type != CONTIGUOUS,
 *     it is reading erasure coded data.
 */
public synchronized BlockType getBlockType() {
    return blockType;
}
3.68
hbase_ByteBuff_read
// static helper methods
public static int read(ReadableByteChannel channel, ByteBuffer buf, long offset,
    ChannelReader reader) throws IOException {
    if (buf.remaining() <= NIO_BUFFER_LIMIT) {
        return reader.read(channel, buf, offset);
    }
    int originalLimit = buf.limit();
    int initialRemaining = buf.remaining();
    int ret = 0;
    while (buf.remaining() > 0) {
        try {
            int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
            buf.limit(buf.position() + ioSize);
            offset += ret;
            ret = reader.read(channel, buf, offset);
            if (ret < ioSize) {
                break;
            }
        } finally {
            buf.limit(originalLimit);
        }
    }
    int nBytes = initialRemaining - buf.remaining();
    return (nBytes > 0) ? nBytes : ret;
}
3.68
pulsar_OwnershipCache_updateBundleState
/**
 * Update bundle state in a local cache.
 *
 * @param bundle
 * @throws Exception
 */
public CompletableFuture<Void> updateBundleState(NamespaceBundle bundle, boolean isActive) {
    // Disable owned instance in local cache
    CompletableFuture<OwnedBundle> f = ownedBundlesCache.getIfPresent(bundle);
    if (f != null && f.isDone() && !f.isCompletedExceptionally()) {
        return f.thenAccept(ob -> ob.setActive(isActive));
    } else {
        return CompletableFuture.completedFuture(null);
    }
}
3.68
graphhopper_NavigateResponseConverter_getThenVoiceInstructionpart
/**
 * For close turns, it is important to announce the next turn in the earlier instruction.
 * e.g.: instruction i+1= turn right, instruction i+2=turn left, with instruction i+1 distance < VOICE_INSTRUCTION_MERGE_TRESHHOLD
 * The voice instruction should be like "turn right, then turn left"
 * <p>
 * For instruction i+1 distance > VOICE_INSTRUCTION_MERGE_TRESHHOLD an empty String will be returned
 */
private static String getThenVoiceInstructionpart(InstructionList instructions, int index, Locale locale,
                                                  TranslationMap translationMap) {
    if (instructions.size() > index + 2) {
        Instruction firstInstruction = instructions.get(index + 1);
        if (firstInstruction.getDistance() < VOICE_INSTRUCTION_MERGE_TRESHHOLD) {
            Instruction secondInstruction = instructions.get(index + 2);
            if (secondInstruction.getSign() != Instruction.REACHED_VIA)
                return ", " + translationMap.getWithFallBack(locale).tr("navigate.then") + " "
                        + secondInstruction.getTurnDescription(translationMap.getWithFallBack(locale));
        }
    }
    return "";
}
3.68
pulsar_PulsarClientImpl_getConnection
/**
 * Only for test.
 */
@VisibleForTesting
public CompletableFuture<ClientCnx> getConnection(final String topic) {
    TopicName topicName = TopicName.get(topic);
    return lookup.getBroker(topicName)
            .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight(),
                cnxPool.genRandomKeyToSelectCon()));
}
3.68
flink_RemoteInputChannel_getAndResetUnannouncedCredit
/**
 * Gets the unannounced credit and resets it to <tt>0</tt> atomically.
 *
 * @return Credit which was not announced to the sender yet.
 */
public int getAndResetUnannouncedCredit() {
    return unannouncedCredit.getAndSet(0);
}
3.68
framework_Page_showNotification
/**
 * Shows a notification message.
 *
 * @see Notification
 *
 * @param notification
 *            The notification message to show
 *
 * @deprecated As of 7.0, use Notification.show(Page) instead.
 */
@Deprecated
public void showNotification(Notification notification) {
    notification.show(this);
}
3.68
hudi_HoodieFlinkWriteClient_getOrCreateWriteHandle
/**
 * Get or create a new write handle in order to reuse the file handles.
 *
 * @param record      The first record in the bucket
 * @param config      Write config
 * @param instantTime The instant time
 * @param table       The table
 * @param recordItr   Record iterator
 * @param overwrite   Whether this is an overwrite operation
 * @return Existing write handle or create a new one
 */
private HoodieWriteHandle<?, ?, ?, ?> getOrCreateWriteHandle(
    HoodieRecord<T> record,
    HoodieWriteConfig config,
    String instantTime,
    HoodieTable<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> table,
    Iterator<HoodieRecord<T>> recordItr,
    boolean overwrite) {
    // caution: it's not a good practice to modify the handles internal.
    FlinkWriteHandleFactory.Factory<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> writeHandleFactory =
        FlinkWriteHandleFactory.getFactory(table.getMetaClient().getTableConfig(), config, overwrite);
    return writeHandleFactory.create(this.bucketToHandles, record, config, instantTime, table, recordItr);
}
3.68
hadoop_AzureBlobFileSystem_setOwner
/**
 * Set owner of a path (i.e. a file or a directory).
 * The parameters owner and group cannot both be null.
 *
 * @param path The path
 * @param owner If it is null, the original username remains unchanged.
 * @param group If it is null, the original groupname remains unchanged.
 */
@Override
public void setOwner(final Path path, final String owner, final String group)
    throws IOException {
    LOG.debug("AzureBlobFileSystem.setOwner path: {}", path);
    TracingContext tracingContext = new TracingContext(clientCorrelationId,
        fileSystemId, FSOperationType.SET_OWNER, true, tracingHeaderFormat, listener);
    if (!getIsNamespaceEnabled(tracingContext)) {
        super.setOwner(path, owner, group);
        return;
    }
    if ((owner == null || owner.isEmpty()) && (group == null || group.isEmpty())) {
        throw new IllegalArgumentException("A valid owner or group must be specified.");
    }
    Path qualifiedPath = makeQualified(path);
    try {
        abfsStore.setOwner(qualifiedPath, owner, group, tracingContext);
    } catch (AzureBlobFileSystemException ex) {
        checkException(path, ex);
    }
}
3.68
hadoop_StoreContext_createThrottledExecutor
/**
 * Create a new executor with the capacity defined in
 * {@link #executorCapacity}.
 * @return a new executor for exclusive use by the caller.
 */
public ExecutorService createThrottledExecutor() {
    return createThrottledExecutor(executorCapacity);
}
3.68
framework_LayoutManager_getPaddingLeft
/**
 * Gets the left padding of the given element, provided that it has been
 * measured. These elements are guaranteed to be measured:
 * <ul>
 * <li>ManagedLayouts and their child Connectors
 * <li>Elements for which there is at least one ElementResizeListener
 * <li>Elements for which at least one ManagedLayout has registered a
 * dependency
 * </ul>
 *
 * A negative number is returned if the element has not been measured. If 0
 * is returned, it might indicate that the element is not attached to the
 * DOM.
 *
 * @param element
 *            the element to get the measured size for
 * @return the measured left padding of the element in pixels.
 */
public int getPaddingLeft(Element element) {
    assert needsMeasure(
            element) : "Getting measurement for element that is not measured";
    return getMeasuredSize(element, nullSize).getPaddingLeft();
}
3.68
flink_FactoryUtil_createCatalogStoreFactoryHelper
/**
 * Creates a utility that helps validating options for a {@link CatalogStoreFactory}.
 *
 * <p>Note: This utility checks for left-over options in the final step.
 */
public static CatalogStoreFactoryHelper createCatalogStoreFactoryHelper(
        CatalogStoreFactory factory, CatalogStoreFactory.Context context) {
    return new CatalogStoreFactoryHelper(factory, context);
}
3.68
hbase_StorageClusterStatusModel_getMaxHeapSizeMB
/** Returns the maximum heap size, in MB */
@XmlAttribute
public int getMaxHeapSizeMB() {
    return maxHeapSizeMB;
}
3.68
MagicPlugin_SpellResult_isFailure
/**
 * Determine if this result is a failure or not.
 *
 * <p>Note that a spell result can be neither failure nor
 * success.
 *
 * @return True if this cast was a failure.
 */
public boolean isFailure() {
    return failure;
}
3.68
hadoop_AbstractManagedParentQueue_removeChildQueue
/**
 * Remove the specified child queue.
 * @param childQueueName name of the child queue to be removed
 * @return child queue.
 * @throws SchedulerDynamicEditException when removeChildQueue fails.
 */
public CSQueue removeChildQueue(String childQueueName)
    throws SchedulerDynamicEditException {
    CSQueue childQueue;
    writeLock.lock();
    try {
        childQueue = queueContext.getQueueManager().getQueue(childQueueName);
        if (childQueue != null) {
            removeChildQueue(childQueue);
        } else {
            throw new SchedulerDynamicEditException("Cannot find queue to delete "
                + ": " + childQueueName);
        }
    } finally {
        writeLock.unlock();
    }
    return childQueue;
}
3.68
hadoop_RequestFactoryImpl_getBucket
/**
 * Get the target bucket.
 * @return the bucket.
 */
protected String getBucket() {
    return bucket;
}
3.68
hbase_CatalogFamilyFormat_getServerNameColumn
/**
 * Returns the column qualifier for serialized region state
 * @param replicaId the replicaId of the region
 * @return a byte[] for sn column qualifier
 */
public static byte[] getServerNameColumn(int replicaId) {
    return replicaId == 0
        ? HConstants.SERVERNAME_QUALIFIER
        : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
            + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
}
3.68
framework_AbstractJavaScriptExtension_addFunction
/**
 * Register a {@link JavaScriptFunction} that can be called from the
 * JavaScript using the provided name. A JavaScript function with the
 * provided name will be added to the connector wrapper object (initially
 * available as <code>this</code>). Calling that JavaScript function will
 * cause the call method in the registered {@link JavaScriptFunction} to be
 * invoked with the same arguments.
 *
 * @param functionName
 *            the name that should be used for client-side callback
 * @param function
 *            the {@link JavaScriptFunction} object that will be invoked
 *            when the JavaScript function is called
 */
protected void addFunction(String functionName, JavaScriptFunction function) {
    callbackHelper.registerCallback(functionName, function);
}
3.68
flink_MapView_entries
/**
 * Returns all entries of the map view.
 *
 * @return An iterable of all the key-value pairs in the map view.
 * @throws Exception Thrown if the system cannot access the map.
 */
public Iterable<Map.Entry<K, V>> entries() throws Exception {
    return map.entrySet();
}
3.68
framework_InMemoryDataProviderHelpers_propertyComparator
/**
 * Creates a comparator for the return type of the given
 * {@link ValueProvider}, sorted in the direction specified by the given
 * {@link SortDirection}.
 *
 * @param valueProvider
 *            the value provider to use
 * @param sortDirection
 *            the sort direction to use
 * @return the created comparator
 */
public static <V extends Comparable<? super V>, T> SerializableComparator<T> propertyComparator(
        ValueProvider<T, V> valueProvider, SortDirection sortDirection) {
    Objects.requireNonNull(valueProvider, "Value provider cannot be null");
    Objects.requireNonNull(sortDirection, "Sort direction cannot be null");
    Comparator<V> comparator = getNaturalSortComparator(sortDirection);
    return (a, b) -> comparator.compare(valueProvider.apply(a), valueProvider.apply(b));
}
3.68
hadoop_YarnRegistryViewForProviders_deleteChildren
/**
 * Delete the children of a path, but not the path itself.
 * It is not an error if the path does not exist.
 * @param path path to delete
 * @param recursive flag to request recursive deletes
 * @throws IOException IO problems
 */
public void deleteChildren(String path, boolean recursive) throws IOException {
    List<String> childNames = null;
    try {
        childNames = registryOperations.list(path);
    } catch (PathNotFoundException e) {
        return;
    }
    for (String childName : childNames) {
        String child = join(path, childName);
        registryOperations.delete(child, recursive);
    }
}
3.68
framework_UIDL_getFloatVariable
/**
 * Gets the value of the named variable.
 *
 * @param name
 *            the name of the variable
 * @return the value of the variable
 */
public float getFloatVariable(String name) {
    return (float) var().getRawNumber(name);
}
3.68
framework_UIConnector_scrollIntoView
/**
 * Tries to scroll the viewport so that the given connector is in view.
 *
 * @param componentConnector
 *            The connector which should be visible
 */
public void scrollIntoView(final ComponentConnector componentConnector) {
    if (componentConnector == null) {
        return;
    }
    Scheduler.get().scheduleDeferred(() -> componentConnector.getWidget()
            .getElement().scrollIntoView());
}
3.68
querydsl_BeanPath_createSet
/**
 * Create a new Set typed path
 *
 * @param <A>
 * @param property property name
 * @param type property type
 * @return property path
 */
@SuppressWarnings("unchecked")
protected <A, E extends SimpleExpression<? super A>> SetPath<A, E> createSet(String property,
        Class<? super A> type, Class<? super E> queryType, PathInits inits) {
    return add(new SetPath<A, E>(type, (Class) queryType, forProperty(property), inits));
}
3.68
morf_UpdateStatement_getWhereCriterion
/**
 * Gets the where criteria.
 *
 * @return the where criteria
 */
public Criterion getWhereCriterion() {
    return whereCriterion;
}
3.68
hbase_StorageClusterStatusModel_getRegion
/**
 * @param index the index
 * @return the region at the given index
 */
public Region getRegion(int index) {
    return regions.get(index);
}
3.68
hadoop_RemoteEditLogManifest_checkState
/**
 * Check that the logs are non-overlapping sequences of transactions,
 * in sorted order. They do not need to be contiguous.
 * @throws IllegalStateException if incorrect
 */
private void checkState() {
    Preconditions.checkNotNull(logs);
    RemoteEditLog prev = null;
    for (RemoteEditLog log : logs) {
        if (prev != null) {
            if (log.getStartTxId() <= prev.getEndTxId()) {
                throw new IllegalStateException(
                    "Invalid log manifest (log " + log + " overlaps " + prev + ")\n" + this);
            }
        }
        prev = log;
    }
}
3.68
flink_MurmurHashUtil_fmix
// Finalization mix - force all bits of a hash block to avalanche
private static int fmix(int h1, int length) {
    h1 ^= length;
    return fmix(h1);
}
3.68
hadoop_StringValueMin_getReport
/**
 * @return the string representation of the aggregated value
 */
public String getReport() {
    return minVal;
}
3.68
hbase_HealthReport_getHealthReport
/**
 * Gets the health report of the region server.
 */
String getHealthReport() {
    return healthReport;
}
3.68
dubbo_MetadataParamsFilter_instanceParamsExcluded
/**
 * params that need to be excluded before sending to registry center
 *
 * @return arrays of keys
 */
default String[] instanceParamsExcluded() {
    return new String[0];
}
3.68
hbase_RestoreTool_getTableInfoPath
/**
 * Returns the path to the table info, e.g.
 * "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/snapshot_1396650097621_namespace_table".
 * This path contains .snapshotinfo and .tabledesc (0.96 and 0.98), or .snapshotinfo and
 * .data.manifest (trunk).
 * @param tableName table name
 * @return path to table info
 * @throws IOException exception
 */
Path getTableInfoPath(TableName tableName) throws IOException {
    Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
    Path tableInfoPath = null;
    // can't build the path directly as the timestamp values are different
    FileStatus[] snapshots = fs.listStatus(tableSnapShotPath,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
    for (FileStatus snapshot : snapshots) {
        tableInfoPath = snapshot.getPath();
        // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest";
        if (tableInfoPath.getName().endsWith("data.manifest")) {
            break;
        }
    }
    return tableInfoPath;
}
3.68
framework_VCustomLayout_initImgElements
/**
 * Img elements need some special handling in custom layout. Img elements
 * will get their onload events sunk. This way custom layout can notify
 * parent about possible size change.
 */
private void initImgElements() {
    NodeList<Element> nodeList = getElement().getElementsByTagName("IMG");
    for (int i = 0; i < nodeList.getLength(); i++) {
        ImageElement img = ImageElement.as(nodeList.getItem(i));
        DOM.sinkEvents(img, Event.ONLOAD);
    }
}
3.68
flink_GenericDataSinkBase_getFormatWrapper
/**
 * Gets the class describing this sink's output format.
 *
 * @return The output format class.
 */
public UserCodeWrapper<? extends OutputFormat<IN>> getFormatWrapper() {
    return this.formatWrapper;
}
3.68
hibernate-validator_TokenIterator_nextInterpolationTerm
/**
 * @return Returns the next interpolation term
 */
public String nextInterpolationTerm() {
    if ( !currentTokenAvailable ) {
        throw new IllegalStateException(
            "Trying to call #nextInterpolationTerm without calling #hasMoreInterpolationTerms" );
    }
    currentTokenAvailable = false;
    return currentToken.getTokenValue();
}
3.68
hmily_NetUtils_getLocalIp
/**
 * Gets local ip.
 *
 * @return the local ip
 */
public static String getLocalIp() {
    if (localAddress == null) {
        synchronized (NetUtils.class) {
            if (localAddress == null) {
                try {
                    localAddress = InetAddress.getLocalHost().getHostAddress();
                } catch (UnknownHostException e) {
                    localAddress = "0.0.0.0";
                }
            }
        }
    }
    return localAddress;
}
3.68
flink_OutputFormatBase_writeRecord
/**
 * Asynchronously write a record and deal with {@link OutputFormatBase#maxConcurrentRequests}.
 * To specify how a record is written, please override the {@link OutputFormatBase#send(Object)}
 * method.
 */
@Override
public final void writeRecord(OUT record) throws IOException {
    checkAsyncErrors();
    tryAcquire(1);
    final CompletionStage<V> completionStage;
    try {
        completionStage = send(record);
    } catch (Throwable e) {
        semaphore.release();
        throw e;
    }
    completionStage.whenComplete(
        (result, throwable) -> {
            if (throwable == null) {
                callback.onSuccess(result);
            } else {
                callback.onFailure(throwable);
            }
        });
}
3.68
hbase_RegionPlan_getDestination
/**
 * Get the destination server for the plan for this region.
 * @return server info for destination
 */
public ServerName getDestination() {
    return dest;
}
3.68
graphhopper_RestrictionConverter_convert
/**
 * OSM restriction relations specify turn restrictions between OSM ways (of course). This method converts such a
 * relation into a 'graph' representation, where the turn restrictions are specified in terms of edge/node IDs instead
 * of OSM IDs.
 *
 * @throws OSMRestrictionException if the given relation is either not valid in some way and/or cannot be handled and
 *                                 shall be ignored
 */
public static Triple<ReaderRelation, GraphRestriction, RestrictionMembers> convert(ReaderRelation relation, BaseGraph baseGraph, LongFunction<Iterator<IntCursor>> edgesByWay) throws OSMRestrictionException {
    if (!isTurnRestriction(relation))
        throw new IllegalArgumentException("expected a turn restriction: " + relation.getTags());
    RestrictionMembers restrictionMembers = extractMembers(relation);
    if (!membersExist(restrictionMembers, edgesByWay, relation))
        throw OSMRestrictionException.withoutWarning();
    // every OSM way might be split into *multiple* edges, so now we need to figure out which edges are the ones
    // that are actually part of the given relation
    WayToEdgeConverter wayToEdgeConverter = new WayToEdgeConverter(baseGraph, edgesByWay);
    if (restrictionMembers.isViaWay()) {
        WayToEdgeConverter.EdgeResult res = wayToEdgeConverter
                .convertForViaWays(restrictionMembers.getFromWays(), restrictionMembers.getViaWays(), restrictionMembers.getToWays());
        return new Triple<>(relation,
                GraphRestriction.way(res.getFromEdges(), res.getViaEdges(), res.getToEdges(), res.getNodes()),
                restrictionMembers);
    } else {
        int viaNode = relation.getTag("graphhopper:via_node", -1);
        if (viaNode < 0)
            throw new IllegalStateException("For some reason we did not set graphhopper:via_node for this relation: " + relation.getId());
        WayToEdgeConverter.NodeResult res = wayToEdgeConverter
                .convertForViaNode(restrictionMembers.getFromWays(), viaNode, restrictionMembers.getToWays());
        return new Triple<>(relation,
                GraphRestriction.node(res.getFromEdges(), viaNode, res.getToEdges()),
                restrictionMembers);
    }
}
3.68
streampipes_StreamRequirementsBuilder_requiredPropertyWithUnaryMapping
/**
 * Sets a new property requirement and, in addition, adds a
 * {@link org.apache.streampipes.model.staticproperty.MappingPropertyUnary} static property to the pipeline element
 * definition.
 *
 * @param propertyRequirement The property requirement.
 *                            Use {@link org.apache.streampipes.sdk.helpers.EpRequirements} to
 *                            create a new requirement.
 * @param label               The {@link org.apache.streampipes.sdk.helpers.Label} that defines the mapping property.
 * @param propertyScope       The {@link org.apache.streampipes.model.schema.PropertyScope} of the requirement.
 * @return this
 */
public StreamRequirementsBuilder requiredPropertyWithUnaryMapping(EventProperty propertyRequirement,
                                                                  Label label,
                                                                  PropertyScope propertyScope) {
    propertyRequirement.setRuntimeName(label.getInternalId());
    this.eventProperties.add(propertyRequirement);
    MappingPropertyUnary mp = new MappingPropertyUnary(label.getInternalId(),
        label.getInternalId(), label.getLabel(), label.getDescription());
    mp.setPropertyScope(propertyScope.name());
    this.mappingProperties.add(mp);
    return this;
}
3.68
hbase_CleanerChore_shouldExclude
/**
 * Check if a path should be excluded from cleaning.
 */
private boolean shouldExclude(FileStatus f) {
    if (!f.isDirectory()) {
        return false;
    }
    if (excludeDirs != null && !excludeDirs.isEmpty()) {
        for (String dirPart : excludeDirs) {
            // since we make excludeDirs end with '/',
            // if a path contains() the dirPart, the path should be excluded
            if (f.getPath().toString().contains(dirPart)) {
                return true;
            }
        }
    }
    return false;
}
3.68
hadoop_ReadStatistics_getTotalZeroCopyBytesRead
/**
 * @return The total number of zero-copy bytes read.
 */
public synchronized long getTotalZeroCopyBytesRead() {
    return totalZeroCopyBytesRead;
}
3.68
framework_VaadinSession_getConverterFactory
/**
 * Gets the {@code ConverterFactory} used to locate a suitable
 * {@code Converter} for fields in the session.
 * <p>
 * Note that this and {@link #setConverterFactory(Object)} use Object
 * and not {@code ConverterFactory} in Vaadin 8 to avoid a core dependency
 * on the compatibility packages.
 *
 * @return The converter factory used in the session
 */
@Deprecated
public Object getConverterFactory() {
    assert hasLock();
    return converterFactory;
}
3.68
hadoop_AuthenticationHandlerUtil_matchAuthScheme
/**
 * This method checks if the specified <code>authToken</code> belongs to the
 * specified HTTP authentication <code>scheme</code>.
 *
 * @param scheme HTTP authentication scheme to be checked
 * @param auth Authentication header value which is to be compared with the
 *             authentication scheme.
 * @return true if the authentication header value corresponds to the
 *         specified authentication scheme, false otherwise.
 */
public static boolean matchAuthScheme(String scheme, String auth) {
    if (scheme == null) {
        throw new NullPointerException();
    }
    scheme = scheme.trim();
    if (auth == null) {
        throw new NullPointerException();
    }
    auth = auth.trim();
    return auth.regionMatches(true, 0, scheme, 0, scheme.length());
}
3.68
hbase_Result_getFamilyMap
/**
 * Map of qualifiers to values.
 * <p>
 * Returns a Map of the form: <code>Map&lt;qualifier,value&gt;</code>
 * @param family column family to get
 * @return map of qualifiers to values
 */
public NavigableMap<byte[], byte[]> getFamilyMap(byte[] family) {
    if (this.familyMap == null) {
        getMap();
    }
    if (isEmpty()) {
        return null;
    }
    NavigableMap<byte[], byte[]> returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    NavigableMap<byte[], NavigableMap<Long, byte[]>> qualifierMap = familyMap.get(family);
    if (qualifierMap == null) {
        return returnMap;
    }
    for (Map.Entry<byte[], NavigableMap<Long, byte[]>> entry : qualifierMap.entrySet()) {
        byte[] value = entry.getValue().get(entry.getValue().firstKey());
        returnMap.put(entry.getKey(), value);
    }
    return returnMap;
}
3.68
hudi_HoodieInputFormatUtils_getTableMetaClientForBasePathUnchecked
/**
 * Extract HoodieTableMetaClient from a partition path (not base path).
 */
public static HoodieTableMetaClient getTableMetaClientForBasePathUnchecked(Configuration conf, Path partitionPath)
    throws IOException {
    Path baseDir = partitionPath;
    FileSystem fs = partitionPath.getFileSystem(conf);
    if (HoodiePartitionMetadata.hasPartitionMetadata(fs, partitionPath)) {
        HoodiePartitionMetadata metadata = new HoodiePartitionMetadata(fs, partitionPath);
        metadata.readFromFS();
        int levels = metadata.getPartitionDepth();
        baseDir = HoodieHiveUtils.getNthParent(partitionPath, levels);
    } else {
        for (int i = 0; i < partitionPath.depth(); i++) {
            if (fs.exists(new Path(baseDir, METAFOLDER_NAME))) {
                break;
            } else if (i == partitionPath.depth() - 1) {
                throw new TableNotFoundException(partitionPath.toString());
            } else {
                baseDir = baseDir.getParent();
            }
        }
    }
    LOG.info("Reading hoodie metadata from path " + baseDir.toString());
    return HoodieTableMetaClient.builder().setConf(fs.getConf()).setBasePath(baseDir.toString()).build();
}
3.68
pulsar_NarClassLoader_getServiceDefinition
/**
 * Read a service definition as a String.
 */
public String getServiceDefinition(String serviceName) throws IOException {
    String serviceDefPath = narWorkingDirectory + "/META-INF/services/" + serviceName;
    return new String(Files.readAllBytes(Paths.get(serviceDefPath)), StandardCharsets.UTF_8);
}
3.68
framework_VScrollTable_setFooterCell
/**
 * Set a footer cell for a specified column index.
 *
 * @param index
 *            The index
 * @param cell
 *            The footer cell
 */
public void setFooterCell(int index, FooterCell cell) {
    if (cell.isEnabled()) {
        // we're moving the cell
        DOM.removeChild(tr, cell.getElement());
        orphan(cell);
        visibleCells.remove(cell);
    }
    if (index < visibleCells.size()) {
        // insert to right slot
        DOM.insertChild(tr, cell.getElement(), index);
        adopt(cell);
        visibleCells.add(index, cell);
    } else if (index == visibleCells.size()) {
        // simply append
        DOM.appendChild(tr, cell.getElement());
        adopt(cell);
        visibleCells.add(cell);
    } else {
        throw new RuntimeException(
                "Footer cells must be appended in order");
    }
}
3.68
framework_VTabsheetBase_setReadonly
/**
 * For internal use only. May be removed or replaced in the future.
 *
 * @param readonly
 *            {@code true} if this widget should be read-only, {@code false}
 *            otherwise
 */
public void setReadonly(boolean readonly) {
    this.readonly = readonly;
}
3.68
hadoop_TimedHealthReporterService_serviceStop
/**
 * Method used to terminate the health monitoring service.
 */
@Override
protected void serviceStop() throws Exception {
    if (timer != null) {
        timer.cancel();
    }
    super.serviceStop();
}
3.68
hudi_CleanPlanActionExecutor_requestClean
/**
 * Creates a Cleaner plan if there are files to be cleaned and stores them in instant file.
 * Cleaner Plan contains absolute file paths.
 *
 * @param startCleanTime Cleaner Instant Time
 * @return Cleaner Plan if generated
 */
protected Option<HoodieCleanerPlan> requestClean(String startCleanTime) {
    final HoodieCleanerPlan cleanerPlan = requestClean(context);
    Option<HoodieCleanerPlan> option = Option.empty();
    if (nonEmpty(cleanerPlan.getFilePathsToBeDeletedPerPartition())
        && cleanerPlan.getFilePathsToBeDeletedPerPartition().values().stream().mapToInt(List::size).sum() > 0) {
        // Only create cleaner plan which does some work
        final HoodieInstant cleanInstant =
            new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.CLEAN_ACTION, startCleanTime);
        // Save to both aux and timeline folder
        try {
            table.getActiveTimeline().saveToCleanRequested(cleanInstant,
                TimelineMetadataUtils.serializeCleanerPlan(cleanerPlan));
            LOG.info("Requesting Cleaning with instant time " + cleanInstant);
        } catch (IOException e) {
            LOG.error("Got exception when saving cleaner requested file", e);
            throw new HoodieIOException(e.getMessage(), e);
        }
        option = Option.of(cleanerPlan);
    }
    return option;
}
3.68
aws-saas-boost_RefreshingProfileDefaultCredentialsProvider_resolveCredentials
/**
 * @see AwsCredentialsProvider#resolveCredentials()
 */
@Override
public AwsCredentials resolveCredentials() {
    if (profileFilename == null) {
        return curriedBuilder.build().resolveCredentials();
    }
    curriedBuilder.profileFile(ProfileFile.builder()
            .type(ProfileFile.Type.CREDENTIALS)
            .content(Path.of(new File(profileFilename).toURI()))
            .build());
    return curriedBuilder.build().resolveCredentials();
}
3.68
hbase_BackupInfo_getProgress
/**
 * Get current progress
 */
public int getProgress() {
    return progress;
}
3.68
hadoop_S3AReadOpContext_getPrefetchBlockSize
/**
 * Gets the size in bytes of a single prefetch block.
 *
 * @return the size in bytes of a single prefetch block.
 */
public int getPrefetchBlockSize() {
    return this.prefetchBlockSize;
}
3.68
framework_VCalendarPanel_onChange
/*
 * (non-Javadoc)
 *
 * @see
 * com.google.gwt.event.dom.client.ChangeHandler#onChange(com.google.gwt
 * .event.dom.client.ChangeEvent)
 */
@Override
public void onChange(ChangeEvent event) {
    /*
     * Value from dropdowns gets always set for the value. Like year and
     * month when resolution is month or year.
     */
    if (event.getSource() == hours) {
        int h = hours.getSelectedIndex();
        if (getDateTimeService().isTwelveHourClock()) {
            h = h + ampm.getSelectedIndex() * 12;
        }
        value.setHours(h);
        if (timeChangeListener != null) {
            timeChangeListener.changed(h, value.getMinutes(),
                    value.getSeconds(),
                    DateTimeService.getMilliseconds(value));
        }
        event.preventDefault();
        event.stopPropagation();
    } else if (event.getSource() == mins) {
        final int m = mins.getSelectedIndex();
        value.setMinutes(m);
        if (timeChangeListener != null) {
            timeChangeListener.changed(value.getHours(), m,
                    value.getSeconds(),
                    DateTimeService.getMilliseconds(value));
        }
        event.preventDefault();
        event.stopPropagation();
    } else if (event.getSource() == sec) {
        final int s = sec.getSelectedIndex();
        value.setSeconds(s);
        if (timeChangeListener != null) {
            timeChangeListener.changed(value.getHours(),
                    value.getMinutes(), s,
                    DateTimeService.getMilliseconds(value));
        }
        event.preventDefault();
        event.stopPropagation();
    } else if (event.getSource() == ampm) {
        final int h = hours.getSelectedIndex()
                + (ampm.getSelectedIndex() * 12);
        value.setHours(h);
        if (timeChangeListener != null) {
            timeChangeListener.changed(h, value.getMinutes(),
                    value.getSeconds(),
                    DateTimeService.getMilliseconds(value));
        }
        event.preventDefault();
        event.stopPropagation();
    }
}
3.68
flink_StreamRecord_replace
/**
 * Replace the currently stored value by the given new value and the currently stored timestamp
 * with the new timestamp. This returns a StreamElement with the generic type parameter that
 * matches the new value.
 *
 * @param value The new value to wrap in this StreamRecord
 * @param timestamp The new timestamp in milliseconds
 * @return Returns the StreamElement with replaced value
 */
@SuppressWarnings("unchecked")
public <X> StreamRecord<X> replace(X value, long timestamp) {
    this.timestamp = timestamp;
    this.value = (T) value;
    this.hasTimestamp = true;
    return (StreamRecord<X>) this;
}
3.68
hudi_FlinkMergeHandle_newFileNameWithRollover
/**
 * Use the writeToken + "-" + rollNumber as the new writeToken of a mini-batch write.
 */
protected String newFileNameWithRollover(int rollNumber) {
    return FSUtils.makeBaseFileName(instantTime, writeToken + "-" + rollNumber,
        this.fileId, hoodieTable.getBaseFileExtension());
}
3.68
framework_AbstractMultiSelect_setValue
/**
 * Sets the value of this object which is a set of items to select. If the
 * new value is not equal to {@code getValue()}, fires a value change event.
 * May throw {@code IllegalArgumentException} if the value is not
 * acceptable.
 * <p>
 * The method effectively selects the given items and deselects previously
 * selected. The call is delegated to
 * {@link Multi#updateSelection(Set, Set)}.
 *
 * @see Multi#updateSelection(Set, Set)
 *
 * @param value
 *            the items to select, not {@code null}
 * @throws NullPointerException
 *             if the value is invalid
 */
@Override
public void setValue(Set<T> value) {
    Objects.requireNonNull(value);
    Set<T> copy = value.stream().map(Objects::requireNonNull)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    updateSelection(copy, new LinkedHashSet<>(getSelectedItems()));
}
3.68
zxing_IntentResult_getContents
/**
 * @return raw content of barcode
 */
public String getContents() {
    return contents;
}
3.68
flink_ConnectedStreams_keyBy
/**
 * KeyBy operation for connected data stream. Assigns keys to the elements of input1 and input2
 * using keySelector1 and keySelector2 with explicit type information for the common key type.
 *
 * @param keySelector1 The {@link KeySelector} used for grouping the first input
 * @param keySelector2 The {@link KeySelector} used for grouping the second input
 * @param keyType The type information of the common key type.
 * @return The partitioned {@link ConnectedStreams}
 */
public <KEY> ConnectedStreams<IN1, IN2> keyBy(
        KeySelector<IN1, KEY> keySelector1,
        KeySelector<IN2, KEY> keySelector2,
        TypeInformation<KEY> keyType) {
    return new ConnectedStreams<>(
            environment,
            inputStream1.keyBy(keySelector1, keyType),
            inputStream2.keyBy(keySelector2, keyType));
}
3.68
framework_FieldGroup_removeCommitHandler
/**
 * Removes the given commit handler.
 *
 * @see #addCommitHandler(CommitHandler)
 *
 * @param commitHandler
 *            The commit handler to remove
 */
public void removeCommitHandler(CommitHandler commitHandler) {
    commitHandlers.remove(commitHandler);
}
3.68
flink_DeltaIterationBase_setSolutionSetDelta
/**
 * Sets the contract of the step function that represents the solution set delta. This contract
 * is considered one of the two sinks of the step function (the other one being the next
 * workset).
 *
 * @param delta The contract representing the solution set delta.
 */
public void setSolutionSetDelta(Operator delta) {
    this.solutionSetDelta = delta;
}
3.68
flink_FileStateHandle_discardState
/**
 * Discard the state by deleting the file that stores the state. If the parent directory of the
 * state is empty after deleting the state file, it is also deleted.
 *
 * @throws Exception Thrown, if the file deletion (not the directory deletion) fails.
 */
@Override
public void discardState() throws Exception {
    final FileSystem fs = getFileSystem();
    IOException actualException = null;
    boolean success = true;
    try {
        success = fs.delete(filePath, false);
    } catch (IOException e) {
        actualException = e;
    }
    if (!success || actualException != null) {
        if (fs.exists(filePath)) {
            throw Optional.ofNullable(actualException)
                    .orElse(
                            new IOException(
                                    "Unknown error caused the file '"
                                            + filePath
                                            + "' to not be deleted."));
        }
    }
}
3.68
hadoop_MawoConfiguration_getZKSessionTimeoutMS
/**
 * Get ZooKeeper session timeout in milliseconds.
 * @return value of ZooKeeper.session.timeout.ms
 */
public int getZKSessionTimeoutMS() {
    return Integer.parseInt(configsMap.get(ZK_SESSION_TIMEOUT_MS));
}
3.68
flink_CoGroupedStreams_window
/** Specifies the window on which the co-group operation works. */
@PublicEvolving
public <W extends Window> WithWindow<T1, T2, KEY, W> window(
        WindowAssigner<? super TaggedUnion<T1, T2>, W> assigner) {
    return new WithWindow<>(
            input1, input2, keySelector1, keySelector2, keyType, assigner, null, null, null);
}
3.68
hbase_Permission_implies
/**
 * check if given action is granted
 * @param action action to be checked
 * @return true if granted, false otherwise
 */
public boolean implies(Action action) {
    return actions.contains(action);
}
3.68
framework_DataCommunicator_getDataProviderSize
/**
 * Getter method for finding the size of DataProvider. Can be overridden by
 * a subclass that uses a specific type of DataProvider and/or query.
 *
 * @return the size of data provider with current filter
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public int getDataProviderSize() {
    return getDataProvider().size(new Query(getFilter()));
}
3.68
flink_SingleInputNode_setIncomingConnection
/**
 * Sets the connection through which this node receives its input.
 *
 * @param inConn The input connection to set.
 */
public void setIncomingConnection(DagConnection inConn) {
    this.inConn = inConn;
}
3.68
rocketmq-connect_AbstractConfigManagementService_processDeleteConnectorRecord
/**
 * Process a deleted connector record.
 *
 * @param connectorName
 * @param schemaAndValue
 */
private void processDeleteConnectorRecord(String connectorName, SchemaAndValue schemaAndValue) {
    if (!connectorKeyValueStore.containsKey(connectorName)) {
        return;
    }
    Struct value = (Struct) schemaAndValue.value();
    Object epoch = value.get(FIELD_EPOCH);
    // validate
    ConnectKeyValue oldConfig = connectorKeyValueStore.get(connectorName);
    // config update
    if ((Long) epoch > oldConfig.getEpoch()) {
        // remove
        connectorKeyValueStore.remove(connectorName);
        taskKeyValueStore.remove(connectorName);
        // rebalance
        triggerListener();
    }
}
3.68
pulsar_BaseContext_getPulsarClient
/**
 * Get the pre-configured pulsar client.
 *
 * You can use this client to access Pulsar cluster.
 * The Function will be responsible for disposing this client.
 *
 * @return the instance of pulsar client
 */
default PulsarClient getPulsarClient() {
    throw new UnsupportedOperationException("not implemented");
}
3.68
graphhopper_State_getOutgoingVirtualEdge
/**
 * Returns the virtual edge that should be used by outgoing paths.
 *
 * @throws IllegalStateException if this State is not directed.
 */
public EdgeIteratorState getOutgoingVirtualEdge() {
    if (!isDirected) {
        throw new IllegalStateException(
                "This method may only be called for directed GPXExtensions");
    }
    return outgoingVirtualEdge;
}
3.68
flink_ExecutionEnvironment_readCsvFile
/**
 * Creates a CSV reader to read a comma separated value (CSV) file. The reader has options to
 * define parameters and field types and will eventually produce the DataSet that corresponds to
 * the read and parsed CSV input.
 *
 * @param filePath The path of the CSV file.
 * @return A CsvReader that can be used to configure the CSV input.
 */
public CsvReader readCsvFile(String filePath) {
    return new CsvReader(filePath, this);
}
3.68
hadoop_StoragePolicySatisfyManager_clearPathIds
/**
 * Removes the SPS path id from the list of sps paths.
 */
private void clearPathIds() {
    synchronized (pathsToBeTraversed) {
        Iterator<Long> iterator = pathsToBeTraversed.iterator();
        while (iterator.hasNext()) {
            Long trackId = iterator.next();
            try {
                namesystem.removeXattr(trackId,
                    HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
            } catch (IOException e) {
                LOG.debug("Failed to remove sps xattr!", e);
            }
            iterator.remove();
        }
    }
}
3.68
querydsl_Expressions_stringTemplate
/**
 * Create a new Template expression
 *
 * @param template template
 * @param args template parameters
 * @return template expression
 */
public static StringTemplate stringTemplate(Template template, List<?> args) {
    return new StringTemplate(template, args);
}
3.68
hadoop_FileIoProvider_listFiles
/**
 * Get a listing of the given directory using
 * {@link FileUtil#listFiles(File)}.
 *
 * @param volume target volume. null if unavailable.
 * @param dir Directory to be listed.
 * @return array of file objects representing the directory entries.
 * @throws IOException
 */
public File[] listFiles(
    @Nullable FsVolumeSpi volume, File dir) throws IOException {
    final long begin = profilingEventHook.beforeMetadataOp(volume, LIST);
    try {
        faultInjectorEventHook.beforeMetadataOp(volume, LIST);
        File[] children = FileUtil.listFiles(dir);
        profilingEventHook.afterMetadataOp(volume, LIST, begin);
        return children;
    } catch(Exception e) {
        onFailure(volume, begin);
        throw e;
    }
}
3.68
hudi_HoodieCombineHiveInputFormat_getNumPaths
/**
 * Returns the number of Paths in the split.
 */
@Override
public int getNumPaths() {
    return inputSplitShim.getNumPaths();
}
3.68
framework_FocusableComplexPanel_addFocusHandler
/*
 * (non-Javadoc)
 *
 * @see
 * com.google.gwt.event.dom.client.HasFocusHandlers#addFocusHandler(com.
 * google.gwt.event.dom.client.FocusHandler)
 */
@Override
public HandlerRegistration addFocusHandler(FocusHandler handler) {
    return addDomHandler(handler, FocusEvent.getType());
}
3.68
hmily_PropertyKey_isAvailable
/**
 * Is available boolean.
 *
 * @param name the name
 * @return the boolean
 */
public boolean isAvailable(final PropertyName name) {
    return propertyName.equals(name);
}
3.68