Columns:
name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, range 3.26 to 3.68
hadoop_BlockManagerParameters_withConf
/** * Sets the configuration object. * * @param configuration The configuration object. * @return The builder. */ public BlockManagerParameters withConf( final Configuration configuration) { this.conf = configuration; return this; }
3.68
hudi_FlinkCreateHandle_newFilePathWithRollover
/** * Use the writeToken + "-" + rollNumber as the new writeToken of a mini-batch write. */ private Path newFilePathWithRollover(int rollNumber) { final String dataFileName = FSUtils.makeBaseFileName(instantTime, writeToken + "-" + rollNumber, fileId, hoodieTable.getBaseFileExtension()); return makeNewFilePath(partitionPath, dataFileName); }
3.68
hbase_TableResource_exists
/** Returns true if the table exists. */ boolean exists() throws IOException { return servlet.getAdmin().tableExists(TableName.valueOf(table)); }
3.68
morf_MathsField_plus
/** * Provides the plus operation for SQL. * @param leftField left addend * @param rightField right addend * @return The function representing the sum */ public static MathsField plus(AliasedField leftField, AliasedField rightField) { AliasedField rightOperand = rightField instanceof MathsField ? bracket((MathsField)rightField) : rightField; return new MathsField(leftField, MathsOperator.PLUS, rightOperand); }
3.68
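A minimal usage sketch of the bracketing behaviour shown in MathsField.plus above. The SqlUtils.field/literal helpers and the import locations are assumptions for illustration, not taken from the snippet; the point is that a nested MathsField on the right-hand side is wrapped in brackets so operator precedence survives SQL generation.

```java
// Hypothetical usage sketch; helper locations (SqlUtils.field/literal) are assumed.
import static org.alfasoftware.morf.sql.SqlUtils.field;
import static org.alfasoftware.morf.sql.SqlUtils.literal;
import static org.alfasoftware.morf.sql.element.MathsField.plus;

import org.alfasoftware.morf.sql.element.AliasedField;

public class PlusExample {
  public static void main(String[] args) {
    // a + b: neither operand is a MathsField, so no brackets are introduced.
    AliasedField simple = plus(field("a"), field("b"));

    // a + (b + 1): the right operand is itself a MathsField, so plus()
    // brackets it before building the outer expression.
    AliasedField nested = plus(field("a"), plus(field("b"), literal(1)));

    System.out.println(simple);
    System.out.println(nested);
  }
}
```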
hadoop_ReadBufferManager_purgeList
/** * Method to remove buffers associated with a {@link AbfsInputStream} * when its close method is called. * NOTE: This method is not threadsafe and must be called inside a * synchronised block. See caller. * @param stream associated input stream. * @param list list of buffers like {@link this#completedReadList} * or {@link this#inProgressList}. */ private void purgeList(AbfsInputStream stream, LinkedList<ReadBuffer> list) { for (Iterator<ReadBuffer> it = list.iterator(); it.hasNext();) { ReadBuffer readBuffer = it.next(); if (readBuffer.getStream() == stream) { it.remove(); // As failed ReadBuffers (bufferIndex = -1) are already pushed to free // list in doneReading method, we will skip adding those here again. if (readBuffer.getBufferindex() != -1) { freeList.push(readBuffer.getBufferindex()); } } } }
3.68
hbase_OpenRegionHandler_updateMeta
/** * Update ZK or META. This can take a while if for example the hbase:meta is not available -- if * server hosting hbase:meta crashed and we are waiting on it to come back -- so run in a thread * and keep updating znode state meantime so master doesn't timeout our region-in-transition. * Caller must cleanup region if this fails. */ private boolean updateMeta(final HRegion r, long masterSystemTime) { if (this.server.isStopped() || this.rsServices.isStopping()) { return false; } // Object we do wait/notify on. Make it boolean. If set, we're done. // Else, wait. final AtomicBoolean signaller = new AtomicBoolean(false); PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r, this.server, this.rsServices, signaller, masterSystemTime); t.start(); // Post open deploy task: // meta => update meta location in ZK // other region => update meta while ( !signaller.get() && t.isAlive() && !this.server.isStopped() && !this.rsServices.isStopping() && isRegionStillOpening() ) { synchronized (signaller) { try { // Wait for 10 seconds, so that server shutdown // won't take too long if this thread happens to run. if (!signaller.get()) signaller.wait(10000); } catch (InterruptedException e) { // Go to the loop check. } } } // Is thread still alive? We may have left above loop because server is // stopping or we timed out the edit. Is so, interrupt it. if (t.isAlive()) { if (!signaller.get()) { // Thread still running; interrupt LOG.debug("Interrupting thread " + t); t.interrupt(); } try { t.join(); } catch (InterruptedException ie) { LOG.warn("Interrupted joining " + r.getRegionInfo().getRegionNameAsString(), ie); Thread.currentThread().interrupt(); } } // Was there an exception opening the region? This should trigger on // InterruptedException too. If so, we failed. return (!Thread.interrupted() && t.getException() == null); }
3.68
hbase_ImmutableBytesWritable_getOffset
/** Return the offset into the buffer. */ public int getOffset() { return this.offset; }
3.68
flink_LongValue_equals
/* * (non-Javadoc) * @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(final Object obj) { if (obj instanceof LongValue) { return ((LongValue) obj).value == this.value; } return false; }
3.68
hibernate-validator_DomainNameUtil_isValidDomainAddress
/** * Checks validity of a domain name. * * @param domain the domain to check for validity * @return {@code true} if the provided string is a valid domain, {@code false} otherwise */ public static boolean isValidDomainAddress(String domain) { return isValidDomainAddress( domain, DOMAIN_PATTERN ); }
3.68
flink_TimeWindow_getEnd
/** * Gets the end timestamp of this window. The end timestamp is exclusive, meaning it is the * first timestamp that does not belong to this window any more. * * @return The exclusive end timestamp of this window. */ public long getEnd() { return end; }
3.68
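A small self-contained sketch of the exclusive-end convention the Javadoc above describes: a timestamp belongs to a window iff start <= ts < end, so the value returned by getEnd() is the first timestamp outside the window. The class and values below are illustrative only, not Flink API.

```java
// Illustrative only: mirrors the [start, end) semantics of a time window.
public class WindowBoundsExample {
  static boolean belongsTo(long timestamp, long start, long end) {
    // end is exclusive: it is the first timestamp that no longer belongs to the window.
    return timestamp >= start && timestamp < end;
  }

  public static void main(String[] args) {
    long start = 0, end = 5_000; // a 5-second window covering [0, 5000)
    System.out.println(belongsTo(4_999, start, end)); // true: last timestamp inside the window
    System.out.println(belongsTo(5_000, start, end)); // false: equal to the exclusive end
  }
}
```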
hbase_QuotaSettingsFactory_limitNamespaceSpace
/** * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given * namespace to the given size in bytes. When the space usage is exceeded by all tables in the * namespace, the provided {@link SpaceViolationPolicy} is enacted on all tables in the namespace. * @param namespace The namespace on which the quota should be applied. * @param sizeLimit The limit of the namespace's size in bytes. * @param violationPolicy The action to take when the quota is exceeded. * @return A {@link QuotaSettings} object. */ public static QuotaSettings limitNamespaceSpace(final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) { return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy); }
3.68
shardingsphere-elasticjob_TracingStorageDatabaseType_getDatabaseProductName
/** * Get database product name. * * @return database product name */ default String getDatabaseProductName() { return getType(); }
3.68
framework_KeyMapper_get
/** * Retrieves object with the key. * * @param key * the name with the desired value. * @return the object with the key. */ @Override public V get(String key) { return keyObjectMap.get(key); }
3.68
hudi_ClusteringCommand_scheduleClustering
/** * Schedule clustering table service. * <p> * Example: * > connect --path {path to hudi table} * > clustering schedule --sparkMaster local --sparkMemory 2g */ @ShellMethod(key = "clustering schedule", value = "Schedule Clustering") public String scheduleClustering( @ShellOption(value = "--sparkMaster", defaultValue = SparkUtil.DEFAULT_SPARK_MASTER, help = "Spark master") final String master, @ShellOption(value = "--sparkMemory", defaultValue = "1g", help = "Spark executor memory") final String sparkMemory, @ShellOption(value = "--propsFilePath", help = "path to properties file on localfs or dfs with configurations " + "for hoodie client for clustering", defaultValue = "") final String propsFilePath, @ShellOption(value = "--hoodieConfigs", help = "Any configuration that can be set in the properties file can " + "be passed here in the form of an array", defaultValue = "") final String[] configs) throws Exception { HoodieTableMetaClient client = HoodieCLI.getTableMetaClient(); boolean initialized = HoodieCLI.initConf(); HoodieCLI.initFS(initialized); String sparkPropertiesPath = Utils.getDefaultPropertiesFile(JavaConverters.mapAsScalaMapConverter(System.getenv()).asScala()); SparkLauncher sparkLauncher = SparkUtil.initLauncher(sparkPropertiesPath); // First get a clustering instant time and pass it to spark launcher for scheduling clustering String clusteringInstantTime = client.createNewInstantTime(); sparkLauncher.addAppArgs(SparkCommand.CLUSTERING_SCHEDULE.toString(), master, sparkMemory, client.getBasePath(), client.getTableConfig().getTableName(), clusteringInstantTime, propsFilePath); UtilHelpers.validateAndAddProperties(configs, sparkLauncher); Process process = sparkLauncher.launch(); InputStreamConsumer.captureOutput(process); int exitCode = process.waitFor(); if (exitCode != 0) { return "Failed to schedule clustering for " + clusteringInstantTime; } return "Succeeded to schedule clustering for " + clusteringInstantTime; }
3.68
hbase_QuotaUtil_calculateMutationSize
/* * ========================================================================= Data Size Helpers */ public static long calculateMutationSize(final Mutation mutation) { long size = 0; for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) { for (Cell cell : entry.getValue()) { size += cell.getSerializedSize(); } } return size; }
3.68
rocketmq-connect_MemoryStateManagementServiceImpl_initialize
/** * initialize cb config * * @param config */ @Override public void initialize(WorkerConfig config, RecordConverter converter) { this.tasks = new Table<>(); this.connectors = new ConcurrentHashMap<>(); }
3.68
hadoop_OBSFileSystem_getObsClient
/** * Return the OBS client used by this filesystem. * * @return OBS client */ @VisibleForTesting ObsClient getObsClient() { return obs; }
3.68
hmily_HmilyRepositoryFacade_releaseHmilyLocks
/** * Release hmily locks. * * @param locks locks */ public void releaseHmilyLocks(final Collection<HmilyLock> locks) { checkRows(hmilyRepository.releaseHmilyLocks(locks), locks.size()); }
3.68
flink_LeaderInformationRegister_hasLeaderInformation
/** * Checks whether the register holds non-empty {@link LeaderInformation} for the passed {@code * componentId}. */ public boolean hasLeaderInformation(String componentId) { return leaderInformationPerComponentId.containsKey(componentId); }
3.68
hadoop_CacheStats_getPageSize
/** * Get the OS page size. * * @return the OS page size. */ long getPageSize() { return usedBytesCount.rounder.osPageSize; }
3.68
hadoop_RollingFileSystemSink_rollLogDir
/** * Create a new directory based on the current interval and a new log file in * that directory. * * @throws IOException thrown if an error occurs while creating the * new directory or new log file */ private void rollLogDir() throws IOException { String fileName = source + "-" + InetAddress.getLocalHost().getHostName() + ".log"; Path targetFile = new Path(currentDirPath, fileName); fileSystem.mkdirs(currentDirPath); if (allowAppend) { createOrAppendLogFile(targetFile); } else { createLogFile(targetFile); } }
3.68
flink_Path_fromLocalFile
/** * Creates a path for the given local file. * * <p>This method is useful to make sure the path creation for local files works seamlessly * across different operating systems. Especially Windows has slightly different rules for * slashes between schema and a local file path, making it sometimes tricky to produce * cross-platform URIs for local files. * * @param file The file that the path should represent. * @return A path representing the local file URI of the given file. */ public static Path fromLocalFile(File file) { return new Path(file.toURI()); }
3.68
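The method above leans on File.toURI() because the JDK already normalizes platform-specific separators into a proper file: URI. A standalone illustration with no Flink dependency (the printed path is indicative and depends on the working directory):

```java
import java.io.File;
import java.net.URI;

public class LocalFileUriExample {
  public static void main(String[] args) {
    // File.toURI() yields a normalized, absolute "file:" URI regardless of whether
    // the platform uses '/' or '\' as its separator, which is what fromLocalFile relies on.
    File file = new File("data/input.csv");
    URI uri = file.toURI();
    System.out.println(uri); // e.g. file:/home/user/project/data/input.csv
  }
}
```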
dubbo_ApolloDynamicConfiguration_createTargetListener
/** * Ignores the group parameter. * * @param key property key the native listener will listen on * @param group to distinguish different sets of properties * @return a new {@link ApolloListener}; the group parameter is ignored */ private ApolloListener createTargetListener(String key, String group) { return new ApolloListener(); }
3.68
morf_SqlDialect_selectStatementPostStatementDirectives
/** * Returns any SQL code which should be added at the end of a statement for dialect-specific reasons. * * @param selectStatement The select statement * @return Any hint code required. */ protected String selectStatementPostStatementDirectives(@SuppressWarnings("unused") SelectStatement selectStatement) { return StringUtils.EMPTY; }
3.68
graphhopper_OSMReader_isBarrierNode
/** * @return true if the given node should be duplicated to create an artificial edge. If the node turns out to be a * junction between different ways this will be ignored and no artificial edge will be created. */ protected boolean isBarrierNode(ReaderNode node) { return node.hasTag("barrier") || node.hasTag("ford"); }
3.68
hadoop_DataNodeVolumeMetrics_getFileIoErrorSampleCount
// Based on fileIoErrorRate public long getFileIoErrorSampleCount() { return fileIoErrorRate.lastStat().numSamples(); }
3.68
pulsar_ManagedLedgerConfig_getMetadataWriteQuorumSize
/** * @return the metadataWriteQuorumSize */ public int getMetadataWriteQuorumSize() { return metadataWriteQuorumSize; }
3.68
flink_SortedGrouping_sortGroup
/** * Sorts {@link org.apache.flink.api.java.tuple.Tuple} or POJO elements within a group on the * specified field in the specified {@link Order}. * * <p><b>Note: Only groups of Tuple or Pojo elements can be sorted.</b> * * <p>Groups can be sorted by multiple fields by chaining {@link #sortGroup(String, Order)} * calls. * * @param field The Tuple or Pojo field on which the group is sorted. * @param order The Order in which the specified field is sorted. * @return A SortedGrouping with specified order of group element. * @see org.apache.flink.api.java.tuple.Tuple * @see Order */ public SortedGrouping<T> sortGroup(String field, Order order) { if (groupSortSelectorFunctionKey != null) { throw new InvalidProgramException( "Chaining sortGroup with KeySelector sorting is not supported"); } if (!Keys.ExpressionKeys.isSortKey(field, inputDataSet.getType())) { throw new InvalidProgramException("Selected sort key is not a sortable type"); } ExpressionKeys<T> ek = new ExpressionKeys<>(field, inputDataSet.getType()); addSortGroupInternal(ek, order); return this; }
3.68
framework_Page_getState
/** * Returns the page state. * <p> * The page state is transmitted to UIConnector together with * {@link UIState} rather than as an individual entity. * </p> * <p> * The state should be considered an internal detail of Page. Classes * outside of Page should not access it directly but only through public * APIs provided by Page. * </p> * * @since 7.1 * @param markAsDirty * true to mark the state as dirty * @return PageState object that can be read in any case and modified if * markAsDirty is true */ protected PageState getState(boolean markAsDirty) { if (markAsDirty) { uI.markAsDirty(); } return state; }
3.68
hbase_MetricsHeapMemoryManager_updateBlockedFlushCount
/** * Update/Set the blocked flush count histogram/gauge * @param blockedFlushCount the number of blocked memstore flush since last tuning. */ public void updateBlockedFlushCount(final long blockedFlushCount) { source.updateBlockedFlushCount(blockedFlushCount); }
3.68
hadoop_RehashPartitioner_getPartition
/** Rehash {@link Object#hashCode()} to partition. */ public int getPartition(K key, V value, int numReduceTasks) { int h = SEED ^ key.hashCode(); h ^= (h >>> 20) ^ (h >>> 12); h = h ^ (h >>> 7) ^ (h >>> 4); return (h & Integer.MAX_VALUE) % numReduceTasks; }
3.68
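A standalone sketch of the bit-mixing idea used by getPartition above: keys whose plain hashCode() % N would collapse onto a few partitions get spread out once the hash is remixed. The mixing shifts are copied from the snippet; SEED is not shown in the snippet, so a fixed placeholder is used here.

```java
// Illustrative rehash, not Hadoop's class: shows why mixing the hash helps
// when keys have poorly distributed low-order bits.
public class RehashExample {
  private static final int SEED = 0x7ee5a98b; // placeholder; the real SEED is a class constant

  static int rehash(int hashCode) {
    int h = SEED ^ hashCode;
    h ^= (h >>> 20) ^ (h >>> 12);
    h = h ^ (h >>> 7) ^ (h >>> 4);
    return h;
  }

  public static void main(String[] args) {
    int partitions = 4;
    for (int key = 0; key < 8; key += 2) { // even keys: plain key % 4 only ever hits {0, 2}
      int plain = (key & Integer.MAX_VALUE) % partitions;
      int mixed = (rehash(key) & Integer.MAX_VALUE) % partitions;
      System.out.println("key=" + key + " plain=" + plain + " mixed=" + mixed);
    }
  }
}
```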
hbase_KeyValueUtil_toNewKeyCell
/** * Copies the key to a new KeyValue. * @return the KeyValue that consists of only the key part of the incoming cell */ public static KeyValue toNewKeyCell(final Cell cell) { byte[] bytes = new byte[keyLength(cell)]; appendKeyTo(cell, bytes, 0); KeyValue kv = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); // Set the seq id. The new key cell could be used in comparisons so it // is important that it uses the seqid also. If not the comparison would fail kv.setSequenceId(cell.getSequenceId()); return kv; }
3.68
framework_AbstractSplitPanel_setMaxSplitPosition
/** * Sets the maximum split position to the given position and unit. If the * split position is reversed, maximum and minimum are also reversed. * * @param pos * the maximum position of the split * @param unit * the unit (from {@link Sizeable}) in which the size is given. * Allowed units are UNITS_PERCENTAGE and UNITS_PIXELS */ public void setMaxSplitPosition(float pos, Unit unit) { setSplitPositionLimits(getSplitterState(false).minPosition, posMinUnit, pos, unit); }
3.68
hudi_MarkerHandler_getPendingMarkersToProcess
/** * @param markerDir marker directory path. * @return Pending markers from the requests to process. */ public Set<String> getPendingMarkersToProcess(String markerDir) { if (markerDirStateMap.containsKey(markerDir)) { MarkerDirState markerDirState = getMarkerDirState(markerDir); return markerDirState.getPendingMarkerCreationRequests(false).stream() .map(MarkerCreationFuture::getMarkerName).collect(Collectors.toSet()); } return Collections.emptySet(); }
3.68
hbase_HRegion_getMetrics
// TODO Needs to check whether we should expose our metrics system to CPs. If CPs themselves are doing // the op and bypassing the core, this might be needed? Should we stop supporting the bypass // feature? public MetricsRegion getMetrics() { return metricsRegion; }
3.68
hbase_Scan_getLimit
/** Returns the limit of rows for this scan */ public int getLimit() { return limit; }
3.68
AreaShop_FileManager_saveWorldGuardRegions
/** * Save all worldGuard regions that need saving. */ public void saveWorldGuardRegions() { for(String world : worldRegionsRequireSaving) { World bukkitWorld = Bukkit.getWorld(world); if(bukkitWorld != null) { RegionManager manager = plugin.getRegionManager(bukkitWorld); if(manager != null) { try { if(plugin.getWorldGuard().getDescription().getVersion().startsWith("5.")) { manager.save(); } else { manager.saveChanges(); } } catch(Exception e) { AreaShop.warn("WorldGuard regions in world " + world + " could not be saved"); } } } } }
3.68
framework_Notification_getCaption
/** * Gets the caption part of the notification message. * * @return The message caption */ public String getCaption() { return getState(false).caption; }
3.68
framework_ErrorEvent_findErrorHandler
/** * Method for finding the error handler for the given session. * * @param session * The target session * * @return An ErrorHandler for the session or null if none was found */ public static ErrorHandler findErrorHandler(VaadinSession session) { if (session == null) { return null; } return session.getErrorHandler(); }
3.68
framework_VScrollTable_handleNavigation
/** * Handles the keyboard events handled by the table. * * @param keycode * The key code received * @param ctrl * Whether {@code CTRL} was pressed * @param shift * Whether {@code SHIFT} was pressed * @return true if the navigation event was handled */ protected boolean handleNavigation(int keycode, boolean ctrl, boolean shift) { if (keycode == KeyCodes.KEY_TAB || keycode == KeyCodes.KEY_SHIFT) { // Do not handle tab key return false; } // Down navigation if (!isSelectable() && keycode == getNavigationDownKey()) { scrollBodyPanel.setScrollPosition( scrollBodyPanel.getScrollPosition() + scrollingVelocity); return true; } else if (keycode == getNavigationDownKey()) { if (isMultiSelectModeAny() && moveFocusDown()) { selectFocusedRow(ctrl, shift); } else if (isSingleSelectMode() && !shift && moveFocusDown()) { selectFocusedRow(ctrl, shift); } return true; } // Up navigation if (!isSelectable() && keycode == getNavigationUpKey()) { scrollBodyPanel.setScrollPosition( scrollBodyPanel.getScrollPosition() - scrollingVelocity); return true; } else if (keycode == getNavigationUpKey()) { if (isMultiSelectModeAny() && moveFocusUp()) { selectFocusedRow(ctrl, shift); } else if (isSingleSelectMode() && !shift && moveFocusUp()) { selectFocusedRow(ctrl, shift); } return true; } if (keycode == getNavigationLeftKey()) { // Left navigation scrollBodyPanel.setHorizontalScrollPosition( scrollBodyPanel.getHorizontalScrollPosition() - scrollingVelocity); return true; } else if (keycode == getNavigationRightKey()) { // Right navigation scrollBodyPanel.setHorizontalScrollPosition( scrollBodyPanel.getHorizontalScrollPosition() + scrollingVelocity); return true; } // Select navigation if (isSelectable() && keycode == getNavigationSelectKey()) { if (isSingleSelectMode()) { boolean wasSelected = focusedRow.isSelected(); deselectAll(); if (!wasSelected || !nullSelectionAllowed) { focusedRow.toggleSelection(); } } else { focusedRow.toggleSelection(); removeRowFromUnsentSelectionRanges(focusedRow); } sendSelectedRows(); return true; } // Page Down navigation if (keycode == getNavigationPageDownKey()) { if (isSelectable()) { /* * If selectable we plagiate MSW behavior: first scroll to the * end of current view. If at the end, scroll down one page * length and keep the selected row in the bottom part of * visible area. */ if (!isFocusAtTheEndOfTable()) { VScrollTableRow lastVisibleRowInViewPort = scrollBody .getRowByRowIndex(firstRowInViewPort + getFullyVisibleRowCount() - 1); if (lastVisibleRowInViewPort != null && lastVisibleRowInViewPort != focusedRow) { // focused row is not at the end of the table, move // focus and select the last visible row setRowFocus(lastVisibleRowInViewPort); selectFocusedRow(ctrl, shift); updateFirstVisibleAndSendSelectedRows(); } else { int indexOfToBeFocused = focusedRow.getIndex() + getFullyVisibleRowCount(); if (indexOfToBeFocused >= totalRows) { indexOfToBeFocused = totalRows - 1; } VScrollTableRow toBeFocusedRow = scrollBody .getRowByRowIndex(indexOfToBeFocused); if (toBeFocusedRow != null) { /* * if the next focused row is rendered */ setRowFocus(toBeFocusedRow); selectFocusedRow(ctrl, shift); // TODO needs scrollintoview ? 
updateFirstVisibleAndSendSelectedRows(); } else { // scroll down by pixels and return, to wait for // new rows, then select the last item in the // viewport selectLastItemInNextRender = true; multiselectPending = shift; scrollByPagelength(1); } } } } else { /* No selections, go page down by scrolling */ scrollByPagelength(1); } return true; } // Page Up navigation if (keycode == getNavigationPageUpKey()) { if (isSelectable()) { /* * If selectable we plagiate MSW behavior: first scroll to the * end of current view. If at the end, scroll down one page * length and keep the selected row in the bottom part of * visible area. */ if (!isFocusAtTheBeginningOfTable()) { VScrollTableRow firstVisibleRowInViewPort = scrollBody .getRowByRowIndex(firstRowInViewPort); if (firstVisibleRowInViewPort != null && firstVisibleRowInViewPort != focusedRow) { // focus is not at the beginning of the table, move // focus and select the first visible row setRowFocus(firstVisibleRowInViewPort); selectFocusedRow(ctrl, shift); updateFirstVisibleAndSendSelectedRows(); } else { int indexOfToBeFocused = focusedRow.getIndex() - getFullyVisibleRowCount(); if (indexOfToBeFocused < 0) { indexOfToBeFocused = 0; } VScrollTableRow toBeFocusedRow = scrollBody .getRowByRowIndex(indexOfToBeFocused); if (toBeFocusedRow != null) { // if the next focused row is rendered setRowFocus(toBeFocusedRow); selectFocusedRow(ctrl, shift); // TODO needs scrollintoview ? updateFirstVisibleAndSendSelectedRows(); } else { // unless waiting for the next rowset already // scroll down by pixels and return, to wait for // new rows, then select the last item in the // viewport selectFirstItemInNextRender = true; multiselectPending = shift; scrollByPagelength(-1); } } } } else { /* No selections, go page up by scrolling */ scrollByPagelength(-1); } return true; } // Goto start navigation if (keycode == getNavigationStartKey()) { scrollBodyPanel.setScrollPosition(0); if (isSelectable()) { if (focusedRow != null && focusedRow.getIndex() == 0) { return false; } else { VScrollTableRow rowByRowIndex = (VScrollTableRow) scrollBody .iterator().next(); if (rowByRowIndex.getIndex() == 0) { setRowFocus(rowByRowIndex); selectFocusedRow(ctrl, shift); updateFirstVisibleAndSendSelectedRows(); } else { // first row of table will come in next row fetch if (ctrl) { focusFirstItemInNextRender = true; } else { selectFirstItemInNextRender = true; multiselectPending = shift; } } } } return true; } // Goto end navigation if (keycode == getNavigationEndKey()) { scrollBodyPanel.setScrollPosition(scrollBody.getOffsetHeight()); if (isSelectable()) { final int lastRendered = scrollBody.getLastRendered(); if (lastRendered + 1 == totalRows) { VScrollTableRow rowByRowIndex = scrollBody .getRowByRowIndex(lastRendered); if (focusedRow != rowByRowIndex) { setRowFocus(rowByRowIndex); selectFocusedRow(ctrl, shift); updateFirstVisibleAndSendSelectedRows(); } } else { if (ctrl) { focusLastItemInNextRender = true; } else { selectLastItemInNextRender = true; multiselectPending = shift; } } } return true; } return false; }
3.68
dubbo_DubboRelaxedBindingAutoConfiguration_dubboBasePackages
/** * The bean is used to scan the packages of Dubbo Service classes * * @param environment {@link Environment} instance * @return non-null {@link Set} * @since 2.7.8 */ @ConditionalOnMissingBean(name = BASE_PACKAGES_BEAN_NAME) @Bean(name = BASE_PACKAGES_BEAN_NAME) public Set<String> dubboBasePackages(Environment environment) { PropertyResolver propertyResolver = dubboScanBasePackagesPropertyResolver(environment); return propertyResolver.getProperty(BASE_PACKAGES_PROPERTY_NAME, Set.class, emptySet()); }
3.68
flink_HiveParserQBParseInfo_setNoScanAnalyzeCommand
/** @param isNoScanAnalyzeCommand the isNoScanAnalyzeCommand to set */ public void setNoScanAnalyzeCommand(boolean isNoScanAnalyzeCommand) { this.isNoScanAnalyzeCommand = isNoScanAnalyzeCommand; }
3.68
hadoop_FlowRunCoprocessor_prePut
/* * (non-Javadoc) * * This method adds the tags onto the cells in the Put. It is presumed that * all the cells in one Put have the same set of Tags. The existing cell * timestamp is overwritten for non-metric cells and each such cell gets a new * unique timestamp generated by {@link TimestampGenerator} * * @see * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache * .hadoop.hbase.coprocessor.ObserverContext, * org.apache.hadoop.hbase.client.Put, * org.apache.hadoop.hbase.regionserver.wal.WALEdit, * org.apache.hadoop.hbase.client.Durability) */ @Override public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit, Durability durability) throws IOException { Map<String, byte[]> attributes = put.getAttributesMap(); // Assumption is that all the cells in a put are the same operation. List<Tag> tags = new ArrayList<>(); if ((attributes != null) && (attributes.size() > 0)) { for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) { Tag t = HBaseTimelineServerUtils.getTagFromAttribute(attribute); if (t != null) { tags.add(t); } } byte[] tagByteArray = HBaseTimelineServerUtils.convertTagListToByteArray(tags); NavigableMap<byte[], List<Cell>> newFamilyMap = new TreeMap<>( Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], List<Cell>> entry : put.getFamilyCellMap() .entrySet()) { List<Cell> newCells = new ArrayList<>(entry.getValue().size()); for (Cell cell : entry.getValue()) { // for each cell in the put add the tags // Assumption is that all the cells in // one put are the same operation // also, get a unique cell timestamp for non-metric cells // this way we don't inadvertently overwrite cell versions long cellTimestamp = getCellTimestamp(cell.getTimestamp(), tags); newCells.add(CellUtil.createCell(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cellTimestamp, KeyValue.Type.Put, CellUtil.cloneValue(cell), tagByteArray)); } newFamilyMap.put(entry.getKey(), newCells); } // for each entry // Update the family map for the Put put.setFamilyCellMap(newFamilyMap); } }
3.68
hadoop_ContainerLogContext_getContainerId
/** * Get {@link ContainerId} of the container. * * @return the container ID */ public ContainerId getContainerId() { return containerId; }
3.68
framework_Notification_isHtmlContentAllowed
/** * Checks whether caption and description are interpreted as HTML or plain * text. * * @return {@code true} if the texts are used as HTML, {@code false} if used * as plain text * @see #setHtmlContentAllowed(boolean) */ public boolean isHtmlContentAllowed() { return getState(false).htmlContentAllowed; }
3.68
hadoop_AbfsInputStream_getBytesFromReadAhead
/** * Getter for bytes read from readAhead buffer that fills asynchronously. * * @return value of the counter in long. */ @VisibleForTesting public long getBytesFromReadAhead() { return bytesFromReadAhead; }
3.68
hbase_TableRegionModel_getLocation
/** Returns the name and port of the region server hosting the region */ @XmlAttribute public String getLocation() { return location; }
3.68
druid_SQLStatementParser_parseStatement
/** * @param tryBest - when true, parse and ignore any errors that follow. * It is strongly advised to pass tryBest=true only when you are certain the errors can be ignored, * because syntax errors will otherwise be swallowed and the sql truncated, which can cause update and delete statements to execute without a where condition!!! */ public SQLStatement parseStatement(final boolean tryBest) { List<SQLStatement> list = new ArrayList<SQLStatement>(); this.parseStatementList(list, 1, null); if (tryBest) { if (lexer.token != Token.EOF) { throw new ParserException("sql syntax error, no terminated. " + lexer.info()); } } return list.get(0); }
3.68
dubbo_RpcServiceContext_isServerSide
/** * @deprecated Replace to isProviderSide() */ @Override @Deprecated public boolean isServerSide() { return isProviderSide(); }
3.68
framework_VDragEvent_syncContent
/** * Do additional content sync between <code>original</code> element and its * <code>copy</code> if needed. * * @since 7.2 * @param original * original element * @param copy * copy of original element */ private void syncContent(Element original, Element copy) { for (int i = 0; i < original.getChildCount(); i++) { Node child = original.getChild(i); if (child instanceof Element) { syncContent((Element) child, (Element) copy.getChild(i)); } } doSyncContent(original, copy); }
3.68
hbase_WALPrettyPrinter_processFile
/** * reads a log file and outputs its contents, one transaction at a time, as specified by the * currently configured options the HBase configuration relevant to this log file the path of the * log file to be read may be unable to access the configured filesystem or requested file. */ public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { throw new FileNotFoundException(p.toString()); } if (!fs.isFile(p)) { throw new IOException(p + " is not a file"); } WALStreamReader log = WALFactory.createStreamReader(fs, p, conf, position > 0 ? position : -1); if (log instanceof AbstractProtobufWALReader) { List<String> writerClsNames = ((AbstractProtobufWALReader) log).getWriterClsNames(); if (writerClsNames != null && writerClsNames.size() > 0) { out.print("Writer Classes: "); for (int i = 0; i < writerClsNames.size(); i++) { out.print(writerClsNames.get(i)); if (i != writerClsNames.size() - 1) { out.print(" "); } } out.println(); } String cellCodecClsName = ((AbstractProtobufWALReader) log).getCodecClsName(); if (cellCodecClsName != null) { out.println("Cell Codec Class: " + cellCodecClsName); } } if (outputJSON && !persistentOutput) { out.print("["); firstTxn = true; } try { WAL.Entry entry; while ((entry = log.next()) != null) { WALKey key = entry.getKey(); WALEdit edit = entry.getEdit(); // begin building a transaction structure Map<String, Object> txn = key.toStringMap(); long writeTime = key.getWriteTime(); // check output filters if (!tableSet.isEmpty() && !tableSet.contains(txn.get("table").toString())) { continue; } if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence) { continue; } if (region != null && !txn.get("region").equals(region)) { continue; } // initialize list into which we will store atomic actions List<Map<String, Object>> actions = new ArrayList<>(); for (Cell cell : edit.getCells()) { // add atomic operation to txn Map<String, Object> op = new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); if (op.isEmpty()) { continue; } actions.add(op); } if (actions.isEmpty()) { continue; } txn.put("actions", actions); if (outputJSON) { // JSON output is a straightforward "toString" on the txn object if (firstTxn) { firstTxn = false; } else { out.print(","); } // encode and print JSON out.print(GSON.toJson(txn)); } else { // Pretty output, complete with indentation by atomic action if (!outputOnlyRowKey) { out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime))); } for (int i = 0; i < actions.size(); i++) { Map<String, Object> op = actions.get(i); printCell(out, op, outputValues, outputOnlyRowKey); } } if (!outputOnlyRowKey) { out.println("edit heap size: " + entry.getEdit().heapSize()); out.println("position: " + log.getPosition()); } } } finally { log.close(); } if (outputJSON && !persistentOutput) { out.print("]"); } }
3.68
framework_DownloadStream_setBufferSize
/** * Sets the size of the download buffer. * * @param bufferSize * the size of the buffer in bytes. * * @since 7.0 */ public void setBufferSize(int bufferSize) { this.bufferSize = bufferSize; }
3.68
hbase_CacheConfig_shouldCacheDataOnWrite
/** * @return true if data blocks should be written to the cache when an HFile is written, false if * not */ public boolean shouldCacheDataOnWrite() { return this.cacheDataOnWrite; }
3.68
hbase_FileLink_getAvailablePath
/** Returns the path of the first available link. */ public Path getAvailablePath(FileSystem fs) throws IOException { for (int i = 0; i < locations.length; ++i) { if (fs.exists(locations[i])) { return locations[i]; } } throw new FileNotFoundException(toString()); }
3.68
hbase_ResponseConverter_getRegionInfos
/** * Get the list of region info from a GetOnlineRegionResponse * @param proto the GetOnlineRegionResponse * @return the list of region info */ public static List<RegionInfo> getRegionInfos(final GetOnlineRegionResponse proto) { if (proto == null || proto.getRegionInfoCount() == 0) return null; return ProtobufUtil.getRegionInfos(proto); }
3.68
hadoop_ECBlock_setParity
/** * Set true if it's for a parity block. * @param isParity is parity or not */ public void setParity(boolean isParity) { this.isParity = isParity; }
3.68
hadoop_AzureFileSystemInstrumentation_getBlockDownloadLatency
/** * Get the current rolling average of the download latency. * @return rolling average of download latency in milliseconds. */ public long getBlockDownloadLatency() { return currentBlockDownloadLatency.getCurrentAverage(); }
3.68
flink_ExtractionUtils_collectTypeHierarchy
/** * Collects the partially ordered type hierarchy (i.e. all involved super classes and super * interfaces) of the given type. */ static List<Type> collectTypeHierarchy(Type type) { Type currentType = type; Class<?> currentClass = toClass(type); final List<Type> typeHierarchy = new ArrayList<>(); while (currentClass != null) { // collect type typeHierarchy.add(currentType); // collect super interfaces for (Type genericInterface : currentClass.getGenericInterfaces()) { final Class<?> interfaceClass = toClass(genericInterface); if (interfaceClass != null) { typeHierarchy.addAll(collectTypeHierarchy(genericInterface)); } } currentType = currentClass.getGenericSuperclass(); currentClass = toClass(currentType); } return typeHierarchy; }
3.68
flink_StreamExecutionEnvironment_setParallelism
/** * Sets the parallelism for operations executed through this environment. Setting a parallelism * of x here will cause all operators (such as map, batchReduce) to run with x parallel * instances. This method overrides the default parallelism for this environment. The {@link * LocalStreamEnvironment} uses by default a value equal to the number of hardware contexts (CPU * cores / threads). When executing the program via the command line client from a JAR file, the * default degree of parallelism is the one configured for that setup. * * @param parallelism The parallelism */ public StreamExecutionEnvironment setParallelism(int parallelism) { config.setParallelism(parallelism); return this; }
3.68
hadoop_SingleFilePerBlockCache_get
/** * Gets the block having the given {@code blockNumber}. * * @throws IllegalArgumentException if buffer is null. */ @Override public void get(int blockNumber, ByteBuffer buffer) throws IOException { if (closed.get()) { return; } checkNotNull(buffer, "buffer"); Entry entry = getEntry(blockNumber); entry.takeLock(Entry.LockType.READ); try { buffer.clear(); readFile(entry.path, buffer); buffer.rewind(); validateEntry(entry, buffer); } finally { entry.releaseLock(Entry.LockType.READ); } }
3.68
flink_RpcUtils_terminateRpcService
/** * Shuts the given rpc services down and waits for their termination. * * @param rpcServices to shut down * @throws InterruptedException if the operation has been interrupted * @throws ExecutionException if a problem occurred */ @VisibleForTesting public static void terminateRpcService(RpcService... rpcServices) throws InterruptedException, ExecutionException { terminateAsyncCloseables( Arrays.stream(rpcServices) .map(rpcService -> (AutoCloseableAsync) rpcService::closeAsync) .collect(Collectors.toList())); }
3.68
flink_FlinkStatement_clearWarnings
// TODO We currently do not support this, but we can't throw a SQLException here because we want // to support jdbc tools such as beeline and sqlline. @Override public void clearWarnings() throws SQLException {}
3.68
framework_VaadinSession_valueBound
/** * @see javax.servlet.http.HttpSessionBindingListener#valueBound(HttpSessionBindingEvent) */ @Override public void valueBound(HttpSessionBindingEvent arg0) { // We are not interested in bindings }
3.68
hudi_FailSafeConsistencyGuard_retryTillSuccess
/** * Retries the predicate a configurable number of times until the predicate returns success. * * @param dir directory of interest in which the list of files is checked for visibility * @param files List of files to check for visibility * @param event {@link org.apache.hudi.common.fs.ConsistencyGuard.FileVisibility} event of interest. * @throws TimeoutException when retries are exhausted */ private void retryTillSuccess(Path dir, List<String> files, FileVisibility event) throws TimeoutException { long waitMs = consistencyGuardConfig.getInitialConsistencyCheckIntervalMs(); int attempt = 0; LOG.info("Max Attempts=" + consistencyGuardConfig.getMaxConsistencyChecks()); while (attempt < consistencyGuardConfig.getMaxConsistencyChecks()) { boolean success = checkFilesVisibility(attempt, dir, files, event); if (success) { return; } sleepSafe(waitMs); waitMs = waitMs * 2; // double check interval every attempt waitMs = Math.min(waitMs, consistencyGuardConfig.getMaxConsistencyCheckIntervalMs()); attempt++; } throw new TimeoutException("Timed out waiting for files to adhere to event " + event.name()); }
3.68
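The loop above doubles the wait on every attempt and caps it at a configured maximum. Below is a stripped-down, self-contained sketch of that capped exponential backoff pattern; the interval values and names are illustrative, not Hudi's configuration defaults.

```java
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public class BackoffExample {
  // Retries `check` up to maxAttempts times, doubling the sleep each round up to maxWaitMs.
  static void retryWithBackoff(BooleanSupplier check, int maxAttempts,
                               long initialWaitMs, long maxWaitMs)
      throws TimeoutException, InterruptedException {
    long waitMs = initialWaitMs;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (check.getAsBoolean()) {
        return;
      }
      Thread.sleep(waitMs);
      waitMs = Math.min(waitMs * 2, maxWaitMs); // double, but never exceed the cap
    }
    throw new TimeoutException("condition not met after " + maxAttempts + " attempts");
  }

  public static void main(String[] args) throws Exception {
    long deadline = System.currentTimeMillis() + 300;
    retryWithBackoff(() -> System.currentTimeMillis() >= deadline, 10, 50, 400);
    System.out.println("condition satisfied");
  }
}
```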
hadoop_DatanodeAdminProperties_setMaintenanceExpireTimeInMS
/** * Set the maintenance expiration time in milliseconds. * @param maintenanceExpireTimeInMS * the maintenance expiration time in milliseconds. */ public void setMaintenanceExpireTimeInMS( final long maintenanceExpireTimeInMS) { this.maintenanceExpireTimeInMS = maintenanceExpireTimeInMS; }
3.68
flink_JoinOperator_projectTuple17
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> ProjectJoin< I1, I2, Tuple17< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> projectTuple17() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo< Tuple17< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> tType = new TupleTypeInfo< Tuple17< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(fTypes); return new ProjectJoin< I1, I2, Tuple17< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
hudi_HoodieWriteHandle_getLogCreationCallback
/** * Returns a log creation hook impl. */ protected LogFileCreationCallback getLogCreationCallback() { return new LogFileCreationCallback() { @Override public boolean preFileCreation(HoodieLogFile logFile) { WriteMarkers writeMarkers = WriteMarkersFactory.get(config.getMarkersType(), hoodieTable, instantTime); return writeMarkers.createIfNotExists(partitionPath, logFile.getFileName(), IOType.CREATE, config, fileId, hoodieTable.getMetaClient().getActiveTimeline()).isPresent(); } }; }
3.68
flink_PythonCsvUtils_createCsvBulkWriterFactory
/** * Util for creating a {@link BulkWriter.Factory} that wraps {@link CsvBulkWriter#forSchema}. */ public static BulkWriter.Factory<RowData> createCsvBulkWriterFactory( CsvSchema schema, DataType physicalDataType) { return CsvFileFormatFactory.createCsvBulkWriterFactory( schema, (RowType) physicalDataType.getLogicalType()); }
3.68
flink_ResourceManagerUtils_parseRestBindPortFromWebInterfaceUrl
/** * Parse the port from the webInterfaceUrl. * * @param webInterfaceUrl The web interface url to be parsed * @return the parsed rest port or -1 if failed */ public static Integer parseRestBindPortFromWebInterfaceUrl(String webInterfaceUrl) { if (webInterfaceUrl != null) { final int lastColon = webInterfaceUrl.lastIndexOf(':'); if (lastColon == -1) { return -1; } else { try { return Integer.parseInt(webInterfaceUrl.substring(lastColon + 1)); } catch (NumberFormatException e) { return -1; } } } else { return -1; } }
3.68
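The parsing rule above is simply "take everything after the last colon, fall back to -1". A tiny standalone copy of that logic, exercised with made-up URLs, makes the fallback cases explicit:

```java
public class RestPortParseExample {
  // Same rule as the snippet: substring after the last ':', or -1 when absent/unparsable.
  static int parsePort(String url) {
    if (url == null) {
      return -1;
    }
    int lastColon = url.lastIndexOf(':');
    if (lastColon == -1) {
      return -1;
    }
    try {
      return Integer.parseInt(url.substring(lastColon + 1));
    } catch (NumberFormatException e) {
      return -1;
    }
  }

  public static void main(String[] args) {
    System.out.println(parsePort("http://jobmanager:8081")); // 8081
    System.out.println(parsePort("jobmanager"));             // -1: no colon at all
    System.out.println(parsePort("http://jobmanager:abc"));  // -1: not a number
  }
}
```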
hudi_TimelineUtils_getAllExtraMetadataForKey
/** * Get extra metadata for specified key from all active commit/deltacommit instants. */ public static Map<String, Option<String>> getAllExtraMetadataForKey(HoodieTableMetaClient metaClient, String extraMetadataKey) { return metaClient.getCommitsTimeline().filterCompletedInstants().getReverseOrderedInstants().collect(Collectors.toMap( HoodieInstant::getTimestamp, instant -> getMetadataValue(metaClient, extraMetadataKey, instant))); }
3.68
graphhopper_GTFSFeed_getTripGeometry
/** * Returns a trip geometry object (LineString) for a given trip id. * If the trip has a shape reference, this will be used for the geometry. * Otherwise, the ordered stoptimes will be used. * * @param trip_id trip id of desired trip geometry * @return the LineString representing the trip geometry. * @see LineString */ public LineString getTripGeometry(String trip_id){ CoordinateList coordinates = new CoordinateList(); LineString ls = null; Trip trip = trips.get(trip_id); // If trip has shape_id, use it to generate geometry. if (trip.shape_id != null) { Shape shape = getShape(trip.shape_id); if (shape != null) ls = shape.geometry; } // Use the ordered stoptimes. if (ls == null) { ls = getStraightLineForStops(trip_id); } return ls; }
3.68
hbase_Client_put
/** * Send a PUT request * @param cluster the cluster definition * @param path the path or URI * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied * @param content the content bytes * @return a Response object with response detail */ public Response put(Cluster cluster, String path, Header[] headers, byte[] content) throws IOException { HttpPut method = new HttpPut(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); HttpResponse resp = execute(cluster, method, headers, path); headers = resp.getAllHeaders(); content = getResponseBody(resp); return new Response(resp.getStatusLine().getStatusCode(), headers, content); } finally { method.releaseConnection(); } }
3.68
flink_OperatorChain_getAllOperators
/** * Returns an {@link Iterable} which traverses all operators in forward or reverse topological * order. */ protected Iterable<StreamOperatorWrapper<?, ?>> getAllOperators(boolean reverse) { return reverse ? new StreamOperatorWrapper.ReadIterator(tailOperatorWrapper, true) : new StreamOperatorWrapper.ReadIterator(mainOperatorWrapper, false); }
3.68
dubbo_Bytes_base642bytes
/** * from base64 string. * * @param str base64 string. * @param off offset. * @param len length. * @param code base64 code(0-63 is base64 char,64 is pad char). * @return byte array. */ public static byte[] base642bytes(final String str, final int off, final int len, final char[] code) { if (off < 0) { throw new IndexOutOfBoundsException("base642bytes: offset < 0, offset is " + off); } if (len < 0) { throw new IndexOutOfBoundsException("base642bytes: length < 0, length is " + len); } if (len == 0) { return new byte[0]; } if (off + len > str.length()) { throw new IndexOutOfBoundsException("base642bytes: offset + length > string length."); } if (code.length < 64) { throw new IllegalArgumentException("Base64 code length < 64."); } int rem = len % 4; if (rem == 1) { throw new IllegalArgumentException("base642bytes: base64 string length % 4 == 1."); } int num = len / 4, size = num * 3; if (code.length > 64) { if (rem != 0) { throw new IllegalArgumentException("base642bytes: base64 string length error."); } char pc = code[64]; if (str.charAt(off + len - 2) == pc) { size -= 2; --num; rem = 2; } else if (str.charAt(off + len - 1) == pc) { size--; --num; rem = 3; } } else { if (rem == 2) { size++; } else if (rem == 3) { size += 2; } } int r = off, w = 0; byte[] b = new byte[size]; for (int i = 0; i < num; i++) { int c1 = indexOf(code, str.charAt(r++)), c2 = indexOf(code, str.charAt(r++)); int c3 = indexOf(code, str.charAt(r++)), c4 = indexOf(code, str.charAt(r++)); b[w++] = (byte) ((c1 << 2) | (c2 >> 4)); b[w++] = (byte) ((c2 << 4) | (c3 >> 2)); b[w++] = (byte) ((c3 << 6) | c4); } if (rem == 2) { int c1 = indexOf(code, str.charAt(r++)), c2 = indexOf(code, str.charAt(r++)); b[w++] = (byte) ((c1 << 2) | (c2 >> 4)); } else if (rem == 3) { int c1 = indexOf(code, str.charAt(r++)), c2 = indexOf(code, str.charAt(r++)), c3 = indexOf(code, str.charAt(r++)); b[w++] = (byte) ((c1 << 2) | (c2 >> 4)); b[w++] = (byte) ((c2 << 4) | (c3 >> 2)); } return b; }
3.68
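The size arithmetic in base642bytes (each full 4-char group yields 3 bytes; a 2-char or 3-char remainder yields 1 or 2 extra bytes) can be sanity-checked against the JDK decoder. This uses java.util.Base64 with the standard alphabet, not Dubbo's custom code table, so it only illustrates the length math.

```java
import java.util.Base64;

public class Base64LengthExample {
  public static void main(String[] args) {
    // Unpadded input: length % 4 == 2 leaves 1 trailing byte, length % 4 == 3 leaves 2,
    // matching the rem == 2 / rem == 3 size adjustments in the snippet.
    String twoCharTail = "QUJD" + "RQ";    // one full 4-char group ("ABC") + 2-char remainder
    String threeCharTail = "QUJD" + "RUY"; // one full 4-char group ("ABC") + 3-char remainder
    Base64.Decoder decoder = Base64.getDecoder();
    System.out.println(decoder.decode(twoCharTail).length);   // 4 = 3 + 1
    System.out.println(decoder.decode(threeCharTail).length); // 5 = 3 + 2
  }
}
```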
hudi_HoodiePipeline_schema
/** * Add table schema. */ public Builder schema(Schema schema) { for (Schema.UnresolvedColumn column : schema.getColumns()) { column(column.toString()); } if (schema.getPrimaryKey().isPresent()) { pk(schema.getPrimaryKey().get().getColumnNames().stream().map(EncodingUtils::escapeIdentifier).collect(Collectors.joining(", "))); } return this; }
3.68
morf_OracleMetaDataProvider_isPrimaryKeyIndex
/** * @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#isPrimaryKeyIndex(DatabaseMetaDataProvider.RealName) */ private boolean isPrimaryKeyIndex(String indexName) { return indexName.endsWith("_PK"); }
3.68
MagicPlugin_ActionFactory_removeResolver
/** * Unregister a resolver. * * @param actionResolver * The action resolver to remove. * @throws NullPointerException * When actionResolver is null. */ public static void removeResolver(ActionResolver actionResolver) { Preconditions.checkNotNull(actionResolver); Iterator<ActionResolver> it = resolvers.iterator(); while (it.hasNext()) { if (it.next().equals(actionResolver)) { it.remove(); } } }
3.68
flink_OperationManager_submitOperation
/** * Submit the operation to the {@link OperationManager}. The {@link OperationManager} manages the * lifecycle of the {@link Operation}, including registering resources, firing the execution and so * on. * * @param fetcherSupplier offers the fetcher to get the results. * @return OperationHandle to fetch the results or check the status. */ public OperationHandle submitOperation( Function<OperationHandle, ResultFetcher> fetcherSupplier) { OperationHandle handle = OperationHandle.create(); Operation operation = new Operation(handle, () -> fetcherSupplier.apply(handle)); submitOperationInternal(handle, operation); return handle; }
3.68
flink_WindowedStream_min
/** * Applies an aggregation that gives the minimum value of the pojo data stream at the given * field expression for every window. * * <p>A field expression is either the name of a public field or a getter method with * parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into * objects, as in {@code "field1.getInnerField2()" }. * * @param field The field expression based on which the aggregation will be applied. * @return The transformed DataStream. */ public SingleOutputStreamOperator<T> min(String field) { return aggregate( new ComparableAggregator<>( field, input.getType(), AggregationFunction.AggregationType.MIN, false, input.getExecutionConfig())); }
3.68
framework_TabsheetNotEnoughHorizontalSpace_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Scroll-buttons should render correctly on all browsers"; }
3.68
hbase_HFileReaderImpl_readMvccVersion
/** * Read mvcc. Does checks to see if we even need to read the mvcc at all. */ protected void readMvccVersion(final int offsetFromPos) { // See if we even need to decode mvcc. if (!this.reader.getHFileInfo().shouldIncludeMemStoreTS()) { return; } if (!this.reader.getHFileInfo().isDecodeMemstoreTS()) { currMemstoreTS = 0; currMemstoreTSLen = 1; return; } _readMvccVersion(offsetFromPos); }
3.68
hbase_MasterObserver_preMasterInitialization
/** * Called before the master initialization flag is set to true in the * {@link org.apache.hadoop.hbase.master.HMaster} process. */ default void preMasterInitialization(final ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { }
3.68
hadoop_ReadBufferManager_doneReading
/** * ReadBufferWorker thread calls this method to post completion. * * @param buffer the buffer whose read was completed * @param result the {@link ReadBufferStatus} after the read operation in the worker thread * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read */ void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) { if (LOGGER.isTraceEnabled()) { LOGGER.trace("ReadBufferWorker completed read file {} for offset {} outcome {} bytes {}", buffer.getStream().getPath(), buffer.getOffset(), result, bytesActuallyRead); } synchronized (this) { // If this buffer has already been purged during // close of InputStream then we don't update the lists. if (inProgressList.contains(buffer)) { inProgressList.remove(buffer); if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) { buffer.setStatus(ReadBufferStatus.AVAILABLE); buffer.setLength(bytesActuallyRead); } else { freeList.push(buffer.getBufferindex()); // buffer will be deleted as per the eviction policy. } // completed list also contains FAILED read buffers // for sending exception message to clients. buffer.setStatus(result); buffer.setTimeStamp(currentTimeMillis()); completedReadList.add(buffer); } } //outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results buffer.getLatch().countDown(); // wake up waiting threads (if any) }
3.68
flink_NetworkBufferPool_destroyAllBufferPools
/** * Destroys all buffer pools that allocate their buffers from this buffer pool (created via * {@link #createBufferPool(int, int)}). */ public void destroyAllBufferPools() { synchronized (factoryLock) { // create a copy to avoid concurrent modification exceptions LocalBufferPool[] poolsCopy = allBufferPools.toArray(new LocalBufferPool[allBufferPools.size()]); for (LocalBufferPool pool : poolsCopy) { pool.lazyDestroy(); } // some sanity checks if (allBufferPools.size() > 0 || numTotalRequiredBuffers > 0 || resizableBufferPools.size() > 0) { throw new IllegalStateException( "NetworkBufferPool is not empty after destroying all LocalBufferPools"); } } }
3.68
morf_Criterion_getSelectStatement
/** * Get the {@link SelectStatement} associated with the criterion. * * @return the selectStatement */ public SelectStatement getSelectStatement() { return selectStatement; }
3.68
framework_LayoutManager_getBorderTop
/** * Gets the top border of the given element, provided that it has been * measured. These elements are guaranteed to be measured: * <ul> * <li>ManagedLayouts and their child Connectors * <li>Elements for which there is at least one ElementResizeListener * <li>Elements for which at least one ManagedLayout has registered a * dependency * </ul> * * A negative number is returned if the element has not been measured. If 0 * is returned, it might indicate that the element is not attached to the * DOM. * * @param element * the element to get the measured size for * @return the measured top border of the element in pixels. */ public int getBorderTop(Element element) { assert needsMeasure( element) : "Getting measurement for element that is not measured"; return getMeasuredSize(element, nullSize).getBorderTop(); }
3.68
flink_TransientBlobCache_getStorageLocation
/** * Returns a file handle to the file associated with the given blob key on the blob server. * * @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) * @param key identifying the file * @return file handle to the file * @throws IOException if creating the directory fails */ @VisibleForTesting public File getStorageLocation(@Nullable JobID jobId, BlobKey key) throws IOException { return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key); }
3.68
streampipes_StreamPipesClient_processors
/** * Get API to work with data processors * * @return {@link DataProcessorApi} */ @Override public DataProcessorApi processors() { return new DataProcessorApi(config); }
3.68
hbase_ZNodeClearer_getMyEphemeralNodeFileName
/** * Get the name of the file used to store the znode contents */ public static String getMyEphemeralNodeFileName() { return System.getenv().get("HBASE_ZNODE_FILE"); }
3.68
hmily_ConfigProperty_of
/** * Of config property. * * @param name the name * @param value the value * @return the config property */ public static ConfigProperty of(final PropertyName name, final Object value) { return Optional.ofNullable(value).map(cf -> new ConfigProperty(name, cf)).orElse(null); }
3.68
hadoop_DockerCommand_getCommandOption
/** * Returns the docker sub-command string being used, * e.g. 'run'. */ public final String getCommandOption() { return this.command; }
3.68
hadoop_NativeS3FileSystem_getScheme
/** * Return the protocol scheme for the FileSystem. * * @return <code>s3n</code> */ @Override public String getScheme() { return "s3n"; }
3.68
framework_AbstractTextField_selectAll
/** * Selects all text in the field. * <p> * As a side effect the field will become focused. */ public void selectAll() { getRpcProxy(AbstractTextFieldClientRpc.class).selectAll(); focus(); }
3.68
morf_OracleMetaDataProvider_views
/** * @see org.alfasoftware.morf.metadata.Schema#views() */ @Override public Collection<View> views() { return viewMap().values(); }
3.68
graphhopper_AlternativeRoute_isAlreadyExisting
/** * Returns true if the specified tid already exists in the * traversalIdMap. */ boolean isAlreadyExisting(final int tid) { final AtomicBoolean exists = new AtomicBoolean(false); traversalIdMap.forEach(new IntObjectPredicate<IntSet>() { @Override public boolean apply(int key, IntSet set) { if (set.contains(tid)) { exists.set(true); return false; } return true; } }); return exists.get(); }
3.68
hbase_OrderedBytes_isNumericNaN
/** * Return true when the next encoded value in {@code src} uses Numeric encoding and is * {@code NaN}, false otherwise. */ public static boolean isNumericNaN(PositionedByteRange src) { return NAN == (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek()); }
3.68
framework_VTabsheet_addTab
/** * Adds a tab to the tab bar. * * @return the added tab. */ public Tab addTab() { Tab t = new Tab(this); int tabIndex = getTabCount(); // Logical attach insert(t, tr, tabIndex, true); if (tabIndex == 0) { // Set the "first" style t.setStyleNames(false, true); } getTabsheet().selectionHandler.registerTab(t); t.setCloseHandler(this); // Save the size that is expected to be needed if this tab is // scrolled back to view after getting temporarily hidden. The tab // hasn't been initialized from tab state yet so this value is a // placeholder. tabWidths.put(t, t.getOffsetWidth()); return t; }
3.68
hibernate-validator_MetaDataBuilder_definedIn
/** * @param rootClass The root class. That is the class for which we currently * create a {@code BeanMetaData} * @param hierarchyClass The class on which the current constraint is defined on * * @return Returns {@code ConstraintOrigin.DEFINED_LOCALLY} if the * constraint was defined on the root bean, * {@code ConstraintOrigin.DEFINED_IN_HIERARCHY} otherwise. */ private ConstraintOrigin definedIn(Class<?> rootClass, Class<?> hierarchyClass) { if ( hierarchyClass.equals( rootClass ) ) { return ConstraintOrigin.DEFINED_LOCALLY; } else { return ConstraintOrigin.DEFINED_IN_HIERARCHY; } }
3.68
hadoop_ColumnHeader_getCData
/** * Get the cdata field for the TH. * @return CData. */ public String getCData() { return this.cdata; }
3.68
flink_MergingWindowSet_retireWindow
/** * Removes the given window from the set of in-flight windows. * * @param window The {@code Window} to remove. */ public void retireWindow(W window) { W removed = this.mapping.remove(window); if (removed == null) { throw new IllegalStateException( "Window " + window + " is not in in-flight window set."); } }
3.68