Columns:
  name: string (length 12 to 178)
  code_snippet: string (length 8 to 36.5k)
  score: float64 (range 3.26 to 3.68)
framework_HierarchyMapper_setItemCollapseAllowedProvider
/**
 * Sets the current item collapse allowed provider.
 *
 * @param itemCollapseAllowedProvider
 *            the item collapse allowed provider
 */
public void setItemCollapseAllowedProvider(
        ItemCollapseAllowedProvider<T> itemCollapseAllowedProvider) {
    this.itemCollapseAllowedProvider = itemCollapseAllowedProvider;
}
3.68
flink_ProcessPythonEnvironmentManager_createRetrievalToken
/**
 * Returns an empty RetrievalToken because no files will be transmitted via ArtifactService in
 * process mode.
 *
 * @return The path of the empty RetrievalToken file.
 */
public String createRetrievalToken() throws IOException {
    File retrievalToken =
            new File(
                    resource.baseDirectory,
                    "retrieval_token_" + UUID.randomUUID().toString() + ".json");
    if (retrievalToken.createNewFile()) {
        // Use try-with-resources so the stream is closed even if the write fails.
        try (DataOutputStream dos =
                new DataOutputStream(new FileOutputStream(retrievalToken))) {
            dos.writeBytes("{\"manifest\": {}}");
            dos.flush();
        }
        return retrievalToken.getAbsolutePath();
    } else {
        throw new IOException(
                "Could not create the RetrievalToken file: " + retrievalToken.getAbsolutePath());
    }
}
3.68
flink_StreamExecTemporalSort_createSortRowTime
/** Create Sort logic based on row time. */
private Transformation<RowData> createSortRowTime(
        RowType inputType,
        Transformation<RowData> inputTransform,
        ExecNodeConfig config,
        ClassLoader classLoader) {
    GeneratedRecordComparator rowComparator = null;
    if (sortSpec.getFieldSize() > 1) {
        // skip the first field which is the rowtime field and would be ordered by timer.
        SortSpec specExcludeTime = sortSpec.createSubSortSpec(1);
        rowComparator =
                ComparatorCodeGenerator.gen(
                        config, classLoader, "RowTimeSortComparator", inputType, specExcludeTime);
    }
    RowTimeSortOperator sortOperator =
            new RowTimeSortOperator(
                    InternalTypeInfo.of(inputType),
                    sortSpec.getFieldSpec(0).getFieldIndex(),
                    rowComparator);
    OneInputTransformation<RowData, RowData> transform =
            ExecNodeUtil.createOneInputTransformation(
                    inputTransform,
                    createTransformationMeta(TEMPORAL_SORT_TRANSFORMATION, config),
                    sortOperator,
                    InternalTypeInfo.of(inputType),
                    inputTransform.getParallelism(),
                    false);
    if (inputsContainSingleton()) {
        transform.setParallelism(1);
        transform.setMaxParallelism(1);
    }
    EmptyRowDataKeySelector selector = EmptyRowDataKeySelector.INSTANCE;
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    return transform;
}
3.68
hadoop_BCFile_finish
/**
 * Finishing up the current block.
 */
public void finish() throws IOException {
  try {
    if (out != null) {
      out.flush();
      out = null;
    }
  } finally {
    compressAlgo.returnCompressor(compressor);
    compressor = null;
  }
}
3.68
hudi_DFSPropertiesConfiguration_addToGlobalProps
// test only
public static TypedProperties addToGlobalProps(String key, String value) {
  GLOBAL_PROPS.put(key, value);
  return GLOBAL_PROPS;
}
3.68
hudi_ConsistentBucketIdentifier_getBucketByFileId
/** * Get bucket of the given file group * * @param fileId the file group id. NOTE: not filePrefix (i.e., uuid) */ public ConsistentHashingNode getBucketByFileId(String fileId) { return fileIdToBucket.get(fileId); }
3.68
pulsar_ProtocolHandlers_load
/**
 * Load the protocol handlers for the given <tt>protocol</tt> list.
 *
 * @param conf the pulsar broker service configuration
 * @return the collection of protocol handlers
 */
public static ProtocolHandlers load(ServiceConfiguration conf) throws IOException {
    ProtocolHandlerDefinitions definitions = ProtocolHandlerUtils.searchForHandlers(
            conf.getProtocolHandlerDirectory(), conf.getNarExtractionDirectory());
    ImmutableMap.Builder<String, ProtocolHandlerWithClassLoader> handlersBuilder = ImmutableMap.builder();
    conf.getMessagingProtocols().forEach(protocol -> {
        ProtocolHandlerMetadata definition = definitions.handlers().get(protocol);
        if (null == definition) {
            throw new RuntimeException("No protocol handler is found for protocol `" + protocol
                    + "`. Available protocols are : " + definitions.handlers());
        }
        ProtocolHandlerWithClassLoader handler;
        try {
            handler = ProtocolHandlerUtils.load(definition, conf.getNarExtractionDirectory());
        } catch (IOException e) {
            log.error("Failed to load the protocol handler for protocol `" + protocol + "`", e);
            throw new RuntimeException("Failed to load the protocol handler for protocol `" + protocol + "`");
        }
        if (!handler.accept(protocol)) {
            handler.close();
            log.error("Malformed protocol handler found for protocol `" + protocol + "`");
            throw new RuntimeException("Malformed protocol handler found for protocol `" + protocol + "`");
        }
        handlersBuilder.put(protocol, handler);
        log.info("Successfully loaded protocol handler for protocol `{}`", protocol);
    });
    return new ProtocolHandlers(handlersBuilder.build());
}
3.68
hadoop_PutTracker_initialize
/** * Startup event. * @return true if the multipart should start immediately. * @throws IOException any IO problem. */ public boolean initialize() throws IOException { return false; }
3.68
hbase_RpcServer_getServiceAndInterface
/**
 * @param serviceName Some arbitrary string that represents a 'service'.
 * @param services Available service instances
 * @return Matching BlockingServiceAndInterface pair
 */
protected static BlockingServiceAndInterface getServiceAndInterface(
  final List<BlockingServiceAndInterface> services, final String serviceName) {
  for (BlockingServiceAndInterface bs : services) {
    if (bs.getBlockingService().getDescriptorForType().getName().equals(serviceName)) {
      return bs;
    }
  }
  return null;
}
3.68
flink_IOUtils_deleteFilesRecursively
/**
 * Deletes all files under the given directory recursively; the directories themselves are left
 * in place.
 */
public static void deleteFilesRecursively(Path path) throws Exception {
    File[] files = path.toFile().listFiles();
    if (files == null || files.length == 0) {
        return;
    }
    for (File file : files) {
        if (!file.isDirectory()) {
            Files.deleteIfExists(file.toPath());
        } else {
            deleteFilesRecursively(file.toPath());
        }
    }
}
3.68
hbase_ScannerContext_setTimeScope
/** * Change the scope in which the time limit is enforced */ void setTimeScope(LimitScope scope) { this.timeScope = scope; }
3.68
flink_Catalog_dropDatabase
/**
 * Drop a database.
 *
 * @param name Name of the database to be dropped.
 * @param ignoreIfNotExists Flag to specify behavior when the database does not exist: if set to
 *     false, throw an exception, if set to true, do nothing.
 * @throws DatabaseNotExistException if the given database does not exist
 * @throws DatabaseNotEmptyException if the given database is not empty (this default variant
 *     drops without cascade)
 * @throws CatalogException in case of any runtime exception
 */
default void dropDatabase(String name, boolean ignoreIfNotExists)
        throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException {
    dropDatabase(name, ignoreIfNotExists, false);
}
3.68
graphhopper_Snap_getSnappedPosition
/** * @return 0 if on edge. 1 if on pillar node and 2 if on tower node. */ public Position getSnappedPosition() { return snappedPosition; }
3.68
morf_RemoveTable_accept
/** * {@inheritDoc} * * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor) */ @Override public void accept(SchemaChangeVisitor visitor) { visitor.visit(this); }
3.68
hbase_HFile_isHFileFormat
/**
 * Returns true if the specified file has a valid HFile Trailer.
 * @param fs filesystem
 * @param fileStatus the file to verify
 * @return true if the file has a valid HFile Trailer, otherwise false
 * @throws IOException if failed to read from the underlying stream
 */
public static boolean isHFileFormat(final FileSystem fs, final FileStatus fileStatus)
  throws IOException {
  final Path path = fileStatus.getPath();
  final long size = fileStatus.getLen();
  try (FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path)) {
    boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
    assert !isHBaseChecksum; // Initially we must read with FS checksum.
    FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
    return true;
  } catch (IllegalArgumentException e) {
    return false;
  }
}
3.68
flink_StateDescriptor_setQueryable
/**
 * Sets the name for queries of state created from this descriptor.
 *
 * <p>If a name is set, the created state will be published for queries during runtime. The name
 * needs to be unique per job. If there is another state instance published under the same name,
 * the job will fail during runtime.
 *
 * @param queryableStateName State name for queries (unique name per job)
 * @throws IllegalStateException If queryable state name already set
 * @deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed
 *     in a future Flink major version.
 */
@Deprecated
public void setQueryable(String queryableStateName) {
    Preconditions.checkArgument(
            ttlConfig.getUpdateType() == StateTtlConfig.UpdateType.Disabled,
            "Queryable state is currently not supported with TTL");
    if (this.queryableStateName == null) {
        this.queryableStateName =
                Preconditions.checkNotNull(queryableStateName, "Registration name");
    } else {
        throw new IllegalStateException("Queryable state name already set");
    }
}
3.68
hbase_HbckTableInfo_dump
/**
 * This dumps data in a visually reasonable way for visual debugging
 */
private void dump(SortedSet<byte[]> splits, Multimap<byte[], HbckRegionInfo> regions) {
  // we display this way because the last end key should be displayed as well.
  StringBuilder sb = new StringBuilder();
  for (byte[] k : splits) {
    sb.setLength(0); // clear out existing buffer, if any.
    sb.append(Bytes.toStringBinary(k) + ":\t");
    for (HbckRegionInfo r : regions.get(k)) {
      sb.append("[ " + r.toString() + ", " + Bytes.toStringBinary(r.getEndKey()) + "]\t");
    }
    hbck.getErrors().print(sb.toString());
  }
}
3.68
hbase_FileIOEngine_getAbsoluteOffsetInFile
/** * Get the absolute offset in given file with the relative global offset. * @return the absolute offset */ private long getAbsoluteOffsetInFile(int fileNum, long globalOffset) { return globalOffset - fileNum * sizePerFile; }
3.68
hudi_MarkerUtils_markerDirToInstantTime
/**
 * Get instantTime from full marker path, for example:
 * /var/folders/t3/th1dw75d0yz2x2k2qt6ys9zh0000gp/T/junit6502909693741900820/dataset/.hoodie/.temp/003
 * ==> 003
 *
 * @param marker the full marker directory path
 * @return the instant time, i.e., the last path component
 */
public static String markerDirToInstantTime(String marker) {
  String[] ele = marker.split("/");
  return ele[ele.length - 1];
}
3.68
framework_Window_isTabStopEnabled
/**
 * Gets whether keyboard focus is prevented from leaving the window with the
 * tab key.
 *
 * @return true when the focus is limited to inside the window, false when
 *         focus can leave the window
 */
public boolean isTabStopEnabled() {
    return getState(false).assistiveTabStop;
}
3.68
framework_HierarchicalDataCommunicator_setItemCollapseAllowedProvider
/**
 * Sets the item collapse allowed provider for this
 * HierarchicalDataCommunicator. The provider should return {@code true} for
 * any item that the user can collapse.
 * <p>
 * <strong>Note:</strong> This callback will be accessed often when sending
 * data to the client. The callback should not do any costly operations.
 *
 * @param provider
 *            the item collapse allowed provider, not {@code null}
 */
public void setItemCollapseAllowedProvider(
        ItemCollapseAllowedProvider<T> provider) {
    Objects.requireNonNull(provider, "Provider can't be null");
    itemCollapseAllowedProvider = provider;
    // Update hierarchy mapper
    mapper.setItemCollapseAllowedProvider(provider);

    getActiveDataHandler().getActiveData().values().forEach(this::refresh);
}
3.68
hadoop_OBSFileSystem_open
/**
 * Open an FSDataInputStream at the indicated Path.
 *
 * @param f the file path to open
 * @param bufferSize the size of the buffer to be used
 * @return the FSDataInputStream for the file
 * @throws IOException on any failure to open the file
 */
@Override
public FSDataInputStream open(final Path f, final int bufferSize)
    throws IOException {
  LOG.debug("Opening '{}' for reading.", f);
  final FileStatus fileStatus = getFileStatus(f);
  if (fileStatus.isDirectory()) {
    throw new FileNotFoundException(
        "Can't open " + f + " because it is a directory");
  }
  return new FSDataInputStream(
      new OBSInputStream(bucket, OBSCommonUtils.pathToKey(this, f),
          fileStatus.getLen(), obs, statistics, readAheadRange, this));
}
3.68
flink_BlobOutputStream_receiveAndCheckPutResponse
/**
 * Reads the response from the input stream and throws in case of errors.
 *
 * @param is stream to read from
 * @param md message digest to check the response against
 * @param blobType whether the BLOB should be permanent or transient
 * @return the key of the stored BLOB as returned by the server
 * @throws IOException if the response is an error, the message digest does not match or reading
 *     the response failed
 */
private static BlobKey receiveAndCheckPutResponse(
        InputStream is, MessageDigest md, BlobKey.BlobType blobType) throws IOException {
    int response = is.read();
    if (response < 0) {
        throw new EOFException("Premature end of response");
    } else if (response == RETURN_OKAY) {
        BlobKey remoteKey = BlobKey.readFromInputStream(is);
        byte[] localHash = md.digest();

        if (blobType != remoteKey.getType()) {
            throw new IOException("Detected data corruption during transfer");
        }
        if (!Arrays.equals(localHash, remoteKey.getHash())) {
            throw new IOException("Detected data corruption during transfer");
        }

        return remoteKey;
    } else if (response == RETURN_ERROR) {
        Throwable cause = BlobUtils.readExceptionFromStream(is);
        throw new IOException("Server side error: " + cause.getMessage(), cause);
    } else {
        throw new IOException("Unrecognized response: " + response + '.');
    }
}
3.68
hadoop_ValidateRenamedFilesStage_addFileCommitted
/** * Add a file entry to the list of committed files. * @param entry entry */ private synchronized void addFileCommitted(FileEntry entry) { filesCommitted.add(entry); }
3.68
zxing_MonochromeRectangleDetector_findCornerFromCenter
/**
 * Attempts to locate a corner of the barcode by scanning up, down, left or right from a center
 * point which should be within the barcode.
 *
 * @param centerX center's x component (horizontal)
 * @param deltaX same as deltaY but change in x per step instead
 * @param left minimum value of x
 * @param right maximum value of x
 * @param centerY center's y component (vertical)
 * @param deltaY change in y per step. If scanning up this is negative; down, positive;
 *  left or right, 0
 * @param top minimum value of y to search through (meaningless when deltaY == 0)
 * @param bottom maximum value of y
 * @param maxWhiteRun maximum run of white pixels that can still be considered to be within
 *  the barcode
 * @return a {@link ResultPoint} encapsulating the corner that was found
 * @throws NotFoundException if such a point cannot be found
 */
private ResultPoint findCornerFromCenter(int centerX, int deltaX, int left, int right,
    int centerY, int deltaY, int top, int bottom, int maxWhiteRun) throws NotFoundException {
  int[] lastRange = null;
  for (int y = centerY, x = centerX;
       y < bottom && y >= top && x < right && x >= left;
       y += deltaY, x += deltaX) {
    int[] range;
    if (deltaX == 0) {
      // horizontal slices, up and down
      range = blackWhiteRange(y, maxWhiteRun, left, right, true);
    } else {
      // vertical slices, left and right
      range = blackWhiteRange(x, maxWhiteRun, top, bottom, false);
    }
    if (range == null) {
      if (lastRange == null) {
        throw NotFoundException.getNotFoundInstance();
      }
      // lastRange was found
      if (deltaX == 0) {
        int lastY = y - deltaY;
        if (lastRange[0] < centerX) {
          if (lastRange[1] > centerX) {
            // straddle, choose one or the other based on direction
            return new ResultPoint(lastRange[deltaY > 0 ? 0 : 1], lastY);
          }
          return new ResultPoint(lastRange[0], lastY);
        } else {
          return new ResultPoint(lastRange[1], lastY);
        }
      } else {
        int lastX = x - deltaX;
        if (lastRange[0] < centerY) {
          if (lastRange[1] > centerY) {
            return new ResultPoint(lastX, lastRange[deltaX < 0 ? 0 : 1]);
          }
          return new ResultPoint(lastX, lastRange[0]);
        } else {
          return new ResultPoint(lastX, lastRange[1]);
        }
      }
    }
    lastRange = range;
  }
  throw NotFoundException.getNotFoundInstance();
}
3.68
Activiti_TreeMethodExpression_isParametersProvided
/** * @return <code>true</code> if this is a method invocation expression */ @Override public boolean isParametersProvided() { return node.isMethodInvocation(); }
3.68
druid_DruidPooledConnection_getConnectNotEmptyWaitNanos
/** * @since 1.0.17 */ public long getConnectNotEmptyWaitNanos() { return this.holder.getLastNotEmptyWaitNanos(); }
3.68
framework_Highlight_hide
/** * Hides the given highlight. * * @param highlight * Highlight to hide */ static void hide(Element highlight) { if (highlight != null && highlight.getParentElement() != null) { highlight.getParentElement().removeChild(highlight); highlights.remove(highlight); } }
3.68
hadoop_SuccessData_getSuccess
/** * Get the success flag. * @return did the job succeed? */ public boolean getSuccess() { return success; }
3.68
morf_AbstractSqlDialectTest_testMissingMetadataError
/**
 * Tests that passing a null value for the metadata fails
 */
@Test
public void testMissingMetadataError() {
  InsertStatement stmt = new InsertStatement().into(new TableReference("missingTable"));

  try {
    testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
    fail("Should have raised an exception when there was no metadata for the table being inserted into");
  } catch (IllegalArgumentException e) {
    // Expected exception
  }
}
3.68
framework_DragHandle_addStyleName
/** * Adds CSS style name to the drag handle element. * * @param styleName * a CSS style name */ public void addStyleName(String styleName) { element.addClassName(styleName); }
3.68
hbase_Scan_getStartRow
/** Returns the startrow */ public byte[] getStartRow() { return this.startRow; }
3.68
hadoop_ReencryptionHandler_notifyNewSubmission
/** * Called when a new zone is submitted for re-encryption. This will interrupt * the background thread if it's waiting for the next * DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY. */ synchronized void notifyNewSubmission() { LOG.debug("Notifying handler for new re-encryption command."); this.notify(); }
3.68
hbase_KeyValue_isLatestTimestamp
/** Returns True if this KeyValue has a LATEST_TIMESTAMP timestamp. */ public boolean isLatestTimestamp() { return Bytes.equals(getBuffer(), getTimestampOffset(), Bytes.SIZEOF_LONG, HConstants.LATEST_TIMESTAMP_BYTES, 0, Bytes.SIZEOF_LONG); }
3.68
hbase_RpcServer_getRequestUserName
/** * Returns the username for any user associated with the current RPC request or not present if no * user is set. */ public static Optional<String> getRequestUserName() { return getRequestUser().map(User::getShortName); }
3.68
morf_AbstractSqlDialectTest_testMergeComplex
/**
 * Tests a more complex merge.
 */
@Test
public void testMergeComplex() {
  TableReference foo = new TableReference("foo").as("foo");
  TableReference somewhere = new TableReference("somewhere");
  TableReference join = new TableReference("join");

  SelectStatement sourceStmt = new SelectStatement(somewhere.field("newId").as("id"),
      join.field("joinBar").as("bar")).from(somewhere)
      .innerJoin(join, eq(somewhere.field("newId"), join.field("joinId"))).alias("alias");

  MergeStatement stmt = new MergeStatement().into(foo).tableUniqueKey(foo.field("id")).from(sourceStmt);

  assertEquals("Select scripts are not the same", expectedMergeComplex(), testDialect.convertStatementToSQL(stmt));
}
3.68
hbase_Union2_decodeB
/** * Read an instance of the second type parameter from buffer {@code src}. */ public B decodeB(PositionedByteRange src) { return (B) decode(src); }
3.68
flink_StreamExecutionEnvironment_getExecutionPlan
/**
 * Creates the plan with which the system will execute the program, and returns it as a String
 * using a JSON representation of the execution data flow graph. Note that this needs to be
 * called before the plan is executed.
 *
 * @return The execution plan of the program, as a JSON String.
 */
public String getExecutionPlan() {
    return getStreamGraph(false).getStreamingPlanAsJSON();
}
3.68
framework_Window_setDraggable
/**
 * Enables or disables dragging (moving) of the window by the user. By
 * default a window is draggable.
 *
 * @param draggable
 *            true if the window can be dragged by the user
 */
public void setDraggable(boolean draggable) {
    getState().draggable = draggable;
}
3.68
rocketmq-connect_MetricsReporter_onHistogramAdded
/** * Called when a {@link Histogram} is added to the registry. * * @param name the histogram's name * @param histogram the histogram */ public void onHistogramAdded(String name, Histogram histogram) { MetricName metricName = MetricUtils.stringToMetricName(name); this.onHistogramAdded(metricName, MetricUtils.getHistogramValue(metricName, histogram)); }
3.68
flink_SkipListUtils_putValueLen
/** * Puts the length of value data. * * @param memorySegment memory segment for value space. * @param offset offset of value space in memory segment. * @param valueLen length of value data. */ public static void putValueLen(MemorySegment memorySegment, int offset, int valueLen) { memorySegment.putInt(offset + VALUE_LEN_OFFSET, valueLen); }
3.68
dubbo_DubboBootstrapStatedEvent_getDubboBootstrap
/** * Get {@link org.apache.dubbo.config.bootstrap.DubboBootstrap} instance * * @return non-null */ public DubboBootstrap getDubboBootstrap() { return (DubboBootstrap) super.getSource(); }
3.68
streampipes_FlinkRuntime_appendEnvironmentConfig
/**
 * This method can be called in case additional environment settings should be applied to the runtime.
 *
 * @param env The Stream Execution environment
 */
public void appendEnvironmentConfig(StreamExecutionEnvironment env) {
  // This sets the stream time characteristics.
  // The default value is TimeCharacteristic.ProcessingTime.
  if (this.streamTimeCharacteristic != null) {
    env.setStreamTimeCharacteristic(this.streamTimeCharacteristic);
    env.setParallelism(1);
  }
}
3.68
framework_BrowserWindowOpener_getParameterNames
/** * Gets the names of all parameters set using * {@link #setParameter(String, String)}. * * @return an unmodifiable set of parameter names * * @see #setParameter(String, String) * @see #getParameter(String) */ public Set<String> getParameterNames() { return Collections.unmodifiableSet(getState().parameters.keySet()); }
3.68
hbase_DependentColumnFilter_dropDependentColumn
/** Returns true if we should drop the dependent column, false otherwise */ public boolean dropDependentColumn() { return this.dropDependentColumn; }
3.68
hbase_SnapshotScannerHDFSAclController_isHdfsAclSet
/**
 * Check if user global/namespace/table HDFS ACLs are already set
 */
private boolean isHdfsAclSet(Table aclTable, String userName, String namespace,
  TableName tableName) throws IOException {
  boolean isSet = SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName);
  if (namespace != null) {
    isSet = isSet
      || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace);
  }
  if (tableName != null) {
    isSet = isSet
      || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName,
        tableName.getNamespaceAsString())
      || SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName);
  }
  return isSet;
}
3.68
framework_TabSheetClose_setup
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
 * VaadinRequest)
 */
@Override
protected void setup(VaadinRequest request) {
    TabSheet tabsheet = new TabSheet();
    for (int loop = 0; loop < 3; loop++) {
        Tab tab = tabsheet.addTab(new CssLayout(), "tab " + loop);
        tab.setClosable(true);
        tab.setId("tab" + loop);
    }
    CssLayout layout = new CssLayout();
    layout.addComponent(tabsheet);
    layout.setWidth("150px");
    addComponent(layout);
}
3.68
framework_Escalator_getSpacerHeightsSum
/** * Calculates the sum of all spacers. * * @return sum of all spacers, or 0 if no spacers present */ public double getSpacerHeightsSum() { return getHeights(rowIndexToSpacer.values()); }
3.68
hbase_Address_toStringWithoutDomain
/**
 * If hostname is a.b.c and the port is 123, return a:123 instead of a.b.c:123.
 * @return if host looks like it is resolved -- not an IP -- then strip the domain portion,
 *         otherwise returns same as {@link #toString()}
 */
public String toStringWithoutDomain() {
  String hostname = getHostName();
  List<String> parts = Splitter.on('.').splitToList(hostname);
  if (parts.size() > 1) {
    Iterator<String> i = parts.iterator();
    String base = i.next();
    while (i.hasNext()) {
      String part = i.next();
      if (!StringUtils.isNumeric(part)) {
        return Address.fromParts(base, getPort()).toString();
      }
    }
  }
  return toString();
}
3.68
rocketmq-connect_AbstractConfig_getList
/**
 * Get a comma-separated config value as a list, falling back to the given default.
 *
 * @param config the key-value configuration
 * @param key the config key to look up
 * @param defaultValue the value to fall back to when the key is absent or its value is null
 * @return the parsed list of values
 */
protected List<String> getList(KeyValue config, String key, String defaultValue) {
    // Fall back to the default when the key is absent or its value is null.
    if (!config.containsKey(key) || Objects.isNull(config.getString(key))) {
        return Collections.singletonList(defaultValue);
    }
    return Arrays.asList(COMMA_WITH_WHITESPACE.split(config.getString(key), -1));
}
3.68
hbase_HRegion_isZeroLengthThenDelete
/**
 * Make sure the file has been through lease recovery before getting its status, so the file
 * length can be trusted.
 * @param p File to check.
 * @return True if file was zero-length (and if so, we'll delete it in here).
 */
private static boolean isZeroLengthThenDelete(final FileSystem fs, final FileStatus stat,
  final Path p) throws IOException {
  if (stat.getLen() > 0) {
    return false;
  }
  LOG.warn("File " + p + " is zero-length, deleting.");
  fs.delete(p, false);
  return true;
}
3.68
zxing_CalendarParsedResult_getStart
/** * @return start time * @deprecated use {@link #getStartTimestamp()} */ @Deprecated public Date getStart() { return new Date(start); }
3.68
hbase_AbstractMemStore_upsert
/*
 * Inserts the specified Cell into MemStore and deletes any existing versions of the same
 * row/family/qualifier as the specified Cell.
 * <p>
 * First, the specified Cell is inserted into the Memstore.
 * <p>
 * If there are any existing Cell in this MemStore with the same row, family, and qualifier,
 * they are removed.
 * <p>
 * Callers must hold the read lock.
 * @param cell the cell to be updated
 * @param readpoint readpoint below which we can safely remove duplicate KVs
 * @param memstoreSizing object to accumulate changed size
 */
private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) {
  doAddOrUpsert(cell, readpoint, memstoreSizing, false);
}
3.68
dubbo_RpcServiceContext_asyncCall
/**
 * one way async call, send request only, and result is not required
 *
 * @param runnable the task to run as a one way call
 */
@Override
public void asyncCall(Runnable runnable) {
    try {
        setAttachment(RETURN_KEY, Boolean.FALSE.toString());
        runnable.run();
    } catch (Throwable e) {
        // FIXME should put exception in future?
        throw new RpcException("oneway call error. " + e.getMessage(), e);
    } finally {
        removeAttachment(RETURN_KEY);
    }
}
3.68
flink_CoGroupOperatorBase_setGroupOrder
/**
 * Sets the order of the elements within a group for the given input.
 *
 * @param inputNum The number of the input (here either <i>0</i> or <i>1</i>).
 * @param order The order for the elements in a group.
 */
public void setGroupOrder(int inputNum, Ordering order) {
    if (inputNum == 0) {
        this.groupOrder1 = order;
    } else if (inputNum == 1) {
        this.groupOrder2 = order;
    } else {
        throw new IndexOutOfBoundsException();
    }
}
3.68
hudi_FileIOUtils_readAsUTFStringLines
/**
 * Reads the input stream into String lines.
 *
 * @param input {@code InputStream} instance.
 * @return String lines in a list.
 */
public static List<String> readAsUTFStringLines(InputStream input) {
  BufferedReader bufferedReader =
      new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8));
  List<String> lines = bufferedReader.lines().collect(Collectors.toList());
  closeQuietly(bufferedReader);
  return lines;
}
3.68
graphhopper_VectorTile_getLayersBuilderList
/** * <code>repeated .vector_tile.Tile.Layer layers = 3;</code> */ public java.util.List<vector_tile.VectorTile.Tile.Layer.Builder> getLayersBuilderList() { return getLayersFieldBuilder().getBuilderList(); }
3.68
zxing_MinimalEncoder_getMinSymbolSize
/**
 * Returns the capacity in codewords of the smallest symbol that has enough capacity to fit the
 * given minimal number of codewords.
 */
int getMinSymbolSize(int minimum) {
  switch (input.getShapeHint()) {
    case FORCE_SQUARE:
      for (int capacity : squareCodewordCapacities) {
        if (capacity >= minimum) {
          return capacity;
        }
      }
      break;
    case FORCE_RECTANGLE:
      for (int capacity : rectangularCodewordCapacities) {
        if (capacity >= minimum) {
          return capacity;
        }
      }
      break;
  }
  for (int capacity : allCodewordCapacities) {
    if (capacity >= minimum) {
      return capacity;
    }
  }
  return allCodewordCapacities[allCodewordCapacities.length - 1];
}
3.68
flink_ResourceManagerFactory_supportMultiLeaderSession
/** This indicates whether the process should be terminated after losing leadership. */ protected boolean supportMultiLeaderSession() { return true; }
3.68
framework_ComputedStyle_getWidthIncludingBorderPadding
/** * Returns the current width, padding and border from the DOM. * * @return the computed width including padding and borders */ public double getWidthIncludingBorderPadding() { double w = getWidth(); if (BrowserInfo.get().isIE() || isContentBox()) { // IE11 always returns only the width without padding/border w += getBorderWidth() + getPaddingWidth(); } return w; }
3.68
hbase_GroupingTableMap_initJob
/**
 * Use this before submitting a TableMap job. It will appropriately set up the JobConf.
 * @param table table to be processed
 * @param columns space separated list of columns to fetch
 * @param groupColumns space separated list of columns used to form the key used in collect
 * @param mapper map class
 * @param job job configuration object
 */
@SuppressWarnings("unchecked")
public static void initJob(String table, String columns, String groupColumns,
  Class<? extends TableMap> mapper, JobConf job) {
  TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class,
    Result.class, job);
  job.set(GROUP_COLUMNS, groupColumns);
}
3.68
hudi_AdbSyncTool_syncPartitions
/**
 * Syncs the list of storage partitions passed in: if a partition is not yet in ADB it is
 * added, and if its partition path does not match it is updated.
 */
private void syncPartitions(String tableName, List<String> writtenPartitionsSince) {
  try {
    if (config.getSplitStrings(META_SYNC_PARTITION_FIELDS).isEmpty()) {
      LOG.info("Not a partitioned table.");
      return;
    }

    Map<List<String>, String> partitions = syncClient.scanTablePartitions(tableName);
    List<PartitionEvent> partitionEvents = syncClient.getPartitionEvents(partitions, writtenPartitionsSince);

    List<String> newPartitions = filterPartitions(partitionEvents, PartitionEventType.ADD);
    LOG.info("New Partitions:{}", newPartitions);
    syncClient.addPartitionsToTable(tableName, newPartitions);

    List<String> updatePartitions = filterPartitions(partitionEvents, PartitionEventType.UPDATE);
    LOG.info("Changed Partitions:{}", updatePartitions);
    syncClient.updatePartitionsToTable(tableName, updatePartitions);
  } catch (Exception e) {
    throw new HoodieAdbSyncException("Failed to sync partitions for table:" + tableName, e);
  }
}
3.68
framework_AbstractSplitPanel_getSecondComponent
/**
 * Gets the second component of this split panel. Depending on the direction
 * this is either the component shown at the bottom or to the right.
 *
 * @return the second component of this split panel
 */
public Component getSecondComponent() {
    return (Component) getState(false).secondChild;
}
3.68
hbase_ServerNonceManager_addMvccToOperationContext
/**
 * Store the write point in OperationContext when the operation succeeds.
 * @param group Nonce group.
 * @param nonce Nonce.
 * @param mvcc Write point of the succeeded operation.
 */
public void addMvccToOperationContext(long group, long nonce, long mvcc) {
  if (nonce == HConstants.NO_NONCE) {
    return;
  }
  NonceKey nk = new NonceKey(group, nonce);
  OperationContext result = nonces.get(nk);
  assert result != null;
  synchronized (result) {
    result.setMvcc(mvcc);
  }
}
3.68
hudi_HoodieRepairTool_printRepairInfo
/**
 * Prints the repair info.
 *
 * @param instantTimesToRepair A list of instant times in consideration for repair
 * @param instantsWithDanglingFiles A list of instants with dangling files.
 */
private void printRepairInfo(
    List<String> instantTimesToRepair, List<ImmutablePair<String, List<String>>> instantsWithDanglingFiles) {
  int numInstantsToRepair = instantsWithDanglingFiles.size();
  LOG.warn("Number of instants verified based on the base and log files: "
      + instantTimesToRepair.size());
  LOG.warn("Instant timestamps: " + instantTimesToRepair);
  LOG.warn("Number of instants to repair: " + numInstantsToRepair);
  if (numInstantsToRepair > 0) {
    instantsWithDanglingFiles.forEach(e -> LOG.warn(" ** Removing files: " + e.getValue()));
  }
}
3.68
hudi_HoodieInstantTimeGenerator_createNewInstantTime
/**
 * Returns next instant time in the correct format. Ensures each instant time is at least 1
 * millisecond apart since we create instant times at millisecond granularity.
 *
 * @param shouldLock Whether the lock should be enabled to get the instant time.
 * @param timeGenerator TimeGenerator used to generate the instant time.
 * @param milliseconds Milliseconds to add to current time while generating the new instant time
 */
public static String createNewInstantTime(boolean shouldLock, TimeGenerator timeGenerator, long milliseconds) {
  return lastInstantTime.updateAndGet((oldVal) -> {
    String newCommitTime;
    do {
      Date d = new Date(timeGenerator.currentTimeMillis(!shouldLock) + milliseconds);
      if (commitTimeZone.equals(HoodieTimelineTimeZone.UTC)) {
        newCommitTime = d.toInstant().atZone(HoodieTimelineTimeZone.UTC.getZoneId())
            .toLocalDateTime().format(MILLIS_INSTANT_TIME_FORMATTER);
      } else {
        newCommitTime = MILLIS_INSTANT_TIME_FORMATTER.format(convertDateToTemporalAccessor(d));
      }
    } while (HoodieTimeline.compareTimestamps(newCommitTime, HoodieActiveTimeline.LESSER_THAN_OR_EQUALS, oldVal));
    return newCommitTime;
  });
}
3.68
hbase_HFileArchiveUtil_getArchivePath
/** * Get the full path to the archive directory on the configured * {@link org.apache.hadoop.hbase.master.MasterFileSystem} * @param rootdir {@link Path} to the root directory where hbase files are stored (for building * the archive path) * @return the full {@link Path} to the archive directory, as defined by the configuration */ private static Path getArchivePath(final Path rootdir) { return new Path(rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY); }
3.68
hbase_EnableTableProcedure_preEnable
/** * Action before enabling table. * @param env MasterProcedureEnv * @param state the procedure state */ private void preEnable(final MasterProcedureEnv env, final EnableTableState state) throws IOException, InterruptedException { runCoprocessorAction(env, state); }
3.68
pulsar_OwnershipCache_checkOwnershipAsync
/**
 * Check whether this broker owns given namespace bundle.
 *
 * @param bundle namespace bundle
 * @return future that will complete with check result
 */
public CompletableFuture<Boolean> checkOwnershipAsync(NamespaceBundle bundle) {
    Optional<CompletableFuture<OwnedBundle>> ownedBundleFuture = getOwnedBundleAsync(bundle);
    if (!ownedBundleFuture.isPresent()) {
        return CompletableFuture.completedFuture(false);
    }
    return ownedBundleFuture.get()
            .thenApply(bd -> bd != null && bd.isActive());
}
3.68
framework_Method_isNoLayout
/** * Checks whether this method is annotated with {@link NoLayout}. * * @since 7.4 * * @return <code>true</code> if this method has a NoLayout annotation; * otherwise <code>false</code> */ public boolean isNoLayout() { return TypeDataStore.isNoLayoutRpcMethod(this); }
3.68
shardingsphere-elasticjob_ConfigurationNode_isConfigPath
/**
 * Judge whether the given node path is the configuration root path or not.
 *
 * @param path node path
 * @return is configuration root path or not
 */
public boolean isConfigPath(final String path) {
    return jobNodePath.getConfigNodePath().equals(path);
}
3.68
framework_Calendar_autoScaleVisibleHoursOfDay
/**
 * Sets the displayed start and end time to fit all current events that were
 * retrieved from the last call to getEvents().
 * <p>
 * If no events exist, nothing happens.
 * <p>
 * <b>NOTE: triggering this method only does this once for the current
 * events - events that are not in the current visible range, are
 * ignored!</b>
 *
 * @see #setFirstVisibleHourOfDay(int)
 * @see #setLastVisibleHourOfDay(int)
 */
public void autoScaleVisibleHoursOfDay() {
    if (minTimeInMinutes != null) {
        setFirstVisibleHourOfDay(minTimeInMinutes / 60);
        // Do not show the final hour if last minute ends on it
        setLastVisibleHourOfDay((maxTimeInMinutes - 1) / 60);
    }
}
3.68
hbase_OrderedFloat32_decodeFloat
/** * Read a {@code float} value from the buffer {@code dst}. * @param dst the {@link PositionedByteRange} to read the {@code float} from * @return the {@code float} read from the buffer */ public float decodeFloat(PositionedByteRange dst) { return OrderedBytes.decodeFloat32(dst); }
3.68
hbase_HFileLink_build
/** * Create an HFileLink instance from table/region/family/hfile location * @param conf {@link Configuration} from which to extract specific archive locations * @param table Table name * @param region Region Name * @param family Family Name * @param hfile HFile Name * @return Link to the file with the specified table/region/family/hfile location * @throws IOException on unexpected error. */ public static HFileLink build(final Configuration conf, final TableName table, final String region, final String family, final String hfile) throws IOException { return HFileLink.buildFromHFileLinkPattern(conf, createPath(table, region, family, hfile)); }
3.68
shardingsphere-elasticjob_NettyRestfulServiceConfiguration_addControllerInstances
/** * Add instances of RestfulController. * * @param instances instances of RestfulController */ public void addControllerInstances(final RestfulController... instances) { controllerInstances.addAll(Arrays.asList(instances)); }
3.68
hbase_MasterObserver_preGetProcedures
/** * Called before a getProcedures request has been processed. * @param ctx the environment to interact with the framework and master */ default void preGetProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { }
3.68
flink_FlinkRelMdCollation_limit
/** Helper method to determine a limit's collation. */ public static List<RelCollation> limit(RelMetadataQuery mq, RelNode input) { return mq.collations(input); }
3.68
flink_TableFactoryService_findSingleInternal
/**
 * Finds a table factory of the given class, property map, and classloader.
 *
 * @param factoryClass desired factory class
 * @param properties properties that describe the factory configuration
 * @param classLoader classloader for service loading
 * @param <T> factory class type
 * @return the matching factory
 */
private static <T extends TableFactory> T findSingleInternal(
        Class<T> factoryClass,
        Map<String, String> properties,
        Optional<ClassLoader> classLoader) {
    List<TableFactory> tableFactories = discoverFactories(classLoader);
    List<T> filtered = filter(tableFactories, factoryClass, properties);

    if (filtered.size() > 1) {
        throw new AmbiguousTableFactoryException(
                filtered, factoryClass, tableFactories, properties);
    } else {
        return filtered.get(0);
    }
}
3.68
hudi_SqlQueryBuilder_from
/** * Appends a FROM clause to a query. * * @param tables The table names to select from. * @return The {@link SqlQueryBuilder} instance. */ public SqlQueryBuilder from(String... tables) { if (tables == null || tables.length == 0) { throw new IllegalArgumentException("No table name provided with FROM clause. Please provide a table name to select from."); } sqlBuilder.append(" from "); sqlBuilder.append(String.join(", ", tables)); return this; }
3.68
hudi_HoodieIndexUtils_getExistingRecords
/**
 * Read existing records based on the given partition path and {@link HoodieRecordLocation} info.
 * <p>
 * This will perform merged read for MOR table, in case a FileGroup contains log files.
 *
 * @return {@link HoodieRecord}s that have the current location being set.
 */
private static <R> HoodieData<HoodieRecord<R>> getExistingRecords(
    HoodieData<HoodieRecordGlobalLocation> partitionLocations, HoodieWriteConfig config, HoodieTable hoodieTable) {
  final Option<String> instantTime = hoodieTable
      .getMetaClient()
      .getCommitsTimeline()
      .filterCompletedInstants()
      .lastInstant()
      .map(HoodieInstant::getTimestamp);
  return partitionLocations.flatMap(p ->
      new HoodieMergedReadHandle(config, instantTime, hoodieTable, Pair.of(p.getPartitionPath(), p.getFileId()))
          .getMergedRecords().iterator());
}
3.68
morf_AbstractSqlDialectTest_testAddMonths
/** * Test that AddMonths functionality behaves as expected. */ @Test public void testAddMonths() { String result = testDialect.getSqlFrom(addMonths(field("testField"), new FieldLiteral(-3))); assertEquals(expectedAddMonths(), result); }
3.68
framework_VSlider_getEventPosition
/** * TODO consider extracting touches support to an impl class specific for * webkit (only browser that really supports touches). * * @param event * the event whose position to check * @return the client position */ protected int getEventPosition(Event event) { if (isVertical()) { return WidgetUtil.getTouchOrMouseClientY(event); } else { return WidgetUtil.getTouchOrMouseClientX(event); } }
3.68
flink_RemoteInputChannel_requestSubpartition
/** Requests a remote subpartition. */
@VisibleForTesting
@Override
public void requestSubpartition() throws IOException, InterruptedException {
    if (partitionRequestClient == null) {
        LOG.debug(
                "{}: Requesting REMOTE subpartition {} of partition {}. {}",
                this,
                consumedSubpartitionIndex,
                partitionId,
                channelStatePersister);
        // Create a client and request the partition
        try {
            partitionRequestClient = connectionManager.createPartitionRequestClient(connectionId);
        } catch (IOException e) {
            // IOExceptions indicate that we could not open a connection to the remote
            // TaskExecutor
            throw new PartitionConnectionException(partitionId, e);
        }
        partitionRequestClient.requestSubpartition(partitionId, consumedSubpartitionIndex, this, 0);
    }
}
3.68
flink_ExecNodePlanDumper_getReuseId
/** * Returns reuse id if the given node is a reuse node (that means it has multiple outputs), * else -1. */ public Integer getReuseId(ExecNode<?> node) { return mapReuseNodeToReuseId.getOrDefault(node, -1); }
3.68
hbase_AvlUtil_isLinked
/** Return true if the node is linked to a list, false otherwise */ public static <TNode extends AvlLinkedNode> boolean isLinked(TNode node) { return node.iterPrev != null && node.iterNext != null; }
3.68
framework_AbstractComponentConnector_getWidget
/**
 * Returns the widget associated with this paintable. The widget returned by
 * this method must not change during the life time of the paintable.
 *
 * @return The widget associated with this paintable
 */
@Override
public Widget getWidget() {
    if (widget == null) {
        if (Profiler.isEnabled()) {
            Profiler.enter("AbstractComponentConnector.createWidget for "
                    + getClass().getSimpleName());
        }
        widget = createWidget();
        if (Profiler.isEnabled()) {
            Profiler.leave("AbstractComponentConnector.createWidget for "
                    + getClass().getSimpleName());
        }
    }
    return widget;
}
3.68
framework_EncodeUtil_rfc5987Encode
/**
 * Encodes the given string to UTF-8 <code>value-chars</code> as defined in
 * RFC5987 for use in e.g. the <code>Content-Disposition</code> HTTP header.
 *
 * @param value
 *            the string to encode, not <code>null</code>
 * @return the encoded string
 */
public static String rfc5987Encode(String value) {
    StringBuilder builder = new StringBuilder();
    for (int i = 0; i < value.length();) {
        int cp = value.codePointAt(i);
        if (cp < 127 && (Character.isLetterOrDigit(cp) || cp == '.')) {
            builder.append((char) cp);
        } else {
            // Create string from a single code point
            String cpAsString = new String(new int[] { cp }, 0, 1);
            appendHexBytes(builder, cpAsString.getBytes(UTF_8));
        }
        // Advance to the next code point
        i += Character.charCount(cp);
    }
    return builder.toString();
}
3.68
pulsar_AuthenticationProviderOpenID_verifyJWT
/**
 * Build and return a validator for the parameters.
 *
 * @param publicKey - the public key to use when configuring the validator
 * @param publicKeyAlg - the algorithm for the parameterized public key
 * @param jwt - jwt to be verified and returned (only if verified)
 * @return a validator to use for validating a JWT associated with the parameterized public key.
 * @throws AuthenticationException if the Public Key's algorithm is not supported or if the algorithm param does not
 *     match the Public Key's actual algorithm.
 */
DecodedJWT verifyJWT(PublicKey publicKey, String publicKeyAlg, DecodedJWT jwt) throws AuthenticationException {
    if (publicKeyAlg == null) {
        incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
        throw new AuthenticationException("PublicKey algorithm cannot be null");
    }
    Algorithm alg;
    try {
        switch (publicKeyAlg) {
            case ALG_RS256:
                alg = Algorithm.RSA256((RSAPublicKey) publicKey, null);
                break;
            case ALG_RS384:
                alg = Algorithm.RSA384((RSAPublicKey) publicKey, null);
                break;
            case ALG_RS512:
                alg = Algorithm.RSA512((RSAPublicKey) publicKey, null);
                break;
            case ALG_ES256:
                alg = Algorithm.ECDSA256((ECPublicKey) publicKey, null);
                break;
            case ALG_ES384:
                alg = Algorithm.ECDSA384((ECPublicKey) publicKey, null);
                break;
            case ALG_ES512:
                alg = Algorithm.ECDSA512((ECPublicKey) publicKey, null);
                break;
            default:
                incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
                throw new AuthenticationException("Unsupported algorithm: " + publicKeyAlg);
        }
    } catch (ClassCastException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
        throw new AuthenticationException("Expected PublicKey alg [" + publicKeyAlg + "] does not match actual alg.");
    }
    // We verify issuer when retrieving the PublicKey, so it is not verified here.
    // The claim presence requirements are based on
    // https://openid.net/specs/openid-connect-basic-1_0.html#IDToken
    Verification verifierBuilder = JWT.require(alg)
            .acceptLeeway(acceptedTimeLeewaySeconds)
            .withAnyOfAudience(allowedAudiences)
            .withClaimPresence(RegisteredClaims.ISSUED_AT)
            .withClaimPresence(RegisteredClaims.EXPIRES_AT)
            .withClaimPresence(RegisteredClaims.NOT_BEFORE)
            .withClaimPresence(RegisteredClaims.SUBJECT);
    if (isRoleClaimNotSubject) {
        verifierBuilder = verifierBuilder.withClaimPresence(roleClaim);
    }
    JWTVerifier verifier = verifierBuilder.build();
    try {
        return verifier.verify(jwt);
    } catch (TokenExpiredException e) {
        incrementFailureMetric(AuthenticationExceptionCode.EXPIRED_JWT);
        throw new AuthenticationException("JWT expired: " + e.getMessage());
    } catch (SignatureVerificationException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT_SIGNATURE);
        throw new AuthenticationException("JWT signature verification exception: " + e.getMessage());
    } catch (InvalidClaimException e) {
        incrementFailureMetric(AuthenticationExceptionCode.INVALID_JWT_CLAIM);
        throw new AuthenticationException("JWT contains invalid claim: " + e.getMessage());
    } catch (AlgorithmMismatchException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
        throw new AuthenticationException("JWT algorithm does not match Public Key algorithm: " + e.getMessage());
    } catch (JWTDecodeException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
        throw new AuthenticationException("Error while decoding JWT: " + e.getMessage());
    } catch (JWTVerificationException | IllegalArgumentException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT);
        throw new AuthenticationException("JWT verification failed: " + e.getMessage());
    }
}
3.68
hbase_MergeTableRegionsProcedure_createMergedRegionInfo
/**
 * Create merged region info by looking at passed in <code>regionsToMerge</code> to figure what
 * extremes for start and end keys to use; merged region needs to have an extent sufficient to
 * cover all regions-to-merge.
 */
private static RegionInfo createMergedRegionInfo(final RegionInfo[] regionsToMerge) {
  byte[] lowestStartKey = null;
  byte[] highestEndKey = null;
  // Region Id is a timestamp. Merged region's id can't be less than that of
  // merging regions else will insert at wrong location in hbase:meta (See HBASE-710).
  long highestRegionId = -1;
  for (RegionInfo ri : regionsToMerge) {
    if (lowestStartKey == null) {
      lowestStartKey = ri.getStartKey();
    } else if (Bytes.compareTo(ri.getStartKey(), lowestStartKey) < 0) {
      lowestStartKey = ri.getStartKey();
    }
    if (highestEndKey == null) {
      highestEndKey = ri.getEndKey();
    } else if (ri.isLast() || Bytes.compareTo(ri.getEndKey(), highestEndKey) > 0) {
      highestEndKey = ri.getEndKey();
    }
    highestRegionId = ri.getRegionId() > highestRegionId ? ri.getRegionId() : highestRegionId;
  }
  // Merged region is sorted between two merging regions in META
  return RegionInfoBuilder.newBuilder(regionsToMerge[0].getTable()).setStartKey(lowestStartKey)
    .setEndKey(highestEndKey).setSplit(false)
    .setRegionId(highestRegionId + 1 /* Add one so new merged region is highest */).build();
}
3.68
flink_HiveStatsUtil_getColumnStatisticsData
/**
 * Convert Flink ColumnStats to Hive ColumnStatisticsData according to Hive column type. Note we
 * currently assume that, in Flink, the max and min of ColumnStats will be same type as the
 * Flink column type. For example, for SHORT and Long columns, the max and min of their
 * ColumnStats should be of type SHORT and LONG.
 */
private static ColumnStatisticsData getColumnStatisticsData(
        DataType colType, CatalogColumnStatisticsDataBase colStat, String hiveVersion) {
    LogicalTypeRoot type = colType.getLogicalType().getTypeRoot();
    if (type.equals(LogicalTypeRoot.CHAR) || type.equals(LogicalTypeRoot.VARCHAR)) {
        if (colStat instanceof CatalogColumnStatisticsDataString) {
            CatalogColumnStatisticsDataString stringColStat =
                    (CatalogColumnStatisticsDataString) colStat;
            StringColumnStatsData hiveStringColumnStats = new StringColumnStatsData();
            hiveStringColumnStats.clear();
            if (null != stringColStat.getMaxLength()) {
                hiveStringColumnStats.setMaxColLen(stringColStat.getMaxLength());
            }
            if (null != stringColStat.getAvgLength()) {
                hiveStringColumnStats.setAvgColLen(stringColStat.getAvgLength());
            }
            if (null != stringColStat.getNullCount()) {
                hiveStringColumnStats.setNumNulls(stringColStat.getNullCount());
            }
            if (null != stringColStat.getNdv()) {
                hiveStringColumnStats.setNumDVs(stringColStat.getNdv());
            }
            return ColumnStatisticsData.stringStats(hiveStringColumnStats);
        }
    } else if (type.equals(LogicalTypeRoot.BOOLEAN)) {
        if (colStat instanceof CatalogColumnStatisticsDataBoolean) {
            CatalogColumnStatisticsDataBoolean booleanColStat =
                    (CatalogColumnStatisticsDataBoolean) colStat;
            BooleanColumnStatsData hiveBoolStats = new BooleanColumnStatsData();
            hiveBoolStats.clear();
            if (null != booleanColStat.getTrueCount()) {
                hiveBoolStats.setNumTrues(booleanColStat.getTrueCount());
            }
            if (null != booleanColStat.getFalseCount()) {
                hiveBoolStats.setNumFalses(booleanColStat.getFalseCount());
            }
            if (null != booleanColStat.getNullCount()) {
                hiveBoolStats.setNumNulls(booleanColStat.getNullCount());
            }
            return ColumnStatisticsData.booleanStats(hiveBoolStats);
        }
    } else if (type.equals(LogicalTypeRoot.TINYINT)
            || type.equals(LogicalTypeRoot.SMALLINT)
            || type.equals(LogicalTypeRoot.INTEGER)
            || type.equals(LogicalTypeRoot.BIGINT)
            || type.equals(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE)
            || type.equals(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE)
            || type.equals(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE)) {
        if (colStat instanceof CatalogColumnStatisticsDataLong) {
            CatalogColumnStatisticsDataLong longColStat =
                    (CatalogColumnStatisticsDataLong) colStat;
            LongColumnStatsData hiveLongColStats = new LongColumnStatsData();
            hiveLongColStats.clear();
            if (null != longColStat.getMax()) {
                hiveLongColStats.setHighValue(longColStat.getMax());
            }
            if (null != longColStat.getMin()) {
                hiveLongColStats.setLowValue(longColStat.getMin());
            }
            if (null != longColStat.getNdv()) {
                hiveLongColStats.setNumDVs(longColStat.getNdv());
            }
            if (null != longColStat.getNullCount()) {
                hiveLongColStats.setNumNulls(longColStat.getNullCount());
            }
            return ColumnStatisticsData.longStats(hiveLongColStats);
        }
    } else if (type.equals(LogicalTypeRoot.FLOAT) || type.equals(LogicalTypeRoot.DOUBLE)) {
        if (colStat instanceof CatalogColumnStatisticsDataDouble) {
            CatalogColumnStatisticsDataDouble doubleColumnStatsData =
                    (CatalogColumnStatisticsDataDouble) colStat;
            DoubleColumnStatsData hiveFloatStats = new DoubleColumnStatsData();
            hiveFloatStats.clear();
            if (null != doubleColumnStatsData.getMax()) {
                hiveFloatStats.setHighValue(doubleColumnStatsData.getMax());
            }
            if (null != doubleColumnStatsData.getMin()) {
                hiveFloatStats.setLowValue(doubleColumnStatsData.getMin());
            }
            if (null != doubleColumnStatsData.getNullCount()) {
                hiveFloatStats.setNumNulls(doubleColumnStatsData.getNullCount());
            }
            if (null != doubleColumnStatsData.getNdv()) {
                hiveFloatStats.setNumDVs(doubleColumnStatsData.getNdv());
            }
            return ColumnStatisticsData.doubleStats(hiveFloatStats);
        }
    } else if (type.equals(LogicalTypeRoot.DATE)) {
        if (colStat instanceof CatalogColumnStatisticsDataDate) {
            HiveShim hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
            return hiveShim.toHiveDateColStats((CatalogColumnStatisticsDataDate) colStat);
        }
    } else if (type.equals(LogicalTypeRoot.VARBINARY) || type.equals(LogicalTypeRoot.BINARY)) {
        if (colStat instanceof CatalogColumnStatisticsDataBinary) {
            CatalogColumnStatisticsDataBinary binaryColumnStatsData =
                    (CatalogColumnStatisticsDataBinary) colStat;
            BinaryColumnStatsData hiveBinaryColumnStats = new BinaryColumnStatsData();
            hiveBinaryColumnStats.clear();
            if (null != binaryColumnStatsData.getMaxLength()) {
                hiveBinaryColumnStats.setMaxColLen(binaryColumnStatsData.getMaxLength());
            }
            if (null != binaryColumnStatsData.getAvgLength()) {
                hiveBinaryColumnStats.setAvgColLen(binaryColumnStatsData.getAvgLength());
            }
            if (null != binaryColumnStatsData.getNullCount()) {
                hiveBinaryColumnStats.setNumNulls(binaryColumnStatsData.getNullCount());
            }
            return ColumnStatisticsData.binaryStats(hiveBinaryColumnStats);
        }
    } else if (type.equals(LogicalTypeRoot.DECIMAL)) {
        if (colStat instanceof CatalogColumnStatisticsDataDouble) {
            CatalogColumnStatisticsDataDouble flinkStats =
                    (CatalogColumnStatisticsDataDouble) colStat;
            DecimalColumnStatsData hiveStats = new DecimalColumnStatsData();
            if (flinkStats.getMax() != null) {
                // in older versions we cannot create HiveDecimal from Double, so convert Double
                // to BigDecimal first
                hiveStats.setHighValue(
                        toThriftDecimal(
                                HiveDecimal.create(BigDecimal.valueOf(flinkStats.getMax()))));
            }
            if (flinkStats.getMin() != null) {
                hiveStats.setLowValue(
                        toThriftDecimal(
                                HiveDecimal.create(BigDecimal.valueOf(flinkStats.getMin()))));
            }
            if (flinkStats.getNdv() != null) {
                hiveStats.setNumDVs(flinkStats.getNdv());
            }
            if (flinkStats.getNullCount() != null) {
                hiveStats.setNumNulls(flinkStats.getNullCount());
            }
            return ColumnStatisticsData.decimalStats(hiveStats);
        }
    }
    throw new CatalogException(
            String.format(
                    "Flink does not support converting ColumnStats '%s' for Hive column "
                            + "type '%s' yet",
                    colStat, colType));
}
3.68
hudi_InternalSchemaBuilder_visit
/** * Use to traverse all types in internalSchema with visitor. * * @param schema hoodie internal schema * @return visitor expected result. */ public <T> T visit(InternalSchema schema, InternalSchemaVisitor<T> visitor) { return visitor.schema(schema, visit(schema.getRecord(), visitor)); }
3.68
graphhopper_VectorTile_addTags
/** * <pre> * Tags of this feature are encoded as repeated pairs of * integers. * A detailed description of tags is located in sections * 4.2 and 4.4 of the specification * </pre> * * <code>repeated uint32 tags = 2 [packed = true];</code> */ public Builder addTags(int value) { ensureTagsIsMutable(); tags_.add(value); onChanged(); return this; }
3.68
hbase_EnvironmentEdgeManager_injectEdge
/** * Injects the given edge such that it becomes the managed entity. If null is passed to this * method, the default type is assigned to the delegate. * @param edge the new edge. */ public static void injectEdge(EnvironmentEdge edge) { if (edge == null) { reset(); } else { delegate = edge; } }
3.68
morf_DatabaseUpgradeTableContribution_tables
/** * @see org.alfasoftware.morf.upgrade.TableContribution#tables() */ @Override public Collection<Table> tables() { return ImmutableList.of( deployedViewsTable(), upgradeAuditTable() ); }
3.68
hbase_ChainWALEmptyEntryFilter_setFilterEmptyEntry
/**
 * To allow the empty entries to get filtered, we want to set this optional flag to decide if we
 * want to filter the entries which have no cells or all cells got filtered through
 * {@link WALCellFilter}.
 * @param filterEmptyEntry flag
 */
@InterfaceAudience.Private
public void setFilterEmptyEntry(final boolean filterEmptyEntry) {
  this.filterEmptyEntry = filterEmptyEntry;
}
3.68
framework_RowReference_getElement
/** * Gets the table row element of the row. * * @return the element of the row */ public TableRowElement getElement() { return element; }
3.68
hbase_HRegion_lock
/**
 * Try to acquire a lock. Throw RegionTooBusyException if failed to get the lock in time. Throw
 * InterruptedIOException if interrupted while waiting for the lock.
 */
private void lock(final Lock lock, final int multiplier) throws IOException {
  try {
    final long waitTime = Math.min(maxBusyWaitDuration,
      busyWaitDuration * Math.min(multiplier, maxBusyWaitMultiplier));
    if (!lock.tryLock(waitTime, TimeUnit.MILLISECONDS)) {
      // Don't print millis. Message is used as a key over in
      // RetriesExhaustedWithDetailsException processing.
      final String regionName =
        this.getRegionInfo() == null ? "unknown" : this.getRegionInfo().getRegionNameAsString();
      final String serverName = this.getRegionServerServices() == null
        ? "unknown"
        : (this.getRegionServerServices().getServerName() == null
          ? "unknown"
          : this.getRegionServerServices().getServerName().toString());
      RegionTooBusyException rtbe = new RegionTooBusyException(
        "Failed to obtain lock; regionName=" + regionName + ", server=" + serverName);
      LOG.warn("Region is too busy to allow lock acquisition.", rtbe);
      throw rtbe;
    }
  } catch (InterruptedException ie) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Interrupted while waiting for a lock in region {}", this);
    }
    throw throwOnInterrupt(ie);
  }
}
3.68
flink_BinaryExternalSorter_setResultIteratorException
/** * Reports an exception to all threads that are waiting for the result iterator. * * @param ioex The exception to be reported to the threads that wait for the result iterator. */ private void setResultIteratorException(IOException ioex) { synchronized (this.iteratorLock) { if (this.iteratorException == null) { this.iteratorException = ioex; this.iteratorLock.notifyAll(); } } }
3.68
hbase_HFileLink_getReferencedTableName
/** * Get the Table name of the referenced link * @param fileName HFileLink file name * @return the name of the referenced Table */ public static TableName getReferencedTableName(final String fileName) { Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName); if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } return (TableName.valueOf(m.group(1), m.group(2))); }
3.68
hbase_AvlUtil_insert
/**
 * Insert a node into the tree. This is useful when you want to create a new node or replace the
 * content depending if the node already exists or not. Using AvlInsertOrReplace class you can
 * return the node to add/replace.
 * @param root the current root of the tree
 * @param key the key for the node we are trying to insert
 * @param keyComparator the comparator to use to match node and key
 * @param insertOrReplace the class to use to insert or replace the node
 * @return the new root of the tree
 */
public static <TNode extends AvlNode> TNode insert(TNode root, Object key,
  final AvlKeyComparator<TNode> keyComparator, final AvlInsertOrReplace<TNode> insertOrReplace) {
  if (root == null) {
    return insertOrReplace.insert(key);
  }
  int cmp = keyComparator.compareKey(root, key);
  if (cmp < 0) {
    root.avlLeft = insert((TNode) root.avlLeft, key, keyComparator, insertOrReplace);
  } else if (cmp > 0) {
    root.avlRight = insert((TNode) root.avlRight, key, keyComparator, insertOrReplace);
  } else {
    TNode left = (TNode) root.avlLeft;
    TNode right = (TNode) root.avlRight;
    root = insertOrReplace.replace(key, root);
    root.avlLeft = left;
    root.avlRight = right;
    return root;
  }
  return balance(root);
}
3.68