name: string, length 12-178
code_snippet: string, length 8-36.5k
score: float64, range 3.26-3.68
hbase_PrivateCellUtil_writeQualifierSkippingBytes
/** * Writes the qualifier from the given cell to the output stream, excluding the common prefix. * @param out The DataOutputStream to which the data has to be written * @param cell The cell whose contents have to be written * @param qlength the qualifier length * @param commonPrefix the length of the common prefix to skip */ public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, int qlength, int commonPrefix) throws IOException { if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyBufferToStream((DataOutput) out, ((ByteBufferExtendedCell) cell).getQualifierByteBuffer(), ((ByteBufferExtendedCell) cell).getQualifierPosition() + commonPrefix, qlength - commonPrefix); } else { out.write(cell.getQualifierArray(), cell.getQualifierOffset() + commonPrefix, qlength - commonPrefix); } }
3.68
morf_SqlDialect_sqlIsComment
/** * @param sql The SQL to test * @return true if the sql provided is a comment */ public boolean sqlIsComment(String sql) { return sql.startsWith("--") && !sql.contains("\n"); // multi-line statements may have comments as the top line }
3.68
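A throwaway sketch (not part of morf) showing how the two conditions in sqlIsComment behave; the SQL strings are made up for illustration.

```java
// Standalone sketch: same single-line comment check as the morf snippet above.
public class SqlCommentCheck {
    static boolean sqlIsComment(String sql) {
        return sql.startsWith("--") && !sql.contains("\n");
    }

    public static void main(String[] args) {
        System.out.println(sqlIsComment("-- just a comment"));             // true
        System.out.println(sqlIsComment("-- header\nSELECT 1 FROM dual")); // false: a statement follows the comment line
        System.out.println(sqlIsComment("SELECT 1 -- trailing comment"));  // false: does not start with --
    }
}
```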
flink_JobGraph_addUserJarBlobKey
/** * Adds the BLOB referenced by the key to the JobGraph's dependencies. * * @param key path of the JAR file required to run the job on a task manager */ public void addUserJarBlobKey(PermanentBlobKey key) { if (key == null) { throw new IllegalArgumentException(); } if (!userJarBlobKeys.contains(key)) { userJarBlobKeys.add(key); } }
3.68
hadoop_ECPolicyLoader_loadSchema
/** * Load a schema from a schema element in the XML configuration file. * @param element EC schema element * @return ECSchema */ private ECSchema loadSchema(Element element) { Map<String, String> schemaOptions = new HashMap<String, String>(); NodeList fields = element.getChildNodes(); for (int i = 0; i < fields.getLength(); i++) { Node fieldNode = fields.item(i); if (fieldNode instanceof Element) { Element field = (Element) fieldNode; String tagName = field.getTagName(); if ("k".equals(tagName)) { tagName = "numDataUnits"; } else if ("m".equals(tagName)) { tagName = "numParityUnits"; } // Get the nonnull text value. Text text = (Text) field.getFirstChild(); if (text != null) { String value = text.getData().trim(); schemaOptions.put(tagName, value); } else { throw new IllegalArgumentException("Value of <" + tagName + "> is null"); } } } return new ECSchema(schemaOptions); }
3.68
morf_InsertStatementBuilder_fields
/** * Specifies the fields to insert into the table. * * <p> * NOTE: This method should not be used in conjunction with {@link #values}. * </p> * * @param destinationFields the fields to insert into the database table * @return this, for method chaining. */ public InsertStatementBuilder fields(Iterable<? extends AliasedFieldBuilder> destinationFields) { if (fromTable != null) { throw new UnsupportedOperationException("Cannot specify both a source table and a list of fields"); } this.fields.addAll(Builder.Helper.<AliasedField>buildAll(destinationFields)); return this; }
3.68
morf_SqlUtils_update
/** * Constructs an update statement. * * <p>Usage is discouraged; this method will be deprecated at some point. Use * {@link UpdateStatement#update(TableReference)} for preference.</p> * * @param tableReference the database table to update * @return {@link UpdateStatement} */ public static UpdateStatement update(TableReference tableReference) { return new UpdateStatement(tableReference); }
3.68
morf_OracleMetaDataProvider_readViewMap
/** * Populate {@link #viewMap} with information from the database. Since JDBC metadata reading * is slow on Oracle, this uses an optimised query. * * @see <a href="http://docs.oracle.com/cd/B19306_01/server.102/b14237/statviews_2117.htm">ALL_VIEWS specification</a> */ private void readViewMap() { log.info("Starting read of view definitions"); long start = System.currentTimeMillis(); // Explicitly ignore the BIN$ tables as they are in the recycle bin (for flashback) final String viewsSql = "SELECT view_name FROM ALL_VIEWS WHERE owner=?"; runSQL(viewsSql, new ResultSetHandler() { @Override public void handle(ResultSet resultSet) throws SQLException { while (resultSet.next()) { final String viewName = resultSet.getString(1); if (isSystemTable(viewName)) continue; viewMap.put(viewName.toUpperCase(), new View() { @Override public String getName() { return viewName; } @Override public boolean knowsSelectStatement() { return false; } @Override public boolean knowsDependencies() { return false; } @Override public SelectStatement getSelectStatement() { throw new UnsupportedOperationException("Cannot return SelectStatement as [" + viewName + "] has been loaded from the database"); } @Override public String[] getDependencies() { throw new UnsupportedOperationException("Cannot return dependencies as [" + viewName + "] has been loaded from the database"); } }); } } }); long end = System.currentTimeMillis(); log.info(String.format("Read view metadata in %dms; %d views", end - start, viewMap.size())); }
3.68
framework_TableConnector_onUnregister
/* * (non-Javadoc) * * @see com.vaadin.client.ui.AbstractComponentConnector#onUnregister() */ @Override public void onUnregister() { super.onUnregister(); getWidget().onUnregister(); }
3.68
hbase_FSWALEntry_getFamilyNames
/** Returns the family names which are effected by this edit. */ Set<byte[]> getFamilyNames() { return familyNames; }
3.68
flink_ExecutionConfig_setExecutionMode
/** * Sets the execution mode to execute the program. The execution mode defines whether data * exchanges are performed in a batch or on a pipelined manner. * * <p>The default execution mode is {@link ExecutionMode#PIPELINED}. * * @param executionMode The execution mode to use. * @deprecated The {@link ExecutionMode} is deprecated because it's only used in DataSet APIs. * All Flink DataSet APIs are deprecated since Flink 1.18 and will be removed in a future * Flink major version. You can still build your application in DataSet, but you should move * to either the DataStream and/or Table API. * @see <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=158866741"> * FLIP-131: Consolidate the user-facing Dataflow SDKs/APIs (and deprecate the DataSet * API</a> */ @Deprecated public void setExecutionMode(ExecutionMode executionMode) { configuration.set(EXECUTION_MODE, executionMode); }
3.68
graphhopper_GraphHopperWeb_setPostRequest
/** * Use new endpoint 'POST /route' instead of 'GET /route' */ public GraphHopperWeb setPostRequest(boolean postRequest) { this.postRequest = postRequest; return this; }
3.68
hbase_CommonFSUtils_invokeSetStoragePolicy
/* * All args have been checked and are good. Run the setStoragePolicy invocation. */ private static void invokeSetStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy) throws IOException { Exception toThrow = null; try { fs.setStoragePolicy(path, storagePolicy); LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path); } catch (Exception e) { toThrow = e; // This swallows FNFE, should we be throwing it? seems more likely to indicate dev // misuse than a runtime problem with HDFS. if (!warningMap.containsKey(fs)) { warningMap.put(fs, true); LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path + ". " + "DEBUG log level might have more details.", e); } else if (LOG.isDebugEnabled()) { LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e); } // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation // that throws UnsupportedOperationException if (e instanceof UnsupportedOperationException) { if (LOG.isDebugEnabled()) { LOG.debug("The underlying FileSystem implementation doesn't support " + "setStoragePolicy. This is probably intentional on their part, since HDFS-9345 " + "appears to be present in your version of Hadoop. For more information check " + "the Hadoop documentation on 'ArchivalStorage', the Hadoop FileSystem " + "specification docs from HADOOP-11981, and/or related documentation from the " + "provider of the underlying FileSystem (its name should appear in the " + "stacktrace that accompanies this message). Note in particular that Hadoop's " + "local filesystem implementation doesn't support storage policies.", e); } } } if (toThrow != null) { throw new IOException(toThrow); } }
3.68
zxing_QRCodeDecoderMetaData_applyMirroredCorrection
/** * Apply the result points' order correction due to mirroring. * * @param points Array of points to apply mirror correction to. */ public void applyMirroredCorrection(ResultPoint[] points) { if (!mirrored || points == null || points.length < 3) { return; } ResultPoint bottomLeft = points[0]; points[0] = points[2]; points[2] = bottomLeft; // No need to 'fix' top-left and alignment pattern. }
3.68
hudi_RocksDBDAO_writeBatch
/** * Perform a batch write operation. */ public void writeBatch(BatchHandler handler) { try (WriteBatch batch = new WriteBatch()) { handler.apply(batch); getRocksDB().write(new WriteOptions(), batch); } catch (RocksDBException re) { throw new HoodieException(re); } }
3.68
hbase_KeyValueUtil_createKeyValueFromInputStream
/** * Create a KeyValue reading from the raw InputStream. Named * <code>createKeyValueFromInputStream</code> so it doesn't clash with {@link #create(DataInput)} * @param in inputStream to read. * @param withTags whether the KeyValue should include tags or not * @return Created KeyValue OR if we find a length of zero, we will return null which can be * useful for marking a stream as done. */ public static KeyValue createKeyValueFromInputStream(InputStream in, boolean withTags) throws IOException { byte[] intBytes = new byte[Bytes.SIZEOF_INT]; int bytesRead = 0; while (bytesRead < intBytes.length) { int n = in.read(intBytes, bytesRead, intBytes.length - bytesRead); if (n < 0) { if (bytesRead == 0) { throw new EOFException(); } throw new IOException("Failed read of int, read " + bytesRead + " bytes"); } bytesRead += n; } byte[] bytes = new byte[Bytes.toInt(intBytes)]; IOUtils.readFully(in, bytes, 0, bytes.length); return withTags ? new KeyValue(bytes, 0, bytes.length) : new NoTagsKeyValue(bytes, 0, bytes.length); }
3.68
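The loop at the start of createKeyValueFromInputStream exists because InputStream.read may return fewer bytes than requested. A minimal standalone sketch of that read-fully pattern, with illustrative names (this is not HBase code):

```java
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

// Minimal sketch of the "read exactly N bytes" loop used above.
public class ReadFullyDemo {
    static void readFully(InputStream in, byte[] buf) throws IOException {
        int read = 0;
        while (read < buf.length) {
            int n = in.read(buf, read, buf.length - read);
            if (n < 0) {
                throw new EOFException("Stream ended after " + read + " of " + buf.length + " bytes");
            }
            read += n;
        }
    }

    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3, 4});
        byte[] prefix = new byte[4];
        readFully(in, prefix);
        System.out.println(prefix[0] + " " + prefix[3]); // 1 4
    }
}
```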
aws-saas-boost_ExistingEnvironmentFactory_getExistingSaaSBoostStackDetails
// VisibleForTesting static Map<String, String> getExistingSaaSBoostStackDetails( CloudFormationClient cfn, String baseCloudFormationStackName) { LOGGER.debug("Getting CloudFormation stack details for SaaS Boost stack {}", baseCloudFormationStackName); Map<String, String> details = new HashMap<>(); List<String> requiredOutputs = List.of("PublicSubnet1", "PublicSubnet2", "PrivateSubnet1", "PrivateSubnet2", "EgressVpc", "LoggingBucket"); try { DescribeStacksResponse response = cfn.describeStacks( request -> request.stackName(baseCloudFormationStackName)); if (response.hasStacks() && !response.stacks().isEmpty()) { Stack stack = response.stacks().get(0); Map<String, String> outputs = stack.outputs().stream() .collect(Collectors.toMap(Output::outputKey, Output::outputValue)); for (String requiredOutput : requiredOutputs) { if (!outputs.containsKey(requiredOutput)) { throw new EnvironmentLoadException("Missing required CloudFormation stack output " + requiredOutput + " from stack " + baseCloudFormationStackName); } } LOGGER.info("Loaded stack outputs from stack " + baseCloudFormationStackName); Map<String, String> parameters = stack.parameters().stream() .collect(Collectors.toMap(Parameter::parameterKey, Parameter::parameterValue)); details.putAll(parameters); details.putAll(outputs); } } catch (SdkServiceException cfnError) { LOGGER.error("cloudformation:DescribeStacks error", cfnError); LOGGER.error(Utils.getFullStackTrace(cfnError)); throw cfnError; } return details; }
3.68
framework_TableQuery_executeUpdate
/** * Executes the given update query string using either the active connection * if a transaction is already open, or a new connection from this query's * connection pool. * * @param sh * an instance of StatementHelper, containing the query string * and parameter values. * @return Number of affected rows * @throws SQLException */ private int executeUpdate(StatementHelper sh) throws SQLException { PreparedStatement pstmt = null; Connection connection = null; try { connection = getConnection(); pstmt = connection.prepareStatement(sh.getQueryString()); sh.setParameterValuesToStatement(pstmt); getLogger().log(Level.FINE, "DB -> {0}", sh.getQueryString()); int retval = pstmt.executeUpdate(); return retval; } finally { releaseConnection(connection, pstmt, null); } }
3.68
hbase_HRegion_getCoprocessorHost
/** Returns the coprocessor host */ public RegionCoprocessorHost getCoprocessorHost() { return coprocessorHost; }
3.68
hbase_LruAdaptiveBlockCache_assertCounterSanity
/** * Sanity-checking for parity between actual block cache content and metrics. Intended only for * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + ", mapSize=" + mapSize); } } }
3.68
hudi_BoundedInMemoryQueue_adjustBufferSizeIfNeeded
/** * Samples records with "RECORD_SAMPLING_RATE" frequency and computes the average record size in bytes. It is used for * determining the maximum number of records to queue. Based on the change in average size it may increase or decrease the available * permits. * * @param payload Payload to size */ private void adjustBufferSizeIfNeeded(final O payload) throws InterruptedException { if (this.samplingRecordCounter.incrementAndGet() % RECORD_SAMPLING_RATE != 0) { return; } final long recordSizeInBytes = payloadSizeEstimator.sizeEstimate(payload); final long newAvgRecordSizeInBytes = Math.max(1, (avgRecordSizeInBytes * numSamples + recordSizeInBytes) / (numSamples + 1)); final int newRateLimit = (int) Math.min(RECORD_CACHING_LIMIT, Math.max(1, this.memoryLimit / newAvgRecordSizeInBytes)); // If there is any change in number of records to cache then we will either release (if it increased) or acquire // (if it decreased) to adjust rate limiting to newly computed value. if (newRateLimit > currentRateLimit) { rateLimiter.release(newRateLimit - currentRateLimit); } else if (newRateLimit < currentRateLimit) { rateLimiter.acquire(currentRateLimit - newRateLimit); } currentRateLimit = newRateLimit; avgRecordSizeInBytes = newAvgRecordSizeInBytes; numSamples++; }
3.68
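The adjustment above grows or shrinks a semaphore-backed permit budget as the sampled average record size changes. A self-contained sketch of just that release/acquire step; the class name, memory limit and record sizes are invented for illustration and this is not Hudi's BoundedInMemoryQueue:

```java
import java.util.concurrent.Semaphore;

// Illustrative sketch: resize a permit budget when the estimated record size changes.
public class AdaptivePermits {
    private final Semaphore rateLimiter = new Semaphore(0);
    private int currentRateLimit = 0;
    private final long memoryLimitBytes = 1_000_000;

    void adjust(long newAvgRecordSizeBytes) throws InterruptedException {
        int newRateLimit = (int) Math.max(1, memoryLimitBytes / newAvgRecordSizeBytes);
        if (newRateLimit > currentRateLimit) {
            rateLimiter.release(newRateLimit - currentRateLimit);   // budget grew: hand out more permits
        } else if (newRateLimit < currentRateLimit) {
            rateLimiter.acquire(currentRateLimit - newRateLimit);   // budget shrank: take permits back
        }
        currentRateLimit = newRateLimit;
    }

    public static void main(String[] args) throws InterruptedException {
        AdaptivePermits p = new AdaptivePermits();
        p.adjust(1_000);  // ~1000-byte records -> 1000 permits
        p.adjust(10_000); // records grew 10x -> acquire 900 permits back (free here, so this returns immediately)
        System.out.println(p.rateLimiter.availablePermits()); // 100
    }
}
```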
hudi_AvroSchemaUtils_resolveUnionSchema
/** * Passed in {@code Union} schema and will try to resolve the field with the {@code fieldSchemaFullName} * w/in the union returning its corresponding schema * * @param schema target schema to be inspected * @param fieldSchemaFullName target field-name to be looked up w/in the union * @return schema of the field w/in the union identified by the {@code fieldSchemaFullName} */ public static Schema resolveUnionSchema(Schema schema, String fieldSchemaFullName) { if (schema.getType() != Schema.Type.UNION) { return schema; } List<Schema> innerTypes = schema.getTypes(); if (innerTypes.size() == 2 && isNullable(schema)) { // this is a basic nullable field so handle it more efficiently return resolveNullableSchema(schema); } Schema nonNullType = innerTypes.stream() .filter(it -> it.getType() != Schema.Type.NULL && Objects.equals(it.getFullName(), fieldSchemaFullName)) .findFirst() .orElse(null); if (nonNullType == null) { throw new AvroRuntimeException( String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema)); } return nonNullType; }
3.68
hbase_HFileLink_getOriginPath
/** Returns the origin path of the hfile. */ public Path getOriginPath() { return this.originPath; }
3.68
framework_Escalator_getScrollWidth
/** * Returns the scroll width for the escalator. Note that this is not * necessary the same as {@code Element.scrollWidth} in the DOM. * * @since 7.5.0 * @return the scroll width in pixels */ public double getScrollWidth() { return horizontalScrollbar.getScrollSize(); }
3.68
hbase_MasterObserver_postGetRSGroupInfoOfServer
/** * Called after getting region server group info of the passed server. * @param ctx the environment to interact with the framework and master * @param server server to get RSGroupInfo for */ default void postGetRSGroupInfoOfServer(final ObserverContext<MasterCoprocessorEnvironment> ctx, final Address server) throws IOException { }
3.68
hibernate-validator_ValueExtractorResolver_getMaximallySpecificValueExtractors
/** * Used to find all the maximally specific value extractors based on a declared type in the case of value unwrapping. * <p> * There might be several of them as there might be several type parameters. * <p> * Used for container element constraints. */ public Set<ValueExtractorDescriptor> getMaximallySpecificValueExtractors(Class<?> declaredType) { return getRuntimeCompliantValueExtractors( declaredType, registeredValueExtractors ); }
3.68
pulsar_PulsarMetadata_getPulsarColumns
/** * Convert pulsar schema into presto table metadata. */ @VisibleForTesting public List<ColumnMetadata> getPulsarColumns(TopicName topicName, SchemaInfo schemaInfo, boolean withInternalColumns, PulsarColumnHandle.HandleKeyValueType handleKeyValueType) { SchemaType schemaType = schemaInfo.getType(); if (schemaType.isStruct() || schemaType.isPrimitive()) { return getPulsarColumnsFromSchema(topicName, schemaInfo, withInternalColumns, handleKeyValueType); } else if (schemaType.equals(SchemaType.KEY_VALUE)) { return getPulsarColumnsFromKeyValueSchema(topicName, schemaInfo, withInternalColumns); } else { throw new IllegalArgumentException("Unsupported schema : " + schemaInfo); } }
3.68
flink_FileWriter_initializeState
/** * Initializes the state after recovery from a failure. * * <p>During this process: * * <ol> * <li>we set the initial value for part counter to the maximum value used before across all * tasks and buckets. This guarantees that we do not overwrite valid data, * <li>we commit any pending files for previous checkpoints (previous to the last successful * one from which we restore), * <li>we resume writing to the previous in-progress file of each bucket, and * <li>if we receive multiple states for the same bucket, we merge them. * </ol> * * @param bucketStates the state holding recovered state about active buckets. * @throws IOException if anything goes wrong during retrieving the state or * restoring/committing of any in-progress/pending part files */ public void initializeState(Collection<FileWriterBucketState> bucketStates) throws IOException { checkNotNull(bucketStates, "The retrieved state was null."); for (FileWriterBucketState state : bucketStates) { String bucketId = state.getBucketId(); if (LOG.isDebugEnabled()) { LOG.debug("Restoring: {}", state); } FileWriterBucket<IN> restoredBucket = bucketFactory.restoreBucket( bucketWriter, rollingPolicy, state, outputFileConfig); updateActiveBucketId(bucketId, restoredBucket); } registerNextBucketInspectionTimer(); }
3.68
hadoop_YarnVersionInfo_getBuildVersion
/** * Returns the buildVersion which includes version, * revision, user and date. * * @return buildVersion. */ public static String getBuildVersion(){ return YARN_VERSION_INFO._getBuildVersion(); }
3.68
hudi_HoodieTable_shouldTrackSuccessRecords
/** * When {@link HoodieTableConfig#POPULATE_META_FIELDS} is enabled, * we need to track written records within WriteStatus in two cases: * <ol> * <li> When the HoodieIndex being used is not implicit with storage * <li> If any of the metadata table partitions (record index, etc) which require written record tracking are enabled * </ol> */ public boolean shouldTrackSuccessRecords() { return config.populateMetaFields() && (!getIndex().isImplicitWithStorage() || getMetadataPartitionsNeedingWriteStatusTracking(config.getMetadataConfig(), getMetaClient())); }
3.68
shardingsphere-elasticjob_InstanceService_persistOnline
/** * Persist job online status. */ public void persistOnline() { jobNodeStorage.fillEphemeralJobNode(instanceNode.getLocalInstancePath(), instanceNode.getLocalInstanceValue()); }
3.68
MagicPlugin_PlayerController_onPlayerPreLogin
// TODO: Why not MONITOR? @EventHandler(priority = EventPriority.HIGHEST) public void onPlayerPreLogin(AsyncPlayerPreLoginEvent event) { if (event.getLoginResult() != AsyncPlayerPreLoginEvent.Result.ALLOWED) { // Did not emit any events prior to this, nothing to clean up return; } controller.onPreLogin(event); }
3.68
hbase_CacheConfig_shouldCacheCompressed
/** * Returns true if this {@link BlockCategory} should be compressed in blockcache, false otherwise */ public boolean shouldCacheCompressed(BlockCategory category) { switch (category) { case DATA: return this.cacheDataOnRead && this.cacheDataCompressed; default: return false; } }
3.68
framework_Table_parseItemIdToCells
/** * Update a cache array for a row, register any relevant listeners etc. * * This is an internal method extracted from * {@link #getVisibleCellsNoCache(int, int, boolean)} and should be removed * when the Table is rewritten. */ private void parseItemIdToCells(Object[][] cells, Object id, int i, int firstIndex, RowHeaderMode headmode, int cols, Object[] colids, int firstIndexNotInCache, boolean[] iscomponent, HashSet<Property<?>> oldListenedProperties) { cells[CELL_ITEMID][i] = id; cells[CELL_KEY][i] = itemIdMapper.key(id); if (headmode != ROW_HEADER_MODE_HIDDEN) { switch (headmode) { case INDEX: cells[CELL_HEADER][i] = String.valueOf(i + firstIndex + 1); break; default: try { cells[CELL_HEADER][i] = getItemCaption(id); } catch (Exception e) { exceptionsDuringCachePopulation.add(e); cells[CELL_HEADER][i] = ""; } } try { cells[CELL_ICON][i] = getItemIcon(id); } catch (Exception e) { exceptionsDuringCachePopulation.add(e); cells[CELL_ICON][i] = null; } } int index = firstIndex + i; int indexInOldBuffer = index - pageBufferFirstIndex; boolean inPageBuffer = index < firstIndexNotInCache && index >= pageBufferFirstIndex && id.equals(pageBuffer[CELL_ITEMID][indexInOldBuffer]); GeneratedRow generatedRow = null; if (rowGenerator != null) { if (inPageBuffer) { generatedRow = (GeneratedRow) pageBuffer[CELL_GENERATED_ROW][indexInOldBuffer]; } else { generatedRow = rowGenerator.generateRow(this, cells[CELL_ITEMID][i]); } cells[CELL_GENERATED_ROW][i] = generatedRow; } int firstNotCollapsed = -1; for (int j = 0; j < cols; j++) { if (isColumnCollapsed(colids[j])) { continue; } else if (firstNotCollapsed == -1) { firstNotCollapsed = j; } Property<?> p = null; Object value = ""; boolean isGeneratedColumn = columnGenerators.containsKey(colids[j]); boolean isGenerated = isGeneratedColumn || generatedRow != null; if (!isGenerated) { try { p = getContainerProperty(id, colids[j]); } catch (Exception e) { exceptionsDuringCachePopulation.add(e); value = null; } } // check if current pageBuffer already has row if (p != null || isGenerated) { if (inPageBuffer) { // we already have data in our cache, // recycle it instead of fetching it via // getValue/getPropertyValue if (generatedRow != null) { value = extractGeneratedValue(generatedRow, j, j == firstNotCollapsed); } else { value = pageBuffer[CELL_FIRSTCOL + j][indexInOldBuffer]; } if (!isGeneratedColumn && iscomponent[j] || !(value instanceof Component)) { listenProperty(p, oldListenedProperties); } } else { if (generatedRow != null) { value = extractGeneratedValue(generatedRow, j, j == firstNotCollapsed); } else { if (isGeneratedColumn) { ColumnGenerator cg = columnGenerators .get(colids[j]); try { value = cg.generateCell(this, id, colids[j]); } catch (Exception e) { exceptionsDuringCachePopulation.add(e); value = null; } if (value != null && !(value instanceof Component) && !(value instanceof String)) { // Avoid errors if a generator returns // something // other than a Component or a String value = value.toString(); } } else if (iscomponent[j]) { try { value = p.getValue(); } catch (Exception e) { exceptionsDuringCachePopulation.add(e); value = null; } listenProperty(p, oldListenedProperties); } else if (p != null) { try { value = getPropertyValue(id, colids[j], p); } catch (Exception e) { exceptionsDuringCachePopulation.add(e); value = null; } /* * If returned value is Component (via fieldfactory * or overridden getPropertyValue) we expect it to * listen property value changes. Otherwise if * property emits value change events, table will * start to listen them and refresh content when * needed. */ if (!(value instanceof Component)) { listenProperty(p, oldListenedProperties); } } else { try { value = getPropertyValue(id, colids[j], null); } catch (Exception e) { exceptionsDuringCachePopulation.add(e); value = null; } } } } } if (value instanceof Component) { registerComponent((Component) value); } cells[CELL_FIRSTCOL + j][i] = value; } }
3.68
flink_StreamGraphGenerator_transformFeedback
/** * Transforms a {@code FeedbackTransformation}. * * <p>This will recursively transform the input and the feedback edges. We return the * concatenation of the input IDs and the feedback IDs so that downstream operations can be * wired to both. * * <p>This is responsible for creating the IterationSource and IterationSink which are used to * feed back the elements. */ private <T> Collection<Integer> transformFeedback(FeedbackTransformation<T> iterate) { if (shouldExecuteInBatchMode) { throw new UnsupportedOperationException( "Iterations are not supported in BATCH" + " execution mode. If you want to execute such a pipeline, please set the " + "'" + ExecutionOptions.RUNTIME_MODE.key() + "'=" + RuntimeExecutionMode.STREAMING.name()); } if (iterate.getFeedbackEdges().size() <= 0) { throw new IllegalStateException( "Iteration " + iterate + " does not have any feedback edges."); } List<Transformation<?>> inputs = iterate.getInputs(); checkState(inputs.size() == 1); Transformation<?> input = inputs.get(0); List<Integer> resultIds = new ArrayList<>(); // first transform the input stream(s) and store the result IDs Collection<Integer> inputIds = transform(input); resultIds.addAll(inputIds); // the recursive transform might have already transformed this if (alreadyTransformed.containsKey(iterate)) { return alreadyTransformed.get(iterate); } // create the fake iteration source/sink pair Tuple2<StreamNode, StreamNode> itSourceAndSink = streamGraph.createIterationSourceAndSink( iterate.getId(), getNewIterationNodeId(), getNewIterationNodeId(), iterate.getWaitTime(), iterate.getParallelism(), iterate.getMaxParallelism(), iterate.getMinResources(), iterate.getPreferredResources()); StreamNode itSource = itSourceAndSink.f0; StreamNode itSink = itSourceAndSink.f1; // We set the proper serializers for the sink/source streamGraph.setSerializers( itSource.getId(), null, null, iterate.getOutputType().createSerializer(executionConfig)); streamGraph.setSerializers( itSink.getId(), iterate.getOutputType().createSerializer(executionConfig), null, null); // also add the feedback source ID to the result IDs, so that downstream operators will // add both as input resultIds.add(itSource.getId()); // at the iterate to the already-seen-set with the result IDs, so that we can transform // the feedback edges and let them stop when encountering the iterate node alreadyTransformed.put(iterate, resultIds); // so that we can determine the slot sharing group from all feedback edges List<Integer> allFeedbackIds = new ArrayList<>(); for (Transformation<T> feedbackEdge : iterate.getFeedbackEdges()) { Collection<Integer> feedbackIds = transform(feedbackEdge); allFeedbackIds.addAll(feedbackIds); for (Integer feedbackId : feedbackIds) { streamGraph.addEdge(feedbackId, itSink.getId(), 0); } } String slotSharingGroup = determineSlotSharingGroup(null, allFeedbackIds); // slot sharing group of iteration node must exist if (slotSharingGroup == null) { slotSharingGroup = "SlotSharingGroup-" + iterate.getId(); } itSink.setSlotSharingGroup(slotSharingGroup); itSource.setSlotSharingGroup(slotSharingGroup); return resultIds; }
3.68
hadoop_S3ClientFactory_withMetrics
/** * Metrics binding. This is the S3A-level * statistics interface, which will be wired * up to the AWS callbacks. * @param statistics statistics implementation * @return this object */ public S3ClientCreationParameters withMetrics( @Nullable final StatisticsFromAwsSdk statistics) { metrics = statistics; return this; }
3.68
hadoop_TwoColumnLayout_header
/** * @return the class that will render the header of the page. */ protected Class<? extends SubView> header() { return HeaderBlock.class; }
3.68
hadoop_TextSplitter_bigDecimalToString
/** * Return the string encoded in a BigDecimal. * Repeatedly multiply the input value by 65536; the integer portion after such a multiplication * represents a single character in base 65536. Convert that back into a char and create a * string out of these until we have no data left. */ String bigDecimalToString(BigDecimal bd) { BigDecimal cur = bd.stripTrailingZeros(); StringBuilder sb = new StringBuilder(); for (int numConverted = 0; numConverted < MAX_CHARS; numConverted++) { cur = cur.multiply(ONE_PLACE); int curCodePoint = cur.intValue(); if (0 == curCodePoint) { break; } cur = cur.subtract(new BigDecimal(curCodePoint)); sb.append(Character.toChars(curCodePoint)); } return sb.toString(); }
3.68
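bigDecimalToString peels base-65536 digits off a BigDecimal by repeated multiplication. The inverse direction (string to BigDecimal) divides each code point by increasing powers of 65536; since 65536 is a power of two, each quotient has a terminating decimal expansion and plain BigDecimal.divide is exact. A hypothetical sketch of that inverse, not the Hadoop class:

```java
import java.math.BigDecimal;

// Hypothetical inverse of bigDecimalToString: pack a (BMP-only) string into a BigDecimal in [0, 1).
public class Base65536Encode {
    private static final BigDecimal ONE_PLACE = new BigDecimal(65536);

    static BigDecimal stringToBigDecimal(String s) {
        BigDecimal result = BigDecimal.ZERO;
        BigDecimal place = ONE_PLACE;                       // 65536^1, 65536^2, ...
        for (int i = 0; i < s.length(); i++) {
            BigDecimal digit = new BigDecimal(s.charAt(i)); // one base-65536 "digit" per char
            result = result.add(digit.divide(place));       // exact: 1/65536^k terminates in decimal
            place = place.multiply(ONE_PLACE);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(stringToBigDecimal("AB")); // 'A'=65, 'B'=66 -> 65/65536 + 66/65536^2
    }
}
```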
hbase_MasterObserver_preGetTableDescriptors
/** * Called before a getTableDescriptors request has been processed. * @param ctx the environment to interact with the framework and master * @param tableNamesList the list of table names, or null if querying for all * @param descriptors an empty list, can be filled with what to return in coprocessor * @param regex regular expression used for filtering the table names */ default void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx, List<TableName> tableNamesList, List<TableDescriptor> descriptors, String regex) throws IOException { }
3.68
framework_GridElement_getErrorMessage
/** * Gets the error message text, or <code>null</code> if no message is * present. */ public String getErrorMessage() { WebElement messageWrapper = findElement( By.className("v-grid-editor-message")); List<WebElement> divs = messageWrapper .findElements(By.tagName("div")); if (divs.isEmpty()) { return null; } else { return divs.get(0).getText(); } }
3.68
flink_Tuple16_toString
/** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11, f12, f13, f14, f15), where the individual fields are the value returned by * calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return "(" + StringUtils.arrayAwareToString(this.f0) + "," + StringUtils.arrayAwareToString(this.f1) + "," + StringUtils.arrayAwareToString(this.f2) + "," + StringUtils.arrayAwareToString(this.f3) + "," + StringUtils.arrayAwareToString(this.f4) + "," + StringUtils.arrayAwareToString(this.f5) + "," + StringUtils.arrayAwareToString(this.f6) + "," + StringUtils.arrayAwareToString(this.f7) + "," + StringUtils.arrayAwareToString(this.f8) + "," + StringUtils.arrayAwareToString(this.f9) + "," + StringUtils.arrayAwareToString(this.f10) + "," + StringUtils.arrayAwareToString(this.f11) + "," + StringUtils.arrayAwareToString(this.f12) + "," + StringUtils.arrayAwareToString(this.f13) + "," + StringUtils.arrayAwareToString(this.f14) + "," + StringUtils.arrayAwareToString(this.f15) + ")"; }
3.68
pulsar_PulsarAdminImpl_lookups
/** * @return the lookup object used to look up the broker serving a topic */ public Lookup lookups() { return lookups; }
3.68
hbase_Bytes_putByteBuffer
/** * Add the whole content of the ByteBuffer to the bytes arrays. The ByteBuffer is modified. * @param bytes the byte array * @param offset position in the array * @param buf ByteBuffer to write out * @return incremented offset */ public static int putByteBuffer(byte[] bytes, int offset, ByteBuffer buf) { int len = buf.remaining(); buf.get(bytes, offset, len); return offset + len; }
3.68
flink_SkipListUtils_helpGetValuePointer
/** * Returns the value pointer of the node. * * @param node the node. * @param spaceAllocator the space allocator. */ static long helpGetValuePointer(long node, Allocator spaceAllocator) { Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node)); int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node); MemorySegment segment = chunk.getMemorySegment(offsetInChunk); int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk); return getValuePointer(segment, offsetInByteBuffer); }
3.68
flink_Tuple14_equals
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Tuple14)) { return false; } @SuppressWarnings("rawtypes") Tuple14 tuple = (Tuple14) o; if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false; } if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) { return false; } if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) { return false; } if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) { return false; } if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) { return false; } if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) { return false; } if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) { return false; } if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) { return false; } if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) { return false; } if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) { return false; } if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) { return false; } if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) { return false; } if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) { return false; } if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) { return false; } return true; }
3.68
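The generated equals above repeats the same null-safe comparison for every field. java.util.Objects.equals expresses that check in one call; a small sketch with a hypothetical two-field class, not Flink's tuple code:

```java
import java.util.Objects;

// Sketch: the null-safe field comparison from Tuple14.equals, written with Objects.equals.
public class Pair<A, B> {
    final A f0;
    final B f1;

    Pair(A f0, B f1) { this.f0 = f0; this.f1 = f1; }

    @Override
    public boolean equals(Object o) {
        if (this == o) { return true; }
        if (!(o instanceof Pair)) { return false; }
        Pair<?, ?> other = (Pair<?, ?>) o;
        // Objects.equals(a, b) is true when both are null or a.equals(b), matching the hand-written checks.
        return Objects.equals(f0, other.f0) && Objects.equals(f1, other.f1);
    }

    @Override
    public int hashCode() {
        return Objects.hash(f0, f1); // keep hashCode consistent with equals
    }

    public static void main(String[] args) {
        System.out.println(new Pair<>("a", null).equals(new Pair<>("a", null))); // true
        System.out.println(new Pair<>("a", 1).equals(new Pair<>("a", 2)));       // false
    }
}
```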
hudi_AvroSchemaUtils_isStrictProjectionOf
/** * Validate whether the {@code targetSchema} is a strict projection of {@code sourceSchema}. * * Schema B is considered a strict projection of schema A iff * <ol> * <li>Schemas A and B are equal, or</li> * <li>Schemas A and B are array schemas and element-type of B is a strict projection * of the element-type of A, or</li> * <li>Schemas A and B are map schemas and value-type of B is a strict projection * of the value-type of A, or</li> * <li>Schemas A and B are union schemas (of the same size) and every element-type of B * is a strict projection of the corresponding element-type of A, or</li> * <li>Schemas A and B are record schemas and every field of the record B has corresponding * counterpart (w/ the same name) in the schema A, such that the schema of the field of the schema * B is also a strict projection of the A field's schema</li> * </ol> */ public static boolean isStrictProjectionOf(Schema sourceSchema, Schema targetSchema) { return isProjectionOfInternal(sourceSchema, targetSchema, Objects::equals); }
3.68
hadoop_TimelineReaderWebServicesUtils_parseIntStr
/** * Interpret passed string as an integer. * @param str Passed string. * @return integer representation if string is not null, null otherwise. */ static Integer parseIntStr(String str) { return str == null ? null : Integer.parseInt(str.trim()); }
3.68
hbase_MasterRpcServices_execProcedure
/** * Triggers an asynchronous attempt to run a distributed procedure. {@inheritDoc} */ @Override public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request) throws ServiceException { try { server.checkInitialized(); ProcedureDescription desc = request.getProcedure(); MasterProcedureManager mpm = server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); if (mpm == null) { throw new ServiceException( new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature())); } LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null)); mpm.execProcedure(desc); // send back the max amount of time the client should wait for the procedure // to complete long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME; return ExecProcedureResponse.newBuilder().setExpectedTimeout(waitTime).build(); } catch (ForeignException e) { throw new ServiceException(e.getCause()); } catch (IOException e) { throw new ServiceException(e); } }
3.68
shardingsphere-elasticjob_RequestBodyDeserializerFactory_getRequestBodyDeserializer
/** * Get deserializer for specific HTTP content type. * * <p> * This method will look for a deserializer instance of specific MIME type. * If deserializer not found, this method would look for deserializer factory by MIME type. * If it is still not found, the MIME type would be marked as <code>MISSING_DESERIALIZER</code>. * </p> * * <p> * Some default deserializer will be provided by {@link DeserializerFactory}, * so developers can implement {@link RequestBodyDeserializer} and register it by SPI to override default deserializer. * </p> * * @param contentType HTTP content type * @return Deserializer */ public static RequestBodyDeserializer getRequestBodyDeserializer(final String contentType) { RequestBodyDeserializer result = REQUEST_BODY_DESERIALIZERS.get(contentType); if (null == result) { synchronized (RequestBodyDeserializerFactory.class) { if (null == REQUEST_BODY_DESERIALIZERS.get(contentType)) { instantiateRequestBodyDeserializerFromFactories(contentType); } result = REQUEST_BODY_DESERIALIZERS.get(contentType); } } if (MISSING_DESERIALIZER == result) { throw new RequestBodyDeserializerNotFoundException(contentType); } return result; }
3.68
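getRequestBodyDeserializer does a lock-free check followed by a synchronized re-check, so the deserializer for a content type is only resolved once. A generic sketch of that double-checked cache pattern with invented names (the loader, key and value types are placeholders, not ElasticJob APIs):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Generic sketch of the check / synchronized re-check pattern above.
public class DoubleCheckedCache<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> loader;

    DoubleCheckedCache(Function<K, V> loader) { this.loader = loader; }

    V get(K key) {
        V result = cache.get(key);
        if (result == null) {
            synchronized (this) {
                result = cache.get(key);     // re-check: another thread may have loaded it meanwhile
                if (result == null) {
                    result = loader.apply(key);
                    cache.put(key, result);
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        DoubleCheckedCache<String, String> cache =
            new DoubleCheckedCache<>(contentType -> "deserializer-for-" + contentType);
        System.out.println(cache.get("application/json")); // loaded once, then served from the map
        System.out.println(cache.get("application/json"));
    }
}
```

With ConcurrentHashMap, computeIfAbsent gives the same once-per-key guarantee in a single call; an explicit re-check mainly earns its keep when the lookup has to fall back to factories or record a "missing" sentinel, as in the snippet above.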
hadoop_DumpUtil_dumpChunks
/** * Print data in hex format in an array of chunks. * @param header header. * @param chunks chunks. */ public static void dumpChunks(String header, ECChunk[] chunks) { System.out.println(); System.out.println(header); for (int i = 0; i < chunks.length; i++) { dumpChunk(chunks[i]); } System.out.println(); }
3.68
flink_DataSinkNode_getOutgoingConnections
/** * Gets all outgoing connections, which is an empty set for the data sink. * * @return An empty list. */ @Override public List<DagConnection> getOutgoingConnections() { return Collections.emptyList(); }
3.68
framework_AbstractJavaScriptExtension_callFunction
/** * Invoke a named function that the connector JavaScript has added to the * JavaScript connector wrapper object. The arguments can be any boxed * primitive type, String, {@link JsonValue} or arrays of any other * supported type. Complex types (e.g. List, Set, Map, Connector or any * JavaBean type) must be explicitly serialized to a {@link JsonValue} * before sending. This can be done either with * {@link JsonCodec#encode(Object, JsonValue, java.lang.reflect.Type, com.vaadin.ui.ConnectorTracker)} * or using the factory methods in {@link Json}. * * @param name * the name of the function * @param arguments * function arguments */ protected void callFunction(String name, Object... arguments) { callbackHelper.invokeCallback(name, arguments); }
3.68
flink_TopologyGraph_link
/** * Link an edge from `from` node to `to` node if no loop will occur after adding this edge. * Returns if this edge is successfully added. */ boolean link(ExecNode<?> from, ExecNode<?> to) { TopologyNode fromNode = getOrCreateTopologyNode(from); TopologyNode toNode = getOrCreateTopologyNode(to); if (canReach(toNode, fromNode)) { // invalid edge, as `to` is the predecessor of `from` return false; } else { // link `from` and `to` fromNode.outputs.add(toNode); toNode.inputs.add(fromNode); return true; } }
3.68
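link only adds the edge when the target cannot already reach the source, which keeps the graph acyclic. A standalone sketch of the same idea over plain string ids with an iterative depth-first canReach (this is not Flink's TopologyGraph):

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Standalone sketch of cycle-safe edge insertion.
public class Dag {
    private final Map<String, Set<String>> outputs = new HashMap<>();

    boolean link(String from, String to) {
        if (canReach(to, from)) {
            return false; // adding from -> to would close a cycle
        }
        outputs.computeIfAbsent(from, k -> new HashSet<>()).add(to);
        return true;
    }

    private boolean canReach(String start, String target) {
        Deque<String> stack = new ArrayDeque<>();
        Set<String> seen = new HashSet<>();
        stack.push(start);
        while (!stack.isEmpty()) {
            String node = stack.pop();
            if (node.equals(target)) {
                return true;
            }
            if (seen.add(node)) {
                stack.addAll(outputs.getOrDefault(node, Set.of()));
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Dag dag = new Dag();
        System.out.println(dag.link("a", "b")); // true
        System.out.println(dag.link("b", "c")); // true
        System.out.println(dag.link("c", "a")); // false: a already reaches c, so c -> a would close a cycle
    }
}
```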
hibernate-validator_GroupSequenceProviderCheck_hasPublicDefaultConstructor
/** * Checks that the given {@code TypeElement} has a public * default constructor. * * @param element The {@code TypeElement} to check. * * @return True if the given {@code TypeElement} has a public default constructor, false otherwise */ private boolean hasPublicDefaultConstructor(TypeElement element) { return element.accept( new ElementKindVisitor8<Boolean, Void>( Boolean.FALSE ) { @Override public Boolean visitTypeAsClass(TypeElement typeElement, Void aVoid) { List<? extends Element> enclosedElements = typeElement.getEnclosedElements(); for ( Element enclosedElement : enclosedElements ) { if ( enclosedElement.accept( this, aVoid ) ) { return Boolean.TRUE; } } return Boolean.FALSE; } @Override public Boolean visitExecutableAsConstructor(ExecutableElement constructorElement, Void aVoid) { if ( constructorElement.getModifiers().contains( Modifier.PUBLIC ) && constructorElement.getParameters().isEmpty() ) { return Boolean.TRUE; } return Boolean.FALSE; } }, null ); }
3.68
framework_DropTargetExtensionConnector_removeDropTargetStyle
/** * Remove class name from the drop target element indication that data can * be dropped onto it. */ protected void removeDropTargetStyle() { getDropTargetElement() .removeClassName(getStylePrimaryName(getDropTargetElement()) + STYLE_SUFFIX_DROPTARGET); }
3.68
morf_GraphBasedUpgradeBuilder_handleStandardNode
/** * Insert standard node (node with dependencies which doesn't require exclusive * execution) into the graph. * * @param processedNodes nodes processed so far * @param node to be inserted * @param root of the graph */ private void handleStandardNode(List<GraphBasedUpgradeNode> processedNodes, GraphBasedUpgradeNode node, GraphBasedUpgradeNode root) { // if nothing has been processed add node as child of the root if (processedNodes.isEmpty()) { LOG.debug("Root empty, adding node: " + node.getName() + " as child of the root"); addEdge(root, node); return; } Set<String> remainingReads = new HashSet<>(node.getReads()); Set<String> remainingModifies = new HashSet<>(node.getModifies()); Set<String> removeAtNextModify = new HashSet<>(); for (int i = processedNodes.size() - 1; i >= 0; i--) { GraphBasedUpgradeNode processed = processedNodes.get(i); // if exclusive execution is NOT required for the processed node if (!processed.requiresExclusiveExecution()) { analyzeDependency(processed, node, remainingReads, remainingModifies, removeAtNextModify); } // stop processing if there are no dependencies to consider anymore if (remainingModifies.isEmpty() && remainingReads.isEmpty()) { break; } // if processed requires exclusive execution, add an edge only if current node has no parents if (processed.requiresExclusiveExecution() && node.getParents().isEmpty()) { addEdge(processed, node); LOG.debug("Node: " + node.getName() + " depends on " + processed.getName() + " because it had no parent and the dependency has no dependencies defined"); break; } } // if no dependencies have been found add as a child of the root if (node.getParents().isEmpty()) { LOG.debug("No dependencies found for node: " + node.getName() + " - adding as child of the root"); addEdge(root, node); } }
3.68
graphhopper_DistanceCalcEarth_calcDist
/** * Calculates distance of (from, to) in meter. * <p> * http://en.wikipedia.org/wiki/Haversine_formula a = sin²(Δlat/2) + * cos(lat1).cos(lat2).sin²(Δlong/2) c = 2.atan2(√a, √(1−a)) d = R.c */ @Override public double calcDist(double fromLat, double fromLon, double toLat, double toLon) { double normedDist = calcNormalizedDist(fromLat, fromLon, toLat, toLon); return R * 2 * asin(sqrt(normedDist)); }
3.68
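calcDist delegates the haversine term to calcNormalizedDist; a self-contained version of the whole formula looks roughly like the sketch below, assuming a mean earth radius of 6371 km (GraphHopper's own constant may differ slightly):

```java
import static java.lang.Math.asin;
import static java.lang.Math.cos;
import static java.lang.Math.sin;
import static java.lang.Math.sqrt;
import static java.lang.Math.toRadians;

// Self-contained haversine sketch, not GraphHopper's DistanceCalcEarth.
public class Haversine {
    private static final double R = 6371.0 * 1000; // assumed mean earth radius, metres

    static double distanceMeters(double fromLat, double fromLon, double toLat, double toLon) {
        double dLat = toRadians(toLat - fromLat);
        double dLon = toRadians(toLon - fromLon);
        double a = sin(dLat / 2) * sin(dLat / 2)
                + cos(toRadians(fromLat)) * cos(toRadians(toLat)) * sin(dLon / 2) * sin(dLon / 2);
        return R * 2 * asin(sqrt(a));
    }

    public static void main(String[] args) {
        // Berlin -> Munich, roughly 500 km as the crow flies.
        System.out.printf("%.1f km%n", distanceMeters(52.52, 13.405, 48.137, 11.575) / 1000);
    }
}
```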
hbase_StoreFileWriter_toCompactionEventTrackerBytes
/** * Used when write {@link HStoreFile#COMPACTION_EVENT_KEY} to new file's file info. The compacted * store files's name is needed. But if the compacted store file is a result of compaction, it's * compacted files which still not archived is needed, too. And don't need to add compacted files * recursively. If file A, B, C compacted to new file D, and file D compacted to new file E, will * write A, B, C, D to file E's compacted files. So if file E compacted to new file F, will add E * to F's compacted files first, then add E's compacted files: A, B, C, D to it. And no need to * add D's compacted file, as D's compacted files has been in E's compacted files, too. See * HBASE-20724 for more details. * @param storeFiles The compacted store files to generate this new file * @return bytes of CompactionEventTracker */ private byte[] toCompactionEventTrackerBytes(Collection<HStoreFile> storeFiles) { Set<String> notArchivedCompactedStoreFiles = this.compactedFilesSupplier.get().stream() .map(sf -> sf.getPath().getName()).collect(Collectors.toSet()); Set<String> compactedStoreFiles = new HashSet<>(); for (HStoreFile storeFile : storeFiles) { compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName()); for (String csf : storeFile.getCompactedStoreFiles()) { if (notArchivedCompactedStoreFiles.contains(csf)) { compactedStoreFiles.add(csf); } } } return ProtobufUtil.toCompactionEventTrackerBytes(compactedStoreFiles); }
3.68
hudi_StreamerUtil_medianInstantTime
/** * Returns the median instant time between the given two instant time. */ public static Option<String> medianInstantTime(String highVal, String lowVal) { try { long high = HoodieActiveTimeline.parseDateFromInstantTime(highVal).getTime(); long low = HoodieActiveTimeline.parseDateFromInstantTime(lowVal).getTime(); ValidationUtils.checkArgument(high > low, "Instant [" + highVal + "] should have newer timestamp than instant [" + lowVal + "]"); long median = low + (high - low) / 2; final String instantTime = HoodieActiveTimeline.formatDate(new Date(median)); if (HoodieTimeline.compareTimestamps(lowVal, HoodieTimeline.GREATER_THAN_OR_EQUALS, instantTime) || HoodieTimeline.compareTimestamps(highVal, HoodieTimeline.LESSER_THAN_OR_EQUALS, instantTime)) { return Option.empty(); } return Option.of(instantTime); } catch (ParseException e) { throw new HoodieException("Get median instant time with interval [" + lowVal + ", " + highVal + "] error", e); } }
3.68
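medianInstantTime takes the midpoint as low + (high - low) / 2. With epoch milliseconds the naive (low + high) / 2 would not actually overflow a long, so here the form is mostly a defensive idiom, but the same expression is what keeps int midpoints (for example in binary search) from going negative. A tiny standalone sketch of the difference with int values:

```java
// Sketch: why midpoints are usually written as low + (high - low) / 2.
public class MidpointDemo {
    public static void main(String[] args) {
        int low = 2_000_000_000;
        int high = 2_100_000_000;
        int naive = (low + high) / 2;      // low + high wraps around to a negative int
        int safe = low + (high - low) / 2; // stays in range
        System.out.println(naive); // -97483648
        System.out.println(safe);  // 2050000000
    }
}
```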
zxing_BinaryBitmap_rotateCounterClockwise45
/** * Returns a new object with rotated image data by 45 degrees counterclockwise. * Only callable if {@link #isRotateSupported()} is true. * * @return A rotated version of this object. */ public BinaryBitmap rotateCounterClockwise45() { LuminanceSource newSource = binarizer.getLuminanceSource().rotateCounterClockwise45(); return new BinaryBitmap(binarizer.createBinarizer(newSource)); }
3.68
pulsar_PulsarAdminImpl_bookies
/** * @return the bookies management object */ public Bookies bookies() { return bookies; }
3.68
flink_LogicalType_isAnyOf
/** * Returns whether the root of the type is part of at least one family of the {@code typeFamily} * or not. * * @param typeFamilies The families to check against for equality */ public boolean isAnyOf(LogicalTypeFamily... typeFamilies) { return Arrays.stream(typeFamilies).anyMatch(tf -> this.typeRoot.getFamilies().contains(tf)); }
3.68
hadoop_OBSInputStream_close
/** * Close the stream. This triggers publishing of the stream statistics back to * the filesystem statistics. This operation is synchronized, so that only one * thread can attempt to close the connection; all later/blocked calls are * no-ops. * * @throws IOException on any problem */ @Override public synchronized void close() throws IOException { if (!closed) { closed = true; // close or abort the stream closeStream("close() operation", this.contentRangeFinish); // this is actually a no-op super.close(); } }
3.68
dubbo_TTable_getWidth
/** * get current width * * @return width */ public int getWidth() { // if not auto resize, return preset width if (!isAutoResize) { return width; } // if it's auto resize, then calculate the possible max width int maxWidth = 0; for (String data : rows) { maxWidth = max(width(data), maxWidth); } return maxWidth; }
3.68
flink_TaskStateStats_getCheckpointedSize
/** @return Total persisted size over all subtasks of this checkpoint. */ public long getCheckpointedSize() { return summaryStats.getCheckpointedSize().getSum(); }
3.68
hadoop_FederationRouterRMTokenInputValidator_validate
/** * We will check with the RouterMasterKeyRequest{@link RouterMasterKeyRequest} * to ensure that the request object is not empty and that the RouterMasterKey is not empty. * * @param request RouterMasterKey Request. * @throws FederationStateStoreInvalidInputException if the request is invalid. */ public static void validate(RouterMasterKeyRequest request) throws FederationStateStoreInvalidInputException { // Verify the request to ensure that the request is not empty, // if the request is found to be empty, an exception will be thrown. if (request == null) { String message = "Missing RouterMasterKey Request." + " Please try again by specifying a router master key request information."; LOG.warn(message); throw new FederationStateStoreInvalidInputException(message); } // Check whether the masterKey is empty, // if the masterKey is empty, throw an exception message. RouterMasterKey masterKey = request.getRouterMasterKey(); if (masterKey == null) { String message = "Missing RouterMasterKey." + " Please try again by specifying a router master key information."; LOG.warn(message); throw new FederationStateStoreInvalidInputException(message); } }
3.68
framework_VaadinSession_accessSynchronously
/** * Locks this session and runs the provided Runnable right away. * <p> * It is generally recommended to use {@link #access(Runnable)} instead of * this method for accessing a session from a different thread as * {@link #access(Runnable)} can be used while holding the lock of another * session. To avoid causing deadlocks, this methods throws an exception if * it is detected than another session is also locked by the current thread. * </p> * <p> * This method behaves differently than {@link #access(Runnable)} in some * situations: * <ul> * <li>If the current thread is currently holding the lock of this session, * {@link #accessSynchronously(Runnable)} runs the task right away whereas * {@link #access(Runnable)} defers the task to a later point in time.</li> * <li>If some other thread is currently holding the lock for this session, * {@link #accessSynchronously(Runnable)} blocks while waiting for the lock * to be available whereas {@link #access(Runnable)} defers the task to a * later point in time.</li> * </ul> * </p> * * @param runnable * the runnable which accesses the session * * @throws IllegalStateException * if the current thread holds the lock for another session * * @since 7.1 * * @see #lock() * @see #getCurrent() * @see #access(Runnable) * @see UI#accessSynchronously(Runnable) */ public void accessSynchronously(Runnable runnable) { VaadinService.verifyNoOtherSessionLocked(this); Map<Class<?>, CurrentInstance> old = null; lock(); try { old = CurrentInstance.setCurrent(this); runnable.run(); } finally { unlock(); if (old != null) { CurrentInstance.restoreInstances(old); } } }
3.68
hadoop_SubApplicationTableRW_setMetricsTTL
/** * @param metricsTTL time to live parameter for the metrics in this table. * @param hbaseConf configuration in which to set the metrics TTL config * variable. */ public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) { hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL); }
3.68
framework_GridRowDragger_setDropIndexCalculator
/** * Sets the drop index calculator for the target grid. With this callback * you can have a custom drop location instead of the actual one. * <p> * By default, items are placed on the index they are dropped into in the * target grid. * <p> * If you want to always drop items to the end of the target grid, you can * use {@link DropIndexCalculator#ALWAYS_DROP_TO_END}. * * @param dropIndexCalculator * the drop index calculator */ public void setDropIndexCalculator( DropIndexCalculator<T> dropIndexCalculator) { this.dropTargetIndexCalculator = dropIndexCalculator; }
3.68
flink_DateTimeUtils_formatDate
/** Helper for CAST({date} AS VARCHAR(n)). */ public static String formatDate(int date) { final StringBuilder buf = new StringBuilder(10); formatDate(buf, date); return buf.toString(); }
3.68
pulsar_BrokerService_createPendingLoadTopic
/** * Create pending topic and on completion it picks the next one until processes all topics in * {@link #pendingTopicLoadingQueue}.<br/> * It also tries to acquire {@link #topicLoadRequestSemaphore} so throttle down newly incoming topics and release * permit if it was successful to acquire it. */ private void createPendingLoadTopic() { TopicLoadingContext pendingTopic = pendingTopicLoadingQueue.poll(); if (pendingTopic == null) { return; } final String topic = pendingTopic.getTopic(); checkTopicNsOwnership(topic).thenRun(() -> { CompletableFuture<Optional<Topic>> pendingFuture = pendingTopic.getTopicFuture(); final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get(); final boolean acquiredPermit = topicLoadSemaphore.tryAcquire(); checkOwnershipAndCreatePersistentTopic(topic, pendingTopic.isCreateIfMissing(), pendingFuture, pendingTopic.getProperties(), pendingTopic.getTopicPolicies()); pendingFuture.handle((persistentTopic, ex) -> { // release permit and process next pending topic if (acquiredPermit) { topicLoadSemaphore.release(); } createPendingLoadTopic(); return null; }); }).exceptionally(e -> { log.error("Failed to create pending topic {}", topic, e); pendingTopic.getTopicFuture() .completeExceptionally((e instanceof RuntimeException && e.getCause() != null) ? e.getCause() : e); // schedule to process next pending topic inactivityMonitor.schedule(this::createPendingLoadTopic, 100, TimeUnit.MILLISECONDS); return null; }); }
3.68
hbase_BlockingRpcConnection_closeConn
// close socket, reader, and clean up all pending calls. private void closeConn(IOException e) { if (thread == null) { return; } thread.interrupt(); thread = null; closeSocket(); if (callSender != null) { callSender.cleanup(e); } for (Call call : calls.values()) { call.setException(e); } calls.clear(); }
3.68
flink_OuterJoinPaddingUtil_padRight
/** * Returns a padding result with the given right row. * * @param rightRow the right row to pad * @return the reusable null padding result */ public final RowData padRight(RowData rightRow) { return joinedRow.replace(leftNullPaddingRow, rightRow); }
3.68
hadoop_LongBitFormat_combine
/** Combine the value to the record. */ public long combine(long value, long record) { if (value < MIN) { throw new IllegalArgumentException( "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN); } if (value > MAX) { throw new IllegalArgumentException( "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX); } return (record & ~MASK) | (value << OFFSET); }
3.68
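combine packs a value into a bit field of the record with (record & ~MASK) | (value << OFFSET). A standalone sketch of packing and then unpacking a single field, using a made-up 12-bit field at offset 16 (not Hadoop's LongBitFormat):

```java
// Standalone sketch of the same bit-field packing (hypothetical 12-bit field at offset 16).
public class BitFieldDemo {
    static final int OFFSET = 16;
    static final int LENGTH = 12;
    static final long MASK = ((1L << LENGTH) - 1) << OFFSET;
    static final long MAX = (1L << LENGTH) - 1;

    static long combine(long value, long record) {
        if (value < 0 || value > MAX) {
            throw new IllegalArgumentException("value out of range: " + value);
        }
        return (record & ~MASK) | (value << OFFSET); // clear the field, then OR the shifted value in
    }

    static long retrieve(long record) {
        return (record & MASK) >>> OFFSET;           // mask the field out and shift it back down
    }

    public static void main(String[] args) {
        long record = 0xFFFF_FFFF_FFFF_FFFFL;
        long updated = combine(0xABC, record);
        System.out.println(Long.toHexString(updated)); // fffffffffabcffff
        System.out.println(retrieve(updated));         // 2748 (0xABC)
    }
}
```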
hadoop_SelectBinding_select
/** * Build and execute a select request. * @param readContext the read context, which includes the source path. * @param expression the SQL expression. * @param builderOptions query options * @param objectAttributes object attributes from a HEAD request * @return an FSDataInputStream whose wrapped stream is a SelectInputStream * @throws IllegalArgumentException argument failure * @throws IOException failure building, validating or executing the request. * @throws PathIOException source path is a directory. */ @Retries.RetryTranslated public FSDataInputStream select( final S3AReadOpContext readContext, final String expression, final Configuration builderOptions, final S3ObjectAttributes objectAttributes) throws IOException { return new FSDataInputStream( executeSelect(readContext, objectAttributes, builderOptions, buildSelectRequest( readContext.getPath(), expression, builderOptions ))); }
3.68
hbase_HRegion_sawNoSuchFamily
/** * Records that a {@link NoSuchColumnFamilyException} has been observed. */ void sawNoSuchFamily() { wrongFamily = true; }
3.68
hbase_HFileLink_create
/** * Create a new HFileLink * <p> * It also adds a back-reference to the hfile back-reference directory to simplify the * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) * @param dstTableName - Destination table name * @param dstRegionName - Destination region name * @param linkedTable - Linked Table Name * @param linkedRegion - Linked Region Name * @param hfileName - Linked HFile name * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure */ public static String create(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final String familyName, final String dstTableName, final String dstRegionName, final TableName linkedTable, final String linkedRegion, final String hfileName, final boolean createBackRef) throws IOException { String name = createHFileLinkName(linkedTable, linkedRegion, hfileName); String refName = createBackReferenceName(dstTableName, dstRegionName); // Make sure the destination directory exists fs.mkdirs(dstFamilyPath); // Make sure the FileLink reference directory exists Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, linkedTable, linkedRegion, familyName); Path backRefPath = null; if (createBackRef) { Path backRefssDir = getBackReferencesDir(archiveStoreDir, hfileName); fs.mkdirs(backRefssDir); // Create the reference for the link backRefPath = new Path(backRefssDir, refName); fs.createNewFile(backRefPath); } try { // Create the link if (fs.createNewFile(new Path(dstFamilyPath, name))) { return name; } } catch (IOException e) { LOG.error("couldn't create the link=" + name + " for " + dstFamilyPath, e); // Revert the reference if the link creation failed if (createBackRef) { fs.delete(backRefPath, false); } throw e; } throw new IOException( "File link=" + name + " already exists under " + dstFamilyPath + " folder."); }
3.68
hadoop_SplitCompressionInputStream_getAdjustedEnd
/** * After calling createInputStream, the values of start or end * might change. So this method can be used to get the new value of end. * @return The changed value of end */ public long getAdjustedEnd() { return end; }
3.68
hbase_ThroughputControlUtil_getNameForThrottling
/** * Generate a name for throttling, to prevent name conflicts when multiple IO operations are running in * parallel on the same store. * @param store the Store instance on which the IO operation is happening * @param opName Name of the IO operation, e.g. "flush", "compaction", etc. * @return The name for throttling */ public static String getNameForThrottling(HStore store, String opName) { int counter; for (;;) { counter = NAME_COUNTER.get(); int next = counter == Integer.MAX_VALUE ? 0 : counter + 1; if (NAME_COUNTER.compareAndSet(counter, next)) { break; } } return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + NAME_DELIMITER + counter; }
3.68
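The manual CAS loop in getNameForThrottling exists so the shared counter wraps back to 0 at Integer.MAX_VALUE instead of going negative. As a hedged sketch (not the project's code), the same wrap-around increment can be expressed more compactly with AtomicInteger.getAndUpdate on Java 8+:

import java.util.concurrent.atomic.AtomicInteger;

public class WrapAroundCounter {
  private static final AtomicInteger NAME_COUNTER = new AtomicInteger(0);

  // Returns the current value and atomically advances it, wrapping MAX_VALUE back to 0
  // so the counter never becomes negative.
  static int nextCounter() {
    return NAME_COUNTER.getAndUpdate(c -> c == Integer.MAX_VALUE ? 0 : c + 1);
  }

  public static void main(String[] args) {
    System.out.println(nextCounter()); // 0
    System.out.println(nextCounter()); // 1
  }
}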
hbase_SnapshotManager_restoreSnapshot
/** * Restore the specified snapshot. The restore will fail if the destination table has a snapshot * or restore in progress. * @param snapshot Snapshot Descriptor * @param tableDescriptor Table Descriptor * @param nonceKey unique identifier to prevent duplicated RPC * @param restoreAcl true to restore acl of snapshot * @return procId the ID of the restore snapshot procedure */ private synchronized long restoreSnapshot(final SnapshotDescription snapshot, final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) throws HBaseSnapshotException { final TableName tableName = tableDescriptor.getTableName(); // make sure we aren't running a snapshot on the same table if (isTableTakingAnySnapshot(tableName)) { throw new RestoreSnapshotException("Snapshot in progress on the restore table=" + tableName); } // make sure we aren't running a restore on the same table if (isRestoringTable(tableName)) { throw new RestoreSnapshotException("Restore already in progress on the table=" + tableName); } try { long procId = master.getMasterProcedureExecutor().submitProcedure( new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(), tableDescriptor, snapshot, restoreAcl), nonceKey); this.restoreTableToProcIdMap.put(tableName, procId); return procId; } catch (Exception e) { String msg = "Couldn't restore the snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + " on table=" + tableName; LOG.error(msg, e); throw new RestoreSnapshotException(msg, e); } }
3.68
flink_BlobCacheSizeTracker_update
/** * Update the least used index for the BLOBs so that the tracker can easily find out the least * recently used BLOBs. */ public void update(JobID jobId, BlobKey blobKey) { checkNotNull(jobId); checkNotNull(blobKey); synchronized (lock) { // A lookup is enough: the access-ordered map moves this entry to the most-recently-used position. caches.get(Tuple2.of(jobId, blobKey)); } }
3.68
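The bare caches.get(...) call in update only has an effect if caches is an access-ordered map, where a lookup promotes the entry to the most-recently-used position. Assuming that is the case here, the stand-alone sketch below demonstrates the relevant LinkedHashMap behaviour:

import java.util.LinkedHashMap;
import java.util.Map;

public class AccessOrderDemo {
  public static void main(String[] args) {
    // accessOrder = true: iteration order is least-recently-accessed first.
    Map<String, Integer> lru = new LinkedHashMap<>(16, 0.75f, true);
    lru.put("a", 1);
    lru.put("b", 2);
    lru.put("c", 3);

    // A plain get() is enough to mark "a" as most recently used.
    lru.get("a");

    // Prints {b=2, c=3, a=1}: "b" is now the least recently used entry.
    System.out.println(lru);
  }
}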
hudi_WriteOperationType_value
/** * Getter for value. * @return string form of WriteOperationType */ public String value() { return value; }
3.68
hadoop_DiskBalancerWorkItem_getErrorCount
/** * Returns the number of errors encountered. * * @return long */ public long getErrorCount() { return errorCount; }
3.68
morf_MySqlDialect_getSqlForDateToYyyymmdd
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmdd(org.alfasoftware.morf.sql.element.Function) */ @Override protected String getSqlForDateToYyyymmdd(Function function) { return String.format("CAST(DATE_FORMAT(%s, '%%Y%%m%%d') AS DECIMAL(8))", getSqlFrom(function.getArguments().get(0))); }
3.68
hadoop_StagingCommitter_failDestinationExists
/** * Generate a {@link PathExistsException} because the destination exists. * Lists some of the child entries first, to help diagnose the problem. * @param path path which exists * @param description description (usually task/job ID) * @return an exception to throw */ protected PathExistsException failDestinationExists(final Path path, final String description) { LOG.error("{}: Failing commit by job {} to write" + " to existing output path {}.", description, getJobContext().getJobID(), path); // List the first 10 descendants, to give some details // on what is wrong but not overload things if there are many files. try { int limit = 10; RemoteIterator<LocatedFileStatus> lf = getDestFS().listFiles(path, true); LOG.info("Partial Directory listing"); while (limit > 0 && lf.hasNext()) { limit--; LocatedFileStatus status = lf.next(); LOG.info("{}: {}", status.getPath(), status.isDirectory() ? " dir" : ("file size " + status.getLen() + " bytes")); } cleanupRemoteIterator(lf); } catch (IOException e) { LOG.info("Discarding exception raised when listing {}: " + e, path); LOG.debug("stack trace ", e); } return new PathExistsException(path.toString(), description + ": " + InternalCommitterConstants.E_DEST_EXISTS); }
3.68
framework_ApplicationConnection_unregisterPaintable
/** * @deprecated As of 7.0. No longer serves any purpose. */ @Deprecated public void unregisterPaintable(ServerConnector p) { getLogger().info("unregisterPaintable (unnecessarily) called for " + Util.getConnectorString(p)); }
3.68
hbase_HRegion_checkTargetRegion
/** * Checks whether the given regionName is either equal to our region, or that the regionName is * the primary region to our corresponding range for the secondary replica. */ private void checkTargetRegion(byte[] encodedRegionName, String exceptionMsg, Object payload) throws WrongRegionException { if (Bytes.equals(this.getRegionInfo().getEncodedNameAsBytes(), encodedRegionName)) { return; } if ( !RegionReplicaUtil.isDefaultReplica(this.getRegionInfo()) && Bytes.equals(encodedRegionName, this.fs.getRegionInfoForFS().getEncodedNameAsBytes()) ) { return; } throw new WrongRegionException( exceptionMsg + payload + " targeted for region " + Bytes.toStringBinary(encodedRegionName) + " does not match this region: " + this.getRegionInfo()); }
3.68
hbase_TableSplit_compareTo
/** * Compares this split against the given one. * @param split The split to compare to. * @return The result of the comparison. * @see java.lang.Comparable#compareTo(java.lang.Object) */ @Override public int compareTo(TableSplit split) { // If the table names of the two splits are the same then compare the start rows, // otherwise compare based on the table names int tableNameComparison = getTable().compareTo(split.getTable()); return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo(getStartRow(), split.getStartRow()); }
3.68
morf_HumanReadableStatementHelper_paren
/** * Wraps a string in parentheses if the field is considered {@link #isComplexField}. * * @param string the string to process. * @param field the field to evaluate. * @return the original string, or one wrapped in parentheses. */ private static String paren(final String string, final AliasedField field) { if (isComplexField(field)) { return "(" + string + ")"; } else { return string; } }
3.68
streampipes_ArticleSentencesExtractor_getInstance
/** * Returns the singleton instance for {@link ArticleSentencesExtractor}. */ public static ArticleSentencesExtractor getInstance() { return INSTANCE; }
3.68
hadoop_CommonAuditContext_currentAuditContext
/** * Get the current common audit context. Thread local. * @return the audit context of this thread. */ public static CommonAuditContext currentAuditContext() { return ACTIVE_CONTEXT.get(); }
3.68
hudi_CompactionOperation_convertFromAvroRecordInstance
/** * Convert Avro generated Compaction operation to POJO for Spark RDD operation. * * @param operation Hoodie Compaction Operation * @return the equivalent {@link CompactionOperation} POJO */ public static CompactionOperation convertFromAvroRecordInstance(HoodieCompactionOperation operation) { CompactionOperation op = new CompactionOperation(); op.baseInstantTime = operation.getBaseInstantTime(); op.dataFileName = Option.ofNullable(operation.getDataFilePath()); op.dataFileCommitTime = op.dataFileName.map(p -> FSUtils.getCommitTime(new Path(p).getName())); op.deltaFileNames = new ArrayList<>(operation.getDeltaFilePaths()); op.id = new HoodieFileGroupId(operation.getPartitionPath(), operation.getFileId()); op.metrics = operation.getMetrics() == null ? new HashMap<>() : new HashMap<>(operation.getMetrics()); op.bootstrapFilePath = Option.ofNullable(operation.getBootstrapFilePath()); return op; }
3.68
framework_AbstractConnector_getConnection
/* * (non-Javadoc) * * @see com.vaadin.client.VPaintable#getConnection() */ @Override public final ApplicationConnection getConnection() { return connection; }
3.68
streampipes_Networking_getIpAddressForOsx
/** * This method is a workaround for developers using OSX. * On OSX, InetAddress.getLocalHost().getHostAddress() always returns 127.0.0.1, * so as a workaround developers must manually set the SP_HOST environment variable to the actual IP. * With this method the IP is set automatically. * * @return IP */ private static String getIpAddressForOsx() { String result = DEFAULT_LOCALHOST_IP; try (Socket socket = new Socket()) { socket.connect(new InetSocketAddress("streampipes.apache.org", 80)); result = socket.getLocalAddress().getHostAddress(); } catch (IOException e) { LOG.error(e.getMessage()); LOG.error("IP address was not set automatically. Use the environment variable SP_HOST to set it manually."); } return result; }
3.68
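As a design note on the snippet above: connecting a TCP socket performs a full handshake with streampipes.apache.org just to learn the local outbound address. A hedged alternative sketch (not the project's implementation; the target address 8.8.8.8 is an arbitrary assumption) uses a connected UDP socket, which only asks the OS to pick a route and sends no packet:

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetAddress;

public class OutboundAddressDemo {
  // Determines the address the OS would use to reach an external host.
  // Connecting a UDP socket only fixes the route; no packet is sent.
  static String outboundAddress() {
    try (DatagramSocket socket = new DatagramSocket()) {
      socket.connect(InetAddress.getByName("8.8.8.8"), 53); // any routable address works
      return socket.getLocalAddress().getHostAddress();
    } catch (IOException e) {
      return "127.0.0.1"; // fall back to loopback, mirroring the snippet's default
    }
  }

  public static void main(String[] args) {
    System.out.println(outboundAddress());
  }
}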
hbase_Mutation_getCellBuilder
/** * get a CellBuilder instance that already has relevant Type and Row set. * @param cellBuilderType e.g CellBuilderType.SHALLOW_COPY * @param cellType e.g Cell.Type.Put * @return CellBuilder which already has relevant Type and Row set. */ protected CellBuilder getCellBuilder(CellBuilderType cellBuilderType, Cell.Type cellType) { CellBuilder builder = CellBuilderFactory.create(cellBuilderType).setRow(row).setType(cellType); return new CellBuilder() { @Override public CellBuilder setRow(byte[] row) { return this; } @Override public CellBuilder setType(Cell.Type type) { return this; } @Override public CellBuilder setRow(byte[] row, int rOffset, int rLength) { return this; } @Override public CellBuilder setFamily(byte[] family) { builder.setFamily(family); return this; } @Override public CellBuilder setFamily(byte[] family, int fOffset, int fLength) { builder.setFamily(family, fOffset, fLength); return this; } @Override public CellBuilder setQualifier(byte[] qualifier) { builder.setQualifier(qualifier); return this; } @Override public CellBuilder setQualifier(byte[] qualifier, int qOffset, int qLength) { builder.setQualifier(qualifier, qOffset, qLength); return this; } @Override public CellBuilder setTimestamp(long timestamp) { builder.setTimestamp(timestamp); return this; } @Override public CellBuilder setValue(byte[] value) { builder.setValue(value); return this; } @Override public CellBuilder setValue(byte[] value, int vOffset, int vLength) { builder.setValue(value, vOffset, vLength); return this; } @Override public Cell build() { return builder.build(); } @Override public CellBuilder clear() { builder.clear(); // reset the row and type builder.setRow(row); builder.setType(cellType); return this; } }; }
3.68
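The getCellBuilder snippet above wraps a real CellBuilder and deliberately turns setRow and setType into no-ops so callers cannot override values the mutation has already fixed. The following is a generic, self-contained sketch of that pinning-wrapper idea using hypothetical interfaces, not HBase's API:

public class PinnedBuilderDemo {
  interface RecordBuilder {
    RecordBuilder key(String key);
    RecordBuilder value(String value);
    String build();
  }

  // A plain builder that simply concatenates what it was given.
  static class SimpleBuilder implements RecordBuilder {
    private String key = "", value = "";
    public RecordBuilder key(String k) { this.key = k; return this; }
    public RecordBuilder value(String v) { this.value = v; return this; }
    public String build() { return key + "=" + value; }
  }

  // Wrapper that fixes the key up front and silently ignores later attempts to change it,
  // the same shape as the no-op setRow/setType overrides in the snippet above.
  static class PinnedKeyBuilder implements RecordBuilder {
    private final RecordBuilder delegate;
    PinnedKeyBuilder(RecordBuilder delegate, String pinnedKey) { this.delegate = delegate.key(pinnedKey); }
    public RecordBuilder key(String k) { return this; } // pinned: ignore
    public RecordBuilder value(String v) { delegate.value(v); return this; }
    public String build() { return delegate.build(); }
  }

  public static void main(String[] args) {
    RecordBuilder b = new PinnedKeyBuilder(new SimpleBuilder(), "row-1");
    b.key("something-else").value("42"); // the key() call has no effect
    System.out.println(b.build());       // prints row-1=42
  }
}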
hadoop_HdfsFileStatus_mtime
/** * Set the modification time of this entity (default = 0). * @param mtime Last modified time * @return This Builder instance */ public Builder mtime(long mtime) { this.mtime = mtime; return this; }
3.68
hbase_Reference_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { return "" + this.region; }
3.68
hadoop_BlockPoolTokenSecretManager_createIdentifier
/** Return an empty BlockTokenIdentifier */ @Override public BlockTokenIdentifier createIdentifier() { return new BlockTokenIdentifier(); }
3.68
hbase_SnapshotOfRegionAssignmentFromMeta_getRegionNameToRegionInfoMap
/** * Get the mapping from region name to region info. * @return the map from region name to {@link RegionInfo} */ public Map<String, RegionInfo> getRegionNameToRegionInfoMap() { return this.regionNameToRegionInfoMap; }
3.68
hibernate-validator_ValidationXmlParser_parseValidationXml
/** * Tries to check whether a <i>validation.xml</i> file exists and parses it. * * @return The parameters parsed out of <i>validation.xml</i> wrapped in an instance of {@code ConfigurationImpl.ValidationBootstrapParameters}. */ public final BootstrapConfiguration parseValidationXml() { InputStream in = getValidationXmlInputStream(); if ( in == null ) { return BootstrapConfigurationImpl.getDefaultBootstrapConfiguration(); } ClassLoader previousTccl = run( GetClassLoader.fromContext() ); try { run( SetContextClassLoader.action( ValidationXmlParser.class.getClassLoader() ) ); // HV-970 The parser helper is only loaded if there actually is a validation.xml file; // this avoids accessing javax.xml.stream.* (which does not exist on Android) when not actually // working with the XML configuration XmlParserHelper xmlParserHelper = new XmlParserHelper(); // the InputStream supports mark and reset in.mark( Integer.MAX_VALUE ); XMLEventReader xmlEventReader = xmlParserHelper.createXmlEventReader( VALIDATION_XML_FILE, new CloseIgnoringInputStream( in ) ); String schemaVersion = xmlParserHelper.getSchemaVersion( VALIDATION_XML_FILE, xmlEventReader ); xmlEventReader.close(); in.reset(); // The validation is done first as StAX builders used below are assuming that the XML file is correct and don't // do any validation of the input. Schema schema = getSchema( xmlParserHelper, schemaVersion ); Validator validator = schema.newValidator(); validator.validate( new StreamSource( new CloseIgnoringInputStream( in ) ) ); in.reset(); xmlEventReader = xmlParserHelper.createXmlEventReader( VALIDATION_XML_FILE, new CloseIgnoringInputStream( in ) ); ValidationConfigStaxBuilder validationConfigStaxBuilder = new ValidationConfigStaxBuilder( xmlEventReader ); xmlEventReader.close(); in.reset(); return validationConfigStaxBuilder.build(); } catch (XMLStreamException | IOException | SAXException e) { throw LOG.getUnableToParseValidationXmlFileException( VALIDATION_XML_FILE, e ); } finally { run( SetContextClassLoader.action( previousTccl ) ); closeStream( in ); } }
3.68
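parseValidationXml leans on InputStream.mark/reset to make three passes (version sniffing, schema validation, StAX parsing) over the same bytes. A minimal stand-alone illustration of the pattern, using a ByteArrayInputStream because it always supports mark/reset, is:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class MarkResetDemo {
  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream("<config/>".getBytes(StandardCharsets.UTF_8));

    in.mark(Integer.MAX_VALUE);            // remember the current position (the start)

    byte[] firstPass = in.readAllBytes();  // pass 1: consume the whole stream
    in.reset();                            // rewind to the marked position
    byte[] secondPass = in.readAllBytes(); // pass 2: read the same bytes again

    System.out.println(new String(firstPass, StandardCharsets.UTF_8));
    System.out.println(new String(secondPass, StandardCharsets.UTF_8));
  }
}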
flink_SubpartitionDiskCacheManager_addBuffer
/** This method is only called by the task thread. */ private void addBuffer(Buffer buffer) { synchronized (allBuffers) { allBuffers.add(new Tuple2<>(buffer, bufferIndex)); } // Updated outside the lock: per the javadoc, only the task thread calls this method. bufferIndex++; }
3.68