Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68).
flink_IntervalJoinOperator_sideOutput
/** Writes a skipped late-arriving element to the side output. */ protected <T> void sideOutput(T value, long timestamp, boolean isLeft) { if (isLeft) { if (leftLateDataOutputTag != null) { output.collect(leftLateDataOutputTag, new StreamRecord<>((T1) value, timestamp)); } } else { if (rightLateDataOutputTag != null) { output.collect(rightLateDataOutputTag, new StreamRecord<>((T2) value, timestamp)); } } }
3.68
graphhopper_LandmarkStorage_setLogDetails
/** * By default do not log many details. */ public void setLogDetails(boolean logDetails) { this.logDetails = logDetails; }
3.68
hadoop_DockerCommand_setClientConfigDir
/** * Add the client configuration directory to the docker command. * * The client configuration option precedes any of the docker subcommands * (such as run, load, pull, etc). Ordering will be handled by * container-executor. Docker expects the value to be a directory containing * the file config.json. This file is typically generated via docker login. * * @param clientConfigDir - directory containing the docker client config. */ public void setClientConfigDir(String clientConfigDir) { if (clientConfigDir != null) { addCommandArguments("docker-config", clientConfigDir); } }
3.68
flink_HistoryServerArchiveFetcher_updateJobOverview
/** * This method replicates the JSON response that would be given by the JobsOverviewHandler when * listing both running and finished jobs. * * <p>Every job archive contains a joboverview.json file containing the same structure. Since * jobs are archived individually, however, each archive's list of finished jobs contains only a single job. * * <p>For the display in the HistoryServer WebFrontend we have to combine these overviews. */ private static void updateJobOverview(File webOverviewDir, File webDir) { try (JsonGenerator gen = jacksonFactory.createGenerator( HistoryServer.createOrGetFile(webDir, JobsOverviewHeaders.URL))) { File[] overviews = new File(webOverviewDir.getPath()).listFiles(); if (overviews != null) { Collection<JobDetails> allJobs = new ArrayList<>(overviews.length); for (File overview : overviews) { MultipleJobsDetails subJobs = mapper.readValue(overview, MultipleJobsDetails.class); allJobs.addAll(subJobs.getJobs()); } mapper.writeValue(gen, new MultipleJobsDetails(allJobs)); } } catch (IOException ioe) { LOG.error("Failed to update job overview.", ioe); } }
3.68
framework_ApplicationConnection_getApplicationState
/** * Returns the state of this application. An application state goes from * "initializing" to "running" to "stopped". There is no way for an * application to go back to a previous state, i.e. a stopped application * can never be re-started. * * @since 7.6 * @return the current state of this application */ public ApplicationState getApplicationState() { return applicationState; }
3.68
hudi_BufferedRandomAccessFile_loadNewBlockToBuffer
/** * Loads a new data block. Returns false when EOF is reached. * @return whether a new data block was loaded or not * @throws IOException */ private boolean loadNewBlockToBuffer() throws IOException { if (this.isEOF) { return false; } // read next block into buffer this.seek(this.currentPosition); // if currentPosition still equals validLastPosition, no new data was loaded: EOF return this.currentPosition != this.validLastPosition; }
3.68
morf_SqlUtils_windowFunction
/** * Encapsulates the generation of a PARTITION BY SQL statement. * * <p>The call structure imitates the end SQL and is structured as follows:</p> * * <blockquote><pre> * SqlUtils.windowFunction([function]) = [function] * |----&gt; .partitionBy([fields]...) = [function] OVER (PARTITION BY [fields]) * |----&gt; .orderBy([fields]...) = [function] OVER (PARTITION BY [fields] ORDER BY [fields]) * |----&gt; .orderBy([fields]...) = [function] OVER (ORDER BY [fields]) * </pre></blockquote> * * Restrictions: * <ul> * <li>partitionBy(..) is optional: If not specified it treats all the rows of the result set as a single group.</li> * <li>orderBy(..) is optional. If not specified the entire partition will be used as the window frame. If specified a range between the first row and the current row of the window is used (i.e. RANGE UNBOUNDED PRECEDING AND CURRENT ROW for Oracle).</li> * <li>The default direction for fields in orderBy(..) is ASC.</li> * </ul> * @author Copyright (c) Alfa Financial Software 2017 * @param function The function * @return The windowing function builder */ public static WindowFunction.Builder windowFunction(Function function) { return WindowFunction.over(function); }
3.68
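A hypothetical usage sketch of the builder described above; the field names and the SqlUtils.field, Function.sum and build() helpers are assumptions for illustration, not taken from the snippet:

    // Hypothetical sketch: SUM(amount) OVER (PARTITION BY account ORDER BY tradeDate)
    AliasedField runningTotal = SqlUtils.windowFunction(Function.sum(SqlUtils.field("amount")))
        .partitionBy(SqlUtils.field("account"))
        .orderBy(SqlUtils.field("tradeDate"))
        .build();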
dubbo_RpcContextAttachment_copyOf
/** * Also see {@link RpcServiceContext#copyOf(boolean)} * * @return a copy of RpcContextAttachment with deep copied attachments */ public RpcContextAttachment copyOf(boolean needCopy) { if (!isValid()) { return null; } if (needCopy) { RpcContextAttachment copy = new RpcContextAttachment(); if (CollectionUtils.isNotEmptyMap(attachments)) { copy.attachments.putAll(this.attachments); } if (asyncContext != null) { copy.asyncContext = this.asyncContext; } return copy; } else { return this; } }
3.68
hbase_HMaster_run
// Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will // block in here until then. @Override public void run() { try { installShutdownHook(); registerConfigurationObservers(); Threads.setDaemonThreadRunning(new Thread(TraceUtil.tracedRunnable(() -> { try { int infoPort = putUpJettyServer(); startActiveMasterManager(infoPort); } catch (Throwable t) { // Make sure we log the exception. String error = "Failed to become Active Master"; LOG.error(error, t); // Abort should have been called already. if (!isAborted()) { abort(error, t); } } }, "HMaster.becomeActiveMaster")), getName() + ":becomeActiveMaster"); while (!isStopped() && !isAborted()) { sleeper.sleep(); } final Span span = TraceUtil.createSpan("HMaster exiting main loop"); try (Scope ignored = span.makeCurrent()) { stopInfoServer(); closeClusterConnection(); stopServiceThreads(); if (this.rpcServices != null) { this.rpcServices.stop(); } closeZooKeeper(); closeTableDescriptors(); span.setStatus(StatusCode.OK); } finally { span.end(); } } finally { if (this.clusterSchemaService != null) { // If on way out, then we are no longer active master. this.clusterSchemaService.stopAsync(); try { this.clusterSchemaService .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException te) { LOG.warn("Failed shutdown of clusterSchemaService", te); } } this.activeMaster = false; } }
3.68
hbase_Random64_seedUniquifier
/** * Copy from {@link Random#seedUniquifier()} */ private static long seedUniquifier() { for (;;) { long current = seedUniquifier.get(); long next = current * 181783497276652981L; if (seedUniquifier.compareAndSet(current, next)) { return next; } } }
3.68
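A standalone sketch of the lock-free update pattern used above: read the current value, compute the multiplied successor, and retry the compare-and-set until no other thread raced in. The initial seed constant matches the one in the JDK's java.util.Random and is used here purely for illustration:

    import java.util.concurrent.atomic.AtomicLong;

    static final AtomicLong seedUniquifier = new AtomicLong(8682522807148012L);

    static long nextSeed() {
        for (;;) {
            long current = seedUniquifier.get();   // snapshot the shared state
            long next = current * 181783497276652981L;
            if (seedUniquifier.compareAndSet(current, next)) {
                return next;                       // CAS succeeded, no race
            }
            // CAS failed: another thread updated the seed first, retry
        }
    }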
starts_Attribute_isUnknown
/** * Returns <code>true</code> if this type of attribute is unknown. The default * implementation of this method always returns <code>true</code>. * * @return <code>true</code> if this type of attribute is unknown. */ public boolean isUnknown() { return true; }
3.68
hadoop_LongLong_and
/** And operation (&). */ long and(long mask) { return d0 & mask; }
3.68
framework_PushConfiguration_getPushMode
/* * (non-Javadoc) * * @see com.vaadin.ui.PushConfiguration#getPushMode() */ @Override public PushMode getPushMode() { return getState(false).mode; }
3.68
morf_AbstractSqlDialectTest_testInsertFromSelectWithMismatchedFieldsError
/** * Tests that an insert from a select with mismatched fields generates an error. */ @Test public void testInsertFromSelectWithMismatchedFieldsError() { SelectStatement sourceStmt = new SelectStatement(new FieldReference("id"), new FieldReference("version"), new FieldReference(STRING_FIELD)) .from(new TableReference(OTHER_TABLE)); InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE)) .fields(new FieldReference("id"), new FieldReference("version"), new FieldReference(STRING_FIELD), new FieldReference(INT_FIELD)) .from(sourceStmt); try { testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE)); fail("Should error due to mismatched field counts"); } catch (IllegalArgumentException e) { // Expected exception } }
3.68
dubbo_NamedThreadFactory_getThreadNum
// for test public AtomicInteger getThreadNum() { return mThreadNum; }
3.68
hadoop_AzureBlobFileSystem_access
/** * Checks if the user can access a path. The mode specifies which access * checks to perform. If the requested permissions are granted, then the * method returns normally. If access is denied, then the method throws an * {@link AccessControlException}. * * @param path Path to check * @param mode type of access to check * @throws AccessControlException if access is denied * @throws java.io.FileNotFoundException if the path does not exist * @throws IOException see specific implementation */ @Override public void access(final Path path, final FsAction mode) throws IOException { LOG.debug("AzureBlobFileSystem.access path : {}, mode : {}", path, mode); Path qualifiedPath = makeQualified(path); try { TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.ACCESS, tracingHeaderFormat, listener); this.abfsStore.access(qualifiedPath, mode, tracingContext); } catch (AzureBlobFileSystemException ex) { checkCheckAccessException(path, ex); } }
3.68
framework_Window_hasCloseShortcut
/** * Checks if a close window shortcut key has already been registered. * * @since 7.6 * @param keyCode * the keycode for invoking the shortcut * @param modifiers * the (optional) modifiers for invoking the shortcut. Can be set * to null to be explicit about not having modifiers. * @return true, if an exactly matching shortcut has been registered. */ public boolean hasCloseShortcut(int keyCode, int... modifiers) { for (CloseShortcut shortcut : closeShortcuts) { if (shortcut.equals(keyCode, modifiers)) { return true; } } return false; }
3.68
open-banking-gateway_EncryptionProviderConfig_fintechOnlyEncryptionProvider
/** * Fintech data and consent access encryption. * @param fintechOnlyKeyPairConfig Asymmetric encryption key configuration. * @return Fintech data encryption */ @Bean FintechOnlyEncryptionServiceProvider fintechOnlyEncryptionProvider(FintechOnlyKeyPairConfig fintechOnlyKeyPairConfig) { return new FintechOnlyEncryptionServiceProvider(new CmsEncryptionOper(fintechOnlyKeyPairConfig)); }
3.68
hbase_HStoreFile_closeStoreFile
/** * @param evictOnClose whether to evict blocks belonging to this file */ public synchronized void closeStoreFile(boolean evictOnClose) throws IOException { if (this.initialReader != null) { this.initialReader.close(evictOnClose); this.initialReader = null; } }
3.68
streampipes_TagActionMap_addTagAction
/** * Adds a particular {@link TagAction} for a given tag. If a TagAction already exists for that * tag, a chained action, consisting of the previous and the new {@link TagAction} is created. * * @param tag The tag (will be stored internally 1. as it is, 2. lower-case, 3. upper-case) * @param action The {@link TagAction} */ protected void addTagAction(final String tag, final TagAction action) { TagAction previousAction = get(tag); if (previousAction == null) { setTagAction(tag, action); } else { setTagAction(tag, new CommonTagActions.Chained(previousAction, action)); } }
3.68
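A minimal sketch of the chaining idea above, with simplified stand-in types rather than the streampipes classes: the previous and the new action are wrapped so that both run, in registration order:

    interface TagAction { void apply(String tag); }

    // Stand-in for CommonTagActions.Chained: runs two actions in order.
    final class Chained implements TagAction {
        private final TagAction first;
        private final TagAction second;
        Chained(TagAction first, TagAction second) { this.first = first; this.second = second; }
        @Override public void apply(String tag) { first.apply(tag); second.apply(tag); }
    }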
morf_SchemaAdapter_viewNames
/** * @see org.alfasoftware.morf.metadata.Schema#viewNames() */ @Override public Collection<String> viewNames() { return delegate.viewNames(); }
3.68
hbase_MonitoredRPCHandlerImpl_getStatus
/** * Gets the status of this handler; if it is currently servicing an RPC, this status will include * the RPC information. * @return a String describing the current status. */ @Override public String getStatus() { if (getState() != State.RUNNING) { return super.getStatus(); } return super.getStatus() + " from " + getClient() + ": " + getRPC(); }
3.68
hadoop_MawoConfiguration_getRpcServerPort
/** * Get MaWo RPC server Port. * @return value of rpc.server.port */ public int getRpcServerPort() { return Integer.parseInt(configsMap.get(RPC_SERVER_PORT)); }
3.68
graphhopper_DistanceCalc3D_calcDist
/** * @param fromHeight in meters above 0 * @param toHeight in meters above 0 */ public double calcDist(double fromLat, double fromLon, double fromHeight, double toLat, double toLon, double toHeight) { double len = super.calcDist(fromLat, fromLon, toLat, toLon); double delta = Math.abs(toHeight - fromHeight); return Math.sqrt(delta * delta + len * len); }
3.68
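The method treats the flat (great-circle) ground distance and the height difference as the two legs of a right triangle. A quick worked check with made-up numbers:

    double len = 300.0;                                   // ground distance in meters
    double delta = Math.abs(500.0 - 100.0);               // |toHeight - fromHeight| = 400 m
    double dist3d = Math.sqrt(delta * delta + len * len); // sqrt(300^2 + 400^2) = 500.0 m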
flink_RateLimiterStrategy_perSecond
/** * Creates a {@code RateLimiterStrategy} that is limiting the number of records per second. * * @param recordsPerSecond The number of records produced per second. The actual number of * produced records is subject to rounding due to dividing the number of produced records * among the parallel instances. */ static RateLimiterStrategy perSecond(double recordsPerSecond) { return parallelism -> new GuavaRateLimiter(recordsPerSecond / parallelism); }
3.68
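The division is what makes the limit global: each parallel subtask gets an equal share of the budget. For example, with illustrative numbers only:

    double recordsPerSecond = 100.0;  // requested global rate
    int parallelism = 8;              // parallel source instances
    double perSubtask = recordsPerSecond / parallelism; // 12.5 records/s per instance
    // 8 instances * 12.5 records/s = 100 records/s in aggregate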
hudi_HoodiePipeline_pk
/** * Add primary keys. */ public Builder pk(String... pks) { this.pk = String.join(",", pks); return this; }
3.68
flink_CliClient_executeInNonInteractiveMode
/** Opens the CLI in non-interactive mode and executes the given content. */ public void executeInNonInteractiveMode(String content) { try { terminal = terminalFactory.get(); executeFile(content, terminal.output(), ExecutionMode.NON_INTERACTIVE_EXECUTION); } finally { closeTerminal(); } }
3.68
hadoop_MoveStep_getIdealStorage
/** * Gets the IdealStorage. * * @return the ideal storage as a double */ @Override public double getIdealStorage() { return idealStorage; }
3.68
framework_ApplicationConnection_highlightConnector
/** * Sends a request to the server to print details to console that will help * the developer to locate the corresponding server-side connector in the * source code. * * @param serverConnector the connector whose server-side details should be printed * @deprecated as of 7.1. Replaced by * {@link UIConnector#showServerDebugInfo(ServerConnector)} */ @Deprecated void highlightConnector(ServerConnector serverConnector) { getUIConnector().showServerDebugInfo(serverConnector); }
3.68
framework_VComboBox_getEmptySelectionCaption
/** * Gets the empty selection caption. * * @since 8.0.7 * @return the empty selection caption */ public String getEmptySelectionCaption() { return emptySelectionCaption; }
3.68
framework_AutoScroller_setScrollArea
/** * Set the auto scroll area height or width depending on the scrolling axis. * This is the number of pixels from the edge of the grid within which * scrolling is triggered. * <p> * Defaults to 100px. * * @param px * the pixel height/width for the auto scroll area depending on * direction */ public void setScrollArea(int px) { scrollAreaPX = px; }
3.68
framework_DateCell_setVerticalSized
/** * @param isVerticalSized * if true, this DateCell is sized with CSS and not via * {@link #setHeightPX(int)} */ public void setVerticalSized(boolean isVerticalSized) { if (isVerticalSized) { addStyleDependentName("Vsized"); // recalc heights&size for events. all other height sizes come // from css startingSlotHeight = slotElements[0].getOffsetHeight(); // Update slotHeight for each DateCellDayEvent child updateEventCellsHeight(); recalculateEventPositions(); if (isToday()) { recalculateTimeBarPosition(); } } else { removeStyleDependentName("Vsized"); } }
3.68
querydsl_GeometryExpressions_translate
/** * Translates the geometry to a new location using the numeric parameters as offsets. * * @param expr geometry * @param deltax x offset * @param deltay y offset * @param deltaz z offset * @param <T> the geometry type * @return geometry */ public static <T extends Geometry> GeometryExpression<T> translate(Expression<T> expr, float deltax, float deltay, float deltaz) { return geometryOperation(expr.getType(), SpatialOps.TRANSLATE2, expr, ConstantImpl.create(deltax), ConstantImpl.create(deltay), ConstantImpl.create(deltaz)); }
3.68
hadoop_AMRMTokenSecretManager_createIdentifier
/** * Creates an empty TokenId to be used for de-serializing an * {@link AMRMTokenIdentifier} by the RPC layer. */ @Override public AMRMTokenIdentifier createIdentifier() { return new AMRMTokenIdentifier(); }
3.68
framework_VScrollTable_insertRows
/** * Inserts rows as provided in the rowData starting at firstIndex. * * @param rowData * @param firstIndex * @param rows * the number of rows * @return a list of the rows added. */ protected List<VScrollTableRow> insertRows(UIDL rowData, int firstIndex, int rows) { aligns = tHead.getColumnAlignments(); final Iterator<?> it = rowData.iterator(); List<VScrollTableRow> insertedRows = new ArrayList<VScrollTableRow>(); if (firstIndex == lastRendered + 1) { while (it.hasNext()) { final VScrollTableRow row = prepareRow((UIDL) it.next()); addRow(row); insertedRows.add(row); if (postponeSanityCheckForLastRendered) { lastRendered++; } else { setLastRendered(lastRendered + 1); } } fixSpacers(); } else if (firstIndex + rows == firstRendered) { final VScrollTableRow[] rowArray = new VScrollTableRow[rows]; int i = rows; while (it.hasNext()) { i--; rowArray[i] = prepareRow((UIDL) it.next()); } for (i = 0; i < rows; i++) { addRowBeforeFirstRendered(rowArray[i]); insertedRows.add(rowArray[i]); firstRendered--; } } else { // insert in the middle int ix = firstIndex; while (it.hasNext()) { VScrollTableRow row = prepareRow((UIDL) it.next()); insertRowAt(row, ix); insertedRows.add(row); if (postponeSanityCheckForLastRendered) { lastRendered++; } else { setLastRendered(lastRendered + 1); } ix++; } fixSpacers(); } return insertedRows; }
3.68
hbase_ByteBufferIOEngine_shutdown
/** * No operation for the shutdown in the memory IO engine */ @Override public void shutdown() { // Nothing to do. }
3.68
hbase_BlockCacheUtil_validateBlockAddition
/** * Validate that the existing and newBlock are the same without including the nextBlockMetadata, * if not, throw an exception. If they are the same without the nextBlockMetadata, return the * comparison. * @param existing block that is existing in the cache. * @param newBlock block that is trying to be cached. * @param cacheKey the cache key of the blocks. * @return comparison of the existing block to the newBlock. */ public static int validateBlockAddition(Cacheable existing, Cacheable newBlock, BlockCacheKey cacheKey) { int comparison = compareCacheBlock(existing, newBlock, false); if (comparison != 0) { throw new RuntimeException( "Cached block contents differ, which should not have happened. " + "cacheKey:" + cacheKey); } if ((existing instanceof HFileBlock) && (newBlock instanceof HFileBlock)) { comparison = ((HFileBlock) existing).getNextBlockOnDiskSize() - ((HFileBlock) newBlock).getNextBlockOnDiskSize(); } return comparison; }
3.68
hbase_RegionNormalizerManager_normalizeRegions
/** * Submit tables for normalization. * @param tables a list of tables to submit. * @param isHighPriority {@code true} when these requested tables should skip to the front of the * queue. * @return {@code true} when work was queued, {@code false} otherwise. */ public boolean normalizeRegions(List<TableName> tables, boolean isHighPriority) { if (workQueue == null) { return false; } if (isHighPriority) { workQueue.putAllFirst(tables); } else { workQueue.putAll(tables); } return true; }
3.68
streampipes_PipelineManager_stopPipeline
/** * Stops all processing elements of the pipeline * * @param pipelineId of pipeline to be stopped * @param forceStop when it is true, the pipeline is stopped, even if not all processing element * containers could be reached * @return pipeline status of the stop operation */ public static PipelineOperationStatus stopPipeline(String pipelineId, boolean forceStop) { Pipeline pipeline = getPipeline(pipelineId); return Operations.stopPipeline(pipeline, forceStop); }
3.68
hbase_HFileBlock_overwriteHeader
/** * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position is modified as * side-effect. */ private void overwriteHeader() { bufWithoutChecksum.rewind(); blockType.write(bufWithoutChecksum); bufWithoutChecksum.putInt(onDiskSizeWithoutHeader); bufWithoutChecksum.putInt(uncompressedSizeWithoutHeader); bufWithoutChecksum.putLong(prevBlockOffset); if (this.fileContext.isUseHBaseChecksum()) { bufWithoutChecksum.put(fileContext.getChecksumType().getCode()); bufWithoutChecksum.putInt(fileContext.getBytesPerChecksum()); bufWithoutChecksum.putInt(onDiskDataSizeWithHeader); } }
3.68
querydsl_AbstractJDOQuery_close
/** * Close the query and related resources */ @Override public void close() { for (Query query : queries) { query.closeAll(); } }
3.68
pulsar_CompletableFutureCancellationHandler_setCancelAction
/** * Set the action to run when the future gets cancelled or times out. * The cancellation or timeout might originate from any "upstream" future. * The implementation ensures that the cancel action gets called once. * Handles possible race conditions that might happen when the future gets cancelled * before the cancel action is set to this handler. In this case, the * cancel action gets called when the action is set. * * @param cancelAction the action to run when the future gets cancelled or times out */ public void setCancelAction(Runnable cancelAction) { if (this.cancelAction != null || cancelHandled.get()) { throw new IllegalStateException("cancelAction can only be set once."); } this.cancelAction = Objects.requireNonNull(cancelAction); // handle race condition in the case that the future was already cancelled when the handler is set runCancelActionOnceIfCancelled(); }
3.68
dubbo_NacosRegistry_isConformRules
/** * Verify whether it is a dubbo service * * @param serviceName the service name to check * @return {@code true} if the name consists of exactly four segments * @since 2.7.12 */ private boolean isConformRules(String serviceName) { return serviceName.split(NAME_SEPARATOR, -1).length == 4; }
3.68
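The -1 limit is what makes the segment count reliable: with a negative limit, String.split keeps trailing empty strings instead of discarding them. A small demonstration, assuming a ':' separator and these service-name shapes purely for illustration:

    int a = "providers:com.foo.DemoService:1.0.0:demo".split(":", -1).length; // 4
    int b = "providers:com.foo.DemoService::".split(":", -1).length;          // 4, empty segments kept
    int c = "providers:com.foo.DemoService::".split(":").length;              // 2, trailing empties dropped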
streampipes_DataProcessorApi_subscribe
/** * Subscribe to the input stream of the processor * * @param processor The data processor to subscribe to * @param index The index of the input stream * @param brokerConfigOverride Additional Kafka settings which will override the default value (see docs) * @param callback The callback where events will be received */ @Override public ISubscription subscribe(DataProcessorInvocation processor, InputStreamIndex index, IBrokerConfigOverride brokerConfigOverride, EventProcessor callback) { return new SubscriptionManager( brokerConfigOverride, processor.getInputStreams().get(index.toIndex()).getEventGrounding(), callback) .subscribe(); }
3.68
open-banking-gateway_FintechSecureStorage_fintechOnlyPrvKeyToPrivate
/** * Register Fintech private key in FinTechs' private Datasafe storage * @param id Key ID * @param key Key to store * @param fintech Owner of the key * @param password Keystore/Datasafe protection password */ @SneakyThrows public void fintechOnlyPrvKeyToPrivate(UUID id, PubAndPrivKey key, Fintech fintech, Supplier<char[]> password) { try (OutputStream os = datasafeServices.privateService().write( WriteRequest.forPrivate( fintech.getUserIdAuth(password), FINTECH_ONLY_KEYS_ID, new FintechOnlyPrvKeyTuple(fintech.getId(), id).toDatasafePathWithoutParent())) ) { serde.writeKey(key.getPublicKey(), key.getPrivateKey(), os); } }
3.68
hudi_HoodieIndex_tagLocation
/** * Looks up the index and tags each incoming record with a location of a file that contains the row (if it is actually * present). */ @Deprecated @PublicAPIMethod(maturity = ApiMaturityLevel.DEPRECATED) public I tagLocation(I records, HoodieEngineContext context, HoodieTable hoodieTable) throws HoodieIndexException { throw new HoodieNotSupportedException("Deprecated API should not be called"); }
3.68
flink_CompensatedSum_add
/** * Increments the Kahan sum by adding two sums, and updating the correction term for reducing * numeric errors. */ public CompensatedSum add(CompensatedSum other) { double correctedSum = other.value() + (delta + other.delta()); double updatedValue = value + correctedSum; double updatedDelta = correctedSum - (updatedValue - value); return new CompensatedSum(updatedValue, updatedDelta); }
3.68
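A plain-Java demonstration (no Flink types) of why the correction term matters: adding 1e-16 to 1.0 ten million times is lost entirely by naive summation, while the compensated form recovers the true total of about 1.000000001:

    double naive = 1.0;
    double sum = 1.0, correction = 0.0;
    for (int i = 0; i < 10_000_000; i++) {
        double tiny = 1e-16;
        naive += tiny;                         // below half an ulp of 1.0, rounds back to 1.0 every time
        double y = tiny - correction;          // apply the previous correction
        double t = sum + y;
        correction = (t - sum) - y;            // capture the low-order bits that were lost
        sum = t;
    }
    // naive == 1.0, sum ~= 1.000000001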
querydsl_NumberExpression_between
/** * Create a {@code this between from and to} expression * * <p>Is equivalent to {@code from <= this <= to}</p> * * @param <A> the number type * @param from inclusive start of range * @param to inclusive end of range * @return this between from and to */ public final <A extends Number & Comparable<?>> BooleanExpression between(@Nullable Expression<A> from, @Nullable Expression<A> to) { if (from == null) { if (to != null) { return Expressions.booleanOperation(Ops.LOE, mixin, to); } else { throw new IllegalArgumentException("Either from or to needs to be non-null"); } } else if (to == null) { return Expressions.booleanOperation(Ops.GOE, mixin, from); } else { return Expressions.booleanOperation(Ops.BETWEEN, mixin, from, to); } }
3.68
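A short sketch of the degradation described above, using Querydsl's Expressions factory; the "price" path is hypothetical:

    import com.querydsl.core.types.dsl.BooleanExpression;
    import com.querydsl.core.types.dsl.Expressions;
    import com.querydsl.core.types.dsl.NumberPath;

    NumberPath<Integer> price = Expressions.numberPath(Integer.class, "price"); // hypothetical path
    BooleanExpression both = price.between(Expressions.constant(1), Expressions.constant(10)); // price BETWEEN 1 AND 10
    BooleanExpression upperOnly = price.between(null, Expressions.constant(10));               // price <= 10
    BooleanExpression lowerOnly = price.between(Expressions.constant(1), null);                // price >= 1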
framework_VaadinService_destroy
/** * Called when the servlet, portlet or similar for this service is being * destroyed. After this method has been called, no more requests will be * handled by this service. * * @see #addServiceDestroyListener(ServiceDestroyListener) * @see Servlet#destroy() * @see Portlet#destroy() * * @since 7.2 */ public void destroy() { ServiceDestroyEvent event = new ServiceDestroyEvent(this); serviceDestroyListeners .forEach(listener -> listener.serviceDestroy(event)); }
3.68
flink_SourceOutputWithWatermarks_createWithSeparateOutputs
/** * Creates a new SourceOutputWithWatermarks that emits records to the given DataOutput and * watermarks to the different WatermarkOutputs. */ public static <E> SourceOutputWithWatermarks<E> createWithSeparateOutputs( PushingAsyncDataInput.DataOutput<E> recordsOutput, WatermarkOutput onEventWatermarkOutput, WatermarkOutput periodicWatermarkOutput, TimestampAssigner<E> timestampAssigner, WatermarkGenerator<E> watermarkGenerator) { return new SourceOutputWithWatermarks<>( recordsOutput, onEventWatermarkOutput, periodicWatermarkOutput, timestampAssigner, watermarkGenerator); }
3.68
framework_StaticSection_addRowAt
/** * Adds a new row at the given index. * * @param index * the index of the new row * @return the added row * @throws IndexOutOfBoundsException * if {@code index < 0 || index > getRowCount()} */ public ROW addRowAt(int index) { ROW row = createRow(); rows.add(index, row); getState(true).rows.add(index, row.getRowState()); getGrid().getColumns().stream().forEach(row::addCell); return row; }
3.68
hbase_RegionReplicationSink_waitUntilStopped
/** * Make sure that we have finished all the replicating requests. * <p/> * After returning, we can make sure there will be no new replicating requests to secondary * replicas. * <p/> * This is used to keep the replicating order the same as the WAL edit order when writing. */ public void waitUntilStopped() throws InterruptedException { synchronized (entries) { while (!stopped) { entries.wait(); } } }
3.68
morf_ArchiveDataSetReader_open
/** * @see org.alfasoftware.morf.xml.XmlStreamProvider#open() */ @Override public void open() { super.open(); if (zipFile != null) { throw new IllegalStateException("Archive data set instance for [" + file + "] already open"); } try { zipFile = new ZipFile(file); } catch (IOException e) { throw new RuntimeException("Error opening zip archive [" + file + "]", e); } ArrayList<? extends ZipEntry> list = Collections.list(zipFile.entries()); if (list.isEmpty()) { throw new IllegalArgumentException("Archive file [" + file + "] is empty"); } boolean tableAdded = false; // add all the table names for (ZipEntry entry : Collections.list(zipFile.entries())) { Matcher matcher = filenamePattern.matcher(entry.getName()); if (matcher.matches()) { addTableName(matcher.group(1), entry.getName()); tableAdded = true; } } if (!tableAdded) { throw new IllegalArgumentException("Archive file [" + file + "] contains no tables in root directory"); } }
3.68
flink_NFACompiler_getInnerIgnoreCondition
/** * @return The {@link IterativeCondition condition} for the {@code IGNORE} edge that * corresponds to the specified {@link Pattern} and extended with stop(until) condition * if necessary. It is applicable only for inner states of a complex state like looping * or times. */ @SuppressWarnings("unchecked") private IterativeCondition<T> getInnerIgnoreCondition(Pattern<T, ?> pattern) { Quantifier.ConsumingStrategy consumingStrategy = pattern.getQuantifier().getInnerConsumingStrategy(); if (headOfGroup(pattern)) { // for the head pattern of a group pattern, we should consider the // inner consume strategy of the group pattern consumingStrategy = currentGroupPattern.getQuantifier().getInnerConsumingStrategy(); } IterativeCondition<T> innerIgnoreCondition = null; switch (consumingStrategy) { case STRICT: innerIgnoreCondition = null; break; case SKIP_TILL_NEXT: innerIgnoreCondition = new RichNotCondition<>((IterativeCondition<T>) pattern.getCondition()); break; case SKIP_TILL_ANY: innerIgnoreCondition = BooleanConditions.trueFunction(); break; } if (currentGroupPattern != null && currentGroupPattern.getUntilCondition() != null) { innerIgnoreCondition = extendWithUntilCondition( innerIgnoreCondition, (IterativeCondition<T>) currentGroupPattern.getUntilCondition(), false); } return innerIgnoreCondition; }
3.68
hbase_Bytes_toStringBinary
/** * Write a printable representation of a byte array. Non-printable characters are hex escaped in * the format \\x%02X, e.g. \x00 \x05 etc. * @param b array to write out * @param off offset to start at * @param len length to write * @return string output */ public static String toStringBinary(final byte[] b, int off, int len) { StringBuilder result = new StringBuilder(); // Just in case we are passed a 'len' that is > buffer length... if (off >= b.length) return result.toString(); if (off + len > b.length) len = b.length - off; for (int i = off; i < off + len; ++i) { int ch = b[i] & 0xFF; if (ch >= ' ' && ch <= '~' && ch != '\\') { result.append((char) ch); } else { result.append("\\x"); result.append(HEX_CHARS_UPPER[ch / 0x10]); result.append(HEX_CHARS_UPPER[ch % 0x10]); } } return result.toString(); }
3.68
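An illustrative call, assuming HBase's Bytes utility on the classpath: printable ASCII passes through unchanged, while control bytes and the backslash itself are hex-escaped:

    import org.apache.hadoop.hbase.util.Bytes;

    byte[] b = new byte[] { 'R', 'o', 'w', 0x00, 0x1F, '\\' };
    String s = Bytes.toStringBinary(b, 0, b.length); // "Row\x00\x1F\x5C"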
morf_TestingDatabaseEquivalentStringComparator_compare
/** * @see org.alfasoftware.morf.stringcomparator.DatabaseEquivalentStringComparator#compare(java.lang.Comparable, java.lang.Comparable) */ @SuppressWarnings({ "rawtypes", "unchecked" }) @Override public int compare(Comparable<?> left, Comparable<?> right) { return ((Comparable)left).compareTo(right); }
3.68
flink_SharedBuffer_upsertEntry
/** * Inserts or updates a SharedBufferNode in the cache. * * @param nodeId id of the event * @param entry SharedBufferNode */ void upsertEntry(NodeId nodeId, Lockable<SharedBufferNode> entry) { this.entryCache.put(nodeId, entry); }
3.68
hbase_BalancerClusterState_getOrComputeWeightedLocality
/** * Returns locality weighted by region size in MB. Will create locality cache if it does not * already exist. */ public double getOrComputeWeightedLocality(int region, int server, BalancerClusterState.LocalityType type) { return getRegionSizeMB(region) * getOrComputeLocality(region, server, type); }
3.68
graphhopper_VectorTile_getTagsList
/** * <pre> * Tags of this feature are encoded as repeated pairs of * integers. * A detailed description of tags is located in sections * 4.2 and 4.4 of the specification * </pre> * * <code>repeated uint32 tags = 2 [packed = true];</code> */ public java.util.List<java.lang.Integer> getTagsList() { return java.util.Collections.unmodifiableList(tags_); }
3.68
framework_ComputedStyle_getBorderWidth
/** * Returns the sum of the left and right border width. * * @since 7.5.3 * @return the sum of the left and right border */ public double getBorderWidth() { double borderWidth = getDoubleProperty("borderLeftWidth"); borderWidth += getDoubleProperty("borderRightWidth"); return borderWidth; }
3.68
flink_BinarySegmentUtils_hash
/** * Hashes segments to an int. * * @param segments Source segments. * @param offset Source segments offset. * @param numBytes the number of bytes to hash. */ public static int hash(MemorySegment[] segments, int offset, int numBytes) { if (inFirstSegment(segments, offset, numBytes)) { return MurmurHashUtils.hashBytes(segments[0], offset, numBytes); } else { return hashMultiSeg(segments, offset, numBytes); } }
3.68
zxing_MatrixToImageWriter_writeToFile
/** * @param matrix {@link BitMatrix} to write * @param format image format * @param file file {@link File} to write image to * @param config output configuration * @throws IOException if writes to the file fail * @deprecated use {@link #writeToPath(BitMatrix, String, Path, MatrixToImageConfig)} */ @Deprecated public static void writeToFile(BitMatrix matrix, String format, File file, MatrixToImageConfig config) throws IOException { writeToPath(matrix, format, file.toPath(), config); }
3.68
hbase_MetricsSource_getPeerID
/** * Get the slave peer ID */ public String getPeerID() { return id; }
3.68
hbase_MasterProcedureScheduler_dumpLocks
/** * For debugging. Expensive. */ public String dumpLocks() throws IOException { schedLock(); try { // TODO: Refactor so we stream out locks for case when millions; i.e. take a PrintWriter return this.locking.toString(); } finally { schedUnlock(); } }
3.68
open-banking-gateway_EncryptionWithInitVectorOper_encryption
/** * Encryption cipher * @param keyWithIv Symmetric key and initialization vector * @return Symmetric encryption cipher */ @SneakyThrows public Cipher encryption(SecretKeyWithIv keyWithIv) { Cipher cipher = Cipher.getInstance(encSpec.getCipherAlgo()); cipher.init( Cipher.ENCRYPT_MODE, keyWithIv.getSecretKey(), new IvParameterSpec(keyWithIv.getIv()) ); return cipher; }
3.68
flink_LimitedConnectionsFileSystem_getMaxNumOpenOutputStreams
/** Gets the maximum number of concurrently open output streams. */ public int getMaxNumOpenOutputStreams() { return maxNumOpenOutputStreams; }
3.68
hadoop_JobQueueChangeEvent_getJobId
/** Get the Job ID */ public JobID getJobId() { return JobID.forName(datum.jobid.toString()); }
3.68
hbase_SimpleServerRpcConnection_initByteBuffToReadInto
// Creates the ByteBuff and CallCleanup and assigns them to the Connection instance. private void initByteBuffToReadInto(int length) { this.data = rpcServer.bbAllocator.allocate(length); this.callCleanup = data::release; }
3.68
hadoop_CachedDNSToSwitchMapping_getUncachedHosts
/** * @param names a list of hostnames to probe for being cached * @return the hosts from 'names' that have not been cached previously */ private List<String> getUncachedHosts(List<String> names) { // find out all names without cached resolved location List<String> unCachedHosts = new ArrayList<String>(names.size()); for (String name : names) { if (cache.get(name) == null) { unCachedHosts.add(name); } } return unCachedHosts; }
3.68
flink_DefaultVertexParallelismStore_applyJobResourceRequirements
/** * Create a new {@link VertexParallelismStore} that reflects given {@link * JobResourceRequirements}. * * @param oldVertexParallelismStore old vertex parallelism store that serves as a base for the * new one * @param jobResourceRequirements to apply over the old vertex parallelism store * @return new vertex parallelism store iff it was updated */ public static Optional<VertexParallelismStore> applyJobResourceRequirements( VertexParallelismStore oldVertexParallelismStore, JobResourceRequirements jobResourceRequirements) { final DefaultVertexParallelismStore newVertexParallelismStore = new DefaultVertexParallelismStore(); boolean changed = false; for (final JobVertexID jobVertexId : jobResourceRequirements.getJobVertices()) { final VertexParallelismInformation oldVertexParallelismInfo = oldVertexParallelismStore.getParallelismInfo(jobVertexId); final JobVertexResourceRequirements.Parallelism parallelismSettings = jobResourceRequirements.getParallelism(jobVertexId); final int minParallelism = parallelismSettings.getLowerBound(); final int parallelism = parallelismSettings.getUpperBound(); newVertexParallelismStore.setParallelismInfo( jobVertexId, new DefaultVertexParallelismInfo( minParallelism, parallelism, oldVertexParallelismInfo.getMaxParallelism(), RESCALE_MAX_REJECT)); changed |= oldVertexParallelismInfo.getMinParallelism() != minParallelism || oldVertexParallelismInfo.getParallelism() != parallelism; } return changed ? Optional.of(newVertexParallelismStore) : Optional.empty(); }
3.68
hadoop_UnmanagedApplicationManager_createRMProxy
/** * Returns RM proxy for the specified protocol type. Unit test cases can * override this method and return mock proxy instances. * * @param protocol protocol of the proxy * @param config configuration * @param user ugi for the proxy connection * @param token token for the connection * @param <T> type of the proxy * @return the proxy instance * @throws IOException if fails to create the proxy */ protected <T> T createRMProxy(Class<T> protocol, Configuration config, UserGroupInformation user, Token<AMRMTokenIdentifier> token) throws IOException { return AMRMClientUtils.createRMProxy(config, protocol, user, token); }
3.68
dubbo_DubboBootstrap_application
/** * Set the {@link ApplicationConfig} * * @param applicationConfig the {@link ApplicationConfig} * @return current {@link DubboBootstrap} instance */ public DubboBootstrap application(ApplicationConfig applicationConfig) { applicationConfig.setScopeModel(applicationModel); configManager.setApplication(applicationConfig); return this; }
3.68
rocketmq-connect_MetricUtils_getMeterValue
/** * Gets the meter value. * * @param name the metric name, whose type selects which rate to read * @param meter the meter to read from * @return the selected rate value */ public static Double getMeterValue(MetricName name, Meter meter) { if (name.getType().equals(Stat.NoneType.none.name())) { throw new IllegalArgumentException("Meter type configuration error"); } Stat.RateType rateType = Stat.RateType.valueOf(name.getType()); switch (rateType) { case MeanRate: return meter.getMeanRate(); case OneMinuteRate: return meter.getOneMinuteRate(); case FiveMinuteRate: return meter.getFiveMinuteRate(); case FifteenMinuteRate: return meter.getFifteenMinuteRate(); default: return 0.0; } }
3.68
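The rate accessors match the Dropwizard Metrics Meter API; a small usage sketch under that assumption:

    import com.codahale.metrics.Meter;

    Meter meter = new Meter();
    meter.mark(100);                              // record 100 events
    double oneMinute = meter.getOneMinuteRate();  // what the OneMinuteRate branch above returns
    double mean = meter.getMeanRate();            // what the MeanRate branch above returns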
rocketmq-connect_FileSinkConnector_start
/** * Start the component * * @param config component context */ @Override public void start(KeyValue config) { this.config = config; }
3.68
framework_VScrollTable_setText
/** * Sets the text of the footer. * * @param footerText * The text in the footer */ public void setText(String footerText) { if (footerText == null || footerText.equals("")) { footerText = "&nbsp;"; } DOM.setInnerHTML(captionContainer, footerText); }
3.68
hbase_HDFSBlocksDistribution_getHost
/** Returns the host name */ public String getHost() { return host; }
3.68
hudi_HoodieMetaSyncOperations_getLastCommitTimeSynced
/** * Get the timestamp of last sync. */ default Option<String> getLastCommitTimeSynced(String tableName) { return Option.empty(); }
3.68
querydsl_Expressions_dslOperation
/** * Create a new Operation expression * * @param type type of expression * @param operator operator * @param args operation arguments * @return operation expression */ public static <T> DslOperation<T> dslOperation(Class<? extends T> type, Operator operator, Expression<?>... args) { return new DslOperation<T>(type, operator, args); }
3.68
hadoop_ServiceLauncher_isClassnameDefined
/** * Probe for service classname being defined. * @return true if the classname is set */ private boolean isClassnameDefined() { return serviceClassName != null && !serviceClassName.isEmpty(); }
3.68
hbase_ChunkCreator_getJumboChunk
/** * Creates and inits a chunk of a special size, bigger than a regular chunk size. Such a chunk * will never come from the pool and will always be allocated on demand. * @param jumboSize the special size to be used * @return the chunk that was initialized */ Chunk getJumboChunk(int jumboSize) { int allocSize = jumboSize + SIZEOF_CHUNK_HEADER; if (allocSize <= this.getChunkSize(ChunkType.DATA_CHUNK)) { LOG.warn("Jumbo chunk size " + jumboSize + " must be more than regular chunk size " + this.getChunkSize(ChunkType.DATA_CHUNK) + ". Converting to regular chunk."); return getChunk(); } // the new chunk is going to hold the jumbo cell data and needs to be referenced by // a strong map. return getChunk(ChunkType.JUMBO_CHUNK, allocSize); }
3.68
flink_CatalogFactory_supportedProperties
/** @deprecated Implement the {@link Factory} based stack instead. */ @Deprecated default List<String> supportedProperties() { // Default implementation for catalogs implementing the new {@link Factory} stack instead. return null; }
3.68
morf_HumanReadableStatementHelper_generateOrderByClause
/** * Generates a string describing a record ordering clause. If there is no ordering clause * then an empty string is returned. */ private static String generateOrderByClause(final List<AliasedField> fields) { if (fields == null || fields.isEmpty()) { return ""; } else { return " ordered by " + generateFieldSymbolStrings(fields); } }
3.68
hbase_AsyncAdmin_compact
/** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to compact * @param columnFamily column family within a table. If not present, compact the table's all * column families. */ default CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily) { return compact(tableName, columnFamily, CompactType.NORMAL); }
3.68
hbase_EventHandler_prepare
/** * Event handlers should do all the necessary checks in this method (rather than in the * constructor, or in process()) so that the caller, which is mostly executed in the ipc context * can fail fast. Process is executed async from the client ipc, so this method gives a quick * chance to do some basic checks. Should be called after constructing the EventHandler, and * before process(). * @return the instance of this class * @throws Exception when something goes wrong */ public EventHandler prepare() throws Exception { return this; }
3.68
hbase_MunkresAssignment_findUncoveredZero
/** * Find a zero cost assignment which is not covered. If there are no zero cost assignments which * are uncovered, then null will be returned. * @return pair of row and column indices of an uncovered zero or null */ private Pair<Integer, Integer> findUncoveredZero() { for (int r = 0; r < rows; r++) { if (leastInRow[r] == 0) { return new Pair<>(r, leastInRowIndex[r]); } } return null; }
3.68
framework_AbstractTextField_setValue
/** * Sets the value of this text field. If the new value is not equal to * {@code getValue()}, fires a {@link ValueChangeEvent}. Throws * {@code NullPointerException} if the value is null. * * @param value * the new value, not {@code null} * @throws NullPointerException * if {@code value} is {@code null} */ @Override public void setValue(String value) { Objects.requireNonNull(value, "value cannot be null"); setValue(value, false); }
3.68
pulsar_ThreadRuntime_start
/** * The core logic that initialize the thread container and executes the function. */ @Override public void start() throws Exception { // extract class loader for function ClassLoader functionClassLoader = getFunctionClassLoader(instanceConfig, instanceConfig.getFunctionId(), jarFile, narExtractionDirectory, fnCache, connectorsManager, functionsManager, InstanceUtils.calculateSubjectType(instanceConfig.getFunctionDetails())); ClassLoader transformFunctionClassLoader = transformFunctionFile == null ? null : getFunctionClassLoader( instanceConfig, instanceConfig.getTransformFunctionId(), transformFunctionFile, narExtractionDirectory, fnCache, connectorsManager, functionsManager, Function.FunctionDetails.ComponentType.FUNCTION); // re-initialize JavaInstanceRunnable so that variables in constructor can be re-initialized this.javaInstanceRunnable = new JavaInstanceRunnable( instanceConfig, clientBuilder, pulsarClient, pulsarAdmin, stateStorageImplClass, stateStorageServiceUrl, secretsProvider, collectorRegistry, functionClassLoader, transformFunctionClassLoader); log.info("ThreadContainer starting function with instanceId {} functionId {} namespace {}", instanceConfig.getInstanceId(), instanceConfig.getFunctionId(), instanceConfig.getFunctionDetails().getNamespace()); this.fnThread = new Thread(threadGroup, javaInstanceRunnable, String.format("%s-%s", FunctionCommon.getFullyQualifiedName(instanceConfig.getFunctionDetails()), instanceConfig.getInstanceId())); this.fnThread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { @Override public void uncaughtException(Thread t, Throwable e) { log.error("Uncaught exception in thread {}", t, e); } }); this.fnThread.start(); }
3.68
framework_VGridLayout_hiddenEmptyColumn
/** * Checks if it is ok to hide (or ignore) the given column. * * @param columnIndex * the column to check * @return true if the column should be interpreted as non-existent (hides * extra spacing) */ private boolean hiddenEmptyColumn(int columnIndex) { return hideEmptyRowsAndColumns && !colHasComponentsOrColSpan(columnIndex) && !explicitColRatios.contains(columnIndex); }
3.68
AreaShop_RegionSign_remove
/** * Remove this sign from the region. */ public void remove() { getLocation().getBlock().setType(Material.AIR); signsFeature.getSignsRef().remove(getStringLocation()); SignsFeature.getAllSigns().remove(getStringLocation()); SignsFeature.getSignsByChunk().get(getStringChunk()).remove(this); getRegion().setSetting("general.signs." + key, null); }
3.68
morf_SelectStatementBuilder_useImplicitJoinOrder
/** * If supported by the dialect, hints to the database that joins should be applied in the order * they are written in the SQL statement. * * <p>This is supported to a greater or lesser extent on different SQL dialects. For instance, * MySQL has no means to force ordering on anything except inner joins, but we do our best. As * a result, this is not a panacea and may need to be combined with * {@link #useIndex(TableReference, String)} to achieve a consistent effect across * platforms.</p> * * <p>In general, as with all query plan modification, <strong>do not use this unless you know * exactly what you are doing</strong>.</p> * * <p>As for all query plan modification (see also {@link #optimiseForRowCount(int)} * and {@link #useIndex(TableReference, String)}): where supported on the target database, these directives * are applied in the SQL in the order they are called on {@link SelectStatement}. This usually * affects their precedence or relative importance, depending on the platform.</p> * * @return this, for method chaining. */ public SelectStatementBuilder useImplicitJoinOrder() { this.hints.add(new UseImplicitJoinOrder()); return this; }
3.68
querydsl_EntityType_getUncapSimpleName
/** * Use {@link #getModifiedSimpleName()} */ @Deprecated public String getUncapSimpleName() { return modifiedSimpleName; }
3.68
flink_GenericDataSinkBase_addInput
/** * Adds to the input the union of the given operators. * * @param inputs The operator(s) to be unioned with the input. * @deprecated This method will be removed in future versions. Use the {@link * org.apache.flink.api.common.operators.Union} operator instead. */ @Deprecated public void addInput(Operator<IN>... inputs) { checkNotNull(inputs, "The input may not be null."); this.input = Operator.createUnionCascade(this.input, inputs); }
3.68
hbase_TableBackupClient_addManifest
/** * Add manifest for the current backup. The manifest is stored within the table backup directory. * @param backupInfo The current backup info * @throws IOException exception */ protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type, Configuration conf) throws IOException { // set the overall backup phase : store manifest backupInfo.setPhase(BackupPhase.STORE_MANIFEST); BackupManifest manifest; // Since we have each table's backup in its own directory structure, // we'll store its manifest with the table directory. for (TableName table : backupInfo.getTables()) { manifest = new BackupManifest(backupInfo, table); ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupInfo, table); for (BackupImage image : ancestors) { manifest.addDependentImage(image); } if (type == BackupType.INCREMENTAL) { // We'll store the log timestamps for this table only in its manifest. Map<TableName, Map<String, Long>> tableTimestampMap = new HashMap<>(); tableTimestampMap.put(table, backupInfo.getIncrTimestampMap().get(table)); manifest.setIncrTimestampMap(tableTimestampMap); ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupInfo); for (BackupImage image : ancestorss) { manifest.addDependentImage(image); } } manifest.store(conf); } // For incremental backup, we store an overall manifest in // <backup-root-dir>/WALs/<backup-id> // This is used when creating the next incremental backup if (type == BackupType.INCREMENTAL) { manifest = new BackupManifest(backupInfo); // set the table region server start and end timestamps for incremental backup manifest.setIncrTimestampMap(backupInfo.getIncrTimestampMap()); ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupInfo); for (BackupImage image : ancestors) { manifest.addDependentImage(image); } manifest.store(conf); } }
3.68
hadoop_EditLogOutputStream_shouldForceSync
/** * Implements the policy for when to automatically sync the buffered edits log. * The buffered edits can be flushed when the buffer becomes full or * a certain period of time has elapsed. * * @return true if the buffered data should be automatically synced to disk */ public boolean shouldForceSync() { return false; }
3.68
rocketmq-connect_JsonConverterConfig_schemasEnabled
/** * Return whether schemas are enabled. * * @return true if enabled, or false otherwise */ public boolean schemasEnabled() { return schemasEnabled; }
3.68
flink_StateTtlConfig_setStateVisibility
/** * Sets the state visibility. * * @param stateVisibility The state visibility configures whether expired user value can be * returned or not. */ @Nonnull public Builder setStateVisibility(@Nonnull StateVisibility stateVisibility) { this.stateVisibility = stateVisibility; return this; }
3.68
dubbo_AbstractH2TransportListener_headersToMap
/** * Parse metadata to a KV pairs map. * * @param trailers the metadata from remote * @return KV pairs map */ protected Map<String, Object> headersToMap(Http2Headers trailers, Supplier<Object> convertUpperHeaderSupplier) { if (trailers == null) { return Collections.emptyMap(); } Map<String, Object> attachments = new HashMap<>(trailers.size()); for (Map.Entry<CharSequence, CharSequence> header : trailers) { String key = header.getKey().toString(); if (key.endsWith(TripleConstant.HEADER_BIN_SUFFIX) && key.length() > TripleConstant.HEADER_BIN_SUFFIX.length()) { try { String realKey = key.substring(0, key.length() - TripleConstant.HEADER_BIN_SUFFIX.length()); byte[] value = StreamUtils.decodeASCIIByte(header.getValue()); attachments.put(realKey, value); } catch (Exception e) { LOGGER.error(PROTOCOL_FAILED_PARSE, "", "", "Failed to parse response attachment key=" + key, e); } } else { attachments.put(key, header.getValue().toString()); } } // try converting upper key Object obj = convertUpperHeaderSupplier.get(); if (obj == null) { return attachments; } if (obj instanceof String) { String json = TriRpcStatus.decodeMessage((String) obj); Map<String, String> map = JsonUtils.toJavaObject(json, Map.class); for (Map.Entry<String, String> entry : map.entrySet()) { Object val = attachments.remove(entry.getKey()); if (val != null) { attachments.put(entry.getValue(), val); } } } else { // If convertUpperHeaderSupplier does not return String, just fail... // Internal invocation, use INTERNAL_ERROR instead. LOGGER.error( INTERNAL_ERROR, "wrong internal invocation", "", "Triple convertNoLowerCaseHeader error, obj is not String"); } return attachments; }
3.68
framework_Slot_getVerticalSpacing
/** * Get the vertical amount in pixels of the spacing. * * @return the height of the spacing element or zero if this slot doesn't * have spacing */ protected int getVerticalSpacing() { if (spacer == null) { return 0; } else if (layout.getLayoutManager() != null) { return layout.getLayoutManager().getOuterHeight(spacer); } return spacer.getOffsetHeight(); }
3.68
flink_FlinkRelMdCollation_table
/** Helper method to determine a {@link org.apache.calcite.rel.core.TableScan}'s collation. */ public static List<RelCollation> table(RelOptTable table) { // Behavior change since CALCITE-4215: the default collations is null. // In Flink, the default is an empty list. List<RelCollation> collations = table.getCollationList(); return collations == null ? Collections.emptyList() : collations; }
3.68
flink_ShortValue_setValue
/** * Sets the encapsulated short to the specified value. * * @param value the new value of the encapsulated short. */ public void setValue(short value) { this.value = value; }
3.68