name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, 3.26 to 3.68
querydsl_MySQLQueryFactory_insertOnDuplicateKeyUpdate
/** * Create an INSERT ... ON DUPLICATE KEY UPDATE clause * * @param entity table to insert to * @param clauses the expressions forming the ON DUPLICATE KEY UPDATE clause * @return insert clause */ public SQLInsertClause insertOnDuplicateKeyUpdate(RelationalPath<?> entity, Expression<?>... clauses) { SQLInsertClause insert = insert(entity); StringBuilder flag = new StringBuilder(" on duplicate key update "); for (int i = 0; i < clauses.length; i++) { flag.append(i > 0 ? ", " : "").append("{").append(i).append("}"); } insert.addFlag(Position.END, ExpressionUtils.template(String.class, flag.toString(), clauses)); return insert; }
3.68
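A minimal usage sketch for insertOnDuplicateKeyUpdate above. QUser, its columns, and the factory wiring are hypothetical; Expressions.stringTemplate is the stock QueryDSL helper used here to build the update expression passed as a clause.

// Hypothetical generated query type and wiring; only insertOnDuplicateKeyUpdate is from the snippet.
QUser user = QUser.user;
MySQLQueryFactory queryFactory = new MySQLQueryFactory(configuration, connectionProvider); // assumed wiring

queryFactory.insertOnDuplicateKeyUpdate(user,
        Expressions.stringTemplate("login_count = login_count + 1"))
    .set(user.id, 1L)
    .set(user.name, "alice")
    .execute();
// SQL shape: insert into user (id, name) values (?, ?) on duplicate key update login_count = login_count + 1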
hadoop_TFile_getKeyNear
/** * Get a sample key that is within a block whose starting offset is greater * than or equal to the specified offset. * * @param offset * The file offset. * @return the key that fits the requirement; or null if no such key exists * (which could happen if the offset is close to the end of the * TFile). * @throws IOException raised on errors performing I/O. */ public RawComparable getKeyNear(long offset) throws IOException { int blockIndex = readerBCF.getBlockIndexNear(offset); if (blockIndex == -1) return null; checkTFileDataIndex(); return new ByteArray(tfileIndex.getEntry(blockIndex).key); }
3.68
morf_TempTransitionalBuilderWrapper_wrapper
/** * Temporary builder wrapper. This is to ensure other methods can return builders rather than instances, until * * * @param instance the instance to wrap. * @return a builder that wraps the given instance. * @param <T> the type of the instance being wrapped */ public static <T> Builder<T> wrapper(final T instance){ return new BuilderWrapper<>(instance); }
3.68
hadoop_IOStatisticsSupport_stubDurationTracker
/** * Get a stub duration tracker. * @return a stub tracker. */ public static DurationTracker stubDurationTracker() { return StubDurationTracker.STUB_DURATION_TRACKER; }
3.68
flink_InputSelection_areAllInputsSelected
/** * Tests if all inputs are selected. * * @return {@code true} if the input mask equals -1, {@code false} otherwise. */ public boolean areAllInputsSelected() { return inputMask == -1L; }
3.68
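A quick standalone illustration (not Flink code) of why a mask of -1L reads as "all inputs selected": in two's complement, -1L is the 64-bit word with every bit set, so every input's bit is on.

public class MaskDemo {
    public static void main(String[] args) {
        long inputMask = -1L;                              // all 64 bits set
        System.out.println(inputMask == ~0L);              // true
        System.out.println((inputMask & (1L << 5)) != 0);  // input 5 selected: true
    }
}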
framework_VSlider_setMaxValue
/** * Sets the maximum value for slider. * * @param value * the maximum value to use */ public void setMaxValue(double value) { max = value; }
3.68
flink_InMemoryPartition_overwriteRecordAt
/** * UNSAFE!! Overwrites a record in place; this causes inconsistency or data loss unless the new * record has exactly the same size as the old one * * @param pointer pointer to start of record * @param record record to overwrite old one with * @throws IOException * @deprecated Don't use this; overwriting a record causes inconsistency or data loss unless the * new record has exactly the same size as the old one */ @Deprecated public void overwriteRecordAt(long pointer, T record) throws IOException { long tmpPointer = this.writeView.getCurrentPointer(); this.writeView.resetTo(pointer); this.serializer.serialize(record, this.writeView); this.writeView.resetTo(tmpPointer); }
3.68
flink_CliFrontend_disposeSavepoint
/** Sends a SavepointDisposalRequest to the job manager. */ private void disposeSavepoint( ClusterClient<?> clusterClient, String savepointPath, Duration clientTimeout) throws FlinkException { checkNotNull( savepointPath, "Missing required argument: savepoint path. " + "Usage: bin/flink savepoint -d <savepoint-path>"); logAndSysout("Disposing savepoint '" + savepointPath + "'."); final CompletableFuture<Acknowledge> disposeFuture = clusterClient.disposeSavepoint(savepointPath); logAndSysout("Waiting for response..."); try { disposeFuture.get(clientTimeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { throw new FlinkException("Disposing the savepoint '" + savepointPath + "' failed.", e); } logAndSysout("Savepoint '" + savepointPath + "' disposed."); }
3.68
hadoop_LoggingAuditor_getLastHeader
/** * Get the last header used. * @return the last referrer header generated. */ public String getLastHeader() { return lastHeader; }
3.68
hbase_CompactingMemStore_setIndexType
// setter is used only for testability void setIndexType(IndexType type) { indexType = type; // Because this functionality is for testing only and tests are setting in-memory flush size // according to their need, there is no setting of in-memory flush size, here. // If it is needed, please change in-memory flush size explicitly }
3.68
morf_SqlDialect_tableNameWithSchemaName
/** * @param tableRef The table for which the schema name will be retrieved * @return full table name that includes a schema name and DB-link if present */ protected String tableNameWithSchemaName(TableReference tableRef) { if (StringUtils.isEmpty(tableRef.getSchemaName())) { return schemaNamePrefix() + tableRef.getName(); } else { return tableRef.getSchemaName().toUpperCase() + "." + tableRef.getName(); } }
3.68
graphhopper_PathSimplification_updateInterval
/** * @param p point index * @param s partition index * @return true if the next interval contains only a single point */ private boolean updateInterval(int p, int s) { boolean nextIntervalHasOnlyOnePoint = false; // update interval boundaries final int updatedStart = currIntervalStart[s] - removedPointsInPrevIntervals[s]; final int updatedEnd = currIntervalEnd[s] - removedPointsInPrevIntervals[s] - removedPointsInCurrInterval[s]; this.partitions.get(s).setInterval(currIntervalIndex[s], updatedStart, updatedEnd); // update the removed point counters removedPointsInPrevIntervals[s] += removedPointsInCurrInterval[s]; removedPointsInCurrInterval[s] = 0; // prepare for the next interval currIntervalIndex[s]++; currIntervalStart[s] = p; if (currIntervalIndex[s] >= this.partitions.get(s).size()) { partitionFinished[s] = true; } else { int length = this.partitions.get(s).getIntervalLength(currIntervalIndex[s]); currIntervalEnd[s] += length; // special case at via points etc. if (length == 0) { nextIntervalHasOnlyOnePoint = true; } } return nextIntervalHasOnlyOnePoint; }
3.68
flink_AbstractStreamOperator_getPartitionedState
/** * Creates a partitioned state handle, using the state backend configured for this task. * * @throws IllegalStateException Thrown, if the key/value state was already initialized. * @throws Exception Thrown, if the state backend cannot create the key/value state. */ protected <S extends State, N> S getPartitionedState( N namespace, TypeSerializer<N> namespaceSerializer, StateDescriptor<S, ?> stateDescriptor) throws Exception { return stateHandler.getPartitionedState(namespace, namespaceSerializer, stateDescriptor); }
3.68
hadoop_ExitStatus_getExitCode
/** @return the command line exit code. */ public int getExitCode() { return code; }
3.68
querydsl_JTSGeometryExpressions_ymax
/** * Returns the Y maxima of a 2D or 3D bounding box, or of a geometry. * * @param expr geometry * @return y maxima */ public static NumberExpression<Double> ymax(JTSGeometryExpression<?> expr) { return Expressions.numberOperation(Double.class, SpatialOps.YMAX, expr); }
3.68
hudi_StreamWriteFunction_writeBuffer
/** * Prepare the write data buffer: patch up all the records with correct partition path. */ public List<HoodieRecord> writeBuffer() { // rewrite all the records with new record key return records.stream() .map(record -> record.toHoodieRecord(partitionPath)) .collect(Collectors.toList()); }
3.68
dubbo_PojoUtils_getGenericClassByIndex
/** * Get parameterized type * * @param genericType generic type * @param index index of the target parameterized type * @return Person.class for List<Person> when index = 0; Person.class for Map<String, Person> when index = 1 */ private static Type getGenericClassByIndex(Type genericType, int index) { Type clazz = null; // find parameterized type if (genericType instanceof ParameterizedType) { ParameterizedType t = (ParameterizedType) genericType; Type[] types = t.getActualTypeArguments(); clazz = types[index]; } return clazz; }
3.68
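A standalone sketch of the reflection this method relies on; Holder is a hypothetical class used only to produce a ParameterizedType.

import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.Map;

public class GenericIndexDemo {
    static class Holder { Map<String, Integer> byName; } // hypothetical

    public static void main(String[] args) throws Exception {
        Type t = Holder.class.getDeclaredField("byName").getGenericType();
        if (t instanceof ParameterizedType) {
            Type[] actual = ((ParameterizedType) t).getActualTypeArguments();
            System.out.println(actual[0]); // class java.lang.String  (index = 0)
            System.out.println(actual[1]); // class java.lang.Integer (index = 1)
        }
    }
}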
hibernate-validator_ValidatorFactoryConfigurationHelper_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private static <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
hbase_HRegionLocation_getRegion
/** Returns regionInfo */ public RegionInfo getRegion() { return regionInfo; }
3.68
hbase_Bytes_head
/** * Make a new byte array from a subset of bytes at the head of another. * @param a array * @param length amount of bytes to grab * @return First <code>length</code> bytes from <code>a</code>, or <code>null</code> if <code>a</code> has fewer than <code>length</code> bytes */ public static byte[] head(final byte[] a, final int length) { if (a.length < length) { return null; } byte[] result = new byte[length]; System.arraycopy(a, 0, result, 0, length); return result; }
3.68
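Behavior sketch for head, assuming org.apache.hadoop.hbase.util.Bytes is on the classpath; note the null return when the source is shorter than the requested length.

import java.util.Arrays;
import org.apache.hadoop.hbase.util.Bytes;

public class HeadDemo {
    public static void main(String[] args) {
        byte[] a = {1, 2, 3};
        System.out.println(Arrays.toString(Bytes.head(a, 2))); // [1, 2]
        System.out.println(Bytes.head(a, 5));                  // null: a has fewer than 5 bytes
    }
}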
hadoop_FlowActivityDocument_merge
/** * Merge the {@link FlowActivityDocument} that is passed with the current * document for upsert. * * @param flowActivityDocument * that has to be merged */ @Override public void merge(FlowActivityDocument flowActivityDocument) { if (flowActivityDocument.getDayTimestamp() > 0) { this.dayTimestamp = flowActivityDocument.getDayTimestamp(); } this.flowName = flowActivityDocument.getFlowName(); this.user = flowActivityDocument.getUser(); this.id = flowActivityDocument.getId(); this.flowActivities.addAll(flowActivityDocument.getFlowActivities()); }
3.68
hbase_DynamicMetricsRegistry_newRate
/** * Create a mutable rate metric (for throughput measurement) * @param name of the metric * @param desc description * @param extended produce extended stat (stdev/min/max etc.) if true * @return a new mutable rate metric object */ public MutableRate newRate(String name, String desc, boolean extended) { return newRate(name, desc, extended, true); }
3.68
flink_NumericColumnSummary_getNonMissingCount
/** The number of values that are not null, NaN, or Infinity. */ public long getNonMissingCount() { return nonMissingCount; }
3.68
framework_VAbsoluteLayout_getWidget
/* * (non-Javadoc) * * @see com.google.gwt.user.client.ui.ComplexPanel#getWidget(int) */ @Override public Widget getWidget(int index) { for (int i = 0, j = 0; i < super.getWidgetCount(); i++) { Widget w = super.getWidget(i); if (w instanceof AbsoluteWrapper) { if (j == index) { return w; } else { j++; } } } return null; }
3.68
hmily_SpringCloudHmilyAccountApplication_main
/** * The entry point of application. * * @param args the input arguments */ public static void main(final String[] args) { SpringApplication.run(SpringCloudHmilyAccountApplication.class, args); }
3.68
dubbo_ReferenceConfigBase_getServiceInterfaceClass
/** * Get the service interface class of this reference, i.e. the actual service type of the remote provider. * * @return the actual service interface class, or null if it cannot be resolved */ public Class<?> getServiceInterfaceClass() { Class<?> actualInterface = interfaceClass; if (interfaceClass == GenericService.class) { try { if (getInterfaceClassLoader() != null) { actualInterface = Class.forName(interfaceName, false, getInterfaceClassLoader()); } else { actualInterface = Class.forName(interfaceName); } } catch (ClassNotFoundException e) { return null; } } return actualInterface; }
3.68
framework_Panel_focus
/** * Moves keyboard focus to the component. * * @see Focusable#focus() */ @Override public void focus() { super.focus(); }
3.68
hbase_ZKUtil_createNodeIfNotExistsAndWatch
/** * Creates the specified znode to be a persistent node carrying the specified data. Returns true * if the node was successfully created, false if the node already existed. If the node is created * successfully, a watcher is also set on the node. If the node is not created successfully * because it already exists, this method will also set a watcher on the node but return false. If * there is another problem, a KeeperException will be thrown. * @param zkw zk reference * @param znode path of node * @param data data of node * @return true if node created, false if not, watch set in both cases * @throws KeeperException if unexpected zookeeper exception */ public static boolean createNodeIfNotExistsAndWatch(ZKWatcher zkw, String znode, byte[] data) throws KeeperException { boolean ret = true; try { zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), CreateMode.PERSISTENT); } catch (KeeperException.NodeExistsException nee) { ret = false; } catch (InterruptedException e) { zkw.interruptedException(e); return false; } try { zkw.getRecoverableZooKeeper().exists(znode, zkw); } catch (InterruptedException e) { zkw.interruptedException(e); return false; } return ret; }
3.68
hadoop_BlockData_getFileSize
/** * Gets the size of the associated file. * @return the size of the associated file. */ public long getFileSize() { return fileSize; }
3.68
morf_AbstractSqlDialectTest_testAlterColumnMakePrimary
/** * Test changing column to be the primary key. */ @Test public void testAlterColumnMakePrimary() { testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, DATE_FIELD), column(DATE_FIELD, DataType.DATE).nullable().primaryKey(), expectedAlterColumnMakePrimaryStatements()); }
3.68
hudi_CompletionTimeQueryView_load
/** * Reads instant completion times. * This also updates the 'startToCompletionInstantTimeMap' map with start time/completion time pairs. * Only instants starting from 'startInstant' (inclusive) are considered. */ private void load() { // load active instants first. this.metaClient.getActiveTimeline() .filterCompletedInstants().getInstantsAsStream() .forEach(instant -> setCompletionTime(instant.getTimestamp(), instant.getCompletionTime())); // then load the archived instants. HoodieArchivedTimeline.loadInstants(metaClient, new HoodieArchivedTimeline.StartTsFilter(this.cursorInstant), HoodieArchivedTimeline.LoadMode.SLIM, r -> true, this::readCompletionTime); }
3.68
hadoop_RenameOperation_convertToIOException
/** * Convert a passed in exception (expected to be an IOE or AWS exception) * into an IOException. * @param ex exception caught * @return the exception to throw in the failure handler. */ protected IOException convertToIOException(final Exception ex) { if (ex instanceof IOException) { return (IOException) ex; } else if (ex instanceof SdkException) { return translateException("rename " + sourcePath + " to " + destPath, sourcePath.toString(), (SdkException) ex); } else { // should never happen, but for completeness return new IOException(ex); } }
3.68
hbase_ForeignExceptionUtil_toStackTrace
/** * Unwind a serialized array of {@link StackTraceElementMessage}s to an array of {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized trace, or an empty array if it couldn't be unwound (e.g. wasn't set on * the sender). */ public static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) { if (traceList == null || traceList.isEmpty()) { return new StackTraceElement[0]; // empty array } StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), elem.hasFileName() ? elem.getFileName() : null, elem.getLineNumber()); } return trace; }
3.68
hadoop_ResourceSkyline_setContainerSpec
/** * Set containerSpec. * * @param containerSpecConfig containerSpec. */ public final void setContainerSpec(final Resource containerSpecConfig) { this.containerSpec = containerSpecConfig; }
3.68
hbase_BalanceResponse_newBuilder
/** * Creates a new {@link BalanceResponse.Builder} */ public static Builder newBuilder() { return new Builder(); }
3.68
flink_CoGroupOperator_getPartitioner
/** * Gets the custom partitioner used by this co-group, or {@code null} if none is set. * * @return The custom partitioner used by this co-group, or {@code null} if none is set. */ public Partitioner<?> getPartitioner() { return customPartitioner; }
3.68
hadoop_Cluster_getJob
/** * Get job corresponding to jobid. * * @param jobId the id of the job * @return object of {@link Job} * @throws IOException * @throws InterruptedException */ public Job getJob(JobID jobId) throws IOException, InterruptedException { JobStatus status = client.getJobStatus(jobId); if (status != null) { JobConf conf; try { conf = new JobConf(status.getJobFile()); } catch (RuntimeException ex) { // If job file doesn't exist it means we can't find the job if (ex.getCause() instanceof FileNotFoundException) { return null; } else { throw ex; } } return Job.getInstance(this, status, conf); } return null; }
3.68
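A hedged usage sketch for getJob, assuming the org.apache.hadoop.mapreduce client classes are on the classpath and a cluster is reachable; the job id is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class GetJobDemo {
    public static void main(String[] args) throws Exception {
        Cluster cluster = new Cluster(new Configuration());
        Job job = cluster.getJob(JobID.forName("job_1700000000000_0001")); // illustrative id
        System.out.println(job == null ? "job not found" : job.getStatus().getState());
    }
}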
flink_ResultPartition_finish
/** * Finishes the result partition. * * <p>After this operation, it is not possible to add further data to the result partition. * * <p>For BLOCKING results, this will trigger the deployment of consuming tasks. */ @Override public void finish() throws IOException { checkInProduceState(); isFinished = true; }
3.68
hadoop_ServiceMetricsSink_putMetrics
/** * Publishes service and component metrics to ATS. */ @Override public void putMetrics(MetricsRecord record) { if (serviceTimelinePublisher.isStopped()) { log.warn("ServiceTimelinePublisher has stopped. " + "Not publishing any more metrics to ATS."); return; } boolean isServiceMetrics = false; boolean isComponentMetrics = false; String appId = null; for (MetricsTag tag : record.tags()) { if (tag.name().equals("type") && tag.value().equals("service")) { isServiceMetrics = true; } else if (tag.name().equals("type") && tag.value().equals("component")) { isComponentMetrics = true; break; // if component metrics, no more information required from tag so // break the loop } else if (tag.name().equals("appId")) { appId = tag.value(); } } if (isServiceMetrics && appId != null) { log.debug("Publishing service metrics. {}", record); serviceTimelinePublisher.publishMetrics(record.metrics(), appId, ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(), record.timestamp()); } else if (isComponentMetrics) { log.debug("Publishing Component metrics. {}", record); serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(), ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp()); } }
3.68
flink_TableDescriptor_schema
/** * Define the schema of the {@link TableDescriptor}. * * <p>The schema is typically required. It is optional only in cases where the schema can be * inferred, e.g. {@link Table#insertInto(TableDescriptor)}. */ public Builder schema(@Nullable Schema schema) { this.schema = schema; return this; }
3.68
hadoop_HttpFSExceptionProvider_toResponse
/** * Maps different exceptions thrown by HttpFSServer to HTTP status codes. * <ul> * <li>SecurityException : HTTP UNAUTHORIZED</li> * <li>FileNotFoundException : HTTP NOT_FOUND</li> * <li>IOException : HTTP INTERNAL_SERVER_ERROR</li> * <li>UnsupportedOperationException : HTTP BAD_REQUEST</li> * <li>IllegalArgumentException : HTTP BAD_REQUEST</li> * <li>all other exceptions : HTTP INTERNAL_SERVER_ERROR</li> * </ul> * * @param throwable exception thrown. * * @return mapped HTTP status code */ @Override public Response toResponse(Throwable throwable) { Response.Status status; if (throwable instanceof FileSystemAccessException) { throwable = throwable.getCause(); } if (throwable instanceof ContainerException) { throwable = throwable.getCause(); } if (throwable instanceof SecurityException) { status = Response.Status.UNAUTHORIZED; } else if (throwable instanceof FileNotFoundException) { status = Response.Status.NOT_FOUND; } else if (throwable instanceof IOException) { status = Response.Status.INTERNAL_SERVER_ERROR; logErrorFully(status, throwable); } else if (throwable instanceof UnsupportedOperationException) { status = Response.Status.BAD_REQUEST; logErrorFully(status, throwable); } else if (throwable instanceof IllegalArgumentException) { status = Response.Status.BAD_REQUEST; logErrorFully(status, throwable); } else { status = Response.Status.INTERNAL_SERVER_ERROR; logErrorFully(status, throwable); } return createResponse(status, throwable); }
3.68
framework_Form_getOwnActionManager
/** * Gets the {@link ActionManager} responsible for handling {@link Action}s * added to this Form.<br/> * Note that Form has another ActionManager inherited from * {@link AbstractField}. The ownActionManager handles Actions attached to * this Form specifically, while the ActionManager in AbstractField * delegates to the containing Window (i.e. global Actions). * * @return the ActionManager owned by this Form */ protected ActionManager getOwnActionManager() { if (ownActionManager == null) { ownActionManager = new ActionManager(this); } return ownActionManager; }
3.68
pulsar_ClientCnx_idleCheck
/** * Checks whether the client connection is now free. This method will not change the state to idle. * @return true if the connection is eligible. */ public boolean idleCheck() { if (pendingRequests != null && !pendingRequests.isEmpty()) { return false; } if (waitingLookupRequests != null && !waitingLookupRequests.isEmpty()) { return false; } if (!consumers.isEmpty()) { return false; } if (!producers.isEmpty()) { return false; } if (!transactionMetaStoreHandlers.isEmpty()) { return false; } return true; }
3.68
hadoop_RequestFactoryImpl_copyEncryptionParameters
/** * Propagate encryption parameters from source file if set else use the * current filesystem encryption settings. * @param copyObjectRequestBuilder copy object request builder. * @param srcom source object metadata. */ protected void copyEncryptionParameters(HeadObjectResponse srcom, CopyObjectRequest.Builder copyObjectRequestBuilder) { final S3AEncryptionMethods algorithm = getServerSideEncryptionAlgorithm(); String sourceKMSId = srcom.ssekmsKeyId(); if (isNotEmpty(sourceKMSId)) { // source KMS ID is propagated LOG.debug("Propagating SSE-KMS settings from source {}", sourceKMSId); copyObjectRequestBuilder.ssekmsKeyId(sourceKMSId); return; } switch (algorithm) { case SSE_S3: copyObjectRequestBuilder.serverSideEncryption(algorithm.getMethod()); break; case SSE_KMS: copyObjectRequestBuilder.serverSideEncryption(ServerSideEncryption.AWS_KMS); // Set the KMS key if present, else S3 uses AWS managed key. EncryptionSecretOperations.getSSEAwsKMSKey(encryptionSecrets) .ifPresent(copyObjectRequestBuilder::ssekmsKeyId); break; case DSSE_KMS: copyObjectRequestBuilder.serverSideEncryption(ServerSideEncryption.AWS_KMS_DSSE); EncryptionSecretOperations.getSSEAwsKMSKey(encryptionSecrets) .ifPresent(copyObjectRequestBuilder::ssekmsKeyId); break; case SSE_C: EncryptionSecretOperations.getSSECustomerKey(encryptionSecrets) .ifPresent(base64customerKey -> copyObjectRequestBuilder .copySourceSSECustomerAlgorithm(ServerSideEncryption.AES256.name()) .copySourceSSECustomerKey(base64customerKey) .copySourceSSECustomerKeyMD5( Md5Utils.md5AsBase64(Base64.getDecoder().decode(base64customerKey))) .sseCustomerAlgorithm(ServerSideEncryption.AES256.name()) .sseCustomerKey(base64customerKey).sseCustomerKeyMD5( Md5Utils.md5AsBase64(Base64.getDecoder().decode(base64customerKey)))); break; case CSE_KMS: case CSE_CUSTOM: case NONE: break; default: LOG.warn(UNKNOWN_ALGORITHM + ": " + algorithm); } }
3.68
hbase_HRegion_prepareDeleteTimestamps
/** * Set up correct timestamps in the KVs in Delete object. * <p/> * Caller should have the row and region locks. */ private void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap, byte[] byteNow) throws IOException { for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) { byte[] family = e.getKey(); List<Cell> cells = e.getValue(); assert cells instanceof RandomAccess; Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR); int listSize = cells.size(); for (int i = 0; i < listSize; i++) { Cell cell = cells.get(i); // Check if time is LATEST, change to time of most recent addition if so // This is expensive. if ( cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && PrivateCellUtil.isDeleteType(cell) ) { byte[] qual = CellUtil.cloneQualifier(cell); Integer count = kvCount.get(qual); if (count == null) { kvCount.put(qual, 1); } else { kvCount.put(qual, count + 1); } count = kvCount.get(qual); Get get = new Get(CellUtil.cloneRow(cell)); get.readVersions(count); get.addColumn(family, qual); if (coprocessorHost != null) { if ( !coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell, byteNow, get) ) { updateDeleteLatestVersionTimestamp(cell, get, count, byteNow); } } else { updateDeleteLatestVersionTimestamp(cell, get, count, byteNow); } } else { PrivateCellUtil.updateLatestStamp(cell, byteNow); } } } }
3.68
flink_FlinkPipelineTranslationUtil_getJobGraphUnderUserClassLoader
/** * Transmogrifies the given {@link Pipeline} under the userClassloader to a {@link JobGraph}. */ public static JobGraph getJobGraphUnderUserClassLoader( final ClassLoader userClassloader, final Pipeline pipeline, final Configuration configuration, final int defaultParallelism) { final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(userClassloader); return FlinkPipelineTranslationUtil.getJobGraph( userClassloader, pipeline, configuration, defaultParallelism); } finally { Thread.currentThread().setContextClassLoader(contextClassLoader); } }
3.68
flink_ContextResolvedTable_getTable
/** Returns the original metadata object returned by the catalog. */ @SuppressWarnings("unchecked") public <T extends CatalogBaseTable> T getTable() { return (T) resolvedTable.getOrigin(); }
3.68
hbase_SaslClientAuthenticationProvider_getRealUser
/** * Returns the "real" user, the user who has the credentials being authenticated by the remote * service, in the form of an {@link UserGroupInformation} object. It is common in the Hadoop * "world" to have distinct notions of a "real" user and a "proxy" user. A "real" user is the user * which actually has the credentials (often, a Kerberos ticket), but some code may be running as * some other user who has no credentials. This method gives the authentication provider a chance * to acknowledge this is happening and ensure that any RPCs are executed with the real user's * credentials, because executing them as the proxy user would result in failure because no * credentials exist to authenticate the RPC. Not all implementations will need to implement this * method. By default, the provided User's UGI is returned directly. */ default UserGroupInformation getRealUser(User ugi) { return ugi.getUGI(); }
3.68
pulsar_ConsumerConfiguration_getAckTimeoutRedeliveryBackoff
/** * @return the configured {@link RedeliveryBackoff} for the consumer */ public RedeliveryBackoff getAckTimeoutRedeliveryBackoff() { return conf.getAckTimeoutRedeliveryBackoff(); }
3.68
hadoop_WritableFactories_setFactory
/** * Define a factory for a class. * @param c the class. * @param factory the factory to associate with the class. */ public static void setFactory(Class c, WritableFactory factory) { CLASS_TO_FACTORY.put(c, factory); }
3.68
hbase_MasterObserver_preSwitchExceedThrottleQuota
/** * Called before switching exceed throttle quota state. * @param ctx the coprocessor instance's environment * @param enable the exceed throttle quota value */ default void preSwitchExceedThrottleQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx, final boolean enable) throws IOException { }
3.68
zxing_BitMatrix_rotate90
/** * Modifies this {@code BitMatrix} to represent the same but rotated 90 degrees counterclockwise */ public void rotate90() { int newWidth = height; int newHeight = width; int newRowSize = (newWidth + 31) / 32; int[] newBits = new int[newRowSize * newHeight]; for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { int offset = y * rowSize + (x / 32); if (((bits[offset] >>> (x & 0x1f)) & 1) != 0) { int newOffset = (newHeight - 1 - x) * newRowSize + (y / 32); newBits[newOffset] |= 1 << (y & 0x1f); } } } width = newWidth; height = newHeight; rowSize = newRowSize; bits = newBits; }
3.68
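A small check of the counterclockwise mapping in rotate90, assuming com.google.zxing.common.BitMatrix is on the classpath: a bit at (x, y) ends up at (y, oldWidth - 1 - x).

import com.google.zxing.common.BitMatrix;

public class RotateDemo {
    public static void main(String[] args) {
        BitMatrix m = new BitMatrix(3, 2); // width 3, height 2
        m.set(2, 0);                       // bit at x=2, y=0
        m.rotate90();                      // now width 2, height 3
        System.out.println(m.get(0, 0));   // true: (2, 0) -> (0, 3 - 1 - 2) = (0, 0)
    }
}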
hbase_StripeCompactionPolicy_setMajorRange
/** * Sets compaction "major range". Major range is the key range for which all the files are * included, so they can be treated like major-compacted files. * @param startRow Left boundary, inclusive. * @param endRow Right boundary, exclusive. */ public void setMajorRange(byte[] startRow, byte[] endRow) { this.majorRangeFromRow = startRow; this.majorRangeToRow = endRow; }
3.68
hadoop_TimelineReaderWebServicesUtils_getUserName
/** * Get username from caller UGI. * @param callerUGI caller UGI. * @return username. */ static String getUserName(UserGroupInformation callerUGI) { return ((callerUGI != null) ? callerUGI.getUserName().trim() : ""); }
3.68
dubbo_SlidingWindow_getPaneValue
/** * Get statistic value from pane at the specified timestamp. * * @param timeMillis the specified timestamp in milliseconds. * @return the statistic value if pane at the specified timestamp is up-to-date; otherwise null. */ public T getPaneValue(long timeMillis) { if (timeMillis < 0) { return null; } int paneIdx = calculatePaneIdx(timeMillis); Pane<T> pane = referenceArray.get(paneIdx); if (pane == null || !pane.isTimeInWindow(timeMillis)) { return null; } return pane.getValue(); }
3.68
hadoop_MetricsLoggerTask_getFilteredAttributes
/** * Get the list of attributes for the MBean, filtering out a few attribute * types. */ private static Set<String> getFilteredAttributes(MBeanInfo mBeanInfo) { Set<String> attributeNames = new HashSet<>(); for (MBeanAttributeInfo attributeInfo : mBeanInfo.getAttributes()) { if (!attributeInfo.getType().equals( "javax.management.openmbean.TabularData") && !attributeInfo.getType().equals( "javax.management.openmbean.CompositeData") && !attributeInfo.getType().equals( "[Ljavax.management.openmbean.CompositeData;")) { attributeNames.add(attributeInfo.getName()); } } return attributeNames; }
3.68
framework_DragEndEvent_getComponent
/** * Returns the drag source component where the dragend event occurred. * * @return Component which was dragged. */ @Override @SuppressWarnings("unchecked") public T getComponent() { return (T) super.getComponent(); }
3.68
hudi_BaseRollbackActionExecutor_deleteInflightAndRequestedInstant
/** * Delete Inflight instant if enabled. * @param deleteInstant Enable Deletion of Inflight instant * @param activeTimeline Hoodie active timeline * @param instantToBeDeleted Instant to be deleted */ protected void deleteInflightAndRequestedInstant(boolean deleteInstant, HoodieActiveTimeline activeTimeline, HoodieInstant instantToBeDeleted) { // Remove the rolled back inflight commits if (deleteInstant) { LOG.info("Deleting instant=" + instantToBeDeleted); activeTimeline.deletePending(instantToBeDeleted); if (instantToBeDeleted.isInflight() && !table.getMetaClient().getTimelineLayoutVersion().isNullVersion()) { // Delete corresponding requested instant instantToBeDeleted = new HoodieInstant(HoodieInstant.State.REQUESTED, instantToBeDeleted.getAction(), instantToBeDeleted.getTimestamp()); activeTimeline.deletePending(instantToBeDeleted); } LOG.info("Deleted pending commit " + instantToBeDeleted); } else { LOG.warn("Rollback finished without deleting inflight instant file. Instant=" + instantToBeDeleted); } }
3.68
hadoop_StateStoreMetrics_setLocationCache
/** * set the count of the location cache access information. * @param name Name of the record. * @param count count of the record. */ public void setLocationCache(String name, long count) { MutableGaugeLong counter = (MutableGaugeLong) registry.get(name); if (counter == null) { counter = registry.newGauge(name, name, count); } counter.set(count); }
3.68
flink_PushLocalAggIntoScanRuleBase_isInputRefOnly
/** * Currently, we only support pushing down an aggregate above a calc that contains input references only. * * @param calc BatchPhysicalCalc * @return true if OK to be pushed down */ protected boolean isInputRefOnly(BatchPhysicalCalc calc) { RexProgram program = calc.getProgram(); // check if condition exists. All filters should have been pushed down. if (program.getCondition() != null) { return false; } return !program.getProjectList().isEmpty() && program.getProjectList().stream() .map(calc.getProgram()::expandLocalRef) .allMatch(RexInputRef.class::isInstance); }
3.68
hbase_Cluster_add
/** * Add a node to the cluster * @param name host name * @param port service port */ public Cluster add(String name, int port) { StringBuilder sb = new StringBuilder(); sb.append(name); sb.append(':'); sb.append(port); return add(sb.toString()); }
3.68
flink_Hardware_getSizeOfPhysicalMemory
/** * Returns the size of the physical memory in bytes. * * @return the size of the physical memory in bytes or {@code -1}, if the size could not be * determined. */ public static long getSizeOfPhysicalMemory() { // first try if the JVM can directly tell us what the system memory is // this works only on Oracle JVMs try { Class<?> clazz = Class.forName("com.sun.management.OperatingSystemMXBean"); Method method = clazz.getMethod("getTotalPhysicalMemorySize"); OperatingSystemMXBean operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean(); // someone may install different beans, so we need to check whether the bean // is in fact the sun management bean if (clazz.isInstance(operatingSystemMXBean)) { return (Long) method.invoke(operatingSystemMXBean); } } catch (ClassNotFoundException e) { // this happens on non-Oracle JVMs, do nothing and use the alternative code paths } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { LOG.warn( "Access to physical memory size: " + "com.sun.management.OperatingSystemMXBean incompatibly changed.", e); } // we now try the OS specific access paths switch (OperatingSystem.getCurrentOperatingSystem()) { case LINUX: return getSizeOfPhysicalMemoryForLinux(); case WINDOWS: return getSizeOfPhysicalMemoryForWindows(); case MAC_OS: return getSizeOfPhysicalMemoryForMac(); case FREE_BSD: return getSizeOfPhysicalMemoryForFreeBSD(); case UNKNOWN: LOG.error("Cannot determine size of physical memory for unknown operating system"); return -1; default: LOG.error("Unrecognized OS: " + OperatingSystem.getCurrentOperatingSystem()); return -1; } }
3.68
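The reflective probe at the heart of getSizeOfPhysicalMemory, extracted as a standalone sketch; it works on JVMs that expose com.sun.management.OperatingSystemMXBean.

import java.lang.management.ManagementFactory;
import java.lang.reflect.Method;

public class PhysicalMemoryProbe {
    public static void main(String[] args) throws Exception {
        Class<?> clazz = Class.forName("com.sun.management.OperatingSystemMXBean");
        Object bean = ManagementFactory.getOperatingSystemMXBean();
        if (clazz.isInstance(bean)) { // guard: a different management bean may be installed
            Method m = clazz.getMethod("getTotalPhysicalMemorySize");
            System.out.println("physical memory: " + (Long) m.invoke(bean) + " bytes");
        }
    }
}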
hbase_ServerManager_stop
/** * Stop the ServerManager. */ public void stop() { if (flushedSeqIdFlusher != null) { flushedSeqIdFlusher.shutdown(); } if (persistFlushedSequenceId) { try { persistRegionLastFlushedSequenceIds(); } catch (IOException e) { LOG.warn("Failed to persist last flushed sequence id of regions" + " to file system", e); } } }
3.68
framework_BeanValidationBinder_getRequiredConfigurator
/** * Gets field required indicator configuration logic. * * @see #setRequiredConfigurator(RequiredFieldConfigurator) * * @return required indicator configurator, may be {@code null} */ public RequiredFieldConfigurator getRequiredConfigurator() { return requiredConfigurator; }
3.68
framework_AbsoluteLayout_setPosition
/** * Sets the position of a component in the layout. * * @param component * the component whose position is set * @param position * the new position of the component */ public void setPosition(Component component, ComponentPosition position) { if (!componentToCoordinates.containsKey(component)) { throw new IllegalArgumentException( "Component must be a child of this layout"); } internalSetPosition(component, position); }
3.68
pulsar_OwnershipCache_getOwnerAsync
/** * Method to get the current owner of the <code>ServiceUnit</code>. * * @param suName * name of the <code>ServiceUnit</code> * @return The ephemeral node data showing the current ownership info in <code>ZooKeeper</code> * or empty if no ownership info is found */ public CompletableFuture<Optional<NamespaceEphemeralData>> getOwnerAsync(NamespaceBundle suName) { CompletableFuture<OwnedBundle> ownedBundleFuture = ownedBundlesCache.getIfPresent(suName); if (ownedBundleFuture != null) { // Either we're the owners or we're trying to become the owner. return ownedBundleFuture.thenApply(serviceUnit -> { // We are the owner of the service unit return Optional.of(serviceUnit.isActive() ? selfOwnerInfo : selfOwnerInfoDisabled); }); } // If we're not the owner, we need to check if anybody else is String path = ServiceUnitUtils.path(suName); return lockManager.readLock(path); }
3.68
morf_SqlDialect_replaceTableFromStatements
/** * This method: * - Uses the provided select statement to generate a CTAS statement using a temporary name * - Drops the original table * - Renames the new table using the original table's name * - Adds indexes from the original table * * @param originalTable the original table this method will replace * @param selectStatement the statement used to populate the replacement table via a CTAS * @return a list of statements for the operation */ public List<String> replaceTableFromStatements(Table originalTable, SelectStatement selectStatement) { // Due to morf's Oracle table length restrictions, our temporary table name cannot be longer than 27 characters final Table newTable = SchemaUtils.table("tmp_" + StringUtils.substring(originalTable.getName(), 0, 23)) .columns(originalTable.columns()); validateStatement(originalTable, selectStatement); // Generate the SQL for the CTAS and post-CTAS operations final List<String> createTableStatements = Lists.newArrayList(); createTableStatements.addAll(addTableFromStatementsWithCasting(newTable, selectStatement)); createTableStatements.addAll(dropTables(ImmutableList.of(originalTable), false, true)); createTableStatements.addAll(renameTableStatements(newTable, originalTable)); createTableStatements.addAll(createAllIndexStatements(originalTable)); return createTableStatements; }
3.68
flink_Acknowledge_get
/** * Gets the singleton instance. * * @return The singleton instance. */ public static Acknowledge get() { return INSTANCE; }
3.68
framework_Button_isHtmlContentAllowed
/** * Return HTML rendering setting. * * @return <code>true</code> if the caption text is to be rendered as HTML, * <code>false</code> otherwise * * @deprecated as of 8.0.0, use {@link #isCaptionAsHtml()} instead. */ @Deprecated public boolean isHtmlContentAllowed() { return getState(false).captionAsHtml; }
3.68
flink_HiveParserASTNode_setUnknownTokenBoundaries
/** * For every node in this subtree, make sure its start/stop tokens are set. Walk depth first, * visit bottom up. Only updates nodes with at least one token index < 0. * * <p>In contrast to the method in the parent class, this method is iterative. */ @Override public void setUnknownTokenBoundaries() { Deque<HiveParserASTNode> stack1 = new ArrayDeque<HiveParserASTNode>(); Deque<HiveParserASTNode> stack2 = new ArrayDeque<HiveParserASTNode>(); stack1.push(this); while (!stack1.isEmpty()) { HiveParserASTNode next = stack1.pop(); stack2.push(next); if (next.children != null) { for (int i = next.children.size() - 1; i >= 0; i--) { stack1.push((HiveParserASTNode) next.children.get(i)); } } } while (!stack2.isEmpty()) { HiveParserASTNode next = stack2.pop(); if (next.children == null) { if (next.startIndex < 0 || next.stopIndex < 0) { next.startIndex = next.stopIndex = next.token.getTokenIndex(); } } else if (next.startIndex >= 0 && next.stopIndex >= 0) { continue; } else if (next.children.size() > 0) { HiveParserASTNode firstChild = (HiveParserASTNode) next.children.get(0); HiveParserASTNode lastChild = (HiveParserASTNode) next.children.get(next.children.size() - 1); next.startIndex = firstChild.getTokenStartIndex(); next.stopIndex = lastChild.getTokenStopIndex(); } } }
3.68
hadoop_HttpFSServerWebApp_getAdminGroup
/** * Returns HttpFSServer admin group. * * @return httpfs admin group. */ public String getAdminGroup() { return adminGroup; }
3.68
hbase_RatioBasedCompactionPolicy_setMinThreshold
/** * Overwrite min threshold for compaction */ public void setMinThreshold(int minThreshold) { comConf.setMinFilesToCompact(minThreshold); }
3.68
hbase_ByteBufferUtils_toDouble
/** * Reads a double value at the given buffer's offset. * @param buffer input byte buffer to read * @param offset offset where double is * @return double value at offset */ public static double toDouble(ByteBuffer buffer, int offset) { return Double.longBitsToDouble(toLong(buffer, offset)); }
3.68
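A standalone round-trip showing the long-bits encoding that toDouble relies on; this uses plain java.nio, not the HBase utility itself.

import java.nio.ByteBuffer;

public class DoubleBitsDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);
        buffer.putDouble(8, 3.14);                          // write a double at offset 8
        long bits = buffer.getLong(8);                      // read the same 8 bytes as a long
        System.out.println(Double.longBitsToDouble(bits));  // 3.14
    }
}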
hadoop_IdentityMapper_map
/** The identity function. Input key/value pair is written directly to * output.*/ public void map(K key, V val, OutputCollector<K, V> output, Reporter reporter) throws IOException { output.collect(key, val); }
3.68
hudi_HoodieCompactionAdminTool_printOperationResult
/** * Print Operation Result. * * @param initialLine Initial Line * @param result Result */ private <T> void printOperationResult(String initialLine, List<T> result) { System.out.println(initialLine); for (T r : result) { System.out.print(r); } }
3.68
hadoop_WindowsGetSpaceUsed_refresh
/** * Override to hook in DUHelper class. */ @Override protected void refresh() { used.set(DUHelper.getFolderUsage(getDirPath())); }
3.68
hbase_BucketCache_disableWriter
// Used for test void disableWriter() { this.writerEnabled = false; }
3.68
graphhopper_PointList_clone
/** * Clones this PointList. If this PointList was immutable, the clone will be mutable. If this PointList was a * {@link ShallowImmutablePointList}, the cloned PointList will be a regular PointList. */ public PointList clone(boolean reverse) { PointList clonePL = new PointList(size(), is3D()); if (is3D()) for (int i = 0; i < size(); i++) { clonePL.add(this.getLat(i), this.getLon(i), this.getEle(i)); } else for (int i = 0; i < size(); i++) { clonePL.add(this.getLat(i), this.getLon(i)); } if (reverse) clonePL.reverse(); return clonePL; }
3.68
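Hedged usage sketch for clone, assuming com.graphhopper.util.PointList is on the classpath; the coordinates are illustrative.

import com.graphhopper.util.PointList;

public class CloneDemo {
    public static void main(String[] args) {
        PointList pl = new PointList(4, false); // capacity 4, 2D
        pl.add(52.52, 13.40);
        pl.add(48.86, 2.35);
        PointList reversed = pl.clone(true);    // mutable copy in reverse order
        System.out.println(reversed.getLat(0)); // 48.86
    }
}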
hadoop_TimelineReaderWebServicesUtils_createTimelineEntityFilters
/** * Parse the passed filters represented as strings and convert them into a * {@link TimelineEntityFilters} object. * @param limit Limit to number of entities to return. * @param createdTimeStart Created time start for the entities to return. * @param createdTimeEnd Created time end for the entities to return. * @param relatesTo Entities to return must match relatesTo. * @param isRelatedTo Entities to return must match isRelatedTo. * @param infofilters Entities to return must match these info filters. * @param conffilters Entities to return must match these config filters. * @param metricfilters Entities to return must match these metric filters. * @param eventfilters Entities to return must match these event filters. * @param fromid Entity id from which to start returning the next set of entities. * @return a {@link TimelineEntityFilters} object. * @throws TimelineParseException if any problem occurs during parsing. */ static TimelineEntityFilters createTimelineEntityFilters(String limit, Long createdTimeStart, Long createdTimeEnd, String relatesTo, String isRelatedTo, String infofilters, String conffilters, String metricfilters, String eventfilters, String fromid) throws TimelineParseException { return new TimelineEntityFilters.Builder() .entityLimit(parseLongStr(limit)) .createdTimeBegin(createdTimeStart) .createTimeEnd(createdTimeEnd) .relatesTo(parseRelationFilters(relatesTo)) .isRelatedTo(parseRelationFilters(isRelatedTo)) .infoFilters(parseKVFilters(infofilters, false)) .configFilters(parseKVFilters(conffilters, true)) .metricFilters(parseMetricFilters(metricfilters)) .eventFilters(parseEventFilters(eventfilters)) .fromId(parseStr(fromid)).build(); }
3.68
AreaShop_FileManager_saveIsRequiredForRegionWorld
/** * Indicates that one or more WorldGuard regions need to be saved. * @param worldName The world containing the regions that should be saved */ public void saveIsRequiredForRegionWorld(String worldName) { worldRegionsRequireSaving.add(worldName); }
3.68
framework_VFilterSelect_selectPrevItem
/** * Selects the previous item in the filtered selections. */ public void selectPrevItem() { debug("VFS.SP: selectPrevItem()"); final int index = menu.getSelectedIndex() - 1; if (index > -1) { selectItem(menu.getItems().get(index)); } else if (index == -1) { selectPrevPage(); } else { if (!menu.getItems().isEmpty()) { selectLastItem(); } } }
3.68
flink_JavaFieldPredicates_ofType
/** * Match the {@link Class} of the {@link JavaField}. * * @param fqClassName the fully qualified name of the class to match * @return A {@link DescribedPredicate} returning true, if and only if the tested {@link * JavaField} has the type named by the given {@code fqClassName}. */ public static DescribedPredicate<JavaField> ofType(String fqClassName) { String className = getClassSimpleNameFromFqName(fqClassName); return DescribedPredicate.describe( "of type " + className, field -> field.getType().getName().equals(fqClassName)); }
3.68
hadoop_MapReduceTrackingUriPlugin_getTrackingUri
/** * Gets the URI to access the given application on the MapReduce history server. * @param id the ID for which a URI is returned * @return the tracking URI * @throws URISyntaxException if the constructed URI is invalid */ @Override public URI getTrackingUri(ApplicationId id) throws URISyntaxException { String jobSuffix = id.toString().replaceFirst("^application_", "job_"); String historyServerAddress = MRWebAppUtil.getJHSWebappURLWithScheme(getConf()); return new URI(historyServerAddress + "/jobhistory/job/"+ jobSuffix); }
3.68
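The id rewrite at the core of getTrackingUri, as a standalone sketch; the application id and server address are illustrative.

public class TrackingUriDemo {
    public static void main(String[] args) {
        String appId = "application_1700000000000_0042";
        String jobSuffix = appId.replaceFirst("^application_", "job_");
        System.out.println("http://history.example.com:19888/jobhistory/job/" + jobSuffix);
        // -> http://history.example.com:19888/jobhistory/job/job_1700000000000_0042
    }
}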
flink_ResourceCounter_withResource
/** * Creates a resource counter with the given resourceProfile and its count. * * @param resourceProfile resourceProfile for the given count * @param count count of the given resourceProfile * @return ResourceCounter which contains the specified resourceProfile and its count */ public static ResourceCounter withResource(ResourceProfile resourceProfile, int count) { Preconditions.checkArgument(count >= 0); return count == 0 ? empty() : new ResourceCounter(Collections.singletonMap(resourceProfile, count)); }
3.68
starts_AnnotationVisitor_visitAnnotation
/** * Visits a nested annotation value of the annotation. * * @param name * the value name. * @param desc * the class descriptor of the nested annotation class. * @return a visitor to visit the actual nested annotation value, or * <code>null</code> if this visitor is not interested in visiting this * nested annotation. <i>The nested annotation value must be fully * visited before calling other methods on this annotation * visitor</i>. */ public AnnotationVisitor visitAnnotation(String name, String desc) { if (av != null) { return av.visitAnnotation(name, desc); } return null; }
3.68
hadoop_UnmanagedApplicationManager_finishApplicationMaster
/** * Unregisters from the resource manager and stops the request handler thread. * * @param request the finishApplicationMaster request * @return the response * @throws YarnException if finishAM call fails * @throws IOException if finishAM call fails */ public FinishApplicationMasterResponse finishApplicationMaster( FinishApplicationMasterRequest request) throws YarnException, IOException { if (this.userUgi == null) { if (this.connectionInitiated) { // This is possible if the async launchUAM is still // blocked and retrying. Return a dummy response in this case. LOG.warn("Unmanaged AM still not successfully launched/registered yet." + " Stopping the UAM heartbeat thread anyways."); return FinishApplicationMasterResponse.newInstance(false); } else { throw new YarnException("finishApplicationMaster should not " + "be called before createAndRegister"); } } FinishApplicationMasterResponse response = this.rmProxyRelayer.finishApplicationMaster(request); if (response.getIsUnregistered()) { shutDownConnections(); } return response; }
3.68
flink_BoundedBlockingSubpartition_createWithMemoryMappedFile
/** * Creates a BoundedBlockingSubpartition that stores the partition data in memory mapped file. * Data is written to and read from the mapped memory region. Disk spilling happens lazily, when * the OS swaps out the pages from the memory mapped file. */ public static BoundedBlockingSubpartition createWithMemoryMappedFile( int index, ResultPartition parent, File tempFile) throws IOException { final MemoryMappedBoundedData bd = MemoryMappedBoundedData.create(tempFile.toPath()); return new BoundedBlockingSubpartition(index, parent, bd, false); }
3.68
zxing_MinimalECIInput_charAt
/** * Returns the {@code byte} value at the specified index. An index ranges from zero * to {@code length() - 1}. The first {@code byte} value of the sequence is at * index zero, the next at index one, and so on, as for array * indexing. * * @param index the index of the {@code byte} value to be returned * * @return the specified {@code byte} value as character or the FNC1 character * * @throws IndexOutOfBoundsException * if the {@code index} argument is negative or not less than * {@code length()} * @throws IllegalArgumentException * if the value at the {@code index} argument is an ECI (@see #isECI) */ public char charAt(int index) { if (index < 0 || index >= length()) { throw new IndexOutOfBoundsException("" + index); } if (isECI(index)) { throw new IllegalArgumentException("value at " + index + " is not a character but an ECI"); } return isFNC1(index) ? (char) fnc1 : (char) bytes[index]; }
3.68
framework_Table_getCacheRate
/** * @see #setCacheRate(double) * * @return the current cache rate value */ public double getCacheRate() { return cacheRate; }
3.68
dubbo_ReflectUtils_isInstance
/** * Check if one object is the implementation for a given interface. * <p> * This method will not trigger classloading for the given interface, therefore it will not lead to error when * the given interface is not visible by the classloader * * @param obj Object to examine * @param interfaceClazzName The given interface * @return true if the object implements the given interface, otherwise return false */ public static boolean isInstance(Object obj, String interfaceClazzName) { for (Class<?> clazz = obj.getClass(); clazz != null && !clazz.equals(Object.class); clazz = clazz.getSuperclass()) { Class<?>[] interfaces = clazz.getInterfaces(); for (Class<?> itf : interfaces) { if (itf.getName().equals(interfaceClazzName)) { return true; } } } return false; }
3.68
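Behavior sketch for isInstance, assuming Dubbo's ReflectUtils (org.apache.dubbo.common.utils is an assumed package location); matching is by interface name, so the interface never has to be loadable by the caller.

import org.apache.dubbo.common.utils.ReflectUtils;

public class IsInstanceDemo {
    public static void main(String[] args) {
        Object list = new java.util.ArrayList<String>();
        System.out.println(ReflectUtils.isInstance(list, "java.util.List")); // true
        System.out.println(ReflectUtils.isInstance(list, "java.util.Map"));  // false
    }
}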
streampipes_OutputStrategies_custom
/** * Creates a {@link org.apache.streampipes.model.output.CustomOutputStrategy}. * * @param outputBoth If two input streams are expected by a pipeline element, you can use outputBoth to indicate * whether the properties of both input streams should be available to the pipeline developer for * selection. * @return CustomOutputStrategy */ public static CustomOutputStrategy custom(boolean outputBoth) { return new CustomOutputStrategy(outputBoth); }
3.68
hbase_PrivateCellUtil_equalsIgnoreMvccVersion
/** * special case for Cell.equals */ public static boolean equalsIgnoreMvccVersion(Cell a, Cell b) { // row boolean res = CellUtil.matchingRows(a, b); if (!res) return res; // family res = CellUtil.matchingColumn(a, b); if (!res) return res; // timestamp: later sorts first if (!CellUtil.matchingTimestamp(a, b)) return false; // type int c = (0xff & b.getTypeByte()) - (0xff & a.getTypeByte()); if (c != 0) return false; else return true; }
3.68
framework_DeclarativeValueProvider_addValue
/** * Sets a {@code value} for the item {@code t}. * * @param t * a data item * @param value * a value for the item {@code t} */ void addValue(T t, String value) { values.put(t, value); }
3.68
hbase_AsyncAdmin_listUnknownServers
/** * List all the unknown region servers. */ default CompletableFuture<List<ServerName>> listUnknownServers() { return this.getClusterMetrics(EnumSet.of(Option.UNKNOWN_SERVERS)) .thenApply(ClusterMetrics::getUnknownServerNames); }
3.68
hbase_QuotaTableUtil_getUserRowKey
/* * ========================================================================= Quota table row key * helpers */ protected static byte[] getUserRowKey(final String user) { return Bytes.add(QUOTA_USER_ROW_KEY_PREFIX, Bytes.toBytes(user)); }
3.68
hadoop_SaslParticipant_evaluateChallengeOrResponse
/** * @see SaslServer#evaluateResponse * @see SaslClient#evaluateChallenge */ public byte[] evaluateChallengeOrResponse(byte[] challengeOrResponse) throws SaslException { if (saslClient != null) { return saslClient.evaluateChallenge(challengeOrResponse); } else { return saslServer.evaluateResponse(challengeOrResponse); } }
3.68
morf_Version2to4TransformingReader_processEscape
/** * @param cbuf The output buffer * @param off the current root output * @param charsRead the number of chars read from the input buffer * @param idx The current scan index in the input buffer * @param sequenceLength The length of the sequence we're replacing * @param escapedString The output escaped sequence * @return The number of valid characters in the output buffer */ private int processEscape(char[] cbuf, int off, int charsRead, int idx, int sequenceLength, String escapedString) { // can be less than zero if we read past the end of this buffer and into the next int charsRemainingInBuffer = Math.max(charsRead - idx - sequenceLength, 0); // Create a temporary buffer to hold the remainder of the buffer we haven't yet scanned // There might be an existing temporary buffer, in which case keep that too. char[] escapedChars = escapedString.toCharArray(); char[] newTemporary = new char[escapedChars.length + charsRemainingInBuffer + temporary.length]; // write the escaped string System.arraycopy(escapedChars, 0, newTemporary, 0, escapedChars.length); // copy in what's left System.arraycopy(cbuf, off + idx + sequenceLength, newTemporary, escapedChars.length, charsRemainingInBuffer); // keep any existing buffer System.arraycopy(temporary, 0, newTemporary, escapedChars.length + charsRemainingInBuffer, temporary.length); temporary = newTemporary; skipChars = 2; // truncate the returned output to where we've got to return idx; }
3.68
flink_CompositeTypeSerializerSnapshot_readOuterSnapshot
/** * Reads the outer snapshot, i.e. any information beyond the nested serializers of the outer * serializer. * * <p>The base implementation of this method reads nothing, i.e. it assumes that the outer * serializer only has nested serializers and no extra information. Otherwise, if the outer * serializer contains some extra information that has been persisted as part of the serializer * snapshot, this must be overridden. Note that this method and the corresponding methods {@link * #writeOuterSnapshot(DataOutputView)}, {@link * #resolveOuterSchemaCompatibility(TypeSerializer)} need to be implemented. * * @param readOuterSnapshotVersion the read version of the outer snapshot. * @param in the {@link DataInputView} to read the outer snapshot from. * @param userCodeClassLoader the user code class loader. */ protected void readOuterSnapshot( int readOuterSnapshotVersion, DataInputView in, ClassLoader userCodeClassLoader) throws IOException {}
3.68
hadoop_SnappyCompressor_reinit
/** * Prepare the compressor to be used in a new stream with settings defined in * the given Configuration * * @param conf Configuration from which new setting are fetched */ @Override public void reinit(Configuration conf) { reset(); }
3.68
open-banking-gateway_PathHeadersMapperTemplate_forExecution
/** * Converts context object into object that can be used for ASPSP API call. * @param context Context to convert * @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls */ public ValidatedPathHeaders<P, H> forExecution(C context) { return new ValidatedPathHeaders<>( toPath.map(context), toHeaders.map(context) ); }
3.68