name | code_snippet | score |
---|---|---|
AreaShop_GeneralRegion_runCommands | /**
* Run commands as the CommandSender, replacing all tags with the relevant values.
* @param sender The sender that should perform the command
* @param commands A list of the commands to run (without slash and with tags)
*/
public void runCommands(CommandSender sender, List<String> commands) {
if(commands == null || commands.isEmpty()) {
return;
}
for(String command : commands) {
if(command == null || command.isEmpty()) {
continue;
}
// It is not ideal we have to disable language replacements here, but otherwise giving language variables
// to '/areashop message' by a command in the config gets replaced and messes up the fancy formatting.
command = Message.fromString(command).replacements(this).noLanguageReplacements().getSingle();
boolean result;
String error = null;
String stacktrace = null;
try {
result = plugin.getServer().dispatchCommand(sender, command);
} catch(CommandException e) {
result = false;
error = e.getMessage();
stacktrace = ExceptionUtils.getStackTrace(e);
}
boolean printed = false;
if(!result) {
printed = true;
if(error != null) {
AreaShop.warn("Command execution failed, command=" + command + ", error=" + error + ", stacktrace:");
AreaShop.warn(stacktrace);
AreaShop.warn("--- End of stacktrace ---");
} else {
AreaShop.warn("Command execution failed, command=" + command);
}
}
if(!printed) {
AreaShop.debug("Command run, executor=" + sender.getName() + ", command=" + command);
}
}
} | 3.68 |
hmily_XaResourcePool_removeResource | /**
* Remove the wrapped XA resource for the given xid.
*
* @param xid the xid
* @return the xa resource wrapped
*/
public XaResourceWrapped removeResource(final Xid xid) {
String gid = new String(xid.getGlobalTransactionId());
if (xids.containsKey(gid)) {
xids.get(gid).remove(xid);
}
return pool.remove(xid);
} | 3.68 |
hbase_HRegionFileSystem_getStoreHomedir | /**
* @param tabledir {@link Path} to where the table is being stored
* @param encodedName Encoded region name.
* @param family {@link ColumnFamilyDescriptor} describing the column family
* @return Path to family/Store home directory.
*/
public static Path getStoreHomedir(final Path tabledir, final String encodedName,
final byte[] family) {
return new Path(tabledir, new Path(encodedName, Bytes.toString(family)));
} | 3.68 |
framework_VLoadingIndicator_setConnection | /**
* Sets the {@link ApplicationConnection} which uses this loading indicator.
* Only used internally.
*
* @param connection
* The ApplicationConnection for this loading indicator
*/
void setConnection(ApplicationConnection connection) {
this.connection = connection;
} | 3.68 |
framework_GridElement_getHeader | /**
* Get the header element.
*
* @return The thead element
*/
public TestBenchElement getHeader() {
return getSubPart("#header");
} | 3.68 |
hbase_PrivateCellUtil_writeCell | /**
* Writes the cell to the given OutputStream
* @param cell the cell to be written
* @param out the outputstream
* @param withTags if tags are to be written or not
* @return the total bytes written
*/
public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
if (cell instanceof ExtendedCell) {
return ((ExtendedCell) cell).write(out, withTags);
} else {
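// Non-ExtendedCell fallback: write <key length><value length><flat key><value> and, if requested, <tags length><tags>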
ByteBufferUtils.putInt(out, estimatedSerializedSizeOfKey(cell));
ByteBufferUtils.putInt(out, cell.getValueLength());
writeFlatKey(cell, out);
writeValue(out, cell, cell.getValueLength());
int tagsLength = cell.getTagsLength();
if (withTags) {
byte[] len = new byte[Bytes.SIZEOF_SHORT];
Bytes.putAsShort(len, 0, tagsLength);
out.write(len);
if (tagsLength > 0) {
writeTags(out, cell, tagsLength);
}
}
int lenWritten =
(2 * Bytes.SIZEOF_INT) + estimatedSerializedSizeOfKey(cell) + cell.getValueLength();
if (withTags) {
lenWritten += Bytes.SIZEOF_SHORT + tagsLength;
}
return lenWritten;
}
} | 3.68 |
hbase_StoreFileWriter_getHFileWriter | /**
* For use in testing.
*/
HFile.Writer getHFileWriter() {
return writer;
} | 3.68 |
hbase_SyncFuture_reset | /**
* Call this method to clear old usage and get it ready for a new deploy.
* @param txid the new transaction id
* @param forceSync flag to force sync rather than flushing to the buffer
*/
SyncFuture reset(long txid, boolean forceSync) {
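// Guard: only the owning thread may reuse this future, and only after the previous sync has completed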
if (t != null && t != Thread.currentThread()) {
throw new IllegalStateException();
}
t = Thread.currentThread();
if (!isDone()) {
throw new IllegalStateException("" + txid + " " + Thread.currentThread());
}
this.doneTxid = NOT_DONE;
this.forceSync = forceSync;
this.txid = txid;
this.throwable = null;
return this;
} | 3.68 |
querydsl_JTSMultiSurfaceExpression_centroid | /**
* The mathematical centroid for this MultiSurface. The result is not guaranteed to be on
* this MultiSurface.
*
* @return centroid
*/
public JTSPointExpression<Point> centroid() {
if (centroid == null) {
centroid = JTSGeometryExpressions.pointOperation(SpatialOps.CENTROID, mixin);
}
return centroid;
} | 3.68 |
flink_FileLock_normalizeFileName | /**
* Normalize the file name by removing every character other than letters, digits, underscores, slashes and backslashes.
*
* @param fileName Original file name
* @return File name with illegal characters stripped
*/
private static String normalizeFileName(String fileName) {
return fileName.replaceAll("[^\\w/\\\\]", "");
} | 3.68 |
hbase_RpcThrottleStorage_switchRpcThrottle | /**
* Store the rpc throttle value.
* @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
* @throws IOException if an unexpected io exception occurs
*/
public void switchRpcThrottle(boolean enable) throws IOException {
try {
byte[] upData = Bytes.toBytes(enable);
ZKUtil.createSetData(zookeeper, rpcThrottleZNode, upData);
} catch (KeeperException e) {
throw new IOException("Failed to store rpc throttle", e);
}
} | 3.68 |
Activiti_IntegerToLong_primTransform | /**
* {@inheritDoc}
*/
@Override
protected Object primTransform(Object anObject) throws Exception {
return Long.valueOf((Integer) anObject);
} | 3.68 |
shardingsphere-elasticjob_ResponseBodySerializerFactory_getResponseBodySerializer | /**
* Get serializer for specific HTTP content type.
*
* <p>
* This method will look for a serializer instance of the specific MIME type.
* If no serializer is found, this method looks for a serializer factory by MIME type.
* If it is still not found, the MIME type is marked as <code>MISSING_SERIALIZER</code>.
* </p>
*
* <p>
* Some default serializers are provided by {@link SerializerFactory},
* so developers can implement {@link ResponseBodySerializer} and register it by SPI to override a default serializer.
* </p>
*
* @param contentType HTTP content type
* @return serializer
*/
public static ResponseBodySerializer getResponseBodySerializer(final String contentType) {
ResponseBodySerializer result = RESPONSE_BODY_SERIALIZERS.get(contentType);
if (null == result) {
synchronized (ResponseBodySerializerFactory.class) {
if (null == RESPONSE_BODY_SERIALIZERS.get(contentType)) {
instantiateResponseBodySerializerFromFactories(contentType);
}
result = RESPONSE_BODY_SERIALIZERS.get(contentType);
}
}
if (MISSING_SERIALIZER == result) {
throw new ResponseBodySerializerNotFoundException(contentType);
}
return result;
} | 3.68 |
hmily_OrderServiceImpl_mockInventoryWithTryTimeout | /**
* Simulates a timeout of the inventory service in the try phase during order payment.
*
* @param count  purchase quantity
* @param amount payment amount
* @return string
*/
@Override
public String mockInventoryWithTryTimeout(Integer count, BigDecimal amount) {
Order order = saveOrder(count, amount);
return paymentService.mockPaymentInventoryWithTryTimeout(order);
} | 3.68 |
zxing_BitMatrix_getEnclosingRectangle | /**
* This is useful in detecting the enclosing rectangle of a 'pure' barcode.
*
* @return {@code left,top,width,height} enclosing rectangle of all 1 bits, or null if it is all white
*/
public int[] getEnclosingRectangle() {
int left = width;
int top = height;
int right = -1;
int bottom = -1;
for (int y = 0; y < height; y++) {
for (int x32 = 0; x32 < rowSize; x32++) {
int theBits = bits[y * rowSize + x32];
if (theBits != 0) {
if (y < top) {
top = y;
}
if (y > bottom) {
bottom = y;
}
if (x32 * 32 < left) {
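// Scan for the lowest set bit in this word to tighten the left boundary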
int bit = 0;
while ((theBits << (31 - bit)) == 0) {
bit++;
}
if ((x32 * 32 + bit) < left) {
left = x32 * 32 + bit;
}
}
if (x32 * 32 + 31 > right) {
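// Scan for the highest set bit in this word to tighten the right boundary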
int bit = 31;
while ((theBits >>> bit) == 0) {
bit--;
}
if ((x32 * 32 + bit) > right) {
right = x32 * 32 + bit;
}
}
}
}
}
if (right < left || bottom < top) {
return null;
}
return new int[] {left, top, right - left + 1, bottom - top + 1};
} | 3.68 |
flink_DataStream_partitionCustom | // private helper method for custom partitioning
private <K> DataStream<T> partitionCustom(Partitioner<K> partitioner, Keys<T> keys) {
KeySelector<T, K> keySelector =
KeySelectorUtil.getSelectorForOneKey(
keys, partitioner, getType(), getExecutionConfig());
return setConnectionType(
new CustomPartitionerWrapper<>(clean(partitioner), clean(keySelector)));
} | 3.68 |
hbase_ZKAuthentication_loginServer | /**
* Log in the current zookeeper server process using the given configuration keys for the
* credential file and login principal.
* <p>
* <strong>This is only applicable when running on secure hbase.</strong> On regular HBase (without
* security features), this will safely be ignored.
* </p>
* @param conf The configuration data to use
* @param keytabFileKey Property key used to configure the path to the credential file
* @param userNameKey Property key used to configure the login principal
* @param hostname Current hostname to use in any credentials
* @throws IOException underlying exception from SecurityUtil.login() call
*/
public static void loginServer(Configuration conf, String keytabFileKey, String userNameKey,
String hostname) throws IOException {
login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME);
} | 3.68 |
pulsar_TlsHostnameVerifier_normaliseAddress | /*
* Normalize IPv6 or DNS name.
*/
static String normaliseAddress(final String hostname) {
if (hostname == null) {
return hostname;
}
try {
final InetAddress inetAddress = InetAddress.getByName(hostname);
return inetAddress.getHostAddress();
} catch (final UnknownHostException unexpected) { // Should not happen, because we check for IPv6 address above
return hostname;
}
} | 3.68 |
hbase_MetricsConnection_getConnectionCount | /** Return the connection count of the metrics within a scope */
public long getConnectionCount() {
return connectionCount.getCount();
} | 3.68 |
framework_VTooltip_setMaxWidth | /**
* Sets the maximum width of the tooltip popup.
*
* @param maxWidth
* The maximum width the tooltip popup (in pixels)
*/
public void setMaxWidth(int maxWidth) {
this.maxWidth = maxWidth;
} | 3.68 |
hbase_WAL_sync | /**
* @param txid Transaction id to sync to.
* @param forceSync Flag to force sync rather than flushing to the buffer. Example - Hadoop hflush
* vs hsync.
* @throws IOException if the sync fails; on timeout a {@link WALSyncTimeoutIOException} is thrown.
*/
default void sync(long txid, boolean forceSync) throws IOException {
sync(txid);
} | 3.68 |
hadoop_TaskAttemptContainerLaunchedEvent_getShufflePort | /**
* Get the port that the shuffle handler is listening on. This is only
* valid if the type of the event is TA_CONTAINER_LAUNCHED
* @return the port the shuffle handler is listening on.
*/
public int getShufflePort() {
return shufflePort;
} | 3.68 |
hbase_QuotaState_getGlobalLimiterWithoutUpdatingLastQuery | /**
* Return the limiter associated with this quota without updating internal last query stats
* @return the quota limiter
*/
synchronized QuotaLimiter getGlobalLimiterWithoutUpdatingLastQuery() {
return globalLimiter;
} | 3.68 |
streampipes_ParserDescriptionBuilder_create | /**
* Creates a new parser description using the builder pattern.
*
* @param id A unique identifier of the new element, e.g., com.mycompany.sink.mynewdatasink
* @param label A human-readable name of the element.
* Will later be shown as the element name in the StreamPipes UI.
* @param description A human-readable description of the element.
*/
public static ParserDescriptionBuilder create(String id, String label, String description) {
return new ParserDescriptionBuilder(id, label, description);
} | 3.68 |
hbase_ClientMetaTableAccessor_getTableHRegionLocations | /**
* Used to get all region locations for the specific table
* @param metaTable scanner over meta table
* @param tableName table we're looking for, can be null for getting all regions
* @return the list of region locations. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
public static CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(
AsyncTable<AdvancedScanResultConsumer> metaTable, TableName tableName) {
CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
addListener(getTableRegionsAndLocations(metaTable, tableName, true), (locations, err) -> {
if (err != null) {
future.completeExceptionally(err);
} else if (locations == null || locations.isEmpty()) {
future.complete(Collections.emptyList());
} else {
List<HRegionLocation> regionLocations =
locations.stream().map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond()))
.collect(Collectors.toList());
future.complete(regionLocations);
}
});
return future;
} | 3.68 |
flink_TypeInferenceExtractor_forProcedure | /** Extracts the type inference from a {@link Procedure}. */
public static TypeInference forProcedure(
DataTypeFactory typeFactory, Class<? extends Procedure> procedure) {
final ProcedureMappingExtractor mappingExtractor =
new ProcedureMappingExtractor(
typeFactory,
procedure,
ProcedureDefinition.PROCEDURE_CALL,
ProcedureMappingExtractor.createParameterSignatureExtraction(1),
ProcedureMappingExtractor.createReturnTypeResultExtraction(),
ProcedureMappingExtractor.createParameterAndReturnTypeVerification());
return extractTypeInference(mappingExtractor);
} | 3.68 |
hadoop_CsiGrpcClient_createControllerBlockingStub | /**
* Creates a blocking stub for CSI controller plugin on the given channel.
* @return the blocking stub
*/
public ControllerGrpc.ControllerBlockingStub createControllerBlockingStub(){
return ControllerGrpc.newBlockingStub(channel);
} | 3.68 |
querydsl_JTSGeometryExpressions_setSRID | /**
* Sets the SRID on a geometry to a particular integer value.
*
* @param expr geometry
* @param srid SRID
* @param <T> geometry type
* @return geometry
*/
public static <T extends Geometry> JTSGeometryExpression<T> setSRID(Expression<T> expr, int srid) {
return geometryOperation(expr.getType(), SpatialOps.SET_SRID,
expr, ConstantImpl.create(srid));
} | 3.68 |
flink_ResourceManager_registerTaskExecutorInternal | /**
* Registers a new TaskExecutor.
*
* @param taskExecutorRegistration task executor registration parameters
* @return RegistrationResponse
*/
private RegistrationResponse registerTaskExecutorInternal(
TaskExecutorGateway taskExecutorGateway,
TaskExecutorRegistration taskExecutorRegistration) {
ResourceID taskExecutorResourceId = taskExecutorRegistration.getResourceId();
WorkerRegistration<WorkerType> oldRegistration =
taskExecutors.remove(taskExecutorResourceId);
if (oldRegistration != null) {
// TODO :: suggest old taskExecutor to stop itself
log.debug(
"Replacing old registration of TaskExecutor {}.",
taskExecutorResourceId.getStringWithMetadata());
// remove old task manager registration from slot manager
slotManager.unregisterTaskManager(
oldRegistration.getInstanceID(),
new ResourceManagerException(
String.format(
"TaskExecutor %s re-connected to the ResourceManager.",
taskExecutorResourceId.getStringWithMetadata())));
}
final Optional<WorkerType> newWorkerOptional =
getWorkerNodeIfAcceptRegistration(taskExecutorResourceId);
String taskExecutorAddress = taskExecutorRegistration.getTaskExecutorAddress();
if (!newWorkerOptional.isPresent()) {
log.warn(
"Discard registration from TaskExecutor {} at ({}) because the framework did "
+ "not recognize it",
taskExecutorResourceId.getStringWithMetadata(),
taskExecutorAddress);
return new TaskExecutorRegistrationRejection(
"The ResourceManager does not recognize this TaskExecutor.");
} else {
WorkerType newWorker = newWorkerOptional.get();
WorkerRegistration<WorkerType> registration =
new WorkerRegistration<>(
taskExecutorGateway,
newWorker,
taskExecutorRegistration.getDataPort(),
taskExecutorRegistration.getJmxPort(),
taskExecutorRegistration.getHardwareDescription(),
taskExecutorRegistration.getMemoryConfiguration(),
taskExecutorRegistration.getTotalResourceProfile(),
taskExecutorRegistration.getDefaultSlotResourceProfile(),
taskExecutorRegistration.getNodeId());
log.info(
"Registering TaskManager with ResourceID {} ({}) at ResourceManager",
taskExecutorResourceId.getStringWithMetadata(),
taskExecutorAddress);
taskExecutors.put(taskExecutorResourceId, registration);
taskManagerHeartbeatManager.monitorTarget(
taskExecutorResourceId, new TaskExecutorHeartbeatSender(taskExecutorGateway));
return new TaskExecutorRegistrationSuccess(
registration.getInstanceID(),
resourceId,
clusterInformation,
latestTokens.get());
}
} | 3.68 |
hadoop_NativeTaskOutputFiles_getInputFileForWrite | /**
* Create a local reduce input file name.
*
* @param mapId a map task id
* @param size the size of the file
*/
public Path getInputFileForWrite(TaskID mapId, long size, Configuration conf)
throws IOException {
return lDirAlloc.getLocalPathForWrite(
String.format(REDUCE_INPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, mapId.getId()), size,
conf);
} | 3.68 |
flink_ZooKeeperLeaderElectionHaServices_tryDeleteEmptyParentZNodes | /**
* Tries to delete empty parent znodes.
*
* <p>IMPORTANT: This method can be removed once all supported ZooKeeper versions support the
* container {@link org.apache.zookeeper.CreateMode}.
*
* @throws Exception if the deletion fails for other reason than {@link
* KeeperException.NotEmptyException}
*/
private void tryDeleteEmptyParentZNodes() throws Exception {
// try to delete the parent znodes if they are empty
String remainingPath =
getParentPath(
getNormalizedPath(
curatorFrameworkWrapper.asCuratorFramework().getNamespace()));
final CuratorFramework nonNamespaceClient =
curatorFrameworkWrapper.asCuratorFramework().usingNamespace(null);
while (!isRootPath(remainingPath)) {
try {
nonNamespaceClient.delete().forPath(remainingPath);
} catch (KeeperException.NotEmptyException ignored) {
// We can only delete empty znodes
break;
}
remainingPath = getParentPath(remainingPath);
}
} | 3.68 |
hbase_BoundedRecoveredEditsOutputSink_writeRemainingEntryBuffers | /**
* Write out the remaining RegionEntryBuffers and close the writers.
* @return true when there is no error.
*/
private boolean writeRemainingEntryBuffers() throws IOException {
for (EntryBuffers.RegionEntryBuffer buffer : entryBuffers.buffers.values()) {
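// Submit an append task per remaining region buffer; completion is awaited below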
closeCompletionService.submit(() -> {
append(buffer);
return null;
});
}
boolean progressFailed = false;
try {
for (int i = 0, n = entryBuffers.buffers.size(); i < n; i++) {
Future<Void> future = closeCompletionService.take();
future.get();
if (!progressFailed && reporter != null && !reporter.progress()) {
progressFailed = true;
}
}
} catch (InterruptedException e) {
IOException iie = new InterruptedIOException();
iie.initCause(e);
throw iie;
} catch (ExecutionException e) {
throw new IOException(e.getCause());
} finally {
closeThreadPool.shutdownNow();
}
return !progressFailed;
} | 3.68 |
flink_AfterMatchSkipStrategy_skipPastLastEvent | /**
* Discards every partial match that started before emitted match ended.
*
* @return the created AfterMatchSkipStrategy
*/
public static SkipPastLastStrategy skipPastLastEvent() {
return SkipPastLastStrategy.INSTANCE;
} | 3.68 |
flink_StreamExecutionEnvironment_fromElements | /**
* Creates a new data stream that contains the given elements. The framework will determine the
* type according to the based type user supplied. The elements should be the same or be the
* subclass to the based type. The sequence of elements must not be empty. Note that this
* operation will result in a non-parallel data stream source, i.e. a data stream source with a
* degree of parallelism one.
*
* @param type The based class type in the collection.
* @param data The array of elements to create the data stream from.
* @param <OUT> The type of the returned data stream
* @return The data stream representing the given array of elements
* @deprecated This method will be removed in a future release, possibly as early as version 2.0.
* Use {@link #fromData(OUT...)} instead.
*/
@SafeVarargs
@Deprecated
public final <OUT> DataStreamSource<OUT> fromElements(Class<OUT> type, OUT... data) {
if (data.length == 0) {
throw new IllegalArgumentException(
"fromElements needs at least one element as argument");
}
TypeInformation<OUT> typeInfo;
try {
typeInfo = TypeExtractor.getForClass(type);
} catch (Exception e) {
throw new RuntimeException(
"Could not create TypeInformation for type "
+ type.getName()
+ "; please specify the TypeInformation manually via "
+ "StreamExecutionEnvironment#fromElements(Collection, TypeInformation)",
e);
}
return fromCollection(Arrays.asList(data), typeInfo);
} | 3.68 |
morf_SchemaChangeSequence_executeStatement | /**
* @see org.alfasoftware.morf.upgrade.DataEditor#executeStatement(org.alfasoftware.morf.sql.Statement)
*/
@Override
public void executeStatement(Statement statement) {
visitor.visit(new ExecuteStatement(statement));
statement.accept(schemaAndDataChangeVisitor);
} | 3.68 |
hbase_ProcedureStore_postSync | /**
* triggered when the store sync is completed.
*/
default void postSync() {
} | 3.68 |
morf_AliasedField_multiplyBy | /**
* @param expression value to multiply this field by.
* @return A new expression using {@link MathsField} and {@link MathsOperator#MULTIPLY}.
*/
public final MathsField multiplyBy(AliasedField expression) {
return new MathsField(this, MathsOperator.MULTIPLY, potentiallyBracketExpression(expression));
} | 3.68 |
framework_VScrollTable_emphasis | /**
* TODO needs different drop modes ?? (on cells, on rows), now only
* supports rows
*/
private void emphasis(TableDDDetails details) {
deEmphasis();
UIObject.setStyleName(getElement(), getStylePrimaryName() + "-drag",
true);
// iterate old and new emphasized row
for (Widget w : scrollBody.renderedRows) {
VScrollTableRow row = (VScrollTableRow) w;
if (details != null && details.overkey == row.rowKey) {
String stylename = ROWSTYLEBASE + details.dropLocation
.toString().toLowerCase(Locale.ROOT);
VScrollTableRow.setStyleName(row.getElement(), stylename,
true);
lastEmphasized = details;
return;
}
}
} | 3.68 |
morf_CompositeSchema_viewExists | /**
* @see org.alfasoftware.morf.metadata.Schema#viewExists(java.lang.String)
*/
@Override
public boolean viewExists(String name) {
for (Schema schema : delegates)
if (schema.viewExists(name))
return true;
return false;
} | 3.68 |
hbase_ZKWatcher_keeperException | /**
* Handles KeeperExceptions in client calls.
* <p>
* This may be temporary but for now this gives one place to deal with these.
* <p>
* TODO: Currently this method rethrows the exception to let the caller handle
* <p>
* @param ke the exception to rethrow
* @throws KeeperException if a ZooKeeper operation fails
*/
public void keeperException(KeeperException ke) throws KeeperException {
LOG.error(prefix("Received unexpected KeeperException, re-throwing exception"), ke);
throw ke;
} | 3.68 |
hbase_SimpleServerRpcConnection_readAndProcess | /**
* Read off the wire. If there is not enough data to read, update the connection state with what
* we have and returns.
* @return Returns -1 if failure (and caller will close connection), else zero or more.
*/
public int readAndProcess() throws IOException, InterruptedException {
// If we have not read the connection setup preamble, look to see if that is on the wire.
if (!connectionPreambleRead) {
int count = readPreamble();
if (!connectionPreambleRead) {
return count;
}
}
// Try and read in an int. it will be length of the data to read (or -1 if a ping). We catch the
// integer length into the 4-byte this.dataLengthBuffer.
int count = read4Bytes();
if (count < 0 || dataLengthBuffer.remaining() > 0) {
return count;
}
// We have read a length and we have read the preamble. It is either the connection header
// or it is a request.
if (data == null) {
dataLengthBuffer.flip();
int dataLength = dataLengthBuffer.getInt();
if (dataLength == RpcClient.PING_CALL_ID) {
if (!useWrap) { // covers the !useSasl too
dataLengthBuffer.clear();
return 0; // ping message
}
}
if (dataLength < 0) { // A data length of zero is legal.
throw new DoNotRetryIOException(
"Unexpected data length " + dataLength + "!! from " + getHostAddress());
}
if (dataLength > this.rpcServer.maxRequestSize) {
String msg = "RPC data length of " + dataLength + " received from " + getHostAddress()
+ " is greater than max allowed " + this.rpcServer.maxRequestSize + ". Set \""
+ SimpleRpcServer.MAX_REQUEST_SIZE
+ "\" on server to override this limit (not recommended)";
SimpleRpcServer.LOG.warn(msg);
if (connectionHeaderRead && connectionPreambleRead) {
incRpcCount();
// Construct InputStream for the non-blocking SocketChannel
// We need the InputStream because we want to read only the request header
// instead of the whole rpc.
ByteBuffer buf = ByteBuffer.allocate(1);
InputStream is = new InputStream() {
@Override
public int read() throws IOException {
SimpleServerRpcConnection.this.rpcServer.channelRead(channel, buf);
buf.flip();
int x = buf.get();
buf.flip();
return x;
}
};
CodedInputStream cis = CodedInputStream.newInstance(is);
int headerSize = cis.readRawVarint32();
Message.Builder builder = RequestHeader.newBuilder();
ProtobufUtil.mergeFrom(builder, cis, headerSize);
RequestHeader header = (RequestHeader) builder.build();
// Notify the client about the offending request
SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0,
this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder);
RequestTooBigException reqTooBigEx = new RequestTooBigException(msg);
this.rpcServer.metrics.exception(reqTooBigEx);
// Make sure the client recognizes the underlying exception
// Otherwise, throw a DoNotRetryIOException.
if (
VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(),
RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)
) {
reqTooBig.setResponse(null, null, reqTooBigEx, msg);
} else {
reqTooBig.setResponse(null, null, new DoNotRetryIOException(msg), msg);
}
// In most cases we will write out the response directly. If not, it is still OK to just
// close the connection without writing out the reqTooBig response. Do not try to write
// out directly here, and it will cause deserialization error if the connection is slow
// and we have a half writing response in the queue.
reqTooBig.sendResponseIfReady();
}
// Close the connection
return -1;
}
// Initialize this.data with a ByteBuff.
// This call will allocate a ByteBuff to read request into and assign to this.data
// Also when we use some buffer(s) from pool, it will create a CallCleanup instance also and
// assign to this.callCleanup
initByteBuffToReadInto(dataLength);
// Increment the rpc count. This counter will be decreased when we write
// the response. If we want the connection to be detected as idle properly, we
// need to keep the inc / dec correct.
incRpcCount();
}
count = channelDataRead(channel, data);
if (count >= 0 && data.remaining() == 0) { // count==0 if dataLength == 0
process();
}
return count;
} | 3.68 |
flink_Tuple9_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7,
* f8), where the individual fields are the value returned by calling {@link Object#toString} on
* that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ")";
} | 3.68 |
dubbo_DubboBootstrap_isStopped | /**
* @return true if the dubbo application is stopped.
* @see #isStopped()
*/
public boolean isStopped() {
return applicationDeployer.isStopped();
} | 3.68 |
hadoop_FederationStateStoreFacade_getPoliciesConfigurations | /**
* Get the policies that is represented as
* {@link SubClusterPolicyConfiguration} for all currently active queues in
* the system.
*
* @return the policies for all currently active queues in the system
* @throws YarnException if the call to the state store is unsuccessful
*/
public Map<String, SubClusterPolicyConfiguration> getPoliciesConfigurations()
throws YarnException {
try {
if (federationCache.isCachingEnabled()) {
return federationCache.getPoliciesConfigurations();
} else {
GetSubClusterPoliciesConfigurationsRequest request =
GetSubClusterPoliciesConfigurationsRequest.newInstance();
return buildPolicyConfigMap(stateStore.getPoliciesConfigurations(request));
}
} catch (Throwable ex) {
throw new YarnException(ex);
}
} | 3.68 |
hadoop_OpportunisticContainerContext_matchAllocationToOutstandingRequest | /**
* This method matches a returned list of Container Allocations to any
* outstanding OPPORTUNISTIC ResourceRequest.
* @param capability Capability
* @param allocations Allocations.
*/
public void matchAllocationToOutstandingRequest(Resource capability,
List<Allocation> allocations) {
for (OpportunisticContainerAllocator.Allocation allocation : allocations) {
SchedulerRequestKey schedulerKey =
SchedulerRequestKey.extractFrom(allocation.getContainer());
Map<Resource, EnrichedResourceRequest> asks =
outstandingOpReqs.get(schedulerKey);
if (asks == null) {
continue;
}
EnrichedResourceRequest err = asks.get(capability);
if (err != null) {
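// Decrement the matching outstanding ask and drop it once fully satisfied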
int numContainers = err.getRequest().getNumContainers();
numContainers--;
err.getRequest().setNumContainers(numContainers);
if (numContainers == 0) {
asks.remove(capability);
if (asks.size() == 0) {
outstandingOpReqs.remove(schedulerKey);
}
} else {
if (!ResourceRequest.isAnyLocation(allocation.getResourceName())) {
err.removeLocation(allocation.getResourceName());
}
}
getOppSchedulerMetrics().addAllocateOLatencyEntry(
Time.monotonicNow() - err.getTimestamp());
}
}
} | 3.68 |
hadoop_ShortWritable_write | /** write short value */
@Override
public void write(DataOutput out) throws IOException {
out.writeShort(value);
} | 3.68 |
flink_AvroDeserializationSchema_forSpecific | /**
* Creates {@link AvroDeserializationSchema} that produces classes that were generated from avro
* schema.
*
* @param tClass class of record to be produced
* @param encoding Avro serialization approach to use for decoding
* @return deserialization schema that produces instances of the given specific record class
*/
public static <T extends SpecificRecord> AvroDeserializationSchema<T> forSpecific(
Class<T> tClass, AvroEncoding encoding) {
return new AvroDeserializationSchema<>(tClass, null, encoding);
} | 3.68 |
hudi_AvroSchemaUtils_checkSchemaCompatible | /**
* Checks whether writer schema is compatible with table schema considering {@code AVRO_SCHEMA_VALIDATE_ENABLE}
* and {@code SCHEMA_ALLOW_AUTO_EVOLUTION_COLUMN_DROP} options.
* To avoid collision of {@code SCHEMA_ALLOW_AUTO_EVOLUTION_COLUMN_DROP} and {@code DROP_PARTITION_COLUMNS}
* partition column names should be passed as {@code dropPartitionColNames}.
* Passed empty set means {@code DROP_PARTITION_COLUMNS} is disabled.
*
* @param tableSchema the latest dataset schema
* @param writerSchema writer schema
* @param shouldValidate whether {@link AvroSchemaCompatibility} check being performed
* @param allowProjection whether column dropping check being performed
* @param dropPartitionColNames partition column names to being excluded from column dropping check
* @throws SchemaCompatibilityException if writer schema is not compatible
*/
public static void checkSchemaCompatible(
Schema tableSchema,
Schema writerSchema,
boolean shouldValidate,
boolean allowProjection,
Set<String> dropPartitionColNames) throws SchemaCompatibilityException {
String errorMessage = null;
if (!allowProjection && !canProject(tableSchema, writerSchema, dropPartitionColNames)) {
errorMessage = "Column dropping is not allowed";
}
// TODO(HUDI-4772) re-enable validations in case partition columns
// being dropped from the data-file after fixing the write schema
if (dropPartitionColNames.isEmpty() && shouldValidate && !isSchemaCompatible(tableSchema, writerSchema)) {
errorMessage = "Failed schema compatibility check";
}
if (errorMessage != null) {
String errorDetails = String.format(
"%s\nwriterSchema: %s\ntableSchema: %s",
errorMessage,
writerSchema,
tableSchema);
throw new SchemaCompatibilityException(errorDetails);
}
} | 3.68 |
hibernate-validator_ConstraintTypeStaxBuilder_run | /**
* Runs the given privileged action, using a privileged block if required.
*
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
hbase_ResponseConverter_buildRunCatalogScanResponse | /**
* Creates a response for the catalog scan request
* @return A RunCatalogScanResponse
*/
public static RunCatalogScanResponse buildRunCatalogScanResponse(int numCleaned) {
return RunCatalogScanResponse.newBuilder().setScanResult(numCleaned).build();
} | 3.68 |
hbase_MoveWithAck_isSuccessfulScan | /**
* Tries to scan a row from the passed region
*/
private void isSuccessfulScan(RegionInfo region) throws IOException {
Scan scan = new Scan().withStartRow(region.getStartKey()).setRaw(true).setOneRowLimit()
.setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()).setCacheBlocks(false);
try (Table table = conn.getTable(region.getTable());
ResultScanner scanner = table.getScanner(scan)) {
scanner.next();
} catch (IOException e) {
LOG.error("Could not scan region: {}", region.getEncodedName(), e);
throw e;
}
} | 3.68 |
hadoop_ParsedTaskAttempt_dumpParsedTaskAttempt | /** Dump the extra info of ParsedTaskAttempt */
void dumpParsedTaskAttempt() {
LOG.info("ParsedTaskAttempt details:" + obtainCounters()
+ ";DiagnosticInfo=" + obtainDiagnosticInfo() + "\n"
+ obtainTrackerName() + ";" + obtainHttpPort() + ";"
+ obtainShufflePort() + ";rack=" + getHostName().getRackName()
+ ";host=" + getHostName().getHostName());
} | 3.68 |
framework_GridLayoutWithNonIntegerWidth_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Neither of the panels should contain scrollbars";
} | 3.68 |
hbase_RecoverLeaseFSUtils_recoverFileLease | /**
* Recover the lease from HDFS, retrying multiple times.
*/
public static void recoverFileLease(FileSystem fs, Path p, Configuration conf,
CancelableProgressable reporter) throws IOException {
if (fs instanceof FilterFileSystem) {
fs = ((FilterFileSystem) fs).getRawFileSystem();
}
// lease recovery not needed for local file system case.
if (!(fs instanceof DistributedFileSystem)) {
return;
}
recoverDFSFileLease((DistributedFileSystem) fs, p, conf, reporter);
} | 3.68 |
hadoop_RecordStore_getDriver | /**
* Get the State Store driver.
*
* @return State Store driver.
*/
public StateStoreDriver getDriver() {
return this.driver;
} | 3.68 |
hadoop_OBSDataBlocks_create | /**
* Create a temp file and a {@link DiskBlock} instance to manage it.
*
* @param index block index
* @param limit limit of the block.
* @return the new block
* @throws IOException IO problems
*/
@Override
DataBlock create(final long index, final int limit) throws IOException {
File destFile = createTmpFileForWrite(
String.format("obs-block-%04d-", index), limit,
getOwner().getConf());
return new DiskBlock(destFile, limit, index);
} | 3.68 |
framework_AbstractSingleSelect_setSelectedItem | /**
* This method updates the internal selection state of the server-side of
* {@code AbstractSingleSelect}.
*
* @param value
* the value that should be selected
* @param userOriginated
* {@code true} if selection was done by user, {@code false} if
* not
*
* @since 8.5
*/
protected void setSelectedItem(T value, boolean userOriginated) {
if (isSelected(value)) {
return;
}
// Update selection
T oldValue = selectedItem;
selectedItem = value;
// Re-generate selected item data
if (oldValue != null) {
getDataCommunicator().refresh(oldValue);
}
if (value != null) {
getDataCommunicator().refresh(value);
}
// Deselection can be handled immediately
updateSelectedItemState(value);
// Update diffstate to make sure null can be selected later.
updateDiffstate("selectedItemKey", Json.createObject());
fireEvent(new SingleSelectionEvent<>(AbstractSingleSelect.this,
oldValue, userOriginated));
} | 3.68 |
flink_DefaultLookupCache_maximumSize | /** Specifies the maximum number of entries of the cache. */
public Builder maximumSize(long maximumSize) {
this.maximumSize = maximumSize;
return this;
} | 3.68 |
hadoop_DelegationTokenIdentifier_stringifyToken | /** @return a string representation of the token */
public static String stringifyToken(final Token<?> token) throws IOException {
DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
ident.readFields(in);
if (token.getService().getLength() > 0) {
return ident + " on " + token.getService();
} else {
return ident.toString();
}
} | 3.68 |
hibernate-validator_ValidatorImpl_validateReturnValueForGroup | //TODO GM: if possible integrate with validateParameterForGroup()
private <T> void validateReturnValueForGroup(BaseBeanValidationContext<T> validationContext, ExecutableMetaData executableMetaData, T bean, Object value,
Group group) {
Contracts.assertNotNull( executableMetaData, "executableMetaData may not be null" );
// TODO GM: define behavior with respect to redefined default sequences. Should only the
// sequence from the validated bean be honored or also default sequence definitions up in
// the inheritance tree?
// For now a redefined default sequence will only be considered if specified at the bean
// hosting the validated itself, but no other default sequence from parent types
if ( group.isDefaultGroup() ) {
Iterator<Sequence> defaultGroupSequence = validationContext.getRootBeanMetaData().getDefaultValidationSequence( bean );
while ( defaultGroupSequence.hasNext() ) {
Sequence sequence = defaultGroupSequence.next();
int numberOfViolations = validationContext.getFailingConstraints().size();
for ( GroupWithInheritance expandedGroup : sequence ) {
for ( Group defaultGroupSequenceElement : expandedGroup ) {
validateReturnValueForSingleGroup( validationContext, executableMetaData, bean, value, defaultGroupSequenceElement.getDefiningClass() );
if ( shouldFailFast( validationContext ) ) {
return;
}
}
//stop processing after first group with errors occurred
if ( validationContext.getFailingConstraints().size() > numberOfViolations ) {
return;
}
}
}
}
else {
validateReturnValueForSingleGroup( validationContext, executableMetaData, bean, value, group.getDefiningClass() );
}
} | 3.68 |
pulsar_OffloadPoliciesImpl_mergeConfiguration | /**
* Merge different level offload policies.
*
* <p>policies level priority: topic > namespace > broker
*
* @param topicLevelPolicies topic level offload policies
* @param nsLevelPolicies namespace level offload policies
* @param brokerProperties broker level offload configuration
* @return offload policies
*/
public static OffloadPoliciesImpl mergeConfiguration(OffloadPoliciesImpl topicLevelPolicies,
OffloadPoliciesImpl nsLevelPolicies,
Properties brokerProperties) {
try {
boolean allConfigValuesAreNull = true;
OffloadPoliciesImpl offloadPolicies = new OffloadPoliciesImpl();
for (Field field : CONFIGURATION_FIELDS) {
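// Resolve each field with topic-level policies taking precedence over namespace-level, then broker configuration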
Object object;
if (topicLevelPolicies != null && field.get(topicLevelPolicies) != null) {
object = field.get(topicLevelPolicies);
} else if (nsLevelPolicies != null && field.get(nsLevelPolicies) != null) {
object = field.get(nsLevelPolicies);
} else {
object = getCompatibleValue(brokerProperties, field);
}
if (object != null) {
field.set(offloadPolicies, object);
if (allConfigValuesAreNull) {
allConfigValuesAreNull = false;
}
}
}
if (allConfigValuesAreNull) {
return null;
} else {
return offloadPolicies;
}
} catch (Exception e) {
log.error("Failed to merge configuration.", e);
return null;
}
} | 3.68 |
hibernate-validator_Sequence_addInheritedGroups | /**
* Recursively add inherited (groups defined on superclasses).
*
* @param group the group for which the inherited groups need to be added to {@code expandedGroups}
* @param expandedGroups The list into which to add all groups
*/
private void addInheritedGroups(Group group, Set<Group> expandedGroups) {
for ( Class<?> inheritedGroup : group.getDefiningClass().getInterfaces() ) {
if ( isGroupSequence( inheritedGroup ) ) {
throw LOG.getSequenceDefinitionsNotAllowedException();
}
Group g = new Group( inheritedGroup );
expandedGroups.add( g );
addInheritedGroups( g, expandedGroups );
}
} | 3.68 |
hadoop_TFile_begin | /**
* Get the begin location of the TFile.
*
* @return If TFile is not empty, the location of the first key-value pair.
* Otherwise, it returns end().
*/
Location begin() {
return begin;
} | 3.68 |
flink_LogicalTypeChecks_getFieldCount | /** Returns the field count of row and structured types. Other types return 1. */
public static int getFieldCount(LogicalType logicalType) {
return logicalType.accept(FIELD_COUNT_EXTRACTOR);
} | 3.68 |
framework_VSlider_setResolution | /**
* Sets the resolution (precision level) for slider as the number of
* fractional digits that are considered significant. Determines how big
* change is used when increasing or decreasing the value, and where more
* precise values get rounded.
*
* @param resolution
* the number of digits after the decimal point
*/
public void setResolution(int resolution) {
this.resolution = resolution;
} | 3.68 |
framework_Range_contains | /**
* Checks whether an integer is found within this range.
*
* @param integer
* an integer to test for presence in this range
* @return <code>true</code> if <code>integer</code> is in this range
*/
public boolean contains(final int integer) {
return getStart() <= integer && integer < getEnd();
} | 3.68 |
hbase_TimeRangeTracker_toByteArray | /**
* This method used to serialize TimeRangeTracker (TRT) with protobuf, but that breaks forward
* compatibility on HFile (see HBASE-21008). Previous hbase versions ( < 2.0.0 ) used
* DataOutput to serialize TRT and cannot deserialize a TRT that was serialized with protobuf,
* so we need to revert the serialization of TimeRangeTracker back to DataOutput.
* For more information, please check HBASE-21012.
* @param tracker TimeRangeTracker needed to be serialized.
* @return byte array filled with serialized TimeRangeTracker.
* @throws IOException if something goes wrong in writeLong.
*/
public static byte[] toByteArray(TimeRangeTracker tracker) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
try (DataOutputStream dos = new DataOutputStream(bos)) {
dos.writeLong(tracker.getMin());
dos.writeLong(tracker.getMax());
return bos.toByteArray();
}
}
} | 3.68 |
hmily_HmilyReflector_executor | /**
* Executor object.
*
* @param action the action
* @param executorType the executor type
* @param hmilyParticipant the hmily participant
* @return the object
* @throws Exception the exception
*/
public static Object executor(final HmilyActionEnum action, final ExecutorTypeEnum executorType, final HmilyParticipant hmilyParticipant) throws Exception {
setContext(action, hmilyParticipant);
if (executorType == ExecutorTypeEnum.RPC && hmilyParticipant.getRole() != HmilyRoleEnum.START.getCode()) {
if (action == HmilyActionEnum.CONFIRMING) {
return executeRpc(hmilyParticipant.getConfirmHmilyInvocation());
} else {
return executeRpc(hmilyParticipant.getCancelHmilyInvocation());
}
} else {
if (action == HmilyActionEnum.CONFIRMING) {
return executeLocal(hmilyParticipant.getConfirmHmilyInvocation(), hmilyParticipant.getTargetClass(), hmilyParticipant.getConfirmMethod());
} else {
return executeLocal(hmilyParticipant.getCancelHmilyInvocation(), hmilyParticipant.getTargetClass(), hmilyParticipant.getCancelMethod());
}
}
} | 3.68 |
flink_CoreOptions_fileSystemConnectionLimitIn | /**
* The total number of input connections that a file system for the given scheme may open.
* Unlimited by default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimitIn(String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.input").intType().defaultValue(-1);
} | 3.68 |
pulsar_ZKSessionWatcher_checkConnectionStatus | // task that runs every TICK_TIME to check zk connection
// NOT ThreadSafe:
// If zk client can't ensure the order, it may lead to problems.
// Currently,we only use it in single thread, it will be fine. but we shouldn't leave any potential problems
// in the future.
private void checkConnectionStatus() {
try {
CompletableFuture<Watcher.Event.KeeperState> future = new CompletableFuture<>();
zk.exists("/", false, (StatCallback) (rc, path, ctx, stat) -> {
switch (KeeperException.Code.get(rc)) {
case CONNECTIONLOSS:
future.complete(Watcher.Event.KeeperState.Disconnected);
break;
case SESSIONEXPIRED:
future.complete(Watcher.Event.KeeperState.Expired);
break;
case OK:
default:
future.complete(Watcher.Event.KeeperState.SyncConnected);
}
}, null);
Watcher.Event.KeeperState zkClientState;
try {
zkClientState = future.get(tickTimeMillis, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
// Consider zk disconnection if zk operation takes more than TICK_TIME
zkClientState = Watcher.Event.KeeperState.Disconnected;
}
checkState(zkClientState);
} catch (RejectedExecutionException | InterruptedException e) {
task.cancel(true);
} catch (Throwable t) {
log.warn("Error while checking ZK connection status", t);
}
} | 3.68 |
flink_SharedResourceHolder_get | /**
* Try to get an existing instance of the given resource. If an instance does not exist, create
* a new one with the given factory.
*
* @param resource the singleton object that identifies the requested static resource
*/
public static <T> T get(Resource<T> resource) {
return holder.getInternal(resource);
} | 3.68 |
hadoop_ClusterMetrics_getRunningReduces | /**
* Get the number of running reduce tasks in the cluster.
*
* @return running reduces
*/
public int getRunningReduces() {
return runningReduces;
} | 3.68 |
dubbo_ApplicationModel_getApplicationConfig | /**
* @deprecated Replace to {@link ApplicationModel#getCurrentConfig()}
*/
@Deprecated
public static ApplicationConfig getApplicationConfig() {
return defaultModel().getCurrentConfig();
} | 3.68 |
hadoop_StagingCommitter_getTaskAttemptPath | /**
* Compute the path where the output of a task attempt is stored until
* that task is committed.
*
* @param context the context of the task attempt.
* @param out The output path to put things in.
* @return the path where a task attempt should be stored.
*/
public static Path getTaskAttemptPath(TaskAttemptContext context, Path out) {
return new Path(getPendingTaskAttemptsPath(context, out),
String.valueOf(context.getTaskAttemptID()));
} | 3.68 |
hadoop_ActiveAuditManagerS3A_noteSpanReferenceLost | /**
* Span reference lost from GC operations.
* This is only called when an attempt is made to retrieve on
* the active thread or when a prune operation is cleaning up.
*
* @param threadId thread ID.
*/
private void noteSpanReferenceLost(long threadId) {
auditor.noteSpanReferenceLost(threadId);
} | 3.68 |
pulsar_PortManager_nextLockedFreePort | /**
* Return a locked available port.
*
* @return locked available port.
*/
public static synchronized int nextLockedFreePort() {
int exceptionCount = 0;
while (true) {
try (ServerSocket ss = new ServerSocket(0)) {
int port = ss.getLocalPort();
if (!checkPortIfLocked(port)) {
PORTS.add(port);
return port;
}
} catch (Exception e) {
exceptionCount++;
if (exceptionCount > 100) {
throw new RuntimeException("Unable to allocate socket port", e);
}
}
}
} | 3.68 |
flink_ZooKeeperUtils_fromConfig | /**
* Return the configured {@link ZkClientACLMode}.
*
* @param config The config to parse
* @return Configured ACL mode or the default defined by {@link
* HighAvailabilityOptions#ZOOKEEPER_CLIENT_ACL} if not configured.
*/
public static ZkClientACLMode fromConfig(Configuration config) {
String aclMode = config.getString(HighAvailabilityOptions.ZOOKEEPER_CLIENT_ACL);
if (aclMode == null || aclMode.equalsIgnoreCase(OPEN.name())) {
return OPEN;
} else if (aclMode.equalsIgnoreCase(CREATOR.name())) {
return CREATOR;
} else {
String message = "Unsupported ACL option: [" + aclMode + "] provided";
LOG.error(message);
throw new IllegalConfigurationException(message);
}
} | 3.68 |
hadoop_BlockManagerParameters_getConf | /**
* @return The configuration object.
*/
public Configuration getConf() {
return conf;
} | 3.68 |
flink_DynamicSourceUtils_prepareDynamicSource | /**
* Prepares the given {@link DynamicTableSource}. It checks whether the source is compatible with
* the given schema and applies initial parameters.
*/
public static void prepareDynamicSource(
String tableDebugName,
ResolvedCatalogTable table,
DynamicTableSource source,
boolean isBatchMode,
ReadableConfig config,
List<SourceAbilitySpec> sourceAbilities) {
final ResolvedSchema schema = table.getResolvedSchema();
validateAndApplyMetadata(tableDebugName, schema, source, sourceAbilities);
if (source instanceof ScanTableSource) {
validateScanSource(
tableDebugName, schema, (ScanTableSource) source, isBatchMode, config);
prepareRowLevelModificationScan(source);
}
// lookup table source is validated in LookupJoin node
} | 3.68 |
framework_VCalendarPanel_focusPreviousMonth | /**
* Selects the previous month
*/
private void focusPreviousMonth() {
if (focusedDate == null) {
return;
}
Date requestedPreviousMonthDate = (Date) focusedDate.clone();
removeOneMonth(requestedPreviousMonthDate);
if (!isDateInsideRange(requestedPreviousMonthDate, Resolution.MONTH)) {
return;
}
if (!isDateInsideRange(requestedPreviousMonthDate, Resolution.DAY)) {
requestedPreviousMonthDate = adjustDateToFitInsideRange(
requestedPreviousMonthDate);
}
focusedDate.setTime(requestedPreviousMonthDate.getTime());
displayedMonth.setMonth(displayedMonth.getMonth() - 1);
renderCalendar();
} | 3.68 |
hadoop_UpdateContainerTokenEvent_isResourceChange | /**
* Is this update a ResourceChange.
*
* @return isResourceChange.
*/
public boolean isResourceChange() {
return isResourceChange;
} | 3.68 |
hadoop_AbfsTokenRenewer_handleKind | /**
* Checks if this particular object handles the Kind of token passed.
*
* @param kind the kind of the token
* @return true if it handles passed token kind false otherwise.
*/
@Override
public boolean handleKind(Text kind) {
return AbfsDelegationTokenIdentifier.TOKEN_KIND.equals(kind);
} | 3.68 |
framework_Escalator_insertColumns | /**
* {@inheritDoc}
* <p>
* <em>Implementation detail:</em> This method does no DOM modifications
* (i.e. is very cheap to call) if there is no data for rows when this
* method is called.
*
* @see #hasColumnAndRowData()
*/
@Override
public void insertColumns(final int index, final int numberOfColumns) {
// Validate
if (index < 0 || index > getColumnCount()) {
throw new IndexOutOfBoundsException("The given index(" + index
+ ") was outside of the current number of columns (0.."
+ getColumnCount() + ")");
}
if (numberOfColumns < 1) {
throw new IllegalArgumentException(
"Number of columns must be 1 or greater (was "
+ numberOfColumns);
}
// Add to bookkeeping
flyweightRow.addCells(index, numberOfColumns);
for (int i = 0; i < numberOfColumns; i++) {
columns.add(index, new Column());
}
// Adjust frozen columns
boolean frozen = index < frozenColumns;
if (frozen) {
frozenColumns += numberOfColumns;
}
// Add to DOM
header.paintInsertColumns(index, numberOfColumns, frozen);
body.paintInsertColumns(index, numberOfColumns, frozen);
footer.paintInsertColumns(index, numberOfColumns, frozen);
// this needs to be before the scrollbar adjustment.
boolean scrollbarWasNeeded = horizontalScrollbar
.getOffsetSize() < horizontalScrollbar.getScrollSize();
scroller.recalculateScrollbarsForVirtualViewport();
boolean scrollbarIsNowNeeded = horizontalScrollbar
.getOffsetSize() < horizontalScrollbar.getScrollSize();
if (!scrollbarWasNeeded && scrollbarIsNowNeeded) {
// This might as a side effect move rows around (when scrolled
// all the way down) and require the DOM to be up to date, i.e.
// the column to be added
body.verifyEscalatorCount();
}
// fix initial width
if (header.getRowCount() > 0 || body.getRowCount() > 0
|| footer.getRowCount() > 0) {
Map<Integer, Double> colWidths = new HashMap<Integer, Double>();
Double width = Double.valueOf(Column.DEFAULT_COLUMN_WIDTH_PX);
for (int i = index; i < index + numberOfColumns; i++) {
Integer col = Integer.valueOf(i);
colWidths.put(col, width);
}
getColumnConfiguration().setColumnWidths(colWidths);
}
// Adjust scrollbar
double pixelsToInsertedColumn = columnConfiguration
.getCalculatedColumnsWidth(Range.withLength(0, index));
final boolean columnsWereAddedToTheLeftOfViewport = scroller.lastScrollLeft > pixelsToInsertedColumn;
if (columnsWereAddedToTheLeftOfViewport) {
double insertedColumnsWidth = columnConfiguration
.getCalculatedColumnsWidth(
Range.withLength(index, numberOfColumns));
horizontalScrollbar.setScrollPos(
scroller.lastScrollLeft + insertedColumnsWidth);
}
/*
* Colspans make any kind of automatic clever content re-rendering
* impossible: As soon as anything has colspans, adding one might
* affect surrounding colspans, modifying the DOM structure once
* again, ending in a cascade of updates. Because we don't know how
* the data is updated.
*
* So, instead, we don't do anything. The client code is responsible
* for re-rendering the content (if so desired). Everything Just
* Works (TM) if colspans aren't used.
*/
} | 3.68 |
hbase_RegionServerObserver_postReplicationSinkBatchMutate | /**
* This will be called after replication sink mutations are executed on the sink table as part of
* batch call.
* @param ctx the environment to interact with the framework and region server.
* @param walEntry wal entry from which mutation is formed.
* @param mutation mutation to be applied at sink cluster.
* @throws IOException if something goes wrong.
*/
default void postReplicationSinkBatchMutate(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, AdminProtos.WALEntry walEntry,
Mutation mutation) throws IOException {
} | 3.68 |
hbase_HBaseTestingUtility_deleteTable | /**
* Drop an existing table
* @param tableName existing table
*/
public void deleteTable(TableName tableName) throws IOException {
try {
getAdmin().disableTable(tableName);
} catch (TableNotEnabledException e) {
LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
}
getAdmin().deleteTable(tableName);
} | 3.68 |
hudi_ConsistentBucketIndexBulkInsertPartitionerWithRows_initializeBucketIdentifier | /**
* Initialize hashing metadata of input records. The metadata of all related partitions will be loaded, and
* the mapping from partition to its bucket identifier is constructed.
*/
private Map<String, ConsistentBucketIdentifier> initializeBucketIdentifier(JavaRDD<Row> rows) {
return rows.map(this.extractor::getPartitionPath).distinct().collect().stream()
.collect(Collectors.toMap(p -> p, this::getBucketIdentifier));
} | 3.68 |
framework_LegacyCommunicationManager_isConnectorVisibleToClient | /**
* Checks if the connector is visible in context. For Components,
* {@link #isComponentVisibleToClient(Component)} is used. For other types
* of connectors, the contextual visibility of its first Component ancestor
* is used. If no Component ancestor is found, the connector is not visible.
*
* @deprecated As of 7.1. See #11411.
*
* @param connector
* The connector to check
* @return <code>true</code> if the connector is visible to the client,
* <code>false</code> otherwise
*/
@Deprecated
public static boolean isConnectorVisibleToClient(
ClientConnector connector) {
if (connector instanceof Component) {
return isComponentVisibleToClient((Component) connector);
} else {
ClientConnector parent = connector.getParent();
if (parent == null) {
return false;
} else {
return isConnectorVisibleToClient(parent);
}
}
} | 3.68 |
flink_NetUtils_parseHostPortAddress | /**
* Converts a string of the form "host:port" into an {@link InetSocketAddress}.
*
* @param hostPort The "host:port" string.
* @return The converted InetSocketAddress.
*/
public static InetSocketAddress parseHostPortAddress(String hostPort) {
URL url = validateHostPortString(hostPort);
return new InetSocketAddress(url.getHost(), url.getPort());
} | 3.68 |
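A short usage sketch based only on the signature shown above; the host and port are placeholders.

InetSocketAddress address = NetUtils.parseHostPortAddress("jobmanager.example.com:6123");
String host = address.getHostString(); // "jobmanager.example.com"
int port = address.getPort();          // 6123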
hbase_ThriftUtilities_resultsFromHBase | /**
* Converts multiple {@link Result}s (HBase) into a list of {@link TResult}s (Thrift).
* @param in array of <code>Result</code>s to convert
* @return list of converted <code>TResult</code>s
* @see #resultFromHBase(Result)
*/
public static List<TResult> resultsFromHBase(Result[] in) {
List<TResult> out = new ArrayList<>(in.length);
for (Result result : in) {
out.add(resultFromHBase(result));
}
return out;
} | 3.68 |
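A typical call site, sketched under the assumption that a Table instance and a list of Gets already exist in the handler.

// Convert a batch read into Thrift results in one pass.
Result[] hbaseResults = table.get(gets);
List<TResult> thriftResults = ThriftUtilities.resultsFromHBase(hbaseResults);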
hibernate-validator_MappingXmlParser_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
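Because PrivilegedAction is a functional interface, call sites of this guard stay compact; a hedged illustration (the property being read is arbitrary).

// Runs inside a privileged block only when a SecurityManager is installed.
String encoding = run(() -> System.getProperty("file.encoding"));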
flink_PartitionCommitPolicy_validatePolicyChain | /** Validate commit policy. */
static void validatePolicyChain(boolean isEmptyMetastore, String policyKind) {
if (policyKind != null) {
String[] policyStrings = policyKind.split(",");
for (String policy : policyStrings) {
if (isEmptyMetastore && METASTORE.equalsIgnoreCase(policy)) {
throw new ValidationException(
"Can not configure a 'metastore' partition commit"
+ " policy for a file system table. You can only configure 'metastore'"
+ " partition commit policy for a hive table.");
}
}
}
} | 3.68 |
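Given the branch above, the 'metastore' policy is only rejected when the table has no metastore; a hedged illustration of both outcomes.

// Hive table (metastore available): both policies pass validation.
PartitionCommitPolicy.validatePolicyChain(false, "metastore,success-file");

// File system table (empty metastore): throws ValidationException because of 'metastore'.
PartitionCommitPolicy.validatePolicyChain(true, "metastore,success-file");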
dubbo_AbstractServerCall_buildInvocation | /**
* Build the RpcInvocation with metadata and execute headerFilter
*
* @return RpcInvocation
*/
protected RpcInvocation buildInvocation(MethodDescriptor methodDescriptor) {
final URL url = invoker.getUrl();
RpcInvocation inv = new RpcInvocation(
url.getServiceModel(),
methodDescriptor.getMethodName(),
serviceDescriptor.getInterfaceName(),
url.getProtocolServiceKey(),
methodDescriptor.getParameterClasses(),
new Object[0]);
inv.setTargetServiceUniqueName(url.getServiceKey());
inv.setReturnTypes(methodDescriptor.getReturnTypes());
inv.setObjectAttachments(StreamUtils.toAttachments(requestMetadata));
inv.put(REMOTE_ADDRESS_KEY, stream.remoteAddress());
// handle timeout
String timeout = (String) requestMetadata.get(TripleHeaderEnum.TIMEOUT.getHeader());
try {
if (Objects.nonNull(timeout)) {
this.timeout = parseTimeoutToMills(timeout);
}
} catch (Throwable t) {
LOGGER.warn(
PROTOCOL_FAILED_PARSE,
"",
"",
String.format(
"Failed to parse request timeout set from:%s, service=%s " + "method=%s",
timeout, serviceDescriptor.getInterfaceName(), methodName));
}
if (null != requestMetadata.get(TripleHeaderEnum.CONSUMER_APP_NAME_KEY.getHeader())) {
inv.put(
TripleHeaderEnum.CONSUMER_APP_NAME_KEY,
requestMetadata.get(TripleHeaderEnum.CONSUMER_APP_NAME_KEY.getHeader()));
}
return inv;
} | 3.68 |
querydsl_SimpleExpression_when | /**
* Create a case expression builder
*
   * @param other the expression to compare against for equality
* @return case expression builder
*/
public CaseForEqBuilder<T> when(Expression<? extends T> other) {
return new CaseForEqBuilder<T>(mixin, other);
} | 3.68 |
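A hedged sketch of the resulting builder chain; the Q-type path, the enum constant, and the labels are assumptions, and the constant-argument overload of when(...) is used for brevity.

// CASE WHEN customer.status = ACTIVE THEN 'active' ELSE 'inactive' END
Expression<String> label = customer.status
    .when(Status.ACTIVE).then("active")
    .otherwise("inactive");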
flink_TypeExtractor_createTypeInfoFromInput | /**
* Finds the type information to a type variable.
*
     * <p>It solves the following:
     *
     * <p>Return the type information for "returnTypeVar" given that "inType" has type information
     * "inTypeInfo". Thus "inType" must contain "returnTypeVar" in the "inputTypeHierarchy", otherwise
* null is returned.
*/
private <IN1> TypeInformation<?> createTypeInfoFromInput(
TypeVariable<?> returnTypeVar,
List<Type> inputTypeHierarchy,
Type inType,
TypeInformation<IN1> inTypeInfo) {
TypeInformation<?> info = null;
// use a factory to find corresponding type information to type variable
final List<Type> factoryHierarchy = new ArrayList<>(inputTypeHierarchy);
final TypeInfoFactory<?> factory = getClosestFactory(factoryHierarchy, inType);
if (factory != null) {
// the type that defines the factory is last in factory hierarchy
final Type factoryDefiningType = factoryHierarchy.get(factoryHierarchy.size() - 1);
            // defining type has generics, the factory needs to be asked for a mapping of subtypes to
// type information
if (factoryDefiningType instanceof ParameterizedType) {
final Type[] typeParams = typeToClass(factoryDefiningType).getTypeParameters();
final Type[] actualParams =
((ParameterizedType) factoryDefiningType).getActualTypeArguments();
// go thru all elements and search for type variables
for (int i = 0; i < actualParams.length; i++) {
final Map<String, TypeInformation<?>> componentInfo =
inTypeInfo.getGenericParameters();
final String typeParamName = typeParams[i].toString();
if (!componentInfo.containsKey(typeParamName)
|| componentInfo.get(typeParamName) == null) {
throw new InvalidTypesException(
"TypeInformation '"
+ inTypeInfo.getClass().getSimpleName()
+ "' does not supply a mapping of TypeVariable '"
+ typeParamName
+ "' to corresponding TypeInformation. "
+ "Input type inference can only produce a result with this information. "
+ "Please implement method 'TypeInformation.getGenericParameters()' for this.");
}
info =
createTypeInfoFromInput(
returnTypeVar,
factoryHierarchy,
actualParams[i],
componentInfo.get(typeParamName));
if (info != null) {
break;
}
}
}
}
// the input is a type variable
else if (sameTypeVars(inType, returnTypeVar)) {
return inTypeInfo;
} else if (inType instanceof TypeVariable) {
Type resolvedInType =
materializeTypeVariable(inputTypeHierarchy, (TypeVariable<?>) inType);
if (resolvedInType != inType) {
info =
createTypeInfoFromInput(
returnTypeVar, inputTypeHierarchy, resolvedInType, inTypeInfo);
}
}
// input is an array
else if (inType instanceof GenericArrayType) {
TypeInformation<?> componentInfo = null;
if (inTypeInfo instanceof BasicArrayTypeInfo) {
componentInfo = ((BasicArrayTypeInfo<?, ?>) inTypeInfo).getComponentInfo();
} else if (inTypeInfo instanceof PrimitiveArrayTypeInfo) {
componentInfo =
BasicTypeInfo.getInfoFor(inTypeInfo.getTypeClass().getComponentType());
} else if (inTypeInfo instanceof ObjectArrayTypeInfo) {
componentInfo = ((ObjectArrayTypeInfo<?, ?>) inTypeInfo).getComponentInfo();
}
info =
createTypeInfoFromInput(
returnTypeVar,
inputTypeHierarchy,
((GenericArrayType) inType).getGenericComponentType(),
componentInfo);
}
// the input is a tuple
else if (inTypeInfo instanceof TupleTypeInfo
&& isClassType(inType)
&& Tuple.class.isAssignableFrom(typeToClass(inType))) {
ParameterizedType tupleBaseClass;
// get tuple from possible tuple subclass
while (!(isClassType(inType)
&& typeToClass(inType).getSuperclass().equals(Tuple.class))) {
inputTypeHierarchy.add(inType);
inType = typeToClass(inType).getGenericSuperclass();
}
inputTypeHierarchy.add(inType);
// we can assume to be parameterized since we
// already did input validation
tupleBaseClass = (ParameterizedType) inType;
Type[] tupleElements = tupleBaseClass.getActualTypeArguments();
// go thru all tuple elements and search for type variables
for (int i = 0; i < tupleElements.length; i++) {
info =
createTypeInfoFromInput(
returnTypeVar,
inputTypeHierarchy,
tupleElements[i],
((TupleTypeInfo<?>) inTypeInfo).getTypeAt(i));
if (info != null) {
break;
}
}
}
// the input is a pojo
else if (inTypeInfo instanceof PojoTypeInfo && isClassType(inType)) {
// build the entire type hierarchy for the pojo
getTypeHierarchy(inputTypeHierarchy, inType, Object.class);
// determine a field containing the type variable
List<Field> fields = getAllDeclaredFields(typeToClass(inType), false);
for (Field field : fields) {
Type fieldType = field.getGenericType();
if (fieldType instanceof TypeVariable
&& sameTypeVars(
returnTypeVar,
materializeTypeVariable(
inputTypeHierarchy, (TypeVariable<?>) fieldType))) {
return getTypeOfPojoField(inTypeInfo, field);
} else if (fieldType instanceof ParameterizedType
|| fieldType instanceof GenericArrayType) {
List<Type> typeHierarchyWithFieldType = new ArrayList<>(inputTypeHierarchy);
typeHierarchyWithFieldType.add(fieldType);
TypeInformation<?> foundInfo =
createTypeInfoFromInput(
returnTypeVar,
typeHierarchyWithFieldType,
fieldType,
getTypeOfPojoField(inTypeInfo, field));
if (foundInfo != null) {
return foundInfo;
}
}
}
}
return info;
} | 3.68 |
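The InvalidTypesException above asks custom TypeInformation implementations to expose their type-variable mapping; a hedged, fragment-level sketch of that override for a hypothetical MyOptionTypeInfo<T> that stores its element type in a field named elementType.

@Override
public Map<String, TypeInformation<?>> getGenericParameters() {
  // The key must match the type variable declared on the wrapped type, i.e. "T".
  Map<String, TypeInformation<?>> parameters = new HashMap<>(1);
  parameters.put("T", elementType);
  return parameters;
}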
hadoop_AbstractClientRequestInterceptor_shutdown | /**
* Disposes the {@link ClientRequestInterceptor}.
*/
@Override
public void shutdown() {
if (this.nextInterceptor != null) {
this.nextInterceptor.shutdown();
}
} | 3.68 |
AreaShop_RentRegion_getTimeLeftString | /**
* Get a formatted string indicating the rent time that is left.
* @return Time left on the rent, for example '29 days', '3 months', '1 second'
*/
public String getTimeLeftString() {
return Utils.millisToHumanFormat(getTimeLeft());
} | 3.68 |
hadoop_BufferData_setCaching | /**
* Indicates that a caching operation is in progress.
*
* @param actionFuture the {@code Future} of a caching action.
*
* @throws IllegalArgumentException if actionFuture is null.
*/
public synchronized void setCaching(Future<Void> actionFuture) {
Validate.checkNotNull(actionFuture, "actionFuture");
this.throwIfStateIncorrect(State.PREFETCHING, State.READY);
this.state = State.CACHING;
this.action = actionFuture;
} | 3.68 |
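A hedged sketch of handing a buffer its caching future; the executor, the cacheStore helper, and its write(...) method are assumptions.

ExecutorService executor = Executors.newSingleThreadExecutor();
Future<Void> cachingAction = executor.submit(() -> {
  cacheStore.write(data); // assumed helper that persists the buffer contents
  return null;
});
// Requires the buffer to be in PREFETCHING or READY; transitions it to CACHING.
data.setCaching(cachingAction);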
hbase_MetricsConnection_getPutTracker | /** putTracker metric */
public CallTracker getPutTracker() {
return putTracker;
} | 3.68 |
framework_VAbstractDropHandler_dragOver | /**
* Default implementation does nothing.
*/
@Override
public void dragOver(VDragEvent drag) {
} | 3.68 |
framework_DetailsManagerConnector_getDetailsComponentConnectorId | /**
* Returns the connector id for a details component.
*
* @param rowIndex
* the row index of details component
* @return connector id; {@code null} if row or id is not found
*/
private String getDetailsComponentConnectorId(int rowIndex) {
JsonObject row = getWidget().getDataSource().getRow(rowIndex);
if (row == null || !row.hasKey(GridState.JSONKEY_DETAILS_VISIBLE)
|| row.getString(GridState.JSONKEY_DETAILS_VISIBLE).isEmpty()) {
return null;
}
return row.getString(GridState.JSONKEY_DETAILS_VISIBLE);
} | 3.68 |