name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68)
---|---|---|
hadoop_RouterQuotaManager_clear | /**
* Clean up the cache.
*/
public void clear() {
writeLock.lock();
try {
this.cache.clear();
} finally {
writeLock.unlock();
}
} | 3.68 |
Activiti_TreeValueExpression_isReadOnly | /**
* Evaluates the expression as an lvalue and determines if {@link #setValue(ELContext, Object)}
* will always fail.
* @param context used to resolve properties (<code>base.property</code> and <code>base[property]</code>)
* and to determine the result from the last base/property pair
* @return <code>true</code> if {@link #setValue(ELContext, Object)} always fails.
* @throws ELException if evaluation fails (e.g. property not found, type conversion failed, ...)
*/
@Override
public boolean isReadOnly(ELContext context) throws ELException {
return node.isReadOnly(bindings, context);
} | 3.68 |
hibernate-validator_GroupConversionHelper_convertGroup | /**
* Converts the given validation group as per the group conversion
* configuration for this property (as e.g. specified via
* {@code @ConvertGroup}).
*
* @param from The group to convert.
*
* @return The converted group. Will be the original group itself in case no
* conversion is to be performed.
*/
public Class<?> convertGroup(Class<?> from) {
Class<?> to = groupConversions.get( from );
return to != null ? to : from;
} | 3.68 |
flink_PanedWindowProcessFunction_isPaneLate | /** Checks whether the pane is late (i.e. can be / has been cleaned up). */
private boolean isPaneLate(W pane) {
// whether the pane is late depends on whether the last window the pane belongs to is late
return windowAssigner.isEventTime() && isWindowLate(windowAssigner.getLastWindow(pane));
} | 3.68 |
framework_VaadinFinderLocatorStrategy_eliminateDuplicates | /**
* Go through a list, removing all duplicate elements from it. This method
* is used to avoid accumulation of duplicate entries in result lists
* resulting from low-context recursion.
*
* Preserves first entry in list, removes others. Preserves list order.
*
* @return list passed as parameter, after modification
*/
private final <T> List<T> eliminateDuplicates(List<T> list) {
LinkedHashSet<T> set = new LinkedHashSet<>(list);
list.clear();
list.addAll(set);
return list;
} | 3.68 |
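Since the de-duplication helper above is private, here is a minimal standalone sketch of the same logic (class and method names are hypothetical), showing that the first occurrence wins and list order is preserved:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

public class EliminateDuplicatesExample {

    // Same approach as the helper above: LinkedHashSet drops duplicates but keeps insertion order.
    static <T> List<T> eliminateDuplicates(List<T> list) {
        LinkedHashSet<T> set = new LinkedHashSet<>(list);
        list.clear();
        list.addAll(set);
        return list;
    }

    public static void main(String[] args) {
        List<String> hits = new ArrayList<>(Arrays.asList("a", "b", "a", "c", "b"));
        System.out.println(eliminateDuplicates(hits)); // prints [a, b, c]
    }
}
```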
zxing_MinimalEncoder_getLastASCII | /** Peeks ahead and returns 1 if the postfix consists of exactly two digits, 2 if the postfix consists of exactly
* two consecutive digits and a non-extended character or of 4 digits.
* Returns 0 in any other case.
**/
int getLastASCII() {
int length = input.length();
int from = fromPosition + characterLength;
if (length - from > 4 || from >= length) {
return 0;
}
if (length - from == 1) {
if (isExtendedASCII(input.charAt(from), input.getFNC1Character())) {
return 0;
}
return 1;
}
if (length - from == 2) {
if (isExtendedASCII(input.charAt(from), input.getFNC1Character()) || isExtendedASCII(input.charAt(from + 1),
input.getFNC1Character())) {
return 0;
}
if (HighLevelEncoder.isDigit(input.charAt(from)) && HighLevelEncoder.isDigit(input.charAt(from + 1))) {
return 1;
}
return 2;
}
if (length - from == 3) {
if (HighLevelEncoder.isDigit(input.charAt(from)) && HighLevelEncoder.isDigit(input.charAt(from + 1))
&& !isExtendedASCII(input.charAt(from + 2), input.getFNC1Character())) {
return 2;
}
if (HighLevelEncoder.isDigit(input.charAt(from + 1)) && HighLevelEncoder.isDigit(input.charAt(from + 2))
&& !isExtendedASCII(input.charAt(from), input.getFNC1Character())) {
return 2;
}
return 0;
}
if (HighLevelEncoder.isDigit(input.charAt(from)) && HighLevelEncoder.isDigit(input.charAt(from + 1))
&& HighLevelEncoder.isDigit(input.charAt(from + 2)) && HighLevelEncoder.isDigit(input.charAt(from + 3))) {
return 2;
}
return 0;
} | 3.68 |
hbase_MemStoreLABImpl_copyBBECellInto | /**
* Mostly a duplicate of {@link #copyCellInto(Cell, int)} done for perf's sake. It presumes
* ByteBufferExtendedCell instead of Cell so we deal with a specific type rather than the super
* generic Cell. Removes instanceof checks. Shrinkage is enough to make this inline where before
* it was too big. Uses less CPU. See HBASE-20875 for evidence.
* @see #copyCellInto(Cell, int)
*/
private Cell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) {
int size = cell.getSerializedSize();
Preconditions.checkArgument(size >= 0, "negative size");
// Callers should satisfy large allocations from JVM heap so limit fragmentation.
if (size > maxAlloc) {
return null;
}
Chunk c = null;
int allocOffset = 0;
while (true) {
// Try to get the chunk
c = getOrMakeChunk();
// We may get null because some other thread succeeded in getting the lock
// and so the current thread has to try again to make its chunk or grab the chunk
// that the other thread created
// Try to allocate from this chunk
if (c != null) {
allocOffset = c.alloc(size);
if (allocOffset != -1) {
// We succeeded - this is the common case - small alloc
// from a big buffer
break;
}
// not enough space!
// try to retire this chunk
tryRetireChunk(c);
}
}
return copyBBECToChunkCell(cell, c.getData(), allocOffset, size);
} | 3.68 |
morf_AbstractSqlDialectTest_testCastToBoolean | /**
* Tests the output of a cast to a boolean.
*/
@Test
public void testCastToBoolean() {
String result = testDialect.getSqlFrom(new Cast(new FieldReference("value"), DataType.BOOLEAN, 10));
assertEquals(expectedBooleanCast(), result);
} | 3.68 |
hbase_ZKConfig_makeZKPropsFromHbaseConfig | /**
* Make a Properties object holding ZooKeeper config. Parses the corresponding config options from
* the HBase XML configs and generates the appropriate ZooKeeper properties.
* @param conf Configuration to read from.
* @return Properties holding mappings representing ZooKeeper config file.
*/
private static Properties makeZKPropsFromHbaseConfig(Configuration conf) {
Properties zkProperties = new Properties();
// Directly map all of the hbase.zookeeper.property.KEY properties.
// Synchronize on conf so no loading of configs while we iterate
synchronized (conf) {
for (Entry<String, String> entry : conf) {
String key = entry.getKey();
if (key.startsWith(HConstants.ZK_CFG_PROPERTY_PREFIX)) {
String zkKey = key.substring(HConstants.ZK_CFG_PROPERTY_PREFIX_LEN);
String value = entry.getValue();
// If the value has variable substitutions, we need to do a get.
if (value.contains(VARIABLE_START)) {
value = conf.get(key);
}
zkProperties.setProperty(zkKey, value);
}
}
}
// If clientPort is not set, assign the default.
if (zkProperties.getProperty(HConstants.CLIENT_PORT_STR) == null) {
zkProperties.put(HConstants.CLIENT_PORT_STR, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT);
}
// Create the server.X properties.
int peerPort = conf.getInt("hbase.zookeeper.peerport", 2888);
int leaderPort = conf.getInt("hbase.zookeeper.leaderport", 3888);
final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
String serverHost;
String address;
String key;
for (int i = 0; i < serverHosts.length; ++i) {
if (serverHosts[i].contains(":")) {
serverHost = serverHosts[i].substring(0, serverHosts[i].indexOf(':'));
} else {
serverHost = serverHosts[i];
}
address = serverHost + ":" + peerPort + ":" + leaderPort;
key = "server." + i;
zkProperties.put(key, address);
}
return zkProperties;
} | 3.68 |
dubbo_StringUtils_toCommaDelimitedString | /**
* Create the comma-delimited {@link String} from one or more {@link String} members
*
* @param one one {@link String}
* @param others others {@link String}
* @return <code>null</code> if <code>one</code> or <code>others</code> is <code>null</code>
* @since 2.7.8
*/
public static String toCommaDelimitedString(String one, String... others) {
String another = arrayToDelimitedString(others, COMMA_SEPARATOR);
return isEmpty(another) ? one : one + COMMA_SEPARATOR + another;
} | 3.68 |
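A small standalone sketch of the joining behaviour above, under the assumptions that COMMA_SEPARATOR is "," and that arrayToDelimitedString simply joins the varargs with that separator:

```java
public class CommaDelimitedExample {

    // Simplified re-implementation for illustration only; mirrors the empty-varargs handling above.
    static String toCommaDelimitedString(String one, String... others) {
        if (others == null || others.length == 0) {
            return one;
        }
        return one + "," + String.join(",", others);
    }

    public static void main(String[] args) {
        System.out.println(toCommaDelimitedString("one"));                  // one
        System.out.println(toCommaDelimitedString("one", "two", "three"));  // one,two,three
    }
}
```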
pulsar_ConcurrentLongHashMap_forEach | /**
* Iterate over all the entries in the map and apply the processor function to each of them.
* <p>
* <b>Warning: this iteration does not guarantee thread safety.</b>
* @param processor the processor to apply to each entry
*/
public void forEach(EntryProcessor<V> processor) {
for (int i = 0; i < sections.length; i++) {
sections[i].forEach(processor);
}
} | 3.68 |
flink_AvroDeserializationSchema_forGeneric | /**
* Creates {@link AvroDeserializationSchema} that produces {@link GenericRecord} using provided
* schema.
*
* @param schema schema of produced records
* @param encoding Avro serialization approach to use for decoding
* @return a deserialization schema producing records in the form of {@link GenericRecord}
*/
public static AvroDeserializationSchema<GenericRecord> forGeneric(
Schema schema, AvroEncoding encoding) {
return new AvroDeserializationSchema<>(GenericRecord.class, schema, encoding);
} | 3.68 |
hadoop_Signer_sign | /**
* Returns a signed string.
*
* @param str string to sign.
*
* @return the signed string.
*/
public synchronized String sign(String str) {
if (str == null || str.length() == 0) {
throw new IllegalArgumentException("NULL or empty string to sign");
}
byte[] secret = secretProvider.getCurrentSecret();
String signature = computeSignature(secret, str);
return str + SIGNATURE + signature;
} | 3.68 |
framework_Escalator_getCalculatedColumnsWidth | /**
* Calculates the width of the columns in a given range.
*
* @param columns
* the columns to calculate
* @return the total width of the columns in the given
* <code>columns</code>
*/
double getCalculatedColumnsWidth(final Range columns) {
/*
* This is an assert instead of an exception, since this is an
* internal method.
*/
assert columns
.isSubsetOf(Range.between(0, getColumnCount())) : "Range "
+ "was outside of current column range (i.e.: "
+ Range.between(0, getColumnCount())
+ ", but was given :" + columns;
double sum = 0;
for (int i = columns.getStart(); i < columns.getEnd(); i++) {
double columnWidthActual = getColumnWidthActual(i);
sum += columnWidthActual;
}
return sum;
} | 3.68 |
framework_GridDropTarget_setDropMode | /**
* Sets the drop mode of this drop target.
* <p>
* When using {@link DropMode#ON_TOP}, and the grid is either empty or has
* empty space after the last row, the drop can still happen on the empty
* space, and the {@link GridDropEvent#getDropTargetRow()} will return an
* empty optional.
* <p>
* When using {@link DropMode#BETWEEN} or
* {@link DropMode#ON_TOP_OR_BETWEEN}, and there is at least one row in the
* grid, any drop after the last row in the grid will get the last row as
* the {@link GridDropEvent#getDropTargetRow()}. If there are no rows in the
* grid, then it will return an empty optional.
* <p>
* If using {@link DropMode#ON_GRID}, then the drop will not happen on any
* row, but instead just "on the grid". The target row will not be present
* in this case.
* <p>
* <em>NOTE: {@link DropMode#ON_GRID} is used automatically when the grid
* has been sorted and {@link #setDropAllowedOnRowsWhenSorted(boolean)} is
* {@code false} - since the drop location would not necessarily match the
* correct row because of the sorting. During the sorting, any calls to this
* method don't have any effect until the sorting has been removed, or
* {@link #setDropAllowedOnRowsWhenSorted(boolean)} is set back to
* {@code true}.</em>
*
* @param dropMode
* Drop mode that describes the allowed drop locations within the
* Grid's row.
* @see GridDropEvent#getDropLocation()
* @see #setDropAllowedOnRowsWhenSorted(boolean)
*/
public void setDropMode(DropMode dropMode) {
if (dropMode == null) {
throw new IllegalArgumentException("Drop mode cannot be null");
}
if (cachedDropMode != null) {
cachedDropMode = dropMode;
} else {
internalSetDropMode(dropMode);
}
} | 3.68 |
flink_ParameterTool_getConfiguration | /**
* Returns a {@link Configuration} object from this {@link ParameterTool}.
*
* @return A {@link Configuration}
*/
public Configuration getConfiguration() {
final Configuration conf = new Configuration();
for (Map.Entry<String, String> entry : data.entrySet()) {
conf.setString(entry.getKey(), entry.getValue());
}
return conf;
} | 3.68 |
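A brief usage sketch of the conversion above; fromArgs and the package locations are standard Flink API, but treat the concrete key name as a hypothetical example:

```java
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;

public class ParameterToolToConfigurationExample {
    public static void main(String[] args) {
        // e.g. args = {"--checkpoint.dir", "/tmp/checkpoints"}
        ParameterTool params = ParameterTool.fromArgs(args);
        Configuration conf = params.getConfiguration();
        // Every parameter is now available as a plain string entry in the Configuration.
        System.out.println(conf.getString("checkpoint.dir", "<not set>"));
    }
}
```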
hbase_FSTableDescriptors_writeTableDescriptor | /**
* Attempts to write a new table descriptor to the given table's directory. It begins at the
* currentSequenceId + 1 and tries 10 times to find a new sequence number not already in use.
* <p/>
* Removes the current descriptor file if passed in.
* @return Descriptor file or null if the write failed.
*/
private static Path writeTableDescriptor(final FileSystem fs, final TableDescriptor td,
final Path tableDir, final FileStatus currentDescriptorFile) throws IOException {
// Here we will write to the final directory directly to avoid renaming as on OSS renaming is
// not atomic and has performance issue. The reason why we could do this is that, in the below
// code we will not overwrite existing files, we will write a new file instead. And when
// loading, we will skip the half written file, please see the code in getTableDescriptorFromFs
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
// In proc v2 we have table lock so typically, there will be no concurrent writes. Keep the
// retry logic here since we may still want to write the table descriptor from for example,
// HBCK2?
int currentSequenceId = currentDescriptorFile == null
? 0
: getTableInfoSequenceIdAndFileLength(currentDescriptorFile.getPath()).sequenceId;
// Put arbitrary upperbound on how often we retry
int maxAttempts = 10;
int maxSequenceId = currentSequenceId + maxAttempts;
byte[] bytes = TableDescriptorBuilder.toByteArray(td);
for (int newSequenceId = currentSequenceId + 1; newSequenceId
<= maxSequenceId; newSequenceId++) {
String fileName = getTableInfoFileName(newSequenceId, bytes);
Path filePath = new Path(tableInfoDir, fileName);
try (FSDataOutputStream out = fs.create(filePath, false)) {
out.write(bytes);
} catch (FileAlreadyExistsException e) {
LOG.debug("{} exists; retrying up to {} times", filePath, maxAttempts, e);
continue;
} catch (IOException e) {
LOG.debug("Failed write {}; retrying up to {} times", filePath, maxAttempts, e);
continue;
}
deleteTableDescriptorFiles(fs, tableInfoDir, newSequenceId - 1);
return filePath;
}
return null;
} | 3.68 |
hadoop_BufferPool_tryAcquire | /**
* Acquires a buffer if one is immediately available. Otherwise returns null.
* @param blockNumber the id of the block to try to acquire.
* @return the acquired block's {@code BufferData} or null.
*/
public synchronized BufferData tryAcquire(int blockNumber) {
return acquireHelper(blockNumber, false);
} | 3.68 |
flink_MiniCluster_start | /**
* Starts the mini cluster, based on the configured properties.
*
* @throws Exception This method passes on any exception that occurs during the startup of the
* mini cluster.
*/
public void start() throws Exception {
synchronized (lock) {
checkState(!running, "MiniCluster is already running");
LOG.info("Starting Flink Mini Cluster");
LOG.debug("Using configuration {}", miniClusterConfiguration);
final Configuration configuration = miniClusterConfiguration.getConfiguration();
final boolean useSingleRpcService =
miniClusterConfiguration.getRpcServiceSharing() == RpcServiceSharing.SHARED;
try {
workingDirectory =
WorkingDirectory.create(
ClusterEntrypointUtils.generateWorkingDirectoryFile(
configuration,
Optional.of(PROCESS_WORKING_DIR_BASE),
"minicluster_" + ResourceID.generate()));
initializeIOFormatClasses(configuration);
rpcSystem = rpcSystemSupplier.get();
LOG.info("Starting Metrics Registry");
metricRegistry =
createMetricRegistry(
configuration,
rpcSystem.deref().getMaximumMessageSizeInBytes(configuration));
// bring up all the RPC services
LOG.info("Starting RPC Service(s)");
final RpcServiceFactory dispatcherResourceManagerComponentRpcServiceFactory;
final RpcService metricQueryServiceRpcService;
if (useSingleRpcService) {
// we always need the 'commonRpcService' for auxiliary calls
commonRpcService = createLocalRpcService(configuration, rpcSystem.deref());
final CommonRpcServiceFactory commonRpcServiceFactory =
new CommonRpcServiceFactory(commonRpcService);
taskManagerRpcServiceFactory = commonRpcServiceFactory;
dispatcherResourceManagerComponentRpcServiceFactory = commonRpcServiceFactory;
metricQueryServiceRpcService =
MetricUtils.startLocalMetricsRpcService(
configuration, rpcSystem.deref());
} else {
// start a new service per component, possibly with custom bind addresses
final String jobManagerExternalAddress =
miniClusterConfiguration.getJobManagerExternalAddress();
final String taskManagerExternalAddress =
miniClusterConfiguration.getTaskManagerExternalAddress();
final String jobManagerExternalPortRange =
miniClusterConfiguration.getJobManagerExternalPortRange();
final String taskManagerExternalPortRange =
miniClusterConfiguration.getTaskManagerExternalPortRange();
final String jobManagerBindAddress =
miniClusterConfiguration.getJobManagerBindAddress();
final String taskManagerBindAddress =
miniClusterConfiguration.getTaskManagerBindAddress();
dispatcherResourceManagerComponentRpcServiceFactory =
new DedicatedRpcServiceFactory(
configuration,
jobManagerExternalAddress,
jobManagerExternalPortRange,
jobManagerBindAddress,
rpcSystem.deref());
taskManagerRpcServiceFactory =
new DedicatedRpcServiceFactory(
configuration,
taskManagerExternalAddress,
taskManagerExternalPortRange,
taskManagerBindAddress,
rpcSystem.deref());
// we always need the 'commonRpcService' for auxiliary calls
// bind to the JobManager address with port 0
commonRpcService =
createRemoteRpcService(
configuration, jobManagerBindAddress, 0, rpcSystem.deref());
metricQueryServiceRpcService =
MetricUtils.startRemoteMetricsRpcService(
configuration,
commonRpcService.getAddress(),
null,
rpcSystem.deref());
}
metricRegistry.startQueryService(metricQueryServiceRpcService, null);
processMetricGroup =
MetricUtils.instantiateProcessMetricGroup(
metricRegistry,
RpcUtils.getHostname(commonRpcService),
ConfigurationUtils.getSystemResourceMetricsProbingInterval(
configuration));
ioExecutor =
Executors.newFixedThreadPool(
ClusterEntrypointUtils.getPoolSize(configuration),
new ExecutorThreadFactory("mini-cluster-io"));
delegationTokenManager =
DefaultDelegationTokenManagerFactory.create(
configuration,
miniClusterConfiguration.getPluginManager(),
commonRpcService.getScheduledExecutor(),
ioExecutor);
// Obtaining delegation tokens and propagating them to the local JVM receivers in a
// one-time fashion is required because BlobServer may connect to external file
// systems
delegationTokenManager.obtainDelegationTokens();
delegationTokenReceiverRepository =
new DelegationTokenReceiverRepository(
configuration, miniClusterConfiguration.getPluginManager());
haServicesFactory = createHighAvailabilityServicesFactory(configuration);
haServices = createHighAvailabilityServices(configuration, ioExecutor);
blobServer =
BlobUtils.createBlobServer(
configuration,
Reference.borrowed(workingDirectory.getBlobStorageDirectory()),
haServices.createBlobStore());
blobServer.start();
heartbeatServices = HeartbeatServices.fromConfiguration(configuration);
blobCacheService =
BlobUtils.createBlobCacheService(
configuration,
Reference.borrowed(workingDirectory.getBlobStorageDirectory()),
haServices.createBlobStore(),
new InetSocketAddress(
InetAddress.getLocalHost(), blobServer.getPort()));
startTaskManagers();
MetricQueryServiceRetriever metricQueryServiceRetriever =
new RpcMetricQueryServiceRetriever(
metricRegistry.getMetricQueryServiceRpcService());
setupDispatcherResourceManagerComponents(
configuration,
dispatcherResourceManagerComponentRpcServiceFactory,
metricQueryServiceRetriever);
resourceManagerLeaderRetriever = haServices.getResourceManagerLeaderRetriever();
dispatcherLeaderRetriever = haServices.getDispatcherLeaderRetriever();
clusterRestEndpointLeaderRetrievalService =
haServices.getClusterRestEndpointLeaderRetriever();
dispatcherGatewayRetriever =
new RpcGatewayRetriever<>(
commonRpcService,
DispatcherGateway.class,
DispatcherId::fromUuid,
new ExponentialBackoffRetryStrategy(
21, Duration.ofMillis(5L), Duration.ofMillis(20L)));
resourceManagerGatewayRetriever =
new RpcGatewayRetriever<>(
commonRpcService,
ResourceManagerGateway.class,
ResourceManagerId::fromUuid,
new ExponentialBackoffRetryStrategy(
21, Duration.ofMillis(5L), Duration.ofMillis(20L)));
webMonitorLeaderRetriever = new LeaderRetriever();
resourceManagerLeaderRetriever.start(resourceManagerGatewayRetriever);
dispatcherLeaderRetriever.start(dispatcherGatewayRetriever);
clusterRestEndpointLeaderRetrievalService.start(webMonitorLeaderRetriever);
} catch (Exception e) {
// cleanup everything
try {
close();
} catch (Exception ee) {
e.addSuppressed(ee);
}
throw e;
}
// create a new termination future
terminationFuture = new CompletableFuture<>();
// now officially mark this as running
running = true;
LOG.info("Flink Mini Cluster started successfully");
}
} | 3.68 |
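A hedged sketch of how start() is typically driven in tests; the exact MiniClusterConfiguration builder setters are an assumption:

```java
import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;

public class MiniClusterStartExample {
    public static void main(String[] args) throws Exception {
        MiniClusterConfiguration config = new MiniClusterConfiguration.Builder()
                .setNumTaskManagers(1)
                .setNumSlotsPerTaskManager(2)
                .build();
        // MiniCluster is auto-closeable, so try-with-resources shuts it down again.
        try (MiniCluster miniCluster = new MiniCluster(config)) {
            miniCluster.start();
            // ... submit test jobs against the running cluster here ...
        }
    }
}
```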
morf_AbstractSqlDialectTest_expectedSqlForMathOperations6 | /**
* @return expected SQL for math operation 6
*/
protected String expectedSqlForMathOperations6() {
return "a + b / (c - d)";
} | 3.68 |
hbase_IndividualBytesFieldCell_getQualifierArray | // 3) Qualifier
@Override
public byte[] getQualifierArray() {
// Qualifier could be null
return (qualifier == null) ? HConstants.EMPTY_BYTE_ARRAY : qualifier;
} | 3.68 |
streampipes_AdapterResourceManager_encryptAndCreate | /**
* Takes an {@link AdapterDescription}, encrypts the password properties and stores it in the database
*
* @param adapterDescription input adapter description
* @return the id of the created adapter
*/
public String encryptAndCreate(AdapterDescription adapterDescription) {
AdapterDescription encryptedAdapterDescription = cloneAndEncrypt(adapterDescription);
encryptedAdapterDescription.setRev(null);
return db.storeAdapter(encryptedAdapterDescription);
} | 3.68 |
framework_AbstractMultiSelect_updateSelection | /**
* Updates the selection by adding and removing the given items.
*
* @param addedItems
* the items added to selection, not {@code null}
* @param removedItems
* the items removed from selection, not {@code null}
* @param userOriginated
* {@code true} if this was user originated, {@code false} if not
*/
protected void updateSelection(Set<T> addedItems, Set<T> removedItems,
boolean userOriginated) {
Objects.requireNonNull(addedItems);
Objects.requireNonNull(removedItems);
// if there are duplicates, some item is both added & removed, just
// discard that and leave things as they were before
DataProvider<T, ?> dataProvider = internalGetDataProvider();
addedItems.removeIf(item -> {
Object addedId = dataProvider.getId(item);
return removedItems.stream().map(dataProvider::getId).anyMatch(
addedId::equals) ? removedItems.remove(item) : false;
});
if (isAllSelected(addedItems) && isNoneSelected(removedItems)) {
return;
}
updateSelection(set -> {
// order of add / remove does not matter since no duplicates
set.removeIf(item -> {
Object itemId = dataProvider.getId(item);
return removedItems.stream().map(dataProvider::getId)
.anyMatch(itemId::equals);
});
set.addAll(addedItems);
}, userOriginated);
} | 3.68 |
framework_VDragAndDropManager_get | /**
* Returns the current drag and drop manager instance. If one doesn't exist
* yet, it's created.
*
* @return the current drag and drop manager
*/
public static VDragAndDropManager get() {
if (instance == null) {
instance = GWT.create(VDragAndDropManager.class);
}
return instance;
} | 3.68 |
hbase_StorageClusterStatusModel_setReadRequestsCount | /**
* @param readRequestsCount The current total read requests made to region
*/
public void setReadRequestsCount(long readRequestsCount) {
this.readRequestsCount = readRequestsCount;
} | 3.68 |
zxing_AlignmentPattern_combineEstimate | /**
* Combines this object's current estimate of an alignment pattern position and module size
* with a new estimate. It returns a new {@code AlignmentPattern} containing an average of the two.
*/
AlignmentPattern combineEstimate(float i, float j, float newModuleSize) {
float combinedX = (getX() + j) / 2.0f;
float combinedY = (getY() + i) / 2.0f;
float combinedModuleSize = (estimatedModuleSize + newModuleSize) / 2.0f;
return new AlignmentPattern(combinedX, combinedY, combinedModuleSize);
} | 3.68 |
pulsar_ManagedCursorImpl_asyncReplayEntries | /**
* Async replays the given positions: (a) before reading, it filters out already-acked messages; (b) reads the remaining
* entries asynchronously and passes them to the given ReadEntriesCallback; (c) returns all already-acked messages that
* were not replayed, so the caller (Dispatcher) can remove them from its replay list and won't try to replay them again
*
*/
@Override
public Set<? extends Position> asyncReplayEntries(final Set<? extends Position> positions,
ReadEntriesCallback callback, Object ctx) {
return asyncReplayEntries(positions, callback, ctx, false);
} | 3.68 |
hadoop_FileIoProvider_openAndSeek | /**
* Create a FileInputStream using
* {@link FileInputStream#FileInputStream(File)} and position
* it at the given offset.
*
* Wraps the created input stream to intercept read calls
* before delegating to the wrapped stream.
*
* @param volume target volume. null if unavailable.
* @param f File object.
* @param offset the offset position, measured in bytes from the
* beginning of the file, at which to set the file
* pointer.
* @throws IOException if the file cannot be opened or positioned at the given offset.
*/
public FileInputStream openAndSeek(
@Nullable FsVolumeSpi volume, File f, long offset) throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
FileInputStream fis = null;
try {
faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
fis = new WrappedFileInputStream(volume,
FsDatasetUtil.openAndSeek(f, offset));
profilingEventHook.afterMetadataOp(volume, OPEN, begin);
return fis;
} catch(Exception e) {
IOUtils.closeStream(fis);
onFailure(volume, begin);
throw e;
}
} | 3.68 |
pulsar_ProducerConfiguration_setBlockIfQueueFull | /**
* Set whether the {@link Producer#send} and {@link Producer#sendAsync} operations should block when the outgoing
* message queue is full.
* <p>
* Default is <code>false</code>. If set to <code>false</code>, send operations will immediately fail with
* {@link PulsarClientException.ProducerQueueIsFullError} when there is no space left in pending queue.
*
* @param blockIfQueueFull
* whether to block {@link Producer#send} and {@link Producer#sendAsync} operations on queue full
* @return the producer configuration instance
*/
public ProducerConfiguration setBlockIfQueueFull(boolean blockIfQueueFull) {
conf.setBlockIfQueueFull(blockIfQueueFull);
return this;
} | 3.68 |
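A brief usage fragment (hedged: assumes ProducerConfiguration, the legacy Pulsar client config shown above, can be instantiated directly):

```java
// Sketch only: make send()/sendAsync() block instead of failing fast when the pending queue is full.
ProducerConfiguration producerConf = new ProducerConfiguration()
        .setBlockIfQueueFull(true);
```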
flink_FsStateBackend_isUsingAsynchronousSnapshots | /**
* Gets whether the key/value data structures are asynchronously snapshotted, which is always
* true for this state backend.
*/
public boolean isUsingAsynchronousSnapshots() {
return true;
} | 3.68 |
framework_Label_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#readDesign(org.jsoup.nodes .Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
super.readDesign(design, designContext);
String innerHtml = design.html();
boolean plainText = design.hasAttr(DESIGN_ATTR_PLAIN_TEXT);
if (plainText) {
setContentMode(ContentMode.TEXT);
} else {
setContentMode(ContentMode.HTML);
}
if (innerHtml != null && !"".equals(innerHtml)) {
if (plainText) {
innerHtml = DesignFormatter.decodeFromTextNode(innerHtml);
}
setValue(innerHtml);
}
} | 3.68 |
morf_Function_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return type.toString() + "(" + StringUtils.join(arguments, ", ") + ")" + super.toString();
} | 3.68 |
framework_AbstractClientConnector_addExtension | /**
* Add an extension to this connector. This method is protected to allow
* extensions to select which targets they can extend.
*
* @param extension
* the extension to add
*/
protected void addExtension(Extension extension) {
ClientConnector previousParent = extension.getParent();
if (equals(previousParent)) {
// Nothing to do, already attached
return;
} else if (previousParent != null) {
throw new IllegalStateException(
"Moving an extension from one parent to another is not supported");
}
extensions.add(extension);
extension.setParent(this);
markAsDirty();
} | 3.68 |
hbase_RegionServerObserver_preRollWALWriterRequest | /**
* This will be called before executing user request to roll a region server WAL.
* @param ctx the environment to interact with the framework and region server.
*/
default void preRollWALWriterRequest(
final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
} | 3.68 |
flink_KeyedStateTransformation_transform | /**
* Method for passing user defined operators along with the type information that will transform
* the OperatorTransformation.
*
* <p><b>IMPORTANT:</b> Any output from this operator will be discarded.
*
* @param factory A factory returning transformation logic type of the return stream
* @return An {@link StateBootstrapTransformation} that can be added to a {@link Savepoint}.
*/
public StateBootstrapTransformation<T> transform(SavepointWriterOperatorFactory factory) {
return new StateBootstrapTransformation<>(
stream, operatorMaxParallelism, factory, keySelector, keyType);
} | 3.68 |
framework_AbstractMultiSelect_addValueChangeListener | /**
* Adds a value change listener. The listener is called when the selection
* set of this multi select is changed either by the user or
* programmatically.
*
* @see #addSelectionListener(MultiSelectionListener)
*
* @param listener
* the value change listener, not null
* @return a registration for the listener
*/
@Override
public Registration addValueChangeListener(
HasValue.ValueChangeListener<Set<T>> listener) {
return addSelectionListener(
event -> listener.valueChange(new ValueChangeEvent<>(this,
event.getOldValue(), event.isUserOriginated())));
} | 3.68 |
framework_VTree_nodeIsInBranch | /**
* Examines the children of the branch node and returns true if a node is in
* that branch
*
* @param node
* The node to search for
* @param branch
* The branch to search in
* @return True if found, false if not found
*/
private boolean nodeIsInBranch(TreeNode node, TreeNode branch) {
if (node == branch) {
return true;
}
for (TreeNode child : branch.getChildren()) {
if (child == node) {
return true;
}
if (!child.isLeaf() && child.getState()) {
if (nodeIsInBranch(node, child)) {
return true;
}
}
}
return false;
} | 3.68 |
flink_FlinkRelMetadataQuery_getUniqueGroups | /**
* Returns the (minimum) unique groups of the given columns.
*
* @param rel the relational expression
* @param columns the given columns in a specified relational expression. The given columns
* should not be null.
* @return the (minimum) unique columns which should be a sub-collection of the given columns,
* and should not be null or empty. If no unique columns can be found, the given
* columns are returned.
*/
public ImmutableBitSet getUniqueGroups(RelNode rel, ImmutableBitSet columns) {
for (; ; ) {
try {
Preconditions.checkArgument(columns != null);
if (columns.isEmpty()) {
return columns;
}
ImmutableBitSet uniqueGroups =
uniqueGroupsHandler.getUniqueGroups(rel, this, columns);
Preconditions.checkArgument(uniqueGroups != null && !uniqueGroups.isEmpty());
Preconditions.checkArgument(columns.contains(uniqueGroups));
return uniqueGroups;
} catch (JaninoRelMetadataProvider.NoHandler e) {
uniqueGroupsHandler = revise(e.relClass, FlinkMetadata.UniqueGroups.DEF);
}
}
} | 3.68 |
hbase_AsyncTable_batchAll | /**
* A simple version of batch. It will fail if there are any failures and you will get the whole
* result list at once if the operation succeeds.
* @param actions list of Get, Put, Delete, Increment, Append and RowMutations objects
* @return A list of the result for the actions. Wrapped by a {@link CompletableFuture}.
*/
default <T> CompletableFuture<List<T>> batchAll(List<? extends Row> actions) {
return allOf(batch(actions));
} | 3.68 |
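A hedged usage fragment for batchAll, assuming an existing AsyncTable instance named table and the standard HBase client classes (Get, Result, Bytes):

```java
// Sketch only: issue a batch of Gets and wait for the whole result list at once.
List<Get> gets = Arrays.asList(
        new Get(Bytes.toBytes("row1")),
        new Get(Bytes.toBytes("row2")));
CompletableFuture<List<Result>> future = table.batchAll(gets);
List<Result> results = future.join(); // completes exceptionally if any single action failed
```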
pulsar_BrokerInterceptor_messageProduced | /**
* Intercept after a message is produced.
*
* @param cnx client Connection
* @param producer Producer object
* @param publishContext Publish Context
*/
default void messageProduced(ServerCnx cnx, Producer producer, long startTimeNs, long ledgerId,
long entryId, Topic.PublishContext publishContext) {
} | 3.68 |
pulsar_PulsarClientImplementationBindingImpl_decodeKeyValueEncodingType | /**
* Decode the kv encoding type from the schema info.
*
* @param schemaInfo the schema info
* @return the kv encoding type
*/
public KeyValueEncodingType decodeKeyValueEncodingType(SchemaInfo schemaInfo) {
return KeyValueSchemaInfo.decodeKeyValueEncodingType(schemaInfo);
} | 3.68 |
pulsar_ConnectionPool_connectToResolvedAddresses | /**
* Try to connect to a sequence of IP addresses until a successful connection can be made, or fail if no
* address is working.
*/
private CompletableFuture<Channel> connectToResolvedAddresses(InetSocketAddress logicalAddress,
InetSocketAddress unresolvedPhysicalAddress,
Iterator<InetSocketAddress> resolvedPhysicalAddress,
InetSocketAddress sniHost) {
CompletableFuture<Channel> future = new CompletableFuture<>();
// Successfully connected to server
connectToAddress(logicalAddress, resolvedPhysicalAddress.next(), unresolvedPhysicalAddress, sniHost)
.thenAccept(future::complete)
.exceptionally(exception -> {
if (resolvedPhysicalAddress.hasNext()) {
// Try next IP address
connectToResolvedAddresses(logicalAddress, unresolvedPhysicalAddress,
resolvedPhysicalAddress, sniHost)
.thenAccept(future::complete)
.exceptionally(ex -> {
// This is already unwinding the recursive call
future.completeExceptionally(ex);
return null;
});
} else {
// Failed to connect to any IP address
future.completeExceptionally(exception);
}
return null;
});
return future;
} | 3.68 |
framework_Heartbeat_init | /**
* Initializes the heartbeat for the given application connection.
*
* @param applicationConnection
* the connection
*/
public void init(ApplicationConnection applicationConnection) {
connection = applicationConnection;
setInterval(connection.getConfiguration().getHeartbeatInterval());
uri = SharedUtil.addGetParameters(
connection.translateVaadinUri(
ApplicationConstants.APP_PROTOCOL_PREFIX
+ ApplicationConstants.HEARTBEAT_PATH + '/'),
UIConstants.UI_ID_PARAMETER + "="
+ connection.getConfiguration().getUIId());
connection.addHandler(
ApplicationConnection.ApplicationStoppedEvent.TYPE,
event -> setInterval(-1));
} | 3.68 |
flink_GeneratorFunction_open | /**
* Initialization method for the function. It is called once before the actual data mapping
* methods.
*/
default void open(SourceReaderContext readerContext) throws Exception {} | 3.68 |
querydsl_ProjectableSQLQuery_addJoinFlag | /**
* Add the given String literal as a join flag to the last added join
*
* @param flag join flag
* @param position position
* @return the current object
*/
@Override
@SuppressWarnings("unchecked")
public Q addJoinFlag(String flag, JoinFlag.Position position) {
queryMixin.addJoinFlag(new JoinFlag(flag, position));
return (Q) this;
} | 3.68 |
hbase_AssignmentVerificationReport_getNonFavoredAssignedRegions | /**
* Return the regions not assigned to its favored nodes
* @return regions not assigned to its favored nodes
*/
List<RegionInfo> getNonFavoredAssignedRegions() {
return nonFavoredAssignedRegionList;
} | 3.68 |
framework_ListenerMethod_getTarget | /**
* Returns the target object which contains the trigger method.
*
* @return The target object
*/
public Object getTarget() {
return target;
} | 3.68 |
hadoop_AbfsOutputStream_toString | /**
* Appending AbfsOutputStream statistics to base toString().
*
* @return String with AbfsOutputStream statistics.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(super.toString());
sb.append("AbfsOutputStream@").append(this.hashCode());
sb.append("){");
sb.append(outputStreamStatistics.toString());
sb.append("}");
return sb.toString();
} | 3.68 |
hbase_TableOutputFormat_getRecordWriter | /**
* Creates a new record writer. Be aware that the baseline javadoc gives the impression that there
* is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new
* RecordWriter per call of this method. You must close the returned RecordWriter when done.
* Failure to do so will drop writes.
* @param context The current task context.
* @return The newly created writer instance.
* @throws IOException When creating the writer fails.
* @throws InterruptedException When the job is cancelled.
*/
@Override
public RecordWriter<KEY, Mutation> getRecordWriter(TaskAttemptContext context)
throws IOException, InterruptedException {
return new TableRecordWriter();
} | 3.68 |
flink_FloatParser_parseField | /**
* Static utility to parse a field of type float from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException Thrown when the value cannot be parsed because the text
* does not represent a correct number.
*/
public static final float parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if (limitedLen > 0
&& (Character.isWhitespace(bytes[startPos])
|| Character.isWhitespace(bytes[startPos + limitedLen - 1]))) {
throw new NumberFormatException(
"There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Float.parseFloat(str);
} | 3.68 |
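A short fragment exercising the static parser above (hedged: assumes the surrounding FieldParser machinery behaves as shown):

```java
byte[] field = "3.25|rest".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
// Parse from offset 0 up to the '|' delimiter.
float value = FloatParser.parseField(field, 0, field.length, '|'); // 3.25f
```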
hudi_SparkHoodieBackedTableMetadataWriter_create | /**
* Return a Spark based implementation of {@code HoodieTableMetadataWriter} which can be used to
* write to the metadata table.
* <p>
* If the metadata table does not exist, an attempt is made to bootstrap it, but there is no guarantee that the
* table will end up bootstrapped at this time.
*
* @param conf
* @param writeConfig
* @param context
* @param inflightInstantTimestamp Timestamp of an instant which is in-progress. This instant is ignored while
* attempting to bootstrap the table.
* @return An instance of the {@code HoodieTableMetadataWriter}
*/
public static HoodieTableMetadataWriter create(Configuration conf,
HoodieWriteConfig writeConfig,
HoodieEngineContext context,
Option<String> inflightInstantTimestamp) {
return new SparkHoodieBackedTableMetadataWriter(
conf, writeConfig, EAGER, context, inflightInstantTimestamp);
} | 3.68 |
incubator-hugegraph-toolchain_FileUtil_countLines | /**
* NOTE: If there is no blank line at the end of the file,
* one line will be missing
*/
public static int countLines(File file) {
if (!file.exists()) {
throw new IllegalArgumentException(String.format(
"The file %s doesn't exist", file));
}
long fileLength = file.length();
try (FileInputStream fis = new FileInputStream(file);
BufferedInputStream bis = new BufferedInputStream(fis)) {
/*
* The last character may be an EOL or a non-EOL character.
* If it is the EOL, need to add 1 line; if it is the non-EOL,
* also need to add 1 line, because the next character means the EOF
* and should also be counted as a line.
*/
int number = 0;
for (int i = 0; i < fileLength - 1; i++) {
if (bis.read() == '\n') {
number++;
}
}
if (fileLength > 0) {
number++;
}
return number;
} catch (IOException e) {
throw new InternalException("Failed to count lines of file %s",
file);
}
} | 3.68 |
flink_FieldParser_endsWithDelimiter | /**
* Checks if the given bytes ends with the delimiter at the given end position.
*
* @param bytes The byte array that holds the value.
* @param endPos The index of the byte array where the check for the delimiter ends.
* @param delim The delimiter to check for.
* @return true if a delimiter ends at the given end position, false otherwise.
*/
public static final boolean endsWithDelimiter(byte[] bytes, int endPos, byte[] delim) {
if (endPos < delim.length - 1) {
return false;
}
for (int pos = 0; pos < delim.length; ++pos) {
if (delim[pos] != bytes[endPos - delim.length + 1 + pos]) {
return false;
}
}
return true;
} | 3.68 |
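A tiny fragment exercising the check above:

```java
byte[] record = "value;".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
byte[] delim = ";".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
// endPos points at the last byte to check; index 5 holds the ';' delimiter.
boolean ends = FieldParser.endsWithDelimiter(record, 5, delim); // true
```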
hadoop_LocalCacheDirectoryManager_getRelativePathForLocalization | /**
* This method will return a relative path from the first available vacant
* directory.
*
* @return {@link String} relative path for localization
*/
public synchronized String getRelativePathForLocalization() {
if (nonFullDirectories.isEmpty()) {
totalSubDirectories++;
Directory newDir = new Directory(totalSubDirectories);
nonFullDirectories.add(newDir);
knownDirectories.put(newDir.getRelativePath(), newDir);
}
Directory subDir = nonFullDirectories.peek();
if (subDir.incrementAndGetCount() >= perDirectoryFileLimit) {
nonFullDirectories.remove();
}
return subDir.getRelativePath();
} | 3.68 |
flink_ColumnStats_getMinValue | /**
* Deprecated because the Number type for max/min does not support comparable types well, e.g. {@link
* java.util.Date}, {@link java.sql.Timestamp}.
*
* <p>Returns null if this instance is constructed by {@link ColumnStats.Builder}.
*/
@Deprecated
public Number getMinValue() {
return minValue;
} | 3.68 |
AreaShop_AreaShop_getWorldEditHandler | /**
* Function to get the WorldEditInterface for version dependent things.
* @return WorldEditInterface
*/
public WorldEditInterface getWorldEditHandler() {
return this.worldEditInterface;
} | 3.68 |
morf_RenameTable_indexes | /**
* @see org.alfasoftware.morf.metadata.Table#indexes()
*/
@Override
public List<Index> indexes() {
return baseTable.indexes();
} | 3.68 |
flink_DeclarativeSlotManager_freeSlot | /**
* Free the given slot from the given allocation. If the slot is still allocated by the given
* allocation id, then the slot will be marked as free and will be subject to new slot requests.
*
* @param slotId identifying the slot to free
* @param allocationId with which the slot is presumably allocated
*/
@Override
public void freeSlot(SlotID slotId, AllocationID allocationId) {
checkInit();
LOG.debug("Freeing slot {}.", slotId);
slotTracker.notifyFree(slotId);
checkResourceRequirementsWithDelay();
} | 3.68 |
hbase_PrivateCellUtil_createFirstOnRowColTS | /**
* Creates the first cell with the row/family/qualifier of this cell and the given timestamp. Uses
* the "maximum" type that guarantees that the new cell is the lowest possible for this
* combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored.
* @param cell - cell
*/
public static Cell createFirstOnRowColTS(Cell cell, long ts) {
if (cell instanceof ByteBufferExtendedCell) {
return new FirstOnRowColTSByteBufferExtendedCell(
((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength(),
((ByteBufferExtendedCell) cell).getFamilyByteBuffer(),
((ByteBufferExtendedCell) cell).getFamilyPosition(), cell.getFamilyLength(),
((ByteBufferExtendedCell) cell).getQualifierByteBuffer(),
((ByteBufferExtendedCell) cell).getQualifierPosition(), cell.getQualifierLength(), ts);
}
return new FirstOnRowColTSCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), ts);
} | 3.68 |
flink_Task_getExecutionState | /**
* Returns the current execution state of the task.
*
* @return The current execution state of the task.
*/
public ExecutionState getExecutionState() {
return this.executionState;
} | 3.68 |
flink_HsMemoryDataManager_getNextBufferIndexToConsume | // Write lock should be acquired before invoking this method.
@Override
public List<Integer> getNextBufferIndexToConsume(HsConsumerId consumerId) {
ArrayList<Integer> consumeIndexes = new ArrayList<>(numSubpartitions);
for (int channel = 0; channel < numSubpartitions; channel++) {
HsSubpartitionConsumerInternalOperations viewOperation =
subpartitionViewOperationsMap.get(channel).get(consumerId);
// Access consuming offset without lock to prevent deadlock.
// A consuming thread may being blocked on the memory data manager lock, while holding
// the viewOperation lock.
consumeIndexes.add(
viewOperation == null ? -1 : viewOperation.getConsumingOffset(false) + 1);
}
return consumeIndexes;
} | 3.68 |
zilla_ManyToOneRingBuffer_maxMsgLength | /**
* {@inheritDoc}
*/
public int maxMsgLength()
{
return maxMsgLength;
} | 3.68 |
hbase_MetricSampleQuantiles_insertBatch | /**
* Merges items from buffer into the samples array in one pass. This is more efficient than doing
* an insert on every item.
*/
private void insertBatch() {
if (bufferCount == 0) {
return;
}
Arrays.sort(buffer, 0, bufferCount);
// Base case: no samples
int start = 0;
if (samples.isEmpty()) {
SampleItem newItem = new SampleItem(buffer[0], 1, 0);
samples.add(newItem);
start++;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem item = it.next();
for (int i = start; i < bufferCount; i++) {
long v = buffer[i];
while (it.nextIndex() < samples.size() && item.value < v) {
item = it.next();
}
// If we found that bigger item, back up so we insert ourselves before it
if (item.value > v) {
it.previous();
}
// We use different indexes for the edge comparisons, because of the above
// if statement that adjusts the iterator
int delta;
if (it.previousIndex() == 0 || it.nextIndex() == samples.size()) {
delta = 0;
} else {
delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1;
}
SampleItem newItem = new SampleItem(v, 1, delta);
it.add(newItem);
item = newItem;
}
bufferCount = 0;
} | 3.68 |
flink_CombinedWatermarkStatus_updateCombinedWatermark | /**
* Checks whether we need to update the combined watermark.
*
* <p><b>NOTE:</b>It can update {@link #isIdle()} status.
*
* @return true, if the combined watermark changed
*/
public boolean updateCombinedWatermark() {
long minimumOverAllOutputs = Long.MAX_VALUE;
// if we don't have any outputs minimumOverAllOutputs is not valid, it's still
// at its initial Long.MAX_VALUE state and we must not emit that
if (partialWatermarks.isEmpty()) {
return false;
}
boolean allIdle = true;
for (PartialWatermark partialWatermark : partialWatermarks) {
if (!partialWatermark.isIdle()) {
minimumOverAllOutputs =
Math.min(minimumOverAllOutputs, partialWatermark.getWatermark());
allIdle = false;
}
}
this.idle = allIdle;
if (!allIdle && minimumOverAllOutputs > combinedWatermark) {
combinedWatermark = minimumOverAllOutputs;
return true;
}
return false;
} | 3.68 |
framework_ErrorEvent_getThrowable | /**
* Gets the contained throwable, the cause of the error.
*
* @return the contained throwable
*/
public Throwable getThrowable() {
return throwable;
} | 3.68 |
hudi_HoodieIndexUtils_tagAsNewRecordIfNeeded | /**
* Get tagged record for the passed in {@link HoodieRecord}.
*
* @param record instance of {@link HoodieRecord} for which tagging is requested
* @param location {@link HoodieRecordLocation} for the passed in {@link HoodieRecord}
* @return the tagged {@link HoodieRecord}
*/
public static <R> HoodieRecord<R> tagAsNewRecordIfNeeded(HoodieRecord<R> record, Option<HoodieRecordLocation> location) {
if (location.isPresent()) {
// When you have a record in multiple files in the same partition, then <row key, record> collection
// will have 2 entries with the same exact in memory copy of the HoodieRecord and the 2
// separate filenames that the record is found in. This will result in setting
// currentLocation 2 times and it will fail the second time. So creating a new in memory
// copy of the hoodie record.
HoodieRecord<R> newRecord = record.newInstance();
newRecord.unseal();
newRecord.setCurrentLocation(location.get());
newRecord.seal();
return newRecord;
} else {
return record;
}
} | 3.68 |
hbase_TableName_valueOf | /**
* Construct a TableName
* @throws IllegalArgumentException if fullName equals old root or old meta. Some code depends on
* this.
*/
public static TableName valueOf(String name) {
for (TableName tn : tableCache) {
if (name.equals(tn.getNameAsString())) {
return tn;
}
}
final int namespaceDelimIndex = name.indexOf(NAMESPACE_DELIM);
if (namespaceDelimIndex < 0) {
return createTableNameIfNecessary(ByteBuffer.wrap(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME),
ByteBuffer.wrap(Bytes.toBytes(name)));
} else {
// indexOf is by character, not byte (consider multi-byte characters)
String ns = name.substring(0, namespaceDelimIndex);
String qualifier = name.substring(namespaceDelimIndex + 1);
return createTableNameIfNecessary(ByteBuffer.wrap(Bytes.toBytes(ns)),
ByteBuffer.wrap(Bytes.toBytes(qualifier)));
}
} | 3.68 |
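A brief usage fragment for the factory above:

```java
// No ':' in the name, so the default namespace is used.
TableName t1 = TableName.valueOf("my_table");
// Text before the ':' delimiter becomes the namespace.
TableName t2 = TableName.valueOf("my_ns:my_table");
System.out.println(t2.getNameAsString()); // my_ns:my_table
```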
morf_GraphBasedUpgrade_getPreUpgradeStatements | /**
* @return statements which must be executed before the upgrade
*/
public List<String> getPreUpgradeStatements() {
return preUpgradeStatements;
} | 3.68 |
hadoop_FederationBlock_initSubClusterPageItem | /**
* We will initialize the specific SubCluster's data within this method.
*
* @param tbody HTML TBody.
* @param subClusterInfo Sub-cluster information.
* @param lists Used to record data that needs to be displayed in JS.
*/
private void initSubClusterPageItem(TBODY<TABLE<Hamlet>> tbody,
SubClusterInfo subClusterInfo, List<Map<String, String>> lists) {
Map<String, String> subClusterMap = new HashMap<>();
// Prepare subCluster
SubClusterId subClusterId = subClusterInfo.getSubClusterId();
String subClusterIdText = subClusterId.getId();
// Prepare WebAppAddress
String webAppAddress = subClusterInfo.getRMWebServiceAddress();
String herfWebAppAddress = "";
if (webAppAddress != null && !webAppAddress.isEmpty()) {
herfWebAppAddress =
WebAppUtils.getHttpSchemePrefix(this.router.getConfig()) + webAppAddress;
}
// Prepare Capability
String capability = subClusterInfo.getCapability();
ClusterMetricsInfo subClusterMetricsInfo = getClusterMetricsInfo(capability);
if (subClusterMetricsInfo == null) {
return;
}
// Prepare LastStartTime & LastHeartBeat
Date lastStartTime = new Date(subClusterInfo.getLastStartTime());
Date lastHeartBeat = new Date(subClusterInfo.getLastHeartBeat());
// Prepare Resource
long totalMB = subClusterMetricsInfo.getTotalMB();
String totalMBDesc = StringUtils.byteDesc(totalMB * BYTES_IN_MB);
long totalVirtualCores = subClusterMetricsInfo.getTotalVirtualCores();
String resources = String.format("<memory:%s, vCores:%s>", totalMBDesc, totalVirtualCores);
// Prepare Node
long totalNodes = subClusterMetricsInfo.getTotalNodes();
long activeNodes = subClusterMetricsInfo.getActiveNodes();
String nodes = String.format("<totalNodes:%s, activeNodes:%s>", totalNodes, activeNodes);
// Prepare HTML Table
String stateStyle = "color:#dc3545;font-weight:bolder";
SubClusterState state = subClusterInfo.getState();
if (SubClusterState.SC_RUNNING == state) {
stateStyle = "color:#28a745;font-weight:bolder";
}
tbody.tr().$id(subClusterIdText)
.td().$class("details-control").a(herfWebAppAddress, subClusterIdText).__()
.td().$style(stateStyle).__(state.name()).__()
.td().__(lastStartTime).__()
.td().__(lastHeartBeat).__()
.td(resources)
.td(nodes)
.__();
// Formatted memory information
long allocatedMB = subClusterMetricsInfo.getAllocatedMB();
String allocatedMBDesc = StringUtils.byteDesc(allocatedMB * BYTES_IN_MB);
long availableMB = subClusterMetricsInfo.getAvailableMB();
String availableMBDesc = StringUtils.byteDesc(availableMB * BYTES_IN_MB);
long pendingMB = subClusterMetricsInfo.getPendingMB();
String pendingMBDesc = StringUtils.byteDesc(pendingMB * BYTES_IN_MB);
long reservedMB = subClusterMetricsInfo.getReservedMB();
String reservedMBDesc = StringUtils.byteDesc(reservedMB * BYTES_IN_MB);
subClusterMap.put("totalmemory", totalMBDesc);
subClusterMap.put("allocatedmemory", allocatedMBDesc);
subClusterMap.put("availablememory", availableMBDesc);
subClusterMap.put("pendingmemory", pendingMBDesc);
subClusterMap.put("reservedmemory", reservedMBDesc);
subClusterMap.put("subcluster", subClusterId.getId());
subClusterMap.put("capability", capability);
lists.add(subClusterMap);
} | 3.68 |
rocketmq-connect_RocketMqDatabaseHistory_recoverRecords | /**
* Recover records
*
* @param records
*/
@Override
protected void recoverRecords(Consumer<HistoryRecord> records) {
DefaultLitePullConsumer consumer = null;
try {
consumer = RocketMqAdminUtil.initDefaultLitePullConsumer(rocketMqConfig, false);
consumer.start();
// Select message queue
MessageQueue messageQueue = new ZeroMessageQueueSelector().select(new ArrayList<>(consumer.fetchMessageQueues(topicName)), null, null);
consumer.assign(Collections.singleton(messageQueue));
consumer.seekToBegin(messageQueue);
// Read all messages in the topic ...
long lastProcessedOffset = UNLIMITED_VALUE;
Long maxOffset = null;
int recoveryAttempts = 0;
do {
if (recoveryAttempts > maxRecoveryAttempts) {
throw new IllegalStateException(
"The database schema history couldn't be recovered.");
}
// Get db schema history topic end offset
maxOffset = getMaxOffsetOfSchemaHistoryTopic(maxOffset, messageQueue);
log.debug("End offset of database schema history topic is {}", maxOffset);
// Poll record from db schema history topic
List<MessageExt> recoveredRecords = consumer.poll(pollInterval);
int numRecordsProcessed = 0;
for (MessageExt message : recoveredRecords) {
if (message.getQueueOffset() > lastProcessedOffset) {
HistoryRecord recordObj = new HistoryRecord(reader.read(message.getBody()));
log.trace("Recovering database history: {}", recordObj);
if (recordObj == null || !recordObj.isValid()) {
log.warn("Skipping invalid database history record '{}'. " +
"This is often not an issue, but if it happens repeatedly please check the '{}' topic.",
recordObj, topicName);
} else {
records.accept(recordObj);
log.trace("Recovered database history: {}", recordObj);
}
lastProcessedOffset = message.getQueueOffset();
++numRecordsProcessed;
}
}
if (numRecordsProcessed == 0) {
log.debug("No new records found in the database schema history; will retry");
recoveryAttempts++;
} else {
log.debug("Processed {} records from database schema history", numRecordsProcessed);
}
} while (lastProcessedOffset < maxOffset - 1);
} catch (MQClientException | MQBrokerException | IOException | RemotingException | InterruptedException e) {
throw new DatabaseHistoryException(e);
} finally {
if (consumer != null) {
consumer.shutdown();
}
}
} | 3.68 |
flink_SnapshotDirectory_cleanup | /**
* Calling this method will attempt to delete the underlying snapshot directory recursively, if the
* state is "ongoing". In this case, the state will be set to "deleted" as a result of this
* call.
*
* @return <code>true</code> if delete is successful, <code>false</code> otherwise.
* @throws IOException if an exception happens during the delete.
*/
public boolean cleanup() throws IOException {
if (state.compareAndSet(State.ONGOING, State.DELETED)) {
FileUtils.deleteDirectory(directory.toFile());
}
return true;
} | 3.68 |
dubbo_AbstractTripleReactorSubscriber_subscribe | /**
* Binds the downstream and calls subscription#request(1).
*
* @param downstream downstream
*/
public void subscribe(final CallStreamObserver<T> downstream) {
if (downstream == null) {
throw new NullPointerException();
}
if (this.downstream == null && SUBSCRIBED.compareAndSet(false, true)) {
this.downstream = downstream;
subscription.request(1);
}
} | 3.68 |
streampipes_Formats_smileFormat | /**
* Defines the transport format SMILE used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type SMILE.
*/
public static TransportFormat smileFormat() {
return new TransportFormat(MessageFormat.SMILE);
} | 3.68 |
MagicPlugin_MageDataStore_obtainLock | /**
* Force-obtain a lock for a mage
*/
default void obtainLock(MageData mage) {} | 3.68 |
AreaShop_Utils_evaluateToDouble | /**
* Evaluate string input to a number.
* Uses JavaScript for expressions.
* @param input The input string
* @param region The region to apply replacements for and use for logging
* @return double evaluated from the input or a very high default in case of a script exception
*/
public static double evaluateToDouble(String input, GeneralRegion region) {
// Replace variables
input = Message.fromString(input).replacements(region).getSingle();
// Check for simple number
if(isDouble(input)) {
return Double.parseDouble(input);
}
// Lazy init scriptEngine
if(scriptEngine == null) {
scriptEngine = new ScriptEngineManager().getEngineByName("JavaScript");
}
// Evaluate expression
Object result;
try {
result = scriptEngine.eval(input);
} catch(ScriptException e) {
AreaShop.warn("Price of region", region.getName(), "is set with an invalid expression: '" + input + "', exception:", ExceptionUtils.getStackTrace(e));
return 99999999999.0; // High fallback for safety
}
// Handle the result
if(Utils.isDouble(result.toString())) {
return Double.parseDouble(result.toString());
} else {
AreaShop.warn("Price of region", region.getName(), "is set with the expression '" + input + "' that returns a result that is not a number:", result);
return 99999999999.0; // High fallback for safety
}
} | 3.68 |
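A runnable sketch of the same JSR-223 pattern: evaluate a price expression with a JavaScript engine and fall back to a sentinel value on failure. Note that a JavaScript engine may be absent on newer JDKs (Nashorn was removed in JDK 15), so the null check is a deliberate extra guard.

import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;
import javax.script.ScriptException;

public class PriceExpression {
    private static final double FALLBACK = 99999999999.0;

    public static double evaluate(String expression) {
        ScriptEngine engine = new ScriptEngineManager().getEngineByName("JavaScript");
        if (engine == null) {
            return FALLBACK; // no JavaScript engine available on this JDK
        }
        try {
            Object result = engine.eval(expression);
            return Double.parseDouble(String.valueOf(result));
        } catch (ScriptException | NumberFormatException e) {
            return FALLBACK;
        }
    }

    public static void main(String[] args) {
        System.out.println(evaluate("5000 + 10 * 250")); // 7500.0 if an engine is present
    }
}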
MagicPlugin_Wand_setMage | // This should be used sparingly, if at all... currently only
// used when applying an upgrade to a wand while not held
public void setMage(Mage mage) {
this.mage = mage;
} | 3.68 |
hadoop_TaskPool_resetStatisticsContext | /**
* Reset the statistics context if it was set earlier.
* This unbinds the current thread from any statistics
* context.
*/
private void resetStatisticsContext() {
if (ioStatisticsContext != null) {
IOStatisticsContext.setThreadIOStatisticsContext(null);
}
} | 3.68 |
dubbo_AbstractZookeeperTransporter_fetchAndUpdateZookeeperClientCache | /**
     * Gets the ZookeeperClient from the cache; the returned client must be connected.
     * <p>
     * This method is deliberately not private so that it can be exercised by unit tests.
     *
     * @param addressList candidate zookeeper addresses (primary and backups)
     * @return a connected ZookeeperClient from the cache, or null if none of the addresses has one
*/
public ZookeeperClient fetchAndUpdateZookeeperClientCache(List<String> addressList) {
ZookeeperClient zookeeperClient = null;
for (String address : addressList) {
if ((zookeeperClient = zookeeperClientMap.get(address)) != null && zookeeperClient.isConnected()) {
break;
}
}
// mapping new backup address
if (zookeeperClient != null && zookeeperClient.isConnected()) {
writeToClientMap(addressList, zookeeperClient);
}
return zookeeperClient;
} | 3.68 |
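A simplified, dependency-free sketch of the cache-then-backfill pattern above: scan the candidate addresses for an already connected client, then map every address in the list to the client that was found. The Client interface is an illustrative stand-in for ZookeeperClient.

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

interface Client {
    boolean isConnected();
}

final class ClientCache {
    private final Map<String, Client> clientsByAddress = new ConcurrentHashMap<>();

    /** Returns a connected client for any of the addresses, or null if none is cached. */
    Client fetchAndUpdate(List<String> addresses) {
        Client found = null;
        for (String address : addresses) {
            Client candidate = clientsByAddress.get(address);
            if (candidate != null && candidate.isConnected()) {
                found = candidate;
                break;
            }
        }
        if (found != null) {
            // Map the whole address list (including backups) to the same live client.
            for (String address : addresses) {
                clientsByAddress.put(address, found);
            }
        }
        return found;
    }
}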
flink_HiveParserSemanticAnalyzer_doPhase1 | /**
     * Phase 1 (including, but not limited to):
     *
     * <p>1. Gets all the aliases for all the tables / subqueries and makes the appropriate mapping
     * in aliasToTabs, aliasToSubq.
     *
     * <p>2. Gets the location of the destination and names the clause "inclause" + i.
     *
     * <p>3. Creates a map from a string representation of an aggregation tree to the actual
     * aggregation AST.
     *
     * <p>4. Creates a mapping from the clause name to the select expression AST in destToSelExpr.
     *
     * <p>5. Creates a mapping from a table alias to the lateral view AST's in aliasToLateralViews.
     */
@SuppressWarnings({"fallthrough", "nls"})
public boolean doPhase1(
HiveParserASTNode ast,
HiveParserQB qb,
HiveParserBaseSemanticAnalyzer.Phase1Ctx ctx1,
HiveParserPlannerContext plannerCtx)
throws SemanticException {
boolean phase1Result = true;
HiveParserQBParseInfo qbp = qb.getParseInfo();
boolean skipRecursion = false;
if (ast.getToken() != null) {
skipRecursion = true;
switch (ast.getToken().getType()) {
case HiveASTParser.TOK_SELECTDI:
qb.countSelDi();
// fall through
case HiveASTParser.TOK_SELECT:
qb.countSel();
qbp.setSelExprForClause(ctx1.dest, ast);
int posn = 0;
if (((HiveParserASTNode) ast.getChild(0)).getToken().getType()
== HiveASTParser.QUERY_HINT) {
HiveASTParseDriver pd = new HiveASTParseDriver();
String queryHintStr = ast.getChild(0).getText();
if (LOG.isDebugEnabled()) {
LOG.debug("QUERY HINT: " + queryHintStr);
}
try {
HiveParserASTNode hintNode = pd.parseHint(queryHintStr);
qbp.setHints(hintNode);
posn++;
} catch (HiveASTParseException e) {
throw new SemanticException(
"failed to parse query hint: " + e.getMessage(), e);
}
}
if ((ast.getChild(posn).getChild(0).getType() == HiveASTParser.TOK_TRANSFORM)) {
queryProperties.setUsesScript(true);
}
LinkedHashMap<String, HiveParserASTNode> aggregations =
doPhase1GetAggregationsFromSelect(ast, qb, ctx1.dest);
doPhase1GetColumnAliasesFromSelect(ast, qbp);
qbp.setAggregationExprsForClause(ctx1.dest, aggregations);
qbp.setDistinctFuncExprsForClause(
ctx1.dest, doPhase1GetDistinctFuncExprs(aggregations));
break;
case HiveASTParser.TOK_WHERE:
qbp.setWhrExprForClause(ctx1.dest, ast);
if (!HiveParserSubQueryUtils.findSubQueries((HiveParserASTNode) ast.getChild(0))
.isEmpty()) {
queryProperties.setFilterWithSubQuery(true);
}
break;
case HiveASTParser.TOK_INSERT_INTO:
String tabName =
getUnescapedName(
(HiveParserASTNode) ast.getChild(0).getChild(0),
catalogRegistry.getCurrentCatalog(),
catalogRegistry.getCurrentDatabase());
qbp.addInsertIntoTable(tabName, ast);
// TODO: hive doesn't break here, so we copy what's below here
handleTokDestination(ctx1, ast, qbp, plannerCtx);
break;
case HiveASTParser.TOK_DESTINATION:
handleTokDestination(ctx1, ast, qbp, plannerCtx);
break;
case HiveASTParser.TOK_FROM:
int childCount = ast.getChildCount();
if (childCount != 1) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, "Multiple Children " + childCount));
}
if (!qbp.getIsSubQ()) {
qbp.setQueryFromExpr(ast);
}
// Check if this is a subquery / lateral view
HiveParserASTNode frm = (HiveParserASTNode) ast.getChild(0);
if (frm.getToken().getType() == HiveASTParser.TOK_TABREF) {
processTable(qb, frm);
} else if (frm.getToken().getType() == HiveASTParser.TOK_VIRTUAL_TABLE) {
// Create a temp table with the passed values in it then rewrite this
// portion of the tree to be from that table.
HiveParserASTNode newFrom = genValuesTempTable(frm, qb);
ast.setChild(0, newFrom);
processTable(qb, newFrom);
} else if (frm.getToken().getType() == HiveASTParser.TOK_SUBQUERY) {
processSubQuery(qb, frm);
} else if (frm.getToken().getType() == HiveASTParser.TOK_LATERAL_VIEW
|| frm.getToken().getType() == HiveASTParser.TOK_LATERAL_VIEW_OUTER) {
queryProperties.setHasLateralViews(true);
processLateralView(qb, frm);
} else if (HiveParserUtils.isJoinToken(frm)) {
processJoin(qb, frm);
qbp.setJoinExpr(frm);
} else if (frm.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION) {
queryProperties.setHasPTF(true);
processPTF(qb, frm);
}
break;
case HiveASTParser.TOK_CLUSTERBY:
// Get the clusterby aliases - these are aliased to the entries in the select
// list
queryProperties.setHasClusterBy(true);
qbp.setClusterByExprForClause(ctx1.dest, ast);
break;
case HiveASTParser.TOK_DISTRIBUTEBY:
// Get the distribute by aliases - these are aliased to the entries in the
// select list
queryProperties.setHasDistributeBy(true);
qbp.setDistributeByExprForClause(ctx1.dest, ast);
if (qbp.getClusterByForClause(ctx1.dest) != null) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, ErrorMsg.CLUSTERBY_DISTRIBUTEBY_CONFLICT.getMsg()));
} else if (qbp.getOrderByForClause(ctx1.dest) != null) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, ErrorMsg.ORDERBY_DISTRIBUTEBY_CONFLICT.getMsg()));
}
break;
case HiveASTParser.TOK_SORTBY:
// Get the sort by aliases - these are aliased to the entries in the select list
queryProperties.setHasSortBy(true);
qbp.setSortByExprForClause(ctx1.dest, ast);
if (qbp.getClusterByForClause(ctx1.dest) != null) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, ErrorMsg.CLUSTERBY_SORTBY_CONFLICT.getMsg()));
} else if (qbp.getOrderByForClause(ctx1.dest) != null) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, ErrorMsg.ORDERBY_SORTBY_CONFLICT.getMsg()));
}
break;
case HiveASTParser.TOK_ORDERBY:
// Get the order by aliases - these are aliased to the entries in the select
// list
queryProperties.setHasOrderBy(true);
qbp.setOrderByExprForClause(ctx1.dest, ast);
if (qbp.getClusterByForClause(ctx1.dest) != null) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, ErrorMsg.CLUSTERBY_ORDERBY_CONFLICT.getMsg()));
}
break;
case HiveASTParser.TOK_GROUPBY:
case HiveASTParser.TOK_ROLLUP_GROUPBY:
case HiveASTParser.TOK_CUBE_GROUPBY:
case HiveASTParser.TOK_GROUPING_SETS:
// Get the groupby aliases - these are aliased to the entries in the select list
queryProperties.setHasGroupBy(true);
if (qbp.getJoinExpr() != null) {
queryProperties.setHasJoinFollowedByGroupBy(true);
}
if (qbp.getSelForClause(ctx1.dest).getToken().getType()
== HiveASTParser.TOK_SELECTDI) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, ErrorMsg.SELECT_DISTINCT_WITH_GROUPBY.getMsg()));
}
qbp.setGroupByExprForClause(ctx1.dest, ast);
skipRecursion = true;
// Rollup and Cubes are syntactic sugar on top of grouping sets
if (ast.getToken().getType() == HiveASTParser.TOK_ROLLUP_GROUPBY) {
qbp.getDestRollups().add(ctx1.dest);
} else if (ast.getToken().getType() == HiveASTParser.TOK_CUBE_GROUPBY) {
qbp.getDestCubes().add(ctx1.dest);
} else if (ast.getToken().getType() == HiveASTParser.TOK_GROUPING_SETS) {
qbp.getDestGroupingSets().add(ctx1.dest);
}
break;
case HiveASTParser.TOK_HAVING:
qbp.setHavingExprForClause(ctx1.dest, ast);
qbp.addAggregationExprsForClause(
ctx1.dest, doPhase1GetAggregationsFromSelect(ast, qb, ctx1.dest));
break;
case HiveASTParser.KW_WINDOW:
if (!qb.hasWindowingSpec(ctx1.dest)) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast,
"Query has no Cluster/Distribute By; but has a Window definition"));
}
handleQueryWindowClauses(qb, ctx1, ast);
break;
case HiveASTParser.TOK_LIMIT:
if (ast.getChildCount() == 2) {
qbp.setDestLimit(
ctx1.dest,
new Integer(ast.getChild(0).getText()),
new Integer(ast.getChild(1).getText()));
} else {
qbp.setDestLimit(ctx1.dest, 0, new Integer(ast.getChild(0).getText()));
}
break;
case HiveASTParser.TOK_ANALYZE:
// Case of analyze command
String tableName =
getUnescapedName((HiveParserASTNode) ast.getChild(0).getChild(0))
.toLowerCase();
String originTableName =
getUnescapedOriginTableName(
(HiveParserASTNode) ast.getChild(0).getChild(0));
qb.setTabAlias(tableName, originTableName, tableName);
qb.addAlias(tableName);
qb.getParseInfo().setIsAnalyzeCommand(true);
qb.getParseInfo().setNoScanAnalyzeCommand(this.noscan);
qb.getParseInfo().setPartialScanAnalyzeCommand(this.partialscan);
// Allow analyze the whole table and dynamic partitions
HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
HiveConf.setVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
break;
case HiveASTParser.TOK_UNIONALL:
if (!qbp.getIsSubQ()) {
// this shouldn't happen. The parser should have converted the union to be
// contained in a subquery. Just in case, we keep the error as a fallback.
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
ast, ErrorMsg.UNION_NOTIN_SUBQ.getMsg()));
}
skipRecursion = false;
break;
case HiveASTParser.TOK_INSERT:
HiveParserASTNode destination = (HiveParserASTNode) ast.getChild(0);
Tree tab = destination.getChild(0);
// Proceed if AST contains partition & If Not Exists
if (destination.getChildCount() == 2
&& tab.getChildCount() == 2
&& destination.getChild(1).getType() == HiveASTParser.TOK_IFNOTEXISTS) {
ObjectIdentifier tableIdentifier =
getObjectIdentifier(
catalogRegistry, (HiveParserASTNode) tab.getChild(0));
Tree partitions = tab.getChild(1);
int numChildren = partitions.getChildCount();
HashMap<String, String> partition = new HashMap<>();
for (int i = 0; i < numChildren; i++) {
String partitionName = partitions.getChild(i).getChild(0).getText();
Tree pvalue = partitions.getChild(i).getChild(1);
if (pvalue == null) {
break;
}
String partitionVal = stripQuotes(pvalue.getText());
partition.put(partitionName, partitionVal);
}
// if it is a dynamic partition throw the exception
if (numChildren != partition.size()) {
throw new SemanticException(
ErrorMsg.INSERT_INTO_DYNAMICPARTITION_IFNOTEXISTS.getMsg(
partition.toString()));
}
Optional<CatalogPartition> catalogPartition =
catalogRegistry.getPartition(
tableIdentifier, new CatalogPartitionSpec(partition));
                        // Check whether the partition exists; if it does, skip the overwrite
if (catalogPartition.isPresent()) {
phase1Result = false;
skipRecursion = true;
LOG.info(
"Partition already exists so insert into overwrite "
+ "skipped for partition : "
+ partition);
break;
}
ResolvedCatalogTable catalogTable =
(ResolvedCatalogTable)
(getCatalogTable(tableIdentifier.asSummaryString(), qb));
validatePartColumnType(
catalogTable,
partition,
(HiveParserASTNode) tab,
conf,
frameworkConfig,
cluster);
}
skipRecursion = false;
break;
case HiveASTParser.TOK_LATERAL_VIEW:
case HiveASTParser.TOK_LATERAL_VIEW_OUTER:
// todo: nested LV
assert ast.getChildCount() == 1;
qb.getParseInfo().getDestToLateralView().put(ctx1.dest, ast);
break;
case HiveASTParser.TOK_CTE:
processCTE(qb, ast);
break;
default:
skipRecursion = false;
break;
}
}
if (!skipRecursion) {
// Iterate over the rest of the children
int childCount = ast.getChildCount();
for (int childPos = 0; childPos < childCount && phase1Result; ++childPos) {
phase1Result =
doPhase1((HiveParserASTNode) ast.getChild(childPos), qb, ctx1, plannerCtx);
}
}
return phase1Result;
} | 3.68 |
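Structurally, the method above is a token-dispatching pre-order walk: handle the current node by token type, decide whether to keep recursing, then visit children while the running result stays true. A stripped-down sketch of that shape, with hypothetical token constants and a minimal Node type:

import java.util.List;

final class Phase1Walker {
    static final int TOK_SELECT = 1;
    static final int TOK_GROUPBY = 2;

    static final class Node {
        final int tokenType;
        final List<Node> children;

        Node(int tokenType, List<Node> children) {
            this.tokenType = tokenType;
            this.children = children;
        }
    }

    boolean walk(Node node) {
        boolean result = true;
        boolean skipRecursion = false;
        switch (node.tokenType) {
            case TOK_SELECT:
                // record select expressions for the current clause here
                break;
            case TOK_GROUPBY:
                // group-by children are handled here, so do not recurse into them again
                skipRecursion = true;
                break;
            default:
                break;
        }
        if (!skipRecursion) {
            for (Node child : node.children) {
                if (!result) {
                    break;
                }
                result = walk(child);
            }
        }
        return result;
    }
}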
hbase_MetricsConnection_getDeleteTracker | /** deleteTracker metric */
public CallTracker getDeleteTracker() {
return deleteTracker;
} | 3.68 |
hadoop_RpcScheduler_addResponseTime | /**
* Store a processing time value for an RPC call into this scheduler.
*
* @param callName The name of the call.
* @param schedulable The schedulable representing the incoming call.
* @param details The details of processing time.
*/
@SuppressWarnings("deprecation")
default void addResponseTime(String callName, Schedulable schedulable,
ProcessingDetails details) {
// For the sake of backwards compatibility with old implementations of
// this interface, a default implementation is supplied which uses the old
    // method. All new implementations MUST override this method and should
// NOT use the other addResponseTime method.
int queueTime = (int) details.get(ProcessingDetails.Timing.QUEUE,
RpcMetrics.DEFAULT_METRIC_TIME_UNIT);
int processingTime = (int) details.get(ProcessingDetails.Timing.PROCESSING,
RpcMetrics.DEFAULT_METRIC_TIME_UNIT);
addResponseTime(callName, schedulable.getPriorityLevel(),
queueTime, processingTime);
} | 3.68 |
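The default method above is a backward-compatibility bridge: new callers use the detail-aware overload, which forwards to the legacy signature that existing schedulers already implement. A minimal illustration of that pattern with hypothetical names:

interface ResponseTimeSink {
    /** Legacy method that existing implementations already provide. */
    void addResponseTime(String callName, int priority, int queueTimeMs, int processingTimeMs);

    /** Newer entry point; the default keeps old implementations source-compatible. */
    default void addResponseTime(String callName, int priority, long queueTimeMs, long processingTimeMs) {
        addResponseTime(callName, priority, (int) queueTimeMs, (int) processingTimeMs);
    }
}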
framework_VAbstractTextualDate_cleanFormat | /**
* Clean date format string to make it suitable for
* {@link #getFormatString()}.
*
* @see #getFormatString()
*
* @param format
* date format string
* @return cleaned up string
*/
protected String cleanFormat(String format) {
// Remove unsupported patterns
// TODO support for 'G', era designator (used at least in Japan)
format = format.replaceAll("[GzZwWkK]", "");
// Remove extra delimiters ('/' and '.')
while (format.startsWith("/") || format.startsWith(".")
|| format.startsWith("-")) {
format = format.substring(1);
}
while (format.endsWith("/") || format.endsWith(".")
|| format.endsWith("-")) {
format = format.substring(0, format.length() - 1);
}
// Remove duplicate delimiters
format = format.replaceAll("//", "/");
format = format.replaceAll("\\.\\.", ".");
format = format.replaceAll("--", "-");
return format.trim();
} | 3.68 |
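A quick standalone illustration of what the first cleanup step above does to a typical locale pattern; the sample pattern is an arbitrary assumption, and the full method additionally strips leading/trailing '/', '.' and '-' delimiters and collapses duplicates.

public class CleanFormatDemo {
    public static void main(String[] args) {
        String format = "dd-MM-yyyy HH:mm:ss zzz G";
        // Strip the unsupported pattern letters, mirroring the regex above.
        String cleaned = format.replaceAll("[GzZwWkK]", "").trim();
        System.out.println(cleaned); // prints: dd-MM-yyyy HH:mm:ss
    }
}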
dubbo_DubboBootstrap_start | /**
* Start dubbo application
*
     * @param wait if true, block until startup completes; otherwise return immediately
     * @return this bootstrap instance
*/
public DubboBootstrap start(boolean wait) {
Future future = applicationDeployer.start();
if (wait) {
try {
future.get();
} catch (Exception e) {
throw new IllegalStateException("await dubbo application start finish failure", e);
}
}
return this;
} | 3.68 |
pulsar_AuthorizationProvider_initialize | /**
* Perform initialization for the authorization provider.
*
* @param conf
* broker config object
* @param pulsarResources
* Resources component for access to metadata
* @throws IOException
* if the initialization fails
*/
default void initialize(ServiceConfiguration conf, PulsarResources pulsarResources) throws IOException {
} | 3.68 |
framework_JSR356WebsocketInitializer_getAttributeName | /**
* Returns the name of the attribute in the servlet context where the
* pre-initialized Atmosphere object is stored.
*
* @param servletName
* The name of the servlet
* @return The attribute name which contains the initialized Atmosphere
* object
*/
public static String getAttributeName(String servletName) {
return JSR356WebsocketInitializer.class.getName() + "." + servletName;
} | 3.68 |
flink_ProducerMergedPartitionFileReader_lazyInitializeFileChannel | /**
     * Initialize the file channel lazily, which avoids holding a file descriptor until the file is
     * actually read.
*/
private void lazyInitializeFileChannel() {
if (fileChannel == null) {
try {
fileChannel = FileChannel.open(dataFilePath, StandardOpenOption.READ);
} catch (IOException e) {
ExceptionUtils.rethrow(e, "Failed to open file channel.");
}
}
} | 3.68 |
hudi_HoodieFunctionalIndexMetadata_fromJson | /**
* Deserialize from JSON string to create an instance of this class.
*
* @param json Input JSON string.
* @return Deserialized instance of HoodieFunctionalIndexMetadata.
* @throws IOException If any deserialization errors occur.
*/
public static HoodieFunctionalIndexMetadata fromJson(String json) throws IOException {
if (json == null || json.isEmpty()) {
return new HoodieFunctionalIndexMetadata();
}
return JsonUtils.getObjectMapper().readValue(json, HoodieFunctionalIndexMetadata.class);
} | 3.68 |
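A self-contained version of the same guard-then-deserialize pattern using Jackson directly; the IndexMetadata POJO is a made-up stand-in for the Hudi class.

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;

public class JsonCodec {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    /** Hypothetical metadata POJO; a real class would carry the index definitions. */
    public static class IndexMetadata {
        public String indexName;
    }

    public static IndexMetadata fromJson(String json) throws IOException {
        if (json == null || json.isEmpty()) {
            return new IndexMetadata(); // empty default rather than null
        }
        return MAPPER.readValue(json, IndexMetadata.class);
    }
}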
flink_InternalSourceReaderMetricGroup_watermarkEmitted | /**
* Called when a watermark was emitted.
*
* <p>Note this function should be called before the actual watermark is emitted such that
* chained processing does not influence the statistics.
*/
public void watermarkEmitted(long watermark) {
if (watermark == MAX_WATERMARK_TIMESTAMP) {
return;
}
lastWatermark = watermark;
if (firstWatermark) {
parentMetricGroup.gauge(MetricNames.WATERMARK_LAG, this::getWatermarkLag);
firstWatermark = false;
}
} | 3.68 |
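A small sketch of the register-on-first-event idea used above: the lag gauge is only exposed once the first watermark arrives, so a source that never emits watermarks never reports the metric. The gauge map and supplier are plain-Java stand-ins, not Flink's MetricGroup API.

import java.util.HashMap;
import java.util.Map;
import java.util.function.LongSupplier;

final class WatermarkMetrics {
    private final Map<String, LongSupplier> gauges = new HashMap<>();
    private long lastWatermark = Long.MIN_VALUE;
    private boolean firstWatermark = true;

    void watermarkEmitted(long watermark) {
        lastWatermark = watermark;
        if (firstWatermark) {
            // Register the lag gauge lazily, only once a watermark has actually been seen.
            gauges.put("watermarkLag", () -> System.currentTimeMillis() - lastWatermark);
            firstWatermark = false;
        }
    }
}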
framework_AbstractClientConnector_getRpcProxy | /**
* Returns an RPC proxy for a given server to client RPC interface for this
* component.
*
* TODO more javadoc, subclasses, ...
*
* @param rpcInterface
* RPC interface type
*
* @since 7.0
*/
protected <T extends ClientRpc> T getRpcProxy(final Class<T> rpcInterface) {
// create, initialize and return a dynamic proxy for RPC
try {
if (!rpcProxyMap.containsKey(rpcInterface)) {
Class<?> proxyClass = Proxy.getProxyClass(
rpcInterface.getClassLoader(), rpcInterface);
Constructor<?> constructor = proxyClass
.getConstructor(InvocationHandler.class);
T rpcProxy = rpcInterface.cast(constructor
.newInstance(new RpcInvocationHandler(rpcInterface)));
// cache the proxy
rpcProxyMap.put(rpcInterface, rpcProxy);
}
return (T) rpcProxyMap.get(rpcInterface);
} catch (Exception e) {
// TODO exception handling?
throw new RuntimeException(e);
}
} | 3.68 |
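A compact sketch of the same JDK dynamic-proxy approach, using Proxy.newProxyInstance (a shorter route than looking up the generated constructor by hand) together with a cache keyed by interface; the logging invocation handler is purely illustrative.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class RpcProxies {
    private final Map<Class<?>, Object> proxyCache = new ConcurrentHashMap<>();

    @SuppressWarnings("unchecked")
    <T> T getProxy(Class<T> rpcInterface) {
        return (T) proxyCache.computeIfAbsent(rpcInterface, iface -> {
            InvocationHandler handler = (proxy, method, args) -> {
                // A real handler would queue the invocation for the client; here we just log it.
                System.out.println("invoked " + iface.getName() + "#" + method.getName());
                return null;
            };
            return Proxy.newProxyInstance(iface.getClassLoader(), new Class<?>[] {iface}, handler);
        });
    }
}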
hbase_LruBlockCache_evictBlocksByHfileName | /**
* Evicts all blocks for a specific HFile. This is an expensive operation implemented as a
* linear-time search through all blocks in the cache. Ideally this should be a search in a
* log-access-time map.
* <p>
* This is used for evict-on-close to remove all blocks of a specific HFile.
* @return the number of blocks evicted
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
int numEvicted = 0;
for (BlockCacheKey key : map.keySet()) {
if (key.getHfileName().equals(hfileName)) {
if (evictBlock(key)) {
++numEvicted;
}
}
}
if (victimHandler != null) {
numEvicted += victimHandler.evictBlocksByHfileName(hfileName);
}
return numEvicted;
} | 3.68 |
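The same linear-scan eviction idea in isolation: iterate the cache keys and remove every entry whose key belongs to the given file. The key and cache types are simplified stand-ins for BlockCacheKey and the block map.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class SimpleBlockCache {
    record BlockKey(String fileName, long offset) {}

    private final Map<BlockKey, byte[]> map = new ConcurrentHashMap<>();

    /** Evicts all cached blocks of the given file; returns how many were removed. */
    int evictBlocksByFileName(String fileName) {
        int evicted = 0;
        for (BlockKey key : map.keySet()) {
            if (key.fileName().equals(fileName) && map.remove(key) != null) {
                evicted++;
            }
        }
        return evicted;
    }
}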
framework_ContainerHierarchicalWrapper_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #removePropertySetChangeListener(Container.PropertySetChangeListener)}
*/
@Override
@Deprecated
public void removeListener(Container.PropertySetChangeListener listener) {
removePropertySetChangeListener(listener);
} | 3.68 |
flink_CrossOperator_projectFirst | /**
* Continues a ProjectCross transformation and adds fields of the first cross input.
*
* <p>If the first cross input is a {@link Tuple} {@link DataSet}, fields can be selected by
* their index. If the first cross input is not a Tuple DataSet, no parameters should be
* passed.
*
* <p>Fields of the first and second input can be added by chaining the method calls of
* {@link
* org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectFirst(int...)}
* and {@link
* org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectSecond(int...)}.
*
* @param firstFieldIndexes If the first input is a Tuple DataSet, the indexes of the
* selected fields. For a non-Tuple DataSet, do not provide parameters. The order of
     * fields in the output tuple is defined by the order of the field indexes.
* @return An extended CrossProjection.
* @see Tuple
* @see DataSet
* @see org.apache.flink.api.java.operators.CrossOperator.CrossProjection
* @see org.apache.flink.api.java.operators.CrossOperator.ProjectCross
*/
protected CrossProjection<I1, I2> projectFirst(int... firstFieldIndexes) {
boolean isFirstTuple;
if (ds1.getType() instanceof TupleTypeInfo && firstFieldIndexes.length > 0) {
isFirstTuple = true;
} else {
isFirstTuple = false;
}
if (!isFirstTuple && firstFieldIndexes.length != 0) {
// field index provided for non-Tuple input
throw new IllegalArgumentException(
"Input is not a Tuple. Call projectFirst() without arguments to include it.");
} else if (firstFieldIndexes.length > (22 - this.fieldIndexes.length)) {
                // too many field indexes provided
throw new IllegalArgumentException(
"You may select only up to twenty-two (22) fields in total.");
}
int offset = this.fieldIndexes.length;
if (isFirstTuple) {
// extend index and flag arrays
this.fieldIndexes =
Arrays.copyOf(
this.fieldIndexes,
this.fieldIndexes.length + firstFieldIndexes.length);
this.isFieldInFirst =
Arrays.copyOf(
this.isFieldInFirst,
this.isFieldInFirst.length + firstFieldIndexes.length);
// copy field indexes
int maxFieldIndex = numFieldsDs1;
for (int i = 0; i < firstFieldIndexes.length; i++) {
// check if indexes in range
Preconditions.checkElementIndex(firstFieldIndexes[i], maxFieldIndex);
this.isFieldInFirst[offset + i] = true;
this.fieldIndexes[offset + i] = firstFieldIndexes[i];
}
} else {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + 1);
this.isFieldInFirst =
Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
// add input object to output tuple
this.isFieldInFirst[offset] = true;
this.fieldIndexes[offset] = -1;
}
return this;
} | 3.68 |
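A usage sketch of the chaining described in the Javadoc above, assuming the legacy Flink DataSet API (ExecutionEnvironment, fromElements, Tuple2); field 0 of the first input and field 1 of the second input end up in the projected output tuple.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class CrossProjectionExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> first = env.fromElements(Tuple2.of(1, "a"), Tuple2.of(2, "b"));
        DataSet<Tuple2<Long, Double>> second = env.fromElements(Tuple2.of(10L, 0.5));
        // Keep field 0 of the first input and field 1 of the second input.
        first.cross(second)
                .projectFirst(0)
                .projectSecond(1)
                .print();
    }
}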
hbase_ScheduledChore_cleanup | /**
* Override to run cleanup tasks when the Chore encounters an error and must stop running
*/
protected void cleanup() {
} | 3.68 |
framework_PushMode_isEnabled | /**
* Checks whether the push mode is using push functionality.
*
* @return <code>true</code> if this mode requires push functionality;
* <code>false</code> if no push functionality is used for this
* mode.
*/
public boolean isEnabled() {
return this != DISABLED;
} | 3.68 |
hbase_CompactionConfiguration_getMinLocalityToForceCompact | /**
* @return Block locality ratio, the ratio at which we will include old regions with a single
* store file for major compaction. Used to improve block locality for regions that
* haven't had writes in a while but are still being read.
*/
public float getMinLocalityToForceCompact() {
return minLocalityToForceCompact;
} | 3.68 |
hbase_HFileArchiveManager_disable | /**
* Disable all archiving of files for a given table
* <p>
* Inherently an <b>asynchronous operation</b>.
* @param zooKeeper watcher for the ZK cluster
* @param table name of the table to disable
   * @throws KeeperException if an unexpected ZK connection issue occurs
*/
private void disable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
// ensure the latest state of the archive node is found
zooKeeper.syncOrTimeout(archiveZnode);
// if the top-level archive node is gone, then we are done
if (ZKUtil.checkExists(zooKeeper, archiveZnode) < 0) {
return;
}
// delete the table node, from the archive
String tableNode = this.getTableNode(table);
    // make sure the table node is at its latest version so the delete takes effect
zooKeeper.syncOrTimeout(tableNode);
LOG.debug("Attempting to delete table node:" + tableNode);
ZKUtil.deleteNodeRecursively(zooKeeper, tableNode);
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableEstimateLiveDataSize | /** Returns an estimate of the amount of live data in bytes. */
public void enableEstimateLiveDataSize() {
this.properties.add(RocksDBProperty.EstimateLiveDataSize.getRocksDBProperty());
} | 3.68 |
hbase_MasterObserver_postSetSplitOrMergeEnabled | /**
* Called after setting split / merge switch
* @param ctx the coprocessor instance's environment
* @param newValue the new value submitted in the call
* @param switchType type of switch
*/
default void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final MasterSwitchType switchType) throws IOException {
} | 3.68 |
querydsl_PathBuilder_getCollection | /**
* Create a new Collection typed path
*
* @param <A>
* @param <E>
* @param property property name
* @param type property type
* @param queryType expression type
* @return property path
*/
public <A, E extends SimpleExpression<A>> CollectionPath<A, E> getCollection(String property, Class<A> type, Class<? super E> queryType) {
validate(property, Collection.class);
return super.createCollection(property, type, queryType, PathInits.DIRECT);
} | 3.68 |
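A brief usage sketch of the three-argument overload above, assuming Querydsl's StringPath as the element expression type and a hypothetical "person" root with a "nicknames" collection.

import com.querydsl.core.types.dsl.CollectionPath;
import com.querydsl.core.types.dsl.PathBuilder;
import com.querydsl.core.types.dsl.StringPath;

public class PathBuilderExample {
    public static void main(String[] args) {
        PathBuilder<Object> person = new PathBuilder<>(Object.class, "person");
        CollectionPath<String, StringPath> nicknames =
                person.getCollection("nicknames", String.class, StringPath.class);
        // Builds a predicate over any element of the collection.
        System.out.println(nicknames.any().eq("Bob"));
    }
}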
hbase_WALSplitUtil_writeRegionSequenceIdFile | /**
* Create a file with name as region's max sequence id
*/
public static void writeRegionSequenceIdFile(FileSystem walFS, Path regionDir, long newMaxSeqId)
throws IOException {
FileStatus[] files = getSequenceIdFiles(walFS, regionDir);
long maxSeqId = getMaxSequenceId(files);
if (maxSeqId > newMaxSeqId) {
throw new IOException("The new max sequence id " + newMaxSeqId
+ " is less than the old max sequence id " + maxSeqId);
}
// write a new seqId file
Path newSeqIdFile =
new Path(getRegionDirRecoveredEditsDir(regionDir), newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX);
if (newMaxSeqId != maxSeqId) {
try {
if (!walFS.createNewFile(newSeqIdFile) && !walFS.exists(newSeqIdFile)) {
throw new IOException("Failed to create SeqId file:" + newSeqIdFile);
}
LOG.debug("Wrote file={}, newMaxSeqId={}, maxSeqId={}", newSeqIdFile, newMaxSeqId,
maxSeqId);
} catch (FileAlreadyExistsException ignored) {
// latest hdfs throws this exception. it's all right if newSeqIdFile already exists
}
}
// remove old ones
for (FileStatus status : files) {
if (!newSeqIdFile.equals(status.getPath())) {
walFS.delete(status.getPath(), false);
}
}
} | 3.68 |
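A simplified java.nio.file analogue of the sequence-id marker rotation above (not the HBase FileSystem API): refuse to move backwards, create the new marker, then delete the superseded ones. File naming is assumed to be "<seqId>_seqid".

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

public class SequenceIdFiles {
    private static final String SUFFIX = "_seqid";

    public static void writeSequenceIdFile(Path dir, long newMaxSeqId) throws IOException {
        // Find the current maximum; assumes every *_seqid file is named by a number.
        long maxSeqId = 0;
        try (DirectoryStream<Path> files = Files.newDirectoryStream(dir, "*" + SUFFIX)) {
            for (Path file : files) {
                String name = file.getFileName().toString();
                maxSeqId = Math.max(maxSeqId,
                        Long.parseLong(name.substring(0, name.length() - SUFFIX.length())));
            }
        }
        if (maxSeqId > newMaxSeqId) {
            throw new IOException(
                    "The new max sequence id " + newMaxSeqId + " is less than the old max " + maxSeqId);
        }
        Path newFile = dir.resolve(newMaxSeqId + SUFFIX);
        if (newMaxSeqId != maxSeqId && !Files.exists(newFile)) {
            Files.createFile(newFile);
        }
        // Remove the superseded marker files.
        try (DirectoryStream<Path> files = Files.newDirectoryStream(dir, "*" + SUFFIX)) {
            for (Path file : files) {
                if (!file.equals(newFile)) {
                    Files.delete(file);
                }
            }
        }
    }
}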