name (string, lengths 12 to 178) | code_snippet (string, lengths 8 to 36.5k) | score (float64, 3.26 to 3.68)
---|---|---|
hmily_HmilyParticipantUndo_getHmilyLocks | /**
* Get hmily locks.
*
* @return hmily locks
*/
public Collection<HmilyLock> getHmilyLocks() {
return dataSnapshot.getTuples().stream()
.map(tuple -> new HmilyLock(transId, participantId, resourceId, tuple.getTableName(), Joiner.on("_").join(tuple.getPrimaryKeyValues()))).collect(Collectors.toList());
} | 3.68 |
AreaShop_RentRegion_getDurationString | /**
* Get the duration string, includes 'number identifier'.
* @return The duration string
*/
public String getDurationString() {
return getStringSetting("rent.duration");
} | 3.68 |
hadoop_CommitTaskStage_getPath | /**
* Get the manifest path.
* @return The path the manifest was saved to.
*/
public Path getPath() {
return path;
} | 3.68 |
framework_DateTimeField_isTextFieldEnabled | /**
* Checks whether the text field is enabled (default) or not.
*
* @see #setTextFieldEnabled(boolean)
*
* @return <b>true</b> if the text field is enabled, <b>false</b> otherwise.
*/
public boolean isTextFieldEnabled() {
return getState(false).textFieldEnabled;
} | 3.68 |
flink_RestServerEndpoint_start | /**
* Starts this REST server endpoint.
*
* @throws Exception if we cannot start the RestServerEndpoint
*/
public final void start() throws Exception {
synchronized (lock) {
Preconditions.checkState(
state == State.CREATED, "The RestServerEndpoint cannot be restarted.");
log.info("Starting rest endpoint.");
final Router router = new Router();
final CompletableFuture<String> restAddressFuture = new CompletableFuture<>();
handlers = initializeHandlers(restAddressFuture);
/* sort the handlers such that they are ordered the following:
* /jobs
* /jobs/overview
* /jobs/:jobid
* /jobs/:jobid/config
* /:*
*/
Collections.sort(handlers, RestHandlerUrlComparator.INSTANCE);
checkAllEndpointsAndHandlersAreUnique(handlers);
handlers.forEach(handler -> registerHandler(router, handler, log));
ChannelInitializer<SocketChannel> initializer =
new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws ConfigurationException {
RouterHandler handler = new RouterHandler(router, responseHeaders);
// SSL should be the first handler in the pipeline
if (isHttpsEnabled()) {
ch.pipeline()
.addLast(
"ssl",
new RedirectingSslHandler(
restAddress,
restAddressFuture,
sslHandlerFactory));
}
ch.pipeline()
.addLast(new HttpServerCodec())
.addLast(new FileUploadHandler(uploadDir))
.addLast(
new FlinkHttpObjectAggregator(
maxContentLength, responseHeaders));
for (InboundChannelHandlerFactory factory :
inboundChannelHandlerFactories) {
Optional<ChannelHandler> channelHandler =
factory.createHandler(configuration, responseHeaders);
if (channelHandler.isPresent()) {
ch.pipeline().addLast(channelHandler.get());
}
}
ch.pipeline()
.addLast(new ChunkedWriteHandler())
.addLast(handler.getName(), handler)
.addLast(new PipelineErrorHandler(log, responseHeaders));
}
};
NioEventLoopGroup bossGroup =
new NioEventLoopGroup(
1, new ExecutorThreadFactory("flink-rest-server-netty-boss"));
NioEventLoopGroup workerGroup =
new NioEventLoopGroup(
0, new ExecutorThreadFactory("flink-rest-server-netty-worker"));
bootstrap = new ServerBootstrap();
bootstrap
.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.childHandler(initializer);
Iterator<Integer> portsIterator;
try {
portsIterator = NetUtils.getPortRangeFromString(restBindPortRange);
} catch (IllegalConfigurationException e) {
throw e;
} catch (Exception e) {
throw new IllegalArgumentException(
"Invalid port range definition: " + restBindPortRange);
}
int chosenPort = 0;
while (portsIterator.hasNext()) {
try {
chosenPort = portsIterator.next();
final ChannelFuture channel;
if (restBindAddress == null) {
channel = bootstrap.bind(chosenPort);
} else {
channel = bootstrap.bind(restBindAddress, chosenPort);
}
serverChannel = channel.syncUninterruptibly().channel();
break;
} catch (final Exception e) {
// syncUninterruptibly() throws checked exceptions via Unsafe
// continue if the exception is due to the port being in use, fail early
// otherwise
if (!(e instanceof java.net.BindException)) {
throw e;
}
}
}
if (serverChannel == null) {
throw new BindException(
"Could not start rest endpoint on any port in port range "
+ restBindPortRange);
}
log.debug("Binding rest endpoint to {}:{}.", restBindAddress, chosenPort);
final InetSocketAddress bindAddress = (InetSocketAddress) serverChannel.localAddress();
final String advertisedAddress;
if (bindAddress.getAddress().isAnyLocalAddress()) {
advertisedAddress = this.restAddress;
} else {
advertisedAddress = bindAddress.getAddress().getHostAddress();
}
port = bindAddress.getPort();
log.info("Rest endpoint listening at {}:{}", advertisedAddress, port);
restBaseUrl = new URL(determineProtocol(), advertisedAddress, port, "").toString();
restAddressFuture.complete(restBaseUrl);
state = State.RUNNING;
startInternal();
}
} | 3.68 |
hbase_ServerManager_startChore | /**
* Start the chore in ServerManager.
*/
public void startChore() {
Configuration c = master.getConfiguration();
if (persistFlushedSequenceId) {
new Thread(() -> {
// after AM#loadMeta, RegionStates should be loaded, and some regions are
// deleted by drop/split/merge during removeDeletedRegionFromLoadedFlushedSequenceIds,
// but these deleted regions are not added back to RegionStates,
// so we can safely remove deleted regions.
removeDeletedRegionFromLoadedFlushedSequenceIds();
}, "RemoveDeletedRegionSyncThread").start();
int flushPeriod =
c.getInt(FLUSHEDSEQUENCEID_FLUSHER_INTERVAL, FLUSHEDSEQUENCEID_FLUSHER_INTERVAL_DEFAULT);
flushedSeqIdFlusher = new FlushedSequenceIdFlusher("FlushedSequenceIdFlusher", flushPeriod);
master.getChoreService().scheduleChore(flushedSeqIdFlusher);
}
} | 3.68 |
graphhopper_AbstractBidirCHAlgo_fillEdgesFromUsingFilter | /**
* @param edgeFilter edge filter used to fill edges. the {@link #levelEdgeFilter} reference will be set to
* edgeFilter by this method, so make sure edgeFilter does not use it directly.
*/
protected void fillEdgesFromUsingFilter(CHEdgeFilter edgeFilter) {
// we temporarily ignore the additionalEdgeFilter
CHEdgeFilter tmpFilter = levelEdgeFilter;
levelEdgeFilter = edgeFilter;
finishedFrom = !fillEdgesFrom();
levelEdgeFilter = tmpFilter;
} | 3.68 |
flink_FromElementsFunction_setOutputType | /**
* Set element type and re-serialize element if required. Should only be called before
* serialization/deserialization of this function.
*/
@Override
public void setOutputType(TypeInformation<T> outTypeInfo, ExecutionConfig executionConfig) {
Preconditions.checkState(
elements != null,
"The output type should've been specified before shipping the graph to the cluster");
checkIterable(elements, outTypeInfo.getTypeClass());
TypeSerializer<T> newSerializer = outTypeInfo.createSerializer(executionConfig);
if (Objects.equals(serializer, newSerializer)) {
return;
}
serializer = newSerializer;
try {
serializeElements();
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
} | 3.68 |
Activiti_BpmnDeploymentHelper_setResourceNamesOnProcessDefinitions | /**
* Updates all the process definition entities to have the correct resource names.
*/
public void setResourceNamesOnProcessDefinitions(ParsedDeployment parsedDeployment) {
for (ProcessDefinitionEntity processDefinition : parsedDeployment.getAllProcessDefinitions()) {
String resourceName = parsedDeployment.getResourceForProcessDefinition(processDefinition).getName();
processDefinition.setResourceName(resourceName);
}
} | 3.68 |
hbase_CompactionPolicy_getConf | /** Returns The current compaction configuration settings. */
public CompactionConfiguration getConf() {
return this.comConf;
} | 3.68 |
flink_DefaultRollingPolicy_getRolloverInterval | /**
* Returns the maximum time duration a part file can stay open before rolling.
*
* @return Time duration in milliseconds
*/
public long getRolloverInterval() {
return rolloverInterval;
} | 3.68 |
graphhopper_VectorTile_getLayersList | /**
* <code>repeated .vector_tile.Tile.Layer layers = 3;</code>
*/
public java.util.List<vector_tile.VectorTile.Tile.Layer> getLayersList() {
if (layersBuilder_ == null) {
return java.util.Collections.unmodifiableList(layers_);
} else {
return layersBuilder_.getMessageList();
}
} | 3.68 |
flink_BridgingSqlFunction_getRowTypeInference | /**
* The conversion to a row type is handled on the caller side. This allows us to perform it in a
* SQL/Table API-specific way. This is particularly important for setting the aliases of fields
* correctly (see {@link FlinkRelBuilder#pushFunctionScan(RelBuilder, SqlOperator, int,
* Iterable, List)}).
*/
@Override
public SqlReturnTypeInference getRowTypeInference() {
return getReturnTypeInference();
} | 3.68 |
flink_ExecutionEnvironment_addDefaultKryoSerializer | /**
* Adds a new Kryo default serializer to the Runtime.
*
* @param type The class of the types serialized with the given serializer.
* @param serializerClass The class of the serializer to use.
*/
public void addDefaultKryoSerializer(
Class<?> type, Class<? extends Serializer<?>> serializerClass) {
config.addDefaultKryoSerializer(type, serializerClass);
} | 3.68 |
hbase_Get_setRowOffsetPerColumnFamily | /**
* Set offset for the row per Column Family. This offset is only within a particular row/CF
* combination. It gets reset back to zero when we move to the next row or CF.
* @param offset is the number of kvs that will be skipped.
* @return this for invocation chaining
*/
public Get setRowOffsetPerColumnFamily(int offset) {
this.storeOffset = offset;
return this;
} | 3.68 |
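The per-CF offset above is typically set while building a read request. A minimal usage sketch, assuming the standard HBase client `Get` API; the row key and offset value are illustrative placeholders, and nothing here contacts a cluster.

```java
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class RowOffsetExample {
    public static void main(String[] args) {
        // Skip the first 10 cells in every row/column-family combination of this Get.
        // "row-1" and the offset of 10 are placeholder values.
        Get get = new Get(Bytes.toBytes("row-1"))
                .setRowOffsetPerColumnFamily(10);
        System.out.println("configured offset: " + get.getRowOffsetPerColumnFamily());
    }
}
```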
hbase_SnapshotInfo_getSnapshotStats | /**
* Returns the snapshot stats
* @param conf the {@link Configuration} to use
* @param snapshotDesc HBaseProtos.SnapshotDescription to get stats from
* @param filesMap {@link Map} store files map for all snapshots, it may be null
* @return the snapshot stats
*/
public static SnapshotStats getSnapshotStats(final Configuration conf,
final SnapshotProtos.SnapshotDescription snapshotDesc, final Map<Path, Integer> filesMap)
throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
final SnapshotStats stats = new SnapshotStats(conf, fs, snapshotDesc);
SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest,
"SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
if (!storeFile.hasReference()) {
stats.addStoreFile(regionInfo, family, storeFile, filesMap);
}
}
});
return stats;
} | 3.68 |
hudi_HoodieCreateHandle_write | /**
* Writes all records passed.
*/
public void write() {
Iterator<String> keyIterator;
if (hoodieTable.requireSortedRecords()) {
// Sorting the keys limits the amount of extra memory required for writing sorted records
keyIterator = recordMap.keySet().stream().sorted().iterator();
} else {
keyIterator = recordMap.keySet().stream().iterator();
}
while (keyIterator.hasNext()) {
final String key = keyIterator.next();
HoodieRecord<T> record = recordMap.get(key);
write(record, useWriterSchema ? writeSchemaWithMetaFields : writeSchema, config.getProps());
}
} | 3.68 |
querydsl_GenericExporter_setEmbeddedAnnotation | /**
* Set the embedded annotation
*
* @param embeddedAnnotation embedded annotation
*/
public void setEmbeddedAnnotation(Class<? extends Annotation> embeddedAnnotation) {
this.embeddedAnnotation = embeddedAnnotation;
} | 3.68 |
framework_LayoutDependencyTree_noMoreChangesExpected | /**
* Returns whether all required layouting and measuring has been done for
* this component in both directions and there are no more blockers waiting
* for handling.
*
* @param connector
* the connector to check, should not be {@code null}
* @return {@code true} if nothing is pending, {@code false} otherwise
*/
public boolean noMoreChangesExpected(ComponentConnector connector) {
return getDependency(connector.getConnectorId(), HORIZONTAL)
.noMoreChangesExpected()
&& getDependency(connector.getConnectorId(), VERTICAL)
.noMoreChangesExpected();
} | 3.68 |
hadoop_BCFile_getBCFileVersion | /**
* Get version of BCFile file being read.
*
* @return version of BCFile file being read.
*/
public Version getBCFileVersion() {
return version;
} | 3.68 |
hbase_Scan_setMaxResultsPerColumnFamily | /**
* Set the maximum number of values to return per row per Column Family
* @param limit the maximum number of values returned / row / CF
*/
public Scan setMaxResultsPerColumnFamily(int limit) {
this.storeLimit = limit;
return this;
} | 3.68 |
framework_ExternalResource_getMIMEType | /**
* Gets the MIME type of the resource.
*
* @see com.vaadin.server.Resource#getMIMEType()
*/
@Override
public String getMIMEType() {
if (mimeType == null) {
mimeType = FileTypeResolver.getMIMEType(getURL());
}
return mimeType;
} | 3.68 |
AreaShop_RegionGroup_getAutoRegions | /**
* Get automatically added regions.
* @return Set of regions automatically added by the configuration
*/
public Set<String> getAutoRegions() {
if(autoDirty) {
autoRegions = new HashSet<>();
for(GeneralRegion region : plugin.getFileManager().getRegions()) {
if(worlds.contains(region.getWorldName())) {
autoRegions.add(region.getName());
}
}
autoDirty = false;
}
return autoRegions;
} | 3.68 |
hbase_UnsafeAccess_toLong | /**
* Reads a long value at the given Object's offset considering it was written in big-endian
* format.
* @return long value at offset
*/
public static long toLong(Object ref, long offset) {
if (LITTLE_ENDIAN) {
return Long.reverseBytes(HBasePlatformDependent.getLong(ref, offset));
}
return HBasePlatformDependent.getLong(ref, offset);
} | 3.68 |
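The endianness handling above can be mirrored with plain JDK types. A minimal sketch, using a heap `ByteBuffer` instead of `HBasePlatformDependent`, so it is a behavioral analogue of the snippet rather than the HBase code path.

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class BigEndianReadExample {
    public static void main(String[] args) {
        // Eight bytes written in big-endian order, encoding the value 258 (0x0102).
        byte[] bytes = {0, 0, 0, 0, 0, 0, 1, 2};

        // Read the raw long as a little-endian platform would see it ...
        long littleEndianView = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).getLong();
        // ... then reverse the bytes, mirroring the LITTLE_ENDIAN branch above.
        long corrected = Long.reverseBytes(littleEndianView);

        // Reading directly in big-endian order (ByteBuffer's default) gives the same value.
        long bigEndianView = ByteBuffer.wrap(bytes).getLong();

        System.out.println(corrected == bigEndianView && corrected == 258L); // true
    }
}
```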
hbase_RegionServerAccounting_setGlobalMemStoreLimits | // Called by the tuners.
void setGlobalMemStoreLimits(long newGlobalMemstoreLimit) {
if (this.memType == MemoryType.HEAP) {
this.globalMemStoreLimit = newGlobalMemstoreLimit;
this.globalMemStoreLimitLowMark =
(long) (this.globalMemStoreLimit * this.globalMemStoreLimitLowMarkPercent);
} else {
this.globalOnHeapMemstoreLimit = newGlobalMemstoreLimit;
this.globalOnHeapMemstoreLimitLowMark =
(long) (this.globalOnHeapMemstoreLimit * this.globalMemStoreLimitLowMarkPercent);
}
} | 3.68 |
hbase_SyncFuture_getThread | /**
* Returns the thread that owned this sync future, use with caution as we return the reference to
* the actual thread object.
* @return the associated thread instance.
*/
Thread getThread() {
return t;
} | 3.68 |
framework_DesignContext_instantiateComponent | /**
* Creates a Component corresponding to the given node. Does not set the
* attributes for the created object.
*
* @param node
* a node of an HTML tree
* @return a Component corresponding to node, with no attributes set.
*/
private Component instantiateComponent(Node node) {
String tag = node.nodeName();
ComponentMapper componentMapper = Design.getComponentMapper();
Component component = componentMapper.tagToComponent(tag,
Design.getComponentFactory(), this);
assert tagEquals(tag, componentMapper.componentToTag(component, this));
return component;
} | 3.68 |
flink_BuiltInSqlFunction_name | /** @see BuiltInFunctionDefinition.Builder#name(String) */
public Builder name(String name) {
this.name = name;
return this;
} | 3.68 |
hbase_SnapshotInfo_isArchivedFileStillReferenced | /**
* Check whether, for a given file in the archive, there are other snapshots/tables still referencing it.
* @param filePath file path in archive
* @param snapshotFilesMap a map for store files in snapshots about how many snapshots refer to
* it.
* @return true or false
*/
private boolean isArchivedFileStillReferenced(final Path filePath,
final Map<Path, Integer> snapshotFilesMap) {
Integer c = snapshotFilesMap.get(filePath);
// Check if there are other snapshots or table from clone_snapshot() (via back-reference)
// still reference to it.
if ((c != null) && (c == 1)) {
Path parentDir = filePath.getParent();
Path backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName());
try {
if (CommonFSUtils.listStatus(fs, backRefDir) == null) {
return false;
}
} catch (IOException e) {
// For the purpose of this function, IOException is ignored and treated as
// the file is still being referenced.
}
}
return true;
} | 3.68 |
druid_FilterAdapter_statement_executeQuery | // /////////////////////////////
@Override
public ResultSetProxy statement_executeQuery(FilterChain chain, StatementProxy statement, String sql)
throws SQLException {
return chain.statement_executeQuery(statement, sql);
} | 3.68 |
framework_VGridLayout_getVerticalSpacing | /**
* Returns the spacing between the cells vertically in pixels.
*
* @return the vertical spacing between the cells in pixels
*/
protected int getVerticalSpacing() {
return LayoutManager.get(client).getOuterHeight(spacingMeasureElement);
} | 3.68 |
hbase_HBaseTestingUtility_ensureSomeNonStoppedRegionServersAvailable | /**
* Make sure that at least the specified number of region servers are running. We don't count the
* ones that are currently stopping or are stopped.
* @param num minimum number of region servers that should be running
* @return true if we started some servers
*/
public boolean ensureSomeNonStoppedRegionServersAvailable(final int num) throws IOException {
boolean startedServer = ensureSomeRegionServersAvailable(num);
int nonStoppedServers = 0;
for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
HRegionServer hrs = rst.getRegionServer();
if (hrs.isStopping() || hrs.isStopped()) {
LOG.info("A region server is stopped or stopping:" + hrs);
} else {
nonStoppedServers++;
}
}
for (int i = nonStoppedServers; i < num; ++i) {
LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
startedServer = true;
}
return startedServer;
} | 3.68 |
framework_VListSelect_getRows | /**
* Returns the number of visible items for the list select.
*
* @return the number of items to show
* @see ListBox#setVisibleItemCount(int)
*/
public int getRows() {
return select.getVisibleItemCount();
} | 3.68 |
framework_ApplicationConnection_hasEventListeners | /**
* @deprecated As of 7.0. Use
* {@link AbstractComponentConnector#hasEventListener(String)}
* instead
*/
@Deprecated
public boolean hasEventListeners(Widget widget, String eventIdentifier) {
ComponentConnector connector = getConnectorMap().getConnector(widget);
if (connector == null) {
/*
* No connector will exist in cases where Vaadin widgets have been
* re-used without implementing server<->client communication.
*/
return false;
}
return hasEventListeners(connector, eventIdentifier);
} | 3.68 |
hadoop_AbfsHttpOperation_elapsedTimeMs | /**
* Returns the elapsed time in milliseconds.
*/
private long elapsedTimeMs(final long startTime) {
return (System.nanoTime() - startTime) / ONE_MILLION;
} | 3.68 |
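A self-contained sketch of the same nanoTime-based elapsed-time pattern; the ONE_MILLION constant is reproduced locally because the original field is not part of the snippet.

```java
public class ElapsedTimeExample {
    // Nanoseconds per millisecond, standing in for the ONE_MILLION constant assumed above.
    private static final long ONE_MILLION = 1_000_000L;

    private static long elapsedTimeMs(long startTimeNs) {
        return (System.nanoTime() - startTimeNs) / ONE_MILLION;
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(50); // simulate some work
        System.out.println("elapsed ~" + elapsedTimeMs(start) + " ms");
    }
}
```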
MagicPlugin_ConfigUtils_getIntegerList | /**
* Gets a list of integers. Invalid entries will not be in the list and
* there will be no null slots. If the list is not defined, an empty
* list will be returned. The node must be an actual list and not just
* an integer.
*
* @param section the ConfigurationSection to load the list from
* @param path path to node (dot notation)
* @return the list of integers, never null
*/
public static List<Integer> getIntegerList(ConfigurationSection section, String path) {
List<Object> raw = getList(section, path);
if (raw == null) {
return new ArrayList<>();
}
List<Integer> list = new ArrayList<>();
for (Object o : raw) {
Integer i = castInt(o);
if (i != null) {
list.add(i);
}
}
return list;
} | 3.68 |
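The lenient "skip invalid entries, never return null" policy above can be shown without the Bukkit ConfigurationSection API. A minimal standalone sketch over a plain List&lt;Object&gt;, with castInt approximated by a Number/String check.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class LenientIntegerListExample {
    // Rough stand-in for castInt: accept Numbers and numeric Strings, reject everything else.
    private static Integer castInt(Object o) {
        if (o instanceof Number) {
            return ((Number) o).intValue();
        }
        if (o instanceof String) {
            try {
                return Integer.parseInt((String) o);
            } catch (NumberFormatException ignored) {
                return null;
            }
        }
        return null;
    }

    public static List<Integer> getIntegerList(List<Object> raw) {
        if (raw == null) {
            return new ArrayList<>(); // never null, mirroring the method above
        }
        List<Integer> list = new ArrayList<>();
        for (Object o : raw) {
            Integer i = castInt(o);
            if (i != null) {
                list.add(i); // invalid entries are silently skipped
            }
        }
        return list;
    }

    public static void main(String[] args) {
        List<Object> raw = Arrays.asList(1, "2", "not-a-number", 3.7, null);
        System.out.println(getIntegerList(raw)); // [1, 2, 3]
    }
}
```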
morf_AbstractSqlDialectTest_testCaseSelect | /**
* Tests case clauses in a select statement.
*/
@Test
public void testCaseSelect() {
WhenCondition whenCondition = new WhenCondition(
eq(new FieldReference(CHAR_FIELD), new FieldLiteral('Y')),
new FieldReference(INT_FIELD));
SelectStatement stmt = new SelectStatement(
new FieldReference(STRING_FIELD),
new FieldReference(BOOLEAN_FIELD),
new FieldReference(CHAR_FIELD) ,
new CaseStatement(new FieldReference(FLOAT_FIELD), whenCondition))
.from(new TableReference(TEST_TABLE));
String value = varCharCast("'Y'");
String expectedSql = "SELECT stringField, booleanField, charField, CASE WHEN (charField = " + stringLiteralPrefix() + value +") THEN intField ELSE floatField END FROM " + tableName(TEST_TABLE);
assertEquals("Select with case statement", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
rocketmq-connect_DeadLetterQueueConfig_includeRecordDetailsInErrorLog | /**
* Whether record details should be included in the error log.
*
* @return true if record details are included in error log messages
*/
public boolean includeRecordDetailsInErrorLog() {
return config.getProperties().containsKey(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG) ?
Boolean.valueOf(config.getProperties().get(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG)) : ERRORS_LOG_INCLUDE_MESSAGES_DEFAULT;
} | 3.68 |
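A minimal sketch of the same "config value with fallback default" pattern against a plain Properties object; the key and default here are placeholders, not the real rocketmq-connect constants.

```java
import java.util.Properties;

public class ConfigDefaultExample {
    // Placeholder key and default, standing in for ERRORS_LOG_INCLUDE_MESSAGES_CONFIG/_DEFAULT.
    private static final String KEY = "errors.log.include.messages";
    private static final boolean DEFAULT_VALUE = false;

    static boolean includeRecordDetailsInErrorLog(Properties props) {
        // Only parse the value when the key is actually present, otherwise fall back.
        return props.containsKey(KEY) ? Boolean.valueOf(props.getProperty(KEY)) : DEFAULT_VALUE;
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        System.out.println(includeRecordDetailsInErrorLog(props)); // false (default)
        props.setProperty(KEY, "true");
        System.out.println(includeRecordDetailsInErrorLog(props)); // true
    }
}
```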
hadoop_ContainerServiceRecordProcessor_initTypeToInfoMapping | /**
* Initializes the DNS record type to descriptor mapping based on the
* provided service record.
* @param serviceRecord the registry service record.
* @throws Exception if an issue arises.
*/
@Override public void initTypeToInfoMapping(ServiceRecord serviceRecord)
throws Exception {
if (serviceRecord.get(YarnRegistryAttributes.YARN_IP) != null) {
for (int type : getRecordTypes()) {
switch (type) {
case Type.A:
createAInfo(serviceRecord);
break;
case Type.AAAA:
createAAAAInfo(serviceRecord);
break;
case Type.PTR:
createPTRInfo(serviceRecord);
break;
case Type.TXT:
createTXTInfo(serviceRecord);
break;
default:
throw new IllegalArgumentException("Unknown type " + type);
}
}
}
} | 3.68 |
dubbo_ConverterUtil_convertIfPossible | /**
* Convert the value of source to target-type value if possible
*
* @param source the value of source
* @param targetType the target type
* @param <T> the target type
* @return <code>null</code> if can't be converted
* @since 2.7.8
*/
public <T> T convertIfPossible(Object source, Class<T> targetType) {
Converter converter = getConverter(source.getClass(), targetType);
if (converter != null) {
return (T) converter.convert(source);
}
return null;
} | 3.68 |
hudi_BaseTableMetadata_getAllFilesInPartition | /**
* Return the list of files in a partition.
* <p>
* If the Metadata Table is enabled, the listing is retrieved from the stored metadata. Otherwise, the list of
* partitions is retrieved directly from the underlying {@code FileSystem}.
* <p>
* On any errors retrieving the listing from the metadata, defaults to using the file system listings.
*
* @param partitionPath The absolute path of the partition to list
*/
@Override
public FileStatus[] getAllFilesInPartition(Path partitionPath) throws IOException {
ValidationUtils.checkArgument(isMetadataTableInitialized);
try {
return fetchAllFilesInPartition(partitionPath);
} catch (Exception e) {
throw new HoodieMetadataException("Failed to retrieve files in partition " + partitionPath + " from metadata", e);
}
} | 3.68 |
zxing_WifiConfigManager_isHexOfLength | /**
* @param value input to check
* @param allowedLengths allowed lengths, if any
* @return true if value is a non-null, non-empty string of hex digits, and if allowed lengths are given, has
* an allowed length
*/
private static boolean isHexOfLength(CharSequence value, int... allowedLengths) {
if (value == null || !HEX_DIGITS.matcher(value).matches()) {
return false;
}
if (allowedLengths.length == 0) {
return true;
}
for (int length : allowedLengths) {
if (value.length() == length) {
return true;
}
}
return false;
} | 3.68 |
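A standalone version of the hex-and-length check above; the HEX_DIGITS pattern is recreated locally since the original constant is not part of the snippet.

```java
import java.util.regex.Pattern;

public class HexLengthCheckExample {
    // Recreated locally; the zxing constant itself is not shown in the snippet above.
    private static final Pattern HEX_DIGITS = Pattern.compile("[0-9A-Fa-f]+");

    static boolean isHexOfLength(CharSequence value, int... allowedLengths) {
        if (value == null || !HEX_DIGITS.matcher(value).matches()) {
            return false;
        }
        if (allowedLengths.length == 0) {
            return true; // any length of hex digits is acceptable
        }
        for (int length : allowedLengths) {
            if (value.length() == length) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isHexOfLength("0A1B2C3D4E", 10, 26)); // true: hex, length 10
        System.out.println(isHexOfLength("0A1B2C", 10, 26));     // false: wrong length
        System.out.println(isHexOfLength("XYZ"));                // false: not hex
    }
}
```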
flink_FlinkSqlOperatorTable_instance | /** Returns the Flink operator table, creating it if necessary. */
public static synchronized FlinkSqlOperatorTable instance(boolean isBatchMode) {
FlinkSqlOperatorTable instance = cachedInstances.get(isBatchMode);
if (instance == null) {
// Creates and initializes the standard operator table.
// Uses two-phase construction, because we can't initialize the
// table until the constructor of the sub-class has completed.
instance = new FlinkSqlOperatorTable();
instance.init();
// ensure no dynamic functions are declared directly
validateNoDynamicFunction(instance);
// register functions based on batch or streaming mode
final FlinkSqlOperatorTable finalInstance = instance;
dynamicFunctions(isBatchMode).forEach(f -> finalInstance.register(f));
cachedInstances.put(isBatchMode, finalInstance);
}
return instance;
} | 3.68 |
flink_KubernetesUtils_createCompletedCheckpointStore | /**
* Create a {@link DefaultCompletedCheckpointStore} with {@link KubernetesStateHandleStore}.
*
* @param configuration configuration to build a RetrievableStateStorageHelper
* @param kubeClient flink kubernetes client
* @param configMapName ConfigMap name
* @param executor executor to run blocking calls
* @param lockIdentity lock identity to check the leadership
* @param maxNumberOfCheckpointsToRetain max number of checkpoints to retain on state store
* handle
* @param sharedStateRegistryFactory factory for creating the shared state registry
* @param ioExecutor executor used by the shared state registry
* @param restoreMode the mode in which the job is restoring
* @return a {@link DefaultCompletedCheckpointStore} with {@link KubernetesStateHandleStore}.
* @throws Exception when create the storage helper failed
*/
public static CompletedCheckpointStore createCompletedCheckpointStore(
Configuration configuration,
FlinkKubeClient kubeClient,
Executor executor,
String configMapName,
@Nullable String lockIdentity,
int maxNumberOfCheckpointsToRetain,
SharedStateRegistryFactory sharedStateRegistryFactory,
Executor ioExecutor,
RestoreMode restoreMode)
throws Exception {
final RetrievableStateStorageHelper<CompletedCheckpoint> stateStorage =
new FileSystemStateStorageHelper<>(
HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(
configuration),
COMPLETED_CHECKPOINT_FILE_SUFFIX);
final KubernetesStateHandleStore<CompletedCheckpoint> stateHandleStore =
new KubernetesStateHandleStore<>(
kubeClient,
configMapName,
stateStorage,
k -> k.startsWith(CHECKPOINT_ID_KEY_PREFIX),
lockIdentity);
Collection<CompletedCheckpoint> checkpoints =
DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints(
stateHandleStore, KubernetesCheckpointStoreUtil.INSTANCE);
return new DefaultCompletedCheckpointStore<>(
maxNumberOfCheckpointsToRetain,
stateHandleStore,
KubernetesCheckpointStoreUtil.INSTANCE,
checkpoints,
sharedStateRegistryFactory.create(ioExecutor, checkpoints, restoreMode),
executor);
} | 3.68 |
hmily_ZookeeperRepository_nextPath | /**
* Next path string.
*
* @return the string
*/
public String nextPath() {
path = path + "/" + nodes[index];
index++;
return path;
} | 3.68 |
hbase_ExploringCompactionPolicy_filesInRatio | /**
* Check that all files satisfy the constraint
*
* <pre>
* FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i)) * Ratio.
* </pre>
*
* @param files List of store files to consider as a compaction candidate.
* @param currentRatio The ratio to use.
* @return a boolean if these files satisfy the ratio constraints.
*/
private boolean filesInRatio(List<HStoreFile> files, double currentRatio) {
if (files.size() < 2) {
return true;
}
long totalFileSize = getTotalStoreSize(files);
for (HStoreFile file : files) {
long singleFileSize = file.getReader().length();
long sumAllOtherFileSizes = totalFileSize - singleFileSize;
if (singleFileSize > sumAllOtherFileSizes * currentRatio) {
return false;
}
}
return true;
} | 3.68 |
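A minimal sketch of the ratio constraint above over plain file sizes instead of HStoreFile readers; the sizes and the 1.2 ratio are illustrative values only.

```java
import java.util.Arrays;
import java.util.List;

public class CompactionRatioExample {
    // Every file must satisfy: size(i) <= (sum of all other sizes) * ratio.
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // 100 > (10 + 10) * 1.2 = 24, so the 100-byte file is too large relative to the rest.
        System.out.println(filesInRatio(Arrays.asList(100L, 10L, 10L), 1.2)); // false
        // Each of 10, 12, 11 is well below the sum of the others times 1.2.
        System.out.println(filesInRatio(Arrays.asList(10L, 12L, 11L), 1.2));  // true
    }
}
```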
flink_SegmentsUtil_getBoolean | /**
* get boolean from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static boolean getBoolean(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].getBoolean(offset);
} else {
return getBooleanMultiSegments(segments, offset);
}
} | 3.68 |
flink_CatalogManager_alterTable | /**
* Alters a table in a given fully qualified path with table changes.
*
* @param table The table to put in the given path
* @param changes The table changes from the original table to the new table.
* @param objectIdentifier The fully qualified path where to alter the table.
* @param ignoreIfNotExists If false exception will be thrown if the table or database or
* catalog to be altered does not exist.
*/
public void alterTable(
CatalogBaseTable table,
List<TableChange> changes,
ObjectIdentifier objectIdentifier,
boolean ignoreIfNotExists) {
execute(
(catalog, path) -> {
final CatalogBaseTable resolvedTable = resolveCatalogBaseTable(table);
catalog.alterTable(path, resolvedTable, changes, ignoreIfNotExists);
if (resolvedTable instanceof CatalogTable) {
catalogModificationListeners.forEach(
listener ->
listener.onEvent(
AlterTableEvent.createEvent(
CatalogContext.createContext(
objectIdentifier.getCatalogName(),
catalog),
objectIdentifier,
resolvedTable,
ignoreIfNotExists)));
}
},
objectIdentifier,
ignoreIfNotExists,
"AlterTable");
} | 3.68 |
hbase_ClusterStatusTracker_isClusterUp | /**
* Checks if cluster is up.
* @return true if the cluster up ('shutdown' is its name up in zk) znode exists with data, false
* if not
*/
public boolean isClusterUp() {
return super.getData(false) != null;
} | 3.68 |
hibernate-validator_AbstractStaxBuilder_readAttribute | /**
* Reads a value of an attribute of a given element.
*
* @param startElement an element to get an attribute from
* @param qName a {@link QName} of an attribute to read
*
* @return a value of an attribute if it is present, {@link Optional#empty()} otherwise
*/
protected Optional<String> readAttribute(StartElement startElement, QName qName) {
Attribute attribute = startElement.getAttributeByName( qName );
return Optional.ofNullable( attribute ).map( Attribute::getValue );
} | 3.68 |
pulsar_TopicsBase_encodeWithSchema | // Encode message with corresponding schema, do necessary conversion before encoding
private byte[] encodeWithSchema(String input, Schema schema) {
try {
switch (schema.getSchemaInfo().getType()) {
case INT8:
return schema.encode(Byte.parseByte(input));
case INT16:
return schema.encode(Short.parseShort(input));
case INT32:
return schema.encode(Integer.parseInt(input));
case INT64:
return schema.encode(Long.parseLong(input));
case STRING:
return schema.encode(input);
case FLOAT:
return schema.encode(Float.parseFloat(input));
case DOUBLE:
return schema.encode(Double.parseDouble(input));
case BOOLEAN:
return schema.encode(Boolean.parseBoolean(input));
case BYTES:
return schema.encode(input.getBytes());
case DATE:
return schema.encode(DateFormat.getDateInstance().parse(input));
case TIME:
return schema.encode(new Time(Long.parseLong(input)));
case TIMESTAMP:
return schema.encode(new Timestamp(Long.parseLong(input)));
case INSTANT:
return schema.encode(Instant.parse(input));
case LOCAL_DATE:
return schema.encode(LocalDate.parse(input));
case LOCAL_TIME:
return schema.encode(LocalTime.parse(input));
case LOCAL_DATE_TIME:
return schema.encode(LocalDateTime.parse(input));
case JSON:
GenericJsonWriter jsonWriter = new GenericJsonWriter();
return jsonWriter.write(new GenericJsonRecord(null, null,
ObjectMapperFactory.getMapper().reader().readTree(input), schema.getSchemaInfo()));
case AVRO:
AvroBaseStructSchema avroSchema = ((AvroBaseStructSchema) schema);
Decoder decoder = DecoderFactory.get().jsonDecoder(avroSchema.getAvroSchema(), input);
DatumReader<GenericData.Record> reader = new GenericDatumReader(avroSchema.getAvroSchema());
GenericRecord genericRecord = reader.read(null, decoder);
GenericAvroWriter avroWriter = new GenericAvroWriter(avroSchema.getAvroSchema());
return avroWriter.write(new GenericAvroRecord(null,
avroSchema.getAvroSchema(), null, genericRecord));
case PROTOBUF_NATIVE:
case KEY_VALUE:
default:
throw new PulsarClientException.InvalidMessageException("");
}
} catch (Exception e) {
if (log.isDebugEnabled()) {
log.debug("Fail to encode value {} with schema {} for rest produce request", input,
new String(schema.getSchemaInfo().getSchema()));
}
return new byte[0];
}
} | 3.68 |
flink_ExtendedParser_parse | /**
* Parse the input statement to the {@link Operation}.
*
* @param statement the command to evaluate
* @return parsed operation that represents the command
*/
public Optional<Operation> parse(String statement) {
for (ExtendedParseStrategy strategy : PARSE_STRATEGIES) {
if (strategy.match(statement)) {
return Optional.of(strategy.convert(statement));
}
}
return Optional.empty();
} | 3.68 |
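A minimal sketch of the same first-match strategy loop in plain Java, using hypothetical strategies and string "operations" rather than Flink's ExtendedParseStrategy and Operation types.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class FirstMatchStrategyExample {
    // Hypothetical stand-in for ExtendedParseStrategy: can it handle the statement, and how?
    interface ParseStrategy {
        boolean match(String statement);
        String convert(String statement);
    }

    private static final List<ParseStrategy> PARSE_STRATEGIES = Arrays.asList(
            new ParseStrategy() {
                public boolean match(String s) { return s.trim().equalsIgnoreCase("HELP"); }
                public String convert(String s) { return "HelpOperation"; }
            },
            new ParseStrategy() {
                public boolean match(String s) { return s.trim().toUpperCase().startsWith("CLEAR"); }
                public String convert(String s) { return "ClearOperation"; }
            });

    // Mirrors the loop above: the first strategy that matches wins, otherwise Optional.empty().
    static Optional<String> parse(String statement) {
        for (ParseStrategy strategy : PARSE_STRATEGIES) {
            if (strategy.match(statement)) {
                return Optional.of(strategy.convert(statement));
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(parse("help"));     // Optional[HelpOperation]
        System.out.println(parse("SELECT 1")); // Optional.empty
    }
}
```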
flink_SourceTestSuiteBase_checkResultWithSemantic | /**
* Compare the test data with the result.
*
* <p>If the source is bounded, limit should be null.
*
* @param resultIterator the data read from the job
* @param testData the test data
* @param semantic the supported semantic, see {@link CheckpointingMode}
* @param limit expected number of the data to read from the job
*/
protected void checkResultWithSemantic(
CloseableIterator<T> resultIterator,
List<List<T>> testData,
CheckpointingMode semantic,
Integer limit) {
if (limit != null) {
Runnable runnable =
() ->
CollectIteratorAssertions.assertThat(resultIterator)
.withNumRecordsLimit(limit)
.matchesRecordsFromSource(testData, semantic);
assertThatFuture(runAsync(runnable)).eventuallySucceeds();
} else {
CollectIteratorAssertions.assertThat(resultIterator)
.matchesRecordsFromSource(testData, semantic);
}
} | 3.68 |
pulsar_RelativeTimeUtil_nsToSeconds | /**
* Convert nanoseconds to seconds and keep three decimal places.
* @param ns
* @return seconds
*/
public static double nsToSeconds(long ns) {
double seconds = (double) ns / 1_000_000_000;
BigDecimal bd = new BigDecimal(seconds);
return bd.setScale(3, RoundingMode.HALF_UP).doubleValue();
} | 3.68 |
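A runnable sketch of the rounding above: the same BigDecimal logic, with a couple of example inputs.

```java
import java.math.BigDecimal;
import java.math.RoundingMode;

public class NsToSecondsExample {
    // Convert nanoseconds to seconds, rounded to three decimal places (HALF_UP).
    static double nsToSeconds(long ns) {
        double seconds = (double) ns / 1_000_000_000;
        return new BigDecimal(seconds).setScale(3, RoundingMode.HALF_UP).doubleValue();
    }

    public static void main(String[] args) {
        System.out.println(nsToSeconds(1_234_567_890L)); // 1.235
        System.out.println(nsToSeconds(500_000L));       // 0.001 (rounded up from 0.0005)
    }
}
```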
morf_TableSetSchema_tableExists | /**
* @see org.alfasoftware.morf.metadata.Schema#tableExists(java.lang.String)
*/
@Override
public boolean tableExists(final String name) {
return tables.stream().anyMatch(table -> table.getName().equalsIgnoreCase(name));
} | 3.68 |
hadoop_OperationAuditorOptions_builder | /**
* Create one.
* @return a new option instance
*/
public static OperationAuditorOptions builder() {
return new OperationAuditorOptions();
} | 3.68 |
hadoop_MawoConfiguration_getZKRetryIntervalMS | /**
* Get ZooKeeper retry interval value in milliseconds.
* @return value of ZooKeeper.retry.interval.ms
*/
public int getZKRetryIntervalMS() {
return Integer.parseInt(configsMap.get(ZK_RETRY_INTERVAL_MS));
} | 3.68 |
hudi_MarkerHandler_stop | /**
* Stops the dispatching of marker creation requests.
*/
public void stop() {
if (dispatchingThreadFuture != null) {
dispatchingThreadFuture.cancel(true);
}
dispatchingExecutorService.shutdown();
batchingExecutorService.shutdown();
} | 3.68 |
hadoop_DatanodeCacheManager_getLiveDatanodeStorageReport | /**
* Returns the live datanodes and their storage details that have available
* space (> 0) to schedule block moves. This will return an array of datanodes
* from its local cache. It has a configurable refresh interval in millis and
* periodically refresh the datanode cache by fetching latest
* {@link Context#getLiveDatanodeStorageReport()} once it elapsed refresh
* interval.
*
* @throws IOException
*/
public DatanodeMap getLiveDatanodeStorageReport(
Context spsContext) throws IOException {
long now = Time.monotonicNow();
long elapsedTimeMs = now - lastAccessedTime;
boolean refreshNeeded = elapsedTimeMs >= refreshIntervalMs;
lastAccessedTime = now;
if (refreshNeeded) {
if (LOG.isDebugEnabled()) {
LOG.debug("elapsedTimeMs > refreshIntervalMs : {} > {},"
+ " so refreshing cache", elapsedTimeMs, refreshIntervalMs);
}
datanodeMap.reset(); // clear all previously cached items.
// Fetch live datanodes from namenode and prepare DatanodeMap.
DatanodeStorageReport[] liveDns = spsContext
.getLiveDatanodeStorageReport();
for (DatanodeStorageReport storage : liveDns) {
StorageReport[] storageReports = storage.getStorageReports();
List<StorageType> storageTypes = new ArrayList<>();
List<Long> remainingSizeList = new ArrayList<>();
for (StorageReport t : storageReports) {
if (t.getRemaining() > 0) {
storageTypes.add(t.getStorage().getStorageType());
remainingSizeList.add(t.getRemaining());
}
}
datanodeMap.addTarget(storage.getDatanodeInfo(), storageTypes,
remainingSizeList);
}
if (LOG.isDebugEnabled()) {
LOG.debug("LIVE datanodes: {}", datanodeMap);
}
// get network topology
cluster = spsContext.getNetworkTopology(datanodeMap);
}
return datanodeMap;
} | 3.68 |
pulsar_AuthenticationDataSource_hasDataFromPeer | /**
* Check if data from the peer is available.
*
* @return true if this authentication data contain data from peer
*/
default boolean hasDataFromPeer() {
return false;
} | 3.68 |
flink_CrossOperator_projectTuple23 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>
        ProjectCross<I1, I2, Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>
        projectTuple23() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> tType =
            new TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(fTypes);
    return new ProjectCross<I1, I2, Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(
            this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hbase_AbstractStateMachineRegionProcedure_getRegion | /** Returns The RegionInfo of the region we are operating on. */
public RegionInfo getRegion() {
return this.hri;
} | 3.68 |
hbase_SnapshotHFileCleaner_getFileCacheForTesting | /**
* Exposed for Testing!
* @return the cache of all hfiles
*/
public SnapshotFileCache getFileCacheForTesting() {
return this.cache;
} | 3.68 |
shardingsphere-elasticjob_ExecutionService_setMisfire | /**
* Set misfire flag if sharding items are still running.
*
* @param items sharding items need to be set misfire flag
*/
public void setMisfire(final Collection<Integer> items) {
for (int each : items) {
jobNodeStorage.createJobNodeIfNeeded(ShardingNode.getMisfireNode(each));
}
} | 3.68 |
hudi_HoodieIndexUtils_checkIfValidCommit | /**
* Check if the given commit timestamp is valid for the timeline.
*
* The commit timestamp is considered to be valid if:
* 1. the commit timestamp is present in the timeline, or
* 2. the commit timestamp is less than the first commit timestamp in the timeline
*
* @param commitTimeline The timeline
* @param commitTs The commit timestamp to check
* @return true if the commit timestamp is valid for the timeline
*/
public static boolean checkIfValidCommit(HoodieTimeline commitTimeline, String commitTs) {
return !commitTimeline.empty() && commitTimeline.containsOrBeforeTimelineStarts(commitTs);
} | 3.68 |
zxing_FinderPatternFinder_findRowSkip | /**
* @return number of rows we could safely skip during scanning, based on the first
* two finder patterns that have been located. In some cases their position will
* allow us to infer that the third pattern must lie below a certain point farther
* down in the image.
*/
private int findRowSkip() {
int max = possibleCenters.size();
if (max <= 1) {
return 0;
}
ResultPoint firstConfirmedCenter = null;
for (FinderPattern center : possibleCenters) {
if (center.getCount() >= CENTER_QUORUM) {
if (firstConfirmedCenter == null) {
firstConfirmedCenter = center;
} else {
// We have two confirmed centers
// How far down can we skip before resuming looking for the next
// pattern? In the worst case, only the difference between the
// difference in the x / y coordinates of the two centers.
// This is the case where you find top left last.
hasSkipped = true;
return (int) (Math.abs(firstConfirmedCenter.getX() - center.getX()) -
Math.abs(firstConfirmedCenter.getY() - center.getY())) / 2;
}
}
}
return 0;
} | 3.68 |
flink_RecordsBySplits_addFinishedSplits | /**
* Mark multiple splits with the given IDs as finished.
*
* @param splitIds the IDs of the finished splits.
*/
public void addFinishedSplits(Collection<String> splitIds) {
finishedSplits.addAll(splitIds);
} | 3.68 |
flink_CommonTestUtils_blockForeverNonInterruptibly | /**
* Permanently blocks the current thread. The thread cannot be woken up via {@link
* Thread#interrupt()}.
*/
public static void blockForeverNonInterruptibly() {
final Object lock = new Object();
//noinspection InfiniteLoopStatement
while (true) {
try {
//noinspection SynchronizationOnLocalVariableOrMethodParameter
synchronized (lock) {
lock.wait();
}
} catch (InterruptedException ignored) {
}
}
} | 3.68 |
AreaShop_GeneralRegion_setLandlord | /**
* Set the landlord of this region (the player that receives all revenue of this region).
* @param landlord The UUID of the player that should be set as landlord
* @param name The backup name of the player (used in case the UUID cannot be resolved to a player name)
*/
public void setLandlord(UUID landlord, String name) {
if(landlord != null) {
setSetting("general.landlord", landlord.toString());
}
String properName = Utils.toName(landlord);
if(properName == null) {
properName = name;
}
setSetting("general.landlordName", properName);
} | 3.68 |
flink_FileSourceSplit_fileModificationTime | /** Returns the modification time of the file, from {@link FileStatus#getModificationTime()}. */
public long fileModificationTime() {
return fileModificationTime;
} | 3.68 |
hadoop_TimelineDomain_getReaders | /**
* Get the reader (and/or reader group) list string
*
* @return the reader (and/or reader group) list string
*/
@XmlElement(name = "readers")
public String getReaders() {
return readers;
} | 3.68 |
flink_BlobClient_downloadFromBlobServer | /**
* Downloads the given BLOB from the given server and stores its contents to a (local) file.
*
* <p>Transient BLOB files are deleted after a successful copy of the server's data into the
* given <tt>localJarFile</tt>.
*
* @param jobId job ID the BLOB belongs to or <tt>null</tt> if job-unrelated
* @param blobKey BLOB key
* @param localJarFile the local file to write to
* @param serverAddress address of the server to download from
* @param blobClientConfig client configuration for the connection
* @param numFetchRetries number of retries before failing
* @throws IOException if an I/O error occurs during the download
*/
static void downloadFromBlobServer(
@Nullable JobID jobId,
BlobKey blobKey,
File localJarFile,
InetSocketAddress serverAddress,
Configuration blobClientConfig,
int numFetchRetries)
throws IOException {
final byte[] buf = new byte[BUFFER_SIZE];
LOG.info("Downloading {}/{} from {}", jobId, blobKey, serverAddress);
// loop over retries
int attempt = 0;
while (true) {
try (final BlobClient bc = new BlobClient(serverAddress, blobClientConfig);
final InputStream is = bc.getInternal(jobId, blobKey);
final OutputStream os = new FileOutputStream(localJarFile)) {
while (true) {
final int read = is.read(buf);
if (read < 0) {
break;
}
os.write(buf, 0, read);
}
return;
} catch (Throwable t) {
String message =
"Failed to fetch BLOB "
+ jobId
+ "/"
+ blobKey
+ " from "
+ serverAddress
+ " and store it under "
+ localJarFile.getAbsolutePath();
if (attempt < numFetchRetries) {
if (LOG.isDebugEnabled()) {
LOG.error(message + " Retrying...", t);
} else {
LOG.error(message + " Retrying...");
}
} else {
LOG.error(message + " No retries left.", t);
throw new IOException(message, t);
}
// retry
++attempt;
LOG.info(
"Downloading {}/{} from {} (retry {})",
jobId,
blobKey,
serverAddress,
attempt);
}
} // end loop over retries
} | 3.68 |
framework_Table_handleColumnResizeEvent | /**
* Handles the column resize event sent by the client.
*
* @param variables the variables sent by the client
*/
private void handleColumnResizeEvent(Map<String, Object> variables) {
if (variables.containsKey("columnResizeEventColumn")) {
Object cid = variables.get("columnResizeEventColumn");
Object propertyId = null;
if (cid != null) {
propertyId = columnIdMap.get(cid.toString());
Object prev = variables.get("columnResizeEventPrev");
int previousWidth = -1;
if (prev != null) {
previousWidth = Integer.valueOf(prev.toString());
}
Object curr = variables.get("columnResizeEventCurr");
int currentWidth = -1;
if (curr != null) {
currentWidth = Integer.valueOf(curr.toString());
}
fireColumnResizeEvent(propertyId, previousWidth, currentWidth);
}
}
} | 3.68 |
hbase_RestCsrfPreventionFilter_isBrowser | /**
* This method interrogates the User-Agent String and returns whether it refers to a browser. If
* it is not a browser, then the requirement for the CSRF header will not be enforced; if it is a
* browser, the requirement will be enforced.
* <p>
* A User-Agent String is considered to be a browser if it matches any of the regex patterns from
* browser-useragent-regex; the default behavior is to consider everything a browser that matches
* the following: "^Mozilla.*,^Opera.*". Subclasses can optionally override this method to use
* different behavior.
* @param userAgent The User-Agent String, or null if there isn't one
* @return true if the User-Agent String refers to a browser, false if not
*/
protected boolean isBrowser(String userAgent) {
if (userAgent == null) {
return false;
}
for (Pattern pattern : browserUserAgents) {
Matcher matcher = pattern.matcher(userAgent);
if (matcher.matches()) {
return true;
}
}
return false;
} | 3.68 |
hadoop_AbfsOutputStream_uploadBlockAsync | /**
* Upload a block of data.
* This will take the block and upload its contents.
*
* @param blockToUpload block to upload.
* @throws IOException upload failure
*/
private void uploadBlockAsync(DataBlocks.DataBlock blockToUpload,
boolean isFlush, boolean isClose)
throws IOException {
if (this.isAppendBlob) {
writeAppendBlobCurrentBufferToService();
return;
}
if (!blockToUpload.hasData()) {
return;
}
numOfAppendsToServerSinceLastFlush++;
final int bytesLength = blockToUpload.dataSize();
final long offset = position;
position += bytesLength;
outputStreamStatistics.bytesToUpload(bytesLength);
outputStreamStatistics.writeCurrentBuffer();
DataBlocks.BlockUploadData blockUploadData = blockToUpload.startUpload();
final Future<Void> job =
executorService.submit(() -> {
AbfsPerfTracker tracker =
client.getAbfsPerfTracker();
try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker,
"writeCurrentBufferToService", "append")) {
AppendRequestParameters.Mode
mode = APPEND_MODE;
if (isFlush & isClose) {
mode = FLUSH_CLOSE_MODE;
} else if (isFlush) {
mode = FLUSH_MODE;
}
/*
* Parameters Required for an APPEND call.
* offset(here) - refers to the position in the file.
* bytesLength - Data to be uploaded from the block.
* mode - If it's append, flush or flush_close.
* leaseId - The AbfsLeaseId for this request.
*/
AppendRequestParameters reqParams = new AppendRequestParameters(
offset, 0, bytesLength, mode, false, leaseId, isExpectHeaderEnabled);
AbfsRestOperation op =
client.append(path, blockUploadData.toByteArray(), reqParams,
cachedSasToken.get(), new TracingContext(tracingContext));
cachedSasToken.update(op.getSasToken());
perfInfo.registerResult(op.getResult());
perfInfo.registerSuccess(true);
outputStreamStatistics.uploadSuccessful(bytesLength);
return null;
} finally {
IOUtils.close(blockUploadData, blockToUpload);
}
});
writeOperations.add(new WriteOperation(job, offset, bytesLength));
// Try to shrink the queue
shrinkWriteOperationQueue();
} | 3.68 |
hadoop_ArrayFile_seek | /**
* Positions the reader before its <code>n</code>th value.
*
* @param n the index of the value to position before.
* @throws IOException raised on errors performing I/O.
*/
public synchronized void seek(long n) throws IOException {
key.set(n);
seek(key);
} | 3.68 |
morf_Cast_getWidth | /**
* @return the width
*/
public int getWidth() {
return width;
} | 3.68 |
hbase_HBaseTestingUtility_available | /**
* Checks to see if a specific port is available.
* @param port the port number to check for availability
* @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
*/
public static boolean available(int port) {
ServerSocket ss = null;
DatagramSocket ds = null;
try {
ss = new ServerSocket(port);
ss.setReuseAddress(true);
ds = new DatagramSocket(port);
ds.setReuseAddress(true);
return true;
} catch (IOException e) {
// Do nothing
} finally {
if (ds != null) {
ds.close();
}
if (ss != null) {
try {
ss.close();
} catch (IOException e) {
/* should not be thrown */
}
}
}
return false;
} | 3.68 |
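A standalone sketch of the same probe (the port is free only if both a TCP and a UDP socket can bind to it), here written with try-with-resources and wrapped in a loop over a hypothetical port range.

```java
import java.io.IOException;
import java.net.DatagramSocket;
import java.net.ServerSocket;

public class PortProbeExample {
    // The port counts as available only when both a TCP and a UDP socket can bind to it.
    static boolean available(int port) {
        try (ServerSocket ss = new ServerSocket(port);
             DatagramSocket ds = new DatagramSocket(port)) {
            ss.setReuseAddress(true);
            ds.setReuseAddress(true);
            return true;
        } catch (IOException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        // Hypothetical range; scan until the first free port is found.
        for (int port = 50_000; port < 50_010; port++) {
            if (available(port)) {
                System.out.println("first available port: " + port);
                return;
            }
        }
        System.out.println("no free port in range");
    }
}
```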
flink_TieredStorageConfiguration_getDiskIOSchedulerBufferRequestTimeout | /**
* Maximum time to wait when requesting read buffers from the buffer pool before throwing an
* exception in {@link DiskIOScheduler}.
*
* @return timeout duration.
*/
public Duration getDiskIOSchedulerBufferRequestTimeout() {
return diskIOSchedulerRequestTimeout;
} | 3.68 |
hudi_ImmutableTriple_getRight | /**
* {@inheritDoc}
*/
@Override
public R getRight() {
return right;
} | 3.68 |
hibernate-validator_ValidatorBean_isNullable | // TODO to be removed once using CDI API 4.x
public boolean isNullable() {
return false;
} | 3.68 |
hmily_SingletonHolder_get | /**
* Get t.
*
* @param <T> the type parameter
* @param clazz the clazz
* @return the t
*/
@SuppressWarnings("unchecked")
public <T> T get(final Class<T> clazz) {
return (T) SINGLES.get(clazz.getName());
} | 3.68 |
flink_SqlFunctionUtils_lpad | /**
* Returns the string str left-padded with the string pad to a length of len characters. If str
* is longer than len, the return value is shortened to len characters.
*/
public static String lpad(String base, int len, String pad) {
if (len < 0 || "".equals(pad)) {
return null;
} else if (len == 0) {
return "";
}
char[] data = new char[len];
char[] baseChars = base.toCharArray();
char[] padChars = pad.toCharArray();
// the length of the padding needed
int pos = Math.max(len - base.length(), 0);
// copy the padding
for (int i = 0; i < pos; i += pad.length()) {
for (int j = 0; j < pad.length() && j < pos - i; j++) {
data[i + j] = padChars[j];
}
}
// copy the base
int i = 0;
while (pos + i < len && i < base.length()) {
data[pos + i] = baseChars[i];
i += 1;
}
return new String(data);
} | 3.68 |
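A standalone copy of the padding routine above with a small driver, so its edge cases (null handling, truncation, cycling pad) can be exercised without Flink on the classpath; the inputs are illustrative.

```java
public class LeftPadExample {
    // Same contract as the method above: null for invalid input, truncate to len, cycle the pad.
    static String lpad(String base, int len, String pad) {
        if (len < 0 || "".equals(pad)) {
            return null;
        } else if (len == 0) {
            return "";
        }
        char[] data = new char[len];
        char[] baseChars = base.toCharArray();
        char[] padChars = pad.toCharArray();
        int pos = Math.max(len - base.length(), 0); // length of padding needed
        for (int i = 0; i < pos; i += pad.length()) {
            for (int j = 0; j < pad.length() && j < pos - i; j++) {
                data[i + j] = padChars[j];
            }
        }
        int i = 0;
        while (pos + i < len && i < base.length()) {
            data[pos + i] = baseChars[i];
            i += 1;
        }
        return new String(data);
    }

    public static void main(String[] args) {
        System.out.println(lpad("hi", 5, "ab"));   // "abahi" -- pad repeated, then the base
        System.out.println(lpad("hello", 3, "*")); // "hel"   -- longer input is truncated to len
        System.out.println(lpad("x", 4, ""));      // null    -- empty pad is invalid
    }
}
```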
hbase_MasterProcedureScheduler_logLockedResource | // ============================================================================
// Table Locking Helpers
// ============================================================================
/**
* Get lock info for a resource of specified type and name and log details
*/
private void logLockedResource(LockedResourceType resourceType, String resourceName) {
if (!LOG.isDebugEnabled()) {
return;
}
LockedResource lockedResource = getLockResource(resourceType, resourceName);
if (lockedResource != null) {
String msg = resourceType.toString() + " '" + resourceName + "', shared lock count="
+ lockedResource.getSharedLockCount();
Procedure<?> proc = lockedResource.getExclusiveLockOwnerProcedure();
if (proc != null) {
msg += ", exclusively locked by procId=" + proc.getProcId();
}
LOG.debug(msg);
}
} | 3.68 |
framework_PropertyFormatter_getPropertyDataSource | /**
* Gets the current data source of the formatter, if any.
*
* @return the current data source as a Property, or <code>null</code> if
* none defined.
*/
@Override
public Property<T> getPropertyDataSource() {
return dataSource;
} | 3.68 |
hadoop_BaseSolver_toRecurringRDL | /**
* Translate the estimated {@link Resource} requirements of the pipeline to
* Hadoop's {@link ReservationSubmissionRequest}.
*
* @param containerSpec the {@link Resource} to be allocated to each
* container;
* @param containerRequests the predicted {@link Resource} to be allocated to
* the job in each discrete time intervals;
* @param config configuration file for BaseSolver.
* @return {@link ReservationSubmissionRequest} to be submitted to Hadoop to
* make recurring resource reservation for the pipeline.
*/
public final ReservationSubmissionRequest toRecurringRDL(
final Resource containerSpec,
final RLESparseResourceAllocation containerRequests,
final Configuration config) {
final int timeInterval =
config.getInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);
long pipelineSubmissionTime = containerRequests.getEarliestStartTime();
long pipelineFinishTime = containerRequests.getLatestNonNullTime();
final long containerMemAlloc = containerSpec.getMemorySize();
final long jobLen =
(pipelineFinishTime - pipelineSubmissionTime) / timeInterval;
List<ReservationRequest> reservationRequestList = new ArrayList<>();
for (int i = 0; i < jobLen; i++) {
// container spec, # of containers, concurrency, duration
ReservationRequest reservationRequest = ReservationRequest
.newInstance(containerSpec, (int) (
containerRequests.getCapacityAtTime(i * timeInterval)
.getMemorySize() / containerMemAlloc), 1, timeInterval);
reservationRequestList.add(reservationRequest);
}
ReservationRequests reservationRequests = ReservationRequests
.newInstance(reservationRequestList,
ReservationRequestInterpreter.R_ALL);
ReservationDefinition reservationDefinition = ReservationDefinition
.newInstance(pipelineSubmissionTime, pipelineFinishTime,
reservationRequests, "LpSolver#toRecurringRDL");
ReservationId reservationId =
ReservationId.newInstance(RAND.nextLong(), RAND.nextLong());
ReservationSubmissionRequest reservationSubmissionRequest =
ReservationSubmissionRequest
.newInstance(reservationDefinition, "resourceestimator",
reservationId);
return reservationSubmissionRequest;
} | 3.68 |
framework_Upload_getBytesRead | /**
* Gets the number of bytes read of the file currently being uploaded.
*
* @return bytes
*/
public long getBytesRead() {
return totalBytes;
} | 3.68 |
hbase_FileArchiverNotifierImpl_computeSnapshotSizes | /**
* Computes the size of each snapshot against the table referenced by {@code this}.
* @param snapshots A sorted list of snapshots against {@code tn}.
* @return A list of the size for each snapshot against {@code tn}.
*/
List<SnapshotWithSize> computeSnapshotSizes(List<String> snapshots) throws IOException {
final List<SnapshotWithSize> snapshotSizes = new ArrayList<>(snapshots.size());
final Path rootDir = CommonFSUtils.getRootDir(conf);
// Get the map of store file names to store file path for this table
final Set<String> tableReferencedStoreFiles;
try {
tableReferencedStoreFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir).keySet();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return null;
}
if (LOG.isTraceEnabled()) {
LOG.trace("Paths for " + tn + ": " + tableReferencedStoreFiles);
}
// For each snapshot on this table, get the files which the snapshot references which
// the table does not.
Set<String> snapshotReferencedFiles = new HashSet<>();
for (String snapshotName : snapshots) {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
if (LOG.isTraceEnabled()) {
LOG.trace("Files referenced by other snapshots: " + snapshotReferencedFiles);
}
// Get the set of files from the manifest that this snapshot references which are not also
// referenced by the originating table.
Set<StoreFileReference> unreferencedStoreFileNames =
getStoreFilesFromSnapshot(manifest, (sfn) -> !tableReferencedStoreFiles.contains(sfn)
&& !snapshotReferencedFiles.contains(sfn));
if (LOG.isTraceEnabled()) {
LOG.trace("Snapshot " + snapshotName + " solely references the files: "
+ unreferencedStoreFileNames);
}
// Compute the size of the store files for this snapshot
long size = getSizeOfStoreFiles(tn, unreferencedStoreFileNames);
if (LOG.isTraceEnabled()) {
LOG.trace("Computed size of " + snapshotName + " to be " + size);
}
// Persist this snapshot's size into the map
snapshotSizes.add(new SnapshotWithSize(snapshotName, size));
// Make sure that we don't double-count the same file
for (StoreFileReference ref : unreferencedStoreFileNames) {
for (String fileNames : ref.getFamilyToFilesMapping().values()) {
snapshotReferencedFiles.add(fileNames);
}
}
}
return snapshotSizes;
} | 3.68 |
framework_MonthEventLabel_setCalendar | /**
* Set the Calendar instance this label belongs to.
*
* @param calendar
* The calendar instance
*/
public void setCalendar(VCalendar calendar) {
this.calendar = calendar;
} | 3.68 |
hadoop_PlacementConstraintTransformations_transform | /**
* This method performs the transformation of the
* {@link #placementConstraint}.
*
* @return the transformed placement constraint.
*/
public PlacementConstraint transform() {
AbstractConstraint constraintExpr =
placementConstraint.getConstraintExpr();
// Visit the constraint tree to perform the transformation.
constraintExpr = constraintExpr.accept(this);
return new PlacementConstraint(constraintExpr);
} | 3.68 |
MagicPlugin_CraftingController_isCraftingSlot | // Borrowed from InventoryView and pruned,
// TODO: Switch to InventoryView.getSlotType when dropping 1.9 compat
public final boolean isCraftingSlot(InventoryView view, int slot) {
if (slot >= 0 && slot < view.getTopInventory().getSize()) {
if (view.getType() == InventoryType.WORKBENCH || view.getType() == InventoryType.CRAFTING) {
return slot > 0;
}
}
return false;
} | 3.68 |
hudi_TimelineServerBasedWriteMarkers_getConfigMap | /**
* Gets parameter map for marker creation request.
*
* @param partitionPath Relative partition path.
* @param markerFileName Marker file name.
* @return parameter map.
*/
private Map<String, String> getConfigMap(
String partitionPath, String markerFileName, boolean initEarlyConflictDetectionConfigs) {
Map<String, String> paramsMap = new HashMap<>();
paramsMap.put(MARKER_DIR_PATH_PARAM, markerDirPath.toString());
if (StringUtils.isNullOrEmpty(partitionPath)) {
paramsMap.put(MARKER_NAME_PARAM, markerFileName);
} else {
paramsMap.put(MARKER_NAME_PARAM, partitionPath + "/" + markerFileName);
}
if (initEarlyConflictDetectionConfigs) {
paramsMap.put(MARKER_BASEPATH_PARAM, basePath);
}
return paramsMap;
} | 3.68 |
hbase_MergeTableRegionsProcedure_rollbackCloseRegionsForMerge | /**
* Rollback close regions
**/
private void rollbackCloseRegionsForMerge(MasterProcedureEnv env) throws IOException {
AssignmentManagerUtil.reopenRegionsForRollback(env, Arrays.asList(regionsToMerge),
getRegionReplication(env), getServerName(env));
} | 3.68 |
morf_OracleDialect_indexDeploymentStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#indexDeploymentStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Index)
*/
@Override
protected Collection<String> indexDeploymentStatements(Table table, Index index) {
StringBuilder createIndexStatement = new StringBuilder();
// Specify the preamble
createIndexStatement.append("CREATE ");
if (index.isUnique()) {
createIndexStatement.append("UNIQUE ");
}
// Name the index
createIndexStatement
.append("INDEX ")
.append(schemaNamePrefix())
.append(index.getName())
// Specify which table the index is over
.append(" ON ")
.append(schemaNamePrefix())
.append(truncatedTableName(table.getName()))
// Specify the fields that are used in the index
.append(" (")
.append(Joiner.on(", ").join(index.columnNames()))
.append(")");
return Collections.singletonList(createIndexStatement.toString());
} | 3.68 |
flink_HiveParserASTNode_getName | /*
* (non-Javadoc)
*
* @see org.apache.hadoop.hive.ql.lib.Node#getName()
*/
@Override
public String getName() {
return String.valueOf(super.getToken().getType());
} | 3.68 |
hbase_StoreHotnessProtector_logDisabledMessageOnce | /**
* {@link #init(Configuration)} is called for every Store that opens on a RegionServer. Here we
* make a lightweight attempt to log this message once per RegionServer, rather than per-Store.
* The goal is just to draw attention to this feature if debugging overload due to heavy writes.
*/
private static void logDisabledMessageOnce() {
if (!loggedDisableMessage) {
LOG.info(
"StoreHotnessProtector is disabled. Set {} > 0 to enable, "
+ "which may help mitigate load under heavy write pressure.",
PARALLEL_PUT_STORE_THREADS_LIMIT);
loggedDisableMessage = true;
}
} | 3.68 |
framework_VFilterSelect_getItemOffsetHeight | /*
* Gets the height of one menu item.
*/
int getItemOffsetHeight() {
List<MenuItem> items = getItems();
return items != null && !items.isEmpty()
? items.get(0).getOffsetHeight()
: 0;
} | 3.68 |
hudi_HoodieAvroIndexedRecord_writeRecordPayload | /**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@SuppressWarnings("unchecked")
@Override
protected final void writeRecordPayload(IndexedRecord payload, Kryo kryo, Output output) {
// NOTE: We're leveraging Spark's default [[GenericAvroSerializer]] to serialize Avro
Serializer<GenericRecord> avroSerializer = kryo.getSerializer(GenericRecord.class);
kryo.writeObjectOrNull(output, payload, avroSerializer);
} | 3.68 |
querydsl_Expressions_asNumber | /**
* Create a new NumberExpression
*
* @param value Number
* @return new NumberExpression
*/
public static <T extends Number & Comparable<?>> NumberExpression<T> asNumber(T value) {
return asNumber(constant(value));
} | 3.68 |
flink_PekkoRpcActor_envelopeSelfMessage | /**
* Hook to envelope self messages.
*
* @param message to envelope
* @return enveloped message
*/
protected Object envelopeSelfMessage(Object message) {
return message;
} | 3.68 |