name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hbase_Timer_updateMicros | /**
* Update the timer with the given duration in microseconds
* @param durationMicros the duration of the event in microseconds
*/
default void updateMicros(long durationMicros) {
update(durationMicros, TimeUnit.MICROSECONDS);
} | 3.68 |
hadoop_RegistryTypeUtils_retrieveAddressesUriType | /**
 * Get the URI addresses of an endpoint.
 * @param epr endpoint
 * @return the URIs of the entries in the address list. Null if the endpoint
 * itself is null
 * @throws InvalidRecordException if the type is wrong, there are no addresses
 * or the payload is ill-formatted
*/
public static List<String> retrieveAddressesUriType(Endpoint epr)
throws InvalidRecordException {
if (epr == null) {
return null;
}
requireAddressType(ADDRESS_URI, epr);
List<Map<String, String>> addresses = epr.addresses;
if (addresses.size() < 1) {
throw new InvalidRecordException(epr.toString(),
"No addresses in endpoint");
}
List<String> results = new ArrayList<String>(addresses.size());
for (Map<String, String> address : addresses) {
results.add(getAddressField(address, ADDRESS_URI));
}
return results;
} | 3.68 |
framework_GridRowDragger_getDraggedItems | /**
 * Returns the currently dragged items captured from the source grid on drag
 * start event, or {@code null} if no drag is active.
*
* @return the currently dragged items or {@code null}
*/
protected List<T> getDraggedItems() {
return draggedItems;
} | 3.68 |
framework_FilesystemContainer_removeItem | /*
* (non-Javadoc)
*
* @see com.vaadin.data.Container#removeItem(java.lang.Object)
*/
@Override
public boolean removeItem(Object itemId)
throws UnsupportedOperationException {
throw new UnsupportedOperationException(
"File system container does not support this operation");
} | 3.68 |
flink_SegmentsUtil_copyToView | /**
 * Copy bytes of segments to output view. Note: it copies only the data, not the
 * length.
*
* @param segments source segments
* @param offset offset for segments
* @param sizeInBytes size in bytes
* @param target target output view
*/
public static void copyToView(
MemorySegment[] segments, int offset, int sizeInBytes, DataOutputView target)
throws IOException {
for (MemorySegment sourceSegment : segments) {
int curSegRemain = sourceSegment.size() - offset;
if (curSegRemain > 0) {
int copySize = Math.min(curSegRemain, sizeInBytes);
byte[] bytes = allocateReuseBytes(copySize);
sourceSegment.get(offset, bytes, 0, copySize);
target.write(bytes, 0, copySize);
sizeInBytes -= copySize;
offset = 0;
} else {
offset -= sourceSegment.size();
}
if (sizeInBytes == 0) {
return;
}
}
if (sizeInBytes != 0) {
throw new RuntimeException(
"No copy finished, this should be a bug, "
+ "The remaining length is: "
+ sizeInBytes);
}
} | 3.68 |
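A minimal, self-contained sketch of the same copy loop over plain byte arrays (illustration only, not the Flink MemorySegment API): the starting offset is consumed by skipping whole segments, then resets to zero for every following segment.

import java.io.ByteArrayOutputStream;

public class SegmentCopySketch {
    static void copy(byte[][] segments, int offset, int sizeInBytes, ByteArrayOutputStream target) {
        for (byte[] segment : segments) {
            int curSegRemain = segment.length - offset;
            if (curSegRemain > 0) {
                int copySize = Math.min(curSegRemain, sizeInBytes);
                target.write(segment, offset, copySize);
                sizeInBytes -= copySize;
                offset = 0;               // subsequent segments are read from their start
            } else {
                offset -= segment.length; // whole segment skipped by the offset
            }
            if (sizeInBytes == 0) {
                return;
            }
        }
        if (sizeInBytes != 0) {
            throw new IllegalStateException("Requested more bytes than the segments hold");
        }
    }

    public static void main(String[] args) {
        byte[][] segments = {"abcd".getBytes(), "efgh".getBytes()};
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        copy(segments, 3, 4, out);          // starts at 'd', spans into the second segment
        System.out.println(out.toString()); // prints "defg"
    }
}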
flink_BoundedData_createReader | /**
* Gets a reader for the bounded data. Multiple readers may be created. This call only succeeds
* once the write phase was finished via {@link #finishWrite()}.
*/
default BoundedData.Reader createReader() throws IOException {
return createReader(new NoOpResultSubpartitionView());
} | 3.68 |
hbase_HRegion_compactStores | /**
 * This is a helper function that compacts all the stores synchronously.
* <p>
* It is used by utilities and testing
*/
public void compactStores() throws IOException {
for (HStore s : stores.values()) {
Optional<CompactionContext> compaction = s.requestCompaction();
if (compaction.isPresent()) {
compact(compaction.get(), s, NoLimitThroughputController.INSTANCE, null);
}
}
} | 3.68 |
hudi_ClusteringOperator_readRecordsForGroupWithLogs | /**
* Read records from baseFiles, apply updates and convert to Iterator.
*/
@SuppressWarnings("unchecked")
private Iterator<RowData> readRecordsForGroupWithLogs(List<ClusteringOperation> clusteringOps, String instantTime) {
List<Iterator<RowData>> recordIterators = new ArrayList<>();
long maxMemoryPerCompaction = IOUtils.getMaxMemoryPerCompaction(new FlinkTaskContextSupplier(null), writeConfig);
LOG.info("MaxMemoryPerCompaction run as part of clustering => " + maxMemoryPerCompaction);
for (ClusteringOperation clusteringOp : clusteringOps) {
try {
Option<HoodieFileReader> baseFileReader = StringUtils.isNullOrEmpty(clusteringOp.getDataFilePath())
? Option.empty()
: Option.of(HoodieFileReaderFactory.getReaderFactory(table.getConfig().getRecordMerger().getRecordType()).getFileReader(table.getHadoopConf(), new Path(clusteringOp.getDataFilePath())));
HoodieMergedLogRecordScanner scanner = HoodieMergedLogRecordScanner.newBuilder()
.withFileSystem(table.getMetaClient().getFs())
.withBasePath(table.getMetaClient().getBasePath())
.withLogFilePaths(clusteringOp.getDeltaFilePaths())
.withReaderSchema(readerSchema)
.withLatestInstantTime(instantTime)
.withMaxMemorySizeInBytes(maxMemoryPerCompaction)
.withReadBlocksLazily(writeConfig.getCompactionLazyBlockReadEnabled())
.withReverseReader(writeConfig.getCompactionReverseLogReadEnabled())
.withBufferSize(writeConfig.getMaxDFSStreamBufferSize())
.withSpillableMapBasePath(writeConfig.getSpillableMapBasePath())
.withDiskMapType(writeConfig.getCommonConfig().getSpillableDiskMapType())
.withBitCaskDiskMapCompressionEnabled(writeConfig.getCommonConfig().isBitCaskDiskMapCompressionEnabled())
.withRecordMerger(writeConfig.getRecordMerger())
.build();
HoodieTableConfig tableConfig = table.getMetaClient().getTableConfig();
HoodieFileSliceReader<? extends IndexedRecord> hoodieFileSliceReader = new HoodieFileSliceReader(baseFileReader, scanner, readerSchema,
        tableConfig.getPreCombineField(), writeConfig.getRecordMerger(),
tableConfig.getProps(),
tableConfig.populateMetaFields() ? Option.empty() : Option.of(Pair.of(tableConfig.getRecordKeyFieldProp(),
tableConfig.getPartitionFieldProp())));
recordIterators.add(StreamSupport.stream(Spliterators.spliteratorUnknownSize(hoodieFileSliceReader, Spliterator.NONNULL), false).map(hoodieRecord -> {
try {
return this.transform(hoodieRecord.toIndexedRecord(readerSchema, new Properties()).get().getData());
} catch (IOException e) {
throw new HoodieIOException("Failed to read next record", e);
}
}).iterator());
} catch (IOException e) {
throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
+ " and " + clusteringOp.getDeltaFilePaths(), e);
}
}
return new ConcatenatingIterator<>(recordIterators);
} | 3.68 |
pulsar_OffloadIndexBlockImpl_toStream | /**
* Get the content of the index block as InputStream.
* Read out in format:
* | index_magic_header | index_block_len | data_object_len | data_header_len |
* | index_entry_count | segment_metadata_len | segment metadata | index entries... |
*/
@Override
public OffloadIndexBlock.IndexInputStream toStream() throws IOException {
int indexEntryCount = this.indexEntries.size();
byte[] ledgerMetadataByte = buildLedgerMetadataFormat(this.segmentMetadata);
int segmentMetadataLength = ledgerMetadataByte.length;
int indexBlockLength = 4 /* magic header */
+ 4 /* index block length */
+ 8 /* data object length */
+ 8 /* data header length */
+ 4 /* index entry count */
+ 4 /* segment metadata length */
+ segmentMetadataLength
+ indexEntryCount * (8 + 4 + 8); /* messageEntryId + blockPartId + blockOffset */
ByteBuf out = PulsarByteBufAllocator.DEFAULT.buffer(indexBlockLength, indexBlockLength);
out.writeInt(INDEX_MAGIC_WORD)
.writeInt(indexBlockLength)
.writeLong(dataObjectLength)
.writeLong(dataHeaderLength)
.writeInt(indexEntryCount)
.writeInt(segmentMetadataLength);
// write metadata
out.writeBytes(ledgerMetadataByte);
// write entries
this.indexEntries.entrySet().forEach(entry ->
out.writeLong(entry.getValue().getEntryId())
.writeInt(entry.getValue().getPartId())
.writeLong(entry.getValue().getOffset()));
return new OffloadIndexBlock.IndexInputStream(new ByteBufInputStream(out, true), indexBlockLength);
} | 3.68 |
framework_AbstractColorPicker_setHistoryVisibility | /**
* Sets the visibility of the Color History.
*
* @param visible
* The visibility
*/
public void setHistoryVisibility(boolean visible) {
historyVisible = visible;
if (window != null) {
window.setHistoryVisible(visible);
}
} | 3.68 |
hadoop_CustomTokenProviderAdapter_bind | /**
* Bind to the filesystem by passing the binding call on
* to any custom token provider adaptee which implements
* {@link BoundDTExtension}.
* No-op if they don't.
* @param fsURI URI of the filesystem.
* @param conf configuration of this extension.
* @throws IOException failure.
*/
@Override
public void bind(final URI fsURI,
final Configuration conf)
throws IOException {
ExtensionHelper.bind(adaptee, fsURI, conf);
} | 3.68 |
hbase_HBaseTestingUtility_predicateTableEnabled | /**
* Returns a {@link Predicate} for checking that table is enabled
*/
public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
return new ExplainingPredicate<IOException>() {
@Override
public String explainFailure() throws IOException {
return explainTableState(tableName, TableState.State.ENABLED);
}
@Override
public boolean evaluate() throws IOException {
return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
}
};
} | 3.68 |
flink_MemorySegment_putBoolean | /**
* Writes one byte containing the byte value into this buffer at the given position.
*
* @param index The position at which the memory will be written.
 * @param value The boolean value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 1.
*/
public void putBoolean(int index, boolean value) {
put(index, (byte) (value ? 1 : 0));
} | 3.68 |
hbase_EndpointObserver_preEndpointInvocation | /**
* Called before an Endpoint service method is invoked. The request message can be altered by
* returning a new instance. Throwing an exception will abort the invocation. Calling
* {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this
* hook.
* @param ctx the environment provided by the region server
* @param service the endpoint service
* @param request Request message expected by given {@code Service}'s method (by the name
* {@code methodName}).
* @param methodName the invoked service method
* @return the possibly modified message
*/
default Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request) throws IOException {
return request;
} | 3.68 |
dubbo_ProviderConfig_getExportBackground | /**
* @deprecated replace with {@link ModuleConfig#getBackground()}
* @see ModuleConfig#getBackground()
*/
@Deprecated
@Parameter(key = EXPORT_BACKGROUND_KEY, excluded = true)
public Boolean getExportBackground() {
return exportBackground;
} | 3.68 |
hbase_Connection_getTable | /**
 * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe; a
 * new instance should be created for each thread that uses it. This is a lightweight operation, pooling
* or caching of the returned Table is neither required nor desired.
* <p>
* The caller is responsible for calling {@link Table#close()} on the returned table instance.
* <p>
* Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the
* table does not exist only when the first operation is attempted.
* @param tableName the name of the table
* @param pool The thread pool to use for batch operations, null to use a default pool.
* @return a Table to use for interactions with this table
*/
default Table getTable(TableName tableName, ExecutorService pool) throws IOException {
return getTableBuilder(tableName, pool).build();
} | 3.68 |
morf_SchemaBean_getTable | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.metadata.Schema#getTable(java.lang.String)
*/
@Override
public Table getTable(String name) {
return tables.get(name.toUpperCase());
} | 3.68 |
hbase_YammerHistogramUtils_getHistogramReport | /** Returns a summary of {@code hist}. */
public static String getHistogramReport(final Histogram hist) {
Snapshot sn = hist.getSnapshot();
return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + ", min="
+ DOUBLE_FORMAT.format(sn.getMin()) + ", max=" + DOUBLE_FORMAT.format(sn.getMax())
+ ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + ", 50th="
+ DOUBLE_FORMAT.format(sn.getMedian()) + ", 75th="
+ DOUBLE_FORMAT.format(sn.get75thPercentile()) + ", 95th="
+ DOUBLE_FORMAT.format(sn.get95thPercentile()) + ", 99th="
+ DOUBLE_FORMAT.format(sn.get99thPercentile()) + ", 99.9th="
+ DOUBLE_FORMAT.format(sn.get999thPercentile()) + ", 99.99th="
+ DOUBLE_FORMAT.format(sn.getValue(0.9999)) + ", 99.999th="
+ DOUBLE_FORMAT.format(sn.getValue(0.99999));
} | 3.68 |
morf_SqlDialect_postInsertWithPresetAutonumStatements | /**
* Creates SQL to execute after bulk-inserting to a table.
*
* @param table The table that was populated.
* @param executor The executor to use
* @param connection The connection to use
* @param insertingUnderAutonumLimit Determines whether we are inserting under an auto-numbering limit.
*/
@SuppressWarnings("unused")
public void postInsertWithPresetAutonumStatements(Table table, SqlScriptExecutor executor, Connection connection, boolean insertingUnderAutonumLimit) {
} | 3.68 |
hadoop_RouterAuditLogger_createSuccessLog | /**
* A helper api for creating an audit log for a successful event.
*/
static String createSuccessLog(String user, String operation, String target,
ApplicationId appId, SubClusterId subClusterID) {
StringBuilder b =
createStringBuilderForSuccessEvent(user, operation, target);
if (appId != null) {
add(Keys.APPID, appId.toString(), b);
}
if (subClusterID != null) {
add(Keys.SUBCLUSTERID, subClusterID.toString(), b);
}
return b.toString();
} | 3.68 |
hbase_ReplicationStorageFactory_getReplicationQueueStorage | /**
* Create a new {@link ReplicationQueueStorage}.
*/
public static ReplicationQueueStorage getReplicationQueueStorage(Connection conn,
Configuration conf, TableName tableName) {
Class<? extends ReplicationQueueStorage> clazz = conf.getClass(REPLICATION_QUEUE_IMPL,
TableReplicationQueueStorage.class, ReplicationQueueStorage.class);
try {
Constructor<? extends ReplicationQueueStorage> c =
clazz.getConstructor(Connection.class, TableName.class);
return c.newInstance(conn, tableName);
} catch (Exception e) {
LOG.debug(
"failed to create ReplicationQueueStorage with Connection, try creating with Configuration",
e);
return ReflectionUtils.newInstance(clazz, conf, tableName);
}
} | 3.68 |
zxing_CalendarParsedResult_isEndAllDay | /**
* @return true if end time was specified as a whole day
*/
public boolean isEndAllDay() {
return endAllDay;
} | 3.68 |
flink_Schema_derived | /**
* Convenience method for stating explicitly that a schema is empty and should be fully derived
* by the framework.
*
* <p>The semantics are equivalent to calling {@code Schema.newBuilder().build()}.
*
* <p>Note that derivation depends on the context. Usually, the method that accepts a {@link
* Schema} instance will mention whether schema derivation is supported or not.
*/
public static Schema derived() {
return EMPTY;
} | 3.68 |
hadoop_MetricsCache_metrics | /**
* @deprecated use metricsEntrySet() instead
* @return entry set of metrics
*/
@Deprecated
public Set<Map.Entry<String, Number>> metrics() {
Map<String, Number> map = new LinkedHashMap<String, Number>(
metrics.size());
for (Map.Entry<String, AbstractMetric> mapEntry : metrics.entrySet()) {
map.put(mapEntry.getKey(), mapEntry.getValue().value());
}
return map.entrySet();
} | 3.68 |
morf_SqlServer_sqlDialect | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#sqlDialect(java.lang.String)
*/
@Override
public SqlDialect sqlDialect(String schemaName) {
return new SqlServerDialect(schemaName);
} | 3.68 |
flink_DispatcherGateway_triggerSavepointAndGetLocation | /**
* Triggers a savepoint with the given savepoint directory as a target, returning a future that
* completes with the savepoint location when it is complete.
*
* @param jobId the job id
* @param targetDirectory Target directory for the savepoint.
* @param formatType Binary format of the savepoint.
* @param savepointMode context of the savepoint operation
* @param timeout Timeout for the asynchronous operation
* @return Future which is completed once the operation is triggered successfully
*/
default CompletableFuture<String> triggerSavepointAndGetLocation(
JobID jobId,
String targetDirectory,
SavepointFormatType formatType,
TriggerSavepointMode savepointMode,
@RpcTimeout Time timeout) {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_TraceUtil_endSpan | /**
* Finish the {@code span} when the given {@code future} is completed.
*/
private static void endSpan(CompletableFuture<?> future, Span span) {
FutureUtils.addListener(future, (resp, error) -> {
if (error != null) {
setError(span, error);
} else {
span.setStatus(StatusCode.OK);
}
span.end();
});
} | 3.68 |
framework_SharedUtil_containsDuplicates | /**
* Checks if the given array contains duplicates (according to
 * {@link Object#equals(Object)}).
*
* @param values
* the array to check for duplicates
* @return <code>true</code> if the array contains duplicates,
* <code>false</code> otherwise
*/
public static boolean containsDuplicates(Object[] values) {
int uniqueCount = new HashSet<Object>(Arrays.asList(values)).size();
return uniqueCount != values.length;
} | 3.68 |
AreaShop_CommandsFeature_runEventCommands | /**
* Run command for a certain event.
* @param region Region to execute the events for
* @param event The event
* @param before The 'before' or 'after' commands
*/
public void runEventCommands(GeneralRegion region, GeneralRegion.RegionEvent event, boolean before) {
ConfigurationSection eventCommandProfileSection = region.getConfigurationSectionSetting("general.eventCommandProfile", "eventCommandProfiles");
if(eventCommandProfileSection == null) {
return;
}
List<String> commands = eventCommandProfileSection.getStringList(event.getValue() + "." + (before ? "before" : "after"));
if(commands == null || commands.isEmpty()) {
return;
}
region.runCommands(Bukkit.getConsoleSender(), commands);
} | 3.68 |
hadoop_LocalSASKeyGeneratorImpl_getContainerSASUri | /**
* Implementation to generate SAS Key for a container
*/
@Override
public URI getContainerSASUri(String accountName, String container)
throws SASKeyGenerationException {
LOG.debug("Retrieving Container SAS URI For {}@{}", container, accountName);
try {
CachedSASKeyEntry cacheKey = new CachedSASKeyEntry(accountName, container, "/");
URI cacheResult = cache.get(cacheKey);
if (cacheResult != null) {
return cacheResult;
}
CloudStorageAccount account =
getSASKeyBasedStorageAccountInstance(accountName);
CloudBlobClient client = account.createCloudBlobClient();
URI sasKey = client.getCredentials().transformUri(
client.getContainerReference(container).getUri());
cache.put(cacheKey, sasKey);
return sasKey;
} catch (StorageException stoEx) {
throw new SASKeyGenerationException("Encountered StorageException while"
+ " generating SAS Key for container " + container + " inside "
+ "storage account " + accountName, stoEx);
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException("Encountered URISyntaxException while"
+ " generating SAS Key for container " + container + " inside storage"
+ " account " + accountName, uriSyntaxEx);
}
} | 3.68 |
framework_DataProvider_ofCollection | /**
* Creates a new data provider backed by a collection.
* <p>
* The collection is used as-is. Changes in the collection will be visible
* via the created data provider. The caller should copy the collection if
* necessary.
*
* @param <T>
* the data item type
* @param items
* the collection of data, not <code>null</code>
* @return a new list data provider
*/
public static <T> ListDataProvider<T> ofCollection(Collection<T> items) {
return new ListDataProvider<>(items);
} | 3.68 |
framework_Tree_isExpanded | /**
 * Check if an item is expanded.
*
* @param itemId
* the item id.
* @return true if the item is expanded.
*/
public boolean isExpanded(Object itemId) {
return expanded.contains(itemId);
} | 3.68 |
hadoop_AuthenticationHandlerUtil_getAuthenticationHandlerClassName | /**
 * This method resolves the class name of the {@link AuthenticationHandler}
 * implementation for the specified <code>authHandler</code>.
*
* @param authHandler The short-name (or fully qualified class name) of the
* authentication handler.
 * @return the fully qualified class name of the AuthenticationHandler implementation.
*/
public static String getAuthenticationHandlerClassName(String authHandler) {
if (authHandler == null) {
throw new NullPointerException();
}
String handlerName = authHandler.toLowerCase(Locale.ENGLISH);
String authHandlerClassName = null;
if (handlerName.equals(PseudoAuthenticationHandler.TYPE)) {
authHandlerClassName = PseudoAuthenticationHandler.class.getName();
} else if (handlerName.equals(KerberosAuthenticationHandler.TYPE)) {
authHandlerClassName = KerberosAuthenticationHandler.class.getName();
} else if (handlerName.equals(LdapAuthenticationHandler.TYPE)) {
authHandlerClassName = LdapAuthenticationHandler.class.getName();
} else if (handlerName.equals(MultiSchemeAuthenticationHandler.TYPE)) {
authHandlerClassName = MultiSchemeAuthenticationHandler.class.getName();
} else {
authHandlerClassName = authHandler;
}
return authHandlerClassName;
} | 3.68 |
hadoop_WeightedPolicyInfo_getAMRMPolicyWeights | /**
* Getter for AMRMProxy weights.
*
* @return the AMRMProxy weights.
*/
public Map<SubClusterIdInfo, Float> getAMRMPolicyWeights() {
return amrmPolicyWeights;
} | 3.68 |
framework_Table_addColumnCollapseListener | /**
* Adds a column collapse listener to the Table. A column collapse listener
* is called when the collapsed state of a column changes.
*
* @since 7.6
*
* @param listener
* The listener to attach
*/
public void addColumnCollapseListener(ColumnCollapseListener listener) {
addListener(TableConstants.COLUMN_COLLAPSE_EVENT_ID,
ColumnCollapseEvent.class, listener,
ColumnCollapseEvent.METHOD);
} | 3.68 |
dubbo_LoadingStrategy_onlyExtensionClassLoaderPackages | /**
 * Restricts certain classes to be loaded only from Dubbo's ClassLoader.
 * For example, classes declared in the `org.apache.dubbo` package can be restricted so that they
 * must be loaded from Dubbo's ClassLoader and users cannot declare these classes themselves.
 *
 * @return the packages whose classes should be loaded from Dubbo's ClassLoader
* @since 3.0.4
*/
default String[] onlyExtensionClassLoaderPackages() {
return new String[] {};
} | 3.68 |
hudi_DateTimeUtils_instantToMicros | /**
* Converts provided {@link Instant} to microseconds (from epoch)
*/
public static long instantToMicros(Instant instant) {
long seconds = instant.getEpochSecond();
int nanos = instant.getNano();
if (seconds < 0 && nanos > 0) {
long micros = Math.multiplyExact(seconds + 1, 1_000_000L);
long adjustment = (nanos / 1_000L) - 1_000_000;
return Math.addExact(micros, adjustment);
} else {
long micros = Math.multiplyExact(seconds, 1_000_000L);
return Math.addExact(micros, nanos / 1_000L);
}
} | 3.68 |
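A worked example of the two branches above, as a standalone sketch that inlines the same arithmetic (it does not call the Hudi class):

import java.time.Instant;

public class InstantToMicrosSketch {
    // Same arithmetic as instantToMicros above, inlined for illustration.
    static long toMicros(Instant instant) {
        long seconds = instant.getEpochSecond();
        int nanos = instant.getNano();
        if (seconds < 0 && nanos > 0) {
            return Math.addExact(Math.multiplyExact(seconds + 1, 1_000_000L), (nanos / 1_000L) - 1_000_000);
        }
        return Math.addExact(Math.multiplyExact(seconds, 1_000_000L), nanos / 1_000L);
    }

    public static void main(String[] args) {
        // 1 s + 500_000 ns -> 1_000_000 + 500 = 1_000_500 micros
        System.out.println(toMicros(Instant.ofEpochSecond(1, 500_000))); // 1000500
        // One nanosecond before the epoch: epochSecond = -1, nano = 999_999_999,
        // so micros = 0 and adjustment = 999_999 - 1_000_000 = -1
        System.out.println(toMicros(Instant.EPOCH.minusNanos(1)));       // -1
    }
}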
hudi_AbstractStreamWriteFunction_isConfirming | // -------------------------------------------------------------------------
// Getter/Setter
// -------------------------------------------------------------------------
@VisibleForTesting
public boolean isConfirming() {
return this.confirming;
} | 3.68 |
flink_ExecutionConfig_setParallelism | /**
* Sets the parallelism for operations executed through this environment. Setting a parallelism
* of x here will cause all operators (such as join, map, reduce) to run with x parallel
* instances.
*
* <p>This method overrides the default parallelism for this environment. The local execution
* environment uses by default a value equal to the number of hardware contexts (CPU cores /
* threads). When executing the program via the command line client from a JAR file, the default
* parallelism is the one configured for that setup.
*
* @param parallelism The parallelism to use
*/
public ExecutionConfig setParallelism(int parallelism) {
if (parallelism != PARALLELISM_UNKNOWN) {
if (parallelism < 1 && parallelism != PARALLELISM_DEFAULT) {
throw new IllegalArgumentException(
"Parallelism must be at least one, or ExecutionConfig.PARALLELISM_DEFAULT (use system default).");
}
configuration.set(CoreOptions.DEFAULT_PARALLELISM, parallelism);
}
return this;
} | 3.68 |
hmily_PropertyName_chop | /**
* Return a new {@link PropertyName} by chopping this name to the given
* {@code size}. For example, {@code chop(1)} on the name {@code foo.bar} will return
* {@code foo}.
*
* @param size the size to chop
* @return the chopped name
*/
public PropertyName chop(final int size) {
if (size >= getElementSize()) {
return this;
}
String[] elements = new String[size];
System.arraycopy(this.elements, 0, elements, 0, size);
return new PropertyName(elements);
} | 3.68 |
morf_SelectStatementBuilder_getHints | /**
* @return all hints in the order they were declared.
*/
List<Hint> getHints() {
return hints;
} | 3.68 |
pulsar_ServiceURI_create | /**
* Create a service uri instance from a {@link URI} instance.
*
* @param uri {@link URI} instance
* @return a service uri instance
 * @throws NullPointerException if {@code uri} is null
* @throws IllegalArgumentException if the given string violates RFC 2396
*/
public static ServiceURI create(URI uri) {
requireNonNull(uri, "service uri instance is null");
String serviceName;
final String[] serviceInfos;
String scheme = uri.getScheme();
if (null != scheme) {
scheme = scheme.toLowerCase();
final String serviceSep = "+";
String[] schemeParts = StringUtils.split(scheme, serviceSep);
serviceName = schemeParts[0];
serviceInfos = new String[schemeParts.length - 1];
System.arraycopy(schemeParts, 1, serviceInfos, 0, serviceInfos.length);
} else {
serviceName = null;
serviceInfos = new String[0];
}
String userAndHostInformation = uri.getAuthority();
checkArgument(!Strings.isNullOrEmpty(userAndHostInformation),
"authority component is missing in service uri : " + uri);
String serviceUser;
List<String> serviceHosts;
int atIndex = userAndHostInformation.indexOf('@');
Splitter splitter = Splitter.on(CharMatcher.anyOf(",;"));
if (atIndex > 0) {
serviceUser = userAndHostInformation.substring(0, atIndex);
serviceHosts = splitter.splitToList(userAndHostInformation.substring(atIndex + 1));
} else {
serviceUser = null;
serviceHosts = splitter.splitToList(userAndHostInformation);
}
serviceHosts = serviceHosts
.stream()
.map(host -> validateHostName(serviceName, serviceInfos, host))
.collect(Collectors.toList());
String servicePath = uri.getPath();
checkArgument(null != servicePath,
"service path component is missing in service uri : " + uri);
return new ServiceURI(
serviceName,
serviceInfos,
serviceUser,
serviceHosts.toArray(new String[serviceHosts.size()]),
servicePath,
uri);
} | 3.68 |
graphhopper_VectorTile_clearFeatures | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public Builder clearFeatures() {
if (featuresBuilder_ == null) {
features_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
featuresBuilder_.clear();
}
return this;
} | 3.68 |
flink_Tuple10_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSelectSumWithExpression | /**
 * @return the expected SQL for selecting a SUM over an expression
*/
protected String expectedSelectSumWithExpression() {
return "SELECT SUM(intField * 2 / 3) FROM " + tableName(TEST_TABLE);
} | 3.68 |
hadoop_SlowPeerTracker_addReport | /**
 * Add a new report. DatanodeIds can be the DataNodeIds or addresses.
* We don't care as long as the caller is consistent.
*
* @param slowNode DataNodeId of the peer suspected to be slow.
* @param reportingNode DataNodeId of the node reporting on its peer.
* @param slowNodeMetrics Aggregate latency metrics of slownode as reported by the
* reporting node.
*/
public void addReport(String slowNode, String reportingNode, OutlierMetrics slowNodeMetrics) {
ConcurrentMap<String, LatencyWithLastReportTime> nodeEntries = allReports.get(slowNode);
if (nodeEntries == null) {
// putIfAbsent guards against multiple writers.
allReports.putIfAbsent(slowNode, new ConcurrentHashMap<>());
nodeEntries = allReports.get(slowNode);
}
// Replace the existing entry from this node, if any.
nodeEntries.put(reportingNode,
new LatencyWithLastReportTime(timer.monotonicNow(), slowNodeMetrics));
} | 3.68 |
hadoop_OBSWriteOperationHelper_completeMultipartUpload | /**
* Complete a multipart upload operation.
*
* @param destKey Object key
* @param uploadId multipart operation Id
* @param partETags list of partial uploads
* @return the result
* @throws ObsException on problems.
*/
CompleteMultipartUploadResult completeMultipartUpload(
final String destKey, final String uploadId,
final List<PartEtag> partETags)
throws ObsException {
Preconditions.checkNotNull(uploadId);
Preconditions.checkNotNull(partETags);
Preconditions.checkArgument(!partETags.isEmpty(),
"No partitions have been uploaded");
LOG.debug("Completing multipart upload {} with {} parts", uploadId,
partETags.size());
// a copy of the list is required, so that the OBS SDK doesn't
// attempt to sort an unmodifiable list.
return obs.completeMultipartUpload(
new CompleteMultipartUploadRequest(bucket, destKey, uploadId,
new ArrayList<>(partETags)));
} | 3.68 |
hbase_ProcedureMember_submitSubprocedure | /**
 * Submit a subprocedure for execution. This starts the local acquire phase.
* @param subproc the subprocedure to execute.
* @return <tt>true</tt> if the subprocedure was started correctly, <tt>false</tt> if it could not
* be started. In the latter case, the subprocedure holds a reference to the exception
* that caused the failure.
*/
@SuppressWarnings("FutureReturnValueIgnored")
public boolean submitSubprocedure(Subprocedure subproc) {
// if the submitted subprocedure was null, bail.
if (subproc == null) {
LOG.warn("Submitted null subprocedure, nothing to run here.");
return false;
}
String procName = subproc.getName();
if (procName == null || procName.length() == 0) {
LOG.error("Subproc name cannot be null or the empty string");
return false;
}
// make sure we aren't already running an subprocedure of that name
Subprocedure rsub = subprocs.get(procName);
if (rsub != null) {
if (!rsub.isComplete()) {
LOG.error("Subproc '" + procName + "' is already running. Bailing out");
return false;
}
LOG.warn("A completed old subproc " + procName + " is still present, removing");
if (!subprocs.remove(procName, rsub)) {
LOG.error("Another thread has replaced existing subproc '" + procName + "'. Bailing out");
return false;
}
}
LOG.debug("Submitting new Subprocedure:" + procName);
// kick off the subprocedure
try {
if (subprocs.putIfAbsent(procName, subproc) == null) {
this.pool.submit(subproc);
return true;
} else {
LOG.error("Another thread has submitted subproc '" + procName + "'. Bailing out");
return false;
}
} catch (RejectedExecutionException e) {
subprocs.remove(procName, subproc);
// the thread pool is full and we can't run the subprocedure
String msg = "Subprocedure pool is full!";
subproc.cancel(msg, e.getCause());
}
LOG.error("Failed to start subprocedure '" + procName + "'");
return false;
} | 3.68 |
hbase_FileChangeWatcher_compareAndSetState | /**
* Atomically sets the state to <code>update</code> if and only if the state is currently one of
* <code>expectedStates</code>.
* @param expectedStates the expected states.
* @param update the new state.
* @return true if the update succeeds, or false if the current state does not equal any of the
* <code>expectedStates</code>.
*/
private synchronized boolean compareAndSetState(State[] expectedStates, State update) {
for (State expected : expectedStates) {
if (state == expected) {
setState(update);
return true;
}
}
return false;
} | 3.68 |
morf_FieldReference_getDirection | /**
* Gets the sort direction of the field.
*
* @return the direction
*/
public Direction getDirection() {
return direction;
} | 3.68 |
zxing_MatrixUtil_embedDataBits | // Embed "dataBits" using "getMaskPattern". On success, modify the matrix and return true.
// For debugging purposes, it skips masking process if "getMaskPattern" is -1.
// See 8.7 of JISX0510:2004 (p.38) for how to embed data bits.
static void embedDataBits(BitArray dataBits, int maskPattern, ByteMatrix matrix)
throws WriterException {
int bitIndex = 0;
int direction = -1;
// Start from the right bottom cell.
int x = matrix.getWidth() - 1;
int y = matrix.getHeight() - 1;
while (x > 0) {
// Skip the vertical timing pattern.
if (x == 6) {
x -= 1;
}
while (y >= 0 && y < matrix.getHeight()) {
for (int i = 0; i < 2; ++i) {
int xx = x - i;
// Skip the cell if it's not empty.
if (!isEmpty(matrix.get(xx, y))) {
continue;
}
boolean bit;
if (bitIndex < dataBits.getSize()) {
bit = dataBits.get(bitIndex);
++bitIndex;
} else {
// Padding bit. If there is no bit left, we'll fill the left cells with 0, as described
// in 8.4.9 of JISX0510:2004 (p. 24).
bit = false;
}
// Skip masking if mask_pattern is -1.
if (maskPattern != -1 && MaskUtil.getDataMaskBit(maskPattern, xx, y)) {
bit = !bit;
}
matrix.set(xx, y, bit);
}
y += direction;
}
direction = -direction; // Reverse the direction.
y += direction;
x -= 2; // Move to the left.
}
// All bits should be consumed.
if (bitIndex != dataBits.getSize()) {
throw new WriterException("Not all bits consumed: " + bitIndex + '/' + dataBits.getSize());
}
} | 3.68 |
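A standalone sketch of the zigzag traversal order used above, printing the cell visit order for a small matrix (the x == 6 timing-column skip is kept; no masking or data writing is done):

public class ZigzagOrderSketch {
    public static void main(String[] args) {
        int width = 9, height = 9;
        int direction = -1;
        int y = height - 1;
        int x = width - 1;
        while (x > 0) {
            if (x == 6) {
                x -= 1;               // skip the vertical timing pattern column
            }
            while (y >= 0 && y < height) {
                for (int i = 0; i < 2; i++) {
                    System.out.printf("(%d,%d) ", x - i, y);
                }
                y += direction;
            }
            direction = -direction;   // reverse the vertical direction
            y += direction;
            x -= 2;                   // move one column pair to the left
        }
        System.out.println();
    }
}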
querydsl_SQLExpressions_percentileCont | /**
* Calculates a percentile based on a continuous distribution of the column value
*
* @param arg argument
* @return percentile_cont(arg)
*/
public static <T extends Number> WithinGroup<T> percentileCont(Expression<T> arg) {
return new WithinGroup<T>(arg.getType(), SQLOps.PERCENTILECONT, arg);
} | 3.68 |
dubbo_Proxy_newInstance | /**
* get instance with special handler.
*
* @return instance.
*/
public Object newInstance(InvocationHandler handler) {
Constructor<?> constructor;
try {
constructor = classToCreate.getDeclaredConstructor(InvocationHandler.class);
return constructor.newInstance(handler);
} catch (ReflectiveOperationException e) {
throw new RuntimeException(e);
}
} | 3.68 |
hbase_HRegion_computeHDFSBlocksDistribution | /**
* This is a helper function to compute HDFS block distribution on demand
* @param conf configuration
* @param tableDescriptor TableDescriptor of the table
 * @param regionInfo the RegionInfo of the region
* @param tablePath the table directory
* @return The HDFS blocks distribution for the given region.
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf,
TableDescriptor tableDescriptor, RegionInfo regionInfo, Path tablePath) throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
FileSystem fs = tablePath.getFileSystem(conf);
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
List<LocatedFileStatus> locatedFileStatusList =
HRegionFileSystem.getStoreFilesLocatedStatus(regionFs, family.getNameAsString(), true);
if (locatedFileStatusList == null) {
continue;
}
for (LocatedFileStatus status : locatedFileStatusList) {
Path p = status.getPath();
if (StoreFileInfo.isReference(p) || HFileLink.isHFileLink(p)) {
 // Only construct a StoreFileInfo object if it's not an HFile, to save
 // object creation
StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, status);
hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
} else if (StoreFileInfo.isHFile(p)) {
// If its a HFile, then lets just add to the block distribution
// lets not create more objects here, not even another HDFSBlocksDistribution
FSUtils.addToHDFSBlocksDistribution(hdfsBlocksDistribution, status.getBlockLocations());
} else {
throw new IOException("path=" + p + " doesn't look like a valid StoreFile");
}
}
}
return hdfsBlocksDistribution;
} | 3.68 |
hadoop_NvidiaGPUPluginForRuntimeV2_getTopologyInfo | // Get the topology metrics info from nvidia-smi
public String getTopologyInfo() throws IOException {
return Shell.execCommand(environment,
new String[]{pathOfGpuBinary, "topo",
"-m"}, MAX_EXEC_TIMEOUT_MS);
} | 3.68 |
hbase_DataBlockEncoding_isCorrectEncoder | /**
* Check if given encoder has this id.
* @param encoder encoder which id will be checked
 * @param encoderId id which we expect
* @return true if id is right for given encoder, false otherwise
* @exception IllegalArgumentException thrown when there is no matching data block encoder
*/
public static boolean isCorrectEncoder(DataBlockEncoder encoder, short encoderId) {
DataBlockEncoding algorithm = getEncodingById(encoderId);
String encoderCls = encoder.getClass().getName();
return encoderCls.equals(algorithm.encoderCls);
} | 3.68 |
AreaShop_GeneralRegion_resetRegionFlags | /**
* Reset all flags of the region.
*/
public void resetRegionFlags() {
ProtectedRegion region = getRegion();
if(region != null) {
region.setFlag(plugin.getWorldGuardHandler().fuzzyMatchFlag("greeting"), null);
region.setFlag(plugin.getWorldGuardHandler().fuzzyMatchFlag("farewell"), null);
}
} | 3.68 |
hudi_HoodieWriteCommitKafkaCallback_createProducer | /**
* Method helps to create {@link KafkaProducer}. Here we set acks = all and retries = 3 by default to ensure no data
* loss.
*
* @param hoodieConfig Kafka configs
* @return A {@link KafkaProducer}
*/
public KafkaProducer<String, String> createProducer(HoodieConfig hoodieConfig) {
Properties kafkaProducerProps = new Properties();
// bootstrap.servers
kafkaProducerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
// default "all" to ensure no message loss
kafkaProducerProps.setProperty(ProducerConfig.ACKS_CONFIG, hoodieConfig
.getStringOrDefault(ACKS));
// retries 3 times by default
kafkaProducerProps.setProperty(ProducerConfig.RETRIES_CONFIG, hoodieConfig
.getStringOrDefault(RETRIES));
kafkaProducerProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.StringSerializer");
kafkaProducerProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.StringSerializer");
LOG.debug("Callback kafka producer init with configs: "
+ HoodieWriteCommitCallbackUtil.convertToJsonString(kafkaProducerProps));
return new KafkaProducer<String, String>(kafkaProducerProps);
} | 3.68 |
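A minimal usage sketch of a producer built with the same settings; the bootstrap servers, topic name, key, and payload below are placeholders, not values taken from the Hudi callback:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

public class CommitCallbackProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.setProperty(ProducerConfig.ACKS_CONFIG, "all"); // avoid message loss
        props.setProperty(ProducerConfig.RETRIES_CONFIG, "3");
        props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
        props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Send a commit notification; key and payload are illustrative only.
            producer.send(new ProducerRecord<>("hoodie-write-commit", "commitTime", "{\"status\":\"SUCCESS\"}"));
            producer.flush();
        }
    }
}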
hbase_SnapshotManager_cleanupCompletedSnapshotInMap | /**
* Remove the procedures that are marked as finished
*/
private synchronized void cleanupCompletedSnapshotInMap() {
ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
Iterator<Map.Entry<SnapshotDescription, Long>> it = snapshotToProcIdMap.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<SnapshotDescription, Long> entry = it.next();
Long procId = entry.getValue();
if (procExec.isRunning() && procExec.isFinished(procId)) {
it.remove();
}
}
} | 3.68 |
hadoop_FSDataOutputStreamBuilder_blockSize | /**
* Set block size.
*
* @param blkSize block size.
* @return B Generics Type.
*/
public B blockSize(long blkSize) {
blockSize = blkSize;
return getThisBuilder();
} | 3.68 |
hadoop_DataNodeFaultInjector_delay | /**
* Just delay a while.
*/
public void delay() {} | 3.68 |
flink_StringUtils_writeNullableString | /**
* Writes a String to the given output. The string may be null. The written string can be read
 * with {@link #readNullableString(DataInputView)}.
*
* @param str The string to write, or null.
* @param out The output to write to.
* @throws IOException Thrown, if the writing or the serialization fails.
*/
public static void writeNullableString(@Nullable String str, DataOutputView out)
throws IOException {
if (str != null) {
out.writeBoolean(true);
writeString(str, out);
} else {
out.writeBoolean(false);
}
} | 3.68 |
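The null-flag framing can be illustrated with plain data streams. This is a sketch of the pattern only; Flink's actual string encoding in writeString differs from writeUTF:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class NullableStringFramingSketch {
    static void writeNullable(String s, DataOutputStream out) throws IOException {
        out.writeBoolean(s != null);   // presence flag first
        if (s != null) {
            out.writeUTF(s);           // payload only when present
        }
    }

    static String readNullable(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);
        writeNullable("hello", out);
        writeNullable(null, out);
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
        System.out.println(readNullable(in)); // hello
        System.out.println(readNullable(in)); // null
    }
}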
morf_Function_daysBetween | /**
* The number of days between two dates including one bound, but excluding the other;
* so {@code daysBetween(2012-12-20, 2012-12-21)} is 1.
*
* @param fromDate Lower bound.
* @param toDate Upper bound.
* @return function An instance of the "days between" function.
*/
public static AliasedField daysBetween(AliasedField fromDate, AliasedField toDate) {
return new Function(FunctionType.DAYS_BETWEEN, toDate, fromDate);
} | 3.68 |
framework_AbstractDateField_setRangeStart | /**
* Sets the start range for this component. If the value is set before this
* date (taking the resolution into account), the component will not
* validate. If {@code startDate} is set to {@code null}, any value before
* {@code endDate} will be accepted by the range
* <p>
* Note: Negative, i.e. BC dates are not supported.
* <p>
* Note: It's usually recommended to use only one of the following at the
* same time: Range validator with Binder or DateField's setRangeStart
* check.
*
* @param startDate
* - the allowed range's start date
*/
public void setRangeStart(T startDate) {
if (afterDate(startDate, convertFromDateString(getState().rangeEnd))) {
throw new IllegalStateException(
"startDate cannot be later than endDate");
}
getState().rangeStart = convertToDateString(startDate);
} | 3.68 |
open-banking-gateway_ServiceContextProviderForFintech_fintechFacingSecretKeyBasedEncryption | /**
 * To be consumed by {@link de.adorsys.opba.protocol.facade.services.AuthSessionHandler} if a new auth session is started.
*/
private <REQUEST extends FacadeServiceableGetter> RequestScoped fintechFacingSecretKeyBasedEncryption(
REQUEST request,
ServiceSession session,
Long bankProtocolId
) {
BankProfile profile = session.getBankProfile();
// FinTech requests should be signed, so creating Fintech entity if it does not exist.
Fintech fintech = authenticator.authenticateOrCreateFintech(request.getFacadeServiceable(), session);
return provider.registerForFintechSession(
fintech,
profile,
session,
bankProtocolId,
consentAuthorizationEncryptionServiceProvider,
consentAuthorizationEncryptionServiceProvider.generateKey(),
() -> request.getFacadeServiceable().getSessionPassword().toCharArray()
);
} | 3.68 |
pulsar_PackagesStorageProvider_newProvider | /**
* Construct a provider from the provided class.
*
* @param providerClassName the provider class name
* @return an instance of package storage provider
* @throws IOException
*/
static PackagesStorageProvider newProvider(String providerClassName) throws IOException {
Class<?> providerClass;
try {
providerClass = Class.forName(providerClassName);
Object obj = providerClass.getDeclaredConstructor().newInstance();
checkArgument(obj instanceof PackagesStorageProvider,
"The package storage provider has to be an instance of " + PackagesStorageProvider.class.getName());
return (PackagesStorageProvider) obj;
} catch (Exception e) {
throw new IOException(e);
}
} | 3.68 |
AreaShop_TeleportFeature_isSafe | /**
* Checks if a certain location is safe to teleport to.
* @param location The location to check
* @return true if it is safe, otherwise false
*/
private boolean isSafe(Location location) {
Block feet = location.getBlock();
Block head = feet.getRelative(BlockFace.UP);
Block below = feet.getRelative(BlockFace.DOWN);
Block above = head.getRelative(BlockFace.UP);
// Check the block at the feet and head of the player
if((feet.getType().isSolid() && !canSpawnIn(feet.getType())) || feet.isLiquid()) {
return false;
} else if((head.getType().isSolid() && !canSpawnIn(head.getType())) || head.isLiquid()) {
return false;
} else if(!below.getType().isSolid() || cannotSpawnOn(below.getType()) || below.isLiquid()) {
return false;
} else if(above.isLiquid() || cannotSpawnBeside(above.getType())) {
return false;
}
// Get all blocks around the player (below foot level, foot level, head level and above head level)
Set<Material> around = new HashSet<>();
for(int y = 0; y <= 3; y++) {
for(int x = -1; x <= 1; x++) {
for(int z = -1; z <= 1; z++) {
// Skip blocks in the column of the player
if(x == 0 && z == 0) {
continue;
}
around.add(below.getRelative(x, y, z).getType());
}
}
}
// Check the blocks around the player
for(Material material : around) {
if(cannotSpawnBeside(material)) {
return false;
}
}
return true;
} | 3.68 |
framework_TwinColSelectElement_selectByText | /**
* Selects the option with the given option text, i.e. adds it to the right
* side column.
*
* @param text
* the text of the option to select
*/
public void selectByText(String text) {
if (isReadOnly()) {
throw new ReadOnlyException();
}
options.deselectAll();
options.selectByVisibleText(text);
selButton.click();
} | 3.68 |
hadoop_BlockStorageMovementNeeded_isDirWorkDone | /**
* Return true if all the pending work is done and directory fully
* scanned, otherwise false.
*/
public synchronized boolean isDirWorkDone() {
return (pendingWorkCount <= 0 && fullyScanned);
} | 3.68 |
hbase_ReplicationSource_getWalEntryFilter | /**
* Call after {@link #initializeWALEntryFilter(UUID)} else it will be null.
* @return WAL Entry Filter Chain to use on WAL files filtering *out* WALEntry edits.
*/
WALEntryFilter getWalEntryFilter() {
return walEntryFilter;
} | 3.68 |
flink_TimestampExtractor_toProperties | /**
 * This method is a default implementation that uses Java serialization, and its use is discouraged.
* All implementation should provide a more specific set of properties.
*/
@Override
public Map<String, String> toProperties() {
Map<String, String> properties = new HashMap<>();
properties.put(
Rowtime.ROWTIME_TIMESTAMPS_TYPE, Rowtime.ROWTIME_TIMESTAMPS_TYPE_VALUE_CUSTOM);
properties.put(Rowtime.ROWTIME_TIMESTAMPS_CLASS, this.getClass().getName());
properties.put(
Rowtime.ROWTIME_TIMESTAMPS_SERIALIZED, EncodingUtils.encodeObjectToString(this));
return properties;
} | 3.68 |
flink_AbstractCatalogStore_open | /** Opens the catalog store. */
@Override
public void open() {
isOpen = true;
} | 3.68 |
hadoop_Server_destroyServices | /**
* Destroys the server services.
*/
protected void destroyServices() {
List<Service> list = new ArrayList<Service>(services.values());
Collections.reverse(list);
for (Service service : list) {
try {
log.debug("Destroying service [{}]", service.getInterface());
service.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}",
new Object[]{service.getInterface(), ex.getMessage(), ex});
}
}
log.info("Services destroyed");
} | 3.68 |
dubbo_RestRPCInvocationUtil_getRestMethodMetadataAndInvokerPair | /**
 * Get the InvokerAndRestMethodMetadataPair from the RPC context.
 *
 * @param request the request facade
 * @return the invoker and rest method metadata pair matched from the request
*/
public static InvokerAndRestMethodMetadataPair getRestMethodMetadataAndInvokerPair(RequestFacade request) {
PathMatcher pathMather = createPathMatcher(request);
return getRestMethodMetadataAndInvokerPair(pathMather, request.getServiceDeployer());
} | 3.68 |
graphhopper_AbstractAccessParser_isBarrier | /**
* @return true if the given OSM node blocks access for this vehicle, false otherwise
*/
public boolean isBarrier(ReaderNode node) {
// note that this method will be only called for certain nodes as defined by OSMReader!
String firstValue = node.getFirstPriorityTag(restrictions);
if (restrictedValues.contains(firstValue) || node.hasTag("locked", "yes"))
return true;
else if (intendedValues.contains(firstValue))
return false;
else if (node.hasTag("barrier", barriers))
return true;
else
return blockFords && node.hasTag("ford", "yes");
} | 3.68 |
flink_Rowtime_timestampsFromExtractor | /**
* Sets a custom timestamp extractor to be used for the rowtime attribute.
*
* @param extractor The {@link TimestampExtractor} to extract the rowtime attribute from the
* physical type.
*/
public Rowtime timestampsFromExtractor(TimestampExtractor extractor) {
internalProperties.putProperties(extractor.toProperties());
return this;
} | 3.68 |
hbase_ByteBufferOutputStream_writeInt | /**
* Writes an <code>int</code> to the underlying output stream as four bytes, high byte first.
* @param i the <code>int</code> to write
* @throws IOException if an I/O error occurs.
*/
@Override
public void writeInt(int i) throws IOException {
checkSizeAndGrow(Bytes.SIZEOF_INT);
ByteBufferUtils.putInt(this.curBuf, i);
} | 3.68 |
hbase_MetricsStochasticBalancer_updateStochasticCost | /**
* Reports stochastic load balancer costs to JMX
*/
public void updateStochasticCost(String tableName, String costFunctionName,
String costFunctionDesc, Double value) {
stochasticSource.updateStochasticCost(tableName, costFunctionName, costFunctionDesc, value);
} | 3.68 |
hbase_FastPathRpcHandler_loadCallRunner | /**
* @param cr Task gotten via fastpath.
* @return True if we successfully loaded our task
*/
boolean loadCallRunner(final CallRunner cr) {
this.loadedCallRunner = cr;
this.semaphore.release();
return true;
} | 3.68 |
hbase_MetricsSource_updateTableLevelMetrics | /**
* Update the table level replication metrics per table
* @param walEntries List of pairs of WAL entry and it's size
*/
public void updateTableLevelMetrics(List<Pair<Entry, Long>> walEntries) {
for (Pair<Entry, Long> walEntryWithSize : walEntries) {
Entry entry = walEntryWithSize.getFirst();
long entrySize = walEntryWithSize.getSecond();
String tableName = entry.getKey().getTableName().getNameAsString();
long writeTime = entry.getKey().getWriteTime();
long age = EnvironmentEdgeManager.currentTime() - writeTime;
// get the replication metrics source for table at the run time
MetricsReplicationTableSource tableSource = this.getSingleSourceSourceByTable()
.computeIfAbsent(tableName, t -> CompatibilitySingletonFactory
.getInstance(MetricsReplicationSourceFactory.class).getTableSource(t));
tableSource.setLastShippedAge(age);
tableSource.incrShippedBytes(entrySize);
}
} | 3.68 |
hbase_TableMapReduceUtil_initTableSnapshotMapperJob | /**
* Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly
* from snapshot files.
* @param snapshotName The name of the snapshot (of a table) to read from.
* @param scan The scan instance with the columns, time range etc.
* @param mapper The mapper class to use.
* @param outputKeyClass The class of the output key.
* @param outputValueClass The class of the output value.
* @param job The current job to adjust. Make sure the passed job is carrying all
* necessary HBase configuration.
* @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
* the distributed cache (tmpjars).
* @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user
* should have write permissions to this directory, and this should not
* be a subdirectory of rootdir. After the job is finished, restore
* directory can be deleted.
* @param splitAlgo algorithm to split
* @param numSplitsPerRegion how many input splits to generate per one region
* @throws IOException When setting up the details fails.
* @see TableSnapshotInputFormat
*/
public static void initTableSnapshotMapperJob(String snapshotName, Scan scan,
Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass,
Job job, boolean addDependencyJars, Path tmpRestoreDir, RegionSplitter.SplitAlgorithm splitAlgo,
int numSplitsPerRegion) throws IOException {
TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir, splitAlgo,
numSplitsPerRegion);
initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job,
addDependencyJars, false, TableSnapshotInputFormat.class);
resetCacheConfig(job.getConfiguration());
} | 3.68 |
hbase_KeyValueCodec_getDecoder | /**
* Implementation depends on {@link InputStream#available()}
*/
@Override
public Decoder getDecoder(final InputStream is) {
return new KeyValueDecoder(is);
} | 3.68 |
hadoop_RunnableCallable_toString | /**
* Returns the class name of the wrapper callable/runnable.
*
* @return the class name of the wrapper callable/runnable.
*/
@Override
public String toString() {
return (runnable != null) ? runnable.getClass().getSimpleName() : callable.getClass().getSimpleName();
} | 3.68 |
framework_AbstractSplitPanel_getMaxSplitPosition | /**
* Returns the current maximum position of the splitter, in
* {@link #getMaxSplitPositionUnit()} units.
*
* @return the maximum position of the splitter
*/
public float getMaxSplitPosition() {
return getSplitterState(false).maxPosition;
} | 3.68 |
dubbo_ConsumerModel_getMethodModel | /**
* @param method methodName
* @param argsType method arguments type
* @return
*/
public ConsumerMethodModel getMethodModel(String method, String[] argsType) {
Optional<ConsumerMethodModel> consumerMethodModel = methodModels.entrySet().stream()
.filter(entry -> entry.getKey().getName().equals(method))
.map(Map.Entry::getValue)
.filter(methodModel -> Arrays.equals(argsType, methodModel.getParameterTypes()))
.findFirst();
return consumerMethodModel.orElse(null);
} | 3.68 |
rocketmq-connect_DatabaseDialect_parseTableNameToTableId | /**
* parse to Table Id
*
* @param fqn
* @return
*/
default TableId parseTableNameToTableId(String fqn) {
List<String> parts = identifierRules().parseQualifiedIdentifier(fqn);
if (parts.isEmpty()) {
throw new IllegalArgumentException("Invalid fully qualified name: '" + fqn + "'");
}
if (parts.size() == 1) {
return new TableId(null, null, parts.get(0));
}
if (parts.size() == 3) {
return new TableId(parts.get(0), parts.get(1), parts.get(2));
}
if (useCatalog()) {
return new TableId(parts.get(0), null, parts.get(1));
}
return new TableId(null, parts.get(0), parts.get(1));
} | 3.68 |
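A sketch of how the part count maps to catalog/schema/table, using plain string splitting for illustration; the real dialect parses with its identifier rules, which also honor quoting:

public class TableIdMappingSketch {
    // Returns {catalog, schema, table}; nulls mark missing components.
    static String[] toTableId(String fqn, boolean useCatalog) {
        String[] parts = fqn.split("\\.");
        if (parts.length == 1) {
            return new String[] {null, null, parts[0]};
        }
        if (parts.length == 3) {
            return new String[] {parts[0], parts[1], parts[2]};
        }
        // two parts: either catalog.table or schema.table, depending on the dialect
        return useCatalog
            ? new String[] {parts[0], null, parts[1]}
            : new String[] {null, parts[0], parts[1]};
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(toTableId("orders", false)));          // [null, null, orders]
        System.out.println(java.util.Arrays.toString(toTableId("sales.orders", false)));    // [null, sales, orders]
        System.out.println(java.util.Arrays.toString(toTableId("db.sales.orders", false))); // [db, sales, orders]
    }
}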
hibernate-validator_NotEmptyValidatorForArraysOfDouble_isValid | /**
* Checks the array is not {@code null} and not empty.
*
* @param array the array to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the array is not {@code null} and the array is not empty
*/
@Override
public boolean isValid(double[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return false;
}
return array.length > 0;
} | 3.68 |
MagicPlugin_BaseSpell_sendMessage | /**
* Send a message to a player.
*
* <p>Use this to send messages to the player that are important.
*
* @param message The message to send
*/
@Override
public void sendMessage(String message) {
sendMessage(mage, message);
} | 3.68 |
hbase_CatalogFamilyFormat_isMergeQualifierPrefix | /** Returns True if the column in <code>cell</code> matches the regex 'info:merge.*'. */
public static boolean isMergeQualifierPrefix(Cell cell) {
// Check to see if has family and that qualifier starts with the merge qualifier 'merge'
return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY)
&& PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX);
} | 3.68 |
framework_VDragEvent_getDropDetails | /**
* Returns the details of the drag and drop operation.
*
* TODO consider using similar smaller (than map) API as in Transferable
*
* TODO clean up when drop handler changes
*
* @return the drop details
*/
public Map<String, Object> getDropDetails() {
return dropDetails;
} | 3.68 |
hudi_RunLengthDecoder_readIntLittleEndianPaddedOnBitWidth | /**
* Reads the next byteWidth little endian int.
*/
private int readIntLittleEndianPaddedOnBitWidth() throws IOException {
switch (bytesWidth) {
case 0:
return 0;
case 1:
return in.read();
case 2: {
int ch2 = in.read();
int ch1 = in.read();
return (ch1 << 8) + ch2;
}
case 3: {
int ch3 = in.read();
int ch2 = in.read();
int ch1 = in.read();
return (ch1 << 16) + (ch2 << 8) + ch3;
}
case 4: {
return readIntLittleEndian();
}
default:
throw new RuntimeException("Unreachable");
}
} | 3.68 |
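A self-contained check of the little-endian assembly, equivalent to the per-width cases above; the stream bytes below are made up for illustration:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class LittleEndianSketch {
    static int readLittleEndian(InputStream in, int bytesWidth) throws IOException {
        int value = 0;
        for (int i = 0; i < bytesWidth; i++) {
            value += in.read() << (8 * i);   // least significant byte arrives first
        }
        return value;
    }

    public static void main(String[] args) throws IOException {
        // bytes 0x01 0x02 -> 0x0201 = 513; bytes 0x01 0x02 0x03 -> 0x030201 = 197121
        System.out.println(readLittleEndian(new ByteArrayInputStream(new byte[] {1, 2}), 2));    // 513
        System.out.println(readLittleEndian(new ByteArrayInputStream(new byte[] {1, 2, 3}), 3)); // 197121
    }
}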
graphhopper_Entity_getDateField | /**
* Fetch the given column of the current row, and interpret it as a date in the format YYYYMMDD.
* @return the date value as Java LocalDate, or null if it could not be parsed.
*/
protected LocalDate getDateField(String column, boolean required) throws IOException {
String str = getFieldCheckRequired(column, required);
LocalDate dateTime = null;
if (str != null) try {
dateTime = LocalDate.parse(str, DateTimeFormatter.BASIC_ISO_DATE);
checkRangeInclusive(2000, 2100, dateTime.getYear());
} catch (IllegalArgumentException iae) {
feed.errors.add(new DateParseError(tableName, row, column));
}
return dateTime;
} | 3.68 |
hadoop_ShortWritable_readFields | /** read the short value */
@Override
public void readFields(DataInput in) throws IOException {
value = in.readShort();
} | 3.68 |
querydsl_MongodbExpressions_all | /**
* Find documents where the value of a field is an array that contains all the specific elements.
*
* @param expr expression
* @param params params
*/
public static <T, Q extends SimpleExpression<? super T>> BooleanExpression all(ListPath<T, Q> expr, Collection<T> params) {
return Expressions.booleanOperation(MongodbOps.ALL, expr, ConstantImpl.create(params));
} | 3.68 |
hadoop_SlowPeerTracker_isSlowPeerTrackerEnabled | /**
 * Returns {@code true} if slow peer tracking is enabled, otherwise {@code false}.
*
* @return true if slow peer tracking is enabled, else false.
*/
public boolean isSlowPeerTrackerEnabled() {
return true;
} | 3.68 |
framework_VAbstractCalendarPanel_setDateTimeService | /**
* Sets date time service for the widget.
*
* @param dateTimeService
* date time service
*/
public void setDateTimeService(DateTimeService dateTimeService) {
this.dateTimeService = dateTimeService;
} | 3.68 |
framework_ConnectorTracker_cleanConnectorMap | /**
* Cleans the connector map from all connectors that are no longer attached
* to the application. This should only be called by the framework.
*
* @deprecated use {@link #cleanConnectorMap(boolean)} instead
*/
@Deprecated
public void cleanConnectorMap() {
removeUnregisteredConnectors();
cleanStreamVariables();
// Do this expensive check only with assertions enabled
assert isHierarchyComplete() : "The connector hierarchy is corrupted. "
+ "Check for missing calls to super.setParent(), super.attach() and super.detach() "
+ "and that all custom component containers call child.setParent(this) when a child is added and child.setParent(null) when the child is no longer used. "
+ "See previous log messages for details.";
Iterator<ClientConnector> iterator = connectorIdToConnector.values()
.iterator();
GlobalResourceHandler globalResourceHandler = uI.getSession()
.getGlobalResourceHandler(false);
while (iterator.hasNext()) {
ClientConnector connector = iterator.next();
assert connector != null;
if (connector.getUI() != uI) {
// If connector is no longer part of this uI,
// remove it from the map. If it is re-attached to the
// application at some point it will be re-added through
// registerConnector(connector)
// This code should never be called as cleanup should take place
// in detach()
getLogger().log(Level.WARNING,
"cleanConnectorMap unregistered connector {0}. This should have been done when the connector was detached.",
getConnectorAndParentInfo(connector));
if (globalResourceHandler != null) {
globalResourceHandler.unregisterConnector(connector);
}
uninitializedConnectors.remove(connector);
diffStates.remove(connector);
iterator.remove();
} else if (!uninitializedConnectors.contains(connector)
&& !LegacyCommunicationManager
.isConnectorVisibleToClient(connector)) {
// Connector was visible to the client but is no longer (e.g.
// setVisible(false) has been called or SelectiveRenderer tells
// it's no longer shown) -> make sure that the full state is
// sent again when/if made visible
uninitializedConnectors.add(connector);
diffStates.remove(connector);
assert isRemovalSentToClient(connector) : "Connector "
+ connector + " (id = " + connector.getConnectorId()
+ ") is no longer visible to the client, but no corresponding hierarchy change was sent.";
if (fineLogging) {
getLogger().log(Level.FINE,
"cleanConnectorMap removed state for {0} as it is not visible",
getConnectorAndParentInfo(connector));
}
}
}
} | 3.68 |
hbase_MobUtils_isMobReferenceCell | /**
* Whether the current cell is a mob reference cell.
* @param cell The current cell.
* @return True if the cell has a mob reference tag, false if it doesn't.
*/
public static boolean isMobReferenceCell(Cell cell) {
if (cell.getTagsLength() > 0) {
Optional<Tag> tag = PrivateCellUtil.getTag(cell, TagType.MOB_REFERENCE_TAG_TYPE);
if (tag.isPresent()) {
return true;
}
}
return false;
} | 3.68 |
hbase_BucketCache_getBlock | /**
* Get the buffer of the block with the specified key.
* @param key block's cache key
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block
* @param updateCacheMetrics Whether we should update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
if (!cacheEnabled) {
return null;
}
RAMQueueEntry re = ramCache.get(key);
if (re != null) {
if (updateCacheMetrics) {
cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
}
re.access(accessCount.incrementAndGet());
return re.getData();
}
BucketEntry bucketEntry = backingMap.get(key);
if (bucketEntry != null) {
long start = System.nanoTime();
ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
try {
lock.readLock().lock();
        // We cannot read here even if backingMap does contain the given key, because its offset
        // may have changed. If we locked the BlockCacheKey instead of the offset, we could only
        // check existence here.
if (bucketEntry.equals(backingMap.get(key))) {
// Read the block from IOEngine based on the bucketEntry's offset and length, NOTICE: the
// block will use the refCnt of bucketEntry, which means if two HFileBlock mapping to
// the same BucketEntry, then all of the three will share the same refCnt.
Cacheable cachedBlock = ioEngine.read(bucketEntry);
if (ioEngine.usesSharedMemory()) {
// If IOEngine use shared memory, cachedBlock and BucketEntry will share the
// same RefCnt, do retain here, in order to count the number of RPC references
cachedBlock.retain();
}
// Update the cache statistics.
if (updateCacheMetrics) {
cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
cacheStats.ioHit(System.nanoTime() - start);
}
bucketEntry.access(accessCount.incrementAndGet());
if (this.ioErrorStartTime > 0) {
ioErrorStartTime = -1;
}
return cachedBlock;
}
} catch (HBaseIOException hioex) {
// When using file io engine persistent cache,
// the cache map state might differ from the actual cache. If we reach this block,
// we should remove the cache key entry from the backing map
backingMap.remove(key);
fullyCachedFiles.remove(key.getHfileName());
LOG.debug("Failed to fetch block for cache key: {}.", key, hioex);
} catch (IOException ioex) {
LOG.error("Failed reading block " + key + " from bucket cache", ioex);
checkIOErrorIsTolerated();
} finally {
lock.readLock().unlock();
}
}
if (!repeat && updateCacheMetrics) {
cacheStats.miss(caching, key.isPrimary(), key.getBlockType());
}
return null;
} | 3.68 |
flink_ExistingField_getExpression | /**
* Returns an {@link Expression} that casts a {@link Long}, {@link Timestamp}, or timestamp
* formatted {@link String} field (e.g., "2018-05-28 12:34:56.000") into a rowtime attribute.
*/
@Override
public Expression getExpression(ResolvedFieldReference[] fieldAccesses) {
ResolvedFieldReference fieldAccess = fieldAccesses[0];
DataType type = fromLegacyInfoToDataType(fieldAccess.resultType());
FieldReferenceExpression fieldReferenceExpr =
new FieldReferenceExpression(fieldAccess.name(), type, 0, fieldAccess.fieldIndex());
switch (type.getLogicalType().getTypeRoot()) {
case BIGINT:
case TIMESTAMP_WITHOUT_TIME_ZONE:
return fieldReferenceExpr;
case VARCHAR:
DataType outputType = TIMESTAMP(3).bridgedTo(Timestamp.class);
return CallExpression.permanent(
CAST,
Arrays.asList(fieldReferenceExpr, typeLiteral(outputType)),
outputType);
default:
throw new RuntimeException("Unsupport type: " + type);
}
} | 3.68 |