name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
flink_FlinkPipelineTranslationUtil_translateToJSONExecutionPlan | /** Extracts the execution plan (as JSON) from the given {@link Pipeline}. */
public static String translateToJSONExecutionPlan(
ClassLoader userClassloader, Pipeline pipeline) {
FlinkPipelineTranslator pipelineTranslator =
getPipelineTranslator(userClassloader, pipeline);
return pipelineTranslator.translateToJSONExecutionPlan(pipeline);
} | 3.68 |
framework_BeanItemContainer_getBeanClassForCollection | /**
* Internal helper method to support the deprecated {@link Collection}
* container.
*
* @param <BT> the bean type
* @param collection a non-empty collection of beans
* @return the class of the first bean in the collection
* @throws IllegalArgumentException if the collection is null or empty
*/
@SuppressWarnings("unchecked")
@Deprecated
private static <BT> Class<? extends BT> getBeanClassForCollection(
Collection<? extends BT> collection)
throws IllegalArgumentException {
if (collection == null || collection.isEmpty()) {
throw new IllegalArgumentException(
"The collection passed to BeanItemContainer constructor must not be null or empty. Use the other BeanItemContainer constructor.");
}
return (Class<? extends BT>) collection.iterator().next().getClass();
} | 3.68 |
morf_ChangeColumn_verifyDataTypeChanges | /**
* Verify that any data type transitions are allowed.
*/
private void verifyDataTypeChanges() {
// if there's no change, there's no problem
if (Objects.equals(fromColumn.getType(), toColumn.getType())) {
return;
}
// look up what target types we are allowed to change to
Collection<DataType> allowableTargetTypes = allowedDataTypeChanges.get(fromColumn.getType());
if (!allowableTargetTypes.contains(toColumn.getType())) {
throw new IllegalArgumentException(String.format("Attempting to change the data type of [%s]. Changes from %s to %s are not supported.", fromColumn.getName(), fromColumn.getType(), toColumn.getType()));
}
} | 3.68 |
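
The check above relies on an `allowedDataTypeChanges` lookup defined elsewhere in `ChangeColumn`. A minimal, self-contained sketch of the same allow-list pattern follows; the enum values and permitted transitions here are hypothetical, not morf's actual rules.

```java
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Map;
import java.util.Set;

public class TypeChangePolicy {
  // Hypothetical data types and allow-list; morf's real rules live in ChangeColumn.
  enum DataType { STRING, DECIMAL, BIG_INTEGER, CLOB }

  private static final Map<DataType, Set<DataType>> ALLOWED = new EnumMap<>(DataType.class);
  static {
    ALLOWED.put(DataType.STRING, EnumSet.of(DataType.CLOB));
    ALLOWED.put(DataType.DECIMAL, EnumSet.of(DataType.BIG_INTEGER));
  }

  static void verifyDataTypeChange(DataType from, DataType to) {
    if (from == to) {
      return; // no change, no problem
    }
    Set<DataType> targets = ALLOWED.getOrDefault(from, EnumSet.noneOf(DataType.class));
    if (!targets.contains(to)) {
      throw new IllegalArgumentException(
          String.format("Changes from %s to %s are not supported.", from, to));
    }
  }
}
```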
dubbo_ProviderConfig_setExportBackground | /**
* Whether export should run in background or not.
*
* @deprecated replace with {@link ModuleConfig#setBackground(Boolean)}
* @see ModuleConfig#setBackground(Boolean)
*/
@Deprecated
public void setExportBackground(Boolean exportBackground) {
this.exportBackground = exportBackground;
} | 3.68 |
flink_SignalHandler_handle | /**
* Handle an incoming signal.
*
* @param signal The incoming signal
*/
@Override
public void handle(Signal signal) {
LOG.info(
"RECEIVED SIGNAL {}: SIG{}. Shutting down as requested.",
signal.getNumber(),
signal.getName());
prevHandler.handle(signal);
} | 3.68 |
open-banking-gateway_PsuFintechAssociationService_readInboxFromFinTech | /**
* Allows reading the consent specification that was required by the FinTech
* @param session Authorization session for the consent grant
* @param fintechUserPassword PSU/Fintech users' password
* @return Consent specification
*/
@Transactional
public FintechConsentSpecSecureStorage.FinTechUserInboxData readInboxFromFinTech(AuthSession session, String fintechUserPassword) {
return vault.fromInboxForAuth(
session,
fintechUserPassword::toCharArray
);
} | 3.68 |
morf_RenameIndex_getToIndexName | /**
* Gets the name of the index after the change
*
* @return the name of the index after the change
*/
public String getToIndexName() {
return toIndexName;
} | 3.68 |
hudi_HoodieTable_rollbackInflightLogCompaction | /**
* Rollback failed compactions. Inflight rollbacks for compactions revert the .inflight file
* to the .requested file.
*
* @param inflightInstant Inflight Compaction Instant
*/
public void rollbackInflightLogCompaction(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
final String commitTime = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry
-> entry.getRollbackInstant().getTimestamp())
.orElse(getMetaClient().createNewInstantTime());
scheduleRollback(context, commitTime, inflightInstant, false, config.shouldRollbackUsingMarkers(),
false);
rollback(context, commitTime, inflightInstant, true, false);
} | 3.68 |
hadoop_BoundDTExtension_getUserAgentSuffix | /**
* Get a suffix for the User-Agent header of HTTP requests, which
* can be used to identify the principal making ABFS requests.
* @return an empty string, or a key=value string to be added to the UA
* header.
*/
default String getUserAgentSuffix() {
return "";
} | 3.68 |
druid_DruidDataSource_addFilter | /**
* Deduplicates: a filter of the same class will not be added twice.
*
* @param filter the filter to register and initialize if not already present
*/
private void addFilter(Filter filter) {
boolean exists = false;
for (Filter initedFilter : this.filters) {
if (initedFilter.getClass() == filter.getClass()) {
exists = true;
break;
}
}
if (!exists) {
filter.init(this);
this.filters.add(filter);
}
} | 3.68 |
hbase_HFileInfo_write | /**
* Write out this instance on the passed in <code>out</code> stream. We write it as a protobuf.
* @see #read(DataInputStream)
*/
void write(final DataOutputStream out) throws IOException {
HFileProtos.FileInfoProto.Builder builder = HFileProtos.FileInfoProto.newBuilder();
for (Map.Entry<byte[], byte[]> e : this.map.entrySet()) {
HBaseProtos.BytesBytesPair.Builder bbpBuilder = HBaseProtos.BytesBytesPair.newBuilder();
bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(e.getKey()));
bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue()));
builder.addMapEntry(bbpBuilder.build());
}
out.write(ProtobufMagic.PB_MAGIC);
builder.build().writeDelimitedTo(out);
} | 3.68 |
flink_TGetQueryIdReq_findByThriftId | /** Find the _Fields constant that matches fieldId, or null if it is not found. */
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1: // OPERATION_HANDLE
return OPERATION_HANDLE;
default:
return null;
}
} | 3.68 |
framework_Table_refreshRenderedCells | /**
* Refreshes the rows in the internal cache. All values are guaranteed to
* be recreated only if {@link #resetPageBuffer()} is called before this
* method.
*/
protected void refreshRenderedCells() {
if (!isAttached()) {
return;
}
if (!isContentRefreshesEnabled) {
return;
}
// Collects the basic facts about the table page
final int pagelen = getPageLength();
int rows, totalRows;
rows = totalRows = size();
int firstIndex = Math.min(getCurrentPageFirstItemIndex(),
totalRows - 1);
if (rows > 0 && firstIndex >= 0) {
rows -= firstIndex;
}
if (pagelen > 0 && pagelen < rows) {
rows = pagelen;
}
// If "to be painted next" variables are set, use them
if (lastToBeRenderedInClient - firstToBeRenderedInClient > 0) {
rows = lastToBeRenderedInClient - firstToBeRenderedInClient + 1;
}
if (firstToBeRenderedInClient >= 0) {
if (firstToBeRenderedInClient < totalRows) {
firstIndex = firstToBeRenderedInClient;
} else {
firstIndex = totalRows - 1;
}
} else {
// initial load
// #8805 send one extra row in the beginning in case a partial
// row is shown on the UI
if (firstIndex > 0) {
firstIndex = firstIndex - 1;
rows = rows + 1;
}
firstToBeRenderedInClient = firstIndex;
}
if (totalRows > 0) {
if (rows + firstIndex > totalRows) {
rows = totalRows - firstIndex;
}
} else {
rows = 0;
}
// Saves the results to internal buffer
pageBuffer = getVisibleCellsNoCache(firstIndex, rows, true);
if (rows > 0) {
pageBufferFirstIndex = firstIndex;
}
setRowCacheInvalidated(true);
markAsDirty();
maybeThrowCacheUpdateExceptions();
} | 3.68 |
hbase_RESTServlet_getConnectionCache | /** Returns the ConnectionCache instance */
public ConnectionCache getConnectionCache() {
return connectionCache;
} | 3.68 |
hbase_RecoverableZooKeeper_getAcl | /**
* getAcl is an idempotent operation; retry before throwing an exception.
* @return list of ACLs
*/
public List<ACL> getAcl(String path, Stat stat) throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.getAcl");
try (Scope ignored = span.makeCurrent()) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
span.setStatus(StatusCode.OK);
return checkZk().getACL(path, stat);
} catch (KeeperException e) {
switch (e.code()) {
case CONNECTIONLOSS:
case OPERATIONTIMEOUT:
case REQUESTTIMEOUT:
TraceUtil.setError(span, e);
retryOrThrow(retryCounter, e, "getAcl");
break;
default:
TraceUtil.setError(span, e);
throw e;
}
}
retryCounter.sleepUntilNextRetry();
}
} finally {
span.end();
}
} | 3.68 |
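
The loop above follows a general retry-until-success pattern: transient ZooKeeper errors (CONNECTIONLOSS, OPERATIONTIMEOUT, REQUESTTIMEOUT) are retried via a RetryCounter, everything else is rethrown. A minimal, dependency-free sketch of that pattern under assumed policy parameters (fixed sleep, every exception treated as retriable):

```java
import java.util.concurrent.Callable;

public final class Retries {

  /** Retries the call up to maxAttempts times, rethrowing the last exception when attempts run out. */
  public static <T> T retry(Callable<T> call, int maxAttempts, long sleepMs) throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return call.call();
      } catch (Exception e) {
        last = e; // in this sketch every exception is treated as retriable
        if (attempt < maxAttempts) {
          Thread.sleep(sleepMs); // fixed back-off; real code typically backs off exponentially
        }
      }
    }
    throw last;
  }
}
```

A real implementation, like the one above, should also distinguish retriable error codes from fatal ones before looping.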
querydsl_MetaDataExporter_setCatalogPattern | /**
* Sets the catalog pattern: must match the catalog name as it
* is stored in the database; "" retrieves those without a catalog;
* <code>null</code> means that the catalog name should not be used to narrow
* the search.
*/
public void setCatalogPattern(@Nullable String catalogPattern) {
this.catalogPattern = catalogPattern;
} | 3.68 |
dubbo_DefaultModuleDeployer_prepare | /**
* Prepare for export/refer service, trigger initializing application and module
*/
@Override
public void prepare() {
applicationDeployer.initialize();
this.initialize();
} | 3.68 |
hudi_CompactionTask_newBuilder | /**
* Utility to create builder for {@link CompactionTask}.
*
* @return Builder for {@link CompactionTask}.
*/
public static Builder newBuilder() {
return new Builder();
} | 3.68 |
flink_ConfluentRegistryAvroSerializationSchema_forGeneric | /**
* Creates {@link AvroSerializationSchema} that produces byte arrays that were generated from
* Avro schema and writes the writer schema to Confluent Schema Registry.
*
* @param subject subject of schema registry to produce
* @param schema schema that will be used for serialization
* @param schemaRegistryUrl URL of schema registry to connect
* @param registryConfigs map with additional schema registry configs (for example SSL
* properties)
* @return serialization schema that serializes {@link GenericRecord} using the provided schema
*/
public static ConfluentRegistryAvroSerializationSchema<GenericRecord> forGeneric(
String subject,
Schema schema,
String schemaRegistryUrl,
@Nullable Map<String, ?> registryConfigs) {
return new ConfluentRegistryAvroSerializationSchema<>(
GenericRecord.class,
schema,
new CachedSchemaCoderProvider(
subject,
schemaRegistryUrl,
DEFAULT_IDENTITY_MAP_CAPACITY,
registryConfigs));
} | 3.68 |
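
A hedged usage sketch of the factory above; the writer schema, subject name and registry URL are placeholders, and the class is assumed to live in Flink's `flink-avro-confluent-registry` module.

```java
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema;

public class ForGenericExample {
  public static void main(String[] args) throws Exception {
    // Placeholder writer schema, subject and registry URL.
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
    ConfluentRegistryAvroSerializationSchema<GenericRecord> serializer =
        ConfluentRegistryAvroSerializationSchema.forGeneric(
            "users-value", schema, "http://registry:8081", null);

    GenericRecord record = new GenericData.Record(schema);
    record.put("id", 42L);
    byte[] payload = serializer.serialize(record); // registers the writer schema on first use
  }
}
```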
hbase_SnapshotInfo_getSharedStoreFilesSize | /** Returns the total size of the store files shared */
public long getSharedStoreFilesSize() {
return hfilesSize.get();
} | 3.68 |
hudi_CompactionStrategy_generateCompactionPlan | /**
* Generates the compaction plan. Allows clients to order and filter the list of compactions to be scheduled. The default
* implementation takes care of setting the compactor id from configuration, allowing subclasses to only worry about
* ordering and filtering compaction operations.
*
* @param writeConfig Hoodie Write Config
* @param operations Compaction Operations to be ordered and filtered
* @param pendingCompactionPlans Pending Compaction Plans for strategy to schedule next compaction plan
* @return Compaction plan to be scheduled.
*/
public HoodieCompactionPlan generateCompactionPlan(HoodieWriteConfig writeConfig,
List<HoodieCompactionOperation> operations, List<HoodieCompactionPlan> pendingCompactionPlans) {
// Strategy implementation can overload this method to set specific compactor-id
return HoodieCompactionPlan.newBuilder()
.setOperations(orderAndFilter(writeConfig, operations, pendingCompactionPlans))
.setVersion(CompactionUtils.LATEST_COMPACTION_METADATA_VERSION).build();
} | 3.68 |
hbase_KeyValue_getTypeByte | /** Return the KeyValue.TYPE byte representation */
byte getTypeByte(int keyLength) {
return this.bytes[this.offset + keyLength - 1 + ROW_OFFSET];
} | 3.68 |
hudi_GenericRecordFullPayloadGenerator_validate | /**
* Validate whether the record matches the schema.
*
* @param record Record to validate.
* @return True if matches.
*/
public boolean validate(GenericRecord record) {
return genericData.validate(baseSchema, record);
} | 3.68 |
framework_CurrentInstance_defineFallbackResolver | /**
* Adds a CurrentInstanceFallbackResolver, that is triggered when
* {@link #get(Class)} can't find a suitable instance for the given type
* parameter.
*
* @param type
* the class used on {@link #get(Class)} invocations to retrieve
* the current instance
* @param fallbackResolver
* the resolver, not <code>null</code>
*
* @throws IllegalArgumentException
* if there's already a defined fallback resolver for the given
* type
* @since 8.5.2
*/
public static <T> void defineFallbackResolver(Class<T> type,
CurrentInstanceFallbackResolver<T> fallbackResolver) {
if (fallbackResolver == null) {
throw new IllegalArgumentException(
"The fallback resolver can not be null.");
}
if (fallbackResolvers.putIfAbsent(type, fallbackResolver) != null) {
throw new IllegalArgumentException(
"A fallback resolver for the type " + type
+ " is already defined.");
}
} | 3.68 |
hbase_RecoverableZooKeeper_getData | /**
* getData is an idempotent operation; retry before throwing an exception.
*/
public byte[] getData(String path, boolean watch, Stat stat)
throws KeeperException, InterruptedException {
return getData(path, null, watch, stat);
} | 3.68 |
rocketmq-connect_ColumnDefinition_nullability | /**
* Indicates the nullability of values in the column.
*
* @return the nullability status of the given column; never null
*/
public Nullability nullability() {
return nullability;
} | 3.68 |
flink_CheckpointCommitter_setOperatorId | /**
* Internally used to set the operator ID after instantiation.
*
* @param id the operator ID to set
* @throws Exception
*/
public void setOperatorId(String id) throws Exception {
this.operatorId = id;
} | 3.68 |
flink_Predicates_areFieldOfType | /**
* Tests that the field has the fully qualified type of {@code fqClassName} with the given
* modifiers.
*
* <p>Attention: changing the description will add a rule into the stored.rules.
*/
public static DescribedPredicate<JavaField> areFieldOfType(
String fqClassName, JavaModifier... modifiers) {
return DescribedPredicate.describe(
String.format(
"are %s, and of type %s",
Arrays.stream(modifiers)
.map(JavaModifier::toString)
.map(String::toLowerCase)
.collect(Collectors.joining(", ")),
getClassSimpleNameFromFqName(fqClassName)),
field ->
field.getModifiers().containsAll(Arrays.asList(modifiers))
&& field.getRawType().getName().equals(fqClassName));
} | 3.68 |
flink_SqlWindowTableFunction_checkIntervalOperands | /**
* Checks whether the operands starting from position {@code startPos} are all of type
* {@code INTERVAL}, returning whether successful.
*
* @param callBinding The call binding
* @param startPos The start position to validate (starting index is 0)
* @return true if validation passes
*/
boolean checkIntervalOperands(SqlCallBinding callBinding, int startPos) {
final SqlValidator validator = callBinding.getValidator();
for (int i = startPos; i < callBinding.getOperandCount(); i++) {
final RelDataType type = validator.getValidatedNodeType(callBinding.operand(i));
if (!SqlTypeUtil.isInterval(type)) {
return false;
}
}
return true;
} | 3.68 |
flink_HiveParserQB_isSimpleSelectQuery | // to find target for fetch task conversion optimizer (does not allow subqueries)
public boolean isSimpleSelectQuery() {
if (!qbp.isSimpleSelectQuery() || isCTAS() || qbp.isAnalyzeCommand()) {
return false;
}
for (HiveParserQBExpr qbexpr : aliasToSubq.values()) {
if (!qbexpr.isSimpleSelectQuery()) {
return false;
}
}
return true;
} | 3.68 |
flink_EncodingFormat_listWritableMetadata | /**
* Returns the map of metadata keys and their corresponding data types that can be consumed by
* this format for writing. By default, this method returns an empty map.
*
* <p>Metadata columns add additional columns to the table's schema. An encoding format is
* responsible to accept requested metadata columns at the end of consumed rows and persist
* them.
*
* <p>See {@link SupportsWritingMetadata} for more information.
*
* <p>Note: This method is only used if the outer {@link DynamicTableSink} implements {@link
* SupportsWritingMetadata} and calls this method in {@link
* SupportsWritingMetadata#listWritableMetadata()}.
*/
default Map<String, DataType> listWritableMetadata() {
return Collections.emptyMap();
} | 3.68 |
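
A hedged sketch of how a custom `EncodingFormat` implementation might override the default above to expose a single writable metadata column; the "timestamp" key is an assumption for illustration, not a key defined by any particular connector.

```java
import java.util.Collections;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

// Inside a custom EncodingFormat implementation:
@Override
public Map<String, DataType> listWritableMetadata() {
    // Hypothetical metadata key persisted at the end of each consumed row.
    return Collections.singletonMap("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3));
}
```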
hudi_BaseHoodieWriteClient_reOrderColPosition | /**
* Reorders the position of a column.
*
* @param colName column which needs to be reordered. If the column is a nested field, the full name should be specified.
* @param referColName reference position.
* @param orderType column position change type. Three change types are currently supported: first/after/before
*/
public void reOrderColPosition(String colName, String referColName, TableChange.ColumnPositionChange.ColumnPositionType orderType) {
if (colName == null || orderType == null || referColName == null) {
return;
}
//get internalSchema
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft())
.applyReOrderColPositionChange(colName, referColName, orderType);
commitTableChange(newSchema, pair.getRight());
} | 3.68 |
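
A hedged usage sketch of the call above; `writeClient` and the column names are placeholders, and the order type constant follows the first/after/before options mentioned in the javadoc.

```java
// Move the nested column "user.address" so it sits directly after "user.name" (placeholder names).
writeClient.reOrderColPosition(
    "user.address",
    "user.name",
    TableChange.ColumnPositionChange.ColumnPositionType.AFTER);
```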
dubbo_TriHttp2RemoteFlowController_markedWritability | /**
* Save the state of writability.
*/
void markedWritability(boolean isWritable) {
this.markedWritable = isWritable;
} | 3.68 |
framework_SelectorPredicate_setIndex | /**
* @param index
* the index to set
*/
public void setIndex(int index) {
this.index = index;
} | 3.68 |
flink_BinaryRowWriter_reset | /** First, reset. */
@Override
public void reset() {
this.cursor = fixedSize;
for (int i = 0; i < nullBitsSizeInBytes; i += 8) {
segment.putLong(i, 0L);
}
} | 3.68 |
hadoop_JobTokenIdentifier_getKind | /** {@inheritDoc} */
@Override
public Text getKind() {
return KIND_NAME;
} | 3.68 |
flink_ExecutionEnvironment_setRestartStrategy | /**
* Sets the restart strategy configuration. The configuration specifies which restart strategy
* will be used for the execution graph in case of a restart.
*
* @param restartStrategyConfiguration Restart strategy configuration to be set
*/
@PublicEvolving
public void setRestartStrategy(
RestartStrategies.RestartStrategyConfiguration restartStrategyConfiguration) {
config.setRestartStrategy(restartStrategyConfiguration);
} | 3.68 |
framework_DragAndDropWrapper_getDragImageComponent | /**
* Gets the component that will be used as the drag image. Only used when
* wrapper is set to {@link DragStartMode#COMPONENT_OTHER}
*
* @return <code>null</code> if no component is set.
*/
public Component getDragImageComponent() {
return dragImageComponent;
} | 3.68 |
querydsl_JDOExpressions_selectDistinct | /**
* Create a new detached {@link JDOQuery} instance with the given projection
*
* @param exprs projection
* @return select(distinct exprs)
*/
public static JDOQuery<Tuple> selectDistinct(Expression<?>... exprs) {
return select(exprs).distinct();
} | 3.68 |
hadoop_RemoteMethod_getMethodName | /**
* Get the name of the method.
*
* @return Name of the method.
*/
public String getMethodName() {
return this.methodName;
} | 3.68 |
flink_JobEdge_setUpstreamSubtaskStateMapper | /**
* Sets the channel state rescaler used for rescaling persisted data on upstream side of this
* JobEdge.
*
* @param upstreamSubtaskStateMapper The channel state rescaler selector to use.
*/
public void setUpstreamSubtaskStateMapper(SubtaskStateMapper upstreamSubtaskStateMapper) {
this.upstreamSubtaskStateMapper = checkNotNull(upstreamSubtaskStateMapper);
} | 3.68 |
hadoop_OBSFileSystem_listFiles | /**
* List the statuses and block locations of the files in the given path. The
* returned iterator is not guaranteed to traverse the file statuses in
* sorted order.
*
* <pre>
* If the path is a directory,
* if recursive is false, returns files in the directory;
* if recursive is true, return files in the subtree rooted at the path.
* If the path is a file, return the file's status and block locations.
* </pre>
*
* @param f a path
* @param recursive if the subdirectories need to be traversed recursively
* @return an iterator that traverses statuses of the files/directories in the
* given path
* @throws FileNotFoundException if {@code path} does not exist
* @throws IOException if any I/O error occurred
*/
@Override
public RemoteIterator<LocatedFileStatus> listFiles(final Path f,
final boolean recursive)
throws FileNotFoundException, IOException {
Path path = OBSCommonUtils.qualify(this, f);
LOG.debug("listFiles({}, {})", path, recursive);
try {
// lookup dir triggers existence check
final FileStatus fileStatus = getFileStatus(path);
if (fileStatus.isFile()) {
// simple case: File
LOG.debug("Path is a file");
return new OBSListing
.SingleStatusRemoteIterator(
OBSCommonUtils.toLocatedFileStatus(this, fileStatus));
} else {
LOG.debug(
"listFiles: doing listFiles of directory {} - recursive {}",
path, recursive);
// directory: do a bulk operation
String key = OBSCommonUtils.maybeAddTrailingSlash(
OBSCommonUtils.pathToKey(this, path));
String delimiter = recursive ? null : "/";
LOG.debug("Requesting all entries under {} with delimiter '{}'",
key, delimiter);
return obsListing.createLocatedFileStatusIterator(
obsListing.createFileStatusListingIterator(
path,
OBSCommonUtils.createListObjectsRequest(this, key,
delimiter),
OBSListing.ACCEPT_ALL,
new OBSListing.AcceptFilesOnly(path)));
}
} catch (ObsException e) {
throw OBSCommonUtils.translateException("listFiles", path, e);
}
} | 3.68 |
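
A hedged usage sketch of the listing call above through the standard Hadoop `FileSystem` API; the bucket path is a placeholder and assumes the OBS connector is registered for the `obs://` scheme.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesExample {
  public static void main(String[] args) throws Exception {
    Path dir = new Path("obs://my-bucket/data/"); // placeholder path
    FileSystem fs = dir.getFileSystem(new Configuration());
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, true); // recursive listing
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}
```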
querydsl_JTSGeometryExpression_intersection | /**
* Returns a geometric object that represents the Point set intersection of this geometric
* object with anotherGeometry.
*
* @param geometry other geometry
* @return intersection of this and the other geometry
*/
public JTSGeometryExpression<Geometry> intersection(Expression<? extends Geometry> geometry) {
return JTSGeometryExpressions.geometryOperation(SpatialOps.INTERSECTION, mixin, geometry);
} | 3.68 |
hadoop_DelegationTokenIdentifier_toStringStable | /*
* A frozen version of toString() to be used to be backward compatible.
* When backward compatibility is not needed, use toString(), which provides
* more info and is supposed to evolve, see HDFS-9732.
* Don't change this method except for major revisions.
*
* NOTE:
* Currently this method is used by CLI for backward compatibility.
*/
@Override
public String toStringStable() {
StringBuilder sbld = new StringBuilder();
sbld
.append(getKind()).append(" token ").append(getSequenceNumber())
.append(" for ").append(getUser().getShortUserName())
.append(" with renewer ").append(getRenewer());
return sbld.toString();
} | 3.68 |
flink_DeclarativeAggregateFunction_operand | /**
* Arg of accumulate and retract: the input value (usually obtained from a newly arrived record).
*/
public final UnresolvedReferenceExpression operand(int i) {
String name = String.valueOf(i);
if (getAggBufferNames().contains(name)) {
throw new IllegalStateException(
String.format("Agg buffer name(%s) should not same to operands.", name));
}
return unresolvedRef(name);
} | 3.68 |
flink_SourceTestSuiteBase_testIdleReader | /**
* Test connector source with an idle reader.
*
* <p>This test will create 4 splits in the external system, write test data to all splits, and
* consume them back via a Flink job with parallelism 5, so at least one parallelism / source reader
* will be idle (assigned no splits). If the split enumerator of the source doesn't signal
* NoMoreSplitsEvent to the idle source reader, the Flink job will never transition to the FINISHED state.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*
* <p>A bounded source is required for this test.
*/
@TestTemplate
@DisplayName("Test source with at least one idle parallelism")
public void testIdleReader(
TestEnvironment testEnv,
DataStreamSourceExternalContext<T> externalContext,
CheckpointingMode semantic)
throws Exception {
// Step 1: Preparation
TestingSourceSettings sourceSettings =
TestingSourceSettings.builder()
.setBoundedness(Boundedness.BOUNDED)
.setCheckpointingMode(semantic)
.build();
TestEnvironmentSettings envOptions =
TestEnvironmentSettings.builder()
.setConnectorJarPaths(externalContext.getConnectorJarPaths())
.build();
Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
// Step 2: Write test data to external system
int splitNumber = 4;
List<List<T>> testRecordsLists = new ArrayList<>();
for (int i = 0; i < splitNumber; i++) {
testRecordsLists.add(generateAndWriteTestData(i, externalContext, sourceSettings));
}
// Step 3: Build and execute Flink job
StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
DataStreamSource<T> stream =
execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
.setParallelism(splitNumber + 1);
CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
JobClient jobClient = submitJob(execEnv, "Idle Reader Test");
// Step 4: Validate test data
try (CloseableIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
LOG.info("Checking test results");
checkResultWithSemantic(resultIterator, testRecordsLists, semantic, null);
}
// Step 5: Clean up
waitForJobStatus(jobClient, singletonList(JobStatus.FINISHED));
} | 3.68 |
framework_FilesystemContainer_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
if ("".equals(file.getName())) {
return file.getAbsolutePath();
}
return file.getName();
} | 3.68 |
flink_ProjectOperator_projectTuple2 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1> ProjectOperator<T, Tuple2<T0, T1>> projectTuple2() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple2<T0, T1>> tType = new TupleTypeInfo<Tuple2<T0, T1>>(fTypes);
return new ProjectOperator<T, Tuple2<T0, T1>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
flink_ZooKeeperUtils_treeCacheSelectorForPath | /**
* Returns a {@link TreeCacheSelector} that only accepts a specific node.
*
* @param fullPath node to accept
* @return tree cache selector
*/
private static TreeCacheSelector treeCacheSelectorForPath(String fullPath) {
return new TreeCacheSelector() {
@Override
public boolean traverseChildren(String childPath) {
return false;
}
@Override
public boolean acceptChild(String childPath) {
return fullPath.equals(childPath);
}
};
} | 3.68 |
hbase_LruBlockCache_evict | /**
* Eviction method.
*/
void evict() {
// Ensure only one eviction at a time
if (!evictionLock.tryLock()) {
return;
}
try {
evictionInProgress = true;
long currentSize = this.size.get();
long bytesToFree = currentSize - minSize();
if (LOG.isTraceEnabled()) {
LOG.trace("Block cache LRU eviction started; Attempting to free "
+ StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize));
}
if (bytesToFree <= 0) {
return;
}
// Instantiate priority buckets
BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize());
BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize());
BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize());
// Scan entire map putting into appropriate buckets
for (LruCachedBlock cachedBlock : map.values()) {
switch (cachedBlock.getPriority()) {
case SINGLE: {
bucketSingle.add(cachedBlock);
break;
}
case MULTI: {
bucketMulti.add(cachedBlock);
break;
}
case MEMORY: {
bucketMemory.add(cachedBlock);
break;
}
}
}
long bytesFreed = 0;
if (forceInMemory || memoryFactor > 0.999f) {
long s = bucketSingle.totalSize();
long m = bucketMulti.totalSize();
if (bytesToFree > (s + m)) {
// this means we need to evict blocks in memory bucket to make room,
// so the single and multi buckets will be emptied
bytesFreed = bucketSingle.free(s);
bytesFreed += bucketMulti.free(m);
if (LOG.isTraceEnabled()) {
LOG.trace(
"freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets");
}
bytesFreed += bucketMemory.free(bytesToFree - bytesFreed);
if (LOG.isTraceEnabled()) {
LOG.trace(
"freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets ");
}
} else {
// this means no need to evict block in memory bucket,
// and we try best to make the ratio between single-bucket and
// multi-bucket is 1:2
long bytesRemain = s + m - bytesToFree;
if (3 * s <= bytesRemain) {
// single-bucket is small enough that no eviction happens for it
// hence all eviction goes from multi-bucket
bytesFreed = bucketMulti.free(bytesToFree);
} else if (3 * m <= 2 * bytesRemain) {
// multi-bucket is small enough that no eviction happens for it
// hence all eviction goes from single-bucket
bytesFreed = bucketSingle.free(bytesToFree);
} else {
// both buckets need to evict some blocks
bytesFreed = bucketSingle.free(s - bytesRemain / 3);
if (bytesFreed < bytesToFree) {
bytesFreed += bucketMulti.free(bytesToFree - bytesFreed);
}
}
}
} else {
PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<>(3);
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
int remainingBuckets = bucketQueue.size();
BlockBucket bucket;
while ((bucket = bucketQueue.poll()) != null) {
long overflow = bucket.overflow();
if (overflow > 0) {
long bucketBytesToFree =
Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
bytesFreed += bucket.free(bucketBytesToFree);
}
remainingBuckets--;
}
}
if (LOG.isTraceEnabled()) {
long single = bucketSingle.totalSize();
long multi = bucketMulti.totalSize();
long memory = bucketMemory.totalSize();
LOG.trace(
"Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed)
+ ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single="
+ StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", "
+ "memory=" + StringUtils.byteDesc(memory));
}
} finally {
stats.evict();
evictionInProgress = false;
evictionLock.unlock();
}
} | 3.68 |
framework_GridElement_getCell | /**
* Gets cell element with given row and column index.
*
* @param rowIndex
* Row index
* @param colIndex
* Column index
* @return Cell element with given indices.
*/
public GridCellElement getCell(int rowIndex, int colIndex) {
scrollToRow(rowIndex);
return getSubPart("#cell[" + rowIndex + "][" + colIndex + "]")
.wrap(GridCellElement.class);
} | 3.68 |
pulsar_ReaderConfiguration_setReaderName | /**
* Set the reader name.
*
* @param readerName
*/
public ReaderConfiguration setReaderName(String readerName) {
checkArgument(StringUtils.isNotBlank(readerName));
conf.setReaderName(readerName);
return this;
} | 3.68 |
querydsl_GeometryExpressions_fromText | /**
* Return a specified ST_Geometry value from Well-Known Text representation (WKT).
*
* @param text WKT form
* @return geometry
*/
public static GeometryExpression<?> fromText(Expression<String> text) {
return geometryOperation(SpatialOps.GEOM_FROM_TEXT, text);
} | 3.68 |
hbase_HFileBlock_putHeader | /**
* Put the header into the given byte array at the given offset.
* @param onDiskSize size of the block on disk header + data + checksum
* @param uncompressedSize size of the block after decompression (but before optional data block
* decoding) including header
* @param onDiskDataSize size of the block on disk with header and data but not including the
* checksums
*/
private void putHeader(byte[] dest, int offset, int onDiskSize, int uncompressedSize,
int onDiskDataSize) {
offset = blockType.put(dest, offset);
offset = Bytes.putInt(dest, offset, onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE);
offset = Bytes.putInt(dest, offset, uncompressedSize - HConstants.HFILEBLOCK_HEADER_SIZE);
offset = Bytes.putLong(dest, offset, prevOffset);
offset = Bytes.putByte(dest, offset, fileContext.getChecksumType().getCode());
offset = Bytes.putInt(dest, offset, fileContext.getBytesPerChecksum());
Bytes.putInt(dest, offset, onDiskDataSize);
} | 3.68 |
framework_RpcManager_getMethod | /**
* Gets the method that an invocation targets.
*
* @param invocation
* the method invocation to get the method for
*
* @since 7.4
* @return the method targeted by this invocation
*/
public static Method getMethod(MethodInvocation invocation) {
// Implemented here instead of in MethodInovcation since it's in shared
// and can't use our Method class.
Type type = new Type(invocation.getInterfaceName(), null);
Method method = type.getMethod(invocation.getMethodName());
return method;
} | 3.68 |
flink_SubtaskCheckpointCoordinatorImpl_cancelAsyncCheckpointRunnable | /**
* Cancel the async checkpoint runnable with given checkpoint id. If given checkpoint id is not
* registered, return false, otherwise return true.
*/
private boolean cancelAsyncCheckpointRunnable(long checkpointId) {
AsyncCheckpointRunnable asyncCheckpointRunnable;
synchronized (lock) {
asyncCheckpointRunnable = checkpoints.remove(checkpointId);
}
if (asyncCheckpointRunnable != null) {
asyncOperationsThreadPool.execute(() -> closeQuietly(asyncCheckpointRunnable));
}
return asyncCheckpointRunnable != null;
} | 3.68 |
framework_VScrollTable_rowKeyIsSelected | /**
* Checks if the row represented by the row key has been selected.
*
* @param rowKey
* The generated row key
* @return true if the row is selected, false otherwise
*/
private boolean rowKeyIsSelected(int rowKey) {
// Check single selections
if (selectedRowKeys.contains("" + rowKey)) {
return true;
}
// Check range selections
for (SelectionRange r : selectedRowRanges) {
if (r.inRange(getRenderedRowByKey("" + rowKey))) {
return true;
}
}
return false;
} | 3.68 |
hudi_SimpleBloomFilter_readFields | //@Override
public void readFields(DataInput in) throws IOException {
filter = new InternalBloomFilter();
filter.readFields(in);
} | 3.68 |
flink_EnvironmentInformation_getGitCommitTime | /** @return The Instant of the last commit of this code. */
public static Instant getGitCommitTime() {
return getVersionsInstance().gitCommitTime;
} | 3.68 |
hbase_StoreFileWriter_appendTrackedTimestampsToMetadata | /**
* Add TimestampRange and earliest put timestamp to Metadata
*/
public void appendTrackedTimestampsToMetadata() throws IOException {
// TODO: The StoreFileReader always converts the byte[] to TimeRange
// via TimeRangeTracker, so we should write the serialization data of TimeRange directly.
appendFileInfo(TIMERANGE_KEY, TimeRangeTracker.toByteArray(timeRangeTracker));
appendFileInfo(EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs));
} | 3.68 |
framework_AbstractListing_writeItems | /**
* Writes the data source items to a design. Hierarchical select components
* should override this method to only write the root items.
*
* @param design
* the element into which to insert the items
* @param context
* the DesignContext instance used in writing
*/
protected void writeItems(Element design, DesignContext context) {
internalGetDataProvider().fetch(new Query<>())
.forEach(item -> writeItem(design, item, context));
} | 3.68 |
flink_SegmentsUtil_hashByWords | /**
* Hash segments to int; numBytes must be aligned to 4 bytes.
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param numBytes the number bytes to hash.
*/
public static int hashByWords(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
return MurmurHashUtil.hashBytesByWords(segments[0], offset, numBytes);
} else {
return hashMultiSegByWords(segments, offset, numBytes);
}
} | 3.68 |
hbase_CacheConfig_setCacheDataOnWrite | /**
* @param cacheDataOnWrite whether data blocks should be written to the cache when an HFile is
* written
*/
public void setCacheDataOnWrite(boolean cacheDataOnWrite) {
this.cacheDataOnWrite = cacheDataOnWrite;
} | 3.68 |
morf_ChangeIndex_applyChange | /**
* Changes an index from the start point to the end point.
*
* @param schema {@link Schema} to apply the change against resulting in new
* metadata.
* @param indexStartPoint the start definition for the index
* @param indexEndPoint the end definition for the index
* @return MetaData with {@link SchemaChange} applied.
*/
private Schema applyChange(Schema schema, Index indexStartPoint, Index indexEndPoint) {
// Check the state
if (indexStartPoint == null) {
throw new IllegalStateException("Cannot change a null index to have a new definition");
}
if (indexEndPoint == null) {
throw new IllegalStateException(String.format("Cannot change index [%s] to be null", indexStartPoint.getName()));
}
// Now setup the new table definition
Table original = schema.getTable(tableName);
boolean foundMatch = false;
// Copy the index names into a list of strings for column sort order
List<String> indexes = new ArrayList<>();
for (Index index : original.indexes()) {
String currentIndexName = index.getName();
// If we're looking at the index being changed...
if (currentIndexName.equalsIgnoreCase(indexStartPoint.getName())) {
// Substitute in the new index
currentIndexName = indexEndPoint.getName();
foundMatch = true;
}
for (String existing : indexes) {
if (existing.equalsIgnoreCase(currentIndexName)) {
throw new IllegalArgumentException(String.format("Cannot change index name from [%s] to [%s] on table [%s] as index with that name already exists", indexStartPoint.getName(), indexEndPoint.getName(), tableName));
}
}
indexes.add(currentIndexName);
}
if (!foundMatch) {
throw new IllegalArgumentException(String.format("Cannot change index [%s] as it does not exist on table [%s]", indexStartPoint.getName(), tableName));
}
return new TableOverrideSchema(schema, new AlteredTable(original, null, null, indexes, Arrays.asList(new Index[] {indexEndPoint})));
} | 3.68 |
hbase_RequestConverter_buildGetOnlineRegionRequest | /**
* Create a protocol buffer GetOnlineRegionRequest
* @return a protocol buffer GetOnlineRegionRequest
*/
public static GetOnlineRegionRequest buildGetOnlineRegionRequest() {
return GetOnlineRegionRequest.newBuilder().build();
} | 3.68 |
morf_AbstractSetOperator_validateFields | /**
* Don't allow {@code childSelect} to have a different number of fields from
* {@code parentSelect}.
* <p>
* The column names from the parent select statement are used as the column
* names for the results returned. Selected columns listed in corresponding
* positions of each SELECT statement should have the same data type.
* </p>
*
* @param parentSelect the select statement to be compared against.
* @param childSelect the select statement to be validated.
*/
void validateFields(SelectStatement parentSelect, SelectStatement childSelect) throws IllegalArgumentException {
if (parentSelect.getFields().size() != childSelect.getFields().size()) {
throw new IllegalArgumentException("A set operator requires selecting the same number of fields on both select statements");
}
} | 3.68 |
hudi_HoodieMetadataFileSystemView_listPartition | /**
* Return all the files in the partition by reading from the Metadata Table.
*
* @param partitionPath The absolute path of the partition
* @throws IOException if the files cannot be listed from the metadata table
*/
@Override
protected FileStatus[] listPartition(Path partitionPath) throws IOException {
return tableMetadata.getAllFilesInPartition(partitionPath);
} | 3.68 |
framework_HorizontalLayout_addComponentsAndExpand | /**
* Adds the given components to this layout and sets them as expanded. The
* width of all added child components is set to 100% so that the expansion
* will be effective. The width of this layout is also set to 100% if it is
* currently undefined.
* <p>
* The components are added in the provided order to the end of this layout.
* Any components that are already children of this layout will be moved to
* new positions.
*
* @param components
* the components to set, not <code>null</code>
* @since 8.0
*/
public void addComponentsAndExpand(Component... components) {
addComponents(components);
if (getWidth() < 0) {
setWidth(100, Unit.PERCENTAGE);
}
for (Component child : components) {
child.setWidth(100, Unit.PERCENTAGE);
setExpandRatio(child, 1);
}
} | 3.68 |
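
A brief usage sketch of the helper above with standard Vaadin 8 components; the component choices are illustrative.

```java
import com.vaadin.ui.Button;
import com.vaadin.ui.HorizontalLayout;
import com.vaadin.ui.TextField;

// Inside a UI or view constructor:
HorizontalLayout toolbar = new HorizontalLayout();
TextField search = new TextField();
Button go = new Button("Go");
// Both children are added with 100% width and expand ratio 1;
// the layout's own width becomes 100% if it was undefined.
toolbar.addComponentsAndExpand(search, go);
```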
flink_RecordWriter_notifyFlusherException | /**
* Notifies the writer that the output flusher thread encountered an exception.
*
* @param t The exception to report.
*/
private void notifyFlusherException(Throwable t) {
if (flusherException == null) {
LOG.error("An exception happened while flushing the outputs", t);
flusherException = t;
volatileFlusherException = t;
}
} | 3.68 |
dubbo_ServiceInstanceMetadataUtils_setMetadataStorageType | /**
* Set the metadata storage type in the specified {@link ServiceInstance service instance}
*
* @param serviceInstance {@link ServiceInstance service instance}
* @param metadataType remote or local
*/
public static void setMetadataStorageType(ServiceInstance serviceInstance, String metadataType) {
Map<String, String> metadata = serviceInstance.getMetadata();
metadata.put(METADATA_STORAGE_TYPE_PROPERTY_NAME, metadataType);
} | 3.68 |
framework_ClickableRenderer_findCell | /**
* Returns the cell the given element belongs to.
*
* @param grid
* the grid instance that is queried
* @param e
* a cell element or the descendant of one
* @return the cell or null if the element is not a grid cell or a
* descendant of one
*/
private static <T> CellReference<T> findCell(Grid<T> grid, Element e) {
RowContainer container = getEscalator(grid).findRowContainer(e);
if (container == null) {
return null;
}
Cell cell = container.getCell(e);
EventCellReference<T> cellReference = new EventCellReference<T>(
grid);
// FIXME: Section is currently always body. Might be useful for the
// future to have an actual check.
cellReference.set(cell, Section.BODY);
return cellReference;
} | 3.68 |
hadoop_EvaluatingStatisticsMap_addFunction | /**
* Adds a mapping of a key to a function.
* @param key the key
* @param eval the evaluator
*/
void addFunction(String key, Function<String, E> eval) {
evaluators.put(key, eval);
} | 3.68 |
querydsl_MathExpressions_ln | /**
* Create a {@code ln(num)} expression
*
* <p>Returns the natural logarithm of num.</p>
*
* @param num numeric expression
* @return ln(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> ln(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.LN, num);
} | 3.68 |
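
A short usage sketch of the expression factory above; `QEmployee` is a hypothetical Querydsl-generated query type with a numeric `salary` path.

```java
// Assumes a generated QEmployee with a NumberPath<java.math.BigDecimal> "salary".
QEmployee employee = QEmployee.employee;
NumberExpression<Double> logSalary = MathExpressions.ln(employee.salary);
```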
flink_Pattern_times | /**
* Specifies that the pattern can occur between from and to times, with a time interval that corresponds
* to the maximum time gap between the previous and current event for each occurrence.
*
* @param from number of times matching event must appear at least
* @param to number of times matching event must appear at most
* @param windowTime time of the matching window between times
* @return The same pattern with the number of times range applied
* @throws MalformedPatternException if the quantifier is not applicable to this pattern.
*/
public Pattern<T, F> times(int from, int to, @Nullable Time windowTime) {
checkIfNoNotPattern();
checkIfQuantifierApplied();
this.quantifier = Quantifier.times(quantifier.getConsumingStrategy());
if (from == 0) {
this.quantifier.optional();
from = 1;
}
this.times = Times.of(from, to, windowTime);
return this;
} | 3.68 |
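
A hedged usage sketch of the quantifier above in the Flink CEP pattern API; `Event` is a hypothetical POJO with a `getName()` accessor.

```java
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.streaming.api.windowing.time.Time;

// "Event" is a hypothetical POJO with a getName() accessor.
Pattern<Event, Event> clicks =
    Pattern.<Event>begin("clicks")
        .where(new SimpleCondition<Event>() {
            @Override
            public boolean filter(Event event) {
                return "click".equals(event.getName());
            }
        })
        // Match 2 to 4 occurrences, allowing at most 10 seconds between consecutive matching events.
        .times(2, 4, Time.seconds(10));
```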
hadoop_AbfsConfiguration_getMandatoryPasswordString | /**
* Returns a value for the key if the value exists and is not null.
* Otherwise, throws {@link ConfigurationPropertyNotFoundException} with
* key name.
*
* @param key Account-agnostic configuration key
* @return value if exists
* @throws IOException if error in fetching password or
* ConfigurationPropertyNotFoundException for missing key
*/
private String getMandatoryPasswordString(String key) throws IOException {
String value = getPasswordString(key);
if (value == null) {
throw new ConfigurationPropertyNotFoundException(key);
}
return value;
} | 3.68 |
hbase_FilterWrapper_areSerializedFieldsEqual | /**
* @param o the other filter to compare with
* @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) return true;
if (!(o instanceof FilterWrapper)) return false;
FilterWrapper other = (FilterWrapper) o;
return this.filter.areSerializedFieldsEqual(other.filter);
} | 3.68 |
flink_LinkedOptionalMap_optionalMapOf | /**
* Creates an {@code LinkedOptionalMap} from the provided map.
*
* <p>This method is the equivalent of {@link Optional#of(Object)} but for maps. To support more
* than one {@code NULL} key, an optional map requires a unique string name to be associated
* with each key (provided by keyNameGetter)
*
* @param sourceMap a source map to wrap as an optional map.
* @param keyNameGetter function that assigns a unique name to the keys of the source map.
* @param <K> key type
* @param <V> value type
* @return an {@code LinkedOptionalMap} with optional named keys, and optional values.
*/
public static <K, V> LinkedOptionalMap<K, V> optionalMapOf(
Map<K, V> sourceMap, Function<K, String> keyNameGetter) {
LinkedHashMap<String, KeyValue<K, V>> underlyingMap =
CollectionUtil.newLinkedHashMapWithExpectedSize(sourceMap.size());
sourceMap.forEach(
(k, v) -> {
String keyName = keyNameGetter.apply(k);
underlyingMap.put(keyName, new KeyValue<>(k, v));
});
return new LinkedOptionalMap<>(underlyingMap);
} | 3.68 |
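
A short usage sketch of the factory above, wrapping a class-keyed map and deriving a stable string name for each key:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Inside a method:
Map<Class<?>, String> source = new LinkedHashMap<>();
source.put(String.class, "string serializer");
source.put(Long.class, "long serializer");

// Each key is stored under its class name, so entries can still be represented
// even if a key cannot be resolved when the map is read back.
LinkedOptionalMap<Class<?>, String> optional =
    LinkedOptionalMap.optionalMapOf(source, Class::getName);
```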
hbase_MunkresAssignment_primeInRow | /**
* Find a primed zero in the specified row. If there are no primed zeroes in the specified row,
* then null will be returned.
* @param r the index of the row to be searched
* @return pair of row and column indices of primed zero or null
*/
private Pair<Integer, Integer> primeInRow(int r) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == PRIME) {
return new Pair<>(r, c);
}
}
return null;
} | 3.68 |
hbase_MapReduceHFileSplitterJob_createSubmittableJob | /**
* Sets up the actual job.
* @param args The command line parameters.
* @return The newly created job.
* @throws IOException When setting up the job fails.
*/
public Job createSubmittableJob(String[] args) throws IOException {
Configuration conf = getConf();
String inputDirs = args[0];
String tabName = args[1];
conf.setStrings(TABLES_KEY, tabName);
conf.set(FileInputFormat.INPUT_DIR, inputDirs);
Job job = Job.getInstance(conf,
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime()));
job.setJarByClass(MapReduceHFileSplitterJob.class);
job.setInputFormatClass(HFileInputFormat.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
if (hfileOutPath != null) {
LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs);
TableName tableName = TableName.valueOf(tabName);
job.setMapperClass(HFileCellMapper.class);
job.setReducerClass(CellSortReducer.class);
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
job.setMapOutputValueClass(MapReduceExtendedCell.class);
try (Connection conn = ConnectionFactory.createConnection(conf);
Table table = conn.getTable(tableName);
RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
}
LOG.debug("success configuring load incremental job");
TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class);
} else {
throw new IOException("No bulk output directory specified");
}
return job;
} | 3.68 |
framework_FieldGroup_findPropertyId | /**
* Searches for a property id from the current itemDataSource that matches
* the given memberField.
* <p>
* If a perfect match is not found, uses a case-insensitive search that also
* ignores underscores. Returns null if no match is found. Throws a
* SearchException if no item data source has been set.
* </p>
* <p>
* The propertyId search logic used by
* {@link #buildAndBindMemberFields(Object, boolean)
* buildAndBindMemberFields} can easily be customized by overriding this
* method. No other changes are needed.
* </p>
*
* @param memberField
* The field an object id is searched for
* @return the matching property id, or null if no match is found
*/
protected Object findPropertyId(java.lang.reflect.Field memberField) {
String fieldName = memberField.getName();
if (getItemDataSource() == null) {
throw new SearchException("Property id type for field '" + fieldName
+ "' could not be determined. No item data source has been set.");
}
Item dataSource = getItemDataSource();
if (dataSource.getItemProperty(fieldName) != null) {
return fieldName;
} else {
String minifiedFieldName = minifyFieldName(fieldName);
for (Object itemPropertyId : dataSource.getItemPropertyIds()) {
if (itemPropertyId instanceof String) {
String itemPropertyName = (String) itemPropertyId;
if (minifiedFieldName
.equals(minifyFieldName(itemPropertyName))) {
return itemPropertyName;
}
}
}
}
return null;
} | 3.68 |
hbase_ConnectionCache_getAdmin | /**
* Caller doesn't close the admin afterwards. We need to manage it and close it properly.
*/
public Admin getAdmin() throws IOException {
ConnectionInfo connInfo = getCurrentConnection();
if (connInfo.admin == null) {
Lock lock = locker.acquireLock(getEffectiveUser());
try {
if (connInfo.admin == null) {
connInfo.admin = connInfo.connection.getAdmin();
}
} finally {
lock.unlock();
}
}
return connInfo.admin;
} | 3.68 |
flink_DependencyParser_getDepth | /**
* The depths returned by this method do NOT form a continuous sequence.
*
* <pre>
* +- org.apache.flink:...
* | +- org.apache.flink:...
* | | \- org.apache.flink:...
* ...
* </pre>
*/
private static int getDepth(String line) {
final int level = line.indexOf('+');
if (level != -1) {
return level;
}
return line.indexOf('\\');
} | 3.68 |
hudi_AbstractTableFileSystemView_addFilesToView | /**
* Adds the provided statuses into the file system view, and also caches them inside this object.
*/
public List<HoodieFileGroup> addFilesToView(FileStatus[] statuses) {
HoodieTimer timer = HoodieTimer.start();
List<HoodieFileGroup> fileGroups = buildFileGroups(statuses, visibleCommitsAndCompactionTimeline, true);
long fgBuildTimeTakenMs = timer.endTimer();
timer.startTimer();
// Group by partition for efficient updates for both InMemory and DiskBased structures.
fileGroups.stream().collect(Collectors.groupingBy(HoodieFileGroup::getPartitionPath)).forEach((partition, value) -> {
if (!isPartitionAvailableInStore(partition)) {
if (bootstrapIndex.useIndex()) {
try (BootstrapIndex.IndexReader reader = bootstrapIndex.createReader()) {
LOG.info("Bootstrap Index available for partition " + partition);
List<BootstrapFileMapping> sourceFileMappings =
reader.getSourceFileMappingForPartition(partition);
addBootstrapBaseFileMapping(sourceFileMappings.stream()
.map(s -> new BootstrapBaseFileMapping(new HoodieFileGroupId(s.getPartitionPath(),
s.getFileId()), s.getBootstrapFileStatus())));
}
}
storePartitionView(partition, value);
}
});
long storePartitionsTs = timer.endTimer();
LOG.debug("addFilesToView: NumFiles=" + statuses.length + ", NumFileGroups=" + fileGroups.size()
+ ", FileGroupsCreationTime=" + fgBuildTimeTakenMs
+ ", StoreTimeTaken=" + storePartitionsTs);
return fileGroups;
} | 3.68 |
hudi_BaseTableMetadata_fetchAllFilesInPartition | /**
* Return all the files from the partition.
*
* @param partitionPath The absolute path of the partition
*/
FileStatus[] fetchAllFilesInPartition(Path partitionPath) throws IOException {
String relativePartitionPath = FSUtils.getRelativePartitionPath(dataBasePath.get(), partitionPath);
String recordKey = relativePartitionPath.isEmpty() ? NON_PARTITIONED_NAME : relativePartitionPath;
HoodieTimer timer = HoodieTimer.start();
Option<HoodieRecord<HoodieMetadataPayload>> recordOpt = getRecordByKey(recordKey,
MetadataPartitionType.FILES.getPartitionPath());
metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.LOOKUP_FILES_STR, timer.endTimer()));
FileStatus[] statuses = recordOpt.map(record -> {
HoodieMetadataPayload metadataPayload = record.getData();
checkForSpuriousDeletes(metadataPayload, recordKey);
try {
return metadataPayload.getFileStatuses(getHadoopConf(), partitionPath);
} catch (IOException e) {
throw new HoodieIOException("Failed to extract file-statuses from the payload", e);
}
})
.orElse(new FileStatus[0]);
LOG.info("Listed file in partition from metadata: partition=" + relativePartitionPath + ", #files=" + statuses.length);
return statuses;
} | 3.68 |
hadoop_ShortWritable_compareTo | /** Compares two ShortWritable. */
@Override
public int compareTo(ShortWritable o) {
short thisValue = this.value;
short thatValue = (o).value;
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
} | 3.68 |
hadoop_OBSFileSystem_getSse | /**
* Return server-side encryption wrapper used by this filesystem instance.
*
* @return the server-side encryption wrapper
*/
SseWrapper getSse() {
return sse;
} | 3.68 |
hudi_HoodieFlinkCopyOnWriteTable_deletePrepped | /**
* Delete the given prepared records from the Hoodie table, at the supplied instantTime.
*
* <p>This implementation requires that the input records are already tagged, and de-duped if needed.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context {@link HoodieEngineContext}
* @param instantTime Instant Time for the action
* @param preppedRecords Hoodie records to delete
* @return {@link HoodieWriteMetadata}
*/
public HoodieWriteMetadata<List<WriteStatus>> deletePrepped(
HoodieEngineContext context,
HoodieWriteHandle<?, ?, ?, ?> writeHandle,
String instantTime,
List<HoodieRecord<T>> preppedRecords) {
return new FlinkDeletePreppedCommitActionExecutor<>(context, writeHandle, config, this, instantTime, preppedRecords).execute();
} | 3.68 |
hibernate-validator_ExecutableHelper_getExecutableAsString | /**
* Returns a string representation of an executable with the given name and parameter types in the form
* {@code <name>(<parameterType 0> ... <parameterType n>)}, e.g. for logging purposes.
*
* @param name the name of the executable
* @param parameterTypes the types of the executable's parameters
*
* @return A string representation of the given executable.
*/
public static String getExecutableAsString(String name, Class<?>... parameterTypes) {
StringBuilder signature = new StringBuilder( name.length() + 2 + parameterTypes.length * 25 );
signature.append( name ).append( '(' );
boolean separator = false;
for ( Class<?> parameterType : parameterTypes ) {
if ( separator ) {
signature.append( ", " );
}
else {
separator = true;
}
signature.append( parameterType.getSimpleName() );
}
signature.append( ')' );
return signature.toString();
} | 3.68 |
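
A quick illustration of the formatting helper above; the output follows directly from the code:

```java
String signature = ExecutableHelper.getExecutableAsString("setName", String.class, int.class);
// signature is "setName(String, int)"
```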
graphhopper_LandmarkStorage_setWeight | /**
* @return false if the value capacity was reached and SHORT_MAX was stored instead of the real value.
*/
final boolean setWeight(long pointer, double value) {
double tmpVal = value / factor;
if (tmpVal > Integer.MAX_VALUE)
throw new UnsupportedOperationException("Cannot store infinity explicitly, pointer=" + pointer + ", value=" + value + ", factor=" + factor);
if (tmpVal >= SHORT_MAX) {
landmarkWeightDA.setShort(pointer, (short) SHORT_MAX);
return false;
} else {
landmarkWeightDA.setShort(pointer, (short) tmpVal);
return true;
}
} | 3.68 |
morf_AbstractSqlDialectTest_testHints | /**
* Check that the optimiser hints work.
*/
@SuppressWarnings("deprecation")
@Test
public void testHints() {
assertEquals(
expectedHints1(1000),
testDialect.convertStatementToSQL(
select()
.from(new TableReference("SCHEMA2", "Foo"))
.innerJoin(new TableReference("Bar"), field("a").eq(field("b")))
.leftOuterJoin(new TableReference("Fo"), field("a").eq(field("b")))
.innerJoin(new TableReference("Fum").as("Fumble"), field("a").eq(field("b")))
.orderBy(field("a"))
.useImplicitJoinOrder()
.optimiseForRowCount(1000)
.useIndex(new TableReference("SCHEMA2", "Foo"), "Foo_1")
.useIndex(new TableReference("SCHEMA2", "Foo").as("aliased"), "Foo_2")
)
);
assertEquals(
expectedHints2(1000),
testDialect.convertStatementToSQL(
select(field("a"), field("b"))
.from(tableRef("Foo"))
.orderBy(field("a"))
.forUpdate()
.useIndex(tableRef("Foo"), "Foo_1")
.optimiseForRowCount(1000)
.useImplicitJoinOrder()
.withParallelQueryPlan()
.allowParallelDml()
.withCustomHint(mock(CustomHint.class))
)
);
assertEquals(
expectedHints3(),
testDialect.convertStatementToSQL(
update(tableRef("Foo"))
.set(field("b").as("a"))
.useParallelDml()
)
);
assertEquals(
expectedHints3a(),
testDialect.convertStatementToSQL(
update(tableRef("Foo"))
.set(field("b").as("a"))
.useParallelDml(5)
)
);
assertEquals(
Lists.newArrayList(expectedHints4()),
testDialect.convertStatementToSQL(
insert()
.into(tableRef("Foo"))
.from(select(field("a"), field("b")).from(tableRef("Foo_1")))
.useDirectPath()
)
);
assertEquals(
Lists.newArrayList(expectedHints4a()),
testDialect.convertStatementToSQL(
insert()
.into(tableRef("Foo"))
.from(select(field("a"), field("b")).from(tableRef("Foo_1")))
.avoidDirectPath()
)
);
assertEquals(
Lists.newArrayList(expectedHints4b()),
testDialect.convertStatementToSQL(
insert()
.into(tableRef("Foo"))
.from(select(field("a"), field("b")).from(tableRef("Foo_1")))
.useParallelDml()
)
);
assertEquals(
Lists.newArrayList(expectedHints4c()),
testDialect.convertStatementToSQL(
insert()
.into(tableRef("Foo"))
.from(select(field("a"), field("b")).from(tableRef("Foo_1")))
.useParallelDml(5)
)
);
assertEquals(
Lists.newArrayList(expectedHints5()),
testDialect.convertStatementToSQL(
insert()
.into(tableRef("Foo"))
.from(select(field("a"), field("b")).from(tableRef("Foo_1")))
)
);
assertEquals(
expectedHints6(),
testDialect.convertStatementToSQL(
select(field("a"), field("b"))
.from(tableRef("Foo"))
.orderBy(field("a"))
.withParallelQueryPlan(5)
)
);
assertEquals(
expectedHints6a(),
testDialect.convertStatementToSQL(
select(field("a"), field("b"))
.from(tableRef("Foo"))
.orderBy(field("a"))
.withParallelQueryPlan(5)
.allowParallelDml()
)
);
assertEquals(
expectedHints7(),
testDialect.convertStatementToSQL(
select()
.from(new TableReference("SCHEMA2", "Foo"))
.withCustomHint(provideCustomHint())
)
);
assertEquals(
expectedHints8(),
testDialect.convertStatementToSQL(
select()
.from(new TableReference("SCHEMA2", "Foo"))
.withCustomHint(() -> "CustomHint")
)
);
assertEquals(
expectedHints8a(),
testDialect.convertStatementToSQL(
select()
.from(new TableReference("SCHEMA2", "Foo"))
.withDialectSpecificHint(provideDatabaseType(), "index(customer cust_primary_key_idx)")
.withDialectSpecificHint("SOMETHING_ELSE", "unused_hint()")
)
);
} | 3.68 |
flink_FlinkKubeClientFactory_fromConfiguration | /**
* Create a Flink Kubernetes client with the given configuration.
*
* @param flinkConfig Flink configuration
* @param useCase Flink Kubernetes client use case (e.g. client, resourcemanager,
* kubernetes-ha-services)
* @return Return the Flink Kubernetes client with the specified configuration and dedicated IO
* executor.
*/
public FlinkKubeClient fromConfiguration(Configuration flinkConfig, String useCase) {
final Config config;
final String kubeContext = flinkConfig.getString(KubernetesConfigOptions.CONTEXT);
if (kubeContext != null) {
LOG.info("Configuring kubernetes client to use context {}.", kubeContext);
}
final String kubeConfigFile =
flinkConfig.getString(KubernetesConfigOptions.KUBE_CONFIG_FILE);
if (kubeConfigFile != null) {
LOG.debug("Trying to load kubernetes config from file: {}.", kubeConfigFile);
try {
// If kubeContext is null, the default context in the kubeConfigFile will be used.
                // Note: the third parameter kubeconfigPath is optional and is set to null. It is
                // only used to rewrite relative tls asset paths inside kubeconfig when a file is
                // passed, and in the case that the kubeconfig references some assets via relative
                // paths.
config =
Config.fromKubeconfig(
kubeContext,
FileUtils.readFileUtf8(new File(kubeConfigFile)),
null);
} catch (IOException e) {
throw new KubernetesClientException("Load kubernetes config failed.", e);
}
} else {
LOG.debug("Trying to load default kubernetes config.");
config = Config.autoConfigure(kubeContext);
}
final String namespace = flinkConfig.getString(KubernetesConfigOptions.NAMESPACE);
final String userAgent =
flinkConfig.getString(KubernetesConfigOptions.KUBERNETES_CLIENT_USER_AGENT);
config.setNamespace(namespace);
config.setUserAgent(userAgent);
LOG.debug("Setting Kubernetes client namespace: {}, userAgent: {}", namespace, userAgent);
final NamespacedKubernetesClient client =
new KubernetesClientBuilder()
.withConfig(config)
.build()
.adapt(NamespacedKubernetesClient.class);
final int poolSize =
flinkConfig.get(KubernetesConfigOptions.KUBERNETES_CLIENT_IO_EXECUTOR_POOL_SIZE);
return new Fabric8FlinkKubeClient(
flinkConfig, client, createThreadPoolForAsyncIO(poolSize, useCase));
} | 3.68 |
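For orientation, a hedged usage sketch follows. FlinkKubeClientFactory.getInstance() is assumed to be the factory accessor, and the namespace value is purely illustrative.

// Hedged usage sketch: build a client for the "client" use case.
Configuration flinkConfig = new Configuration();
flinkConfig.set(KubernetesConfigOptions.NAMESPACE, "flink-jobs"); // illustrative namespace
FlinkKubeClient kubeClient =
        FlinkKubeClientFactory.getInstance() // assumed singleton accessor
                .fromConfiguration(flinkConfig, "client");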
AreaShop_BukkitHandler1_12_getSignFacing | // Uses Sign, which is deprecated in 1.13+, broken in 1.14+
@Override
public BlockFace getSignFacing(Block block) {
if (block == null) {
return null;
}
BlockState blockState = block.getState();
if (blockState == null) {
return null;
}
MaterialData materialData = blockState.getData();
if(materialData instanceof org.bukkit.material.Sign) {
return ((org.bukkit.material.Sign)materialData).getFacing();
}
return null;
} | 3.68 |
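Because the comment above flags the MaterialData path as broken on 1.14+, here is a hedged sketch of the BlockData-based variant available from 1.13. The method name is hypothetical, and it assumes wall signs implement org.bukkit.block.data.Directional (standing signs expose a rotation instead of a facing).

// Hedged 1.13+ sketch: read the facing from BlockData instead of the deprecated MaterialData.
public BlockFace getSignFacingModern(Block block) {
    if (block == null) {
        return null;
    }
    org.bukkit.block.data.BlockData data = block.getBlockData();
    if (data instanceof org.bukkit.block.data.Directional) {
        return ((org.bukkit.block.data.Directional) data).getFacing();
    }
    return null; // standing signs (Rotatable) and non-sign blocks fall through here
}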
AreaShop_AreaShop_setDebug | /**
 * Sets whether the plugin should output debug messages (normally loaded from the config).
* @param debug Indicates if the plugin should output debug messages or not
*/
public void setDebug(boolean debug) {
this.debug = debug;
} | 3.68 |
framework_VaadinService_handleSessionExpired | /**
* Called when the session has expired and the request handling is therefore
* aborted.
*
* @param request
* The request
* @param response
* The response
* @throws ServiceException
* Thrown if there was any problem handling the expiration of
* the session
*/
protected void handleSessionExpired(VaadinRequest request,
VaadinResponse response) throws ServiceException {
for (RequestHandler handler : getRequestHandlers()) {
if (handler instanceof SessionExpiredHandler) {
try {
if (((SessionExpiredHandler) handler)
.handleSessionExpired(request, response)) {
return;
}
} catch (IOException e) {
throw new ServiceException(
"Handling of session expired failed", e);
}
}
}
// No request handlers handled the request. Write a normal HTTP response
try {
// If there is a URL, try to redirect there
SystemMessages systemMessages = getSystemMessages(
ServletPortletHelper.findLocale(null, null, request),
request);
String sessionExpiredURL = systemMessages.getSessionExpiredURL();
if (sessionExpiredURL != null
&& (response instanceof VaadinServletResponse)) {
((VaadinServletResponse) response)
.sendRedirect(sessionExpiredURL);
} else {
/*
* Session expired as a result of a standard http request and we
* have nowhere to redirect. Reloading would likely cause an
* endless loop. This can at least happen if refreshing a
* resource when the session has expired.
*/
// Ensure that the browser does not cache expired responses.
// iOS 6 Safari requires this (#3226)
response.setHeader("Cache-Control", "no-cache");
// If Content-Type is not set, browsers assume text/html and may
// complain about the empty response body (#4167)
response.setHeader("Content-Type", "text/plain");
response.sendError(HttpServletResponse.SC_FORBIDDEN,
"Session expired");
}
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
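Based only on the interface usage visible in the loop above (a registered RequestHandler that also implements SessionExpiredHandler), a minimal custom handler might look like the sketch below. The class name, header values, and JSON body are illustrative assumptions.

// Minimal sketch: a handler that answers expired-session requests with JSON instead of
// the default redirect/403 text. Returning true short-circuits the fallback logic above.
public class JsonSessionExpiredHandler implements RequestHandler, SessionExpiredHandler {

    @Override
    public boolean handleSessionExpired(VaadinRequest request, VaadinResponse response)
            throws IOException {
        response.setHeader("Cache-Control", "no-cache");
        response.setHeader("Content-Type", "application/json");
        response.sendError(HttpServletResponse.SC_FORBIDDEN,
                "{\"error\": \"session expired\"}");
        return true;
    }

    @Override
    public boolean handleRequest(VaadinSession session, VaadinRequest request,
            VaadinResponse response) throws IOException {
        return false; // this handler only reacts to expired sessions
    }
}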
hbase_ReplicationThrottler_isEnabled | /**
 * Checks whether throttling is enabled.
* @return true if throttling is enabled
*/
public boolean isEnabled() {
return this.enabled;
} | 3.68 |
hadoop_AbstractS3ACommitter_setOutputPath | /**
* Set the output path.
* @param outputPath new value
*/
protected final void setOutputPath(Path outputPath) {
this.outputPath = requireNonNull(outputPath, "Null output path");
} | 3.68 |
framework_Button_setIcon | /**
* Sets the component's icon and alt text.
* <p>
 * An alt text is shown when an image cannot be loaded, and is read by
 * assistive devices.
*
* @param icon
* the icon to be shown with the component's caption.
* @param iconAltText
* String to use as alt text
*/
public void setIcon(Resource icon, String iconAltText) {
super.setIcon(icon);
getState().iconAltText = iconAltText == null ? "" : iconAltText;
} | 3.68 |
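A one-line usage sketch; the theme resource path is illustrative.

// Illustrative usage: icon plus alt text for assistive devices.
Button save = new Button("Save");
save.setIcon(new ThemeResource("img/save.png"), "Save icon");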
framework_Criterion_setValueType | /**
* Sets the type of the payload value to be compared.
*
* @param valueType
* type of the payload to be compared
*/
public void setValueType(Payload.ValueType valueType) {
this.valueType = valueType;
} | 3.68 |
hbase_HashTable_newReader | /**
* Open a TableHash.Reader starting at the first hash at or after the given key.
*/
public Reader newReader(Configuration conf, ImmutableBytesWritable startKey)
throws IOException {
return new Reader(conf, startKey);
} | 3.68 |
hbase_RequestConverter_buildIsBalancerEnabledRequest | /**
* Creates a protocol buffer IsBalancerEnabledRequest
 * @return an IsBalancerEnabledRequest
*/
public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() {
return IsBalancerEnabledRequest.newBuilder().build();
} | 3.68 |