name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hudi_CompactionUtil_rollbackCompaction | /**
* Forcefully rolls back all the inflight compaction instants, especially for job failover restart.
*
* @param table The hoodie table
*/
public static void rollbackCompaction(HoodieFlinkTable<?> table) {
HoodieTimeline inflightCompactionTimeline = table.getActiveTimeline()
.filterPendingCompactionTimeline()
.filter(instant ->
instant.getState() == HoodieInstant.State.INFLIGHT);
inflightCompactionTimeline.getInstants().forEach(inflightInstant -> {
LOG.info("Rollback the inflight compaction instant: " + inflightInstant + " for failover");
table.rollbackInflightCompaction(inflightInstant);
table.getMetaClient().reloadActiveTimeline();
});
} | 3.68 |
flink_PythonShellParser_constructYarnOption | /**
* Constructs yarn options. The Python shell option adds the prefix 'y' to align with the yarn
* options of `flink run`.
*
* @param options Options that will be used in `flink run`.
* @param yarnOption Python shell yarn options.
* @param commandLine Parsed Python shell parser options.
*/
private static void constructYarnOption(
List<String> options, Option yarnOption, CommandLine commandLine) {
if (commandLine.hasOption(yarnOption.getOpt())) {
options.add("-y" + yarnOption.getOpt());
options.add(commandLine.getOptionValue(yarnOption.getOpt()));
}
} | 3.68 |
morf_SqlDialect_appendHaving | /**
* Appends the HAVING clause to the result.
*
* @param result the builder to which the HAVING clause will be appended
* @param stmt the statement whose HAVING clause should be appended
*/
protected void appendHaving(StringBuilder result, SelectStatement stmt) {
if (stmt.getHaving() != null) {
result.append(" HAVING ");
result.append(getSqlFrom(stmt.getHaving()));
}
} | 3.68 |
hadoop_AbfsPermission_getAclBit | /**
* Returns true if there is also an ACL (access control list).
*
* @return boolean true if there is also an ACL (access control list).
* @deprecated Get acl bit from the {@link org.apache.hadoop.fs.FileStatus}
* object.
*/
public boolean getAclBit() {
return aclBit;
} | 3.68 |
morf_AbstractSqlDialectTest_testDeleteWithLimitAndComplexWhereCriterion | /**
* Tests that a delete string with a limit and a complex where criterion (involving an 'OR') is created correctly (i.e. brackets around the 'OR' are preserved).
*/
@Test
public void testDeleteWithLimitAndComplexWhereCriterion() {
DeleteStatement stmt = DeleteStatement
.delete(new TableReference(TEST_TABLE))
.where(or(eq(new FieldReference(new TableReference(TEST_TABLE), STRING_FIELD), "A001003657"),
eq(new FieldReference(new TableReference(TEST_TABLE), STRING_FIELD), "A001003658")))
.limit(1000)
.build();
String value1 = varCharCast("'A001003657'");
String value2 = varCharCast("'A001003658'");
assertEquals("Delete with 'OR' where clause and limit - NB do not alter brackets incautiously", expectedDeleteWithLimitAndComplexWhere(value1, value2), testDialect.convertStatementToSQL(stmt));
} | 3.68 |
hadoop_IdentifierResolver_getOutputKeyClass | /**
* Returns the resolved output key class.
*/
public Class getOutputKeyClass() {
return outputKeyClass;
} | 3.68 |
flink_TimerGauge_getMaxSingleMeasurement | /**
* @return the longest marked period as measured by the given TimerGauge. For example, the
* longest consecutive back-pressured period.
*/
public synchronized long getMaxSingleMeasurement() {
return previousMaxSingleMeasurement;
} | 3.68 |
morf_DeleteStatementBuilder_limit | /**
* Specifies the limit for the delete statement.
*
* <blockquote><pre>DeleteStatement.delete([table])
* .where([criteria])
* .limit(1000)
* .build();</pre></blockquote>
*
* @param limit the limit on the number of deleted records.
* @return this, for method chaining.
*/
public DeleteStatementBuilder limit(int limit) {
this.limit = Optional.of(limit);
return this;
} | 3.68 |
framework_ListenerMethod_isType | /**
* Compares the type of this ListenerMethod to the given type.
*
* @param eventType
* The type to compare with
* @return true if the type of this ListenerMethod matches the given type,
* false otherwise
*/
public boolean isType(Class<?> eventType) {
return this.eventType == eventType;
} | 3.68 |
framework_AbstractDateField_afterDate | /**
* Checks if {@code value} is after {@code base} or not.
*
* @param value
* temporal value
* @param base
* temporal value to compare to
* @return {@code true} if {@code value} is after {@code base},
* {@code false} otherwise
*/
protected boolean afterDate(T value, T base) {
if (value == null || base == null) {
return false;
}
return value.compareTo(base) > 0;
} | 3.68 |
hbase_SegmentFactory_createCompositeImmutableSegment | // create composite immutable segment from a list of segments
// for snapshot consisting of multiple segments
public CompositeImmutableSegment createCompositeImmutableSegment(final CellComparator comparator,
List<ImmutableSegment> segments) {
return new CompositeImmutableSegment(comparator, segments);
} | 3.68 |
flink_Execution_getTerminalStateFuture | /**
* Gets a future that completes once the task execution reaches a terminal state. The future
* will be completed with specific state that the execution reached. This future is always
* completed from the job master's main thread.
*
* @return A future which is completed once the execution reaches a terminal state
*/
@Override
public CompletableFuture<ExecutionState> getTerminalStateFuture() {
return terminalStateFuture;
} | 3.68 |
hbase_StorageClusterStatusModel_addDeadNode | /**
* Add a dead node to the cluster representation.
* @param node the dead region server's name
*/
public void addDeadNode(String node) {
deadNodes.add(node);
} | 3.68 |
framework_AbstractRendererConnector_getRowKey | /**
* Gets the row key for a row object.
* <p>
* In case this renderer wants to be able to identify a row in such a way that
* the server also understands it, the row key is used for that. Rows are
* identified by unified keys between the client and the server.
*
* @param row
* the row object
* @return the row key for the given row
*/
protected String getRowKey(JsonObject row) {
final ServerConnector parent = getParent();
if (parent instanceof GridConnector) {
return ((GridConnector) parent).getRowKey(row);
} else {
throw new IllegalStateException(
"Renderers can only be used " + "with a Grid.");
}
} | 3.68 |
framework_Window_setTabStopTopAssistiveText | /**
* Sets the message that is provided to users of assistive devices when the
* user reaches the top of the window and leaving the window with the Tab key
* is prevented.
* <p>
* This message is not visible on the screen.
*
* @param topMessage
* String provided when the user navigates with Shift-Tab keys to
* the top of the window
*/
public void setTabStopTopAssistiveText(String topMessage) {
getState().assistiveTabStopTopText = topMessage;
} | 3.68 |
streampipes_ConnectWorkerDescriptionProvider_getAdapterDescriptions | /**
* Retrieves a list of all adapter descriptions that are currently registered.
* @return a list of {@link AdapterDescription} objects representing the registered adapters
*/
public List<AdapterDescription> getAdapterDescriptions() {
return getRegisteredAdapters()
.stream()
.map(adapter -> applyLocales(adapter.declareConfig().getAdapterDescription()))
.toList();
} | 3.68 |
framework_VTabsheetPanel_replaceComponent | /**
* Removes the old component and sets the new component to its place.
*
* @param oldComponent
* the component to remove
* @param newComponent
* the component to add to the old location
*/
public void replaceComponent(Widget oldComponent, Widget newComponent) {
boolean isVisible = (visibleWidget == oldComponent);
int widgetIndex = getWidgetIndex(oldComponent);
remove(oldComponent);
insert(newComponent, widgetIndex);
if (isVisible) {
showWidget(widgetIndex);
}
} | 3.68 |
hbase_LogRollBackupSubprocedure_releaseBarrier | /**
* Hooray!
*/
public void releaseBarrier() {
// NO OP
} | 3.68 |
flink_RocksDBIncrementalRestoreOperation_readMetaData | /** Reads Flink's state meta data file from the state handle. */
private KeyedBackendSerializationProxy<K> readMetaData(StreamStateHandle metaStateHandle)
throws Exception {
InputStream inputStream = null;
try {
inputStream = metaStateHandle.openInputStream();
cancelStreamRegistry.registerCloseable(inputStream);
DataInputView in = new DataInputViewStreamWrapper(inputStream);
return readMetaData(in);
} finally {
if (cancelStreamRegistry.unregisterCloseable(inputStream)) {
inputStream.close();
}
}
} | 3.68 |
hadoop_ManifestSuccessData_getFilenamePaths | /**
* Get the list of filenames as paths.
* @return the paths.
*/
@JsonIgnore
public List<Path> getFilenamePaths() {
return getFilenames().stream()
.map(AbstractManifestData::unmarshallPath)
.collect(Collectors.toList());
} | 3.68 |
hbase_BloomFilterFactory_createGeneralBloomAtWrite | /**
* Creates a new general (Row or RowCol) Bloom filter at the time of
* {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
* @param maxKeys an estimate of the number of keys we expect to insert. Irrelevant if compound
* Bloom filters are enabled.
* @param writer the HFile writer
* @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to
* create one.
*/
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
CacheConfig cacheConf, BloomType bloomType, int maxKeys, HFile.Writer writer) {
if (!isGeneralBloomEnabled(conf)) {
LOG.trace("Bloom filters are disabled by configuration for " + writer.getPath()
+ (conf == null ? " (configuration is null)" : ""));
return null;
} else if (bloomType == BloomType.NONE) {
LOG.trace("Bloom filter is turned off for the column family");
return null;
}
float err = getErrorRate(conf);
// In case of row/column Bloom filter lookups, each lookup is an OR of two
// separate lookups. Therefore, if each lookup's false positive rate is p,
// the resulting false positive rate is err = 1 - (1 - p)^2, and
// p = 1 - sqrt(1 - err).
if (bloomType == BloomType.ROWCOL) {
err = (float) (1 - Math.sqrt(1 - err));
}
int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, MAX_ALLOWED_FOLD_FACTOR);
// Do we support compound bloom filters?
// In case of compound Bloom filters we ignore the maxKeys hint.
CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
bloomType == BloomType.ROWCOL ? CellComparatorImpl.COMPARATOR : null, bloomType);
writer.addInlineBlockWriter(bloomWriter);
return bloomWriter;
} | 3.68 |
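As a worked illustration of the ROWCOL adjustment in the comment above (standalone arithmetic, not part of HBase; the class name is ours): for a combined target error rate err, each of the two OR-ed lookups must use p = 1 - sqrt(1 - err).
// Minimal sketch: verify that 1 - (1 - p)^2 recovers the overall error rate err.
public class RowColErrorRateSketch {
    public static void main(String[] args) {
        float err = 0.01f;                           // overall target false positive rate
        float p = (float) (1 - Math.sqrt(1 - err));  // per-lookup rate, ~0.005013
        System.out.printf("p = %.6f, combined = %.6f%n", p, 1 - Math.pow(1 - p, 2));
    }
}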
hadoop_ResourceRequest_getAllocationRequestId | /**
* Get the optional <em>ID</em> corresponding to this allocation request. This
* ID is an identifier for different {@code ResourceRequest}s from the <b>same
* application</b>. The allocated {@code Container}(s) received as part of the
* {@code AllocateResponse} response will have the ID corresponding to the
* original {@code ResourceRequest} for which the RM made the allocation.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates {@code Container}(s).
* <b>Applications</b> can continue to completely ignore the returned ID in
* the response and use the allocation for any of their outstanding requests.
* <p>
* If one wishes to replace an entire {@code ResourceRequest} corresponding to
* a specific ID, they can simply cancel the corresponding {@code
* ResourceRequest} and submit a new one afresh.
*
* @return the <em>ID</em> corresponding to this allocation request.
*/
@Public
@Evolving
public long getAllocationRequestId() {
throw new UnsupportedOperationException();
} | 3.68 |
flink_HiveTablePartition_ofTable | /**
* Creates a HiveTablePartition to represent a hive table.
*
* @param hiveConf the HiveConf used to connect to HMS
* @param hiveVersion the version of hive in use, if it's null the version will be automatically
* detected
* @param dbName name of the database
* @param tableName name of the table
*/
public static HiveTablePartition ofTable(
HiveConf hiveConf, @Nullable String hiveVersion, String dbName, String tableName) {
HiveShim hiveShim = getHiveShim(hiveVersion);
try (HiveMetastoreClientWrapper client =
new HiveMetastoreClientWrapper(hiveConf, hiveShim)) {
Table hiveTable = client.getTable(dbName, tableName);
return new HiveTablePartition(
hiveTable.getSd(), HiveReflectionUtils.getTableMetadata(hiveShim, hiveTable));
} catch (TException e) {
throw new FlinkHiveException(
String.format(
"Failed to create HiveTablePartition for hive table %s.%s",
dbName, tableName),
e);
}
} | 3.68 |
hadoop_Validate_checkPathExists | /**
* Validates that the given path exists.
* @param path the path to check.
* @param argName the name of the argument being validated.
*/
public static void checkPathExists(Path path, String argName) {
checkNotNull(path, argName);
checkArgument(Files.exists(path), "Path %s (%s) does not exist.", argName,
path);
} | 3.68 |
flink_CommonExecSink_getTargetRowKind | /**
* Get the target row-kind that the row data should change to, assuming the current row kind is
* RowKind.INSERT. Return Optional.empty() if it doesn't need to change. Currently, it'll only
* consider row-level delete/update.
*/
private Optional<RowKind> getTargetRowKind() {
if (tableSinkSpec.getSinkAbilities() != null) {
for (SinkAbilitySpec sinkAbilitySpec : tableSinkSpec.getSinkAbilities()) {
if (sinkAbilitySpec instanceof RowLevelDeleteSpec) {
RowLevelDeleteSpec deleteSpec = (RowLevelDeleteSpec) sinkAbilitySpec;
if (deleteSpec.getRowLevelDeleteMode()
== SupportsRowLevelDelete.RowLevelDeleteMode.DELETED_ROWS) {
return Optional.of(RowKind.DELETE);
}
} else if (sinkAbilitySpec instanceof RowLevelUpdateSpec) {
RowLevelUpdateSpec updateSpec = (RowLevelUpdateSpec) sinkAbilitySpec;
if (updateSpec.getRowLevelUpdateMode()
== SupportsRowLevelUpdate.RowLevelUpdateMode.UPDATED_ROWS) {
return Optional.of(RowKind.UPDATE_AFTER);
}
}
}
}
return Optional.empty();
} | 3.68 |
framework_StringToDoubleConverter_getModelType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getModelType()
*/
@Override
public Class<Double> getModelType() {
return Double.class;
} | 3.68 |
flink_Broker_handIn | /** Hand in the object to share. */
public void handIn(String key, V obj) {
if (!retrieveSharedQueue(key).offer(obj)) {
throw new RuntimeException(
"Could not register the given element, broker slot is already occupied.");
}
} | 3.68 |
flink_TaskExecutorManager_clearPendingTaskManagerSlots | /** clear all pending task manager slots. */
public void clearPendingTaskManagerSlots() {
if (!resourceAllocator.isSupported()) {
return;
}
if (!pendingSlots.isEmpty()) {
this.pendingSlots.clear();
declareNeededResourcesWithDelay();
}
} | 3.68 |
framework_TooltipInfo_getTitle | /**
* Gets the tooltip title.
*
* @return the title
*/
public String getTitle() {
return title;
} | 3.68 |
rocketmq-connect_MetricUtils_stringToMetricName | /**
* Converts a serialized metric name string back to a MetricName.
*
* @param name the serialized metric name string
* @return the parsed MetricName
*/
public static MetricName stringToMetricName(String name) {
if (StringUtils.isEmpty(name)) {
throw new IllegalArgumentException("Metric name str is empty");
}
String[] splits = name.replace(ROCKETMQ_CONNECT, "").replace(SPLIT_KV, SPLIT_COMMA).split(SPLIT_COMMA);
return new MetricName(splits[0], splits[1], splits[2], getTags(Arrays.copyOfRange(splits, 3, splits.length))
);
} | 3.68 |
flink_ExecutionConfig_registerTypeWithKryoSerializer | /**
* Registers the given Serializer via its class as a serializer for the given type at the
* KryoSerializer
*
* @param type The class of the types serialized with the given serializer.
* @param serializerClass The class of the serializer to use.
*/
@SuppressWarnings("rawtypes")
public void registerTypeWithKryoSerializer(
Class<?> type, Class<? extends Serializer> serializerClass) {
if (type == null || serializerClass == null) {
throw new NullPointerException("Cannot register null class or serializer.");
}
@SuppressWarnings("unchecked")
Class<? extends Serializer<?>> castedSerializerClass =
(Class<? extends Serializer<?>>) serializerClass;
registeredTypesWithKryoSerializerClasses.put(type, castedSerializerClass);
} | 3.68 |
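A hedged usage sketch for the registration call above; the choice of java.util.Locale and Kryo's built-in JavaSerializer is ours, purely for illustration, and assumes a Kryo dependency on the classpath.
import org.apache.flink.api.common.ExecutionConfig;
import com.esotericsoftware.kryo.serializers.JavaSerializer;
import java.util.Locale;

// Illustration only: serialize Locale instances with Kryo's JavaSerializer.
public class KryoRegistrationSketch {
    public static void main(String[] args) {
        ExecutionConfig config = new ExecutionConfig();
        config.registerTypeWithKryoSerializer(Locale.class, JavaSerializer.class);
    }
}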
streampipes_EpProperties_listBooleanEp | /**
* Creates a new list-based event property of type boolean and with the assigned domain property.
*
* @param label A human-readable label of the property
* @param runtimeName The field identifier of the event property at runtime.
* @param domainProperty The semantics of the list property as a String. The string should correspond to a URI
* provided by a vocabulary. Use one of the vocabularies provided in
* {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary.
* @return {@link org.apache.streampipes.model.schema.EventPropertyPrimitive}
*/
public static EventPropertyList listBooleanEp(Label label, String runtimeName, String domainProperty) {
return listEp(label, runtimeName, Datatypes.Boolean, domainProperty);
} | 3.68 |
morf_InsertStatementBuilder_useDirectPath | /**
* If supported by the dialect, hints to the database that an {@code APPEND} query hint should be used in the insert statement.
*
* <p>In general, as with all query plan modification, <strong>do not use this unless you know
* exactly what you are doing</strong>.</p>
*
* <p>These directives are applied in the SQL in the order they are called on {@link InsertStatement}. This usually
* affects their precedence or relative importance, depending on the platform.</p>
*
* @return this, for method chaining.
*/
public InsertStatementBuilder useDirectPath() {
getHints().add(DirectPathQueryHint.INSTANCE);
return this;
} | 3.68 |
flink_FactoryUtil_createTableSink | /**
* Creates a {@link DynamicTableSink} from a {@link CatalogTable}.
*
* <p>It considers {@link Catalog#getFactory()} if provided.
*
* @deprecated Use {@link #createDynamicTableSink(DynamicTableSinkFactory, ObjectIdentifier,
* ResolvedCatalogTable, Map, ReadableConfig, ClassLoader, boolean)} instead.
*/
@Deprecated
public static DynamicTableSink createTableSink(
@Nullable Catalog catalog,
ObjectIdentifier objectIdentifier,
ResolvedCatalogTable catalogTable,
ReadableConfig configuration,
ClassLoader classLoader,
boolean isTemporary) {
final DefaultDynamicTableContext context =
new DefaultDynamicTableContext(
objectIdentifier,
catalogTable,
Collections.emptyMap(),
configuration,
classLoader,
isTemporary);
return createDynamicTableSink(
getDynamicTableFactory(DynamicTableSinkFactory.class, catalog, context),
objectIdentifier,
catalogTable,
Collections.emptyMap(),
configuration,
classLoader,
isTemporary);
} | 3.68 |
hudi_ImmutablePair_of | /**
* <p>
* Obtains an immutable pair from two objects, inferring the generic types.
* </p>
*
* <p>
* This factory allows the pair to be created using inference to obtain the generic types.
* </p>
*
* @param <L> the left element type
* @param <R> the right element type
* @param left the left element, may be null
* @param right the right element, may be null
* @return a pair formed from the two parameters, not null
*/
public static <L, R> ImmutablePair<L, R> of(final L left, final R right) {
return new ImmutablePair<L, R>(left, right);
} | 3.68 |
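A small usage sketch (values are illustrative; assumes the usual getLeft()/getRight() accessors of the Pair API):
ImmutablePair<String, Integer> pair = ImmutablePair.of("fileId", 42); // generic types inferred
String left = pair.getLeft();     // "fileId"
Integer right = pair.getRight();  // 42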
hadoop_HdfsFileStatus_symlink | /**
* Set symlink bytes for this entity (default = null).
* @param symlink Symlink bytes (see
* {@link DFSUtilClient#bytes2String(byte[])})
* @return This Builder instance
*/
public Builder symlink(byte[] symlink) {
this.symlink = null == symlink
? null
: Arrays.copyOf(symlink, symlink.length);
return this;
} | 3.68 |
hbase_Constraints_enable | /**
* Enable constraints on a table.
* <p/>
* Currently, if you attempt to add a constraint to the table, then Constraints will automatically
* be turned on.
*/
public static TableDescriptorBuilder enable(TableDescriptorBuilder builder) throws IOException {
if (!builder.hasCoprocessor(ConstraintProcessor.class.getName())) {
builder.setCoprocessor(ConstraintProcessor.class.getName());
}
return builder;
} | 3.68 |
flink_MutableHashTable_buildInitialTable | /**
* Creates the initial hash table. This method sets up partitions, hash index, and inserts the
* data from the given iterator.
*
* @param input The iterator with the build side data.
* @throws IOException Thrown, if an element could not be fetched and deserialized from the
* iterator, or if serialization fails.
*/
protected void buildInitialTable(final MutableObjectIterator<BT> input) throws IOException {
// create the partitions
final int partitionFanOut = getPartitioningFanOutNoEstimates(this.availableMemory.size());
if (partitionFanOut > MAX_NUM_PARTITIONS) {
throw new RuntimeException(
"Hash join partitions estimate exeeds maximum number of partitions.");
}
createPartitions(partitionFanOut, 0);
// set up the table structure. the write behind buffers are taken away, as are one buffer
// per partition
final int numBuckets =
getInitialTableSize(
this.availableMemory.size(),
this.segmentSize,
partitionFanOut,
this.avgRecordLen);
initTable(numBuckets, (byte) partitionFanOut);
final TypeComparator<BT> buildTypeComparator = this.buildSideComparator;
BT record = this.buildSideSerializer.createInstance();
// go over the complete input and insert every element into the hash table
while (this.running && ((record = input.next(record)) != null)) {
final int hashCode = hash(buildTypeComparator.hash(record), 0);
insertIntoTable(record, hashCode);
}
if (!this.running) {
return;
}
// finalize the partitions
for (int i = 0; i < this.partitionsBeingBuilt.size(); i++) {
HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i);
p.finalizeBuildPhase(this.ioManager, this.currentEnumerator, this.writeBehindBuffers);
}
} | 3.68 |
hadoop_LocalSASKeyGeneratorImpl_getDefaultAccountAccessPolicy | /**
* Helper method to generate Access Policy for the Storage Account SAS Key
* @return SharedAccessAccountPolicy
*/
private SharedAccessAccountPolicy getDefaultAccountAccessPolicy() {
SharedAccessAccountPolicy ap =
new SharedAccessAccountPolicy();
Calendar cal = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
cal.setTime(new Date());
cal.add(Calendar.HOUR, (int) getSasKeyExpiryPeriod() * HOURS_IN_DAY);
ap.setSharedAccessExpiryTime(cal.getTime());
ap.setPermissions(getDefaultAccoutSASKeyPermissions());
ap.setResourceTypes(EnumSet.of(SharedAccessAccountResourceType.CONTAINER,
SharedAccessAccountResourceType.OBJECT));
ap.setServices(EnumSet.of(SharedAccessAccountService.BLOB));
return ap;
} | 3.68 |
flink_DelimitedInputFormat_close | /**
* Closes the input by releasing all buffers and closing the file input stream.
*
* @throws IOException Thrown, if the closing of the file stream causes an I/O error.
*/
@Override
public void close() throws IOException {
this.wrapBuffer = null;
this.readBuffer = null;
super.close();
} | 3.68 |
flink_NFACompiler_getCurrentNotCondition | /**
* Retrieves list of conditions resulting in Stop state and names of the corresponding NOT
* patterns.
*
* <p>A current not condition can be produced in two cases:
*
* <ol>
* <li>the previous pattern is a {@link Quantifier.ConsumingStrategy#NOT_FOLLOW}
* <li>exists a backward path of {@link Quantifier.QuantifierProperty#OPTIONAL} patterns
* to {@link Quantifier.ConsumingStrategy#NOT_FOLLOW}
* </ol>
*
* <p><b>WARNING:</b> for more info on the second case see: {@link
* NFAFactoryCompiler#copyWithoutTransitiveNots(State)}
*
* @return list of not conditions with corresponding names
*/
private List<Tuple2<IterativeCondition<T>, String>> getCurrentNotCondition() {
List<Tuple2<IterativeCondition<T>, String>> notConditions = new ArrayList<>();
Pattern<T, ? extends T> previousPattern = currentPattern;
while (previousPattern.getPrevious() != null
&& (previousPattern
.getPrevious()
.getQuantifier()
.hasProperty(Quantifier.QuantifierProperty.OPTIONAL)
|| previousPattern.getPrevious().getQuantifier().getConsumingStrategy()
== Quantifier.ConsumingStrategy.NOT_FOLLOW)) {
previousPattern = previousPattern.getPrevious();
if (previousPattern.getQuantifier().getConsumingStrategy()
== Quantifier.ConsumingStrategy.NOT_FOLLOW) {
final IterativeCondition<T> notCondition = getTakeCondition(previousPattern);
notConditions.add(Tuple2.of(notCondition, previousPattern.getName()));
}
}
return notConditions;
} | 3.68 |
hbase_User_shouldLoginFromKeytab | /**
* In a secure environment, if a user specified a keytab and principal, the hbase client will try to
* log in with them. Otherwise, the hbase client will try to obtain a ticket (through kinit) from the system.
* @param conf configuration file
* @return true if keytab and principal are configured
*/
public static boolean shouldLoginFromKeytab(Configuration conf) {
Optional<String> keytab = Optional.ofNullable(conf.get(AuthUtil.HBASE_CLIENT_KEYTAB_FILE));
Optional<String> principal =
Optional.ofNullable(conf.get(AuthUtil.HBASE_CLIENT_KERBEROS_PRINCIPAL));
return keytab.isPresent() && principal.isPresent();
} | 3.68 |
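A hedged usage sketch (the keytab path and principal are made up); it reuses the same AuthUtil constants referenced in the method:
Configuration conf = HBaseConfiguration.create();
conf.set(AuthUtil.HBASE_CLIENT_KEYTAB_FILE, "/etc/security/keytabs/hbase.client.keytab"); // illustrative path
conf.set(AuthUtil.HBASE_CLIENT_KERBEROS_PRINCIPAL, "hbase-client@EXAMPLE.COM");           // illustrative principal
boolean fromKeytab = User.shouldLoginFromKeytab(conf); // true: both keytab and principal are configured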
flink_Tuple2_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple2<T0, T1> copy() {
return new Tuple2<>(this.f0, this.f1);
} | 3.68 |
flink_CrossOperator_projectTuple21 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>
ProjectCross<I1, I2, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>
projectTuple21() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> tType =
new TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(fTypes);
return new ProjectCross<I1, I2, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
morf_MySqlDialect_sqlForDefaultClauseLiteral | /**
* For MySQL, we need to alter the way we render a date literal in a default clause: We need to suppress the "DATE" prefix.
*/
@Override
protected String sqlForDefaultClauseLiteral(Column column) {
if (column.getType() != DataType.DATE) {
return super.sqlForDefaultClauseLiteral(column);
}
// suppress the "DATE" prefix for MySQL, just output the date part directly
return String.format("'%s'", column.getDefaultValue());
} | 3.68 |
hbase_TableDescriptorBuilder_toString | /** Returns Name of this table and then a map of all of the column family descriptors. */
@Override
public String toString() {
StringBuilder s = new StringBuilder();
s.append('\'').append(Bytes.toString(name.getName())).append('\'');
s.append(getValues(true));
families.values().forEach(f -> s.append(", ").append(f));
return s.toString();
} | 3.68 |
hbase_HRegionFileSystem_setStoragePolicy | /**
* Set storage policy for a whole region. <br>
* <i>"LAZY_PERSIST"</i>, <i>"ALL_SSD"</i>, <i>"ONE_SSD"</i>, <i>"HOT"</i>, <i>"WARM"</i>,
* <i>"COLD"</i> <br>
* <br>
* See {@link org.apache.hadoop.hdfs.protocol.HdfsConstants} for more details.
* @param policyName The name of the storage policy: 'HOT', 'COLD', etc. See hadoop 2.6+
* org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD',
* 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
*/
public void setStoragePolicy(String policyName) {
CommonFSUtils.setStoragePolicy(this.fs, getRegionDir(), policyName);
} | 3.68 |
starts_Attribute_getSize | /**
* Returns the size of all the attributes in this attribute list.
*
* @param cw
* the class writer to be used to convert the attributes into
* byte arrays, with the {@link #write write} method.
* @param code
* the bytecode of the method corresponding to these code
* attributes, or <code>null</code> if these attributes are not code
* attributes.
* @param len
* the length of the bytecode of the method corresponding to
* these code attributes, or <code>null</code> if these attributes
* are not code attributes.
* @param maxStack
* the maximum stack size of the method corresponding to these
* code attributes, or -1 if these attributes are not code
* attributes.
* @param maxLocals
* the maximum number of local variables of the method
* corresponding to these code attributes, or -1 if these
* attributes are not code attributes.
* @return the size of all the attributes in this attribute list. This size
* includes the size of the attribute headers.
*/
final int getSize(final ClassWriter cw, final byte[] code, final int len,
final int maxStack, final int maxLocals) {
Attribute attr = this;
int size = 0;
while (attr != null) {
cw.newUTF8(attr.type);
size += attr.write(cw, code, len, maxStack, maxLocals).length + 6;
attr = attr.next;
}
return size;
} | 3.68 |
hbase_ServerRpcConnection_setupCryptoCipher | /**
* Set up cipher for rpc encryption with Apache Commons Crypto.
*/
private Pair<RPCProtos.ConnectionHeaderResponse, CryptoAES> setupCryptoCipher()
throws FatalConnectionException {
// If simple auth, return
if (saslServer == null) {
return null;
}
// check if rpc encryption with Crypto AES
String qop = saslServer.getNegotiatedQop();
boolean isEncryption = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop().equalsIgnoreCase(qop);
boolean isCryptoAesEncryption = isEncryption
&& this.rpcServer.conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false);
if (!isCryptoAesEncryption) {
return null;
}
if (!connectionHeader.hasRpcCryptoCipherTransformation()) {
return null;
}
String transformation = connectionHeader.getRpcCryptoCipherTransformation();
if (transformation == null || transformation.length() == 0) {
return null;
}
// Negotiates AES based on complete saslServer.
// The Crypto metadata need to be encrypted and send to client.
Properties properties = new Properties();
// the property for SecureRandomFactory
properties.setProperty(CryptoRandomFactory.CLASSES_KEY,
this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random",
"org.apache.commons.crypto.random.JavaCryptoRandom"));
// the property for cipher class
properties.setProperty(CryptoCipherFactory.CLASSES_KEY,
this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class",
"org.apache.commons.crypto.cipher.JceCipher"));
int cipherKeyBits =
this.rpcServer.conf.getInt("hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128);
// generate key and iv
if (cipherKeyBits % 8 != 0) {
throw new IllegalArgumentException(
"The AES cipher key size in bits" + " should be a multiple of byte");
}
int len = cipherKeyBits / 8;
byte[] inKey = new byte[len];
byte[] outKey = new byte[len];
byte[] inIv = new byte[len];
byte[] outIv = new byte[len];
CryptoAES cryptoAES;
try {
// generate the cipher meta data with SecureRandom
CryptoRandom secureRandom = CryptoRandomFactory.getCryptoRandom(properties);
secureRandom.nextBytes(inKey);
secureRandom.nextBytes(outKey);
secureRandom.nextBytes(inIv);
secureRandom.nextBytes(outIv);
// create CryptoAES for server
cryptoAES = new CryptoAES(transformation, properties, inKey, outKey, inIv, outIv);
} catch (GeneralSecurityException | IOException ex) {
throw new UnsupportedCryptoException(ex.getMessage(), ex);
}
// create SaslCipherMeta and send to client,
// for client, the [inKey, outKey], [inIv, outIv] should be reversed
RPCProtos.CryptoCipherMeta.Builder ccmBuilder = RPCProtos.CryptoCipherMeta.newBuilder();
ccmBuilder.setTransformation(transformation);
ccmBuilder.setInIv(getByteString(outIv));
ccmBuilder.setInKey(getByteString(outKey));
ccmBuilder.setOutIv(getByteString(inIv));
ccmBuilder.setOutKey(getByteString(inKey));
RPCProtos.ConnectionHeaderResponse resp =
RPCProtos.ConnectionHeaderResponse.newBuilder().setCryptoCipherMeta(ccmBuilder).build();
return Pair.newPair(resp, cryptoAES);
} | 3.68 |
hadoop_AbfsDtFetcher_getScheme | /**
* Get the scheme for this specific fetcher.
* @return a scheme.
*/
protected String getScheme() {
return FileSystemUriSchemes.ABFS_SCHEME;
} | 3.68 |
flink_ResolvedExpression_asSerializableString | /**
* Returns a string that fully serializes this instance. The serialized string can be used for
* storing the query in, for example, a {@link org.apache.flink.table.catalog.Catalog} as a
* view.
*
* @return detailed string for persisting in a catalog
*/
default String asSerializableString() {
throw new TableException(
String.format(
"Expression '%s' is not string serializable. Currently, only expressions that "
+ "originated from a SQL expression have a well-defined string representation.",
asSummaryString()));
} | 3.68 |
hbase_AssignReplicationQueuesProcedure_shouldSkip | // check whether ReplicationSyncUp has already done the work for us, if so, we should skip
// claiming the replication queues and deleting them instead.
private boolean shouldSkip(MasterProcedureEnv env) throws IOException {
MasterFileSystem mfs = env.getMasterFileSystem();
Path syncUpDir = new Path(mfs.getRootDir(), ReplicationSyncUp.INFO_DIR);
return mfs.getFileSystem().exists(new Path(syncUpDir, crashedServer.getServerName()));
} | 3.68 |
flink_BloomFilter_optimalNumOfBits | /**
* Computes the optimal number of bits for the given number of input entries and the expected
* false positive probability.
*
* @param inputEntries expected number of entries to be inserted
* @param fpp desired false positive probability
* @return optimal bits number
*/
public static int optimalNumOfBits(long inputEntries, double fpp) {
int numBits = (int) (-inputEntries * Math.log(fpp) / (Math.log(2) * Math.log(2)));
return numBits;
} | 3.68 |
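A quick worked application of the formula above (the numbers are illustrative): for 1,000,000 entries and a 1% false positive target the formula yields roughly 9.59 million bits, i.e. about 1.2 MB of bit array.
long inputEntries = 1_000_000L;
double fpp = 0.01;
int numBits = (int) (-inputEntries * Math.log(fpp) / (Math.log(2) * Math.log(2)));
// numBits == 9_585_058, about 1.2 MB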
graphhopper_Entity_getRefField | /**
* Used to check referential integrity.
* Return value is not used, but could allow entities to point to each other directly rather than
* using indirection through string-keyed maps.
*/
protected <K, V> V getRefField(String column, boolean required, Map<K, V> target) throws IOException {
String str = getFieldCheckRequired(column, required);
V val = null;
if (str != null) {
val = target.get(str);
if (val == null) {
feed.errors.add(new ReferentialIntegrityError(tableName, row, column, str));
}
}
return val;
} | 3.68 |
hbase_HFilePreadReader_getRegionName | /*
* Get the region name for the given file path. A HFile is always kept under the <region>/<column
* family>/<hfile>. To find the region for a given hFile, just find the name of the grandparent
* directory.
*/
private static String getRegionName(Path path) {
return path.getParent().getParent().getName();
} | 3.68 |
hbase_WALSplitUtil_tryCreateRecoveredHFilesDir | /**
* Return path to recovered.hfiles directory of the region's column family: e.g.
* /hbase/some_table/2323432434/cf/recovered.hfiles/. This method also ensures existence of
* recovered.hfiles directory under the region's column family, creating it if necessary.
* @param rootFS the root file system
* @param conf configuration
* @param tableName the table name
* @param encodedRegionName the encoded region name
* @param familyName the column family name
* @return Path to recovered.hfiles directory of the region's column family.
*/
static Path tryCreateRecoveredHFilesDir(FileSystem rootFS, Configuration conf,
TableName tableName, String encodedRegionName, String familyName) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
Path regionDir = FSUtils.getRegionDirFromTableDir(CommonFSUtils.getTableDir(rootDir, tableName),
encodedRegionName);
Path dir = getRecoveredHFilesDir(regionDir, familyName);
if (!rootFS.exists(dir) && !rootFS.mkdirs(dir)) {
LOG.warn("mkdir failed on {}, region {}, column family {}", dir, encodedRegionName,
familyName);
}
return dir;
} | 3.68 |
hadoop_FindOptions_getOut | /**
* Returns the output stream to be used.
*
* @return output stream to be used
*/
public PrintStream getOut() {
return this.out;
} | 3.68 |
flink_PartitionWriter_createNewOutputFormat | /** Create a new output format with path, configure it and open it. */
OutputFormat<T> createNewOutputFormat(Path path) throws IOException {
OutputFormat<T> format = factory.createOutputFormat(path);
format.configure(conf);
// Here we just think of it as a single file format, so there can only be a single task.
format.open(0, 1);
return format;
} | 3.68 |
hudi_SecondaryIndexManager_create | /**
* Create a secondary index for hoodie table, two steps will be performed:
* 1. Add secondary index metadata to hoodie.properties
* 2. Trigger build secondary index
*
* @param metaClient Hoodie table meta client
* @param indexName The unique secondary index name
* @param indexType Index type
* @param ignoreIfExists Whether ignore the creation if the specific secondary index exists
* @param columns The columns referenced by this secondary index, each column
* has its own options
* @param options Options for this secondary index
*/
public void create(
HoodieTableMetaClient metaClient,
String indexName,
String indexType,
boolean ignoreIfExists,
LinkedHashMap<String, Map<String, String>> columns,
Map<String, String> options) {
Option<List<HoodieSecondaryIndex>> secondaryIndexes = SecondaryIndexUtils.getSecondaryIndexes(metaClient);
Set<String> colNames = columns.keySet();
Schema avroSchema;
try {
avroSchema = new TableSchemaResolver(metaClient).getTableAvroSchema(false);
} catch (Exception e) {
throw new HoodieSecondaryIndexException(
"Failed to get table avro schema: " + metaClient.getTableConfig().getTableName());
}
for (String col : colNames) {
if (avroSchema.getField(col) == null) {
throw new HoodieSecondaryIndexException("Field not exists: " + col);
}
}
if (indexExists(secondaryIndexes, indexName, Option.of(indexType), Option.of(colNames))) {
if (ignoreIfExists) {
return;
} else {
throw new HoodieSecondaryIndexException("Secondary index already exists: " + indexName);
}
}
HoodieSecondaryIndex secondaryIndexToAdd = HoodieSecondaryIndex.builder()
.setIndexName(indexName)
.setIndexType(indexType)
.setColumns(columns)
.setOptions(options)
.build();
List<HoodieSecondaryIndex> newSecondaryIndexes = secondaryIndexes.map(h -> {
h.add(secondaryIndexToAdd);
return h;
}).orElse(Collections.singletonList(secondaryIndexToAdd));
newSecondaryIndexes.sort(new HoodieSecondaryIndex.HoodieIndexCompactor());
// Persistence secondary indexes' metadata to hoodie.properties file
Properties updatedProps = new Properties();
updatedProps.put(HoodieTableConfig.SECONDARY_INDEXES_METADATA.key(),
SecondaryIndexUtils.toJsonString(newSecondaryIndexes));
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), updatedProps);
LOG.info("Success to add secondary index metadata: {}", secondaryIndexToAdd);
// TODO: build index
} | 3.68 |
hadoop_NMTokenCache_containsToken | /**
* Returns true if NMToken is present in cache.
*/
@Private
@VisibleForTesting
public boolean containsToken(String nodeAddr) {
return nmTokens.containsKey(nodeAddr);
} | 3.68 |
hbase_MetricsMaster_convertToProcedureMetrics | /**
* This is utility function that converts {@link OperationMetrics} to {@link ProcedureMetrics}.
* NOTE: Procedure framework in hbase-procedure module accesses metrics common to most procedures
* through {@link ProcedureMetrics} interface. Metrics source classes in hbase-hadoop-compat
* module provides similar interface {@link OperationMetrics} that contains metrics common to most
* operations. As both hbase-procedure and hbase-hadoop-compat are lower level modules used by
* hbase-server (this) module and there is no dependency between them, this method does the
* required conversion.
*/
public static ProcedureMetrics convertToProcedureMetrics(final OperationMetrics metrics) {
return new ProcedureMetrics() {
@Override
public Counter getSubmittedCounter() {
return metrics.getSubmittedCounter();
}
@Override
public Histogram getTimeHisto() {
return metrics.getTimeHisto();
}
@Override
public Counter getFailedCounter() {
return metrics.getFailedCounter();
}
};
} | 3.68 |
framework_Table_removeGeneratedColumn | /**
* Removes a generated column previously added with addGeneratedColumn.
*
* @param columnId
* id of the generated column to remove
* @return true if the column could be removed (existed in the Table)
*/
public boolean removeGeneratedColumn(Object columnId) {
if (columnGenerators.containsKey(columnId)) {
columnGenerators.remove(columnId);
// remove column from visibleColumns list unless it exists in
// container (generator previously overrode this column)
if (!items.getContainerPropertyIds().contains(columnId)) {
visibleColumns.remove(columnId);
}
refreshRowCache();
return true;
} else {
return false;
}
}
/**
* Returns item identifiers of the items which are currently rendered on the
* client.
* <p>
* Note that, due to historical reasons, the name of the method is a bit
* misleading. Some items may be partly or totally out of the viewport of
* the table's scrollable area. Actually detecting rows which can be
* seen by the end user may be problematic due to the client-server
* architecture. Using {@link #getCurrentPageFirstItemId()} combined with
* {@link #getPageLength()} | 3.68 |
dubbo_Converter_accept | /**
* Whether this converter accepts the given source type and target type.
*
* @param sourceType the source type
* @param targetType the target type
* @return if accepted, return <code>true</code>, or <code>false</code>
*/
default boolean accept(Class<?> sourceType, Class<?> targetType) {
return isAssignableFrom(sourceType, getSourceType()) && isAssignableFrom(targetType, getTargetType());
} | 3.68 |
hbase_Procedure_getProcedureMetrics | /**
* Override this method to provide procedure specific counters for submitted count, failed count
* and time histogram.
* @param env The environment passed to the procedure executor
* @return Container object for procedure related metric
*/
protected ProcedureMetrics getProcedureMetrics(TEnvironment env) {
return null;
} | 3.68 |
hadoop_PersistentCommitData_saveToStream | /**
* Save to a file.
* This uses the createFile() API, which S3A supports for
* faster load and declaring sequential access, always
* @param <T> type of persistent format
* @param path path to save to (used for logging)
* @param instance data to save
* @param builder builder already prepared for the write
* @param serializer serializer to use
* @return any IOStatistics from the output stream, or null
* @throws IOException IO failure
*/
public static <T extends PersistentCommitData> IOStatistics saveToStream(
final Path path,
final T instance,
final FSDataOutputStreamBuilder builder,
final JsonSerialization<T> serializer) throws IOException {
LOG.debug("saving commit data to file {}", path);
FSDataOutputStream dataOutputStream = builder.build();
try {
dataOutputStream.write(serializer.toBytes(instance));
} finally {
dataOutputStream.close();
}
return dataOutputStream.getIOStatistics();
} | 3.68 |
hudi_HoodieGlobalBloomIndex_tagLocationBacktoRecords | /**
* Tagging for global index should only consider the record key.
*/
@Override
protected <R> HoodieData<HoodieRecord<R>> tagLocationBacktoRecords(
HoodiePairData<HoodieKey, HoodieRecordLocation> keyLocationPairs,
HoodieData<HoodieRecord<R>> records,
HoodieTable hoodieTable) {
HoodiePairData<String, HoodieRecordGlobalLocation> keyAndExistingLocations = keyLocationPairs
.mapToPair(p -> Pair.of(p.getLeft().getRecordKey(),
HoodieRecordGlobalLocation.fromLocal(p.getLeft().getPartitionPath(), p.getRight())));
boolean mayContainDuplicateLookup = hoodieTable.getMetaClient().getTableType() == MERGE_ON_READ;
boolean shouldUpdatePartitionPath = config.getGlobalBloomIndexUpdatePartitionPath() && hoodieTable.isPartitioned();
return tagGlobalLocationBackToRecords(records, keyAndExistingLocations,
mayContainDuplicateLookup, shouldUpdatePartitionPath, config, hoodieTable);
} | 3.68 |
framework_VScrollTable_selectFocusedRow | /**
* Selects the row where the current selection head is.
*
* @param ctrlSelect
* Is the selection a ctrl+selection
* @param shiftSelect
* Is the selection a shift+selection
*/
private void selectFocusedRow(boolean ctrlSelect, boolean shiftSelect) {
if (focusedRow != null) {
// Arrows moves the selection and clears previous selections
if (isSelectable() && !ctrlSelect && !shiftSelect) {
deselectAll();
focusedRow.toggleSelection();
selectionRangeStart = focusedRow;
} else if (isSelectable() && ctrlSelect && !shiftSelect) {
// Ctrl+arrows moves selection head
selectionRangeStart = focusedRow;
// No selection, only selection head is moved
} else if (isMultiSelectModeAny() && !ctrlSelect && shiftSelect) {
// Shift+arrows selection selects a range
focusedRow.toggleShiftSelection(shiftSelect);
}
}
} | 3.68 |
pulsar_ClientConfiguration_getMaxNumberOfRejectedRequestPerConnection | /**
* Get the configured maximum number of rejected requests in a time frame (60 seconds) after which the connection will be closed.
*
* @return
*/
public int getMaxNumberOfRejectedRequestPerConnection() {
return confData.getMaxNumberOfRejectedRequestPerConnection();
} | 3.68 |
flink_DataSet_leftOuterJoin | /**
* Initiates a Left Outer Join transformation.
*
* <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
* equality and provides multiple ways to combine joining elements into one DataSet.
*
* <p>Elements of the <b>left</b> DataSet (i.e. {@code this}) that do not have a matching
* element on the other side are joined with {@code null} and emitted to the resulting DataSet.
*
* @param other The other DataSet with which this DataSet is joined.
* @param strategy The strategy that should be used execute the join. If {@code null} is given,
* then the optimizer will pick the join strategy.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
* @see DataSet
*/
public <R> JoinOperatorSetsBase<T, R> leftOuterJoin(DataSet<R> other, JoinHint strategy) {
switch (strategy) {
case OPTIMIZER_CHOOSES:
case REPARTITION_SORT_MERGE:
case REPARTITION_HASH_FIRST:
case REPARTITION_HASH_SECOND:
case BROADCAST_HASH_SECOND:
return new JoinOperatorSetsBase<>(this, other, strategy, JoinType.LEFT_OUTER);
default:
throw new InvalidProgramException(
"Invalid JoinHint for LeftOuterJoin: " + strategy);
}
} | 3.68 |
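A hedged usage sketch of completing the outer join (it assumes left and right are DataSet<Tuple2<Integer, String>> instances; the field positions and the "n/a" default are illustrative). For a left outer join, the second argument of the join function is null when there is no match:
DataSet<Tuple2<Integer, String>> joined =
    left.leftOuterJoin(right, JoinHint.REPARTITION_HASH_SECOND)
        .where(0)
        .equalTo(0)
        .with(new JoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>>() {
            @Override
            public Tuple2<Integer, String> join(Tuple2<Integer, String> first, Tuple2<Integer, String> second) {
                // 'second' is null for left elements without a matching right element
                return Tuple2.of(first.f0, second == null ? "n/a" : second.f1);
            }
        });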
flink_PendingCheckpoint_acknowledgeTask | /**
* Acknowledges the task with the given execution attempt id and the given subtask state.
*
* @param executionAttemptId of the acknowledged task
* @param operatorSubtaskStates of the acknowledged task
* @param metrics Checkpoint metrics for the stats
* @return TaskAcknowledgeResult of the operation
*/
public TaskAcknowledgeResult acknowledgeTask(
ExecutionAttemptID executionAttemptId,
TaskStateSnapshot operatorSubtaskStates,
CheckpointMetrics metrics) {
synchronized (lock) {
if (disposed) {
return TaskAcknowledgeResult.DISCARDED;
}
final ExecutionVertex vertex = notYetAcknowledgedTasks.remove(executionAttemptId);
if (vertex == null) {
if (acknowledgedTasks.contains(executionAttemptId)) {
return TaskAcknowledgeResult.DUPLICATE;
} else {
return TaskAcknowledgeResult.UNKNOWN;
}
} else {
acknowledgedTasks.add(executionAttemptId);
}
long ackTimestamp = System.currentTimeMillis();
if (operatorSubtaskStates != null && operatorSubtaskStates.isTaskDeployedAsFinished()) {
checkpointPlan.reportTaskFinishedOnRestore(vertex);
} else {
List<OperatorIDPair> operatorIDs = vertex.getJobVertex().getOperatorIDs();
for (OperatorIDPair operatorID : operatorIDs) {
updateOperatorState(vertex, operatorSubtaskStates, operatorID);
}
if (operatorSubtaskStates != null && operatorSubtaskStates.isTaskFinished()) {
checkpointPlan.reportTaskHasFinishedOperators(vertex);
}
}
++numAcknowledgedTasks;
// publish the checkpoint statistics
// to prevent null-pointers from concurrent modification, copy reference onto stack
if (pendingCheckpointStats != null) {
// Do this in millis because the web frontend works with them
long alignmentDurationMillis = metrics.getAlignmentDurationNanos() / 1_000_000;
long checkpointStartDelayMillis =
metrics.getCheckpointStartDelayNanos() / 1_000_000;
SubtaskStateStats subtaskStateStats =
new SubtaskStateStats(
vertex.getParallelSubtaskIndex(),
ackTimestamp,
metrics.getBytesPersistedOfThisCheckpoint(),
metrics.getTotalBytesPersisted(),
metrics.getSyncDurationMillis(),
metrics.getAsyncDurationMillis(),
metrics.getBytesProcessedDuringAlignment(),
metrics.getBytesPersistedDuringAlignment(),
alignmentDurationMillis,
checkpointStartDelayMillis,
metrics.getUnalignedCheckpoint(),
true);
LOG.trace(
"Checkpoint {} stats for {}: size={}Kb, duration={}ms, sync part={}ms, async part={}ms",
checkpointId,
vertex.getTaskNameWithSubtaskIndex(),
subtaskStateStats.getStateSize() == 0
? 0
: subtaskStateStats.getStateSize() / 1024,
subtaskStateStats.getEndToEndDuration(
pendingCheckpointStats.getTriggerTimestamp()),
subtaskStateStats.getSyncCheckpointDuration(),
subtaskStateStats.getAsyncCheckpointDuration());
pendingCheckpointStats.reportSubtaskStats(
vertex.getJobvertexId(), subtaskStateStats);
}
return TaskAcknowledgeResult.SUCCESS;
}
} | 3.68 |
dubbo_FileSystemDynamicConfiguration_isBasedPoolingWatchService | /**
* Whether the implementation of {@link WatchService} is based on {@linkplain sun.nio.fs.PollingWatchService}
* or not.
* <p>
*
* @return if based, return <code>true</code>, or <code>false</code>
* @see #detectPoolingBasedWatchService(Optional)
*/
protected static boolean isBasedPoolingWatchService() {
return BASED_POOLING_WATCH_SERVICE;
} | 3.68 |
pulsar_LoadSimulationController_read | /**
* Read the user-submitted arguments as commands to send to clients.
*
* @param args
* Arguments split on whitespace from user input.
*/
private void read(final String[] args) {
// Don't attempt to process blank input.
if (args.length > 0 && !(args.length == 1 && args[0].isEmpty())) {
final ShellArguments arguments = new ShellArguments();
final JCommander jc = new JCommander(arguments);
try {
jc.parse(args);
final String command = arguments.commandArguments.get(0);
switch (command) {
case "trade":
handleTrade(arguments);
break;
case "change":
handleChange(arguments);
break;
case "stop":
handleStop(arguments);
break;
case "trade_group":
handleGroupTrade(arguments);
break;
case "change_group":
handleGroupChange(arguments);
break;
case "stop_group":
handleGroupStop(arguments);
break;
case "script":
// Read input from the given script instead of stdin until
// the script has executed completely.
final List<String> commandArguments = arguments.commandArguments;
checkAppArgs(commandArguments.size() - 1, 1);
final String scriptName = commandArguments.get(1);
final BufferedReader scriptReader = new BufferedReader(
new InputStreamReader(new FileInputStream(Paths.get(scriptName).toFile())));
String line = scriptReader.readLine();
while (line != null) {
read(line.split("\\s+"));
line = scriptReader.readLine();
}
scriptReader.close();
break;
case "copy":
handleCopy(arguments);
break;
case "stream":
handleStream(arguments);
break;
case "simulate":
handleSimulate(arguments);
break;
case "quit":
case "exit":
PerfClientUtils.exit(0);
break;
default:
log.info("ERROR: Unknown command \"{}\"", command);
}
} catch (ParameterException ex) {
ex.printStackTrace();
jc.usage();
} catch (Exception ex) {
ex.printStackTrace();
}
}
} | 3.68 |
framework_CheckBox_getCustomAttributes | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#getCustomAttributes()
*/
@Override
protected Collection<String> getCustomAttributes() {
Collection<String> attributes = super.getCustomAttributes();
attributes.add("checked");
return attributes;
} | 3.68 |
framework_FlyweightCell_getElement | /**
* Returns the element of the cell. Can be either a <code>TD</code> element
* or a <code>TH</code> element.
*
* @return the element
*/
public TableCellElement getElement() {
assertSetup();
return element;
} | 3.68 |
querydsl_ExpressionUtils_and | /**
* Create the intersection of the given arguments
*
* @param left lhs of expression
* @param right rhs of expression
* @return left and right
*/
public static Predicate and(Predicate left, Predicate right) {
left = (Predicate) extract(left);
right = (Predicate) extract(right);
if (left == null) {
return right;
} else if (right == null) {
return left;
} else {
return predicate(Ops.AND, left, right);
}
} | 3.68 |
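A small usage sketch (the "active" and "age" paths are ad-hoc, created only for this example); the null-tolerance shown in the method makes it handy for building filters incrementally:
BooleanExpression active = Expressions.booleanPath("active").isTrue();
Predicate ageFilter = Expressions.numberPath(Integer.class, "age").gt(18);
Predicate combined = ExpressionUtils.and(active, ageFilter); // active && age > 18
Predicate unchanged = ExpressionUtils.and(active, null);     // null operand -> returns 'active'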
framework_AbstractConnector_init | /**
* Called when the connector has been initialized. Override this method to
* perform initialization of the connector.
*/
// FIXME: It might make sense to make this abstract to force users to
// use init instead of constructor, where connection and id has not yet been
// set.
protected void init() {
} | 3.68 |
framework_Upload_focus | /**
* {@inheritDoc}
*/
@Override
public void focus() {
super.focus();
} | 3.68 |
framework_VTabsheet_getNextTabKey | /**
* Returns the key code of the keyboard shortcut that focuses the next tab
* in a focused tabsheet.
*
* @return the key to move focus to the next tab
*/
protected int getNextTabKey() {
return KeyCodes.KEY_RIGHT;
} | 3.68 |
flink_PhysicalFile_innerClose | /**
* Close the physical file and stop reusing it.
*
* @throws IOException if anything goes wrong with file system.
*/
private void innerClose() throws IOException {
closed = true;
if (outputStream != null) {
outputStream.close();
outputStream = null;
}
} | 3.68 |
hudi_HoodieJavaPairRDD_of | /**
* @param pairRDDData a {@link JavaPairRDD} of pairs.
* @param <K> type of key.
* @param <V> type of value.
* @return a new instance containing the {@link JavaPairRDD<K, V>} reference.
*/
public static <K, V> HoodieJavaPairRDD<K, V> of(JavaPairRDD<K, V> pairRDDData) {
return new HoodieJavaPairRDD<>(pairRDDData);
} | 3.68 |
querydsl_DateTimeExpression_min | /**
* Get the minimum value of this expression (aggregation)
*
* @return min(this)
*/
@Override
public DateTimeExpression<T> min() {
if (min == null) {
min = Expressions.dateTimeOperation(getType(), Ops.AggOps.MIN_AGG, mixin);
}
return min;
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_setCleanUpInterval | /**
* Sets {@code cleanUpInterval}.
*
* @param cleanUpInterval Clean up interval for completed stats.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setCleanUpInterval(Duration cleanUpInterval) {
this.cleanUpInterval = cleanUpInterval;
return this;
} | 3.68 |
querydsl_GenericExporter_getGeneratedFiles | /**
* Return the set of generated files
*
* @return a set of generated files
*/
public Set<File> getGeneratedFiles() {
return generatedFiles;
} | 3.68 |
hbase_RegionServerFlushTableProcedureManager_initialize | /**
* Initialize this region server flush procedure manager. Uses a zookeeper-based member controller.
* @param rss region server
* @throws KeeperException if the zookeeper cannot be reached
*/
@Override
public void initialize(RegionServerServices rss) throws KeeperException {
this.rss = rss;
ZKWatcher zkw = rss.getZooKeeper();
this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE);
Configuration conf = rss.getConfiguration();
long keepAlive = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT);
int opThreads = conf.getInt(FLUSH_REQUEST_THREADS_KEY, FLUSH_REQUEST_THREADS_DEFAULT);
// create the actual flush table procedure member
ThreadPoolExecutor pool =
ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
this.member = new ProcedureMember(memberRpcs, pool, new FlushTableSubprocedureBuilder());
} | 3.68 |
druid_FileNodeListener_destroy | /**
* Close the ScheduledExecutorService.
*/
@Override
public void destroy() {
if (executor == null || executor.isShutdown()) {
return;
}
try {
executor.shutdown();
} catch (Exception e) {
LOG.error("Can NOT shutdown the ScheduledExecutorService.", e);
}
} | 3.68 |
hadoop_StageConfig_withIOProcessors | /**
* Set builder value.
* @param value new value
* @return this
*/
public StageConfig withIOProcessors(final TaskPool.Submitter value) {
checkOpen();
ioProcessors = value;
return this;
} | 3.68 |
flink_NormalizedKeySorter_reset | /**
* Resets the sort buffer back to the state where it is empty. All contained data is discarded.
*/
@Override
public void reset() {
// reset all offsets
this.numRecords = 0;
this.currentSortIndexOffset = 0;
this.currentDataBufferOffset = 0;
this.sortIndexBytes = 0;
// return all memory
this.freeMemory.addAll(this.sortIndex);
this.freeMemory.addAll(this.recordBufferSegments);
this.sortIndex.clear();
this.recordBufferSegments.clear();
// grab first buffers
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
this.recordCollector.reset();
} | 3.68 |
hadoop_TrashPolicy_getCurrentTrashDir | /**
* Get the current trash directory for the specified path, based on the
* Trash Policy.
* @param path path to be deleted
* @return current trash directory for the path to be deleted
* @throws IOException raised on errors performing I/O.
*/
public Path getCurrentTrashDir(Path path) throws IOException {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_HFileOutputFormat2_configureIncrementalLoad | /**
* Configure a MapReduce Job to perform an incremental load into the given table. This
* <ul>
* <li>Inspects the table to configure a total order partitioner</li>
* <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
* <li>Sets the number of reduce tasks to match the current number of regions</li>
* <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
* <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
* PutSortReducer)</li>
* </ul>
* The user should be sure to set the map output value class to either KeyValue or Put before
* running this function.
*/
public static void configureIncrementalLoad(Job job, TableDescriptor tableDescriptor,
RegionLocator regionLocator) throws IOException {
ArrayList<TableInfo> singleTableInfo = new ArrayList<>();
singleTableInfo.add(new TableInfo(tableDescriptor, regionLocator));
configureIncrementalLoad(job, singleTableInfo, HFileOutputFormat2.class);
} | 3.68 |
framework_TabsheetNotEnoughHorizontalSpace_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 12154;
} | 3.68 |
flink_BinaryHashTable_spillPartition | /**
* Selects a partition and spills it. The number of the spilled partition is returned.
*
* @return The number of the spilled partition.
*/
@Override
protected int spillPartition() throws IOException {
// find the largest partition
int largestNumBlocks = 0;
int largestPartNum = -1;
for (int i = 0; i < partitionsBeingBuilt.size(); i++) {
BinaryHashPartition p = partitionsBeingBuilt.get(i);
if (p.isInMemory() && p.getNumOccupiedMemorySegments() > largestNumBlocks) {
largestNumBlocks = p.getNumOccupiedMemorySegments();
largestPartNum = i;
}
}
final BinaryHashPartition p = partitionsBeingBuilt.get(largestPartNum);
// spill the partition
int numBuffersFreed =
p.spillPartition(
this.ioManager,
this.currentEnumerator.next(),
this.buildSpillReturnBuffers);
this.buildSpillRetBufferNumbers += numBuffersFreed;
LOG.info(
String.format(
"Grace hash join: Ran out memory, choosing partition "
+ "[%d] to spill, %d memory segments being freed",
largestPartNum, numBuffersFreed));
// grab as many buffers as are available directly
MemorySegment currBuff;
while (this.buildSpillRetBufferNumbers > 0
&& (currBuff = this.buildSpillReturnBuffers.poll()) != null) {
returnPage(currBuff);
this.buildSpillRetBufferNumbers--;
}
numSpillFiles++;
spillInBytes += numBuffersFreed * segmentSize;
// The bloomFilter is built by bucket area after the data is spilled, so that we can use
// enough memory.
p.buildBloomFilterAndFreeBucket();
return largestPartNum;
} | 3.68 |
hadoop_CustomResourceMetrics_registerCustomResources | /**
* Whenever this metric object is constructed for a queue, all custom
* resource metric values are initialized to '0', like any other
* mandatory resource metrics.
* @param customResources Map containing all custom resource types
* @param registry of the metric type
* @param metricPrefix prefix in metric name
* @param metricDesc suffix for metric name
*/
public void registerCustomResources(Map<String, Long> customResources,
MetricsRegistry registry, String metricPrefix, String metricDesc) {
for (Map.Entry<String, Long> entry : customResources.entrySet()) {
String resourceName = entry.getKey();
Long resourceValue = entry.getValue();
MutableGaugeLong resourceMetric =
(MutableGaugeLong) registry.get(metricPrefix + resourceName);
if (resourceMetric == null) {
resourceMetric = registry.newGauge(metricPrefix + resourceName,
metricDesc.replace("NAME", resourceName), 0L);
}
resourceMetric.set(resourceValue);
}
} | 3.68 |
framework_LocatorUtil_isUIElement | /**
* Checks if path refers to vaadin UI element com.vaadin.ui.UI.
*
* @param path
* to vaadin element
* @return true if path refers to UI element, false otherwise
*/
public static boolean isUIElement(String path) {
String regex = "^\\/{0,2}(com\\.vaadin\\.ui\\.)?V?UI[\\/\\[]?";
RegExp regexp = RegExp.compile(regex);
return regexp.test(path);
} | 3.68 |
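A tiny usage sketch against the regular expression above (the paths are illustrative):
boolean ui = LocatorUtil.isUIElement("//com.vaadin.ui.UI[0]"); // true: fully qualified UI path
boolean vui = LocatorUtil.isUIElement("/VUI[0]");              // true: the leading 'V' is optional
boolean btn = LocatorUtil.isUIElement("//Button[0]");          // false: not a UI element path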
morf_DummyXmlOutputStreamProvider_cleared | /**
* @return Whether clearDestination was called.
*/
public boolean cleared() {
return cleared;
} | 3.68 |
dubbo_GenericBeanPostProcessorAdapter_getBeanType | /**
* Bean Type
*
* @return Bean Type
*/
public final Class<T> getBeanType() {
return beanType;
} | 3.68 |
graphhopper_IntsRef_isValid | /**
* Performs internal consistency checks.
* Always returns true (or throws IllegalStateException)
*/
public boolean isValid() {
if (ints == null) {
throw new IllegalStateException("ints is null");
}
if (length < 0) {
throw new IllegalStateException("length is negative: " + length);
}
if (length > ints.length) {
throw new IllegalStateException("length is out of bounds: " + length + ",ints.length=" + ints.length);
}
if (offset < 0) {
throw new IllegalStateException("offset is negative: " + offset);
}
if (offset > ints.length) {
throw new IllegalStateException("offset out of bounds: " + offset + ",ints.length=" + ints.length);
}
if (offset + length < 0) {
throw new IllegalStateException("offset+length is negative: offset=" + offset + ",length=" + length);
}
if (offset + length > ints.length) {
throw new IllegalStateException("offset+length out of bounds: offset=" + offset + ",length=" + length + ",ints.length=" + ints.length);
}
return true;
} | 3.68 |
framework_ButtonUpdateAltText_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Button should have a alt text";
} | 3.68 |
hadoop_HttpReferrerAuditHeader_maybeStripWrappedQuotes | /**
* Strip any quotes from around a header.
* This is needed when processing log entries.
* @param header field.
* @return field without quotes.
*/
public static String maybeStripWrappedQuotes(String header) {
String h = header;
// remove quotes if needed.
while (h.startsWith("\"")) {
h = h.substring(1);
}
while (h.endsWith("\"")) {
h = h.substring(0, h.length() - 1);
}
return h;
} | 3.68 |
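A tiny usage sketch (the sample value is ours):
String clean = HttpReferrerAuditHeader.maybeStripWrappedQuotes("\"op_rename\"");
// clean == "op_rename": surrounding quotes removed, inner text untouched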
hbase_StorageClusterStatusModel_getStartCode | /** Returns the region server's start code */
@XmlAttribute
public long getStartCode() {
return startCode;
} | 3.68 |
hbase_BinaryComparator_toByteArray | /** Returns The comparator serialized using pb */
@Override
public byte[] toByteArray() {
ComparatorProtos.BinaryComparator.Builder builder =
ComparatorProtos.BinaryComparator.newBuilder();
builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
return builder.build().toByteArray();
} | 3.68 |