name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
flink_FlinkSecurityManager_forceProcessExit | /**
* Use this method to circumvent the configured {@link FlinkSecurityManager} behavior, ensuring
* that the current JVM process will always stop via System.exit() or
* Runtime.getRuntime().halt().
*/
public static void forceProcessExit(int exitCode) {
// Unset ourselves to allow exiting in any case.
System.setSecurityManager(null);
if (flinkSecurityManager != null && flinkSecurityManager.haltOnSystemExit) {
Runtime.getRuntime().halt(exitCode);
} else {
System.exit(exitCode);
}
} | 3.68 |
hbase_DependentColumnFilter_getQualifier | /** Returns the column qualifier */
public byte[] getQualifier() {
return this.columnQualifier;
} | 3.68 |
hudi_BloomIndexFileInfo_isKeyInRange | /**
* Checks whether the given key falls within the range (inclusive).
*/
public boolean isKeyInRange(String recordKey) {
return Objects.requireNonNull(minRecordKey).compareTo(recordKey) <= 0
&& Objects.requireNonNull(maxRecordKey).compareTo(recordKey) >= 0;
} | 3.68 |
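
A small usage sketch of the method above; the three-argument constructor (fileId, minRecordKey, maxRecordKey) is assumed purely for illustration. Note the comparisons are plain lexicographic String.compareTo calls.

// Hypothetical construction: (fileId, minRecordKey, maxRecordKey).
BloomIndexFileInfo fileInfo = new BloomIndexFileInfo("file-0001", "key_0100", "key_0500");
boolean a = fileInfo.isKeyInRange("key_0100"); // true  (lower bound is inclusive)
boolean b = fileInfo.isKeyInRange("key_0500"); // true  (upper bound is inclusive)
boolean c = fileInfo.isKeyInRange("key_0501"); // false (lexicographically above maxRecordKey)
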
morf_AbstractSqlDialectTest_testUpdateWithNull | /**
* Test that an update statement is generated with a null value.
*/
@Test
public void testUpdateWithNull() {
UpdateStatement stmt = new UpdateStatement(new TableReference(TEST_TABLE)).set(new NullFieldLiteral().as(STRING_FIELD));
String expectedSql = "UPDATE " + tableName(TEST_TABLE) + " SET stringField = null";
assertEquals("Update with null value", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_SecurityContextFactory_isCompatibleWith | /**
* Check if this factory is compatible with the security configuration.
*
* <p>Specific implementations must override this to provide a compatibility check; by default it
* will always return {@code false}.
*
* @param securityConfig security configurations.
* @return {@code true} if factory is compatible with the configuration.
*/
default boolean isCompatibleWith(final SecurityConfiguration securityConfig) {
return false;
} | 3.68 |
framework_AbstractBeanContainer_resolveBeanId | /**
* Use the bean resolver to get the identifier for a bean.
*
* @param bean the bean to resolve the identifier for
* @return resolved bean identifier, null if it could not be resolved
* @throws IllegalStateException
* if no bean resolver is set
*/
protected IDTYPE resolveBeanId(BEANTYPE bean) {
if (beanIdResolver == null) {
throw new IllegalStateException(
"Bean item identifier resolver is required.");
}
return beanIdResolver.getIdForBean(bean);
} | 3.68 |
flink_UnionOperator_translateToDataFlow | /**
* Returns the BinaryNodeTranslation of the Union.
*
* @param input1 The first input of the union, as a common API operator.
* @param input2 The second input of the union, as a common API operator.
* @return The common API union operator.
*/
@Override
protected Union<T> translateToDataFlow(Operator<T> input1, Operator<T> input2) {
return new Union<T>(input1, input2, unionLocationName);
} | 3.68 |
hbase_MasterQuotaManager_isInViolationAndPolicyDisable | /**
* Checks whether a table is in violation and the policy set on the table (or its namespace) is DISABLE.
* @param tableName tableName to check.
* @param quotaObserverChore QuotaObserverChore instance
* @return true if the table is in violation and the policy is DISABLE, false otherwise.
*/
private boolean isInViolationAndPolicyDisable(TableName tableName,
QuotaObserverChore quotaObserverChore) {
boolean isInViolationAtTable = false;
boolean isInViolationAtNamespace = false;
SpaceViolationPolicy tablePolicy = null;
SpaceViolationPolicy namespacePolicy = null;
// Get Current Snapshot for the given table
SpaceQuotaSnapshot tableQuotaSnapshot = quotaObserverChore.getTableQuotaSnapshot(tableName);
SpaceQuotaSnapshot namespaceQuotaSnapshot =
quotaObserverChore.getNamespaceQuotaSnapshot(tableName.getNamespaceAsString());
if (tableQuotaSnapshot != null) {
// check if table in violation
isInViolationAtTable = tableQuotaSnapshot.getQuotaStatus().isInViolation();
Optional<SpaceViolationPolicy> policy = tableQuotaSnapshot.getQuotaStatus().getPolicy();
if (policy.isPresent()) {
tablePolicy = policy.get();
}
}
if (namespaceQuotaSnapshot != null) {
// check namespace in violation
isInViolationAtNamespace = namespaceQuotaSnapshot.getQuotaStatus().isInViolation();
Optional<SpaceViolationPolicy> policy = namespaceQuotaSnapshot.getQuotaStatus().getPolicy();
if (policy.isPresent()) {
namespacePolicy = policy.get();
}
}
return (tablePolicy == SpaceViolationPolicy.DISABLE && isInViolationAtTable)
|| (namespacePolicy == SpaceViolationPolicy.DISABLE && isInViolationAtNamespace);
} | 3.68 |
framework_VEmbedded_createFlashEmbed | /**
* Creates the Object and Embed tags for the Flash plugin so it works
* cross-browser.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param state
* The EmbeddedState
* @param src
* The src attribute
* @return Tags concatenated into a string
* @since 8.2
*/
public String createFlashEmbed(EmbeddedState state, String src) {
/*
* To ensure cross-browser compatibility we are using the twice-cooked
* method to embed flash, i.e. we add an OBJECT tag for IE ActiveX and
* inside it an EMBED for all other browsers.
*/
StringBuilder html = new StringBuilder();
// Start the object tag
html.append("<object ");
/*
* Add classid required for ActiveX to recognize the flash. This is a
* predefined value which ActiveX recognizes and must be the given
* value. More info can be found on
* http://kb2.adobe.com/cps/415/tn_4150.html. Allow the user to override
* this by setting their own classid.
*/
if (state.classId != null) {
html.append("classid=\"" + WidgetUtil.escapeAttribute(state.classId)
+ "\" ");
} else {
html.append(
"classid=\"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000\" ");
}
/*
* Add codebase required for ActiveX and must be exactly this according
* to http://kb2.adobe.com/cps/415/tn_4150.html to work with the above
* given classid. Again, see more info on
* http://kb2.adobe.com/cps/415/tn_4150.html. Limiting Flash version to
* 6.0.0.0 and above. Allow the user to override this by setting their own
* codebase.
*/
if (state.codebase != null) {
html.append("codebase=\""
+ WidgetUtil.escapeAttribute(state.codebase) + "\" ");
} else {
html.append(
"codebase=\"http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,0,0\" ");
}
ComponentConnector paintable = ConnectorMap.get(client)
.getConnector(this);
String height = paintable.getState().height;
String width = paintable.getState().width;
// Add width and height
html.append("width=\"" + WidgetUtil.escapeAttribute(width) + "\" ");
html.append("height=\"" + WidgetUtil.escapeAttribute(height) + "\" ");
html.append("type=\"application/x-shockwave-flash\" ");
// Codetype
if (state.codetype != null) {
html.append("codetype=\""
+ WidgetUtil.escapeAttribute(state.codetype) + "\" ");
}
// Standby
if (state.standby != null) {
html.append("standby=\"" + WidgetUtil.escapeAttribute(state.standby)
+ "\" ");
}
// Archive
if (state.archive != null) {
html.append("archive=\"" + WidgetUtil.escapeAttribute(state.archive)
+ "\" ");
}
// End object tag
html.append('>');
// Ensure we have a movie parameter
Map<String, String> parameters = state.parameters;
if (parameters.get("movie") == null) {
parameters.put("movie", getSrc(src, client));
}
// Add parameters to OBJECT
for (String name : parameters.keySet()) {
html.append("<param ");
html.append("name=\"" + WidgetUtil.escapeAttribute(name) + "\" ");
html.append("value=\""
+ WidgetUtil.escapeAttribute(parameters.get(name)) + "\" ");
html.append("/>");
}
// Build inner EMBED tag
html.append("<embed ");
html.append("src=\"" + WidgetUtil.escapeAttribute(getSrc(src, client))
+ "\" ");
html.append("width=\"" + WidgetUtil.escapeAttribute(width) + "\" ");
html.append("height=\"" + WidgetUtil.escapeAttribute(height) + "\" ");
html.append("type=\"application/x-shockwave-flash\" ");
// Add the parameters to the Embed
for (String name : parameters.keySet()) {
html.append(WidgetUtil.escapeAttribute(name));
html.append('=');
html.append("\"" + WidgetUtil.escapeAttribute(parameters.get(name))
+ "\"");
}
// End embed tag
html.append("></embed>");
if (state.altText != null) {
html.append(state.altText);
}
// End object tag
html.append("</object>");
return html.toString();
} | 3.68 |
pulsar_NamespaceIsolationPolicies_getPolicyByNamespace | /**
* Get the namespace isolation policy for the specified namespace.
*
* <p>There should only be one namespace isolation policy defined for the specific namespace. If multiple policies
* match, the first one will be returned.
*
* @param namespace the namespace to look up
* @return the isolation policy for the namespace, or {@code null} if no policy matches
*/
public NamespaceIsolationPolicy getPolicyByNamespace(NamespaceName namespace) {
for (NamespaceIsolationData nsPolicyData : policies.values()) {
if (this.namespaceMatches(namespace, nsPolicyData)) {
return new NamespaceIsolationPolicyImpl(nsPolicyData);
}
}
return null;
} | 3.68 |
flink_QuickSort_sortInternal | /**
* Sort the given range of items using quick sort. If the recursion depth falls below {@link
* #getMaxDepth}, then switch to {@link HeapSort}.
*
* @param s paged sortable
* @param recordsPerSegment number of records per memory segment
* @param recordSize number of bytes per record
* @param maxOffset offset of a last record in a memory segment
* @param p index of first record in range
* @param pN page number of first record in range
* @param pO page offset of first record in range
* @param r index of last-plus-one'th record in range
* @param rN page number of last-plus-one'th record in range
* @param rO page offset of last-plus-one'th record in range
* @param depth recursion depth
* @see #sort(IndexedSortable, int, int)
*/
private static void sortInternal(
final IndexedSortable s,
int recordsPerSegment,
int recordSize,
int maxOffset,
int p,
int pN,
int pO,
int r,
int rN,
int rO,
int depth) {
while (true) {
if (r - p < 13) {
// switch to insertion sort
int i = p + 1, iN, iO;
if (pO == maxOffset) {
iN = pN + 1;
iO = 0;
} else {
iN = pN;
iO = pO + recordSize;
}
while (i < r) {
int j = i, jN = iN, jO = iO;
int jd = j - 1, jdN, jdO;
if (jO == 0) {
jdN = jN - 1;
jdO = maxOffset;
} else {
jdN = jN;
jdO = jO - recordSize;
}
while (j > p && s.compare(jdN, jdO, jN, jO) > 0) {
s.swap(jN, jO, jdN, jdO);
j = jd;
jN = jdN;
jO = jdO;
jd--;
if (jdO == 0) {
jdN--;
jdO = maxOffset;
} else {
jdO -= recordSize;
}
}
i++;
if (iO == maxOffset) {
iN++;
iO = 0;
} else {
iO += recordSize;
}
}
return;
}
if (--depth < 0) {
// switch to heap sort
alt.sort(s, p, r);
return;
}
int rdN, rdO;
if (rO == 0) {
rdN = rN - 1;
rdO = maxOffset;
} else {
rdN = rN;
rdO = rO - recordSize;
}
int m = (p + r) >>> 1,
mN = m / recordsPerSegment,
mO = (m % recordsPerSegment) * recordSize;
// select, move pivot into first position
fix(s, mN, mO, pN, pO);
fix(s, mN, mO, rdN, rdO);
fix(s, pN, pO, rdN, rdO);
// Divide
int i = p, iN = pN, iO = pO;
int j = r, jN = rN, jO = rO;
int ll = p, llN = pN, llO = pO;
int rr = r, rrN = rN, rrO = rO;
int cr;
while (true) {
i++;
if (iO == maxOffset) {
iN++;
iO = 0;
} else {
iO += recordSize;
}
while (i < j) {
if ((cr = s.compare(iN, iO, pN, pO)) > 0) {
break;
}
if (0 == cr) {
ll++;
if (llO == maxOffset) {
llN++;
llO = 0;
} else {
llO += recordSize;
}
if (ll != i) {
s.swap(llN, llO, iN, iO);
}
}
i++;
if (iO == maxOffset) {
iN++;
iO = 0;
} else {
iO += recordSize;
}
}
j--;
if (jO == 0) {
jN--;
jO = maxOffset;
} else {
jO -= recordSize;
}
while (j > i) {
if ((cr = s.compare(pN, pO, jN, jO)) > 0) {
break;
}
if (0 == cr) {
rr--;
if (rrO == 0) {
rrN--;
rrO = maxOffset;
} else {
rrO -= recordSize;
}
if (rr != j) {
s.swap(rrN, rrO, jN, jO);
}
}
j--;
if (jO == 0) {
jN--;
jO = maxOffset;
} else {
jO -= recordSize;
}
}
if (i < j) {
s.swap(iN, iO, jN, jO);
} else {
break;
}
}
j = i;
jN = iN;
jO = iO;
// swap pivot- and all eq values- into position
while (ll >= p) {
i--;
if (iO == 0) {
iN--;
iO = maxOffset;
} else {
iO -= recordSize;
}
s.swap(llN, llO, iN, iO);
ll--;
if (llO == 0) {
llN--;
llO = maxOffset;
} else {
llO -= recordSize;
}
}
while (rr < r) {
s.swap(rrN, rrO, jN, jO);
rr++;
if (rrO == maxOffset) {
rrN++;
rrO = 0;
} else {
rrO += recordSize;
}
j++;
if (jO == maxOffset) {
jN++;
jO = 0;
} else {
jO += recordSize;
}
}
// Conquer
// Recurse on smaller interval first to keep stack shallow
assert i != j;
if (i - p < r - j) {
sortInternal(
s, recordsPerSegment, recordSize, maxOffset, p, pN, pO, i, iN, iO, depth);
p = j;
pN = jN;
pO = jO;
} else {
sortInternal(
s, recordsPerSegment, recordSize, maxOffset, j, jN, jO, r, rN, rO, depth);
r = i;
rN = iN;
rO = iO;
}
}
} | 3.68 |
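
For readers unfamiliar with the segment/offset index arithmetic above, the underlying algorithm is easier to see on a plain int array: median-of-three pivot selection, a fat (three-way) partition that groups keys equal to the pivot, insertion sort for short ranges, and iteration on the larger half to keep the stack shallow. The sketch below is an illustration only, not the Flink implementation, and it omits the paged addressing and the heap-sort depth fallback.

// Sorts a[lo..hi] inclusive with the same skeleton as sortInternal above.
static void quickSort3Way(int[] a, int lo, int hi) {
    while (hi - lo >= 13) {
        int mid = lo + ((hi - lo) >>> 1);
        // median-of-three: order a[lo] <= a[mid] <= a[hi], then take a[mid] as the pivot
        if (a[mid] < a[lo]) swap(a, mid, lo);
        if (a[hi] < a[mid]) swap(a, hi, mid);
        if (a[mid] < a[lo]) swap(a, mid, lo);
        int pivot = a[mid];
        // fat (three-way) partition: < pivot | == pivot | > pivot
        int lt = lo, i = lo, gt = hi;
        while (i <= gt) {
            if (a[i] < pivot) swap(a, lt++, i++);
            else if (a[i] > pivot) swap(a, i, gt--);
            else i++;
        }
        // recurse on the smaller side, loop on the larger one to keep the stack shallow
        if (lt - lo < hi - gt) {
            quickSort3Way(a, lo, lt - 1);
            lo = gt + 1;
        } else {
            quickSort3Way(a, gt + 1, hi);
            hi = lt - 1;
        }
    }
    // insertion sort for short ranges, mirroring the "r - p < 13" branch above
    for (int i = lo + 1; i <= hi; i++) {
        for (int j = i; j > lo && a[j - 1] > a[j]; j--) {
            swap(a, j - 1, j);
        }
    }
}

static void swap(int[] a, int i, int j) {
    int t = a[i]; a[i] = a[j]; a[j] = t;
}
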
morf_AbstractSqlDialectTest_testIndexDropStatements | /**
* Test dropping an index.
*/
@SuppressWarnings("unchecked")
@Test
public void testIndexDropStatements() {
Table table = metadata.getTable(TEST_TABLE);
Index index = index("indexName").unique().columns(table.columns().get(0).getName());
compareStatements(
expectedIndexDropStatements(),
testDialect.indexDropStatements(table, index));
} | 3.68 |
framework_DefaultConnectionStateHandler_isHigherPriorityThan | /**
* Checks if this type is of higher priority than the given type.
*
* @param type
* the type to compare to
* @return true if this type has higher priority than the given type,
* false otherwise
*/
public boolean isHigherPriorityThan(Type type) {
return priority > type.priority;
} | 3.68 |
graphhopper_VectorTile_clearLayers | /**
* <code>repeated .vector_tile.Tile.Layer layers = 3;</code>
*/
public Builder clearLayers() {
if (layersBuilder_ == null) {
layers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
layersBuilder_.clear();
}
return this;
} | 3.68 |
hbase_TableDescriptorBuilder_setRegionSplitPolicyClassName | /**
* This sets the class associated with the region split policy which determines when a region
* split should occur. The class used by default is defined in
* org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
* @param clazz the class name
* @return the modifiable TD
*/
public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
return setValue(SPLIT_POLICY_KEY, clazz);
} | 3.68 |
flink_HiveDDLUtils_relyConstraint | // returns a constraint trait that requires RELY
public static byte relyConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_RELY);
} | 3.68 |
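
A quick illustration of the bit manipulation: the method ORs the RELY flag into an existing trait byte. The flag value below is assumed purely for illustration; the real constant is HIVE_CONSTRAINT_RELY inside HiveDDLUtils.

byte relyFlag = 1 << 2;                       // assumed flag value, e.g. 0b0000_0100
byte trait = 0b0000_0001;                     // some existing trait bits
byte withRely = (byte) (trait | relyFlag);    // 0b0000_0101: the RELY bit is now set
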
pulsar_SystemTopicClient_deleteAsync | /**
* Asynchronously deletes an event from the system topic.
* @param key the key of the event
* @param t pulsar event
* @return message id future
*/
default CompletableFuture<MessageId> deleteAsync(String key, T t) {
throw new UnsupportedOperationException("Unsupported operation");
} | 3.68 |
flink_OrcColumnarRowInputFormat_createPartitionedFormat | /**
* Create a partitioned {@link OrcColumnarRowInputFormat}, the partition columns can be
* generated by split.
*/
public static <SplitT extends FileSourceSplit>
OrcColumnarRowInputFormat<VectorizedRowBatch, SplitT> createPartitionedFormat(
OrcShim<VectorizedRowBatch> shim,
Configuration hadoopConfig,
RowType tableType,
List<String> partitionKeys,
PartitionFieldExtractor<SplitT> extractor,
int[] selectedFields,
List<OrcFilters.Predicate> conjunctPredicates,
int batchSize,
Function<RowType, TypeInformation<RowData>> rowTypeInfoFactory) {
// TODO FLINK-25113 all this partition keys code should be pruned from the orc format,
// because now FileSystemTableSource uses FileInfoExtractorBulkFormat for reading partition
// keys.
String[] tableFieldNames = tableType.getFieldNames().toArray(new String[0]);
LogicalType[] tableFieldTypes = tableType.getChildren().toArray(new LogicalType[0]);
List<String> orcFieldNames = getNonPartNames(tableFieldNames, partitionKeys);
int[] orcSelectedFields =
getSelectedOrcFields(tableFieldNames, selectedFields, orcFieldNames);
ColumnBatchFactory<VectorizedRowBatch, SplitT> batchGenerator =
(SplitT split, VectorizedRowBatch rowBatch) -> {
// create and initialize the row batch
ColumnVector[] vectors = new ColumnVector[selectedFields.length];
for (int i = 0; i < vectors.length; i++) {
String name = tableFieldNames[selectedFields[i]];
LogicalType type = tableFieldTypes[selectedFields[i]];
vectors[i] =
partitionKeys.contains(name)
? createFlinkVectorFromConstant(
type,
extractor.extract(split, name, type),
batchSize)
: createFlinkVector(
rowBatch.cols[orcFieldNames.indexOf(name)], type);
}
return new VectorizedColumnBatch(vectors);
};
return new OrcColumnarRowInputFormat<>(
shim,
hadoopConfig,
convertToOrcTypeWithPart(tableFieldNames, tableFieldTypes, partitionKeys),
orcSelectedFields,
conjunctPredicates,
batchSize,
batchGenerator,
rowTypeInfoFactory.apply(
new RowType(
Arrays.stream(selectedFields)
.mapToObj(i -> tableType.getFields().get(i))
.collect(Collectors.toList()))));
} | 3.68 |
framework_Form_commit | /*
* Commit changes to the data source Don't add a JavaDoc comment here, we
* use the default one from the interface.
*/
@Override
public void commit()
throws Buffered.SourceException, InvalidValueException {
LinkedList<SourceException> problems = null;
// Only commit on valid state if so requested
if (!isInvalidCommitted() && !isValid()) {
/*
* The values are not ok and we are told not to commit invalid
* values
*/
if (validationVisibleOnCommit) {
setValidationVisible(true);
}
// Find the first invalid value and throw the exception
validate();
}
// Try to commit all
for (final Object id : propertyIds) {
try {
final Field<?> f = fields.get(id);
// Commit only non-readonly fields.
if (!f.isReadOnly()) {
f.commit();
}
} catch (final Buffered.SourceException e) {
if (problems == null) {
problems = new LinkedList<SourceException>();
}
problems.add(e);
}
}
// No problems occurred
if (problems == null) {
if (currentBufferedSourceException != null) {
currentBufferedSourceException = null;
markAsDirty();
}
return;
}
// Commit problems
final Throwable[] causes = problems
.toArray(new Throwable[problems.size()]);
final Buffered.SourceException e = new Buffered.SourceException(this,
causes);
currentBufferedSourceException = e;
markAsDirty();
throw e;
} | 3.68 |
hadoop_Tail_expandArgument | // TODO: HADOOP-7234 will add glob support; for now, be backwards compat
@Override
protected List<PathData> expandArgument(String arg) throws IOException {
List<PathData> items = new LinkedList<PathData>();
items.add(new PathData(arg, getConf()));
return items;
} | 3.68 |
pulsar_BookieServiceInfoSerde_extractBookiedIdFromPath | /**
* Extracts the BookieId from a registration path.
* The path should look like /ledgers/available/bookieId
* or /ledgers/available/readonly/bookieId,
* but the prefix depends on the configuration.
* @param path
* @return the bookieId
*/
private static String extractBookiedIdFromPath(String path) throws IOException {
// https://github.com/apache/bookkeeper/blob/
// 034ef8566ad037937a4d58a28f70631175744f53/bookkeeper-server/
// src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java#L258
if (path == null) {
path = "";
}
int last = path.lastIndexOf("/");
if (last >= 0) {
return path.substring(last + 1);
} else {
throw new IOException("The path " + path + " doesn't look like a valid path for a BookieServiceInfo node");
}
} | 3.68 |
morf_AbstractSqlDialectTest_testAddIndexStatementsOnSingleColumn | /**
* Test adding an index over a single column.
*/
@SuppressWarnings("unchecked")
@Test
public void testAddIndexStatementsOnSingleColumn() {
Table table = metadata.getTable(TEST_TABLE);
Index index = index("indexName").columns(table.columns().get(0).getName());
compareStatements(
expectedAddIndexStatementsOnSingleColumn(),
testDialect.addIndexStatements(table, index));
} | 3.68 |
hadoop_TypedBytesInput_readMap | /**
* Reads the map following a <code>Type.MAP</code> code.
* @return the obtained map
* @throws IOException
*/
@SuppressWarnings("unchecked")
public TreeMap readMap() throws IOException {
int length = readMapHeader();
TreeMap result = new TreeMap();
for (int i = 0; i < length; i++) {
Object key = read();
Object value = read();
result.put(key, value);
}
return result;
} | 3.68 |
flink_TaskManagerOptions_loadFromConfiguration | /**
* The method is mainly to load the {@link
* TaskManagerOptions#TASK_MANAGER_LOAD_BALANCE_MODE} from {@link Configuration}, which is
* compatible with {@link ClusterOptions#EVENLY_SPREAD_OUT_SLOTS_STRATEGY}.
*/
public static TaskManagerLoadBalanceMode loadFromConfiguration(
@Nonnull Configuration configuration) {
Optional<TaskManagerLoadBalanceMode> taskManagerLoadBalanceModeOptional =
configuration.getOptional(TaskManagerOptions.TASK_MANAGER_LOAD_BALANCE_MODE);
if (taskManagerLoadBalanceModeOptional.isPresent()) {
return taskManagerLoadBalanceModeOptional.get();
}
boolean evenlySpreadOutSlots =
configuration.getBoolean(ClusterOptions.EVENLY_SPREAD_OUT_SLOTS_STRATEGY);
return evenlySpreadOutSlots
? TaskManagerLoadBalanceMode.SLOTS
: TaskManagerOptions.TASK_MANAGER_LOAD_BALANCE_MODE.defaultValue();
} | 3.68 |
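
A hedged sketch of the compatibility behaviour described above; the string keys are assumptions, since the canonical keys live on the respective ConfigOption definitions.

Configuration conf = new Configuration();
// Legacy flag only (assumed key of ClusterOptions.EVENLY_SPREAD_OUT_SLOTS_STRATEGY):
conf.setBoolean("cluster.evenly-spread-out-slots", true);
// -> loadFromConfiguration(conf) falls back to the legacy flag and returns SLOTS.

// Setting the new option explicitly (assumed key of TASK_MANAGER_LOAD_BALANCE_MODE)
// takes precedence over the legacy flag:
conf.setString("taskmanager.load-balance.mode", "NONE");
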
framework_CalendarWeekDropHandler_dragAccepted | /*
* (non-Javadoc)
*
* @see
* com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragAccepted
* (com.vaadin.terminal.gwt.client.ui.dd.VDragEvent)
*/
@Override
protected void dragAccepted(VDragEvent drag) {
deEmphasis();
currentTargetElement = drag.getElementOver();
currentTargetDay = WidgetUtil.findWidget(currentTargetElement,
DateCell.class);
emphasis();
} | 3.68 |
hbase_TableRegionModel_getId | /** Returns the encoded region id */
@XmlAttribute
public long getId() {
return id;
} | 3.68 |
hibernate-validator_ConstraintViolationImpl_equals | /**
* IMPORTANT - some behaviour of Validator depends on the correct implementation of this equals method! (HF)
* <p>
* {@code messageParameters}, {@code expressionVariables} and {@code dynamicPayload} are not taken into account for
* equality. These variables solely enrich the actual Constraint Violation with additional information, e.g. how we
* actually got to this CV.
*
* @return true if the two ConstraintViolation's are considered equals; false otherwise
*/
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
ConstraintViolationImpl<?> that = (ConstraintViolationImpl<?>) o;
if ( interpolatedMessage != null ? !interpolatedMessage.equals( that.interpolatedMessage ) : that.interpolatedMessage != null ) {
return false;
}
if ( messageTemplate != null ? !messageTemplate.equals( that.messageTemplate ) : that.messageTemplate != null ) {
return false;
}
if ( propertyPath != null ? !propertyPath.equals( that.propertyPath ) : that.propertyPath != null ) {
return false;
}
if ( rootBean != null ? ( rootBean != that.rootBean ) : that.rootBean != null ) {
return false;
}
if ( leafBeanInstance != null ? ( leafBeanInstance != that.leafBeanInstance ) : that.leafBeanInstance != null ) {
return false;
}
if ( value != null ? ( value != that.value ) : that.value != null ) {
return false;
}
if ( constraintDescriptor != null ? !constraintDescriptor.equals( that.constraintDescriptor ) : that.constraintDescriptor != null ) {
return false;
}
return true;
} | 3.68 |
hbase_HFileBlockIndex_getNumLevels | /** Returns the number of levels in this block index. */
public int getNumLevels() {
return numLevels;
} | 3.68 |
hudi_Pipelines_compact | /**
* The compaction tasks pipeline.
*
* <p>The compaction plan operator monitors the new compaction plan on the timeline
* then distributes the sub-plans to the compaction tasks. Each compaction task then
* hands its metadata over to the commit task, which commits the compaction transaction.
* The whole pipeline looks like the following:
*
* <pre>
* /=== | task1 | ===\
* | plan generation | ===> hash | commit |
* \=== | task2 | ===/
*
* Note: both the compaction plan generation task and the commit task are singletons.
* </pre>
*
* @param conf The configuration
* @param dataStream The input data stream
* @return the compaction pipeline
*/
public static DataStreamSink<CompactionCommitEvent> compact(Configuration conf, DataStream<Object> dataStream) {
DataStreamSink<CompactionCommitEvent> compactionCommitEventDataStream = dataStream.transform("compact_plan_generate",
TypeInformation.of(CompactionPlanEvent.class),
new CompactionPlanOperator(conf))
.setParallelism(1) // plan generate must be singleton
.setMaxParallelism(1)
// make the distribution strategy deterministic to avoid concurrent modifications
// on the same bucket files
.keyBy(plan -> plan.getOperation().getFileGroupId().getFileId())
.transform("compact_task",
TypeInformation.of(CompactionCommitEvent.class),
new CompactOperator(conf))
.setParallelism(conf.getInteger(FlinkOptions.COMPACTION_TASKS))
.addSink(new CompactionCommitSink(conf))
.name("compact_commit")
.setParallelism(1); // compaction commit should be singleton
compactionCommitEventDataStream.getTransformation().setMaxParallelism(1);
return compactionCommitEventDataStream;
} | 3.68 |
framework_CheckBoxElement_clear | /**
* Clears the check box, setting unchecked value. The check box is unchecked
* by sending a click event on it.
*
*/
@Override
public void clear() {
if (isChecked()) {
click();
}
} | 3.68 |
graphhopper_GHRequest_putHint | /**
* This method sets a key value pair in the hints and is unrelated to the setPointHints method.
* It is mainly used for deserialization with Jackson.
*
* @see #setPointHints(List)
*/
// a good trick to serialize unknown properties into the HintsMap
@JsonAnySetter
public GHRequest putHint(String fieldName, Object value) {
this.hints.putObject(fieldName, value);
return this;
} | 3.68 |
morf_SchemaAdapter_tables | /**
* @see org.alfasoftware.morf.metadata.Schema#tables()
*/
@Override
public Collection<Table> tables() {
return delegate.tables();
} | 3.68 |
flink_Types_MAP | /**
* Returns type information for a Java {@link java.util.Map}. A map must not be null. Null
* values in keys are not supported. An entry's value can be null.
*
* <p>By default, maps are untyped and treated as a generic type in Flink; therefore, it is
* useful to pass type information whenever a map is used.
*
* <p><strong>Note:</strong> Flink does not preserve the concrete {@link Map} type. It converts
* a map into {@link HashMap} when copying or deserializing.
*
* @param keyType type information for the map's keys
* @param valueType type information for the map's values
*/
public static <K, V> TypeInformation<Map<K, V>> MAP(
TypeInformation<K> keyType, TypeInformation<V> valueType) {
return new MapTypeInfo<>(keyType, valueType);
} | 3.68 |
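
A short usage example of the standard Flink Types API (org.apache.flink.api.common.typeinfo.Types); the stream in the commented line is an assumed DataStream&lt;String&gt;.

TypeInformation<Map<String, Integer>> mapInfo = Types.MAP(Types.STRING, Types.INT);
// Typical use: telling Flink the result type of a generic lambda explicitly, e.g.
// stream.map(word -> Collections.singletonMap(word, 1)).returns(mapInfo);
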
hbase_HBackupFileSystem_getTableBackupPath | /**
* Given the backup root dir, backup id and the table name, return the backup image location,
* which is also where the backup manifest file is. The return value looks like:
* "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
* "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
* @param backupRootPath backup root path
* @param tableName table name
* @param backupId backup Id
* @return backupPath for the particular table
*/
public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) {
return new Path(getTableBackupDir(backupRootPath.toString(), backupId, tableName));
} | 3.68 |
hbase_HRegionServer_main | /**
* @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
*/
public static void main(String[] args) {
LOG.info("STARTING executorService " + HRegionServer.class.getSimpleName());
VersionInfo.logVersion();
Configuration conf = HBaseConfiguration.create();
@SuppressWarnings("unchecked")
Class<? extends HRegionServer> regionServerClass = (Class<? extends HRegionServer>) conf
.getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class);
new HRegionServerCommandLine(regionServerClass).doMain(args);
} | 3.68 |
flink_ReusingBuildSecondReOpenableHashJoinIterator_reopenProbe | /**
* Set new input for probe side
*
* @throws IOException
*/
public void reopenProbe(MutableObjectIterator<V1> probeInput) throws IOException {
reopenHashTable.reopenProbe(probeInput);
} | 3.68 |
graphhopper_VLongStorage_readVLong | /**
* Reads a long stored in variable-length format. Reads between one and nine bytes. Smaller
* values take fewer bytes. Negative numbers are not supported.
* <p>
* The format is described further in DataOutput writeVInt(int) from Lucene.
*/
public long readVLong() {
/* This is the original code of this method,
* but a Hotspot bug (see LUCENE-2975) corrupts the for-loop if
* readByte() is inlined, so the loop was unrolled!
byte b = readByte();
long i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = readByte();
i |= (b & 0x7FL) << shift;
}
return i;
*/
byte b = readByte();
if (b >= 0) {
return b;
}
long i = b & 0x7FL;
b = readByte();
i |= (b & 0x7FL) << 7;
if (b >= 0) {
return i;
}
b = readByte();
i |= (b & 0x7FL) << 14;
if (b >= 0) {
return i;
}
b = readByte();
i |= (b & 0x7FL) << 21;
if (b >= 0) {
return i;
}
b = readByte();
i |= (b & 0x7FL) << 28;
if (b >= 0) {
return i;
}
b = readByte();
i |= (b & 0x7FL) << 35;
if (b >= 0) {
return i;
}
b = readByte();
i |= (b & 0x7FL) << 42;
if (b >= 0) {
return i;
}
b = readByte();
i |= (b & 0x7FL) << 49;
if (b >= 0) {
return i;
}
b = readByte();
i |= (b & 0x7FL) << 56;
if (b >= 0) {
return i;
}
throw new RuntimeException("Invalid vLong detected (negative values disallowed)");
} | 3.68 |
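
For reference, the variable-length format readVLong() expects is the standard 7-bits-per-byte encoding with the high bit as a continuation flag. A minimal encoder sketch of that byte format (not the VLongStorage write method itself):

// Writes v (must be >= 0) as 1..9 bytes: low 7 bits per byte, high bit set while more bytes follow.
static void writeVLong(java.io.DataOutput out, long v) throws java.io.IOException {
    while ((v & ~0x7FL) != 0L) {
        out.writeByte((byte) ((v & 0x7F) | 0x80));
        v >>>= 7;
    }
    out.writeByte((byte) v);
}
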
hadoop_SchedulingRequest_priority | /**
* Set the <code>priority</code> of the request.
*
* @param priority <code>priority</code> of the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
* @see SchedulingRequest#setPriority(Priority)
*/
@Public
@Unstable
public SchedulingRequestBuilder priority(Priority priority) {
schedulingRequest.setPriority(priority);
return this;
} | 3.68 |
flink_PythonConnectorUtils_createFirstColumnTopicSelector | /**
* Creates a selector that returns the first column of a row, and casts it to {@code clazz}.
* {@code T} should be a sub interface of {@link Function}, which accepts a {@link Row}.
*
* @param clazz The desired selector class to cast to, e.g. TopicSelector.class for Kafka.
* @param <T> An interface
*/
@SuppressWarnings("unchecked")
public static <T> T createFirstColumnTopicSelector(Class<T> clazz) {
return (T)
Proxy.newProxyInstance(
clazz.getClassLoader(),
new Class[] {clazz},
new FirstColumnTopicSelectorInvocationHandler());
} | 3.68 |
framework_ReflectTools_findMethod | /**
* Locates the method in the given class. Returns null if the method is not
* found. Throws an ExceptionInInitializerError if there is a problem
* locating the method as this is mainly called from static blocks.
*
* @param cls
* Class that contains the method
* @param methodName
* The name of the method
* @param parameterTypes
* The parameter types for the method.
* @return A reference to the method
* @throws ExceptionInInitializerError
* Wraps any exception in an {@link ExceptionInInitializerError}
* so this method can be called from a static initializer.
*/
public static Method findMethod(Class<?> cls, String methodName,
Class<?>... parameterTypes) throws ExceptionInInitializerError {
try {
return cls.getDeclaredMethod(methodName, parameterTypes);
} catch (Exception e) {
throw new ExceptionInInitializerError(e);
}
} | 3.68 |
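
A typical call site matching the static-initializer use case mentioned in the javadoc; the class and method names are illustrative, and java.lang.reflect.Method plus the ReflectTools import are assumed.

public class KeyDownHandler {
    // Resolved once at class load; any failure is wrapped in ExceptionInInitializerError.
    private static final Method ON_KEY_DOWN = ReflectTools.findMethod(
            KeyDownHandler.class, "onKeyDown", int.class, boolean.class);

    void onKeyDown(int keyCode, boolean shiftPressed) {
        // ...
    }
}
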
framework_Color_checkRange | /**
* Checks that all values are within the acceptable range of [0, 255].
*
* @throws IllegalArgumentException
* if any of the values fall outside of the range
*
* @param red
* @param green
* @param blue
* @param alpha
*/
private void checkRange(int red, int green, int blue, int alpha) {
if (!withinRange(red) || !withinRange(green) || !withinRange(blue)
|| !withinRange(alpha)) {
String errorMessage = "All values must fall within range [0-255]. (red: "
+ red + ", green: " + green + ", blue: " + blue
+ ", alpha: " + alpha + ")";
throw new IllegalArgumentException(errorMessage);
}
} | 3.68 |
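
checkRange relies on a withinRange helper that is not part of this row; a minimal version consistent with the [0, 255] check and the error message would be:

// Assumed helper, shown only for completeness of the snippet above.
private boolean withinRange(int value) {
    return value >= 0 && value <= 255;
}
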
flink_WatermarkStrategy_forBoundedOutOfOrderness | /**
* Creates a watermark strategy for situations where records are out of order, but you can place
* an upper bound on how far the events are out of order. An out-of-order bound B means that
* once an event with timestamp T has been encountered, no events older than {@code T - B} will
* follow any more.
*
* <p>The watermarks are generated periodically. The delay introduced by this watermark strategy
* is the periodic interval length, plus the out-of-orderness bound.
*
* @see BoundedOutOfOrdernessWatermarks
*/
static <T> WatermarkStrategy<T> forBoundedOutOfOrderness(Duration maxOutOfOrderness) {
return (ctx) -> new BoundedOutOfOrdernessWatermarks<>(maxOutOfOrderness);
} | 3.68 |
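
Typical usage together with a timestamp assigner (standard WatermarkStrategy API, Duration from java.time); the event type and its getter are illustrative names.

// Accept events up to 5 seconds out of order; recordTs is the timestamp already attached to the record, if any.
WatermarkStrategy<SensorReading> strategy = WatermarkStrategy
        .<SensorReading>forBoundedOutOfOrderness(Duration.ofSeconds(5))
        .withTimestampAssigner((reading, recordTs) -> reading.getEventTimeMillis());
// stream.assignTimestampsAndWatermarks(strategy);
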
hadoop_AbfsInputStream_getBytesFromRemoteRead | /**
* Getter for bytes read remotely from the data store.
*
* @return value of the counter in long.
*/
@VisibleForTesting
public long getBytesFromRemoteRead() {
return bytesFromRemoteRead;
} | 3.68 |
flink_Transformation_getBufferTimeout | /**
* Returns the buffer timeout of this {@code Transformation}.
*
* @see #setBufferTimeout(long)
*/
public long getBufferTimeout() {
return bufferTimeout;
} | 3.68 |
hbase_FutureUtils_failedFuture | /**
* Returns a CompletableFuture that is already completed exceptionally with the given exception.
*/
public static <T> CompletableFuture<T> failedFuture(Throwable e) {
CompletableFuture<T> future = new CompletableFuture<>();
future.completeExceptionally(e);
return future;
} | 3.68 |
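
This helper mirrors CompletableFuture.failedFuture(Throwable), which was only added in Java 9, so it is handy in Java-8-compatible code. A hedged usage sketch; the method name and flag are illustrative, with java.util.concurrent.CompletableFuture and java.io.IOException assumed as imports.

static CompletableFuture<String> fetch(boolean stopped) {
    if (stopped) {
        // Fail fast without scheduling any work.
        return FutureUtils.failedFuture(new IOException("client already stopped"));
    }
    return CompletableFuture.completedFuture("ok");
}
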
hadoop_PlacementPolicy_constraints | /**
* Placement constraint details.
**/
public PlacementPolicy constraints(List<PlacementConstraint> constraints) {
this.constraints = constraints;
return this;
} | 3.68 |
hadoop_OBSInputStream_reopen | /**
* Opens up the stream at specified target position and for given length.
*
* @param reason reason for reopen
* @param targetPos target position
* @param length length requested
* @throws IOException on any failure to open the object
*/
private synchronized void reopen(final String reason, final long targetPos,
final long length)
throws IOException {
long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
if (wrappedStream != null) {
closeStream("reopen(" + reason + ")", contentRangeFinish);
}
contentRangeFinish =
calculateRequestLimit(targetPos, length, contentLength,
readAheadRange);
try {
GetObjectRequest request = new GetObjectRequest(bucket, key);
request.setRangeStart(targetPos);
request.setRangeEnd(contentRangeFinish);
if (fs.getSse().isSseCEnable()) {
request.setSseCHeader(fs.getSse().getSseCHeader());
}
wrappedStream = client.getObject(request).getObjectContent();
contentRangeStart = targetPos;
if (wrappedStream == null) {
throw new IOException(
"Null IO stream from reopen of (" + reason + ") " + uri);
}
} catch (ObsException e) {
throw translateException("Reopen at position " + targetPos, uri, e);
}
this.streamCurrentPos = targetPos;
long endTime = System.currentTimeMillis();
LOG.debug(
"reopen({}) for {} range[{}-{}], length={},"
+ " streamPosition={}, nextReadPosition={}, thread={}, "
+ "timeUsedInMilliSec={}",
uri,
reason,
targetPos,
contentRangeFinish,
length,
streamCurrentPos,
nextReadPos,
threadId,
endTime - startTime
);
} | 3.68 |
framework_FocusableFlexTable_addKeyPressHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasKeyPressHandlers#addKeyPressHandler
* (com.google.gwt.event.dom.client.KeyPressHandler)
*/
@Override
public HandlerRegistration addKeyPressHandler(KeyPressHandler handler) {
return addDomHandler(handler, KeyPressEvent.getType());
} | 3.68 |
hbase_AuthFilter_getConfiguration | /**
* Returns the configuration to be used by the authentication filter to initialize the
* authentication handler. This filter retrieves all HBase configurations and passes those starting
* with REST_PREFIX to the authentication handler. It is useful to support plugging different
* authentication handlers.
*/
@Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig)
throws ServletException {
Properties props = super.getConfiguration(configPrefix, filterConfig);
// setting the cookie path to root '/' so it is used for all resources.
props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
Configuration conf = null;
// Dirty hack to get at the RESTServer's configuration. These should be pulled out
// of the FilterConfig.
if (RESTServer.conf != null) {
conf = RESTServer.conf;
} else {
conf = HBaseConfiguration.create();
}
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(REST_PREFIX)) {
String value = entry.getValue();
if (name.equals(REST_AUTHENTICATION_PRINCIPAL)) {
try {
String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default")));
value = SecurityUtil.getServerPrincipal(value, machineName);
} catch (IOException ie) {
throw new ServletException("Failed to retrieve server principal", ie);
}
}
if (LOG.isTraceEnabled()) {
LOG.trace("Setting property " + name + "=" + value);
}
name = name.substring(REST_PREFIX_LEN);
props.setProperty(name, value);
}
}
return props;
} | 3.68 |
hbase_StateMachineProcedure_isYieldBeforeExecuteFromState | /**
* By default, the executor will try to run all the steps of the procedure from start to finish. Return
* true to make the executor yield between execution steps to give other procedures time to run
* their steps.
* @param state the state we are going to execute next.
* @return Return true if the executor should yield before the execution of the specified step.
* Defaults to return false.
*/
protected boolean isYieldBeforeExecuteFromState(TEnvironment env, TState state) {
return false;
} | 3.68 |
hbase_FileIOEngine_sync | /**
* Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
for (int i = 0; i < fileChannels.length; i++) {
try {
if (fileChannels[i] != null) {
fileChannels[i].force(true);
}
} catch (IOException ie) {
LOG.warn("Failed syncing data to " + this.filePaths[i]);
throw ie;
}
}
} | 3.68 |
hbase_SpaceQuotaSnapshot_getUsage | /**
* Returns the current usage, in bytes, of the target (e.g. table, namespace).
*/
@Override
public long getUsage() {
return usage;
} | 3.68 |
morf_DatabaseSchemaManager_deployTable | /**
* Deploys the specified table to the database.
*
* @param table the table to deploy
*/
private Collection<String> deployTable(Table table) {
if (log.isDebugEnabled()) log.debug("Deploying table [" + table.getName() + "]");
String upperCase = table.getName().toUpperCase();
tables.get().put(upperCase, SchemaUtils.copy(table));
tablesNotNeedingTruncate.get().add(upperCase);
return dialect.get().tableDeploymentStatements(table);
} | 3.68 |
hadoop_OBSFileSystem_copyFromLocalFile | /**
* Copy the {@code src} file on the local disk to the filesystem at the given
* {@code dst} name.
*
* @param delSrc whether to delete the src
* @param overwrite whether to overwrite an existing file
* @param src path
* @param dst path
* @throws FileAlreadyExistsException if the destination file exists and
* overwrite == false
* @throws IOException IO problem
*/
@Override
public void copyFromLocalFile(final boolean delSrc, final boolean overwrite,
final Path src, final Path dst) throws FileAlreadyExistsException,
IOException {
try {
super.copyFromLocalFile(delSrc, overwrite, src, dst);
} catch (ObsException e) {
throw OBSCommonUtils.translateException(
"copyFromLocalFile(" + src + ", " + dst + ")", src, e);
}
} | 3.68 |
framework_Tree_translateDropTargetDetails | /*
* (non-Javadoc)
*
* @see
* com.vaadin.event.dd.DropTarget#translateDropTargetDetails(java.util.Map)
*/
@Override
public TreeTargetDetails translateDropTargetDetails(
Map<String, Object> clientVariables) {
return new TreeTargetDetails(clientVariables);
} | 3.68 |
hbase_CacheConfig_shouldCacheIndexesOnWrite | /**
* @return true if index blocks should be written to the cache when an HFile is written, false if
* not
*/
public boolean shouldCacheIndexesOnWrite() {
return this.cacheIndexesOnWrite;
} | 3.68 |
hbase_BucketAllocator_completelyFreeBuckets | /**
* How many buckets are currently claimed by this bucket size but as yet totally unused. These
* buckets are available for reallocation to other bucket sizes if those fill up.
*/
public int completelyFreeBuckets() {
return completelyFreeBuckets;
} | 3.68 |
flink_OperatingSystem_getCurrentOperatingSystem | /**
* Gets the operating system that the JVM runs on from the Java system properties. This method
* returns <tt>UNKNOWN</tt> if the operating system was not successfully determined.
*
* @return The enum constant for the operating system, or <tt>UNKNOWN</tt>, if it was not
* possible to determine.
*/
public static OperatingSystem getCurrentOperatingSystem() {
return os;
} | 3.68 |
morf_RenameTable_applyChange | /**
* Applies the renaming.
*/
private Schema applyChange(Schema schema, String from, String to) {
if (!schema.tableExists(from)) {
throw new IllegalArgumentException("Cannot rename table [" + from + "]. It does not exist.");
}
if (schema.tableExists(to)) {
throw new IllegalArgumentException("Cannot rename table [" + from + "]. The new table name [" + to + "] already exists.");
}
Map<String, Table> tableMap = Maps.newHashMap();
for (Table table : schema.tables()) {
if (table.getName().equalsIgnoreCase(from)) {
// If this is the table being renamed, add the new renamed table instead
tableMap.put(to, new RenamedTable(to, schema.getTable(from)));
} else {
tableMap.put(table.getName(), table);
}
}
return SchemaUtils.schema(tableMap.values());
} | 3.68 |
hbase_SnapshotVerifyProcedure_unwrapRemoteProcedureException | // we will wrap remote exception into a RemoteProcedureException,
// here we try to unwrap it
private Throwable unwrapRemoteProcedureException(RemoteProcedureException e) {
return e.getCause();
} | 3.68 |
dubbo_RpcServiceContext_getRemoteAddress | /**
* get remote address.
*
* @return remote address
*/
@Override
public InetSocketAddress getRemoteAddress() {
return remoteAddress;
} | 3.68 |
flink_Tuple13_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple13)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple13 tuple = (Tuple13) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
return true;
} | 3.68 |
dubbo_DataParseUtils_writeTextContent | /**
* content-type text
*
* @param object
* @param outputStream
* @throws IOException
*/
public static void writeTextContent(Object object, OutputStream outputStream) throws IOException {
outputStream.write(objectTextConvertToByteArray(object));
} | 3.68 |
morf_HumanReadableStatementHelper_generateAddColumnString | /**
* Generates a human-readable "Add Column" string for a column with no default value
* set at the database level but with a default initialiser for any existing records.
*
* @param tableName the table name to which the column is being added
* @param definition the definition of the new column
* @param defaultValue the value to set on existing records, or {@code null} for no initial value
* @return a string containing the human-readable version of the action
*/
public static String generateAddColumnString(final String tableName, final Column definition, final FieldLiteral defaultValue) {
if (defaultValue == null) {
return generateAddColumnString(tableName, definition);
}
return String.format("Add a %s column to %s called %s [%s], set to %s",
generateNullableString(definition),
tableName,
definition.getName(),
generateColumnDefinitionString(definition),
generateFieldValueString(defaultValue));
} | 3.68 |
pulsar_LeastLongTermMessageRate_getScore | // Form a score for a broker using its preallocated bundle data and time average data.
// This is done by summing all preallocated long-term message rates and adding them to the broker's overall
// long-term message rate, which is itself the sum of the long-term message rate of every allocated bundle.
// Any broker at (or above) the overload threshold will have a score of POSITIVE_INFINITY.
private static double getScore(final BrokerData brokerData, final ServiceConfiguration conf) {
final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
final double maxUsage = brokerData.getLocalData().getMaxResourceUsage();
if (maxUsage > overloadThreshold) {
log.warn("Broker {} is overloaded: max usage={}", brokerData.getLocalData().getWebServiceUrl(), maxUsage);
return Double.POSITIVE_INFINITY;
}
double totalMessageRate = 0;
for (BundleData bundleData : brokerData.getPreallocatedBundleData().values()) {
final TimeAverageMessageData longTermData = bundleData.getLongTermData();
totalMessageRate += longTermData.getMsgRateIn() + longTermData.getMsgRateOut();
}
// calculate estimated score
final TimeAverageBrokerData timeAverageData = brokerData.getTimeAverageData();
final double timeAverageLongTermMessageRate = timeAverageData.getLongTermMsgRateIn()
+ timeAverageData.getLongTermMsgRateOut();
final double totalMessageRateEstimate = totalMessageRate + timeAverageLongTermMessageRate;
if (log.isDebugEnabled()) {
log.debug("Broker {} has long term message rate {}",
brokerData.getLocalData().getWebServiceUrl(), totalMessageRateEstimate);
}
return totalMessageRateEstimate;
} | 3.68 |
hbase_WALActionsListener_visitLogEntryBeforeWrite | /**
* Called before each write.
*/
default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {
} | 3.68 |
morf_MathsField_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return String.format("%s %s %s%s", leftField, operator, rightField, super.toString());
} | 3.68 |
querydsl_ConstructorUtils_getTransformers | /**
* Returns the transformers applicable to the given constructor.
*
* @param constructor constructor
* @return transformers
*/
public static Iterable<Function<Object[], Object[]>> getTransformers(Constructor<?> constructor) {
return Stream.of(
new PrimitiveAwareVarArgsTransformer(constructor),
new PrimitiveTransformer(constructor),
new VarArgsTransformer(constructor))
.filter(ArgumentTransformer::isApplicable)
.collect(Collectors.toList());
} | 3.68 |
hadoop_RecordCreatorFactory_setPort | /**
* Set the port.
* @param port the port.
*/
void setPort(int port) {
this.port = port;
} | 3.68 |
flink_StateSerializerProvider_fromPreviousSerializerSnapshot | /**
* Creates a {@link StateSerializerProvider} for restored state from the previous serializer's
* snapshot.
*
* <p>Once a new serializer is registered for the state, it should be provided via the {@link
* #registerNewSerializerForRestoredState(TypeSerializer)} method.
*
* @param stateSerializerSnapshot the previous serializer's snapshot.
* @param <T> the type of the state.
* @return a new {@link StateSerializerProvider}.
*/
public static <T> StateSerializerProvider<T> fromPreviousSerializerSnapshot(
TypeSerializerSnapshot<T> stateSerializerSnapshot) {
return new LazilyRegisteredStateSerializerProvider<>(stateSerializerSnapshot);
} | 3.68 |
hadoop_DatanodeAdminProperties_setPort | /**
* Set the port number of the datanode.
* @param port the port number of the datanode.
*/
public void setPort(final int port) {
this.port = port;
} | 3.68 |
hbase_MiniHBaseCluster_killAll | /**
* Do a simulated kill all masters and regionservers. Useful when it is impossible to bring the
* mini-cluster back for clean shutdown.
*/
public void killAll() {
// Do backups first.
MasterThread activeMaster = null;
for (MasterThread masterThread : getMasterThreads()) {
if (!masterThread.getMaster().isActiveMaster()) {
masterThread.getMaster().abort("killAll");
} else {
activeMaster = masterThread;
}
}
// Do active after.
if (activeMaster != null) {
activeMaster.getMaster().abort("killAll");
}
for (RegionServerThread rst : getRegionServerThreads()) {
rst.getRegionServer().abort("killAll");
}
} | 3.68 |
framework_ScrollbarBundle_showsScrollHandle | /**
* Checks whether the scrollbar's handle is visible.
* <p>
* In other words, this method checks whether the content is larger than
* can visually fit in the element.
*
* @return <code>true</code> if the scrollbar's handle is visible
*/
public boolean showsScrollHandle() {
return getScrollSize() - getOffsetSize() > WidgetUtil.PIXEL_EPSILON;
} | 3.68 |
hadoop_YarnRegistryViewForProviders_listComponents | /**
* List components.
* @return a list of components
* @throws IOException
*/
public List<String> listComponents() throws IOException {
String path = RegistryUtils.componentListPath(
user, serviceClass, instanceName);
return registryOperations.list(path);
} | 3.68 |
framework_AbstractSelect_setContainerDataSource | /**
* Sets the Container that serves as the data source of the viewer.
*
* As a side effect, the field's value (selection) is set to null, since the old
* selection does not necessarily exist in the new Container.
*
* @see Container.Viewer#setContainerDataSource(Container)
*
* @param newDataSource
* the new data source.
*/
@Override
public void setContainerDataSource(Container newDataSource) {
if (newDataSource == null) {
newDataSource = new IndexedContainer();
}
getCaptionChangeListener().clear();
if (items != newDataSource) {
// Removes listeners from the old datasource
if (items != null) {
if (items instanceof Container.ItemSetChangeNotifier) {
((Container.ItemSetChangeNotifier) items)
.removeItemSetChangeListener(this);
}
if (items instanceof Container.PropertySetChangeNotifier) {
((Container.PropertySetChangeNotifier) items)
.removePropertySetChangeListener(this);
}
}
// Assigns new data source
items = newDataSource;
// Clears itemIdMapper also
itemIdMapper.removeAll();
// Adds listeners
if (items != null) {
if (items instanceof Container.ItemSetChangeNotifier) {
((Container.ItemSetChangeNotifier) items)
.addItemSetChangeListener(this);
}
if (items instanceof Container.PropertySetChangeNotifier) {
((Container.PropertySetChangeNotifier) items)
.addPropertySetChangeListener(this);
}
}
/*
* We expect changing the data source should also clean value. See
* #810, #4607, #5281
*/
setValue(null);
markAsDirty();
}
} | 3.68 |
framework_VTooltip_getMaxWidth | /**
* Returns the maximum width of the tooltip popup.
*
* @return The maximum width of the tooltip popup (in pixels)
*/
public int getMaxWidth() {
return maxWidth;
} | 3.68 |
morf_TableNameDecorator_indexes | /**
* @see org.alfasoftware.morf.metadata.Table#indexes()
*/
@Override
public List<Index> indexes() {
return decoratedIndexes;
} | 3.68 |
flink_SubtaskStateMapper_getNewToOldSubtasksMapping | /** Returns a mapping from new subtask index to all old subtask indexes. */
public RescaleMappings getNewToOldSubtasksMapping(int oldParallelism, int newParallelism) {
return RescaleMappings.of(
IntStream.range(0, newParallelism)
.mapToObj(
channelIndex ->
getOldSubtasks(
channelIndex, oldParallelism, newParallelism)),
oldParallelism);
} | 3.68 |
hadoop_ManifestSuccessData_getHostname | /**
* @return host which created the file (implicitly: committed the work).
*/
public String getHostname() {
return hostname;
} | 3.68 |
graphhopper_RamerDouglasPeucker_setElevationMaxDistance | /**
* Maximum elevation discrepancy (from the normal way), in meters.
*/
public RamerDouglasPeucker setElevationMaxDistance(double dist) {
this.elevationMaxDistance = dist;
return this;
} | 3.68 |
framework_AsyncPushUpdates_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 13562;
} | 3.68 |
flink_TaskManagerLocation_getNodeId | /**
* Returns the ID of the node that the task manager is located on.
*
* @return The ID of the node that the task manager is located on.
*/
public String getNodeId() {
return nodeId;
} | 3.68 |
hadoop_OBSFileSystem_getBlockFactory | /**
* Return the block factory used by {@link OBSBlockOutputStream}.
*
* @return the block factory
*/
OBSDataBlocks.BlockFactory getBlockFactory() {
return blockFactory;
} | 3.68 |
hadoop_Find_isAncestor | /** Returns true if the target is an ancestor of the source. */
private boolean isAncestor(PathData source, PathData target) {
for (Path parent = source.path; (parent != null) && !parent.isRoot();
parent = parent.getParent()) {
if (parent.equals(target.path)) {
return true;
}
}
return false;
} | 3.68 |
morf_HumanReadableStatementHelper_generateTruncateStatementString | /**
* Generates a human-readable description of a data truncate operation.
*
* @param table the table to be truncated to describe
* @return a string containing the human-readable description of the operation
*/
private static String generateTruncateStatementString(final TableReference table) {
return String.format("Delete all records in %s", table.getName());
} | 3.68 |
framework_DownloadStream_getStream | /**
* Gets downloadable stream.
*
* @return output stream.
*/
public InputStream getStream() {
return stream;
} | 3.68 |
hbase_TableState_convert | /**
* Convert from the PB version of TableState.
* @param tableName the table this state is of
* @param tableState the PB state to convert from
*/
public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
TableState.State state = State.convert(tableState.getState());
return new TableState(tableName, state);
} | 3.68 |
framework_VComboBox_getNullSelectionItemShouldBeVisible | /**
* Returns whether the null selection item should be visible or not.
* <p>
* NOTE: this checks for any entered filter value, and whether the feature
* is enabled
*
* @since 8.0
* @return {@code true} if it should be visible, {@code false} otherwise
*/
public boolean getNullSelectionItemShouldBeVisible() {
return nullSelectionAllowed && "".equals(lastFilter);
} | 3.68 |
flink_AbstractFsCheckpointStorageAccess_getCheckpointDirectoryForJob | /**
* Builds the directory into which a specific job checkpoints, meaning the directory inside which it
* creates the checkpoint-specific subdirectories.
*
* <p>This method only succeeds if a base checkpoint directory has been set; otherwise the
* method fails with an exception.
*
* @param baseCheckpointPath The base checkpoint directory path
* @param jobId The ID of the job
* @return The job's checkpoint directory
* @throws UnsupportedOperationException Thrown, if no base checkpoint directory has been set.
*/
protected static Path getCheckpointDirectoryForJob(Path baseCheckpointPath, JobID jobId) {
return new Path(baseCheckpointPath, jobId.toString());
} | 3.68 |
flink_FlinkDriver_getMajorVersion | /**
* Major version of flink.
*
* @return the major version
*/
@Override
public int getMajorVersion() {
return DRIVER_VERSION_MAJOR;
} | 3.68 |
morf_BracketedExpression_getInnerExpression | /**
* @see #innerExpression
* @return The inner expression
*/
public MathsField getInnerExpression() {
return innerExpression;
} | 3.68 |
morf_AbstractSqlDialectTest_testCreateViewStatementOverUnionSelect | /**
* Tests the SQL for creating a view over a union select.
*/
@SuppressWarnings("unchecked")
@Test
public void testCreateViewStatementOverUnionSelect() {
compareStatements(
expectedCreateViewOverUnionSelectStatements(),
testDialect.viewDeploymentStatements(testViewWithUnion));
} | 3.68 |
morf_AbstractSqlDialectTest_testTruncateTemporaryTableStatements | /**
* Tests SQL for clearing tables.
*/
@SuppressWarnings("unchecked")
@Test
public void testTruncateTemporaryTableStatements() {
compareStatements(
expectedTruncateTempTableStatements(),
testDialect.truncateTableStatements(testTempTable)
);
} | 3.68 |
morf_ExistingTableStateLoader_loadAppliedStepUUIDs | /**
* @return The set of all UUIDs which have previously been applied to this database.
*/
Set<java.util.UUID> loadAppliedStepUUIDs() {
Set<java.util.UUID> results = new HashSet<>();
// Query the database for the upgrade UUIDs recorded in the UpgradeAudit table
SelectStatement upgradeAuditSelect = select(field("upgradeUUID"))
.from(tableRef(DatabaseUpgradeTableContribution.UPGRADE_AUDIT_NAME));
String sql = dialect.convertStatementToSQL(upgradeAuditSelect);
if (log.isDebugEnabled()) log.debug("Loading UpgradeAudit with SQL [" + sql + "]");
try (Connection connection = dataSource.getConnection();
java.sql.Statement statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql)) {
while (resultSet.next()) {
convertToUUID(resultSet.getString(1)).ifPresent(results::add);
}
} catch (SQLException e) {
throw new RuntimeSqlException("Failed to load applied UUIDs. SQL: [" + sql + "]", e);
}
return Collections.unmodifiableSet(results);
} | 3.68 |
flink_TransientBlobCleanupTask_run | /** Cleans up transient BLOBs whose TTL is up, tolerating that files do not exist (anymore). */
@Override
public void run() {
// let's cache the current time - we do not operate on a millisecond precision anyway
final long currentTimeMillis = System.currentTimeMillis();
// iterate through all entries and remove those where the current time is past their expiry
Set<Map.Entry<Tuple2<JobID, TransientBlobKey>, Long>> entries =
new HashSet<>(blobExpiryTimes.entrySet());
for (Map.Entry<Tuple2<JobID, TransientBlobKey>, Long> entry : entries) {
if (currentTimeMillis >= entry.getValue()) {
JobID jobId = entry.getKey().f0;
TransientBlobKey blobKey = entry.getKey().f1;
cleanupCallback.accept(jobId, blobKey);
}
}
} | 3.68 |
framework_Window_setClosable | /**
* Sets the closable status for the window. If a window is closable it
* typically shows an X in the upper right corner. Clicking on the X sends a
* close event to the server. Setting closable to false will remove the X
* from the window and prevent the user from closing the window.
*
* @param closable
* determines if the window can be closed by the user.
*/
public void setClosable(boolean closable) {
if (closable != isClosable()) {
getState().closable = closable;
}
} | 3.68 |
pulsar_ManagedLedgerConfig_getBookKeeperEnsemblePlacementPolicyClassName | /**
* Managed-ledger can set up a different custom EnsemblePlacementPolicy (e.g. affinity to write ledgers only to a
* specific group of bookies).
*
* @return the configured EnsemblePlacementPolicy class
*/
public Class<? extends EnsemblePlacementPolicy> getBookKeeperEnsemblePlacementPolicyClassName() {
return bookKeeperEnsemblePlacementPolicyClassName;
} | 3.68 |
hadoop_MappableBlockLoader_shutdown | /**
* Clean up cache, can be used during DataNode shutdown.
*/
void shutdown() {
// Do nothing.
} | 3.68 |
framework_Design_setComponentFactory | /**
* Sets the component factory that is used for creating component instances
* based on fully qualified class names derived from a design file.
* <p>
* Please note that this setting is global, so care should be taken to avoid
* conflicting changes.
*
* @param componentFactory
* the component factory to set; not <code>null</code>
*
* @since 7.4.1
*/
public static void setComponentFactory(ComponentFactory componentFactory) {
if (componentFactory == null) {
throw new IllegalArgumentException(
"Cannot set null component factory");
}
Design.componentFactory = componentFactory;
} | 3.68 |
flink_PendingCheckpoint_abort | /** Aborts a checkpoint with reason and cause. */
public void abort(
CheckpointFailureReason reason,
@Nullable Throwable cause,
CheckpointsCleaner checkpointsCleaner,
Runnable postCleanup,
Executor executor,
CheckpointStatsTracker statsTracker) {
try {
failureCause = new CheckpointException(reason, cause);
onCompletionPromise.completeExceptionally(failureCause);
masterTriggerCompletionPromise.completeExceptionally(failureCause);
assertAbortSubsumedForced(reason);
} finally {
dispose(true, checkpointsCleaner, postCleanup, executor);
}
} | 3.68 |