name | code_snippet | score |
---|---|---|
flink_CatalogManager_getCurrentDatabase | /**
* Gets the current database name that will be used when resolving table path.
*
* @return the current database
* @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
*/
public String getCurrentDatabase() {
return currentDatabaseName;
} | 3.68 |
morf_InsertStatementDefaulter_addColumnDefaults | /**
* Adds the column defaults for missing columns to the {@code statement}.
*
* @param statement the statement to add to.
* @param columnsWithValues the columns for which we have values.
*/
private InsertStatement addColumnDefaults(InsertStatement statement, Set<String> columnsWithValues) {
Table table = metadata.getTable(statement.getTable().getName().toUpperCase());
if (table == null) {
throw new IllegalArgumentException("Could not find table in schema for: " + statement.getTable().getName());
}
List<AliasedFieldBuilder> aliasedFieldBuilders = Lists.newArrayList();
for (Column currentColumn : table.columns()) {
// Default date columns to null and skip columns we've already added.
if (columnsWithValues.contains(currentColumn.getUpperCaseName())) {
continue;
}
AliasedField fieldDefault = getFieldDefault(currentColumn);
if (fieldDefault == null) {
continue;
}
if(AliasedField.immutableDslEnabled()) {
aliasedFieldBuilders.add(fieldDefault.as(currentColumn.getName()));
}
else {
statement.getFieldDefaults().put(currentColumn.getName(), fieldDefault);
}
}
if(AliasedField.immutableDslEnabled()) {
return statement.shallowCopy().withDefaults(aliasedFieldBuilders).build();
}
return statement;
} | 3.68 |
framework_TextArea_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#readDesign(org.jsoup.nodes.Element ,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
super.readDesign(design, designContext);
setValue(DesignFormatter.decodeFromTextNode(design.html()), false,
true);
} | 3.68 |
rocketmq-connect_AvroData_avroSchemaForUnderlyingMapEntryType | /**
* MapEntry types in connect Schemas are represented as Arrays of record.
* Return the array type from the union instead of the union itself.
*/
private static org.apache.avro.Schema avroSchemaForUnderlyingMapEntryType(
Schema schema,
org.apache.avro.Schema avroSchema) {
if (schema != null && schema.isOptional()) {
if (avroSchema.getType() == org.apache.avro.Schema.Type.UNION) {
for (org.apache.avro.Schema typeSchema : avroSchema.getTypes()) {
if (!typeSchema.getType().equals(org.apache.avro.Schema.Type.NULL)
&& org.apache.avro.Schema.Type.ARRAY.getName().equals(typeSchema.getType().getName())) {
return typeSchema;
}
}
} else {
throw new ConnectException(
"An optional schema should have an Avro Union type, not "
+ schema.getFieldType());
}
}
return avroSchema;
} | 3.68 |
framework_VAbsoluteLayout_setStyleName | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.user.client.ui.UIObject#setStyleName(java.lang.String)
*/
@Override
public void setStyleName(String style) {
super.setStyleName(style);
updateStylenames(style);
addStyleName(StyleConstants.UI_LAYOUT);
} | 3.68 |
cron-utils_FieldConstraintsBuilder_withIntValueMapping | /**
* Adds an integer-to-integer mapping. The source should be greater than the destination.
*
* @param source - some int
* @param dest - some int
* @return same FieldConstraintsBuilder instance
*/
public FieldConstraintsBuilder withIntValueMapping(final int source, final int dest) {
intMapping.put(source, dest);
return this;
} | 3.68 |
hbase_MunkresAssignment_stepThree | /**
* Corresponds to step 3 of the original algorithm.
*/
private void stepThree() {
// Find the minimum uncovered cost.
float min = leastInRow[0];
for (int r = 1; r < rows; r++) {
if (leastInRow[r] < min) {
min = leastInRow[r];
}
}
// Add the minimum cost to each of the costs in a covered row, or subtract
// the minimum cost from each of the costs in an uncovered column. As an
// optimization, do not actually modify the cost matrix yet, but track the
// adjustments that need to be made to each row and column.
for (int r = 0; r < rows; r++) {
if (rowsCovered[r]) {
rowAdjust[r] += min;
}
}
for (int c = 0; c < cols; c++) {
if (!colsCovered[c]) {
colAdjust[c] -= min;
}
}
// Since the cost matrix is not being updated yet, the minimum uncovered
// cost per row must be updated.
for (int r = 0; r < rows; r++) {
if (!colsCovered[leastInRowIndex[r]]) {
// The least value in this row was in an uncovered column, meaning that
// it would have had the minimum value subtracted from it, and therefore
// will still be the minimum value in that row.
leastInRow[r] -= min;
} else {
// The least value in this row was in a covered column and would not
// have had the minimum value subtracted from it, so the row's minimum
// could now be in another column.
for (int c = 0; c < cols; c++) {
if (cost[r][c] + colAdjust[c] + rowAdjust[r] < leastInRow[r]) {
leastInRow[r] = cost[r][c] + colAdjust[c] + rowAdjust[r];
leastInRowIndex[r] = c;
}
}
}
}
} | 3.68 |
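The deferred-update trick above can be illustrated in isolation: rather than rewriting the whole cost matrix, the effective cost of a cell is always `cost[r][c] + rowAdjust[r] + colAdjust[c]`. A minimal standalone sketch with hypothetical values (not HBase code):

```java
public class DeferredAdjustDemo {
    public static void main(String[] args) {
        float[][] cost = {{4f, 2f}, {3f, 5f}};
        float[] rowAdjust = {0f, 0f};
        float[] colAdjust = {0f, 0f};
        boolean[] rowsCovered = {true, false};
        boolean[] colsCovered = {true, false};

        float min = 2f; // pretend 2 is the minimum uncovered cost
        for (int r = 0; r < 2; r++) {
            if (rowsCovered[r]) rowAdjust[r] += min;   // covered rows: add the minimum
        }
        for (int c = 0; c < 2; c++) {
            if (!colsCovered[c]) colAdjust[c] -= min;  // uncovered columns: subtract the minimum
        }
        // Effective cost is read lazily; the matrix itself is never rewritten.
        float effective = cost[0][1] + rowAdjust[0] + colAdjust[1]; // 2 + 2 - 2 = 2
        System.out.println(effective);
    }
}
```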
hbase_BinaryComponentComparator_parseFrom | /**
* Parse a serialized representation of {@link BinaryComponentComparator}
* @param pbBytes A pb serialized {@link BinaryComponentComparator} instance
* @return An instance of {@link BinaryComponentComparator} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static BinaryComponentComparator parseFrom(final byte[] pbBytes)
throws DeserializationException {
ComparatorProtos.BinaryComponentComparator proto;
try {
proto = ComparatorProtos.BinaryComponentComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new BinaryComponentComparator(proto.getValue().toByteArray(), proto.getOffset());
} | 3.68 |
flink_WatermarkStrategy_withWatermarkAlignment | /**
* Creates a new {@link WatermarkStrategy} that configures the maximum watermark drift from
* other sources/tasks/partitions in the same watermark group. The group may contain completely
* independent sources (e.g. File and Kafka).
*
* <p>Once configured, Flink will "pause" consuming from a source/task/partition that is ahead of
* the emitted watermark in the group by more than the maxAllowedWatermarkDrift.
*
* @param watermarkGroup A group of sources to align watermarks
* @param maxAllowedWatermarkDrift Maximal drift, before we pause consuming from the
* source/task/partition
* @param updateInterval How often tasks should notify coordinator about the current watermark
* and how often the coordinator should announce the maximal aligned watermark.
*/
@PublicEvolving
default WatermarkStrategy<T> withWatermarkAlignment(
String watermarkGroup, Duration maxAllowedWatermarkDrift, Duration updateInterval) {
return new WatermarksWithWatermarkAlignment<T>(
this, watermarkGroup, maxAllowedWatermarkDrift, updateInterval);
} | 3.68 |
hudi_AvroSchemaUtils_isValidEvolutionOf | /**
* Validate whether the {@code targetSchema} is a valid evolution of {@code sourceSchema}.
* Basically {@link #isCompatibleProjectionOf(Schema, Schema)}, but with type promotion
* allowed in the opposite direction.
*/
public static boolean isValidEvolutionOf(Schema sourceSchema, Schema targetSchema) {
return (sourceSchema.getType() == Schema.Type.NULL) || isProjectionOfInternal(sourceSchema, targetSchema,
AvroSchemaUtils::isAtomicSchemasCompatibleEvolution);
} | 3.68 |
flink_DataStream_assignTimestampsAndWatermarks | /**
* Assigns timestamps to the elements in the data stream and creates watermarks based on events,
* to signal event time progress.
*
* <p>This method uses the deprecated watermark generator interfaces. Please switch to {@link
* #assignTimestampsAndWatermarks(WatermarkStrategy)} to use the new interfaces instead. The new
* interfaces support watermark idleness and no longer need to differentiate between "periodic"
* and "punctuated" watermarks.
*
* @deprecated Please use {@link #assignTimestampsAndWatermarks(WatermarkStrategy)} instead.
*/
@Deprecated
public SingleOutputStreamOperator<T> assignTimestampsAndWatermarks(
AssignerWithPunctuatedWatermarks<T> timestampAndWatermarkAssigner) {
final AssignerWithPunctuatedWatermarks<T> cleanedAssigner =
clean(timestampAndWatermarkAssigner);
final WatermarkStrategy<T> wms =
new AssignerWithPunctuatedWatermarksAdapter.Strategy<>(cleanedAssigner);
return assignTimestampsAndWatermarks(wms);
} | 3.68 |
zxing_PDF417ErrorCorrection_generateErrorCorrection | /**
* Generates the error correction codewords according to 4.10 in ISO/IEC 15438:2001(E).
*
* @param dataCodewords the data codewords
* @param errorCorrectionLevel the error correction level (0-8)
* @return the String representing the error correction codewords
*/
static String generateErrorCorrection(CharSequence dataCodewords, int errorCorrectionLevel) {
int k = getErrorCorrectionCodewordCount(errorCorrectionLevel);
char[] e = new char[k];
int sld = dataCodewords.length();
for (int i = 0; i < sld; i++) {
int t1 = (dataCodewords.charAt(i) + e[e.length - 1]) % 929;
int t2;
int t3;
for (int j = k - 1; j >= 1; j--) {
t2 = (t1 * EC_COEFFICIENTS[errorCorrectionLevel][j]) % 929;
t3 = 929 - t2;
e[j] = (char) ((e[j - 1] + t3) % 929);
}
t2 = (t1 * EC_COEFFICIENTS[errorCorrectionLevel][0]) % 929;
t3 = 929 - t2;
e[0] = (char) (t3 % 929);
}
StringBuilder sb = new StringBuilder(k);
for (int j = k - 1; j >= 0; j--) {
if (e[j] != 0) {
e[j] = (char) (929 - e[j]);
}
sb.append(e[j]);
}
return sb.toString();
} | 3.68 |
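A hedged usage sketch for the routine above. The data codewords are PDF417 codeword values in the range 0..928 carried as chars (not display text); level 2 yields 2^(2+1) = 8 error-correction codewords, and the call assumes same-package access since the method is package-private. The sample values are hypothetical.

```java
// Hypothetical data codewords (the first value is the symbol length descriptor).
String dataCodewords = new String(new char[] {5, 453, 178, 121, 239});
String ec = PDF417ErrorCorrection.generateErrorCorrection(dataCodewords, 2);
// The barcode codeword stream is the data codewords followed by the error-correction codewords.
String codewordStream = dataCodewords + ec;
```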
querydsl_Alias_var | /**
* Create a new variable path
*
* @param arg alias
* @return expression
*/
public static StringPath var(String arg) {
return Expressions.stringPath(arg.replace(' ', '_'));
} | 3.68 |
framework_AbstractMedia_setAltText | /**
* Sets the alternative text to be displayed if the browser does not support
* HTML5. This text is rendered as HTML if
* {@link #setHtmlContentAllowed(boolean)} is set to true. With HTML
* rendering, this method can also be used to implement fallback to a
* flash-based player, see the <a href=
* "https://developer.mozilla.org/En/Using_audio_and_video_in_Firefox#Using_Flash"
* >Mozilla Developer Network</a> for details.
*
* @param altText the alternative text to display
*/
public void setAltText(String altText) {
getState().altText = altText;
} | 3.68 |
morf_DatabaseMetaDataProvider_createColumnsFrom | /**
* Creates a list of table columns from given columns and map of primary key columns.
* Also reorders the primary key columns between themselves to reflect the order of columns within the primary key.
*
* @param originalColumns Collection of table columns to work with.
* @param primaryKey Map of respective positions by column names.
* @return List of table columns.
*/
protected static List<Column> createColumnsFrom(Collection<ColumnBuilder> originalColumns, Map<AName, Integer> primaryKey) {
final List<Column> primaryKeyColumns = new ArrayList<>(Collections.nCopies(primaryKey.size(), null));
final List<Supplier<Column>> results = new ArrayList<>(originalColumns.size());
// Reorder primary-key columns between themselves according to their ordering within provided reference
// All non-primary-key columns simply keep their original positions
Iterator<Integer> numberer = IntStream.rangeClosed(0, primaryKey.size()).iterator();
for (ColumnBuilder column : originalColumns) {
if (primaryKey.containsKey(named(column.getName()))) {
Integer primaryKeyPosition = primaryKey.get(named(column.getName()));
primaryKeyColumns.set(primaryKeyPosition, column.primaryKey());
results.add(() -> primaryKeyColumns.get(numberer.next()));
}
else {
results.add(Suppliers.ofInstance(column));
}
}
return results.stream().map(Supplier::get).collect(Collectors.toList());
} | 3.68 |
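The supplier-based reordering above can be shown with plain strings: non-key entries keep their slots, while key entries are re-emitted in the order given by a pre-ordered side list. A minimal standalone sketch (hypothetical names, not morf types):

```java
import java.util.*;
import java.util.function.Supplier;
import java.util.stream.*;

public class ReorderSketch {
    public static void main(String[] args) {
        List<String> orderedKeys = Arrays.asList("pk2", "pk1");            // desired primary-key order
        Iterator<Integer> numberer = IntStream.range(0, orderedKeys.size()).iterator();
        List<Supplier<String>> slots = new ArrayList<>();
        for (String col : Arrays.asList("a", "pk1", "b", "pk2")) {
            if (col.startsWith("pk")) {
                slots.add(() -> orderedKeys.get(numberer.next()));         // slot kept, value taken in key order
            } else {
                slots.add(() -> col);                                      // non-key columns are untouched
            }
        }
        List<String> result = slots.stream().map(Supplier::get).collect(Collectors.toList());
        System.out.println(result);                                        // [a, pk2, b, pk1]
    }
}
```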
rocketmq-connect_JsonConverter_convertToJsonWithoutEnvelope | /**
* Convert to JSON without an envelope.
*
* @param schema the schema of the value
* @param value the value to convert
* @return the converted JSON representation
*/
private Object convertToJsonWithoutEnvelope(Schema schema, Object value) {
return convertToJson(schema, value);
} | 3.68 |
morf_SelectStatement_union | /**
* Perform an UNION set operation with another {@code selectStatement},
* eliminating any duplicate rows.
*
* <p>It is possible to have more than one union statement by chaining union calls:</p>
* <blockquote><pre>
* SelectStatement stmtA = select(...).from(...);
* SelectStatement stmtB = select(...).from(...);
* SelectStatement stmtC = select(...).from(...).union(stmtA).union(stmtB).orderBy(...);
* </pre></blockquote>
*
* <p>If a union operation is performed then all
* participating select statements require the same selected column list, i.e.
* same naming and ordering. In addition, only the leftmost select statement
* should have an order-by statement (see example above).</p>
*
* @param selectStatement the select statement to be united with the current select statement;
* @return a new select statement with the change applied.
*/
public SelectStatement union(SelectStatement selectStatement) {
return copyOnWriteOrMutate(
(SelectStatementBuilder b) -> b.union(selectStatement),
() -> setOperators.add(new UnionSetOperator(UnionStrategy.DISTINCT, this, selectStatement))
);
} | 3.68 |
flink_FieldAccessorFactory_getAccessor | /**
* Creates a {@link FieldAccessor} for the field that is given by a field expression, which can
* be used to get and set the specified field on instances of this type.
*
* @param typeInfo The type information of the objects on which the field is accessed
* @param field The field expression
* @param config Configuration object
* @param <F> The type of the field to access
* @return The created FieldAccessor
*/
@Internal
public static <T, F> FieldAccessor<T, F> getAccessor(
TypeInformation<T> typeInfo, String field, ExecutionConfig config) {
// In case of arrays
if (typeInfo instanceof BasicArrayTypeInfo || typeInfo instanceof PrimitiveArrayTypeInfo) {
try {
return new FieldAccessor.ArrayFieldAccessor<>(Integer.parseInt(field), typeInfo);
} catch (NumberFormatException ex) {
throw new CompositeType.InvalidFieldReferenceException(
"A field expression on an array must be an integer index (that might be given as a string).");
}
// In case of basic types
} else if (typeInfo instanceof BasicTypeInfo) {
try {
int pos =
field.equals(Keys.ExpressionKeys.SELECT_ALL_CHAR)
? 0
: Integer.parseInt(field);
return FieldAccessorFactory.getAccessor(typeInfo, pos, config);
} catch (NumberFormatException ex) {
throw new CompositeType.InvalidFieldReferenceException(
"You tried to select the field \""
+ field
+ "\" on a "
+ typeInfo.toString()
+ ". A field expression on a basic type can only be \"*\" or \"0\""
+ " (both of which mean selecting the entire basic type).");
}
// In case of Pojos
} else if (typeInfo instanceof PojoTypeInfo) {
FieldExpression decomp = decomposeFieldExpression(field);
PojoTypeInfo<?> pojoTypeInfo = (PojoTypeInfo) typeInfo;
int fieldIndex = pojoTypeInfo.getFieldIndex(decomp.head);
if (fieldIndex == -1) {
throw new CompositeType.InvalidFieldReferenceException(
"Unable to find field \"" + decomp.head + "\" in type " + typeInfo + ".");
} else {
PojoField pojoField = pojoTypeInfo.getPojoFieldAt(fieldIndex);
TypeInformation<?> fieldType = pojoTypeInfo.getTypeAt(fieldIndex);
if (decomp.tail == null) {
@SuppressWarnings("unchecked")
FieldAccessor<F, F> innerAccessor =
new FieldAccessor.SimpleFieldAccessor<>((TypeInformation<F>) fieldType);
return new FieldAccessor.PojoFieldAccessor<>(
pojoField.getField(), innerAccessor);
} else {
@SuppressWarnings("unchecked")
FieldAccessor<Object, F> innerAccessor =
FieldAccessorFactory.getAccessor(
(TypeInformation<Object>) fieldType, decomp.tail, config);
return new FieldAccessor.PojoFieldAccessor<>(
pojoField.getField(), innerAccessor);
}
}
// In case of case classes
} else if (typeInfo.isTupleType() && ((TupleTypeInfoBase) typeInfo).isCaseClass()) {
TupleTypeInfoBase tupleTypeInfo = (TupleTypeInfoBase) typeInfo;
FieldExpression decomp = decomposeFieldExpression(field);
int fieldPos = tupleTypeInfo.getFieldIndex(decomp.head);
if (fieldPos < 0) {
throw new CompositeType.InvalidFieldReferenceException(
"Invalid field selected: " + field);
}
if (decomp.tail == null) {
if (scalaProductFieldAccessorFactory != null) {
return scalaProductFieldAccessorFactory.createSimpleProductFieldAccessor(
fieldPos, typeInfo, config);
} else {
throw new IllegalStateException(
"Scala products are used but Scala API is not on the classpath.");
}
} else {
@SuppressWarnings("unchecked")
FieldAccessor<Object, F> innerAccessor =
getAccessor(tupleTypeInfo.getTypeAt(fieldPos), decomp.tail, config);
if (scalaProductFieldAccessorFactory != null) {
return scalaProductFieldAccessorFactory.createRecursiveProductFieldAccessor(
fieldPos, typeInfo, innerAccessor, config);
} else {
throw new IllegalStateException(
"Scala products are used but Scala API is not on the classpath.");
}
}
// In case of tuples
} else if (typeInfo.isTupleType() && typeInfo instanceof TupleTypeInfo) {
TupleTypeInfo tupleTypeInfo = (TupleTypeInfo) typeInfo;
FieldExpression decomp = decomposeFieldExpression(field);
int fieldPos = tupleTypeInfo.getFieldIndex(decomp.head);
if (fieldPos == -1) {
try {
fieldPos = Integer.parseInt(decomp.head);
} catch (NumberFormatException ex) {
throw new CompositeType.InvalidFieldReferenceException(
"Tried to select field \""
+ decomp.head
+ "\" on "
+ typeInfo.toString()
+ " . Only integer values are allowed here.");
}
}
if (decomp.tail == null) {
@SuppressWarnings("unchecked")
FieldAccessor<T, F> result =
new FieldAccessor.SimpleTupleFieldAccessor(fieldPos, tupleTypeInfo);
return result;
} else {
@SuppressWarnings("unchecked")
FieldAccessor<?, F> innerAccessor =
getAccessor(tupleTypeInfo.getTypeAt(fieldPos), decomp.tail, config);
@SuppressWarnings("unchecked")
FieldAccessor<T, F> result =
new FieldAccessor.RecursiveTupleFieldAccessor(
fieldPos, innerAccessor, tupleTypeInfo);
return result;
}
// Default statement
} else {
throw new CompositeType.InvalidFieldReferenceException(
"Cannot reference field by field expression on "
+ typeInfo.toString()
+ "Field expressions are only supported on POJO types, tuples, and case classes. "
+ "(See the Flink documentation on what is considered a POJO.)");
}
} | 3.68 |
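A hedged usage sketch of the accessor on a nested POJO field. The Event/User classes and the field names are hypothetical; only the getAccessor/get/set calls follow the API shown above.

```java
// Hypothetical POJOs; Flink treats them as PojoTypeInfo because the fields are public
// and a no-arg constructor exists.
public class User { public String name; }
public class Event { public User user; }

TypeInformation<Event> typeInfo = TypeExtractor.getForClass(Event.class);
FieldAccessor<Event, String> accessor =
        FieldAccessorFactory.getAccessor(typeInfo, "user.name", new ExecutionConfig());

Event event = new Event();
event.user = new User();
event.user.name = "bob";

String name = accessor.get(event);              // "bob"
Event updated = accessor.set(event, "alice");   // writes the nested field, returns the record
```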
flink_StreamExecutionEnvironment_isUnalignedCheckpointsEnabled | /** Returns whether unaligned checkpoints are enabled. */
@PublicEvolving
public boolean isUnalignedCheckpointsEnabled() {
return checkpointCfg.isUnalignedCheckpointsEnabled();
} | 3.68 |
hudi_InternalSchemaCache_searchSchemaAndCache | /**
* Search internalSchema based on versionID.
* First, try to get the internalSchema from the hoodie commit files; no lock is needed for this step.
* If the internalSchema cannot be found that way, fall back to the cache.
*
* @param versionID schema version_id need to search
* @param metaClient current hoodie metaClient
* @return internalSchema
*/
public static InternalSchema searchSchemaAndCache(long versionID, HoodieTableMetaClient metaClient, boolean cacheEnable) {
Option<InternalSchema> candidateSchema = getSchemaByReadingCommitFile(versionID, metaClient);
if (candidateSchema.isPresent()) {
return candidateSchema.get();
}
if (!cacheEnable) {
// parse history schema and return directly
return InternalSchemaUtils.searchSchema(versionID, getHistoricalSchemas(metaClient));
}
String tablePath = metaClient.getBasePath();
// use segment lock to reduce competition.
synchronized (lockList[tablePath.hashCode() & (lockList.length - 1)]) {
TreeMap<Long, InternalSchema> historicalSchemas = HISTORICAL_SCHEMA_CACHE.getIfPresent(tablePath);
if (historicalSchemas == null || InternalSchemaUtils.searchSchema(versionID, historicalSchemas) == null) {
historicalSchemas = getHistoricalSchemas(metaClient);
HISTORICAL_SCHEMA_CACHE.put(tablePath, historicalSchemas);
} else {
long maxVersionId = historicalSchemas.keySet().stream().max(Long::compareTo).get();
if (versionID > maxVersionId) {
historicalSchemas = getHistoricalSchemas(metaClient);
HISTORICAL_SCHEMA_CACHE.put(tablePath, historicalSchemas);
}
}
return InternalSchemaUtils.searchSchema(versionID, historicalSchemas);
}
} | 3.68 |
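The segment-lock idea used above reduces contention by hashing each table path onto one of a small, power-of-two-sized array of monitors, so different tables rarely block each other. A minimal sketch of that pattern (assumed field shapes, not the actual Hudi declarations):

```java
final class SegmentLocks {
    // Power-of-two length so the mask below always selects a valid index.
    private static final Object[] LOCKS = new Object[16];
    static {
        for (int i = 0; i < LOCKS.length; i++) {
            LOCKS[i] = new Object();
        }
    }

    static void withSegmentLock(String key, Runnable action) {
        // Keys that hash to different segments never block each other.
        synchronized (LOCKS[key.hashCode() & (LOCKS.length - 1)]) {
            action.run();
        }
    }
}
```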
framework_ComboBoxScrollingWithArrows_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
VerticalLayout layout = new VerticalLayout();
addComponent(layout);
addComboBox(layout);
} | 3.68 |
flink_CheckpointConfig_setForceCheckpointing | /**
* Sets whether checkpointing is forced, despite currently non-checkpointable iteration
* feedback.
*
* @param forceCheckpointing The flag to force checkpointing.
* @deprecated This will be removed once iterations properly participate in checkpointing.
*/
@Deprecated
@PublicEvolving
public void setForceCheckpointing(boolean forceCheckpointing) {
configuration.set(ExecutionCheckpointingOptions.FORCE_CHECKPOINTING, forceCheckpointing);
} | 3.68 |
morf_Upgrade_getUpgradeAuditRowCount | /**
* Provides the number of already applied upgrade steps.
* @return the number of upgrade steps from the UpgradeAudit table
*/
long getUpgradeAuditRowCount(ResultSetProcessor<Long> processor) {
SelectStatement selectStatement = selectUpgradeAuditTableCount();
long appliedUpgradeStepsCount = -1;
try {
SqlScriptExecutorProvider sqlScriptExecutorProvider = new SqlScriptExecutorProvider(connectionResources);
appliedUpgradeStepsCount = sqlScriptExecutorProvider.get()
.executeQuery(connectionResources.sqlDialect().convertStatementToSQL(selectStatement), processor);
}
catch (Exception e) {
log.warn("Unable to read from UpgradeAudit table", e);
}
log.debug("Returning number of applied upgrade steps [" + appliedUpgradeStepsCount + "]");
return appliedUpgradeStepsCount;
} | 3.68 |
dubbo_Parameters_getExtension | /**
* @deprecated will be removed in 3.3.0
*/
@Deprecated
public <T> T getExtension(Class<T> type, String key, String defaultValue) {
String name = getParameter(key, defaultValue);
return ExtensionLoader.getExtensionLoader(type).getExtension(name);
} | 3.68 |
framework_VOverlay_setOverlayContainerLabel | /**
* Set the label of the container element to which tooltips, notifications and
* dialogs are added.
*
* @param applicationConnection
* the application connection for which to change the label
* @param overlayContainerLabel
* label for the container
*/
public static void setOverlayContainerLabel(
ApplicationConnection applicationConnection,
String overlayContainerLabel) {
Roles.getAlertRole().setAriaLabelProperty(
VOverlay.getOverlayContainer(applicationConnection),
overlayContainerLabel);
} | 3.68 |
hadoop_OBSCommonUtils_uploadPart | /**
* Upload part of a multi-partition file. Increments the write and put
* counters. <i>Important: this call does not close any input stream in the
* request.</i>
*
* @param owner the owner OBSFileSystem instance
* @param request request
* @return the result of the operation.
* @throws ObsException on problems
*/
static UploadPartResult uploadPart(final OBSFileSystem owner,
final UploadPartRequest request) throws ObsException {
long len = request.getPartSize();
UploadPartResult uploadPartResult = owner.getObsClient()
.uploadPart(request);
owner.getSchemeStatistics().incrementWriteOps(1);
owner.getSchemeStatistics().incrementBytesWritten(len);
return uploadPartResult;
} | 3.68 |
framework_Design_findClassWithAnnotation | /**
* Find the first class with the given annotation, starting the search from
* the given class and moving upwards in the class hierarchy.
*
* @param componentClass
* the class to check
* @param annotationClass
* the annotation to look for
* @return the first class with the given annotation or null if no class
* with the annotation was found
*/
private static Class<? extends Component> findClassWithAnnotation(
Class<? extends Component> componentClass,
Class<? extends Annotation> annotationClass) {
if (componentClass == null) {
return null;
}
if (componentClass.isAnnotationPresent(annotationClass)) {
return componentClass;
}
Class<?> superClass = componentClass.getSuperclass();
if (!Component.class.isAssignableFrom(superClass)) {
return null;
}
return findClassWithAnnotation(superClass.asSubclass(Component.class),
annotationClass);
} | 3.68 |
open-banking-gateway_WebDriverBasedAccountInformation_sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only | // Sending cookie with last request as it doesn't exist in browser for API tests
// A null cookieDomain is the valid value for localhost tests and works correctly there.
public SELF sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only(WebDriver driver, String authSessionCookie) {
waitForPageLoad(driver);
add_open_banking_auth_session_key_cookie_to_selenium(driver, authSessionCookie);
try {
clickOnButton(driver, By.className("btn-primary"), true);
} finally {
driver.manage().deleteCookieNamed(AUTHORIZATION_SESSION_KEY);
}
return self();
} | 3.68 |
framework_TabSheetScrollOnTabClose_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final TabSheet tabSheet = new TabSheet();
for (int i = 0; i < 10; i++) {
Tab tab = tabSheet.addTab(new CssLayout(), "tab " + i);
tab.setClosable(true);
tab.setId("tab" + i);
}
tabSheet.setWidth(250, Unit.PIXELS);
addComponent(tabSheet);
addComponent(new Label("Close tab number"));
for (int i = 0; i < 10; i++) {
final String tabCaption = "tab " + i;
final Button b = new Button("" + i);
b.addClickListener(event -> {
b.setEnabled(false);
tabSheet.removeTab(getTab(tabSheet, tabCaption));
});
addComponent(b);
}
} | 3.68 |
hudi_AbstractTableFileSystemView_ensurePartitionLoadedCorrectly | /**
* Allows lazily loading the partitions if needed.
*
* @param partition partition to be loaded if not present
*/
private void ensurePartitionLoadedCorrectly(String partition) {
ValidationUtils.checkArgument(!isClosed(), "View is already closed");
// ensure we list files only once even in the face of concurrency
addedPartitions.computeIfAbsent(partition, (partitionPathStr) -> {
long beginTs = System.currentTimeMillis();
if (!isPartitionAvailableInStore(partitionPathStr)) {
// Not loaded yet
try {
LOG.info("Building file system view for partition (" + partitionPathStr + ")");
List<HoodieFileGroup> groups = addFilesToView(getAllFilesInPartition(partitionPathStr));
if (groups.isEmpty()) {
storePartitionView(partitionPathStr, new ArrayList<>());
}
} catch (IOException e) {
throw new HoodieIOException("Failed to list base files in partition " + partitionPathStr, e);
}
} else {
LOG.debug("View already built for Partition :" + partitionPathStr + ", FOUND is ");
}
long endTs = System.currentTimeMillis();
LOG.debug("Time to load partition (" + partitionPathStr + ") =" + (endTs - beginTs));
return true;
});
} | 3.68 |
flink_LimitedConnectionsFileSystem_unregisterInputStream | /**
* Atomically removes the given input stream from the set of currently open input streams, and
* signals that a new stream can now be opened.
*/
void unregisterInputStream(InStream stream) {
lock.lock();
try {
// only decrement if we actually remove the stream
if (openInputStreams.remove(stream)) {
numReservedInputStreams--;
available.signalAll();
}
} finally {
lock.unlock();
}
} | 3.68 |
hbase_MasterRpcServices_getTableDescriptors | /**
* Get list of TableDescriptors for requested tables.
* @param c Unused (set to null).
* @param req GetTableDescriptorsRequest that contains: - tableNames: requested tables, or if
* empty, all are requested.
*/
@Override
public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
GetTableDescriptorsRequest req) throws ServiceException {
try {
server.checkInitialized();
final String regex = req.hasRegex() ? req.getRegex() : null;
final String namespace = req.hasNamespace() ? req.getNamespace() : null;
List<TableName> tableNameList = null;
if (req.getTableNamesCount() > 0) {
tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
}
}
List<TableDescriptor> descriptors =
server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables());
GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
if (descriptors != null && descriptors.size() > 0) {
// Add the table descriptors to the response
for (TableDescriptor htd : descriptors) {
builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
}
}
return builder.build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
} | 3.68 |
morf_ExceptSetOperator_getSelectStatement | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.sql.SetOperator#getSelectStatement()
*/
@Override
public SelectStatement getSelectStatement() {
return selectStatement;
} | 3.68 |
hbase_Bytes_len | /**
* Returns length of the byte array, returning 0 if the array is null. Useful for calculating
* sizes.
* @param b byte array, which can be null
* @return 0 if b is null, otherwise returns length
*/
final public static int len(byte[] b) {
return b == null ? 0 : b.length;
} | 3.68 |
flink_SharedSlot_cancelLogicalSlotRequest | /**
* Cancels a logical slot request.
*
* <p>If the logical slot request is already complete, nothing happens because the logical slot
* is already given to the execution and it is the responsibility of the execution to call {@link
* #returnLogicalSlot(LogicalSlot)}.
*
* <p>If the logical slot request is not complete yet, its future gets cancelled or failed.
*
* @param executionVertexID {@link ExecutionVertexID} of the execution for which to cancel the
* logical slot
* @param cause the reason of cancellation or null if it is not available
*/
void cancelLogicalSlotRequest(ExecutionVertexID executionVertexID, @Nullable Throwable cause) {
Preconditions.checkState(
state == State.ALLOCATED,
"SharedSlot (physical request %s) has been released",
physicalSlotRequestId);
CompletableFuture<SingleLogicalSlot> logicalSlotFuture =
requestedLogicalSlots.getValueByKeyA(executionVertexID);
SlotRequestId logicalSlotRequestId = requestedLogicalSlots.getKeyBByKeyA(executionVertexID);
if (logicalSlotFuture != null) {
LOG.debug(
"Cancel {} from {}",
getLogicalSlotString(logicalSlotRequestId),
executionVertexID);
// If the logicalSlotFuture was not completed and now it fails, the exceptionally
// callback will also call removeLogicalSlotRequest
if (cause == null) {
logicalSlotFuture.cancel(false);
} else {
logicalSlotFuture.completeExceptionally(cause);
}
} else {
LOG.debug(
"No request for logical {} from physical {}}",
logicalSlotRequestId,
physicalSlotRequestId);
}
} | 3.68 |
hadoop_HdfsFileStatus_getFullName | /**
* Get the string representation of the full path name.
* @param parent the parent path
* @return the full path in string
*/
default String getFullName(String parent) {
if (isEmptyLocalName()) {
return parent;
}
StringBuilder fullName = new StringBuilder(parent);
if (!parent.endsWith(Path.SEPARATOR)) {
fullName.append(Path.SEPARATOR);
}
fullName.append(getLocalName());
return fullName.toString();
} | 3.68 |
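A hedged example of the joining behaviour, assuming a status whose local name is "data.txt":

```java
// status.getFullName("/user/alice")   -> "/user/alice/data.txt"
// status.getFullName("/user/alice/")  -> "/user/alice/data.txt"   (no duplicate separator is added)
// A status with an empty local name returns the parent path unchanged.
```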
flink_DiskCacheManager_flushBuffers | /**
* Note that the request to flush buffers may come from the disk check thread or the task
* thread, so the method itself must ensure thread safety.
*/
private synchronized void flushBuffers(boolean forceFlush) {
if (!forceFlush && !hasFlushCompleted.isDone()) {
return;
}
List<PartitionFileWriter.SubpartitionBufferContext> buffersToFlush = new ArrayList<>();
int numToWriteBuffers = getSubpartitionToFlushBuffers(buffersToFlush);
if (numToWriteBuffers > 0) {
CompletableFuture<Void> flushCompletableFuture =
partitionFileWriter.write(partitionId, buffersToFlush);
if (!forceFlush) {
hasFlushCompleted = flushCompletableFuture;
}
}
numCachedBytesCounter = 0;
} | 3.68 |
flink_TableFunctionCollector_setCollector | /** Sets the current collector, which used to emit the final row. */
public void setCollector(Collector<?> collector) {
this.collector = collector;
} | 3.68 |
hadoop_MapReduceJobPropertiesParser_getLatestKeyName | // Finds a corresponding key for the specified key in the current mapreduce
// setup.
// Note that this API uses a cached copy of the Configuration object. This is
// purely for performance reasons.
private String getLatestKeyName(String key) {
// set the specified key
configuration.set(key, key);
try {
// check if keys in MRConfig maps to the specified key.
for (Field f : mrFields) {
String mrKey = f.get(f.getName()).toString();
if (configuration.get(mrKey) != null) {
return mrKey;
}
}
// unset the key
return null;
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
} finally {
// clean up!
configuration.clear();
}
} | 3.68 |
hadoop_AuxiliaryService_setRecoveryPath | /**
* Set the path for this auxiliary service to use for storing state
* that will be used during recovery.
*
* @param recoveryPath where recoverable state should be stored
*/
public void setRecoveryPath(Path recoveryPath) {
this.recoveryPath = recoveryPath;
} | 3.68 |
framework_ConnectorEvent_getConnector | /**
* Returns the connector that fired the event.
*
* @return the source connector
*/
public ClientConnector getConnector() {
return (ClientConnector) getSource();
} | 3.68 |
flink_CheckpointConfig_configure | /**
* Sets all relevant options contained in the {@link ReadableConfig} such as e.g. {@link
* ExecutionCheckpointingOptions#CHECKPOINTING_MODE}.
*
* <p>It will change the value of a setting only if a corresponding option was set in the {@code
* configuration}. If a key is not present, the current value of a field will remain untouched.
*
* @param configuration a configuration to read the values from
*/
public void configure(ReadableConfig configuration) {
configuration
.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_MODE)
.ifPresent(this::setCheckpointingMode);
configuration
.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL)
.ifPresent(i -> this.setCheckpointInterval(i.toMillis()));
configuration
.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG)
.ifPresent(i -> this.setCheckpointIntervalDuringBacklog(i.toMillis()));
configuration
.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_TIMEOUT)
.ifPresent(t -> this.setCheckpointTimeout(t.toMillis()));
configuration
.getOptional(ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS)
.ifPresent(this::setMaxConcurrentCheckpoints);
configuration
.getOptional(ExecutionCheckpointingOptions.MIN_PAUSE_BETWEEN_CHECKPOINTS)
.ifPresent(m -> this.setMinPauseBetweenCheckpoints(m.toMillis()));
configuration
.getOptional(ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER)
.ifPresent(this::setTolerableCheckpointFailureNumber);
configuration
.getOptional(ExecutionCheckpointingOptions.EXTERNALIZED_CHECKPOINT)
.ifPresent(this::setExternalizedCheckpointCleanup);
configuration
.getOptional(ExecutionCheckpointingOptions.ENABLE_UNALIGNED)
.ifPresent(this::enableUnalignedCheckpoints);
configuration
.getOptional(ExecutionCheckpointingOptions.CHECKPOINT_ID_OF_IGNORED_IN_FLIGHT_DATA)
.ifPresent(this::setCheckpointIdOfIgnoredInFlightData);
configuration
.getOptional(ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT)
.ifPresent(this::setAlignedCheckpointTimeout);
configuration
.getOptional(
ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE)
.ifPresent(this::setMaxSubtasksPerChannelStateFile);
configuration
.getOptional(ExecutionCheckpointingOptions.FORCE_UNALIGNED)
.ifPresent(this::setForceUnalignedCheckpoints);
configuration
.getOptional(CheckpointingOptions.CHECKPOINTS_DIRECTORY)
.ifPresent(this::setCheckpointStorage);
} | 3.68 |
flink_DataStream_getId | /**
* Returns the ID of the {@link DataStream} in the current {@link StreamExecutionEnvironment}.
*
* @return ID of the DataStream
*/
@Internal
public int getId() {
return transformation.getId();
} | 3.68 |
flink_BeamPythonFunctionRunner_getWindowCoderProto | /** Gets the proto representation of the window coder. */
private RunnerApi.Coder getWindowCoderProto() {
return RunnerApi.Coder.newBuilder()
.setSpec(
RunnerApi.FunctionSpec.newBuilder()
.setUrn(ModelCoders.GLOBAL_WINDOW_CODER_URN)
.build())
.build();
} | 3.68 |
flink_AvailabilityProvider_isAvailable | /**
* In order to best-effort avoid volatile access in {@link CompletableFuture#isDone()}, we check
* the condition of <code>future == AVAILABLE</code> firstly for getting probable performance
* benefits while hot looping.
*
* <p>It is always safe to use this method in performance-insensitive scenarios to get the
* precise state.
*
* @return true if this instance is available for further processing.
*/
default boolean isAvailable() {
CompletableFuture<?> future = getAvailableFuture();
return future == AVAILABLE || future.isDone();
} | 3.68 |
hadoop_RouterDistCpProcedure_enableWrite | /**
* Enable write.
*/
@Override
protected void enableWrite() throws IOException {
// do nothing.
} | 3.68 |
hbase_OutputSink_startWriterThreads | /**
* Start the threads that will pump data from the entryBuffers to the output files.
*/
void startWriterThreads() throws IOException {
for (int i = 0; i < numThreads; i++) {
WriterThread t = new WriterThread(controller, entryBuffers, this, i);
t.start();
writerThreads.add(t);
}
} | 3.68 |
dubbo_SlidingWindow_currentPane | /**
* Get the pane at the specified timestamp in milliseconds.
*
* @param timeMillis a timestamp in milliseconds.
* @return the pane at the specified timestamp if the time is valid; null if time is invalid.
*/
public Pane<T> currentPane(long timeMillis) {
if (timeMillis < 0) {
return null;
}
int paneIdx = calculatePaneIdx(timeMillis);
long paneStartInMs = calculatePaneStart(timeMillis);
while (true) {
Pane<T> oldPane = referenceArray.get(paneIdx);
// Create a pane instance when the pane does not exist.
if (oldPane == null) {
Pane<T> pane = new Pane<>(paneIntervalInMs, paneStartInMs, newEmptyValue(timeMillis));
if (referenceArray.compareAndSet(paneIdx, null, pane)) {
return pane;
} else {
// Contention failed, the thread will yield its time slice to wait for pane available.
Thread.yield();
}
}
// The pane is current (start times match); reuse it directly.
else if (paneStartInMs == oldPane.getStartInMs()) {
return oldPane;
}
// The pane is outdated. To avoid the overhead of creating a new instance, reset the original
// pane in place.
else if (paneStartInMs > oldPane.getStartInMs()) {
if (updateLock.tryLock()) {
try {
return resetPaneTo(oldPane, paneStartInMs);
} finally {
updateLock.unlock();
}
} else {
// Contention failed, the thread will yield its time slice to wait for pane available.
Thread.yield();
}
}
// The specified timestamp has passed.
else if (paneStartInMs < oldPane.getStartInMs()) {
return new Pane<>(paneIntervalInMs, paneStartInMs, newEmptyValue(timeMillis));
}
}
} | 3.68 |
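The method relies on two index helpers that are not shown in the snippet. A minimal sketch of the math they are assumed to implement, based on the usual leap-array layout (assumed code, not the actual Dubbo implementation):

```java
// Map a timestamp to a slot in the circular array of panes
// (paneIntervalInMs and referenceArray come from the surrounding class).
private int calculatePaneIdx(long timeMillis) {
    long timeId = timeMillis / paneIntervalInMs;
    return (int) (timeId % referenceArray.length());
}

// Align the timestamp down to the start of its pane.
private long calculatePaneStart(long timeMillis) {
    return timeMillis - timeMillis % paneIntervalInMs;
}
```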
pulsar_Schema_AUTO_CONSUME | /**
* Create a schema instance that automatically deserializes messages
* based on the current topic schema.
*
* <p>The messages values are deserialized into a {@link GenericRecord} object,
* that extends the {@link GenericObject} interface.
*
* @return the auto schema instance
*/
static Schema<GenericRecord> AUTO_CONSUME() {
return DefaultImplementation.getDefaultImplementation().newAutoConsumeSchema();
} | 3.68 |
hbase_CatalogFamilyFormat_getSeqNumDuringOpen | /**
* The latest seqnum that the server writing to meta observed when opening the region. E.g. the
* seqNum when the result of {@link #getServerName} was written.
* @param r Result to pull the seqNum from
* @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
*/
private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId));
if (cell == null || cell.getValueLength() == 0) {
return HConstants.NO_SEQNUM;
}
return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.68 |
querydsl_JDOExpressions_select | /**
* Create a new detached {@link JDOQuery} instance with the given projection
*
* @param exprs projection
* @return select(exprs)
*/
public static JDOQuery<Tuple> select(Expression<?>... exprs) {
return new JDOQuery<Void>().select(exprs);
} | 3.68 |
hadoop_AbstractSchedulerPlanFollower_arePlanResourcesLessThanReservations | /**
* Check if plan resources are less than expected reservation resources.
*/
private boolean arePlanResourcesLessThanReservations(
ResourceCalculator rescCalculator, Resource clusterResources,
Resource planResources, Resource reservedResources) {
return Resources.greaterThan(rescCalculator, clusterResources,
reservedResources, planResources);
} | 3.68 |
hbase_SortedCompactionPolicy_skipLargeFiles | /**
* @param candidates pre-filtered candidate files
* @return filtered subset excluding all files above maxCompactSize; reference files are always
* kept because they MUST be compacted
*/
protected ArrayList<HStoreFile> skipLargeFiles(ArrayList<HStoreFile> candidates,
boolean mayUseOffpeak) {
int pos = 0;
while (
pos < candidates.size() && !candidates.get(pos).isReference()
&& (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))
) {
++pos;
}
if (pos > 0) {
LOG.debug("Some files are too large. Excluding " + pos + " files from compaction candidates");
candidates.subList(0, pos).clear();
}
return candidates;
} | 3.68 |
pulsar_AuthenticationDataSource_authenticate | /**
* Evaluate and challenge the data that passed in, and return processed data back.
* It is used for mutual authentication like SASL.
* NOTE: this method is not called by the Pulsar authentication framework.
* @deprecated use {@link AuthenticationProvider} or {@link AuthenticationState}.
*/
@Deprecated
default AuthData authenticate(AuthData data) throws AuthenticationException {
throw new AuthenticationException("Not supported");
} | 3.68 |
AreaShop_BuyRegion_getMoneyBackPercentage | /**
* Get the moneyBack percentage.
* @return The % of money the player will get back when selling
*/
public double getMoneyBackPercentage() {
return Utils.evaluateToDouble(getStringSetting("buy.moneyBack"), this);
} | 3.68 |
hadoop_TextSplitter_stringToBigDecimal | /**
* Return a BigDecimal representation of string 'str' that preserves the string's
* lexicographic order when the values are compared numerically.
*/
BigDecimal stringToBigDecimal(String str) {
BigDecimal result = BigDecimal.ZERO;
BigDecimal curPlace = ONE_PLACE; // start with 1/65536 to compute the first digit.
int len = Math.min(str.length(), MAX_CHARS);
for (int i = 0; i < len; i++) {
int codePoint = str.codePointAt(i);
result = result.add(tryDivide(new BigDecimal(codePoint), curPlace));
// advance to the next less significant place. e.g., 1/(65536^2) for the second char.
curPlace = curPlace.multiply(ONE_PLACE);
}
return result;
} | 3.68 |
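A worked example of the mapping, assuming ONE_PLACE is 1/65536 so each character contributes one base-65536 fractional digit:

```java
// stringToBigDecimal("b")  = 98/65536                 ≈ 0.0014953613
// stringToBigDecimal("ba") = 98/65536 + 97/65536^2    ≈ 0.0014953839
// stringToBigDecimal("bb") = 98/65536 + 98/65536^2    ≈ 0.0014953841
// Numeric order ("b" < "ba" < "bb") matches the lexicographic order of the strings,
// which is what makes these values usable as split points.
```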
hbase_FileLink_getLocations | /** Returns the locations to look for the linked file. */
public Path[] getLocations() {
return locations;
} | 3.68 |
druid_DruidAbstractDataSource_setNumTestsPerEvictionRun | /**
* @param numTestsPerEvictionRun
*/
@Deprecated
public void setNumTestsPerEvictionRun(int numTestsPerEvictionRun) {
this.numTestsPerEvictionRun = numTestsPerEvictionRun;
} | 3.68 |
dubbo_Server_setWelcome | /**
* welcome message
*/
public void setWelcome(String welcome) {
this.welcome = welcome;
} | 3.68 |
morf_SchemaUtils_table | /**
* Build a {@link Table}.
* <p>
* Use {@link TableBuilder#columns(Column...)} to provide columns and
* {@link TableBuilder#indexes(Index...)} to provide indexes.
* </p>
*
* @param name The name of the table.
* @return A new {@link TableBuilder} for the table.
*/
public static TableBuilder table(String name) {
return new TableBuilderImpl(name);
} | 3.68 |
flink_SkipListKeySerializer_serializeNamespace | /** Serialize the namespace to bytes. */
byte[] serializeNamespace(N namespace) {
outputStream.reset();
try {
namespaceSerializer.serialize(namespace, outputView);
} catch (IOException e) {
throw new RuntimeException("serialize namespace failed", e);
}
return outputStream.toByteArray();
} | 3.68 |
hadoop_CommitUtils_verifyIsS3AFS | /**
* Verify that an FS is an S3A FS.
* @param fs filesystem
* @param path path to use in the exception
* @return the typecast FS.
* @throws PathCommitException if the FS is not an S3A FS.
*/
public static S3AFileSystem verifyIsS3AFS(FileSystem fs, Path path)
throws PathCommitException {
if (!(fs instanceof S3AFileSystem)) {
throw new PathCommitException(path, E_WRONG_FS);
}
return (S3AFileSystem) fs;
} | 3.68 |
hbase_ProcedureExecutor_executeProcedure | // ==========================================================================
// Executions
// ==========================================================================
private void executeProcedure(Procedure<TEnvironment> proc) {
if (proc.isFinished()) {
LOG.debug("{} is already finished, skipping execution", proc);
return;
}
final Long rootProcId = getRootProcedureId(proc);
if (rootProcId == null) {
// The 'proc' was ready to run but the root procedure was rolledback
LOG.warn("Rollback because parent is done/rolledback proc=" + proc);
executeRollback(proc);
return;
}
RootProcedureState<TEnvironment> procStack = rollbackStack.get(rootProcId);
if (procStack == null) {
LOG.warn("RootProcedureState is null for " + proc.getProcId());
return;
}
do {
// Try to acquire the execution
if (!procStack.acquire(proc)) {
if (procStack.setRollback()) {
// we have the 'rollback-lock' we can start rollingback
switch (executeRollback(rootProcId, procStack)) {
case LOCK_ACQUIRED:
break;
case LOCK_YIELD_WAIT:
procStack.unsetRollback();
scheduler.yield(proc);
break;
case LOCK_EVENT_WAIT:
LOG.info("LOCK_EVENT_WAIT rollback..." + proc);
procStack.unsetRollback();
break;
default:
throw new UnsupportedOperationException();
}
} else {
// If we can't roll back, it means that some child is still running;
// the rollback will be executed after all the children are done.
// If the procedure was never executed, remove it and mark it as rolled back.
if (!proc.wasExecuted()) {
switch (executeRollback(proc)) {
case LOCK_ACQUIRED:
break;
case LOCK_YIELD_WAIT:
scheduler.yield(proc);
break;
case LOCK_EVENT_WAIT:
LOG.info("LOCK_EVENT_WAIT can't rollback child running?..." + proc);
break;
default:
throw new UnsupportedOperationException();
}
}
}
break;
}
// Execute the procedure
assert proc.getState() == ProcedureState.RUNNABLE : proc;
// Note that lock is NOT about concurrency but rather about ensuring
// ownership of a procedure of an entity such as a region or table
LockState lockState = acquireLock(proc);
switch (lockState) {
case LOCK_ACQUIRED:
execProcedure(procStack, proc);
break;
case LOCK_YIELD_WAIT:
LOG.info(lockState + " " + proc);
scheduler.yield(proc);
break;
case LOCK_EVENT_WAIT:
// Someone will wake us up when the lock is available
LOG.debug(lockState + " " + proc);
break;
default:
throw new UnsupportedOperationException();
}
procStack.release(proc);
if (proc.isSuccess()) {
// update metrics on finishing the procedure
proc.updateMetricsOnFinish(getEnvironment(), proc.elapsedTime(), true);
LOG.info("Finished " + proc + " in " + StringUtils.humanTimeDiff(proc.elapsedTime()));
// Finalize the procedure state
if (proc.getProcId() == rootProcId) {
procedureFinished(proc);
} else {
execCompletionCleanup(proc);
}
break;
}
} while (procStack.isFailed());
} | 3.68 |
dubbo_RegistryDirectory_toRouters | /**
* @param urls the router URLs to convert
* @return an empty Optional if there are no routers (nothing to do); otherwise the list of routers
*/
private Optional<List<Router>> toRouters(List<URL> urls) {
if (urls == null || urls.isEmpty()) {
return Optional.empty();
}
List<Router> routers = new ArrayList<>();
for (URL url : urls) {
if (EMPTY_PROTOCOL.equals(url.getProtocol())) {
continue;
}
String routerType = url.getParameter(ROUTER_KEY);
if (routerType != null && routerType.length() > 0) {
url = url.setProtocol(routerType);
}
try {
Router router = routerFactory.getRouter(url);
if (!routers.contains(router)) {
routers.add(router);
}
} catch (Throwable t) {
logger.error(PROXY_FAILED_CONVERT_URL, "", "", "convert router url to router error, url:" + url, t);
}
}
return Optional.of(routers);
} | 3.68 |
querydsl_DefaultEvaluatorFactory_createEvaluator | /**
* Create an Evaluator for the given sources and the given optional filter
*
* @param metadata query metadata
* @param joins joins
* @param filter where condition
* @return evaluator
*/
public Evaluator<List<Object[]>> createEvaluator(QueryMetadata metadata,
List<JoinExpression> joins, @Nullable Predicate filter) {
List<String> sourceNames = new ArrayList<String>();
List<Type> sourceTypes = new ArrayList<Type>();
List<Class<?>> sourceClasses = new ArrayList<Class<?>>();
StringBuilder vars = new StringBuilder();
CollQuerySerializer ser = new CollQuerySerializer(templates);
ser.append("java.util.List<Object[]> rv = new java.util.ArrayList<Object[]>();\n");
List<String> anyJoinMatchers = new ArrayList<String>();
// creating context
for (JoinExpression join : joins) {
Expression<?> target = join.getTarget();
String typeName = com.querydsl.codegen.utils.support.ClassUtils.getName(target.getType());
if (vars.length() > 0) {
vars.append(",");
}
switch (join.getType()) {
case DEFAULT:
ser.append("for (" + typeName + " " + target + " : " + target + "_) {\n");
vars.append(target);
sourceNames.add(target + "_");
sourceTypes.add(new SimpleType(Types.ITERABLE, new ClassType(TypeCategory.SIMPLE,target.getType())));
sourceClasses.add(Iterable.class);
break;
case INNERJOIN:
case LEFTJOIN:
Operation<?> alias = (Operation<?>) join.getTarget();
boolean colAnyJoin = join.getCondition() != null && join.getCondition().toString().equals("any");
boolean leftJoin = join.getType() == JoinType.LEFTJOIN;
String matcher = null;
if (colAnyJoin) {
matcher = alias.getArg(1).toString() + "_matched";
ser.append("boolean " + matcher + " = false;\n");
anyJoinMatchers.add(matcher);
}
ser.append("for (" + typeName + " " + alias.getArg(1) + " : ");
if (leftJoin) {
ser.append(CollQueryFunctions.class.getName() + ".leftJoin(");
}
if (colAnyJoin) {
Context context = new Context();
Expression<?> replacement = alias.getArg(0)
.accept(collectionAnyVisitor, context);
ser.handle(replacement);
} else {
ser.handle(alias.getArg(0));
}
if (alias.getArg(0).getType().equals(Map.class)) {
ser.append(".values()");
}
if (leftJoin) {
ser.append(")");
}
ser.append(") {\n");
if (matcher != null) {
ser.append("if (!" + matcher + ") {\n");
}
vars.append(alias.getArg(1));
break;
default:
throw new IllegalArgumentException("Illegal join expression " + join);
}
}
// filter
if (filter != null) {
ser.append("try {\n");
ser.append("if (");
ser.handle(filter).append(") {\n");
for (String matcher : anyJoinMatchers) {
ser.append(" " + matcher + " = true;\n");
}
ser.append(" rv.add(new Object[]{" + vars + "});\n");
ser.append("}\n");
ser.append("} catch (NullPointerException npe) { }\n");
} else {
ser.append("rv.add(new Object[]{" + vars + "});\n");
}
// closing context
int amount = joins.size() + anyJoinMatchers.size();
for (int i = 0; i < amount; i++) {
ser.append("} | 3.68 |
hadoop_AutoRefreshNoHARMFailoverProxyProvider_performFailover | /**
* Stop the current proxy when performFailover.
* @param currentProxy currentProxy.
*/
@Override
public synchronized void performFailover(T currentProxy) {
RPC.stopProxy(proxy);
proxy = null;
} | 3.68 |
hbase_BackupRestoreFactory_getBackupCopyJob | /**
* Gets backup copy job
* @param conf configuration
* @return backup copy job instance
*/
public static BackupCopyJob getBackupCopyJob(Configuration conf) {
Class<? extends BackupCopyJob> cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS,
MapReduceBackupCopyJob.class, BackupCopyJob.class);
BackupCopyJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
} | 3.68 |
pulsar_KubernetesServiceAccountTokenAuthProvider_updateAuthData | /**
* No need to update anything. Kubernetes updates the token used for authentication.
*/
@Override
public Optional<FunctionAuthData> updateAuthData(Function.FunctionDetails funcDetails,
Optional<FunctionAuthData> existingFunctionAuthData,
AuthenticationDataSource authenticationDataSource)
throws Exception {
return Optional.empty();
} | 3.68 |
hbase_MetricsMaster_setNumRegionSizeReports | /**
* Sets the number of region size reports the master currently has in memory.
* @see MetricsMasterQuotaSource#updateNumCurrentSpaceQuotaRegionSizeReports(long)
*/
public void setNumRegionSizeReports(final long numRegionReports) {
masterQuotaSource.updateNumCurrentSpaceQuotaRegionSizeReports(numRegionReports);
} | 3.68 |
hibernate-validator_TypeHelper_parameterizedType | /**
* Creates a parameterized type for the specified raw type and actual type arguments.
*
* @param rawType the raw type
* @param actualTypeArguments the actual type arguments
*
* @return the parameterized type
*
* @throws MalformedParameterizedTypeException if the number of actual type arguments differs from those defined on the raw type
*/
public static ParameterizedType parameterizedType(final Class<?> rawType, final Type... actualTypeArguments) {
return new ParameterizedType() {
@Override
public Type[] getActualTypeArguments() {
return actualTypeArguments;
}
@Override
public Type getRawType() {
return rawType;
}
@Override
public Type getOwnerType() {
return null;
}
};
} | 3.68 |
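A hedged usage sketch: building the reflective type List&lt;String&gt; with the helper above.

```java
ParameterizedType listOfString = TypeHelper.parameterizedType(List.class, String.class);
assert listOfString.getRawType() == List.class;
assert listOfString.getActualTypeArguments()[0] == String.class;
assert listOfString.getOwnerType() == null;
```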
graphhopper_BitUtil_toSignedInt | /**
* Converts the specified long back into a signed int (reverse method for toUnsignedLong)
*/
public static int toSignedInt(long x) {
return (int) x;
} | 3.68 |
rocketmq-connect_Worker_checkAndTransitionToConnectors | /**
* Check the assigned connectors and transition them to their target states.
*
* @param assigns map from connector name to its assigned configuration
*/
private void checkAndTransitionToConnectors(Map<String, ConnectKeyValue> assigns) {
if (assigns == null || assigns.isEmpty()) {
return;
}
for (String connectName : assigns.keySet()) {
if (!connectors.containsKey(connectName)) {
// newly assigned connector; nothing to transition yet
continue;
}
WorkerConnector connector = connectors.get(connectName);
ConnectKeyValue newConfig = assigns.get(connectName);
connector.transitionTo(newConfig.getTargetState(), new Callback<TargetState>() {
@Override
public void onCompletion(Throwable error, TargetState result) {
if (error != null) {
log.error(error.getMessage());
} else {
if (newConfig.getTargetState() != result) {
log.info("Connector {} set target state {} successed!!", connectName, result);
}
}
}
});
}
} | 3.68 |
flink_AbstractKubernetesStepDecorator_buildAccompanyingKubernetesResources | /**
* Note that the method could have a side effect of modifying the Flink Configuration object,
* such as updating the JobManager address.
*/
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
return Collections.emptyList();
} | 3.68 |
flink_SchedulerNG_updateJobResourceRequirements | /**
* Update {@link JobResourceRequirements job resource requirements}.
*
* @param jobResourceRequirements new resource requirements
*/
default void updateJobResourceRequirements(JobResourceRequirements jobResourceRequirements) {
throw new UnsupportedOperationException(
String.format(
"The %s does not support changing the parallelism without a job restart. This feature is currently only expected to work with the %s.",
getClass().getSimpleName(), AdaptiveScheduler.class.getSimpleName()));
} | 3.68 |
hbase_DeletionListener_hasException | /**
* Check if an exception has occurred when re-setting the watch.
* @return True if we were unable to re-set a watch on a ZNode due to an exception.
*/
public boolean hasException() {
return exception != null;
} | 3.68 |
framework_Table_isColumnCollapsible | /**
* Checks if the given column is collapsible. Note that even if this method
* returns <code>true</code>, the column can only be actually collapsed (via
* UI or with {@link #setColumnCollapsed(Object, boolean)
* setColumnCollapsed()}) if {@link #isColumnCollapsingAllowed()} is also
* true.
*
* @return true if the column can be collapsed; false otherwise.
*/
public boolean isColumnCollapsible(Object propertyId) {
return !noncollapsibleColumns.contains(propertyId);
} | 3.68 |
graphhopper_AbstractBidirAlgo_finished | // http://www.cs.princeton.edu/courses/archive/spr06/cos423/Handouts/EPP%20shortest%20path%20algorithms.pdf
// a node from overlap may not be on the best path!
// => when scanning an arc (v, w) in the forward search with w already scanned in the reverse
// search, update the tentative best weight μ if df(v) + len(v, w) + dr(w) < μ
protected boolean finished() {
if (finishedFrom || finishedTo)
return true;
return currFrom.weight + currTo.weight >= bestWeight;
} | 3.68 |
rocketmq-connect_LogReporter_message | /**
* Format the error message for the given processing context.
*
* @param context the processing context that captured the failure
* @return the formatted error message
*/
String message(ProcessingContext context) {
return String.format("Error encountered in task %s. %s", id.toString(),
context.toString(deadLetterQueueConfig.includeRecordDetailsInErrorLog()));
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_writeError | /**
* Discards this {@link FlowControlled}, writing an error. If this frame is in the pending queue,
* the unwritten bytes are removed from this branch of the priority tree.
*/
private void writeError(FlowControlled frame, Http2Exception cause) {
assert ctx != null;
decrementPendingBytes(frame.size(), true);
frame.error(ctx, cause);
} | 3.68 |
hbase_Segment_maybeCloneWithAllocator | /**
   * If the segment has a memory allocator, the cell is cloned into that space and the clone is
   * returned; otherwise the given cell is returned. When a cell's size is too big (bigger than
   * maxAlloc), it is not allocated on MSLAB. Since the process of flattening to CellChunkMap assumes that all
* cells are allocated on MSLAB, during this process, the input parameter forceCloneOfBigCell is
* set to 'true' and the cell is copied into MSLAB.
* @return either the given cell or its clone
*/
public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {
if (this.memStoreLAB == null) {
return cell;
}
Cell cellFromMslab;
if (forceCloneOfBigCell) {
cellFromMslab = this.memStoreLAB.forceCopyOfBigCellInto(cell);
} else {
cellFromMslab = this.memStoreLAB.copyCellInto(cell);
}
return (cellFromMslab != null) ? cellFromMslab : cell;
} | 3.68 |
pulsar_Reflections_createInstance | /**
* Create an instance of <code>userClassName</code> using provided <code>classLoader</code>.
*
* @param userClassName user class name
* @param classLoader class loader to load the class.
* @return the instance
*/
public static Object createInstance(String userClassName,
ClassLoader classLoader) {
Class<?> theCls;
try {
theCls = Class.forName(userClassName, true, classLoader);
} catch (ClassNotFoundException | NoClassDefFoundError cnfe) {
throw new RuntimeException("User class must be in class path", cnfe);
}
Object result;
try {
Constructor<?> meth = constructorCache.get(theCls);
if (null == meth) {
meth = theCls.getDeclaredConstructor();
meth.setAccessible(true);
constructorCache.put(theCls, meth);
}
result = meth.newInstance();
} catch (InstantiationException ie) {
throw new RuntimeException("User class must be concrete", ie);
} catch (NoSuchMethodException e) {
throw new RuntimeException("User class doesn't have such method", e);
} catch (IllegalAccessException e) {
throw new RuntimeException("User class must have a no-arg constructor", e);
} catch (InvocationTargetException e) {
throw new RuntimeException("User class constructor throws exception", e);
}
return result;
} | 3.68 |
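A hedged usage sketch of the method above; the class name is a placeholder and must resolve to a concrete class with a no-arg constructor on the supplied class loader:

ClassLoader cl = Thread.currentThread().getContextClassLoader();
// "com.example.MyFunction" is hypothetical; any concrete, no-arg-constructible class works.
Object instance = Reflections.createInstance("com.example.MyFunction", cl);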
hbase_CommonFSUtils_validateRootPath | /**
* Verifies root directory path is a valid URI with a scheme
* @param root root directory path
* @return Passed <code>root</code> argument.
* @throws IOException if not a valid URI with a scheme
*/
public static Path validateRootPath(Path root) throws IOException {
try {
URI rootURI = new URI(root.toString());
String scheme = rootURI.getScheme();
if (scheme == null) {
throw new IOException("Root directory does not have a scheme");
}
return root;
} catch (URISyntaxException e) {
throw new IOException("Root directory path is not a valid " + "URI -- check your "
+ HConstants.HBASE_DIR + " configuration", e);
}
} | 3.68 |
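Usage sketch; the namenode address is a placeholder:

// Accepted: the URI carries an explicit hdfs:// scheme.
Path root = CommonFSUtils.validateRootPath(new Path("hdfs://namenode.example.com:8020/hbase"));

// A bare path such as new Path("/hbase") has no scheme, so the method would
// throw IOException("Root directory does not have a scheme").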
hadoop_NodePlan_setNodeUUID | /**
* Sets the Node UUID.
*
* @param nodeUUID - UUID of the node.
*/
public void setNodeUUID(String nodeUUID) {
this.nodeUUID = nodeUUID;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_listAllPartitionsFromFilesystem | /**
* Function to find hoodie partitions and list files in them in parallel.
*
   * @param initializationTime Files which have a timestamp after this are ignored
* @return List consisting of {@code DirectoryInfo} for each partition found.
*/
private List<DirectoryInfo> listAllPartitionsFromFilesystem(String initializationTime) {
List<SerializablePath> pathsToList = new LinkedList<>();
pathsToList.add(new SerializablePath(new CachingPath(dataWriteConfig.getBasePath())));
List<DirectoryInfo> partitionsToBootstrap = new LinkedList<>();
final int fileListingParallelism = metadataWriteConfig.getFileListingParallelism();
SerializableConfiguration conf = new SerializableConfiguration(dataMetaClient.getHadoopConf());
final String dirFilterRegex = dataWriteConfig.getMetadataConfig().getDirectoryFilterRegex();
final String datasetBasePath = dataMetaClient.getBasePath();
SerializablePath serializableBasePath = new SerializablePath(new CachingPath(datasetBasePath));
while (!pathsToList.isEmpty()) {
// In each round we will list a section of directories
int numDirsToList = Math.min(fileListingParallelism, pathsToList.size());
// List all directories in parallel
engineContext.setJobStatus(this.getClass().getSimpleName(), "Listing " + numDirsToList + " partitions from filesystem");
List<DirectoryInfo> processedDirectories = engineContext.map(pathsToList.subList(0, numDirsToList), path -> {
FileSystem fs = path.get().getFileSystem(conf.get());
String relativeDirPath = FSUtils.getRelativePartitionPath(serializableBasePath.get(), path.get());
return new DirectoryInfo(relativeDirPath, fs.listStatus(path.get()), initializationTime);
}, numDirsToList);
pathsToList = new LinkedList<>(pathsToList.subList(numDirsToList, pathsToList.size()));
// If the listing reveals a directory, add it to queue. If the listing reveals a hoodie partition, add it to
// the results.
for (DirectoryInfo dirInfo : processedDirectories) {
if (!dirFilterRegex.isEmpty()) {
final String relativePath = dirInfo.getRelativePath();
if (!relativePath.isEmpty() && relativePath.matches(dirFilterRegex)) {
LOG.info("Ignoring directory " + relativePath + " which matches the filter regex " + dirFilterRegex);
continue;
}
}
if (dirInfo.isHoodiePartition()) {
// Add to result
partitionsToBootstrap.add(dirInfo);
} else {
// Add sub-dirs to the queue
pathsToList.addAll(dirInfo.getSubDirectories().stream()
.map(path -> new SerializablePath(new CachingPath(path.toUri())))
.collect(Collectors.toList()));
}
}
}
return partitionsToBootstrap;
} | 3.68 |
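The method above is essentially a batched, breadth-first directory walk; below is a simplified JDK-only sketch of that pattern. The partition marker file name and the parallel stream are stand-ins for the Hudi metadata check and engineContext.map(...):

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

static List<Path> listPartitions(Path basePath, int parallelism) {
    List<Path> pending = new LinkedList<>();
    pending.add(basePath);
    List<Path> partitions = new ArrayList<>();
    while (!pending.isEmpty()) {
        // In each round list only a bounded batch of directories, mirroring fileListingParallelism.
        int batch = Math.min(parallelism, pending.size());
        List<Path> current = new ArrayList<>(pending.subList(0, batch));
        pending = new LinkedList<>(pending.subList(batch, pending.size()));
        List<List<Path>> children = current.parallelStream().map(dir -> {
            try (Stream<Path> s = Files.list(dir)) {
                return s.filter(Files::isDirectory).collect(Collectors.toList());
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }).collect(Collectors.toList());
        for (int i = 0; i < current.size(); i++) {
            // ".hoodie_partition_metadata" stands in for DirectoryInfo.isHoodiePartition().
            if (Files.exists(current.get(i).resolve(".hoodie_partition_metadata"))) {
                partitions.add(current.get(i));    // found a partition: stop descending
            } else {
                pending.addAll(children.get(i));   // plain directory: queue its sub-dirs
            }
        }
    }
    return partitions;
}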
framework_StaticSection_setText | /**
* Sets the textual caption of this cell.
*
* @param text
* a plain text caption, not null
*/
public void setText(String text) {
Objects.requireNonNull(text, "text cannot be null");
removeComponentIfPresent();
cellState.text = text;
cellState.type = GridStaticCellType.TEXT;
row.section.markAsDirty();
} | 3.68 |
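A hedged usage sketch against the Vaadin 8 Grid header API; the bean type and column id are placeholders:

Grid<Person> grid = new Grid<>(Person.class);      // Person is a hypothetical bean
HeaderRow header = grid.getDefaultHeaderRow();
// Replaces any component previously set on the cell and marks the section dirty.
header.getCell("name").setText("Full name");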
morf_SqlDialect_getGreatestFunctionName | /**
* @return The name of the GREATEST function
*/
protected String getGreatestFunctionName() {
return "GREATEST";
} | 3.68 |
hadoop_AccessTokenTimer_setExpiresIn | /**
* Set when the access token will expire as reported by the oauth server,
   * i.e. in seconds from now.
   * @param expiresIn Access token expiration as reported by the OAuth server
*/
public void setExpiresIn(String expiresIn) {
this.nextRefreshMSSinceEpoch = convertExpiresIn(timer, expiresIn);
} | 3.68 |
hbase_ReplicationSink_processReplicationMarkerEntry | /*
* First check if config key hbase.regionserver.replication.sink.tracker.enabled is true or not.
   * If false, then ignore this cell. If set to true, de-serialize the value into a
   * ReplicationMarkerDescriptor and create a Put mutation with the region server name, WAL name,
   * offset and timestamp taken from the descriptor.
*/
private Put processReplicationMarkerEntry(Cell cell) throws IOException {
// If source is emitting replication marker rows but sink is not accepting them,
// ignore the edits.
if (!replicationSinkTrackerEnabled) {
return null;
}
WALProtos.ReplicationMarkerDescriptor descriptor =
WALProtos.ReplicationMarkerDescriptor.parseFrom(new ByteArrayInputStream(cell.getValueArray(),
cell.getValueOffset(), cell.getValueLength()));
Put put = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
put.addColumn(REPLICATION_SINK_TRACKER_INFO_FAMILY, RS_COLUMN, cell.getTimestamp(),
(Bytes.toBytes(descriptor.getRegionServerName())));
put.addColumn(REPLICATION_SINK_TRACKER_INFO_FAMILY, WAL_NAME_COLUMN, cell.getTimestamp(),
Bytes.toBytes(descriptor.getWalName()));
put.addColumn(REPLICATION_SINK_TRACKER_INFO_FAMILY, TIMESTAMP_COLUMN, cell.getTimestamp(),
Bytes.toBytes(cell.getTimestamp()));
put.addColumn(REPLICATION_SINK_TRACKER_INFO_FAMILY, OFFSET_COLUMN, cell.getTimestamp(),
Bytes.toBytes(descriptor.getOffset()));
return put;
} | 3.68 |
hadoop_JobBase_setDoubleValue | /**
* Set the given counter to the given value
*
* @param name
* the counter name
* @param value
* the value for the counter
*/
protected void setDoubleValue(Object name, double value) {
    this.doubleCounters.put(name, Double.valueOf(value));
} | 3.68 |
flink_FileCatalogStore_contains | /**
* Returns whether the specified catalog exists in the catalog store.
*
* @param catalogName the name of the catalog to check
* @return {@code true} if the catalog exists in the catalog store, {@code false} otherwise
* @throws CatalogException if the catalog store is not open or if there is an error checking
* for the catalog
*/
@Override
public boolean contains(String catalogName) throws CatalogException {
checkOpenState();
Path catalogPath = getCatalogPath(catalogName);
try {
return catalogPath.getFileSystem().exists(catalogPath);
} catch (Exception e) {
throw new CatalogException(
String.format(
"Failed to check if catalog %s exists in the catalog store.",
catalogName),
e);
}
} | 3.68 |
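A small usage sketch; it assumes a FileCatalogStore that the surrounding environment has already opened, and the catalog name is a placeholder:

// Guard against re-registering an existing catalog definition.
if (!catalogStore.contains("sales_catalog")) {
    // store the catalog here (the exact call depends on the surrounding CatalogStore API)
}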
hadoop_AbfsOutputStreamStatisticsImpl_blockReleased | /**
* Increment the counter to indicate a block has been released.
*/
@Override
public void blockReleased() {
blocksReleased.incrementAndGet();
} | 3.68 |
framework_VScrollTable_calculateMaxIndent | /**
* This method exists for the needs of {@link VTreeTable} only. May be
* removed or replaced in the future.<br>
* <br>
     * Calculates the maximum indent of the hierarchyColumn, if applicable.
*/
protected void calculateMaxIndent() {
// NOP
} | 3.68 |
hbase_MetricsConnection_getScope | /**
* Returns the scope for a MetricsConnection based on the configured {@link #METRICS_SCOPE_KEY} or
* by generating a default from the passed clusterId and connectionObj's hashCode.
* @param conf configuration for the connection
* @param clusterId clusterId for the connection
* @param connectionObj either a Connection or AsyncConnectionImpl, the instance creating this
* MetricsConnection.
*/
static String getScope(Configuration conf, String clusterId, Object connectionObj) {
return conf.get(METRICS_SCOPE_KEY,
clusterId + "@" + Integer.toHexString(connectionObj.hashCode()));
} | 3.68 |
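A hedged sketch of the two possible outcomes; the key constant and the example values are taken as assumptions from the snippet and may differ across HBase versions:

Configuration conf = HBaseConfiguration.create();
// With an explicit override, the configured value wins:
conf.set(MetricsConnection.METRICS_SCOPE_KEY, "analytics-pool");
// Without the override, the scope falls back to clusterId + "@" + hex hash of the
// connection object, e.g. "3f2a...-cluster@1a2b3c4d".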
hadoop_AbstractConfigurableFederationPolicy_getPolicyInfo | /**
* Getter method for the configuration weights.
*
* @return the {@link WeightedPolicyInfo} representing the policy
* configuration.
*/
public WeightedPolicyInfo getPolicyInfo() {
return policyInfo;
} | 3.68 |
flink_IOManager_createChannelEnumerator | /**
* Creates a new {@link Enumerator}, spreading the channels in a round-robin fashion across the
* temporary file directories.
*
* @return An enumerator for channels.
*/
public Enumerator createChannelEnumerator() {
return fileChannelManager.createChannelEnumerator();
} | 3.68 |
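Usage sketch; it assumes an IOManager instance from the surrounding task environment and that the returned Enumerator follows the FileIOChannel.Enumerator contract with a next() method:

Enumerator channels = ioManager.createChannelEnumerator();
// Consecutive calls spread the channel IDs round-robin over the configured temp directories.
FileIOChannel.ID first = channels.next();
FileIOChannel.ID second = channels.next();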
hbase_HRegion_getStoreFiles | /** Returns Map of StoreFiles by column family */
private NavigableMap<byte[], List<Path>> getStoreFiles() {
NavigableMap<byte[], List<Path>> allStoreFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (HStore store : stores.values()) {
Collection<HStoreFile> storeFiles = store.getStorefiles();
if (storeFiles == null) {
continue;
}
List<Path> storeFileNames = new ArrayList<>();
for (HStoreFile storeFile : storeFiles) {
storeFileNames.add(storeFile.getPath());
}
allStoreFiles.put(store.getColumnFamilyDescriptor().getName(), storeFileNames);
}
return allStoreFiles;
} | 3.68 |
rocketmq-connect_AbstractConnectController_putConnectorConfig | /**
     * Adds or updates a connector configuration.
     *
     * @param connectorName name of the connector
     * @param configs       configuration key-values for the connector
     * @return the result returned by the configuration management service
     * @throws Exception if the configuration cannot be validated or persisted
*/
public String putConnectorConfig(String connectorName, ConnectKeyValue configs) throws Exception {
return configManagementService.putConnectorConfig(connectorName, configs);
} | 3.68 |
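A hedged usage sketch; the connector name and the configuration keys below are illustrative placeholders, not a verified RocketMQ Connect key set:

ConnectKeyValue configs = new ConnectKeyValue();
configs.put("connector.class", "com.example.DemoSourceConnector");   // hypothetical connector class
configs.put("tasks.num", "2");                                        // illustrative key/value
String result = controller.putConnectorConfig("demo-source", configs);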
hbase_ProcedureUtil_convertToProtoProcedure | /**
* Helper to convert the procedure to protobuf.
* <p/>
* Used by ProcedureStore implementations.
*/
public static ProcedureProtos.Procedure convertToProtoProcedure(Procedure<?> proc)
throws IOException {
Preconditions.checkArgument(proc != null);
validateClass(proc);
final ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder()
.setClassName(proc.getClass().getName()).setProcId(proc.getProcId()).setState(proc.getState())
.setSubmittedTime(proc.getSubmittedTime()).setLastUpdate(proc.getLastUpdate());
if (proc.hasParent()) {
builder.setParentId(proc.getParentProcId());
}
if (proc.hasTimeout()) {
builder.setTimeout(proc.getTimeout());
}
if (proc.hasOwner()) {
builder.setOwner(proc.getOwner());
}
final int[] stackIds = proc.getStackIndexes();
if (stackIds != null) {
for (int i = 0; i < stackIds.length; ++i) {
builder.addStackId(stackIds[i]);
}
}
if (proc.hasException()) {
RemoteProcedureException exception = proc.getException();
builder.setException(
RemoteProcedureException.toProto(exception.getSource(), exception.getCause()));
}
final byte[] result = proc.getResult();
if (result != null) {
builder.setResult(UnsafeByteOperations.unsafeWrap(result));
}
ProcedureStateSerializer serializer = new StateSerializer(builder);
proc.serializeStateData(serializer);
if (proc.getNonceKey() != null) {
builder.setNonceGroup(proc.getNonceKey().getNonceGroup());
builder.setNonce(proc.getNonceKey().getNonce());
}
if (proc.hasLock()) {
builder.setLocked(true);
}
if (proc.isBypass()) {
builder.setBypass(true);
}
return builder.build();
} | 3.68 |
hmily_HmilyTacDatasourceConfig_dataSource | /**
     * Creates the primary Hmily-wrapped data source.
*
* @return the data source
*/
@Bean
@Primary
public DataSource dataSource() {
HikariDataSource hikariDataSource = new HikariDataSource();
hikariDataSource.setJdbcUrl(dataSourceProperties.getUrl());
hikariDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());
hikariDataSource.setUsername(dataSourceProperties.getUsername());
hikariDataSource.setPassword(dataSourceProperties.getPassword());
hikariDataSource.setMaximumPoolSize(20);
hikariDataSource.setMinimumIdle(10);
hikariDataSource.setConnectionTimeout(30000);
hikariDataSource.setIdleTimeout(600000);
hikariDataSource.setMaxLifetime(1800000);
return new HmilyP6Datasource(hikariDataSource);
} | 3.68 |
hadoop_DockerCommand_preparePrivilegedOperation | /**
* Prepare the privileged operation object that will be used to invoke
* the container-executor.
*
* @param dockerCommand Specific command to be run by docker.
   * @param containerName String form of the container's ContainerId.
   * @param env Environment variables of the container.
   * @param nmContext NodeManager context.
   * @return the PrivilegedOperation object to be used.
   * @throws ContainerExecutionException if the docker command file cannot be written.
*/
public PrivilegedOperation preparePrivilegedOperation(
DockerCommand dockerCommand, String containerName, Map<String,
String> env, Context nmContext)
throws ContainerExecutionException {
DockerClient dockerClient = new DockerClient();
String commandFile =
dockerClient.writeCommandToTempFile(dockerCommand,
ContainerId.fromString(containerName),
nmContext);
PrivilegedOperation dockerOp = new PrivilegedOperation(
PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
dockerOp.appendArgs(commandFile);
return dockerOp;
} | 3.68 |