name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_BalanceProcedureScheduler_remove | /**
* Remove the job from scheduler if it finishes.
*/
public BalanceJob remove(BalanceJob job) {
BalanceJob inner = findJob(job);
if (inner == null) {
return null;
} else if (job.isJobDone()) {
synchronized (this) {
return jobSet.remove(inner);
}
}
return null;
} | 3.68 |
hbase_StorageClusterStatusModel_getRegions | /** Returns the total number of regions served by the cluster */
@XmlAttribute
public int getRegions() {
return regions;
} | 3.68 |
flink_TableConfig_setRootConfiguration | /**
* Sets the given configuration as {@link #rootConfiguration}, which contains any configuration
* set in the execution context. See the docs of {@link TableConfig} for more information.
*
* @param rootConfiguration root configuration to be set
*/
@Internal
public void setRootConfiguration(ReadableConfig rootConfiguration) {
this.rootConfiguration = rootConfiguration;
} | 3.68 |
flink_ListTypeInfo_getElementTypeInfo | /** Gets the type information for the elements contained in the list */
public TypeInformation<T> getElementTypeInfo() {
return elementTypeInfo;
} | 3.68 |
morf_SqlServer_openSchema | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#openSchema(java.sql.Connection,
* java.lang.String, java.lang.String)
*/
@Override
public Schema openSchema(Connection connection, String databaseName, String schemaName) {
return new SqlServerMetaDataProvider(connection, schemaName);
} | 3.68 |
hadoop_ReconfigurationException_constructMessage | /**
* Construct the exception message.
*/
private static String constructMessage(String property,
String newVal, String oldVal) {
String message = "Could not change property " + property;
if (oldVal != null) {
message += " from \'" + oldVal;
}
if (newVal != null) {
message += "\' to \'" + newVal + "\'";
}
return message;
} | 3.68 |
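The message this helper builds can be traced directly from the two branches above. A tiny standalone sketch (mirroring the method, with an illustrative property name) shows the resulting strings, including the unbalanced-quote quirk when only `newVal` is set:

```java
// Standalone sketch mirroring constructMessage above, to show the strings it produces.
public final class ReconfigMessageDemo {

    static String constructMessage(String property, String newVal, String oldVal) {
        String message = "Could not change property " + property;
        if (oldVal != null) {
            message += " from '" + oldVal;
        }
        if (newVal != null) {
            message += "' to '" + newVal + "'";
        }
        return message;
    }

    public static void main(String[] args) {
        // Could not change property dfs.heartbeat.interval from '3' to '5'
        System.out.println(constructMessage("dfs.heartbeat.interval", "5", "3"));
        // Could not change property dfs.heartbeat.interval' to '5'  (quote is unbalanced when oldVal is null)
        System.out.println(constructMessage("dfs.heartbeat.interval", "5", null));
    }
}
```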
framework_GridSortOrder_asc | /**
* Creates a new grid sort builder with given sorting using ascending sort
* direction.
*
* @param by
* the column to sort by
* @param <T>
* the grid type
*
* @return the grid sort builder
*/
public static <T> GridSortOrderBuilder<T> asc(Column<T, ?> by) {
return new GridSortOrderBuilder<T>().thenAsc(by);
} | 3.68 |
hadoop_SnappyCodec_getCompressorType | /**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
return SnappyCompressor.class;
} | 3.68 |
hbase_MultiByteBuff_skip | /**
* Jumps the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff skip(int length) {
checkRefCount();
// Get available bytes from this item and remaining from next
int jump = 0;
while (true) {
jump = this.curItem.remaining();
if (jump >= length) {
this.curItem.position(this.curItem.position() + length);
break;
}
this.curItem.position(this.curItem.position() + jump);
length -= jump;
this.curItemIndex++;
this.curItem = this.items[this.curItemIndex];
}
return this;
} | 3.68 |
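The loop above exhausts the current segment and rolls over to the next until the requested length is consumed. A minimal standalone sketch of that rollover over plain java.nio.ByteBuffers (hypothetical names, not the HBase class) looks like this:

```java
import java.nio.ByteBuffer;

// Minimal sketch of the rollover: exhaust the current buffer, then continue in the next one.
final class SkipSketch {

    static int skip(ByteBuffer[] items, int curItemIndex, int length) {
        while (true) {
            ByteBuffer cur = items[curItemIndex];
            int jump = cur.remaining();
            if (jump >= length) {
                cur.position(cur.position() + length); // enough room in the current buffer
                return curItemIndex;
            }
            cur.position(cur.position() + jump);       // consume what is left here...
            length -= jump;
            curItemIndex++;                            // ...and move on to the next buffer
        }
    }

    public static void main(String[] args) {
        ByteBuffer[] items = { ByteBuffer.allocate(4), ByteBuffer.allocate(8) };
        int idx = skip(items, 0, 6);                   // 4 bytes from buffer 0, 2 from buffer 1
        System.out.println(idx + " " + items[1].position()); // prints "1 2"
    }
}
```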
hbase_BucketCache_blockEvicted | /**
* This method is invoked after the bucketEntry is removed from {@link BucketCache#backingMap}
*/
void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber,
boolean evictedByEvictionProcess) {
bucketEntry.markAsEvicted();
blocksByHFile.remove(cacheKey);
if (decrementBlockNumber) {
this.blockNumber.decrement();
}
if (evictedByEvictionProcess) {
cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
}
if (ioEngine.isPersistent()) {
removeFileFromPrefetch(cacheKey.getHfileName());
setCacheInconsistent(true);
}
} | 3.68 |
hadoop_ZKClient_unregisterService | /**
* Unregister the service.
*
* @param path the path at which the service was registered
* @throws IOException if there are I/O errors.
* @throws InterruptedException if any thread has interrupted the current thread.
*/
public void unregisterService(String path) throws IOException,
InterruptedException {
try {
zkClient.delete(path, -1);
} catch(KeeperException ke) {
throw new IOException(ke);
}
} | 3.68 |
hbase_Mutation_isEmpty | /**
* Method to check if the familyMap is empty
* @return true if empty, false otherwise
*/
public boolean isEmpty() {
return getFamilyCellMap().isEmpty();
} | 3.68 |
hbase_ProcedureExecutor_getCorePoolSize | /** Returns the core pool size settings. */
public int getCorePoolSize() {
return corePoolSize;
} | 3.68 |
framework_SliderTooltip_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
Slider slider = new Slider();
slider.setDescription("Tooltip");
addComponent(slider);
} | 3.68 |
hibernate-validator_ConstraintDefinitionContribution_includeExisting | /**
* Whether or not the existing constraint validators should be kept.
*
* @return {@code true} if the existing constraint validators for the constraint type wrapped by this
* instance should be kept, {@code false} otherwise.
*/
public boolean includeExisting() {
return includeExisting;
} | 3.68 |
flink_PythonOperatorChainingOptimizer_optimize | /**
* Perform chaining optimization. It returns the chained transformations and the
* transformation after chaining optimization for the given transformation.
*/
public static Tuple2<List<Transformation<?>>, Transformation<?>> optimize(
List<Transformation<?>> transformations, Transformation<?> targetTransformation) {
final Map<Transformation<?>, Set<Transformation<?>>> outputMap =
buildOutputMap(transformations);
final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>();
final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet();
final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque();
toTransformQueue.add(targetTransformation);
while (!toTransformQueue.isEmpty()) {
final Transformation<?> toTransform = toTransformQueue.poll();
if (!alreadyTransformed.contains(toTransform)) {
alreadyTransformed.add(toTransform);
final ChainInfo chainInfo = chainWithInputIfPossible(toTransform, outputMap);
chainedTransformations.add(chainInfo.newTransformation);
chainedTransformations.removeAll(chainInfo.oldTransformations);
alreadyTransformed.addAll(chainInfo.oldTransformations);
// Add the chained transformation and its inputs to the to-optimize list
toTransformQueue.add(chainInfo.newTransformation);
toTransformQueue.addAll(chainInfo.newTransformation.getInputs());
if (toTransform == targetTransformation) {
targetTransformation = chainInfo.newTransformation;
}
}
}
return Tuple2.of(new ArrayList<>(chainedTransformations), targetTransformation);
} | 3.68 |
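The traversal is a standard worklist/BFS with identity-based deduplication (Guava's Sets.newIdentityHashSet and Queues.newArrayDeque). A pure-JDK sketch of the same pattern, with a hypothetical Node type standing in for Transformation, could look like:

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Set;

final class WorklistSketch {

    static final class Node {
        final String name;
        final List<Node> inputs;
        Node(String name, List<Node> inputs) { this.name = name; this.inputs = inputs; }
    }

    // Visit every node reachable from the target exactly once, comparing by identity
    // (the JDK stand-ins for Sets.newIdentityHashSet() and Queues.newArrayDeque()).
    static List<Node> visitAll(Node target) {
        Set<Node> seen = Collections.newSetFromMap(new IdentityHashMap<>());
        Deque<Node> queue = new ArrayDeque<>();
        List<Node> order = new ArrayList<>();
        queue.add(target);
        while (!queue.isEmpty()) {
            Node current = queue.poll();
            if (seen.add(current)) {          // add() is false if the node was already visited
                order.add(current);
                queue.addAll(current.inputs); // enqueue the inputs for later processing
            }
        }
        return order;
    }
}
```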
querydsl_AbstractMongodbQuery_fetchOne | /**
* Fetch one with the specific fields
*
* @param paths fields to return
* @return first result
*/
public K fetchOne(Path<?>... paths) {
queryMixin.setProjection(paths);
return fetchOne();
} | 3.68 |
morf_DataValueLookupMetadataRegistry_appendAndIntern | /**
* Given an existing (interned) metadata descriptor, appends the given column and
* returns the interned result.
*
* <p>Used when adding a new value to an existing {@link DataValueLookupBuilderImpl}.</p>
*
* <p>This call pattern means we can avoid constructing the combined {@link DataValueLookupMetadata}
* simply to find that there is already an interned instance and throw it away. If the
* metadata is already interned, the caller only needs to provide their current
* metadata and the column name to add.</p>
*
* @param appendTo The metadata prior to appending the column.
* @param columnName The column name to append.
* @return The interned result.
*/
static DataValueLookupMetadata appendAndIntern(DataValueLookupMetadata appendTo, CaseInsensitiveString columnName) {
ImmutableMap<CaseInsensitiveString, DataValueLookupMetadata> old = appendTo.getChildren();
DataValueLookupMetadata result = old.get(columnName);
if (result != null) {
return result;
}
synchronized (appendTo) {
ImmutableMap<CaseInsensitiveString, DataValueLookupMetadata> current = appendTo.getChildren();
if (old != current) {
result = current.get(columnName);
if (result != null) {
return result;
}
}
result = new DataValueLookupMetadata(ImmutableList.<CaseInsensitiveString>builderWithExpectedSize(appendTo.getColumnNames().size() + 1)
.addAll(appendTo.getColumnNames())
.add(columnName)
.build());
appendTo.setChildren(
builderPlusOneEntry(current)
.putAll(current)
.put(columnName, result)
.build());
return result;
}
} | 3.68 |
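The method follows the classic optimistic-read-then-recheck-under-lock interning pattern against an immutable children map. A simplified, generic sketch of that pattern (pure JDK, hypothetical types; the real method additionally detects whether the snapshot was replaced between the two reads):

```java
import java.util.HashMap;
import java.util.Map;

// Generic sketch: optimistic lock-free read, re-check under the lock,
// then publish a new immutable snapshot.
final class InternSketch<K, V> {

    private volatile Map<K, V> children = Map.of();

    V intern(K key, V candidate) {
        V existing = children.get(key);       // optimistic read, no locking
        if (existing != null) {
            return existing;
        }
        synchronized (this) {
            existing = children.get(key);     // re-check: another thread may have won the race
            if (existing != null) {
                return existing;
            }
            Map<K, V> next = new HashMap<>(children);
            next.put(key, candidate);
            children = Map.copyOf(next);      // publish a fresh immutable snapshot
            return candidate;
        }
    }
}
```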
framework_FileDownloader_isOverrideContentType | /**
* Checks whether the content type should be overridden.
*
* @return <code>true</code> if the content type will be overridden when
* possible; <code>false</code> if the original content type will be
* used.
* @see #setOverrideContentType(boolean)
*/
public boolean isOverrideContentType() {
return overrideContentType;
} | 3.68 |
querydsl_SQLExpressions_max | /**
* Start a window function expression
*
* @param expr expression
* @return max(expr)
*/
public static <T extends Comparable> WindowOver<T> max(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), Ops.AggOps.MAX_AGG, expr);
} | 3.68 |
flink_PbCodegenUtils_flinkContainerElementCode | /**
* @param flinkContainerCode code phrase which represents a Flink container type like row/array in
* codegen sections
* @param index the index number in flink container type
* @param eleType the element type
*/
public static String flinkContainerElementCode(
String flinkContainerCode, String index, LogicalType eleType) {
switch (eleType.getTypeRoot()) {
case INTEGER:
return flinkContainerCode + ".getInt(" + index + ")";
case BIGINT:
return flinkContainerCode + ".getLong(" + index + ")";
case FLOAT:
return flinkContainerCode + ".getFloat(" + index + ")";
case DOUBLE:
return flinkContainerCode + ".getDouble(" + index + ")";
case BOOLEAN:
return flinkContainerCode + ".getBoolean(" + index + ")";
case VARCHAR:
case CHAR:
return flinkContainerCode + ".getString(" + index + ")";
case VARBINARY:
case BINARY:
return flinkContainerCode + ".getBinary(" + index + ")";
case ROW:
int size = eleType.getChildren().size();
return flinkContainerCode + ".getRow(" + index + ", " + size + ")";
case MAP:
return flinkContainerCode + ".getMap(" + index + ")";
case ARRAY:
return flinkContainerCode + ".getArray(" + index + ")";
default:
throw new IllegalArgumentException("Unsupported data type in schema: " + eleType);
}
} | 3.68 |
flink_ResourceProfile_merge | /**
* Calculates the sum of two resource profiles.
*
* @param other The other resource profile to add.
* @return The merged resource profile.
*/
@Nonnull
public ResourceProfile merge(final ResourceProfile other) {
checkNotNull(other, "Cannot merge with null resources");
if (equals(ANY) || other.equals(ANY)) {
return ANY;
}
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
Map<String, ExternalResource> resultExtendedResource = new HashMap<>(extendedResources);
other.extendedResources.forEach(
(String name, ExternalResource resource) -> {
resultExtendedResource.compute(
name,
(ignored, oldResource) ->
oldResource == null ? resource : oldResource.merge(resource));
});
return new ResourceProfile(
cpuCores.merge(other.cpuCores),
taskHeapMemory.add(other.taskHeapMemory),
taskOffHeapMemory.add(other.taskOffHeapMemory),
managedMemory.add(other.managedMemory),
networkMemory.add(other.networkMemory),
resultExtendedResource);
} | 3.68 |
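The extended-resource handling boils down to merging two maps with Map.compute: entries present in both maps are combined, the rest are carried over unchanged. A minimal sketch with plain long values standing in for ExternalResource:

```java
import java.util.HashMap;
import java.util.Map;

final class ExtendedResourceMergeSketch {

    // Entries present in both maps are combined (here: added); the rest are carried over as-is.
    static Map<String, Long> merge(Map<String, Long> a, Map<String, Long> b) {
        Map<String, Long> result = new HashMap<>(a);
        b.forEach((name, value) ->
                result.compute(name, (ignored, old) -> old == null ? value : old + value));
        return result;
    }

    public static void main(String[] args) {
        Map<String, Long> merged = merge(Map.of("gpu", 2L), Map.of("gpu", 1L, "fpga", 4L));
        System.out.println(merged.get("gpu") + " " + merged.get("fpga")); // prints "3 4"
    }
}
```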
hadoop_StagingCommitter_getCommittedTaskPath | /**
* Compute the path where the output of a committed task is stored until the
* entire job is committed for a specific application attempt.
* @param appAttemptId the ID of the application attempt to use
* @param context the context of any task.
* @return the path where the output of a committed task is stored.
*/
protected Path getCommittedTaskPath(int appAttemptId,
TaskAttemptContext context) {
validateContext(context);
return new Path(getJobAttemptPath(appAttemptId),
String.valueOf(context.getTaskAttemptID().getTaskID()));
} | 3.68 |
streampipes_MigrateExtensionsResource_handleMigration | /**
* Migrates a pipeline element instance based on the provided {@link MigrationRequest}.
* The outcome of the migration is described in {@link MigrationResult}.
* The result is always part of the response.
* Independent of the migration outcome, the returned response always has OK as its status code.
* It is the responsibility of the recipient to interpret the migration result and act accordingly.
* @param migrationRequest Request that contains both the pipeline element to be migrated and the migration config.
* @return A response with status code ok, that contains a migration result reflecting the outcome of the operation.
*/
protected MigrationResult<T> handleMigration(MigrationRequest<T> migrationRequest) {
var pipelineElementDescription = migrationRequest.migrationElement();
var migrationConfig = migrationRequest.modelMigratorConfig();
LOG.info("Received migration request for pipeline element '{}' to migrate from version {} to {}",
pipelineElementDescription.getElementId(),
migrationConfig.fromVersion(),
migrationConfig.toVersion()
);
var migratorOptional = getMigrator(migrationConfig);
if (migratorOptional.isPresent()) {
LOG.info("Migrator found for request, starting migration...");
return executeMigration(migratorOptional.get(), pipelineElementDescription);
}
LOG.error("Migrator for migration config {} could not be found. Migration is cancelled.", migrationConfig);
return MigrationResult.failure(
pipelineElementDescription,
String.format(
"The given migration config '%s' could not be mapped to a registered migrator.",
migrationConfig
)
);
} | 3.68 |
hudi_LSMTimeline_isFileFromLayer | /**
* Returns whether a file belongs to the specified layer {@code layer} within the LSM layout.
*/
public static boolean isFileFromLayer(String fileName, int layer) {
return getFileLayer(fileName) == layer;
} | 3.68 |
hbase_MetricsHeapMemoryManager_updateUnblockedFlushCount | /**
* Update/Set the unblocked flush count histogram/gauge
* @param unblockedFlushCount the number of unblocked memstore flush since last tuning.
*/
public void updateUnblockedFlushCount(final long unblockedFlushCount) {
source.updateUnblockedFlushCount(unblockedFlushCount);
} | 3.68 |
flink_MemorySegment_putLong | /**
* Writes the given long value (64bit, 8 bytes) to the given position in the system's native
* byte order. This method offers the best speed for long integer writing and should be used
* unless a specific byte order is required. In most cases, it suffices to know that the byte
* order in which the value is written is the same as the one in which it is read (such as
* transient storage in memory, or serialization for I/O and network), making this method the
* preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putLong(int index, long value) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 8) {
UNSAFE.putLong(heapMemory, pos, value);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
} | 3.68 |
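The guard `index >= 0 && pos <= addressLimit - 8` simply ensures all eight bytes of the long fit inside the segment. A sketch of the same bounds reasoning on a plain heap byte[] (not the Flink class, and without the native-byte-order guarantee):

```java
import java.nio.ByteBuffer;

final class PutLongBoundsSketch {

    // A long needs 8 bytes, so the last valid index is length - 8
    // (the heap analogue of "pos <= addressLimit - 8" above).
    static void putLong(byte[] memory, int index, long value) {
        if (index < 0 || index > memory.length - 8) {
            throw new IndexOutOfBoundsException("index " + index + ", size " + memory.length);
        }
        ByteBuffer.wrap(memory).putLong(index, value); // note: big-endian here, not native order
    }

    public static void main(String[] args) {
        byte[] segment = new byte[16];
        putLong(segment, 8, 42L);                      // ok: occupies bytes 8..15
        try {
            putLong(segment, 9, 42L);                  // rejected: would need bytes 9..16
        } catch (IndexOutOfBoundsException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}
```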
hadoop_NamenodeStatusReport_getNumOfBlocksPendingReplication | /**
* Get the number of pending replication blocks.
*
* @return Number of pending replication blocks.
*/
public long getNumOfBlocksPendingReplication() {
return this.numOfBlocksPendingReplication;
} | 3.68 |
hadoop_ECPolicyLoader_loadSchemas | /**
* Load schemas from root element in the XML configuration file.
* @param root root element
* @return EC schema map
*/
private Map<String, ECSchema> loadSchemas(Element root) {
NodeList elements = root.getElementsByTagName("schemas")
.item(0).getChildNodes();
Map<String, ECSchema> schemas = new HashMap<String, ECSchema>();
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
Element element = (Element) node;
if ("schema".equals(element.getTagName())) {
String schemaId = element.getAttribute("id");
ECSchema schema = loadSchema(element);
if (!schemas.containsValue(schema)) {
schemas.put(schemaId, schema);
} else {
throw new RuntimeException("Repetitive schemas in EC policy"
+ " configuration file: " + schemaId);
}
} else {
throw new RuntimeException("Bad element in EC policy"
+ " configuration file: " + element.getTagName());
}
}
}
return schemas;
} | 3.68 |
hibernate-validator_ReflectionHelper_getCollectionElementType | /**
* Determines the type of the elements of an {@code Iterable}, array or the value of a {@code Map}.
*/
public static Type getCollectionElementType(Type type) {
Type indexedType = null;
if ( isIterable( type ) && type instanceof ParameterizedType ) {
ParameterizedType paramType = (ParameterizedType) type;
indexedType = paramType.getActualTypeArguments()[0];
}
else if ( isMap( type ) && type instanceof ParameterizedType ) {
ParameterizedType paramType = (ParameterizedType) type;
indexedType = paramType.getActualTypeArguments()[1];
}
else if ( TypeHelper.isArray( type ) ) {
indexedType = TypeHelper.getComponentType( type );
}
return indexedType;
} | 3.68 |
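A small pure-JDK sketch of the reflection this helper relies on, showing where the type arguments come from (it does not call the Hibernate Validator internal class itself):

```java
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;

final class ElementTypeDemo {

    List<String> names;
    Map<String, Integer> counts;

    public static void main(String[] args) throws Exception {
        Type listType = ElementTypeDemo.class.getDeclaredField("names").getGenericType();
        Type mapType = ElementTypeDemo.class.getDeclaredField("counts").getGenericType();

        // For an Iterable, the element type is the first type argument...
        Type listElement = ((ParameterizedType) listType).getActualTypeArguments()[0];
        // ...for a Map, the "element" is the value type, i.e. the second type argument.
        Type mapValue = ((ParameterizedType) mapType).getActualTypeArguments()[1];

        System.out.println(listElement); // class java.lang.String
        System.out.println(mapValue);    // class java.lang.Integer
    }
}
```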
flink_FlinkRelUtil_merge | /**
* Merges the programs of two {@link Calc} instances and returns a new {@link Calc} instance
* with the merged program.
*/
public static Calc merge(Calc topCalc, Calc bottomCalc) {
RexProgram topProgram = topCalc.getProgram();
RexBuilder rexBuilder = topCalc.getCluster().getRexBuilder();
// Merge the programs together.
RexProgram mergedProgram =
RexProgramBuilder.mergePrograms(topProgram, bottomCalc.getProgram(), rexBuilder);
if (!mergedProgram.getOutputRowType().equals(topProgram.getOutputRowType())) {
throw new IllegalArgumentException(
"Output row type of merged program is not the same top program.");
}
RexProgram newMergedProgram;
if (mergedProgram.getCondition() != null) {
RexNode condition = mergedProgram.expandLocalRef(mergedProgram.getCondition());
RexNode simplifiedCondition =
FlinkRexUtil.simplify(
rexBuilder, condition, topCalc.getCluster().getPlanner().getExecutor());
if (simplifiedCondition.equals(condition)) {
newMergedProgram = mergedProgram;
} else {
RexProgramBuilder programBuilder =
RexProgramBuilder.forProgram(mergedProgram, rexBuilder, true);
programBuilder.clearCondition();
programBuilder.addCondition(simplifiedCondition);
newMergedProgram = programBuilder.getProgram(true);
}
} else {
newMergedProgram = mergedProgram;
}
return topCalc.copy(topCalc.getTraitSet(), bottomCalc.getInput(), newMergedProgram);
} | 3.68 |
hbase_WALEntryBatch_getLastSeqIds | /** Returns the last sequenceid for each region if the table has serial-replication scope */
public Map<String, Long> getLastSeqIds() {
return lastSeqIds;
} | 3.68 |
flink_WatermarkOutputMultiplexer_updateCombinedWatermark | /**
* Checks whether we need to update the combined watermark. Should be called when a newly
* emitted per-output watermark is higher than the max so far or if we need to combined the
* deferred per-output updates.
*/
private void updateCombinedWatermark() {
if (combinedWatermarkStatus.updateCombinedWatermark()) {
underlyingOutput.emitWatermark(
new Watermark(combinedWatermarkStatus.getCombinedWatermark()));
} else if (combinedWatermarkStatus.isIdle()) {
underlyingOutput.markIdle();
}
} | 3.68 |
morf_AbstractSqlDialectTest_differentSchemaTableName | /**
* For tests using tables from different schema values.
*
* @param baseName Base table name.
* @return Decorated name.
*/
protected String differentSchemaTableName(String baseName) {
return "MYSCHEMA." + baseName;
} | 3.68 |
graphhopper_ResponsePath_hasErrors | /**
* @return true if this alternative response contains one or more errors
*/
public boolean hasErrors() {
return !errors.isEmpty();
} | 3.68 |
flink_BufferManager_releaseAll | /**
* The floating buffer is recycled to local buffer pool directly, and the exclusive buffer
* will be gathered to return to global buffer pool later.
*
* @param exclusiveSegments The list that we will add exclusive segments into.
*/
void releaseAll(List<MemorySegment> exclusiveSegments) {
Buffer buffer;
while ((buffer = floatingBuffers.poll()) != null) {
buffer.recycleBuffer();
}
while ((buffer = exclusiveBuffers.poll()) != null) {
exclusiveSegments.add(buffer.getMemorySegment());
}
} | 3.68 |
dubbo_CacheFilter_invoke | /**
* If a cache is configured, Dubbo consults it on each method call. If the cache store returns a value, that value
* is returned directly; otherwise the remote method is invoked and its return value is cached and returned. If the
* remote method's return value carries an error, the value is not cached.
* @param invoker service
* @param invocation invocation.
* @return Cache returned value if found by the underlying cache store. If cache miss it will call target method.
* @throws RpcException
*/
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
if (cacheFactory == null
|| ConfigUtils.isEmpty(invoker.getUrl().getMethodParameter(invocation.getMethodName(), CACHE_KEY))) {
return invoker.invoke(invocation);
}
Cache cache = cacheFactory.getCache(invoker.getUrl(), invocation);
if (cache == null) {
return invoker.invoke(invocation);
}
String key = StringUtils.toArgumentString(invocation.getArguments());
Object value = cache.get(key);
return (value != null)
? onCacheValuePresent(invocation, value)
: onCacheValueNotPresent(invoker, invocation, cache, key);
} | 3.68 |
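Stripped of the Dubbo plumbing, the filter is a cache-aside lookup keyed on the stringified invocation arguments. A minimal generic sketch of that pattern (hypothetical types, pure JDK):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

final class CacheAsideSketch {

    private final Map<String, Object> cache = new ConcurrentHashMap<>();

    // Return the cached value if present; otherwise invoke the target and remember its result.
    Object invoke(String key, Function<String, Object> target) {
        Object cached = cache.get(key);
        if (cached != null) {
            return cached;
        }
        Object result = target.apply(key);
        if (result != null) {              // mirrors the filter: failed/absent results are not cached
            cache.put(key, result);
        }
        return result;
    }
}
```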
flink_ExtractionUtils_extractionError | /** Helper method for creating consistent exceptions during extraction. */
static ValidationException extractionError(Throwable cause, String message, Object... args) {
return new ValidationException(String.format(message, args), cause);
} | 3.68 |
dubbo_RequestEvent_toRequestErrorEvent | /**
* Acts on MetricsClusterFilter to monitor exceptions that occur before request execution
*/
public static RequestEvent toRequestErrorEvent(
ApplicationModel applicationModel,
String appName,
MetricsDispatcher metricsDispatcher,
Invocation invocation,
String side,
int code,
boolean serviceLevel) {
RequestEvent event = new RequestEvent(applicationModel, appName, metricsDispatcher, null, REQUEST_ERROR_EVENT);
event.putAttachment(ATTACHMENT_KEY_SERVICE, MetricsSupport.getInterfaceName(invocation));
event.putAttachment(MetricsConstants.INVOCATION_SIDE, side);
event.putAttachment(MetricsConstants.INVOCATION, invocation);
event.putAttachment(MetricsConstants.INVOCATION_REQUEST_ERROR, code);
event.putAttachment(
MetricsConstants.METHOD_METRICS, new MethodMetric(applicationModel, invocation, serviceLevel));
return event;
} | 3.68 |
framework_AbstractSelect_setNullSelectionAllowed | /**
* Allow or disallow empty selection by the user. If the select is in
* single-select mode, you can make an item represent the empty selection by
* calling <code>setNullSelectionItemId()</code>. This way you can for
* instance set an icon and caption for the null selection item.
*
* @param nullSelectionAllowed
* whether or not to allow empty selection
* @see #setNullSelectionItemId(Object)
* @see #isNullSelectionAllowed()
*/
public void setNullSelectionAllowed(boolean nullSelectionAllowed) {
if (nullSelectionAllowed != this.nullSelectionAllowed) {
this.nullSelectionAllowed = nullSelectionAllowed;
markAsDirty();
}
} | 3.68 |
pulsar_AuthenticationProvider_newHttpAuthState | /**
* Create an http authentication data State using the passed-in AuthenticationDataSource.
* @deprecated implementations that previously relied on this should update their implementation of
* {@link #authenticateHttpRequest(HttpServletRequest, HttpServletResponse)} or of
* {@link #authenticateHttpRequestAsync(HttpServletRequest, HttpServletResponse)} so that the desired attributes
* are added in those methods.
*
* <p>Note: this method was only ever used to generate an {@link AuthenticationState} object in order to generate
* an {@link AuthenticationDataSource} that was added as the {@link AuthenticatedDataAttributeName} attribute to
* the http request. Removing this method removes an unnecessary step in the authentication flow.</p>
*/
@Deprecated(since = "3.0.0")
default AuthenticationState newHttpAuthState(HttpServletRequest request)
throws AuthenticationException {
return new OneStageAuthenticationState(request, this);
} | 3.68 |
framework_VPanel_setIconUri | /** For internal use only. May be removed or replaced in the future. */
public void setIconUri(String iconUri, ApplicationConnection client) {
if (icon != null) {
captionNode.removeChild(icon.getElement());
}
icon = client.getIcon(iconUri);
if (icon != null) {
DOM.insertChild(captionNode, icon.getElement(), 0);
}
} | 3.68 |
hadoop_JobBase_configure | /**
* Initializes a new instance from a {@link JobConf}.
*
* @param job
* the configuration
*/
public void configure(JobConf job) {
this.longCounters = new TreeMap<Object, Long>();
this.doubleCounters = new TreeMap<Object, Double>();
} | 3.68 |
hibernate-validator_BeanMetaDataManagerImpl_getBeanConfigurationForHierarchy | /**
* Returns a list with the configurations for all types contained in the given type's hierarchy (including
* implemented interfaces) starting at the specified type.
*
* @param beanClass The type of interest.
* @param <T> The type of the class to get the configurations for.
* @return A list with the configurations for the complete hierarchy of the given type. May be empty, but never
* {@code null}.
*/
private <T> List<BeanConfiguration<? super T>> getBeanConfigurationForHierarchy(MetaDataProvider provider, Class<T> beanClass) {
List<BeanConfiguration<? super T>> configurations = newArrayList();
for ( Class<? super T> clazz : ClassHierarchyHelper.getHierarchy( beanClass ) ) {
BeanConfiguration<? super T> configuration = provider.getBeanConfiguration( clazz );
if ( configuration != null ) {
configurations.add( configuration );
}
}
return configurations;
} | 3.68 |
hudi_AdbSyncTool_syncSchema | /**
* Get the latest schema from the last commit and check if it is in sync with the ADB
* table schema. If not, evolves the table schema.
*
* @param tableName The table to be synced
* @param tableExists Whether target table exists
* @param useRealTimeInputFormat Whether using realtime input format
* @param readAsOptimized Whether read as optimized table
* @param schema The extracted schema
*/
private void syncSchema(String tableName, boolean tableExists, boolean useRealTimeInputFormat,
boolean readAsOptimized, MessageType schema) {
// Append spark table properties & serde properties
Map<String, String> tableProperties = ConfigUtils.toMap(config.getString(ADB_SYNC_TABLE_PROPERTIES));
Map<String, String> serdeProperties = ConfigUtils.toMap(config.getString(ADB_SYNC_SERDE_PROPERTIES));
if (config.getBoolean(ADB_SYNC_SYNC_AS_SPARK_DATA_SOURCE_TABLE)) {
Map<String, String> sparkTableProperties = SparkDataSourceTableUtils.getSparkTableProperties(config.getSplitStrings(META_SYNC_PARTITION_FIELDS),
config.getString(META_SYNC_SPARK_VERSION), config.getInt(ADB_SYNC_SCHEMA_STRING_LENGTH_THRESHOLD), schema);
Map<String, String> sparkSerdeProperties = SparkDataSourceTableUtils.getSparkSerdeProperties(readAsOptimized, config.getString(META_SYNC_BASE_PATH));
tableProperties.putAll(sparkTableProperties);
serdeProperties.putAll(sparkSerdeProperties);
LOG.info("Sync as spark datasource table, tableName:{}, tableExists:{}, tableProperties:{}, sederProperties:{}",
tableName, tableExists, tableProperties, serdeProperties);
}
// Check and sync schema
if (!tableExists) {
LOG.info("ADB table [{}] is not found, creating it", tableName);
String inputFormatClassName = HoodieInputFormatUtils.getInputFormatClassName(HoodieFileFormat.PARQUET, useRealTimeInputFormat);
// Custom serde will not work with ALTER TABLE REPLACE COLUMNS
// https://github.com/apache/hive/blob/release-1.1.0/ql/src/java/org/apache/hadoop/hive
// /ql/exec/DDLTask.java#L3488
syncClient.createTable(tableName, schema, inputFormatClassName, MapredParquetOutputFormat.class.getName(),
ParquetHiveSerDe.class.getName(), serdeProperties, tableProperties);
} else {
// Check if the table schema has evolved
Map<String, String> tableSchema = syncClient.getMetastoreSchema(tableName);
SchemaDifference schemaDiff = HiveSchemaUtil.getSchemaDifference(schema, tableSchema, config.getSplitStrings(META_SYNC_PARTITION_FIELDS),
config.getBoolean(ADB_SYNC_SUPPORT_TIMESTAMP));
if (!schemaDiff.isEmpty()) {
LOG.info("Schema difference found for table:{}", tableName);
syncClient.updateTableDefinition(tableName, schemaDiff);
} else {
LOG.info("No Schema difference for table:{}", tableName);
}
}
} | 3.68 |
hadoop_Cluster_getJobHistoryUrl | /**
* Get the job history file path for a given job id. The job history file at
* this path may or may not be existing depending on the job completion state.
* The file is present only for the completed jobs.
* @param jobId the JobID of the job submitted by the current user.
* @return the file path of the job history file
* @throws IOException
* @throws InterruptedException
*/
public String getJobHistoryUrl(JobID jobId) throws IOException,
InterruptedException {
if (jobHistoryDir == null) {
jobHistoryDir = new Path(client.getJobHistoryDir());
}
return new Path(jobHistoryDir, jobId.toString() + "_"
+ ugi.getShortUserName()).toString();
} | 3.68 |
hbase_SnapshotInfo_getArchivedStoreFilesCount | /** Returns the number of available store files in the archive */
public int getArchivedStoreFilesCount() {
return hfilesArchiveCount.get();
} | 3.68 |
graphhopper_TarjanSCC_getSingleNodeComponents | /**
* The set of nodes that form their own (single-node) component. If {@link TarjanSCC#excludeSingleNodeComponents}
* is enabled this set will be empty.
*/
public BitSet getSingleNodeComponents() {
return singleNodeComponents;
} | 3.68 |
framework_VComboBox_setPrevButtonActive | /**
* Should the previous page button be visible to the user.
*
* @param active {@code true} to make the previous-page button active
*/
private void setPrevButtonActive(boolean active) {
if (enableDebug) {
debug("VComboBox.SP: setPrevButtonActive(" + active + ")");
}
if (active) {
DOM.sinkEvents(up, Event.ONCLICK);
up.setClassName(
VComboBox.this.getStylePrimaryName() + "-prevpage");
} else {
DOM.sinkEvents(up, 0);
up.setClassName(
VComboBox.this.getStylePrimaryName() + "-prevpage-off");
}
} | 3.68 |
flink_TaskExecutorMemoryConfiguration_getFrameworkOffHeap | /** Returns the configured off-heap size used by the framework. */
public Long getFrameworkOffHeap() {
return frameworkOffHeap;
} | 3.68 |
flink_ComponentClosingUtils_tryShutdownExecutorElegantly | /**
* A util method that tries to shut down an {@link ExecutorService} elegantly within the given
* timeout. If the executor has not been shut down before it hits timeout or the thread is
* interrupted when waiting for the termination, a forceful shutdown will be attempted on the
* executor.
*
* @param executor the {@link ExecutorService} to shut down.
* @param timeout the timeout duration.
* @return true if the given executor has been successfully closed, false otherwise.
*/
@SuppressWarnings("ResultOfMethodCallIgnored")
public static boolean tryShutdownExecutorElegantly(ExecutorService executor, Duration timeout) {
try {
executor.shutdown();
executor.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS);
} catch (InterruptedException ie) {
// Let it go.
}
if (!executor.isTerminated()) {
shutdownExecutorForcefully(executor, Duration.ZERO, false);
}
return executor.isTerminated();
} | 3.68 |
flink_EmptyMutableObjectIterator_get | /**
* Gets a singleton instance of the empty iterator.
*
* @param <E> The type of the objects (not) returned by the iterator.
* @return An instance of the iterator.
*/
public static <E> MutableObjectIterator<E> get() {
@SuppressWarnings("unchecked")
MutableObjectIterator<E> iter = (MutableObjectIterator<E>) INSTANCE;
return iter;
} | 3.68 |
hbase_FileLink_getBackReferenceFileName | /**
* Get the referenced file name from the reference link directory path.
* @param dirPath Link references directory path
* @return Name of the file referenced
*/
public static String getBackReferenceFileName(final Path dirPath) {
return dirPath.getName().substring(BACK_REFERENCES_DIRECTORY_PREFIX.length());
} | 3.68 |
hbase_MetricsMasterFileSystem_addMetaWALSplit | /**
* Record a single instance of a split
* @param time time that the split took
* @param size length of original WALs that were split
*/
public synchronized void addMetaWALSplit(long time, long size) {
source.updateMetaWALSplitTime(time);
source.updateMetaWALSplitSize(size);
} | 3.68 |
hadoop_BinaryPartitioner_setOffsets | /**
* Set the subarray to be used for partitioning to
* <code>bytes[left:(right+1)]</code> in Python syntax.
*
* @param conf configuration object
* @param left left Python-style offset
* @param right right Python-style offset
*/
public static void setOffsets(Configuration conf, int left, int right) {
conf.setInt(LEFT_OFFSET_PROPERTY_NAME, left);
conf.setInt(RIGHT_OFFSET_PROPERTY_NAME, right);
} | 3.68 |
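A usage sketch, assuming the usual MapReduce client classpath (the job name is illustrative): configure the partitioner to use bytes[2:6] of the binary key, i.e. left=2 and right=5 in the Python-style notation above, and install it on the job:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner;

public final class BinaryPartitionerSetup {

    public static Job configure(Configuration conf) throws Exception {
        BinaryPartitioner.setOffsets(conf, 2, 5);          // partition on bytes[2:6] of the key
        Job job = Job.getInstance(conf, "binary-partitioned-job");
        job.setPartitionerClass(BinaryPartitioner.class);
        return job;
    }
}
```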
flink_MapView_get | /**
* Return the value for the specified key or {@code null} if the key is not in the map view.
*
* @param key The look up key.
* @return The value for the specified key.
* @throws Exception Thrown if the system cannot get data.
*/
public V get(K key) throws Exception {
return map.get(key);
} | 3.68 |
hbase_RowMutations_add | /**
* Add a list of mutations
* @param mutations The data to send.
* @throws IOException if the row of added mutation doesn't match the original row
*/
public RowMutations add(List<? extends Mutation> mutations) throws IOException {
for (Mutation mutation : mutations) {
if (!Bytes.equals(row, mutation.getRow())) {
throw new WrongRowIOException(
"The row in the recently added Mutation <" + Bytes.toStringBinary(mutation.getRow())
+ "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">");
}
}
this.mutations.addAll(mutations);
return this;
} | 3.68 |
hbase_MetricsSource_setOldestWalAge | /*
* Sets the age of oldest log file just for source.
*/
public void setOldestWalAge(long age) {
singleSourceSource.setOldestWalAge(age);
} | 3.68 |
hadoop_RMContainerTokenSecretManager_createContainerToken | /**
* Helper function for creating ContainerTokens.
*
* @param containerId Container Id
* @param containerVersion Container version
* @param nodeId Node Id
* @param appSubmitter App Submitter
* @param capability Capability
* @param priority Priority
* @param createTime Create Time
* @param logAggregationContext Log Aggregation Context
* @param nodeLabelExpression Node Label Expression
* @param containerType Container Type
* @param execType Execution Type
* @param allocationRequestId allocationRequestId
* @param allocationTags allocation Tags
* @return the container-token
*/
public Token createContainerToken(ContainerId containerId,
int containerVersion, NodeId nodeId, String appSubmitter,
Resource capability, Priority priority, long createTime,
LogAggregationContext logAggregationContext, String nodeLabelExpression,
ContainerType containerType, ExecutionType execType,
long allocationRequestId, Set<String> allocationTags) {
byte[] password;
ContainerTokenIdentifier tokenIdentifier;
long expiryTimeStamp =
System.currentTimeMillis() + containerTokenExpiryInterval;
// Lock so that we use the same MasterKey's keyId and its bytes
this.readLock.lock();
try {
tokenIdentifier =
new ContainerTokenIdentifier(containerId, containerVersion,
nodeId.toString(), appSubmitter, capability, expiryTimeStamp,
this.currentMasterKey.getMasterKey().getKeyId(),
ResourceManager.getClusterTimeStamp(), priority, createTime,
logAggregationContext, nodeLabelExpression, containerType,
execType, allocationRequestId, allocationTags);
password = this.createPassword(tokenIdentifier);
} finally {
this.readLock.unlock();
}
return BuilderUtils.newContainerToken(nodeId, password, tokenIdentifier);
} | 3.68 |
morf_SchemaEditor_removePrimaryKey | /**
* Drop the primary key of a table.
*
* @param tableName The original table name
* @param oldPrimaryKeyColumns The current/old primary key columns for the table.
*/
default void removePrimaryKey(String tableName, List<String> oldPrimaryKeyColumns){
changePrimaryKeyColumns(tableName, oldPrimaryKeyColumns, Collections.emptyList());
} | 3.68 |
hbase_InternalScan_checkOnlyMemStore | /**
* StoreFiles will not be scanned. Only MemStore will be scanned.
*/
public void checkOnlyMemStore() {
memOnly = true;
filesOnly = false;
} | 3.68 |
hadoop_LogParserUtil_setLogParser | /**
* Set the {@link LogParser} to use.
*
* @param logParser the {@link LogParser} to use.
*/
public void setLogParser(final LogParser logParser) {
this.logParser = logParser;
} | 3.68 |
hbase_CoprocessorRpcUtils_getControllerException | /**
* Retrieves the exception stored during RPC invocation.
* @param controller the controller instance provided by the client when calling the service
* @return exception if any, or null; will return DoNotRetryIOException for string-represented
* failure causes in controller.
*/
@Nullable
public static IOException getControllerException(RpcController controller) throws IOException {
if (controller == null || !controller.failed()) {
return null;
}
if (controller instanceof ServerRpcController) {
return ((ServerRpcController) controller).getFailedOn();
}
return new DoNotRetryIOException(controller.errorText());
} | 3.68 |
morf_ChangePrimaryKeyColumns_isApplied | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(org.alfasoftware.morf.metadata.Schema, org.alfasoftware.morf.jdbc.ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
List<String> actual = namesOfColumns(primaryKeysForTable(schema.getTable(tableName)));
List<String> expected = newPrimaryKeyColumns;
return actual.equals(expected);
} | 3.68 |
morf_Table_primaryKey | /**
* @return The column definitions of the columns in the primary key.
*/
public default List<Column> primaryKey() {
return columns().stream()
.filter(Column::isPrimaryKey)
.collect(Collectors.toList());
} | 3.68 |
morf_RenameIndex_applyChange | /**
* Renames an index from the name specified to the new name.
*
* @param schema {@link Schema} to apply the change against resulting in new
* metadata.
* @param indexStartName the starting name for the index
* @param indexEndName the end name for the index
* @return MetaData with {@link SchemaChange} applied.
*/
private Schema applyChange(Schema schema, String indexStartName, String indexEndName) {
// Check the state
if (StringUtils.isBlank(indexStartName)) {
throw new IllegalArgumentException("Cannot rename an index without the name of the index to rename");
}
if (StringUtils.isBlank(indexEndName)) {
throw new IllegalArgumentException(String.format("Cannot rename index [%s] to be blank", indexStartName));
}
// Now setup the new table definition
Table original = schema.getTable(tableName);
boolean foundMatch = false;
// Copy the index names into a list of strings
List<String> indexes = new ArrayList<>();
Index newIndex = null;
for (Index index : original.indexes()) {
String currentIndexName = index.getName();
// If we're looking at the index being renamed...
if (currentIndexName.equalsIgnoreCase(indexStartName)) {
// Substitute in the new index name
currentIndexName = indexEndName;
if (index.isUnique()) {
newIndex = index(indexEndName).columns(index.columnNames()).unique();
} else {
newIndex = index(indexEndName).columns(index.columnNames());
}
foundMatch = true;
}
for (String existing : indexes) {
if (existing.equalsIgnoreCase(currentIndexName)) {
throw new IllegalArgumentException(String.format(
"Cannot rename index from [%s] to [%s] on table [%s] as index with that name already exists", indexStartName,
indexEndName, tableName));
}
}
indexes.add(currentIndexName);
}
if (!foundMatch) {
throw new IllegalArgumentException(String.format("Cannot rename index [%s] as it does not exist on table [%s]",
indexStartName, tableName));
}
return new TableOverrideSchema(schema, new AlteredTable(original, null, null, indexes, Arrays.asList(new Index[] { newIndex })));
} | 3.68 |
dubbo_GenericBeanPostProcessorAdapter_processBeforeInitialization | /**
* Process the {@link T Bean} with the given name before initialization; no value is returned.
* <p>
* This method will be invoked by BeanPostProcessor#postProcessBeforeInitialization(Object, String)
*
* @param bean Bean Object
* @param beanName Bean Name
* @throws BeansException in case of errors
*/
protected void processBeforeInitialization(T bean, String beanName) throws BeansException {} | 3.68 |
morf_UpgradePathFinder_readOnlyWithUUID | /**
* Reads the {@link OnlyWith} UUID from a class, doing some sanity checking.
*
* @param upgradeStepClass The upgrade step class.
* @return The UUID of the referenced class; null if no annotation is present on the given class.
*/
public static java.util.UUID readOnlyWithUUID(Class<? extends UpgradeStep> upgradeStepClass) {
OnlyWith annotation = upgradeStepClass.getAnnotation(OnlyWith.class);
if (annotation == null || StringUtils.isBlank(annotation.value())) {
return null;
}
return java.util.UUID.fromString(annotation.value());
} | 3.68 |
hbase_AvlUtil_readNext | /**
* Return the successor of the current node
* @param node the current node
* @return the successor of the current node
*/
public static <TNode extends AvlLinkedNode> TNode readNext(TNode node) {
return (TNode) node.iterNext;
} | 3.68 |
morf_SqlDialect_extractParameters | /**
* Extracts the parameters from a SQL statement.
*
* @param statement the SQL statement.
* @return the list of parameters.
*/
public List<SqlParameter> extractParameters(InsertStatement statement) {
SqlParameterExtractor extractor = new SqlParameterExtractor();
ObjectTreeTraverser.forCallback(extractor).dispatch(statement);
return extractor.list;
} | 3.68 |
hbase_HBaseTestingUtility_startMiniMapReduceCluster | /**
* Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
* filesystem.
* @param servers The number of <code>TaskTracker</code>'s to start.
* @throws IOException When starting the cluster fails.
*/
private void startMiniMapReduceCluster(final int servers) throws IOException {
if (mrCluster != null) {
throw new IllegalStateException("MiniMRCluster is already running");
}
LOG.info("Starting mini mapreduce cluster...");
setupClusterTestDir();
createDirsAndSetProperties();
forceChangeTaskLogDir();
//// hadoop2 specific settings
// Tests were failing because this process used 6GB of virtual memory and was getting killed.
// we up the VM usable so that processes don't get killed.
conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
// Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
// this avoids the problem by disabling speculative task execution in tests.
conf.setBoolean("mapreduce.map.speculative", false);
conf.setBoolean("mapreduce.reduce.speculative", false);
////
// Allow the user to override FS URI for this map-reduce cluster to use.
mrCluster =
new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(),
1, null, null, new JobConf(this.conf));
JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
if (jobConf == null) {
jobConf = mrCluster.createJobConf();
}
// Hadoop MiniMR overwrites this while it should not
jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
LOG.info("Mini mapreduce cluster started");
// In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
// Our HBase MR jobs need several of these settings in order to properly run. So we copy the
// necessary config properties here. YARN-129 required adding a few properties.
conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
// this for mrv2 support; mr1 ignores this
conf.set("mapreduce.framework.name", "yarn");
conf.setBoolean("yarn.is.minicluster", true);
String rmAddress = jobConf.get("yarn.resourcemanager.address");
if (rmAddress != null) {
conf.set("yarn.resourcemanager.address", rmAddress);
}
String historyAddress = jobConf.get("mapreduce.jobhistory.address");
if (historyAddress != null) {
conf.set("mapreduce.jobhistory.address", historyAddress);
}
String schedulerAddress = jobConf.get("yarn.resourcemanager.scheduler.address");
if (schedulerAddress != null) {
conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
}
String mrJobHistoryWebappAddress = jobConf.get("mapreduce.jobhistory.webapp.address");
if (mrJobHistoryWebappAddress != null) {
conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
}
String yarnRMWebappAddress = jobConf.get("yarn.resourcemanager.webapp.address");
if (yarnRMWebappAddress != null) {
conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
}
} | 3.68 |
flink_DualInputOperator_setSecondInput | /**
* Sets the second input to the union of the given operators.
*
* @param inputs The operator(s) that form the second input.
* @deprecated This method will be removed in future versions. Use the {@link Union} operator
* instead.
*/
@Deprecated
public void setSecondInput(Operator<IN2>... inputs) {
this.input2 = Operator.createUnionCascade(inputs);
} | 3.68 |
hadoop_CryptoUtils_createIV | /**
* This method creates and initializes an IV (Initialization Vector)
*
* @param conf configuration
* @return byte[] initialization vector
* @throws IOException exception in case of error
*/
public static byte[] createIV(Configuration conf) throws IOException {
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
if (isEncryptedSpillEnabled(conf)) {
byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
cryptoCodec.generateSecureRandom(iv);
cryptoCodec.close();
return iv;
} else {
return null;
}
} | 3.68 |
hbase_QuotaFilter_setNamespaceFilter | /**
* Set the namespace filter regex
* @param regex the namespace filter
* @return the quota filter object
*/
public QuotaFilter setNamespaceFilter(final String regex) {
this.namespaceRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.68 |
framework_Tree_getItemDescriptionGenerator | /**
* Get the item description generator which generates tooltips for tree
* items.
*
* @return the item description generator
*/
public ItemDescriptionGenerator getItemDescriptionGenerator() {
return itemDescriptionGenerator;
} | 3.68 |
flink_DataTypeFactoryImpl_createSerializerExecutionConfig | /**
* Creates a lazy {@link ExecutionConfig} that contains options for {@link TypeSerializer}s with
* information from existing {@link ExecutionConfig} (if available) enriched with table {@link
* ReadableConfig}.
*/
private static Supplier<ExecutionConfig> createSerializerExecutionConfig(
ClassLoader classLoader, ReadableConfig config, ExecutionConfig executionConfig) {
return () -> {
final ExecutionConfig newExecutionConfig = new ExecutionConfig();
if (executionConfig != null) {
if (executionConfig.isForceKryoEnabled()) {
newExecutionConfig.enableForceKryo();
}
if (executionConfig.isForceAvroEnabled()) {
newExecutionConfig.enableForceAvro();
}
executionConfig
.getDefaultKryoSerializers()
.forEach(
(c, s) ->
newExecutionConfig.addDefaultKryoSerializer(
c, s.getSerializer()));
executionConfig
.getDefaultKryoSerializerClasses()
.forEach(newExecutionConfig::addDefaultKryoSerializer);
executionConfig
.getRegisteredKryoTypes()
.forEach(newExecutionConfig::registerKryoType);
executionConfig
.getRegisteredTypesWithKryoSerializerClasses()
.forEach(newExecutionConfig::registerTypeWithKryoSerializer);
executionConfig
.getRegisteredTypesWithKryoSerializers()
.forEach(
(c, s) ->
newExecutionConfig.registerTypeWithKryoSerializer(
c, s.getSerializer()));
}
newExecutionConfig.configure(config, classLoader);
return newExecutionConfig;
};
} | 3.68 |
hadoop_RouterQuotaUsage_verifyNamespaceQuota | /**
* Verify if namespace quota is violated once quota is set. Relevant
* method {@link DirectoryWithQuotaFeature#verifyNamespaceQuota}.
* @throws NSQuotaExceededException If the quota is exceeded.
*/
public void verifyNamespaceQuota() throws NSQuotaExceededException {
long quota = getQuota();
long fileAndDirectoryCount = getFileAndDirectoryCount();
if (Quota.isViolated(quota, fileAndDirectoryCount)) {
throw new NSQuotaExceededException(quota, fileAndDirectoryCount);
}
} | 3.68 |
querydsl_MetaDataExporter_setExportAll | /**
* Set whether all table types should be exported
*
* @param exportAll
*/
public void setExportAll(boolean exportAll) {
this.exportAll = exportAll;
} | 3.68 |
hadoop_JobHistoryServer_getBindAddress | /**
* Retrieve JHS bind address from configuration
*
* @param conf
* @return InetSocketAddress
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
} | 3.68 |
hadoop_SysInfoWindows_getNumCores | /** {@inheritDoc} */
@Override
public int getNumCores() {
return getNumProcessors();
} | 3.68 |
hbase_AsyncConnectionImpl_getNonceGenerator | // ditto
NonceGenerator getNonceGenerator() {
return nonceGenerator;
} | 3.68 |
flink_ResourceInformationReflector_getExternalResourcesUnSafe | /**
* Same as {@link #getExternalResources(Resource)} but allows to pass objects that are not of
* type {@link Resource}.
*/
@VisibleForTesting
Map<String, Long> getExternalResourcesUnSafe(Object resource) {
if (!isYarnResourceTypesAvailable) {
return Collections.emptyMap();
}
final Map<String, Long> externalResources = new HashMap<>();
final Object[] externalResourcesInfo;
try {
externalResourcesInfo = (Object[]) resourceGetResourcesMethod.invoke(resource);
// The first two element would be cpu and mem.
for (int i = 2; i < externalResourcesInfo.length; i++) {
final String name =
(String) resourceInformationGetNameMethod.invoke(externalResourcesInfo[i]);
final long value =
(long) resourceInformationGetValueMethod.invoke(externalResourcesInfo[i]);
externalResources.put(name, value);
}
} catch (Exception e) {
LOG.warn("Could not obtain the external resources supported by the given Resource.", e);
return Collections.emptyMap();
}
return externalResources;
} | 3.68 |
hadoop_ApplicationServiceRecordProcessor_createAInfo | /**
* Create an application A record descriptor.
*
* @param record the service record.
* @throws Exception if there is an issue during descriptor creation.
*/
protected void createAInfo(ServiceRecord record) throws Exception {
AApplicationRecordDescriptor recordInfo = new AApplicationRecordDescriptor(
getPath(), record);
registerRecordDescriptor(Type.A, recordInfo);
} | 3.68 |
dubbo_ReferenceBeanBuilder_setInjvm | /**
* @param injvm
* @deprecated instead, use the parameter <b>scope</b> to decide whether the call stays in the JVM (scope=local)
*/
@Deprecated
public ReferenceBeanBuilder setInjvm(Boolean injvm) {
attributes.put(ReferenceAttributes.INJVM, injvm);
return this;
} | 3.68 |
framework_CheckBox_writeDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#writeDesign(org.jsoup.nodes.Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void writeDesign(Element design, DesignContext designContext) {
super.writeDesign(design, designContext);
CheckBox def = designContext.getDefaultInstance(this);
Attributes attr = design.attributes();
DesignAttributeHandler.writeAttribute("checked", attr, getValue(),
def.getValue(), Boolean.class, designContext);
} | 3.68 |
pulsar_ConsumerStats_getPartitionStats | /**
* @return stats for each partition if topic is partitioned topic
*/
default Map<String, ConsumerStats> getPartitionStats() {
return Collections.emptyMap();
} | 3.68 |
flink_NetUtils_validateHostPortString | /**
* Validates if the given String represents a hostname:port.
*
* <p>Works also for ipv6.
*
* <p>See:
* http://stackoverflow.com/questions/2345063/java-common-way-to-validate-and-convert-hostport-to-inetsocketaddress
*
* @return URL object for accessing host and port
*/
private static URL validateHostPortString(String hostPort) {
if (StringUtils.isNullOrWhitespaceOnly(hostPort)) {
throw new IllegalArgumentException("hostPort should not be null or empty");
}
try {
URL u =
(hostPort.toLowerCase().startsWith("http://")
|| hostPort.toLowerCase().startsWith("https://"))
? new URL(hostPort)
: new URL("http://" + hostPort);
if (u.getHost() == null) {
throw new IllegalArgumentException(
"The given host:port ('" + hostPort + "') doesn't contain a valid host");
}
if (u.getPort() == -1) {
throw new IllegalArgumentException(
"The given host:port ('" + hostPort + "') doesn't contain a valid port");
}
return u;
} catch (MalformedURLException e) {
throw new IllegalArgumentException(
"The given host:port ('" + hostPort + "') is invalid", e);
}
} | 3.68 |
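The validation hinges on prefixing a scheme and letting java.net.URL do the host/port parsing. A standalone sketch of the same trick (a hypothetical helper, not the private Flink method):

```java
import java.net.MalformedURLException;
import java.net.URL;

final class HostPortSketch {

    // Prefix a scheme and let java.net.URL do the host/port parsing, as in the method above.
    static URL parse(String hostPort) throws MalformedURLException {
        URL u = hostPort.toLowerCase().startsWith("http://") || hostPort.toLowerCase().startsWith("https://")
                ? new URL(hostPort)
                : new URL("http://" + hostPort);
        if (u.getHost() == null || u.getPort() == -1) {
            throw new IllegalArgumentException("'" + hostPort + "' is not a valid host:port");
        }
        return u;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(parse("localhost:8081").getPort());  // 8081
        System.out.println(parse("[::1]:6123").getPort());      // 6123 (ipv6 literal also parses)
        // parse("localhost") would throw: no port present
    }
}
```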
hadoop_Cluster_renewDelegationToken | /**
* Renew a delegation token
* @param token the token to renew
* @return the new expiration time
* @throws InvalidToken
* @throws IOException
* @deprecated Use {@link Token#renew} instead
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws InvalidToken, IOException,
InterruptedException {
return token.renew(getConf());
} | 3.68 |
hadoop_FederationStateStoreFacade_addReservationHomeSubCluster | /**
* Save Reservation And HomeSubCluster Mapping.
*
* @param reservationId reservationId
* @param homeSubCluster homeSubCluster
* @throws YarnException on failure
*/
public void addReservationHomeSubCluster(ReservationId reservationId,
ReservationHomeSubCluster homeSubCluster) throws YarnException {
try {
// persist the mapping of reservationId and the subClusterId which has
// been selected as its home
addReservationHomeSubCluster(homeSubCluster);
} catch (YarnException e) {
String msg = String.format(
"Unable to insert the ReservationId %s into the FederationStateStore.", reservationId);
throw new YarnException(msg, e);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testTrim | /**
* Tests that Trim functionality works.
*/
@Test
public void testTrim() {
// Given
Function trim = trim(new FieldReference("field1"));
SelectStatement selectStatement = new SelectStatement(trim).from(new TableReference("schedule"));
// When
String result = testDialect.convertStatementToSQL(selectStatement);
// Then
assertEquals("Trim script should match expected", expectedTrim(), result);
} | 3.68 |
flink_CatalogContext_getConfiguration | /**
* TODO After https://issues.apache.org/jira/browse/FLINK-32427 is finished, we can get
* configuration for catalog.
*/
@Override
public Configuration getConfiguration() {
throw new UnsupportedOperationException();
} | 3.68 |
hadoop_RouterQuotaUpdateService_generateNewQuota | /**
* Generate a new quota based on old quota and current quota usage value.
* @param oldQuota Old quota stored in State Store.
* @param currentQuotaUsage Current quota usage value queried from
* subcluster.
* @return A new RouterQuotaUsage.
*/
private RouterQuotaUsage generateNewQuota(RouterQuotaUsage oldQuota,
QuotaUsage currentQuotaUsage) {
RouterQuotaUsage.Builder newQuotaBuilder = new RouterQuotaUsage.Builder()
.fileAndDirectoryCount(currentQuotaUsage.getFileAndDirectoryCount())
.quota(oldQuota.getQuota())
.spaceConsumed(currentQuotaUsage.getSpaceConsumed())
.spaceQuota(oldQuota.getSpaceQuota());
Quota.eachByStorageType(t -> {
newQuotaBuilder.typeQuota(t, oldQuota.getTypeQuota(t));
newQuotaBuilder.typeConsumed(t, currentQuotaUsage.getTypeConsumed(t));
});
return newQuotaBuilder.build();
} | 3.68 |
querydsl_MetaDataExporter_setPackageName | /**
* Set the package name
*
* @param packageName package name for sources
*/
public void setPackageName(String packageName) {
module.bind(SQLCodegenModule.PACKAGE_NAME, packageName);
} | 3.68 |
flink_ScalarFunction_getResultType | /**
* Returns the result type of the evaluation method with a given signature.
*
* @deprecated This method uses the old type system and is based on the old reflective
* extraction logic. The method will be removed in future versions and is only called when
* using the deprecated {@code TableEnvironment.registerFunction(...)} method. The new
* reflective extraction logic (possibly enriched with {@link DataTypeHint} and {@link
* FunctionHint}) should be powerful enough to cover most use cases. For advanced users, it
* is possible to override {@link UserDefinedFunction#getTypeInference(DataTypeFactory)}.
*/
@Deprecated
public TypeInformation<?> getResultType(Class<?>[] signature) {
return null;
} | 3.68 |
pulsar_ProducerConfiguration_getMaxPendingMessagesAcrossPartitions | /**
*
* @return the maximum number of pending messages allowed across all the partitions
*/
public int getMaxPendingMessagesAcrossPartitions() {
return conf.getMaxPendingMessagesAcrossPartitions();
} | 3.68 |
dubbo_AbstractJSONImpl_getNumberAsDouble | /**
* Gets a number from an object for the given key. If the key is not present, this returns null.
* If the value does not represent a double, throws an exception.
*/
@Override
public Double getNumberAsDouble(Map<String, ?> obj, String key) {
assert obj != null;
assert key != null;
if (!obj.containsKey(key)) {
return null;
}
Object value = obj.get(key);
if (value instanceof Double) {
return (Double) value;
}
if (value instanceof String) {
try {
return Double.parseDouble((String) value);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
String.format("value '%s' for key '%s' is not a double", value, key));
}
}
throw new IllegalArgumentException(
String.format("value '%s' for key '%s' in '%s' is not a number", value, key, obj));
} | 3.68 |
hadoop_LongValueSum_reset | /**
* reset the aggregator
*/
public void reset() {
sum = 0;
} | 3.68 |
hadoop_FileSubclusterResolver_getMountPoints | /**
* Get a list of mount points for a path.
*
* @param path Path to get the mount points under.
* @param mountPoints the mount points to choose.
* @return Return empty list if the path is a mount point but there are no
* mount points under the path. Return null if the path is not a mount
* point and there are no mount points under the path.
*/
static List<String> getMountPoints(String path,
Collection<String> mountPoints) {
Set<String> children = new TreeSet<>();
boolean exists = false;
for (String subPath : mountPoints) {
String child = subPath;
// Special case for /
if (!path.equals(Path.SEPARATOR)) {
// Get the children
int ini = path.length();
child = subPath.substring(ini);
}
if (child.isEmpty()) {
// This is a mount point but without children
exists = true;
} else if (child.startsWith(Path.SEPARATOR)) {
// This is a mount point with children
exists = true;
child = child.substring(1);
// We only return immediate children
int fin = child.indexOf(Path.SEPARATOR);
if (fin > -1) {
child = child.substring(0, fin);
}
if (!child.isEmpty()) {
children.add(child);
}
}
}
if (!exists) {
return null;
}
return new LinkedList<>(children);
} | 3.68 |
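A worked example, traced from the body above. It assumes the caller passes only mount points at or below the requested path (as the surrounding resolver does) and that the interface method is accessible; the import path is an assumption about the usual Hadoop RBF package layout:

```java
import java.util.Arrays;
import java.util.List;

// Hypothetical driver; the import below assumes the usual Hadoop RBF package layout.
import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;

public final class MountPointsDemo {

    public static void main(String[] args) {
        List<String> mounts = Arrays.asList("/", "/data", "/data/warehouse", "/tmp");
        // Immediate children of "/": prints [data, tmp]
        System.out.println(FileSubclusterResolver.getMountPoints("/", mounts));
        // Immediate children of "/data", passing only entries at or below it: prints [warehouse]
        System.out.println(FileSubclusterResolver.getMountPoints("/data",
                Arrays.asList("/data", "/data/warehouse")));
    }
}
```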
hadoop_StoreContext_pathToKey | /**
* Turns a path (relative or otherwise) into an S3 key.
*
* @param path input path, may be relative to the working dir
* @return a key excluding the leading "/", or, if it is the root path, ""
*/
public String pathToKey(Path path) {
return contextAccessors.pathToKey(path);
} | 3.68 |
morf_ChangelogStatementConsumer_writeWrapped | /**
* Writes one or more lines of text, applying line wrapping.
*/
private void writeWrapped(final String text) {
// Handle the case of multiple lines
if (text.contains(System.lineSeparator())) {
for (String line : text.split(System.lineSeparator())) {
writeWrapped(line);
}
return;
}
// Write anything below the wrapping limit
if (text.length() < LINE_LENGTH) {
outputStream.println(text);
return;
}
// Measure the indent to use on the split lines
int indent = 0;
while (indent < text.length() && text.charAt(indent) == ' ') {
indent++;
}
indent += 2;
// Split the line, preserving the indent on new lines
final String firstLineIndent = text.substring(0, indent);
final String lineSeparator = System.lineSeparator() + StringUtils.repeat(" ", indent);
outputStream.println(firstLineIndent + WordUtils.wrap(text.substring(indent), LINE_LENGTH - indent, lineSeparator, false));
} | 3.68 |