name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hudi_ColumnStatsIndices_getMetadataDataType | // -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private static DataType getMetadataDataType() {
return AvroSchemaConverter.convertToDataType(HoodieMetadataRecord.SCHEMA$);
} | 3.68 |
hadoop_BufferPool_releaseDoneBlocks | /**
* Releases resources for any blocks marked as 'done'.
*/
private synchronized void releaseDoneBlocks() {
for (BufferData data : getAll()) {
if (data.stateEqualsOneOf(BufferData.State.DONE)) {
release(data);
}
}
} | 3.68 |
hbase_ReportMakingVisitor_isTableDisabled | /** Returns True if table is disabled or disabling; defaults false! */
boolean isTableDisabled(RegionInfo ri) {
if (ri == null) {
return false;
}
if (this.services == null) {
return false;
}
if (this.services.getTableStateManager() == null) {
return false;
}
TableState state = null;
try {
state = this.services.getTableStateManager().getTableState(ri.getTable());
} catch (IOException e) {
LOG.warn("Failed getting table state", e);
}
return state != null && state.isDisabledOrDisabling();
} | 3.68 |
framework_TooltipInfo_hasMessage | /**
 * Checks if a message has been defined for the tooltip.
*
* @return true if title or error message is present, false if both are
* empty
*/
public boolean hasMessage() {
return (title != null && !title.isEmpty())
|| (errorMessageHtml != null && !errorMessageHtml.isEmpty());
} | 3.68 |
hbase_TableDescriptorBuilder_removeCoprocessor | /**
* Remove a coprocessor from those set on the table
 * @param className Class name of the coprocessor
*/
public void removeCoprocessor(String className) {
Bytes match = null;
Matcher keyMatcher;
Matcher valueMatcher;
for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
if (!keyMatcher.matches()) {
continue;
}
valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
if (!valueMatcher.matches()) {
continue;
}
// get className and compare
String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
// remove the CP if it is present
if (clazz.equals(className.trim())) {
match = e.getKey();
break;
}
}
// if we found a match, remove it
if (match != null) {
ModifyableTableDescriptor.this.removeValue(match);
} else {
throw new IllegalArgumentException(String.format(
"coprocessor with class name %s was not found in the table attribute", className));
}
} | 3.68 |
hbase_MemStoreSnapshot_getScanners | /**
* Create new {@link SnapshotSegmentScanner}s for iterating over the snapshot. <br/>
 * NOTE: When new {@link SnapshotSegmentScanner}s are created, {@link Segment#incScannerCount} is
 * invoked in the {@link SnapshotSegmentScanner} constructor, so after using these
 * {@link SnapshotSegmentScanner}s we must call {@link SnapshotSegmentScanner#close} to invoke
 * {@link Segment#decScannerCount}.
 * @return {@link KeyValueScanner}s (of type {@link SnapshotSegmentScanner}) for iterating
* over the snapshot.
*/
public List<KeyValueScanner> getScanners() {
return snapshotImmutableSegment.getSnapshotScanners();
} | 3.68 |
framework_VDragAndDropManager_defer | /**
 * Method to queue tasks until all drag-and-drop related server visits are done.
*
* @param command
*/
private void defer(Command command) {
deferredCommand = command;
} | 3.68 |
hadoop_JobMonitor_getRemainingJobs | /**
* If shutdown before all jobs have completed, any still-running jobs
* may be extracted from the component.
* @throws IllegalStateException If monitoring thread is still running.
* @return Any jobs submitted and not known to have completed.
*/
List<JobStats> getRemainingJobs() {
synchronized (mJobs) {
return new ArrayList<JobStats>(mJobs);
}
} | 3.68 |
flink_NFA_close | /** Tear-down method for the NFA. */
public void close() throws Exception {
for (State<T> state : getStates()) {
for (StateTransition<T> transition : state.getStateTransitions()) {
IterativeCondition condition = transition.getCondition();
FunctionUtils.closeFunction(condition);
}
}
} | 3.68 |
hbase_Threads_setDaemonThreadRunning | /**
* Utility method that sets name, daemon status and starts passed thread.
* @param t thread to frob
* @param name new name
 * @param handler A handler to set on the thread. Pass null if you want to use the default handler.
* @return Returns the passed Thread <code>t</code>.
*/
public static <T extends Thread> T setDaemonThreadRunning(T t, String name,
UncaughtExceptionHandler handler) {
t.setName(name);
if (handler != null) {
t.setUncaughtExceptionHandler(handler);
}
t.setDaemon(true);
t.start();
return t;
} | 3.68 |
hbase_MasterObserver_postGetConfiguredNamespacesAndTablesInRSGroup | /**
* Called after getting the configured namespaces and tables in the region server group.
* @param ctx the environment to interact with the framework and master
* @param groupName name of the region server group
*/
default void postGetConfiguredNamespacesAndTablesInRSGroup(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final String groupName)
throws IOException {
} | 3.68 |
flink_HsSubpartitionMemoryDataManager_canBeCompressed | /**
* Whether the buffer can be compressed or not. Note that event is not compressed because it is
* usually small and the size can become even larger after compression.
*/
private boolean canBeCompressed(Buffer buffer) {
return bufferCompressor != null && buffer.isBuffer() && buffer.readableBytes() > 0;
} | 3.68 |
morf_SchemaValidator_validateIndices | /**
* Validates a {@link Table}'s {@link Index}s meet the rules.
*
* @param table The {@link Table} on which to validate indexes.
*/
private void validateIndices(Table table) {
Map<IndexSignature, Index> indexesBySignature = new HashMap<>();
// -- First add the 'PRIMARY' index which is always present in the database
// for the column annotated @Id or @GeneratedId...
//
Iterable<Column> primaryKeyColumns = filter(table.columns(), new Predicate<Column>() {
@Override public boolean apply(Column input) {
return input.isPrimaryKey();
}
});
Iterable<String> primaryKeyColumnNames = Iterables.transform(
primaryKeyColumns,
new Function<Column, String>() {
@Override
public String apply(Column input) {
return input.getName();
}
});
Index primaryKeyIndex = index("PRIMARY")
.unique()
.columns(primaryKeyColumnNames);
indexesBySignature.put(new IndexSignature(primaryKeyIndex), primaryKeyIndex);
for (Index index : table.indexes()) {
if (!isEntityNameLengthValid(index.getName())) {
validationFailures.add("Name of index [" + index.getName() + "] on table [" + table.getName() + "] is not allowed - it is over " + MAX_LENGTH + " characters long");
}
if (isSQLReservedWord(index.getName())) {
validationFailures.add("Name of index [" + index.getName() + "] on table [" + table.getName() + "] is not allowed - it is an SQL reserved word");
}
if (!isNameConventional(index.getName())) {
validationFailures.add("Name of index [" + index.getName() + "] on table [" + table.getName() + "] is not allowed - it must match " + validNamePattern.toString());
}
// validate that there isn't an index by "id", which people have added in the past. This isn't necessary since id is the primary key, and in
// fact it breaks Oracle.
if (index.columnNames().equals(Arrays.asList("id"))) {
validationFailures.add("Index [" + index.getName() + "] on table [" + table.getName() + "] is not allowed - indexes by 'id' only are superfluous since 'id' is the primary key.");
}
// validate that there aren't any duplicate indexes
Index other = indexesBySignature.put(new IndexSignature(index), index);
if (other != null) {
validationFailures.add("Indexes [" + index.getName() + "] and [" + other.getName() + "] on table [" + table.getName() + "] are not allowed - one is a duplicate of the other");
}
}
} | 3.68 |
flink_BinaryRowDataSerializer_checkSkipWriteForFixLengthPart | /**
 * We need to skip bytes when the remaining bytes of the current segment are not enough to write
 * the binary row fixed part. See {@link BinaryRowData}.
*/
private int checkSkipWriteForFixLengthPart(AbstractPagedOutputView out) throws IOException {
        // skip if there is not enough space.
int available = out.getSegmentSize() - out.getCurrentPositionInSegment();
if (available < getSerializedRowFixedPartLength()) {
out.advance();
return available;
}
return 0;
} | 3.68 |
pulsar_PositionImpl_getPositionAfterEntries | /**
* Position after moving entryNum messages,
* if entryNum < 1, then return the current position.
* */
public PositionImpl getPositionAfterEntries(int entryNum) {
if (entryNum < 1) {
return this;
}
if (entryId < 0) {
return PositionImpl.get(ledgerId, entryNum - 1);
} else {
return PositionImpl.get(ledgerId, entryId + entryNum);
}
} | 3.68 |
flink_FutureCompletingBlockingQueue_moveToUnAvailable | /** Makes sure the availability future is incomplete, if it was complete before. */
@GuardedBy("lock")
private void moveToUnAvailable() {
if (currentFuture == AVAILABLE) {
currentFuture = new CompletableFuture<>();
}
} | 3.68 |
flink_FlinkHints_clearJoinHintsOnUnmatchedNodes | /** Clear the join hints on some nodes where these hints should not be attached. */
public static RelNode clearJoinHintsOnUnmatchedNodes(RelNode root) {
return root.accept(
new ClearJoinHintsOnUnmatchedNodesShuttle(root.getCluster().getHintStrategies()));
} | 3.68 |
hudi_LSMTimeline_getManifestVersion | /**
* Parse the snapshot version from the manifest file name.
*/
public static int getManifestVersion(String fileName) {
return Integer.parseInt(fileName.split("_")[1]);
} | 3.68 |
hbase_Export_createSubmittableJob | /**
* Sets up the actual job.
* @param conf The current configuration.
* @param args The command line parameters.
* @return The newly created job.
* @throws IOException When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
Triple<TableName, Scan, Path> arguments = ExportUtils.getArgumentsFromCommandLine(conf, args);
String tableName = arguments.getFirst().getNameAsString();
Path outputDir = arguments.getThird();
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
job.setJobName(NAME + "_" + tableName);
job.setJarByClass(Export.class);
// Set optional scan parameters
Scan s = arguments.getSecond();
IdentityTableMapper.initJob(tableName, s, IdentityTableMapper.class, job);
// No reducers. Just write straight to output files.
job.setNumReduceTasks(0);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setOutputKeyClass(ImmutableBytesWritable.class);
job.setOutputValueClass(Result.class);
FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't
// have a default fs.
return job;
} | 3.68 |
morf_RenameTable_reverse | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
return applyChange(schema, newTableName, oldTableName);
} | 3.68 |
streampipes_Formats_jsonFormat | /**
* Defines the transport format JSON used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type JSON.
*/
public static TransportFormat jsonFormat() {
return new TransportFormat(MessageFormat.JSON);
} | 3.68 |
hudi_ExternalSpillableMap_getDiskBasedMapNumEntries | /**
* Number of entries in BitCaskDiskMap.
*/
public int getDiskBasedMapNumEntries() {
return getDiskBasedMap().size();
} | 3.68 |
hadoop_FindOptions_setStartTime | /**
* Set the start time of this {@link Find} command.
*
* @param time start time (in milliseconds since epoch)
*/
public void setStartTime(long time) {
this.startTime = time;
} | 3.68 |
flink_TestEnvironmentSettings_getSavepointRestorePath | /** Path of savepoint that the job should recover from. */
@Nullable
public String getSavepointRestorePath() {
return savepointRestorePath;
} | 3.68 |
flink_HiveSetProcessor_setVariable | /** Set variable following Hive's implementation. */
public static void setVariable(
HiveConf hiveConf, Map<String, String> hiveVariables, String varname, String varvalue) {
if (varname.startsWith(ENV_PREFIX)) {
throw new UnsupportedOperationException("env:* variables can not be set.");
} else if (varname.startsWith(SYSTEM_PREFIX)) {
String propName = varname.substring(SYSTEM_PREFIX.length());
System.getProperties()
.setProperty(
propName,
new VariableSubstitution(() -> hiveVariables)
.substitute(hiveConf, varvalue));
} else if (varname.startsWith(HIVECONF_PREFIX)) {
String propName = varname.substring(HIVECONF_PREFIX.length());
setConf(hiveConf, hiveVariables, varname, propName, varvalue);
} else if (varname.startsWith(HIVEVAR_PREFIX)) {
String propName = varname.substring(HIVEVAR_PREFIX.length());
hiveVariables.put(
propName,
new VariableSubstitution(() -> hiveVariables).substitute(hiveConf, varvalue));
} else if (varname.startsWith(METACONF_PREFIX)) {
String propName = varname.substring(METACONF_PREFIX.length());
try {
Hive hive = Hive.get(hiveConf);
hive.setMetaConf(
propName,
new VariableSubstitution(() -> hiveVariables)
.substitute(hiveConf, varvalue));
} catch (HiveException e) {
throw new FlinkHiveException(
String.format("'SET %s=%s' FAILED.", varname, varvalue), e);
}
} else {
            // This is slightly different from Hive's behavior: if there's no prefix, we also
            // put the variable into the passed hiveVariables so that Flink may use it as its
            // own configuration.
// Otherwise, there's no way to set Flink's configuration using Hive's set command.
hiveVariables.put(
varname,
new VariableSubstitution(() -> hiveVariables).substitute(hiveConf, varvalue));
setConf(hiveConf, hiveVariables, varname, varname, varvalue);
}
} | 3.68 |
shardingsphere-elasticjob_TaskContext_setSlaveId | /**
* Set job server ID.
*
* @param slaveId job server ID
*/
public void setSlaveId(final String slaveId) {
id = id.replaceAll(this.slaveId, slaveId);
this.slaveId = slaveId;
} | 3.68 |
framework_DropTargetExtensionConnector_containsFiles | /**
* Tells if the given array of types contains files.
* <p>
* According to HTML specification, if any files are being dragged, {@code
* dataTransfer.types} will contain the string "Files". See
* https://html.spec.whatwg.org/multipage/interaction.html#the-datatransfer-interface:dom-datatransfer-types-2
*
* @param types
* Array of data types.
     * @return {@code true} if given array contains {@code "Files"}, {@code
* false} otherwise.
*/
private boolean containsFiles(JsArrayString types) {
for (int i = 0; i < types.length(); i++) {
if ("Files".equals(types.get(i))) {
return true;
}
}
return false;
} | 3.68 |
flink_StateAssignmentOperation_getRawKeyedStateHandles | /**
* Collect {@link KeyGroupsStateHandle rawKeyedStateHandles} which have intersection with given
* {@link KeyGroupRange} from {@link TaskState operatorState}.
*
     * @param operatorState all state handles of an operator
* @param subtaskKeyGroupRange the KeyGroupRange of a subtask
* @return all rawKeyedStateHandles which have intersection with given KeyGroupRange
*/
public static List<KeyedStateHandle> getRawKeyedStateHandles(
OperatorState operatorState, KeyGroupRange subtaskKeyGroupRange) {
final int parallelism = operatorState.getParallelism();
List<KeyedStateHandle> extractedKeyedStateHandles = null;
for (int i = 0; i < parallelism; i++) {
if (operatorState.getState(i) != null) {
Collection<KeyedStateHandle> rawKeyedState =
operatorState.getState(i).getRawKeyedState();
if (extractedKeyedStateHandles == null) {
extractedKeyedStateHandles =
new ArrayList<>(parallelism * rawKeyedState.size());
}
extractIntersectingState(
rawKeyedState, subtaskKeyGroupRange, extractedKeyedStateHandles);
}
}
return extractedKeyedStateHandles != null ? extractedKeyedStateHandles : emptyList();
} | 3.68 |
dubbo_InternalThreadLocal_removeAll | /**
* Removes all {@link InternalThreadLocal} variables bound to the current thread. This operation is useful when you
* are in a container environment, and you don't want to leave the thread local variables in the threads you do not
* manage.
*/
@SuppressWarnings("unchecked")
public static void removeAll() {
InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.getIfSet();
if (threadLocalMap == null) {
return;
}
try {
Object v = threadLocalMap.indexedVariable(VARIABLES_TO_REMOVE_INDEX);
if (v != null && v != InternalThreadLocalMap.UNSET) {
Set<InternalThreadLocal<?>> variablesToRemove = (Set<InternalThreadLocal<?>>) v;
InternalThreadLocal<?>[] variablesToRemoveArray =
variablesToRemove.toArray(new InternalThreadLocal[0]);
for (InternalThreadLocal<?> tlv : variablesToRemoveArray) {
tlv.remove(threadLocalMap);
}
}
} finally {
InternalThreadLocalMap.remove();
}
} | 3.68 |
pulsar_PulsarAdminImpl_sinks | /**
* @return the sinks management object
*/
public Sinks sinks() {
return sinks;
} | 3.68 |
framework_Overlay_setLeft | /**
* Sets the pixel value for left css property.
*
* @param left
* value to set
*/
public void setLeft(int left) {
this.left = left;
} | 3.68 |
framework_Escalator_getStylePrimaryName | /**
* Returns the primary style name of the container.
*
* @return The primary style name or <code>null</code> if not set.
*/
protected String getStylePrimaryName() {
return primaryStyleName;
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperationsForExistingDataFix1 | /**
* Regression test that checks if the DSL with Math expressions, that is used produces expected SQL.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperationsForExistingDataFix1() {
Function dsl = round(field("doublevalue").divideBy(literal(1000)).multiplyBy(field("doublevalue")), literal(2));
String sql = testDialect.getSqlFrom(dsl);
assertEquals(expectedSqlForMathOperationsForExistingDataFix1(), sql);
} | 3.68 |
hadoop_UnmanagedApplicationManager_getUAMToken | /**
* Gets the amrmToken of the unmanaged AM.
*
* @return the amrmToken of the unmanaged AM.
* @throws IOException if getApplicationReport fails
* @throws YarnException if getApplicationReport fails
*/
protected Token<AMRMTokenIdentifier> getUAMToken()
throws IOException, YarnException {
Token<AMRMTokenIdentifier> token = null;
org.apache.hadoop.yarn.api.records.Token amrmToken =
getApplicationReport(this.applicationId).getAMRMToken();
if (amrmToken != null) {
token = ConverterUtils.convertFromYarn(amrmToken, (Text) null);
} else {
LOG.warn("AMRMToken not found in the application report for application: {}",
this.applicationId);
}
return token;
} | 3.68 |
hudi_HoodieBootstrapSchemaProvider_getBootstrapSchema | /**
* Main API to select avro schema for bootstrapping.
* @param context HoodieEngineContext
* @param partitions List of partitions with files within them
* @return Avro Schema
*/
public final Schema getBootstrapSchema(HoodieEngineContext context, List<Pair<String, List<HoodieFileStatus>>> partitions) {
if (writeConfig.getSchema() != null) {
// Use schema specified by user if set
Schema userSchema = new Schema.Parser().parse(writeConfig.getSchema());
if (!HoodieAvroUtils.getNullSchema().equals(userSchema)) {
return userSchema;
}
}
return getBootstrapSourceSchema(context, partitions);
} | 3.68 |
framework_Sort_build | /**
* Build a sort order list. This method is called internally by Grid when
* calling {@link com.vaadin.client.ui.grid.Grid#sort(Sort)}, but can also
* be called manually to create a SortOrder list, which can also be provided
* directly to Grid.
*
* @return a sort order list.
*/
public List<SortOrder> build() {
List<SortOrder> order = new ArrayList<>(count);
Sort s = this;
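        // Walk the chained Sort instances from the last to the first, inserting each order at the head of the list.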
for (int i = count - 1; i >= 0; --i) {
order.add(0, s.order);
s = s.previous;
}
return order;
} | 3.68 |
flink_TGetQueryIdReq_isSetOperationHandle | /**
* Returns true if field operationHandle is set (has been assigned a value) and false otherwise
*/
public boolean isSetOperationHandle() {
return this.operationHandle != null;
} | 3.68 |
flink_StreamOperatorFactoryUtil_createOperator | /**
* Creates a new operator using a factory and makes sure that all special factory traits are
* properly handled.
*
* @param operatorFactory the operator factory.
* @param containingTask the containing task.
* @param configuration the configuration of the operator.
* @param output the output of the operator.
* @param operatorEventDispatcher the operator event dispatcher for communication between
* operator and coordinators.
* @return a newly created and configured operator, and the {@link ProcessingTimeService}
* instance it can access.
*/
public static <OUT, OP extends StreamOperator<OUT>>
Tuple2<OP, Optional<ProcessingTimeService>> createOperator(
StreamOperatorFactory<OUT> operatorFactory,
StreamTask<OUT, ?> containingTask,
StreamConfig configuration,
Output<StreamRecord<OUT>> output,
OperatorEventDispatcher operatorEventDispatcher) {
MailboxExecutor mailboxExecutor =
containingTask
.getMailboxExecutorFactory()
.createExecutor(configuration.getChainIndex());
if (operatorFactory instanceof YieldingOperatorFactory) {
((YieldingOperatorFactory<?>) operatorFactory).setMailboxExecutor(mailboxExecutor);
}
final Supplier<ProcessingTimeService> processingTimeServiceFactory =
() ->
containingTask
.getProcessingTimeServiceFactory()
.createProcessingTimeService(mailboxExecutor);
final ProcessingTimeService processingTimeService;
if (operatorFactory instanceof ProcessingTimeServiceAware) {
processingTimeService = processingTimeServiceFactory.get();
((ProcessingTimeServiceAware) operatorFactory)
.setProcessingTimeService(processingTimeService);
} else {
processingTimeService = null;
}
// TODO: what to do with ProcessingTimeServiceAware?
OP op =
operatorFactory.createStreamOperator(
new StreamOperatorParameters<>(
containingTask,
configuration,
output,
processingTimeService != null
? () -> processingTimeService
: processingTimeServiceFactory,
operatorEventDispatcher));
return new Tuple2<>(op, Optional.ofNullable(processingTimeService));
} | 3.68 |
morf_SqlUtils_nullLiteralIfFieldIsZero | /**
* Returns null if field is zero, otherwise returns the expression.
*
* @param field expression to check for zero.
* @param expression expression to return if field is not zero.
* @return null or the expression
*/
public static AliasedField nullLiteralIfFieldIsZero(AliasedField field, AliasedField expression) {
return caseStatement(
when(field.eq(0)).then(nullLiteral())
).otherwise(expression);
} | 3.68 |
flink_GenericRowData_setField | /**
* Sets the field value at the given position.
*
     * <p>Note: The given field value must be an internal data structure. Otherwise the {@link
     * GenericRowData} is corrupted and may throw an exception when processing. See {@link RowData} for
* more information about internal data structures.
*
* <p>The field value can be null for representing nullability.
*/
public void setField(int pos, Object value) {
this.fields[pos] = value;
} | 3.68 |
morf_InsertStatement_isSpecificValuesInsert | /**
* Identifies whether this insert will use specified
* {@link #values(AliasedFieldBuilder...)} instead of a source
* table or select.
*
* @return true if the insert is using specified actual values to insert into the columns.
*/
public boolean isSpecificValuesInsert() {
return fromTable == null && selectStatement == null && !values.isEmpty();
} | 3.68 |
dubbo_RpcServiceContext_getRemoteAddressString | /**
* get remote address string.
*
* @return remote address string.
*/
@Override
public String getRemoteAddressString() {
return getRemoteHost() + ":" + getRemotePort();
} | 3.68 |
dubbo_NacosNamingServiceUtils_toInstance | /**
* Convert the {@link ServiceInstance} to {@link Instance}
*
* @param serviceInstance {@link ServiceInstance}
* @return non-null
* @since 2.7.5
*/
public static Instance toInstance(ServiceInstance serviceInstance) {
Instance instance = new Instance();
instance.setServiceName(serviceInstance.getServiceName());
instance.setIp(serviceInstance.getHost());
instance.setPort(serviceInstance.getPort());
instance.setMetadata(serviceInstance.getSortedMetadata());
instance.setEnabled(serviceInstance.isEnabled());
instance.setHealthy(serviceInstance.isHealthy());
return instance;
} | 3.68 |
graphhopper_VectorTile_clearValues | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public Builder clearValues() {
if (valuesBuilder_ == null) {
values_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
valuesBuilder_.clear();
}
return this;
} | 3.68 |
incubator-hugegraph-toolchain_SchemaController_checkProperties | /**
* Check properties are defined
*/
public static void checkProperties(PropertyKeyService service,
Set<Property> properties,
boolean mustNullable, int connId) {
if (properties == null) {
return;
}
for (Property property : properties) {
String pkName = property.getName();
service.checkExist(pkName, connId);
Ex.check(mustNullable, property::isNullable,
"schema.propertykey.must-be-nullable", pkName);
}
} | 3.68 |
hbase_SplitLogManagerCoordination_getTasks | /** Returns map of tasks */
public ConcurrentMap<String, Task> getTasks() {
return tasks;
} | 3.68 |
framework_TreeGridElement_isRowCollapsed | /**
* Returns whether the row at the given index is collapsed or not.
*
* @param rowIndex
* 0-based row index
* @param hierarchyColumnIndex
* 0-based index of the hierarchy column
* @return {@code true} if collapsed, {@code false} if expanded
*/
public boolean isRowCollapsed(int rowIndex, int hierarchyColumnIndex) {
return !isRowExpanded(rowIndex, hierarchyColumnIndex);
} | 3.68 |
hbase_HFileBlock_checkOnDiskSizeWithHeader | /**
* Check that {@code value} read from a block header seems reasonable, within a large margin of
* error.
* @return {@code true} if the value is safe to proceed, {@code false} otherwise.
*/
private boolean checkOnDiskSizeWithHeader(int value) {
if (value < 0) {
if (LOG.isTraceEnabled()) {
LOG.trace(
"onDiskSizeWithHeader={}; value represents a size, so it should never be negative.",
value);
}
return false;
}
if (value - hdrSize < 0) {
if (LOG.isTraceEnabled()) {
LOG.trace("onDiskSizeWithHeader={}, hdrSize={}; don't accept a value that is negative"
+ " after the header size is excluded.", value, hdrSize);
}
return false;
}
return true;
} | 3.68 |
querydsl_DateExpression_dayOfYear | /**
     * Create a day of year expression (range 1-366)
* <p>NOT supported in JDOQL and not in Derby</p>
*
* @return day of year
*/
public NumberExpression<Integer> dayOfYear() {
if (dayOfYear == null) {
dayOfYear = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.DAY_OF_YEAR, mixin);
}
return dayOfYear;
} | 3.68 |
framework_VaadinService_init | /**
* Initializes this service. The service should be initialized before it is
* used.
*
* @since 7.1
* @throws ServiceException
* if a problem occurs when creating the service
*/
public void init() throws ServiceException {
List<RequestHandler> handlers = createRequestHandlers();
ServiceInitEvent event = new ServiceInitEvent(this);
Iterator<VaadinServiceInitListener> initListeners = getServiceInitListeners();
while (initListeners.hasNext()) {
initListeners.next().serviceInit(event);
}
handlers.addAll(event.getAddedRequestHandlers());
Collections.reverse(handlers);
requestHandlers = Collections.unmodifiableCollection(handlers);
dependencyFilters = Collections.unmodifiableCollection(
initDependencyFilters(event.getAddedDependencyFilters()));
connectorIdGenerator = initConnectorIdGenerator(
event.getAddedConnectorIdGenerators());
assert connectorIdGenerator != null;
initialized = true;
} | 3.68 |
rocketmq-connect_PositionStorageWriter_close | /**
* Closes this stream and releases any system resources associated
* with it. If the stream is already closed then invoking this
* method has no effect.
*
* @throws IOException if an I/O error occurs
*/
@Override
public void close() throws IOException {
if (executorService != null) {
executorService.shutdown();
}
} | 3.68 |
framework_DownloadStream_writeResponse | /**
* Writes this download stream to a Vaadin response. This takes care of
* setting response headers according to what is defined in this download
* stream ({@link #getContentType()}, {@link #getCacheTime()},
* {@link #getFileName()}) and transferring the data from the stream (
* {@link #getStream()}) to the response. Defined parameters (
* {@link #getParameterNames()}) are also included as headers in the
* response. If there's is a parameter named <code>Location</code>, a
* redirect (302 Moved temporarily) is sent instead of the contents of this
* stream.
*
* @param request
* the request for which the response should be written
* @param response
* the Vaadin response to write this download stream to
*
* @throws IOException
* passed through from the Vaadin response
*
* @since 7.0
*/
public void writeResponse(VaadinRequest request, VaadinResponse response)
throws IOException {
if (getParameter("Location") != null) {
response.setStatus(HttpServletResponse.SC_MOVED_TEMPORARILY);
response.setHeader("Location", getParameter("Location"));
return;
}
// Download from given stream
final InputStream data = getStream();
if (data == null) {
response.setStatus(HttpServletResponse.SC_NOT_FOUND);
return;
}
if (data != null) {
OutputStream out = null;
try {
// Sets content type
response.setContentType(getContentType());
// Sets cache headers
response.setCacheTime(getCacheTime());
// Copy download stream parameters directly
// to HTTP headers.
final Iterator<String> i = getParameterNames();
if (i != null) {
while (i.hasNext()) {
final String param = i.next();
response.setHeader(param, getParameter(param));
}
}
// Content-Disposition: attachment generally forces download
String contentDisposition = getParameter(CONTENT_DISPOSITION);
if (contentDisposition == null) {
contentDisposition = getContentDispositionFilename(
getFileName());
}
response.setHeader(CONTENT_DISPOSITION, contentDisposition);
int bufferSize = getBufferSize();
if (bufferSize <= 0 || bufferSize > Constants.MAX_BUFFER_SIZE) {
bufferSize = Constants.DEFAULT_BUFFER_SIZE;
}
final byte[] buffer = new byte[bufferSize];
int bytesRead = 0;
out = response.getOutputStream();
long totalWritten = 0;
while ((bytesRead = data.read(buffer)) > 0) {
out.write(buffer, 0, bytesRead);
totalWritten += bytesRead;
if (totalWritten >= buffer.length) {
// Avoid chunked encoding for small resources
out.flush();
}
}
} finally {
tryToCloseStream(out);
tryToCloseStream(data);
}
}
} | 3.68 |
flink_WebLogDataGenerator_genVisits | /**
* Generates the files for the visits relation. The visits entries apply the following format:
* <br>
* <code>IP Address | URL | Date (YYYY-MM-DD) | Misc. Data (e.g. User-Agent) |\n</code>
*
* @param noVisits Number of entries for the visits relation
* @param noDocs Number of entries in the documents relation
* @param path Output path for the visits relation
*/
private static void genVisits(int noVisits, int noDocs, String path) {
Random rand = new Random(Calendar.getInstance().getTimeInMillis());
try (BufferedWriter fw = new BufferedWriter(new FileWriter(path))) {
for (int i = 0; i < noVisits; i++) {
int year = 2000 + rand.nextInt(10); // yearFilter 3
int month = rand.nextInt(12) + 1; // month between 1 and 12
int day = rand.nextInt(27) + 1; // day between 1 and 28
// IP address
StringBuilder visit =
new StringBuilder(
rand.nextInt(256)
+ "."
+ rand.nextInt(256)
+ "."
+ rand.nextInt(256)
+ "."
+ rand.nextInt(256)
+ "|");
// URL
visit.append("url_" + rand.nextInt(noDocs) + "|");
// Date (format: YYYY-MM-DD)
visit.append(year + "-" + month + "-" + day + "|");
// Miscellaneous data, e.g. User-Agent
visit.append("0.12|Mozilla Firefox 3.1|de|de|Nothing special|124|\n");
fw.write(visit.toString());
}
} catch (IOException e) {
e.printStackTrace();
}
} | 3.68 |
flink_OperationManager_getOperationInfo | /**
* Get the {@link OperationInfo} of the operation.
*
* @param operationHandle identifies the {@link Operation}.
*/
public OperationInfo getOperationInfo(OperationHandle operationHandle) {
return getOperation(operationHandle).getOperationInfo();
} | 3.68 |
framework_GridConnector_detailsOpened | /**
* Inform LazyDetailsScroller that a details row has opened on a row.
*
* @param rowIndex
* index of row with details now open
*/
public void detailsOpened(int rowIndex) {
if (targetRow == rowIndex) {
getWidget().scrollToRow(targetRow, destination);
disableScroller.run();
}
} | 3.68 |
hbase_BackupManifest_setIncrTimestampMap | /**
* Set the incremental timestamp map directly.
* @param incrTimestampMap timestamp map
*/
public void setIncrTimestampMap(Map<TableName, Map<String, Long>> incrTimestampMap) {
this.backupImage.setIncrTimeRanges(incrTimestampMap);
} | 3.68 |
hadoop_CompositeService_stop | /**
* Stop the services in reverse order
*
* @param numOfServicesStarted index from where the stop should work
   * @param stopOnlyStartedServices flag to say "only stop services that are
   * started, not those that are NOTINITED or INITED".
* @throws RuntimeException the first exception raised during the
* stop process -<i>after all services are stopped</i>
*/
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
// stop in reverse order of start
Exception firstException = null;
List<Service> services = getServices();
for (int i = numOfServicesStarted - 1; i >= 0; i--) {
Service service = services.get(i);
if (LOG.isDebugEnabled()) {
LOG.debug("Stopping service #" + i + ": " + service);
}
STATE state = service.getServiceState();
      // depending on the stop policy
if (state == STATE.STARTED
|| (!stopOnlyStartedServices && state == STATE.INITED)) {
Exception ex = ServiceOperations.stopQuietly(LOG, service);
if (ex != null && firstException == null) {
firstException = ex;
}
}
}
//after stopping all services, rethrow the first exception raised
if (firstException != null) {
throw ServiceStateException.convert(firstException);
}
} | 3.68 |
flink_FileChannelOutputView_close | /**
* Closes this output, writing pending data and releasing the memory.
*
* @throws IOException Thrown, if the pending data could not be written.
*/
public void close() throws IOException {
close(false);
} | 3.68 |
hbase_ParseFilter_checkForWhile | /**
* Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE'
* <p>
* @param filterStringAsByteArray filter string given by the user
* @param indexOfWhile index at which an 'W' was read
* @return true if the keyword 'WHILE' is at the current index
*/
public static boolean checkForWhile(byte[] filterStringAsByteArray, int indexOfWhile)
throws CharacterCodingException {
try {
if (
filterStringAsByteArray[indexOfWhile] == ParseConstants.W
&& filterStringAsByteArray[indexOfWhile + 1] == ParseConstants.H
&& filterStringAsByteArray[indexOfWhile + 2] == ParseConstants.I
&& filterStringAsByteArray[indexOfWhile + 3] == ParseConstants.L
&& filterStringAsByteArray[indexOfWhile + 4] == ParseConstants.E
&& (indexOfWhile == 0
|| filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.WHITESPACE
|| filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.RPAREN
|| filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.LPAREN)
&& (filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.WHITESPACE
|| filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.LPAREN)
) {
return true;
} else {
return false;
}
} catch (ArrayIndexOutOfBoundsException e) {
return false;
}
} | 3.68 |
zxing_RSSExpandedReader_checkRows | // Try to construct a valid rows sequence
// Recursion is used to implement backtracking
private List<ExpandedPair> checkRows(List<ExpandedRow> collectedRows, int currentRow) throws NotFoundException {
for (int i = currentRow; i < rows.size(); i++) {
ExpandedRow row = rows.get(i);
this.pairs.clear();
for (ExpandedRow collectedRow : collectedRows) {
this.pairs.addAll(collectedRow.getPairs());
}
this.pairs.addAll(row.getPairs());
if (isValidSequence(this.pairs, false)) {
if (checkChecksum()) {
return this.pairs;
}
List<ExpandedRow> rs = new ArrayList<>(collectedRows);
rs.add(row);
try {
// Recursion: try to add more rows
return checkRows(rs, i + 1);
} catch (NotFoundException e) {
// We failed, try the next candidate
}
}
}
throw NotFoundException.getNotFoundInstance();
} | 3.68 |
streampipes_AbstractProcessingElementBuilder_naryMappingPropertyWithoutRequirement | /**
* Adds a new {@link org.apache.streampipes.model.staticproperty.MappingPropertyNary}
* to the pipeline element definition which is not linked to a specific input property.
 * Use this method if you want to present users with a selection (in the form of a Checkbox Group)
* of all available input event properties.
*
* @param label A human-readable label that is displayed to users in the StreamPipes UI.
* @param propertyScope Only input event properties that match the
* {@link org.apache.streampipes.model.schema.PropertyScope} are displayed.
* @return
*/
public K naryMappingPropertyWithoutRequirement(Label label, PropertyScope propertyScope) {
MappingPropertyNary mp = new MappingPropertyNary(label.getInternalId(), label.getLabel(), label.getDescription());
mp.setPropertyScope(propertyScope.name());
this.staticProperties.add(mp);
return me();
} | 3.68 |
hibernate-validator_ValueExtractorResolver_getPotentiallyRuntimeTypeCompliantAndContainerElementCompliantValueExtractors | /**
* Returns the set of potentially type-compliant and container-element-compliant value extractors or an empty set if none was found.
* <p>
* A value extractor is potentially runtime type compliant if it might be compliant for any runtime type that matches the declared type.
*/
private Set<ValueExtractorDescriptor> getPotentiallyRuntimeTypeCompliantAndContainerElementCompliantValueExtractors(Type declaredType,
TypeVariable<?> typeParameter) {
boolean isInternal = TypeVariables.isInternal( typeParameter );
Type erasedDeclaredType = TypeHelper.getErasedReferenceType( declaredType );
Set<ValueExtractorDescriptor> typeCompatibleExtractors = registeredValueExtractors
.stream()
.filter( e -> TypeHelper.isAssignable( erasedDeclaredType, e.getContainerType() ) )
.collect( Collectors.toSet() );
Set<ValueExtractorDescriptor> containerElementCompliantExtractors = new HashSet<>();
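		// Keep only the extractors whose extracted type parameter, once bound to the declared container type, matches the targeted type parameter.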
for ( ValueExtractorDescriptor extractorDescriptor : typeCompatibleExtractors ) {
TypeVariable<?> typeParameterBoundToExtractorType;
if ( !isInternal ) {
Map<Class<?>, Map<TypeVariable<?>, TypeVariable<?>>> allBindings =
TypeVariableBindings.getTypeVariableBindings( extractorDescriptor.getContainerType() );
Map<TypeVariable<?>, TypeVariable<?>> bindingsForExtractorType = allBindings.get( erasedDeclaredType );
typeParameterBoundToExtractorType = bind( extractorDescriptor.getExtractedTypeParameter(), bindingsForExtractorType );
}
else {
typeParameterBoundToExtractorType = typeParameter;
}
if ( Objects.equals( typeParameter, typeParameterBoundToExtractorType ) ) {
containerElementCompliantExtractors.add( extractorDescriptor );
}
}
return containerElementCompliantExtractors;
} | 3.68 |
flink_TaskManagerRunner_createRpcService | /**
* Create a RPC service for the task manager.
*
* @param configuration The configuration for the TaskManager.
* @param haServices to use for the task manager hostname retrieval
*/
@VisibleForTesting
static RpcService createRpcService(
final Configuration configuration,
final HighAvailabilityServices haServices,
final RpcSystem rpcSystem)
throws Exception {
checkNotNull(configuration);
checkNotNull(haServices);
return RpcUtils.createRemoteRpcService(
rpcSystem,
configuration,
determineTaskManagerBindAddress(configuration, haServices, rpcSystem),
configuration.getString(TaskManagerOptions.RPC_PORT),
configuration.getString(TaskManagerOptions.BIND_HOST),
configuration.getOptional(TaskManagerOptions.RPC_BIND_PORT));
} | 3.68 |
zxing_MaskUtil_applyMaskPenaltyRule1Internal | /**
* Helper function for applyMaskPenaltyRule1. We need this for doing this calculation in both
* vertical and horizontal orders respectively.
*/
private static int applyMaskPenaltyRule1Internal(ByteMatrix matrix, boolean isHorizontal) {
int penalty = 0;
int iLimit = isHorizontal ? matrix.getHeight() : matrix.getWidth();
int jLimit = isHorizontal ? matrix.getWidth() : matrix.getHeight();
byte[][] array = matrix.getArray();
for (int i = 0; i < iLimit; i++) {
int numSameBitCells = 0;
int prevBit = -1;
for (int j = 0; j < jLimit; j++) {
int bit = isHorizontal ? array[i][j] : array[j][i];
if (bit == prevBit) {
numSameBitCells++;
} else {
if (numSameBitCells >= 5) {
penalty += N1 + (numSameBitCells - 5);
}
numSameBitCells = 1; // Include the cell itself.
prevBit = bit;
}
}
if (numSameBitCells >= 5) {
penalty += N1 + (numSameBitCells - 5);
}
}
return penalty;
} | 3.68 |
hmily_ExtensionLoader_getExtensionLoader | /**
* Gets extension loader.
*
* @param <T> the type parameter
* @param clazz the clazz
* @return the extension loader
*/
public static <T> ExtensionLoader<T> getExtensionLoader(final Class<T> clazz) {
if (clazz == null) {
throw new NullPointerException("extension clazz is null");
}
if (!clazz.isInterface()) {
throw new IllegalArgumentException("extension clazz (" + clazz + "is not interface!");
}
ExtensionLoader<T> extensionLoader = (ExtensionLoader<T>) LOADERS.get(clazz);
if (extensionLoader != null) {
return extensionLoader;
}
LOADERS.putIfAbsent(clazz, new ExtensionLoader<>(clazz));
return (ExtensionLoader<T>) LOADERS.get(clazz);
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperations5 | /**
* @return expected SQL for math operation 5
*/
protected String expectedSqlForMathOperations5() {
return "a * (b + c)";
} | 3.68 |
hadoop_ManifestCommitter_getSuccessReport | /**
* Get the manifest Success data; only valid after a job.
* @return the job _SUCCESS data, or null.
*/
public ManifestSuccessData getSuccessReport() {
return successReport;
} | 3.68 |
hadoop_VersionInfoMojo_byteArrayToString | /**
* Converts bytes to a hexadecimal string representation and returns it.
*
* @param array byte[] to convert
* @return String containing hexadecimal representation of bytes
*/
private String byteArrayToString(byte[] array) {
StringBuilder sb = new StringBuilder();
for (byte b : array) {
sb.append(Integer.toHexString(0xff & b));
}
return sb.toString();
} | 3.68 |
pulsar_Record_getPartitionId | /**
     * Retrieves the partition information of the record, if any.
     *
     * @return The partition id of the record, if present.
*/
default Optional<String> getPartitionId() {
return Optional.empty();
} | 3.68 |
framework_DownloadStream_setCacheTime | /**
     * Sets the length of the cache expiration time. This gives the adapter the
     * possibility to cache streams sent to the client. The caching may be done in
     * the adapter or at the client if the client supports caching. A zero or negative
     * value disables the caching of this stream.
*
* @param cacheTime
* the cache time in milliseconds.
*/
public void setCacheTime(long cacheTime) {
this.cacheTime = cacheTime;
} | 3.68 |
hbase_RSGroupInfoManagerImpl_resetRSGroupMap | /**
* Make changes visible. Caller must be synchronized on 'this'.
*/
private void resetRSGroupMap(Map<String, RSGroupInfo> newRSGroupMap) {
this.holder = new RSGroupInfoHolder(newRSGroupMap);
} | 3.68 |
shardingsphere-elasticjob_TriggerNode_isLocalTriggerPath | /**
* Is local trigger path.
*
* @param path path
* @return is local trigger path or not
*/
public boolean isLocalTriggerPath(final String path) {
JobInstance jobInstance = JobRegistry.getInstance().getJobInstance(jobName);
return null != jobInstance && path.equals(jobNodePath.getFullPath(String.format(TRIGGER, jobInstance.getJobInstanceId())));
} | 3.68 |
flink_OperatingSystem_isLinux | /**
* Checks whether the operating system this JVM runs on is Linux.
*
* @return <code>true</code> if the operating system this JVM runs on is Linux, <code>false
* </code> otherwise
*/
public static boolean isLinux() {
return getCurrentOperatingSystem() == LINUX;
} | 3.68 |
hbase_StoreFileInfo_computeHDFSBlocksDistribution | /**
* Compute the HDFS Block Distribution for this StoreFile
*/
public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs)
throws IOException {
// guard against the case where we get the FileStatus from link, but by the time we
// call compute the file is moved again
if (this.link != null) {
FileNotFoundException exToThrow = null;
for (int i = 0; i < this.link.getLocations().length; i++) {
try {
return computeHDFSBlocksDistributionInternal(fs);
} catch (FileNotFoundException ex) {
// try the other location
exToThrow = ex;
}
}
throw exToThrow;
} else {
return computeHDFSBlocksDistributionInternal(fs);
}
} | 3.68 |
framework_Escalator_removeColumns | /**
* {@inheritDoc}
* <p>
* <em>Implementation detail:</em> This method does no DOM modifications
* (i.e. is very cheap to call) if there are no rows in the DOM when
* this method is called.
*
* @see #hasSomethingInDom()
*/
@Override
public void removeColumns(final int index, final int numberOfColumns) {
// Validate
assertArgumentsAreValidAndWithinRange(index, numberOfColumns);
// Move the horizontal scrollbar to the left, if removed columns are
// to the left of the viewport
removeColumnsAdjustScrollbar(index, numberOfColumns);
// Remove from DOM
header.paintRemoveColumns(index, numberOfColumns);
body.paintRemoveColumns(index, numberOfColumns);
footer.paintRemoveColumns(index, numberOfColumns);
// Remove from bookkeeping
flyweightRow.removeCells(index, numberOfColumns);
columns.subList(index, index + numberOfColumns).clear();
// Adjust frozen columns
if (index < getFrozenColumnCount()) {
if (index + numberOfColumns < frozenColumns) {
/*
* Last removed column was frozen, meaning that all removed
* columns were frozen. Just decrement the number of frozen
* columns accordingly.
*/
frozenColumns -= numberOfColumns;
} else {
/*
* If last removed column was not frozen, we have removed
* columns beyond the frozen range, so all remaining frozen
* columns are to the left of the removed columns.
*/
frozenColumns = index;
}
}
scroller.recalculateScrollbarsForVirtualViewport();
body.verifyEscalatorCount();
if (getColumnConfiguration().getColumnCount() > 0) {
reapplyRowWidths(header);
reapplyRowWidths(body);
reapplyRowWidths(footer);
}
/*
* Colspans make any kind of automatic clever content re-rendering
* impossible: As soon as anything has colspans, removing one might
* reveal further colspans, modifying the DOM structure once again,
* ending in a cascade of updates. Because we don't know how the
* data is updated.
*
* So, instead, we don't do anything. The client code is responsible
* for re-rendering the content (if so desired). Everything Just
* Works (TM) if colspans aren't used.
*/
} | 3.68 |
hadoop_ReadWriteDiskValidatorMetrics_getFileWriteQuantiles | /**
* Get {@link MutableQuantiles} metrics for the file write time.
*
* @return {@link MutableQuantiles} metrics for the file write time
*/
@VisibleForTesting
protected MutableQuantiles[] getFileWriteQuantiles() {
return fileWriteQuantiles;
} | 3.68 |
flink_HighAvailabilityMode_fromConfig | /**
* Return the configured {@link HighAvailabilityMode}.
*
* @param config The config to parse
* @return Configured recovery mode or {@link HighAvailabilityMode#NONE} if not configured.
*/
public static HighAvailabilityMode fromConfig(Configuration config) {
String haMode = config.getValue(HighAvailabilityOptions.HA_MODE);
if (haMode == null) {
return HighAvailabilityMode.NONE;
} else if (haMode.equalsIgnoreCase(ConfigConstants.DEFAULT_RECOVERY_MODE)) {
// Map old default to new default
return HighAvailabilityMode.NONE;
} else {
try {
return HighAvailabilityMode.valueOf(haMode.toUpperCase());
} catch (IllegalArgumentException e) {
return FACTORY_CLASS;
}
}
} | 3.68 |
hadoop_FlowActivityRowKey_decode | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common
* .KeyConverter#decode(byte[])
*/
@Override
public FlowActivityRowKey decode(byte[] rowKey) {
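    // Split the row key into its four components: cluster id, day timestamp, user id and flow name.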
byte[][] rowKeyComponents =
Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
if (rowKeyComponents.length != 4) {
throw new IllegalArgumentException("the row key is not valid for "
+ "a flow activity");
}
String clusterId =
Separator.decode(Bytes.toString(rowKeyComponents[0]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
Long dayTs = LongConverter.invertLong(Bytes.toLong(rowKeyComponents[1]));
String userId =
Separator.decode(Bytes.toString(rowKeyComponents[2]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
String flowName =
Separator.decode(Bytes.toString(rowKeyComponents[3]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
return new FlowActivityRowKey(clusterId, dayTs, userId, flowName);
} | 3.68 |
hadoop_FindOptions_getCommandFactory | /**
* Return the command factory.
*
* @return {@link CommandFactory}
*/
public CommandFactory getCommandFactory() {
return this.commandFactory;
} | 3.68 |
framework_TreeData_addRootItems | /**
* Adds the items of the given stream as root items to this structure.
*
* @param items
* the stream of root items to add
* @return this
*
* @throws IllegalArgumentException
* if any of the given items have already been added to this
* structure
* @throws NullPointerException
     *             if any of the items are {@code null}
*/
public TreeData<T> addRootItems(Stream<T> items) {
addItems(null, items);
return this;
} | 3.68 |
hbase_ProcedureEvent_suspendIfNotReady | /**
* Returns true if event is not ready and adds procedure to suspended queue, else returns false.
*/
public synchronized boolean suspendIfNotReady(Procedure proc) {
if (!ready) {
suspendedProcedures.addLast(proc);
}
return !ready;
} | 3.68 |
morf_SpreadsheetDataSetProducer_getHyperlink | /**
* Gets the hyperlink that starts at the given column/row in the given sheet.
*
* @param sheet sheet to look for a hyperlink in
* @param column column of the hyperlink
* @param row row of the hyperlink
* @return the hyperlink, if found. Otherwise, null
*/
private HyperlinkRecord getHyperlink(Sheet sheet, int column, int row) {
for (Hyperlink link : sheet.getHyperlinks()) {
if (link.getColumn() == column && link.getRow() == row) {
return (HyperlinkRecord)link;
}
}
return null;
} | 3.68 |
hadoop_S3AMultipartUploader_buildPartHandlePayload | /**
* Build the payload for marshalling.
*
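   * @param path destination path
   * @param uploadId upload ID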
* @param partNumber part number from response
* @param etag upload etag
* @param len length
* @return a byte array to marshall.
* @throws IOException error writing the payload
*/
@VisibleForTesting
static byte[] buildPartHandlePayload(
final String path,
final String uploadId,
final int partNumber,
final String etag,
final long len)
throws IOException {
return new PartHandlePayload(path, uploadId, partNumber, len, etag)
.toBytes();
} | 3.68 |
hadoop_ShadedProtobufHelper_getRemoteException | /**
* Return the IOException thrown by the remote server wrapped in
* ServiceException as cause.
* The signature of this method changes with updates to the hadoop-thirdparty
* shaded protobuf library.
* @param se ServiceException that wraps IO exception thrown by the server
* @return Exception wrapped in ServiceException or
* a new IOException that wraps the unexpected ServiceException.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static IOException getRemoteException(ServiceException se) {
Throwable e = se.getCause();
if (e == null) {
return new IOException(se);
}
return e instanceof IOException
? (IOException) e
: new IOException(se);
} | 3.68 |
framework_Table_getColumnAlignments | /**
* Gets the array of column alignments.
*
* <p>
* The items in the array must match the properties identified by
* {@link #getVisibleColumns()}. The possible values for the alignments
* include:
* <ul>
* <li>{@link Align#LEFT}: Left alignment</li>
* <li>{@link Align#CENTER}: Centered</li>
* <li>{@link Align#RIGHT}: Right alignment</li>
* </ul>
     * The alignments default to {@link Align#LEFT}: any null values are
     * rendered as left-aligned.
* </p>
*
* @return the Column alignments array.
*/
public Align[] getColumnAlignments() {
if (columnAlignments == null) {
return null;
}
final Align[] alignments = new Align[visibleColumns.size()];
int i = 0;
for (final Object column : visibleColumns) {
alignments[i++] = getColumnAlignment(column);
}
return alignments;
} | 3.68 |
hadoop_FieldSelectionMapper_map | /**
   * The identity function. The input key/value pair is written directly to the output.
*/
public void map(K key, V val, Context context)
throws IOException, InterruptedException {
FieldSelectionHelper helper = new FieldSelectionHelper(
FieldSelectionHelper.emptyText, FieldSelectionHelper.emptyText);
helper.extractOutputKeyValue(key.toString(), val.toString(),
fieldSeparator, mapOutputKeyFieldList, mapOutputValueFieldList,
allMapValueFieldsFrom, ignoreInputKey, true);
context.write(helper.getKey(), helper.getValue());
} | 3.68 |
Activiti_CollectionUtil_singletonMap | /**
* Helper method that creates a singleton map.
*
   * Alternative for singletonMap(), since that method returns a generically typed map <K,T> depending on the input type, but we often need a <String, Object> map.
*/
public static Map<String, Object> singletonMap(String key, Object value) {
Map<String, Object> map = new HashMap<>();
map.put(key, value);
return map;
} | 3.68 |
hudi_CollectionUtils_emptyProps | /**
* Returns an empty {@code Properties} instance. The props instance is a singleton,
* it should not be modified in any case.
*/
public static Properties emptyProps() {
return EMPTY_PROPERTIES;
} | 3.68 |
hbase_HDFSBlocksDistribution_getWeightForSsd | /** Returns the weight for ssd */
public long getWeightForSsd() {
return weightForSsd;
} | 3.68 |
flink_KvStateSerializer_deserializeKeyAndNamespace | /**
* Deserializes the key and namespace into a {@link Tuple2}.
*
* @param serializedKeyAndNamespace Serialized key and namespace
* @param keySerializer Serializer for the key
* @param namespaceSerializer Serializer for the namespace
* @param <K> Key type
* @param <N> Namespace
* @return Tuple2 holding deserialized key and namespace
* @throws IOException if the deserialization fails for any reason
*/
public static <K, N> Tuple2<K, N> deserializeKeyAndNamespace(
byte[] serializedKeyAndNamespace,
TypeSerializer<K> keySerializer,
TypeSerializer<N> namespaceSerializer)
throws IOException {
DataInputDeserializer dis =
new DataInputDeserializer(
serializedKeyAndNamespace, 0, serializedKeyAndNamespace.length);
try {
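            // Serialized layout: key bytes, a single magic byte, then the namespace bytes.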
K key = keySerializer.deserialize(dis);
byte magicNumber = dis.readByte();
if (magicNumber != MAGIC_NUMBER) {
throw new IOException("Unexpected magic number " + magicNumber + ".");
}
N namespace = namespaceSerializer.deserialize(dis);
if (dis.available() > 0) {
throw new IOException("Unconsumed bytes in the serialized key and namespace.");
}
return new Tuple2<>(key, namespace);
} catch (IOException e) {
throw new IOException(
"Unable to deserialize key "
+ "and namespace. This indicates a mismatch in the key/namespace "
+ "serializers used by the KvState instance and this access.",
e);
}
} | 3.68 |
hadoop_OBSCommonUtils_longOption | /**
* Get a long option not smaller than the minimum allowed value.
*
* @param conf configuration
* @param key key to look up
* @param defVal default value
* @param min minimum value
* @return the value
* @throws IllegalArgumentException if the value is below the minimum
*/
static long longOption(final Configuration conf, final String key,
final long defVal,
final long min) {
long v = conf.getLong(key, defVal);
Preconditions.checkArgument(
v >= min,
String.format("Value of %s: %d is below the minimum value %d", key,
v, min));
LOG.debug("Value of {} is {}", key, v);
return v;
} | 3.68 |
hadoop_MRJobConfUtil_redact | /**
* Redact job configuration properties.
* @param conf the job configuration to redact
*/
public static void redact(final Configuration conf) {
for (String prop : conf.getTrimmedStringCollection(
MRJobConfig.MR_JOB_REDACTED_PROPERTIES)) {
conf.set(prop, REDACTION_REPLACEMENT_VAL);
}
} | 3.68 |
hbase_ExportUtils_usage | /**
* Common usage for other export tools.
* @param errorMsg Error message. Can be null.
*/
public static void usage(final String errorMsg) {
if (errorMsg != null && errorMsg.length() > 0) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: Export [-D <property=value>]* <tablename> <outputdir> [<versions> "
+ "[<starttime> [<endtime>]] [^[regex pattern] or [Prefix] to filter]]\n");
System.err.println(" Note: -D properties will be applied to the conf used. ");
System.err.println(" For example: ");
System.err.println(" -D " + FileOutputFormat.COMPRESS + "=true");
System.err.println(
" -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec");
System.err.println(" -D " + FileOutputFormat.COMPRESS_TYPE + "=BLOCK");
System.err.println(" Additionally, the following SCAN properties can be specified");
System.err.println(" to control/limit what is exported..");
System.err
.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=<family1>,<family2>, ...");
System.err.println(" -D " + RAW_SCAN + "=true");
System.err.println(" -D " + TableInputFormat.SCAN_ROW_START + "=<ROWSTART>");
System.err.println(" -D " + TableInputFormat.SCAN_ROW_STOP + "=<ROWSTOP>");
System.err.println(" -D " + HConstants.HBASE_CLIENT_SCANNER_CACHING + "=100");
System.err.println(" -D " + EXPORT_VISIBILITY_LABELS + "=<labels>");
System.err.println("For tables with very wide rows consider setting the batch size as below:\n"
+ " -D " + EXPORT_BATCHING + "=10\n" + " -D " + EXPORT_CACHING + "=100");
} | 3.68 |
framework_LoginForm_createLoginButton | /**
* Customize the login button. Only for overriding, do not call.
*
* @return the login button
* @since 7.7
*/
protected Button createLoginButton() {
throwIfInitialized();
return new Button(getLoginButtonCaption());
} | 3.68 |
hbase_BackupInfo_setProgress | /**
* Set progress (0-100%)
* @param p progress value
*/
public void setProgress(int p) {
this.progress = p;
} | 3.68 |
zxing_DecoderResult_getNumBits | /**
* @return how many bits of {@link #getRawBytes()} are valid; typically 8 times its length
* @since 3.3.0
*/
public int getNumBits() {
return numBits;
} | 3.68 |
flink_HadoopFileSystem_getKindForScheme | /**
* Gets the kind of the file system from its scheme.
*
* <p>Implementation note: Initially, especially within the Flink 1.3.x line (in order to not
* break backwards compatibility), we must only label file systems as 'inconsistent' or as 'not
* proper filesystems' if we are sure about it. Otherwise, we cause regression for example in
* the performance and cleanup handling of checkpoints. For that reason, we initially mark some
* filesystems as 'eventually consistent' or as 'object stores', and leave the others as
* 'consistent file systems'.
*/
static FileSystemKind getKindForScheme(String scheme) {
scheme = scheme.toLowerCase(Locale.US);
if (scheme.startsWith("s3")
|| scheme.startsWith("emr")
|| scheme.startsWith("oss")
|| scheme.startsWith("wasb")
|| scheme.startsWith("gs")) {
// the Amazon S3 storage or Aliyun OSS storage or Azure Blob Storage
// or Google Cloud Storage
return FileSystemKind.OBJECT_STORE;
} else if (scheme.startsWith("http") || scheme.startsWith("ftp")) {
// file servers instead of file systems
// they might actually be consistent, but we have no hard guarantees
// currently to rely on that
return FileSystemKind.OBJECT_STORE;
} else {
// the remainder should include hdfs, kosmos, ceph, ...
// this also includes federated HDFS (viewfs).
return FileSystemKind.FILE_SYSTEM;
}
} | 3.68 |
framework_Window_readDesignChildren | /**
* Reads the content and possible assistive descriptions from the list of
* child elements of a design. If an element has an
* {@code :assistive-description} attribute, adds the parsed component to
* the list of components used as the assistive description of this Window.
* Otherwise, sets the component as the content of this Window. If there are
* multiple non-description elements, throws a DesignException.
*
* @param children
* child elements in a design
* @param context
* the DesignContext instance used to parse the design
*
* @throws DesignException
* if there are multiple non-description child elements
* @throws DesignException
* if a child element could not be parsed as a Component
*
* @see #setContent(Component)
* @see #setAssistiveDescription(Component...)
*/
@Override
protected void readDesignChildren(Elements children,
DesignContext context) {
List<Component> descriptions = new ArrayList<>();
Elements content = new Elements();
for (Element child : children) {
if (child.hasAttr(":assistive-description")) {
descriptions.add(context.readDesign(child));
} else {
content.add(child);
}
}
super.readDesignChildren(content, context);
setAssistiveDescription(
descriptions.toArray(new Component[descriptions.size()]));
} | 3.68 |
flink_HashTableBloomFilter_addHash | /** @return false if the accuracy of the BloomFilter is not high. */
boolean addHash(int hash) {
setLocation(hash);
filter.addHash(hash);
size++;
return size <= maxSize;
} | 3.68 |
hadoop_OBSCommonUtils_isFolderEmpty | // Used to check if a folder is empty or not by counting the number of
// sub objects in list.
private static boolean isFolderEmpty(final String key,
final ObjectListing objects) {
int count = objects.getObjects().size();
if (count >= 2) {
// There is a sub file at least.
return false;
} else if (count == 1 && !objects.getObjects()
.get(0)
.getObjectKey()
.equals(key)) {
// There is a sub file at least.
return false;
}
        count = objects.getCommonPrefixes().size();
        // Now check the common prefixes (sub directories).
        if (count >= 2) {
            // There is a sub directory at least.
            return false;
} else {
return count != 1 || objects.getCommonPrefixes().get(0).equals(key);
}
} | 3.68 |