name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hudi_HoodieSimpleIndex_fetchRecordLocationsForAffectedPartitions | /**
* Fetch record locations for passed in {@link HoodieKey}s.
*
* @param hoodieKeys {@link HoodieData} of {@link HoodieKey}s for which locations are fetched
* @param context instance of {@link HoodieEngineContext} to use
* @param hoodieTable instance of {@link HoodieTable} of interest
* @param parallelism parallelism to use
* @return {@link HoodiePairData} of {@link HoodieKey} and {@link HoodieRecordLocation}
*/
protected HoodiePairData<HoodieKey, HoodieRecordLocation> fetchRecordLocationsForAffectedPartitions(
HoodieData<HoodieKey> hoodieKeys, HoodieEngineContext context, HoodieTable hoodieTable,
int parallelism) {
List<String> affectedPartitionPathList =
hoodieKeys.map(HoodieKey::getPartitionPath).distinct().collectAsList();
List<Pair<String, HoodieBaseFile>> latestBaseFiles =
getLatestBaseFilesForAllPartitions(affectedPartitionPathList, context, hoodieTable);
return fetchRecordLocations(context, hoodieTable, parallelism, latestBaseFiles);
} | 3.68 |
pulsar_ConcurrentLongLongPairHashMap_remove | /**
* Remove an existing entry if found.
*
* @param key1
* @param key2
 * @return true if the entry was removed, false if the key was not present.
*/
public boolean remove(long key1, long key2) {
checkBiggerEqualZero(key1);
long h = hash(key1, key2);
return getSection(h).remove(key1, key2, ValueNotFound, ValueNotFound, (int) h);
} | 3.68 |
hbase_Get_addColumn | /**
* Get the column from the specific family with the specified qualifier.
* <p>
* Overrides previous calls to addFamily for this family.
* @param family family name
* @param qualifier column qualifier
 * @return the Get object
*/
public Get addColumn(byte[] family, byte[] qualifier) {
NavigableSet<byte[]> set = familyMap.get(family);
if (set == null) {
set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
familyMap.put(family, set);
}
if (qualifier == null) {
qualifier = HConstants.EMPTY_BYTE_ARRAY;
}
set.add(qualifier);
return this;
} | 3.68 |
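A short usage sketch for the `addColumn` snippet above. It assumes an already-open HBase `Table` handle, and the row key, family, and qualifier values are hypothetical, chosen only for illustration.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class GetSingleColumnSketch {
    // Reads one cell; addColumn narrows any earlier addFamily("cf") call for the same family.
    static byte[] readSingleColumn(Table table) throws IOException {
        Get get = new Get(Bytes.toBytes("row-1"));                 // hypothetical row key
        get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));   // hypothetical family/qualifier
        Result result = table.get(get);
        return result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
    }
}
```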
querydsl_GenericExporter_setSerializerClass | /**
* Set the serializer class to be used
*
* @param serializerClass
*/
public void setSerializerClass(Class<? extends Serializer> serializerClass) {
codegenModule.bind(serializerClass);
this.serializerClass = serializerClass;
} | 3.68 |
hadoop_HsController_attempts | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#attempts()
*/
@Override
public void attempts() {
super.attempts();
} | 3.68 |
flink_Costs_setHeuristicNetworkCost | /**
* Sets the heuristic network cost for this Costs object.
*
* @param cost The heuristic network cost to set, in bytes to be transferred.
*/
public void setHeuristicNetworkCost(double cost) {
if (cost <= 0) {
throw new IllegalArgumentException("Heuristic costs must be positive.");
}
this.heuristicNetworkCost = cost;
} | 3.68 |
hadoop_MoveStep_getDestinationVolume | /**
* Gets the destination volume.
*
* @return - volume
*/
@Override
public DiskBalancerVolume getDestinationVolume() {
return destinationVolume;
} | 3.68 |
hbase_RegionStates_getRegionStateNodes | /** Returns A view of region state nodes for all the regions. */
public Collection<RegionStateNode> getRegionStateNodes() {
return Collections.unmodifiableCollection(regionsMap.values());
} | 3.68 |
hadoop_BaseService_getPrefixedName | /**
* Returns the full prefixed name of a service property.
*
* @param name of the property.
*
* @return prefixed name of the property.
*/
protected String getPrefixedName(String name) {
return server.getPrefixedName(prefix + "." + name);
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_getSubClusterForUnResolvedRequest | /**
* For requests whose location cannot be resolved, choose an active and
* enabled sub-cluster to forward this requestId to.
*/
private SubClusterId getSubClusterForUnResolvedRequest(long allocationId) {
if (unResolvedRequestLocation.containsKey(allocationId)) {
return unResolvedRequestLocation.get(allocationId);
}
int id = rand.nextInt(activeAndEnabledSC.size());
for (SubClusterId subclusterId : activeAndEnabledSC) {
if (id == 0) {
unResolvedRequestLocation.put(allocationId, subclusterId);
return subclusterId;
}
id--;
}
throw new RuntimeException(
"Should not be here. activeAndEnabledSC size = "
+ activeAndEnabledSC.size() + " id = " + id);
} | 3.68 |
morf_AbstractSqlDialectTest_testSpecifiedValueInsert | /**
 * Tests an insert statement where the value for each column (except the id) has been explicitly specified.
*/
@Test
public void testSpecifiedValueInsert() {
InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE)).values(
new FieldLiteral("Escap'd").as(STRING_FIELD),
new FieldLiteral(7).as(INT_FIELD),
new FieldLiteral(11.25).as(FLOAT_FIELD),
new FieldLiteral(20100405).as(DATE_FIELD),
new FieldLiteral(true).as(BOOLEAN_FIELD),
new FieldLiteral('X').as(CHAR_FIELD)
);
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertSQLEquals("Generated SQL not as expected", expectedSpecifiedValueInsert(), sql);
} | 3.68 |
framework_ApplicationConfiguration_getContextRootUrl | /**
* Gets the URL to the context root of the web application.
*
* @return the URL to the server-side context root as a string
*
* @since 8.0.3
*/
public String getContextRootUrl() {
return contextRootUrl;
} | 3.68 |
hbase_PageFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.PageFilter.Builder builder = FilterProtos.PageFilter.newBuilder();
builder.setPageSize(this.pageSize);
return builder.build().toByteArray();
} | 3.68 |
hadoop_SchedulerHealth_getLastAllocationDetails | /**
* Get the details of last allocation.
*
* @return last allocation details
*/
public DetailedInformation getLastAllocationDetails() {
return getDetailedInformation(Operation.ALLOCATION);
} | 3.68 |
hbase_AuthMethod_read | /** Read from in */
public static AuthMethod read(DataInput in) throws IOException {
return valueOf(in.readByte());
} | 3.68 |
framework_ConnectorIdGenerator_generateDefaultConnectorId | /**
* Generates a connector id using the default logic by using
* {@link VaadinSession#getNextConnectorId()}.
*
* @param event
* the event object that has a reference to the connector and the
* session, not <code>null</code>
* @return the connector id to use for the connector, not <code>null</code>
*/
public static String generateDefaultConnectorId(
ConnectorIdGenerationEvent event) {
assert event != null;
return event.getSession().getNextConnectorId();
} | 3.68 |
dubbo_HashedWheelTimer_addTimeout | /**
* Add {@link HashedWheelTimeout} to this bucket.
*/
void addTimeout(HashedWheelTimeout timeout) {
assert timeout.bucket == null;
timeout.bucket = this;
if (head == null) {
head = tail = timeout;
} else {
tail.next = timeout;
timeout.prev = tail;
tail = timeout;
}
} | 3.68 |
hudi_HoodiePipeline_column | /**
* Add a table column definition.
*
* @param column the column format should be in the form like 'f0 int'
*/
public Builder column(String column) {
this.columns.add(column);
return this;
} | 3.68 |
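A hedged sketch of how this builder method might be chained; the `HoodiePipeline.builder(...)` entry point and the column types shown are assumptions made for illustration, not taken from the snippet above.

```java
// Sketch only: each call adds one column definition in the "f0 int"-style format described above.
HoodiePipeline.Builder builder = HoodiePipeline.builder("hudi_table")   // assumed entry point
    .column("uuid VARCHAR(20)")
    .column("name VARCHAR(10)")
    .column("ts TIMESTAMP(3)");
```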
flink_InstantiationUtil_cloneUnchecked | /**
* Unchecked equivalent of {@link #clone(Serializable)}.
*
* @param obj Object to clone
* @param <T> Type of the object to clone
* @return The cloned object
*/
public static <T extends Serializable> T cloneUnchecked(T obj) {
try {
return clone(obj, obj.getClass().getClassLoader());
} catch (IOException | ClassNotFoundException e) {
throw new RuntimeException(
String.format("Unable to clone instance of %s.", obj.getClass().getName()), e);
}
} | 3.68 |
flink_PlannerCallProcedureOperation_procedureResultToTableResult | /** Converts the result of a procedure call to a table result. */
private TableResultInternal procedureResultToTableResult(
Object procedureResult, TableConfig tableConfig, ClassLoader userClassLoader) {
// get result converter
ZoneId zoneId = tableConfig.getLocalTimeZone();
DataType tableResultType = outputType;
 // if it is not a composite type, wrap it in a composite type
if (!LogicalTypeChecks.isCompositeType(outputType.getLogicalType())) {
tableResultType = DataTypes.ROW(DataTypes.FIELD("result", tableResultType));
}
RowRowConverter rowConverter = null;
// if the output is struct type,
// we need a row converter to help convert it to Row.
// we will first convert the struct value to RowData, and then use the row converter
// to convert the RowData to Row.
if (outputType.getLogicalType().getTypeRoot() == STRUCTURED_TYPE) {
rowConverter = RowRowConverter.create(tableResultType);
rowConverter.open(userClassLoader);
}
// expand the result type to schema
ResolvedSchema resultSchema = DataTypeUtils.expandCompositeTypeToSchema(tableResultType);
RowDataToStringConverter rowDataToStringConverter =
new RowDataToStringConverterImpl(
tableResultType,
zoneId,
userClassLoader,
tableConfig
.get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR)
.isEnabled());
// create DataStructure converters
DataStructureConverter<Object, Object> converter =
DataStructureConverters.getConverter(outputType);
converter.open(userClassLoader);
return TableResultImpl.builder()
.resultProvider(
new CallProcedureResultProvider(
converter, rowDataToStringConverter, rowConverter, procedureResult))
.schema(resultSchema)
.resultKind(ResultKind.SUCCESS_WITH_CONTENT)
.build();
} | 3.68 |
framework_TabSheet_setCloseHandler | /**
* Provide a custom {@link CloseHandler} for this TabSheet if you wish to
 * perform some additional tasks when a user clicks on a tab's close button,
* e.g. show a confirmation dialogue before removing the tab.
*
* To remove the tab, if you provide your own close handler, you must call
* {@link #removeComponent(Component)} yourself.
*
* The default CloseHandler for TabSheet will only remove the tab.
*
* @param handler
* the close handler that should be used
*/
public void setCloseHandler(CloseHandler handler) {
closeHandler = handler;
} | 3.68 |
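A minimal sketch of the pattern the javadoc describes, assuming the standard Vaadin `TabSheet.CloseHandler` callback signature; the confirmation step itself is omitted and replaced by a comment.

```java
import com.vaadin.ui.Component;
import com.vaadin.ui.TabSheet;

class ConfirmingCloseHandlerSketch {
    static void install(TabSheet tabSheet) {
        tabSheet.setCloseHandler(new TabSheet.CloseHandler() {
            @Override
            public void onTabClose(TabSheet tabsheet, Component tabContent) {
                // show a confirmation dialog here if desired (omitted in this sketch) ...
                tabsheet.removeComponent(tabContent); // a custom handler must remove the tab itself
            }
        });
    }
}
```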
morf_AbstractSqlDialectTest_testAddBooleanColumn | /**
* Test adding boolean column.
*/
@Test
public void testAddBooleanColumn() {
testAlterTableColumn(AlterationType.ADD, column("booleanField_new", DataType.BOOLEAN).nullable(), expectedAlterTableAddBooleanColumnStatement());
} | 3.68 |
open-banking-gateway_ValidatedExecution_doMockedExecution | /**
 * Mock ASPSP API function call template. Used within the validation process to imitate ASPSP responses, so
 * that certain internal parameters can be provided to the context as the API input.
*/
protected void doMockedExecution(DelegateExecution execution, T context) {
} | 3.68 |
hudi_BaseHoodieWriteClient_bootstrap | /**
* Main API to run bootstrap to hudi.
*/
public void bootstrap(Option<Map<String, String>> extraMetadata) {
// TODO : MULTIWRITER -> check if failed bootstrap files can be cleaned later
if (config.getWriteConcurrencyMode().supportsMultiWriter()) {
throw new HoodieException("Cannot bootstrap the table in multi-writer mode");
}
HoodieTable<T, I, K, O> table = initTable(WriteOperationType.UPSERT, Option.ofNullable(HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS));
tableServiceClient.rollbackFailedBootstrap();
table.bootstrap(context, extraMetadata);
} | 3.68 |
hbase_HFileSystem_setStoragePolicy | /**
* Set the source path (directory/file) to the specified storage policy.
* @param path The source path (directory/file).
* @param policyName The name of the storage policy: 'HOT', 'COLD', etc. See see hadoop 2.6+
* org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD',
* 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
*/
public void setStoragePolicy(Path path, String policyName) {
CommonFSUtils.setStoragePolicy(this.fs, path, policyName);
} | 3.68 |
hadoop_OBSCommonUtils_continueListObjects | /**
* List the next set of objects.
*
* @param owner the owner OBSFileSystem instance
* @param objects paged result
* @return the next result object
* @throws IOException on any failure to list the next set of objects
*/
static ObjectListing continueListObjects(final OBSFileSystem owner,
final ObjectListing objects) throws IOException {
if (objects.getDelimiter() == null && owner.isFsBucket()
&& owner.isObsClientDFSListEnable()) {
return OBSFsDFSListing.fsDFSContinueListObjects(owner,
(OBSFsDFSListing) objects);
}
return commonContinueListObjects(owner, objects);
} | 3.68 |
framework_AbstractSelect_firePropertySetChange | /**
* Fires the property set change event.
*/
protected void firePropertySetChange() {
if (propertySetEventListeners != null
&& !propertySetEventListeners.isEmpty()) {
final Container.PropertySetChangeEvent event = new PropertySetChangeEvent(
this);
for (Object l : propertySetEventListeners.toArray()) {
((Container.PropertySetChangeListener) l)
.containerPropertySetChange(event);
}
}
markAsDirty();
} | 3.68 |
hadoop_RouterSafemodeService_leave | /**
* Leave safe mode.
*/
private void leave() {
// Cache recently updated, leave safemode
long timeInSafemode = monotonicNow() - enterSafeModeTime;
LOG.info("Leaving safe mode after {} milliseconds", timeInSafemode);
RouterMetrics routerMetrics = router.getRouterMetrics();
if (routerMetrics == null) {
LOG.error("The Router metrics are not enabled");
} else {
routerMetrics.setSafeModeTime(timeInSafemode);
}
safeMode = false;
router.updateRouterState(RouterServiceState.RUNNING);
} | 3.68 |
flink_FlinkContainersSettings_basedOn | /**
* Merges the provided {@code config} with the default config, potentially overwriting the
* defaults in case of collisions. Returns a reference to this Builder enabling method
* chaining.
*
* @param <T> the type parameter
* @param config The {@code config} to add.
* @return A reference to this Builder.
*/
public <T> Builder basedOn(Configuration config) {
this.flinkConfiguration.addAll(config);
return this;
} | 3.68 |
hadoop_RouterWebServices_finalize | /**
* Shutdown the chain of interceptors when the object is destroyed.
*/
@Override
protected void finalize() {
rootInterceptor.shutdown();
}
} | 3.68 |
flink_FlinkRelUtil_isMergeable | /**
 * Returns whether two neighbouring {@link Calc} nodes can be merged into one {@link Calc}. The two
 * {@link Calc} nodes are only mergeable if each non-deterministic {@link RexNode} of the bottom {@link
 * Calc} appears at most once in the project list of the top {@link Calc}.
*/
public static boolean isMergeable(Calc topCalc, Calc bottomCalc) {
final RexProgram topProgram = topCalc.getProgram();
final RexProgram bottomProgram = bottomCalc.getProgram();
final int[] topInputRefCounter =
initializeArray(topCalc.getInput().getRowType().getFieldCount(), 0);
List<RexNode> topInputRefs =
topProgram.getProjectList().stream()
.map(topProgram::expandLocalRef)
.collect(Collectors.toList());
List<RexNode> bottomProjects =
bottomProgram.getProjectList().stream()
.map(bottomProgram::expandLocalRef)
.collect(Collectors.toList());
if (null != topProgram.getCondition()) {
topInputRefs.add(topProgram.expandLocalRef(topProgram.getCondition()));
}
return mergeable(topInputRefCounter, topInputRefs, bottomProjects);
} | 3.68 |
morf_OracleDialect_truncateTableStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#truncateTableStatements(org.alfasoftware.morf.metadata.Table)
*/
@Override
public Collection<String> truncateTableStatements(Table table) {
String mainTruncate = "TRUNCATE TABLE " + schemaNamePrefix() + table.getName();
if (table.isTemporary()) {
return Arrays.asList(mainTruncate);
} else {
return Arrays.asList(mainTruncate + " REUSE STORAGE");
}
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_getPriorityQueueStateType | /**
* Gets the type of the priority queue state. It will fallback to the default value, if it is
* not explicitly set.
*
* @return The type of the priority queue state.
*/
public PriorityQueueStateType getPriorityQueueStateType() {
return priorityQueueConfig.getPriorityQueueStateType();
} | 3.68 |
hadoop_IncrementalBlockReportManager_remove | /**
* Remove the given block from this IBR
* @return true if the block was removed; otherwise, return false.
*/
ReceivedDeletedBlockInfo remove(Block block) {
return blocks.remove(block);
} | 3.68 |
framework_TestBenchElementRightClick_initProperties | // set up the properties (columns)
private void initProperties(Table table) {
for (int i = 0; i < COLUMNS; i++) {
table.addContainerProperty("property" + i, String.class,
"some value");
}
} | 3.68 |
graphhopper_AStarBidirectionCH_setApproximation | /**
* @param approx if true it enables approximate distance calculation from lat,lon values
*/
public AStarBidirectionCH setApproximation(WeightApproximator approx) {
weightApprox = new BalancedWeightApproximator(approx);
return this;
} | 3.68 |
hbase_BucketEntry_isRpcRef | /**
 * Check whether there is some RPC path referring to this block.<br/>
 * For {@link IOEngine#usesSharedMemory()} is true (e.g. {@link ByteBufferIOEngine}), there are two
 * cases: <br>
 * 1. If the current refCnt is greater than 1, there must be at least one referring RPC path; <br>
 * 2. If the current refCnt is equal to 1 and markedAtEvicted is true, it means backingMap has
 * released its reference, so the remaining reference can only be from an RPC path. <br>
 * We use this check to decide whether we can free the block area: when the cached size exceeds the
 * acceptable size, our eviction policy will choose those stale blocks without any RPC reference,
 * and RPC-referred blocks will be excluded. <br/>
 * <br/>
 * For {@link IOEngine#usesSharedMemory()} is false (e.g. {@link FileIOEngine}),
 * {@link BucketEntry#refCnt} is always 1 until it is evicted from {@link BucketCache#backingMap},
 * so {@link BucketEntry#isRpcRef()} always returns false.
 * @return true to indicate there is some RPC path referring to the block.
*/
boolean isRpcRef() {
boolean evicted = markedAsEvicted.get();
return this.refCnt() > 1 || (evicted && refCnt() == 1);
} | 3.68 |
hmily_HmilyTransactionRecoveryService_cancel | /**
* Cancel.
*
* @param hmilyParticipant the hmily participant
* @return the boolean
*/
public boolean cancel(final HmilyParticipant hmilyParticipant) {
try {
HmilyReflector.executor(HmilyActionEnum.CANCELING, ExecutorTypeEnum.LOCAL, hmilyParticipant);
removeHmilyParticipant(hmilyParticipant.getParticipantId());
return true;
} catch (Exception e) {
LOGGER.error("hmily Recovery executor cancel exception param {}", hmilyParticipant.toString(), e);
return false;
}
} | 3.68 |
hadoop_ClientThrottlingAnalyzer_addBytesTransferred | /**
* Updates metrics with results from the current storage operation.
*
* @param count The count of bytes transferred.
*
* @param isFailedOperation True if the operation failed; otherwise false.
*/
public void addBytesTransferred(long count, boolean isFailedOperation) {
BlobOperationMetrics metrics = blobMetrics.get();
if (isFailedOperation) {
metrics.bytesFailed.addAndGet(count);
metrics.operationsFailed.incrementAndGet();
} else {
metrics.bytesSuccessful.addAndGet(count);
metrics.operationsSuccessful.incrementAndGet();
}
} | 3.68 |
zxing_ResultPoint_distance | /**
* @param pattern1 first pattern
* @param pattern2 second pattern
* @return distance between two points
*/
public static float distance(ResultPoint pattern1, ResultPoint pattern2) {
return MathUtils.distance(pattern1.x, pattern1.y, pattern2.x, pattern2.y);
} | 3.68 |
framework_AbstractSplitPanel_getSplitPosition | /**
* Returns the new split position that triggered this change event.
*
* @return the new value of split position
*/
public float getSplitPosition() {
return position;
} | 3.68 |
hadoop_KerberosAuthException_getPrincipal | /** @return The principal, or null if not set. */
public String getPrincipal() {
return principal;
} | 3.68 |
hbase_FavoredNodesManager_getFavoredNodesWithDNPort | /**
* This should only be used when sending FN information to the region servers. Instead of sending
* the region server port, we use the datanode port. This helps in centralizing the DN port logic
* in Master. The RS uses the port from the favored node list as hints.
*/
public synchronized List<ServerName> getFavoredNodesWithDNPort(RegionInfo regionInfo) {
if (getFavoredNodes(regionInfo) == null) {
return null;
}
List<ServerName> fnWithDNPort = Lists.newArrayList();
for (ServerName sn : getFavoredNodes(regionInfo)) {
fnWithDNPort
.add(ServerName.valueOf(sn.getHostname(), datanodeDataTransferPort, NON_STARTCODE));
}
return fnWithDNPort;
} | 3.68 |
hudi_GenericRecordFullPayloadGenerator_isPartialLongField | /**
* Return true if this is a partition field of type long which should be set to the partition index.
*/
private boolean isPartialLongField(Schema.Field field, Set<String> partitionPathFieldNames) {
if ((partitionPathFieldNames == null) || !partitionPathFieldNames.contains(field.name())) {
return false;
}
Schema fieldSchema = field.schema();
if (isOption(fieldSchema)) {
fieldSchema = getNonNull(fieldSchema);
}
return fieldSchema.getType() == org.apache.avro.Schema.Type.LONG;
} | 3.68 |
MagicPlugin_MageSpell_save | /**
* This method is no longer used, and was never called correctly.
* Spells should use getVariables instead if they need to store custom data.
*/
@Deprecated
default void save(SpellData spellData) { } | 3.68 |
framework_GridSingleSelect_setSelectedItem | /**
* Sets the current selection to the given item, or clears selection if
* given {@code null}.
*
* @param item
* the item to select or {@code null} to clear selection
*/
public void setSelectedItem(T item) {
model.setSelectedItem(item);
} | 3.68 |
hadoop_IOStatisticsStoreImpl_getGaugeReference | /**
* Get a reference to the atomic instance providing the
* value for a specific gauge. This is useful if
* the value is passed around.
* @param key statistic name
* @return the reference
* @throws NullPointerException if there is no entry of that name
*/
@Override
public AtomicLong getGaugeReference(String key) {
return lookup(gaugeMap, key);
} | 3.68 |
flink_Tuple17_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>
Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16) {
return new Tuple17<>(
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16);
} | 3.68 |
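The javadoc above illustrates the point with `Tuple3.of(n, x, s)`; a concrete version of that example (using Flink's smaller `Tuple3` rather than `Tuple17`) looks like this:

```java
import org.apache.flink.api.java.tuple.Tuple3;

class TupleOfSketch {
    static Tuple3<Integer, Double, String> build() {
        // The generic type arguments are inferred from the values,
        // so no explicit new Tuple3<Integer, Double, String>(...) is needed.
        return Tuple3.of(42, 3.14, "answer");
    }
}
```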
hbase_AbstractFSWALProvider_doInit | /**
* @param factory factory that made us, identity used for FS layout. may not be null
* @param conf may not be null
* @param providerId differentiate between providers from one factory, used for FS layout. may be
* null
*/
@Override
protected void doInit(WALFactory factory, Configuration conf, String providerId)
throws IOException {
this.providerId = providerId;
// get log prefix
StringBuilder sb = new StringBuilder().append(factory.factoryId);
if (providerId != null) {
if (providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
sb.append(providerId);
} else {
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
}
}
logPrefix = sb.toString();
doInit(conf);
} | 3.68 |
flink_RocksDBStateBackend_getDbStoragePaths | /**
* Gets the configured local DB storage paths, or null, if none were configured.
*
* <p>Under these directories on the TaskManager, RocksDB stores its SST files and metadata
* files. These directories do not need to be persistent, they can be ephermeral, meaning that
* they are lost on a machine failure, because state in RocksDB is persisted in checkpoints.
*
* <p>If nothing is configured, these directories default to the TaskManager's local temporary
* file directories.
*/
public String[] getDbStoragePaths() {
return rocksDBStateBackend.getDbStoragePaths();
} | 3.68 |
hbase_ProcedureStoreBase_setRunning | /**
* Change the state to 'isRunning', returns true if the store state was changed, false if the
* store was already in that state.
* @param isRunning the state to set.
* @return true if the store state was changed, otherwise false.
*/
protected boolean setRunning(boolean isRunning) {
return running.getAndSet(isRunning) != isRunning;
} | 3.68 |
flink_JobManagerCheckpointStorage_configure | /**
* Creates a copy of this checkpoint storage that uses the values defined in the configuration
 * for fields that were not specified in this checkpoint storage.
*
* @param config The configuration
* @return The re-configured variant of the checkpoint storage
*/
@Override
public JobManagerCheckpointStorage configure(ReadableConfig config, ClassLoader classLoader) {
return new JobManagerCheckpointStorage(this, config);
} | 3.68 |
framework_VAbstractCalendarPanel_getResolution | /**
* Returns the current date resolution.
*
* @return the resolution
*/
public R getResolution() {
return resolution;
} | 3.68 |
pulsar_ResourceGroupService_getRgNamespaceUnRegistersCount | // Visibility for testing.
protected static double getRgNamespaceUnRegistersCount (String rgName) {
return rgNamespaceUnRegisters.labels(rgName).get();
} | 3.68 |
hadoop_ServiceLauncher_error | /**
* Report an error.
* <p>
* This tries to log to {@code LOG.error()}.
* <p>
 * If that log level is disabled, the message
* is logged to system error along with {@code thrown.toString()}
* @param message message for the user
* @param thrown the exception thrown
*/
protected void error(String message, Throwable thrown) {
String text = "Exception: " + message;
if (LOG.isErrorEnabled()) {
LOG.error(text, thrown);
} else {
System.err.println(text);
if (thrown != null) {
System.err.println(thrown.toString());
}
}
} | 3.68 |
framework_VLayoutSlot_isUndefinedInDirection | /**
* Returns whether the height or the width of the widget has been set as
* undefined depending on the indicated direction.
*
* @param isVertical
* {@code true} if the requested dimension check is about height,
* {@code false} if about width
* @return {@code true} if the widget height or the widget width is
* undefined depending on the indicated direction, {@code false}
* otherwise
*/
public boolean isUndefinedInDirection(boolean isVertical) {
return isVertical ? isUndefinedHeight() : isUndefinedWidth();
} | 3.68 |
AreaShop_UnrentedRegionEvent_getRefundedMoney | /**
* Get the amount that is paid back to the player.
* @return The amount of money paid back to the player
*/
public double getRefundedMoney() {
return refundedMoney;
} | 3.68 |
hbase_SegmentScanner_next | /**
* Return the next Cell in this scanner, iterating the scanner
* @return the next Cell or null if end of scanner
*/
@Override
public Cell next() throws IOException {
if (closed) {
return null;
}
Cell oldCurrent = current;
updateCurrent(); // update the currently observed Cell
return oldCurrent;
} | 3.68 |
morf_SqlDialect_getSqlForRound | /**
* Converts the ROUND function into SQL.
*
* @param function the function to convert.
* @return a string representation of the SQL.
* @see org.alfasoftware.morf.sql.element.Function#round(AliasedField,
* AliasedField)
*/
protected String getSqlForRound(Function function) {
return "ROUND(" + getSqlFrom(function.getArguments().get(0)) + ", " + getSqlFrom(function.getArguments().get(1)) + ")";
} | 3.68 |
framework_VFormLayout_getStylesFromState | /**
* Parses the stylenames from shared state
*
* @param state
* shared state of the component
* @param enabled
* @return An array of stylenames
*/
private String[] getStylesFromState(AbstractComponentState state,
boolean enabled) {
List<String> styles = new ArrayList<>();
if (ComponentStateUtil.hasStyles(state)) {
for (String name : state.styles) {
styles.add(name);
}
}
if (!enabled) {
styles.add(StyleConstants.DISABLED);
}
return styles.toArray(new String[styles.size()]);
} | 3.68 |
hadoop_AMWebServices_getTaskFromTaskIdString | /**
* convert a task id string to an actual task and handle all the error
* checking.
*/
public static Task getTaskFromTaskIdString(String tid, Job job) throws NotFoundException {
TaskId taskID;
Task task;
try {
taskID = MRApps.toTaskID(tid);
} catch (YarnRuntimeException e) {
// TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
// anymore but keeping it for now just in case other stuff starts failing.
// Also, the webservice should ideally return BadRequest (HTTP:400) when
// the id is malformed instead of NotFound (HTTP:404). The webserver on
// top of which AMWebServices is built seems to automatically do that for
// unhandled exceptions
throw new NotFoundException(e.getMessage());
} catch (NumberFormatException ne) {
throw new NotFoundException(ne.getMessage());
} catch (IllegalArgumentException e) {
throw new NotFoundException(e.getMessage());
}
if (taskID == null) {
throw new NotFoundException("taskid " + tid + " not found or invalid");
}
task = job.getTask(taskID);
if (task == null) {
throw new NotFoundException("task not found with id " + tid);
}
return task;
} | 3.68 |
hibernate-validator_GroupSequenceProviderCheck_retrieveGenericProviderType | /**
* Retrieves the default group sequence provider generic type defined by the given {@code TypeMirror}.
*
* @param typeMirror The {@code TypeMirror} instance.
*
* @return The generic type or {@code null} if the given type doesn't implement the {@link org.hibernate.validator.spi.group.DefaultGroupSequenceProvider} interface.
*/
private TypeMirror retrieveGenericProviderType(TypeMirror typeMirror) {
return typeMirror.accept(
new SimpleTypeVisitor8<TypeMirror, Void>() {
@Override
public TypeMirror visitDeclared(DeclaredType declaredType, Void aVoid) {
TypeMirror eraseType = typeUtils.erasure( declaredType );
if ( typeUtils.isSameType( eraseType, defaultGroupSequenceProviderType ) ) {
List<? extends TypeMirror> typeArguments = declaredType.getTypeArguments();
if ( !typeArguments.isEmpty() ) {
return typeArguments.get( 0 );
}
return null;
}
List<? extends TypeMirror> superTypes = typeUtils.directSupertypes( declaredType );
for ( TypeMirror superType : superTypes ) {
TypeMirror genericProviderType = superType.accept( this, aVoid );
if ( genericProviderType != null ) {
return genericProviderType;
}
}
return null;
}
}, null
);
} | 3.68 |
hbase_RateLimiter_waitInterval | /**
 * Returns an estimate of the ms required to wait before being able to provide "amount" resources.
*/
public synchronized long waitInterval(final long amount) {
// TODO Handle over quota?
return (amount <= avail) ? 0 : getWaitInterval(getLimit(), avail, amount);
} | 3.68 |
hadoop_TFile_prepareAppendValue | /**
* Obtain an output stream for writing a value into TFile. This may only be
* called right after a key appending operation (the key append stream must
* be closed).
*
* @param length
* The expected length of the value. If length of the value is not
* known, set length = -1. Otherwise, the application must write
* exactly as many bytes as specified here before calling close on
* the returned output stream. Advertising the value size up-front
* guarantees that the value is encoded in one chunk, and avoids
* intermediate chunk buffering.
* @throws IOException raised on errors performing I/O.
* @return DataOutputStream.
*/
public DataOutputStream prepareAppendValue(int length) throws IOException {
if (state != State.END_KEY) {
throw new IllegalStateException(
"Incorrect state to start a new value: " + state.name());
}
DataOutputStream ret;
// unknown length
if (length < 0) {
if (valueBuffer == null) {
valueBuffer = new byte[getChunkBufferSize(conf)];
}
ret = new ValueRegister(new ChunkEncoder(blkAppender, valueBuffer));
} else {
ret =
new ValueRegister(new Chunk.SingleChunkEncoder(blkAppender, length));
}
state = State.IN_VALUE;
return ret;
} | 3.68 |
flink_FileSystemTableFactory_validateTimeZone | /** Similar logic as for {@link TableConfig}. */
private void validateTimeZone(String zone) {
boolean isValid;
try {
// We enforce a zone string that is compatible with both java.util.TimeZone and
// java.time.ZoneId to avoid bugs.
// In general, advertising either TZDB ID, GMT+xx:xx, or UTC is the best we can do.
isValid = java.util.TimeZone.getTimeZone(zone).toZoneId().equals(ZoneId.of(zone));
} catch (Exception e) {
isValid = false;
}
if (!isValid) {
throw new ValidationException(
String.format(
"Invalid time zone for '%s'. The value should be a Time Zone Database (TZDB) ID "
+ "such as 'America/Los_Angeles' to include daylight saving time. Fixed "
+ "offsets are supported using 'GMT-03:00' or 'GMT+03:00'. Or use 'UTC' "
+ "without time zone and daylight saving time.",
FileSystemConnectorOptions.SINK_PARTITION_COMMIT_WATERMARK_TIME_ZONE
.key()));
}
} | 3.68 |
flink_Explainable_printExplain | /** Like {@link #explain(ExplainDetail...)}, but piping the result to {@link System#out}. */
@SuppressWarnings("unchecked")
default SELF printExplain(ExplainDetail... extraDetails) {
System.out.println(explain(extraDetails));
return (SELF) this;
} | 3.68 |
flink_FileSystem_exists | /**
 * Check if the given path exists.
*
* @param f source file
*/
public boolean exists(final Path f) throws IOException {
try {
return (getFileStatus(f) != null);
} catch (FileNotFoundException e) {
return false;
}
} | 3.68 |
framework_JsonEncoder_getTransportType | /**
* Returns the transport type for the given value. Only returns a transport
* type for internally handled values.
*
* @param value
* The value that should be transported
* @return One of the JsonEncode.VTYPE_ constants or null if the value
* cannot be transported using an internally handled type.
*/
private static String getTransportType(Object value) {
if (value == null) {
return JsonConstants.VTYPE_NULL;
} else if (value instanceof String) {
return JsonConstants.VTYPE_STRING;
} else if (value instanceof Connector) {
return JsonConstants.VTYPE_CONNECTOR;
} else if (value instanceof Boolean) {
return JsonConstants.VTYPE_BOOLEAN;
} else if (value instanceof Integer) {
return JsonConstants.VTYPE_INTEGER;
} else if (value instanceof Float) {
return JsonConstants.VTYPE_FLOAT;
} else if (value instanceof Double) {
return JsonConstants.VTYPE_DOUBLE;
} else if (value instanceof Long) {
return JsonConstants.VTYPE_LONG;
} else if (value instanceof List) {
return JsonConstants.VTYPE_LIST;
} else if (value instanceof Set) {
return JsonConstants.VTYPE_SET;
} else if (value instanceof String[]) {
return JsonConstants.VTYPE_STRINGARRAY;
} else if (value instanceof Object[]) {
return JsonConstants.VTYPE_ARRAY;
} else if (value instanceof Map) {
return JsonConstants.VTYPE_MAP;
} else if (value instanceof Enum<?>) {
// Enum value is processed as a string
return JsonConstants.VTYPE_STRING;
}
return null;
} | 3.68 |
hbase_HBaseConfiguration_createClusterConf | /**
* Generates a {@link Configuration} instance by applying property overrides prefixed by a cluster
* profile key to the base Configuration. Override properties are extracted by the
* {@link #subset(Configuration, String)} method, then the merged on top of the base Configuration
* and returned.
* @param baseConf the base configuration to use, containing prefixed override properties
* @param clusterKey the ZooKeeper quorum cluster key to apply, or {@code null} if none
* @param overridePrefix the property key prefix to match for override properties, or {@code null}
* if none
* @return the merged configuration with override properties and cluster key applied
*/
public static Configuration createClusterConf(Configuration baseConf, String clusterKey,
String overridePrefix) throws IOException {
Configuration clusterConf = HBaseConfiguration.create(baseConf);
if (clusterKey != null && !clusterKey.isEmpty()) {
applyClusterKeyToConf(clusterConf, clusterKey);
}
if (overridePrefix != null && !overridePrefix.isEmpty()) {
Configuration clusterSubset = HBaseConfiguration.subset(clusterConf, overridePrefix);
HBaseConfiguration.merge(clusterConf, clusterSubset);
}
return clusterConf;
} | 3.68 |
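A usage sketch for `createClusterConf`, matching the signature shown above; the cluster key and override prefix values are hypothetical.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

class ClusterConfSketch {
    static Configuration peerConf(Configuration base) throws IOException {
        // Applies the peer cluster's quorum plus any "peer.cluster."-prefixed overrides on top of base.
        return HBaseConfiguration.createClusterConf(
            base,
            "zk1,zk2,zk3:2181:/hbase",   // hypothetical ZooKeeper cluster key
            "peer.cluster");             // hypothetical override property prefix
    }
}
```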
AreaShop_Value_set | /**
* Set the content.
* @param value The new content
*/
public void set(T value) {
this.content = value;
} | 3.68 |
hibernate-validator_ValidatorImpl_validateInContext | /**
* Validates the given object using the available context information.
*
* @param validationContext the global validation context
* @param valueContext the current validation context
* @param validationOrder Contains the information which and in which order groups have to be executed
* @param <T> The root bean type
*
* @return Set of constraint violations or the empty set if there were no violations.
*/
private <T, U> Set<ConstraintViolation<T>> validateInContext(BaseBeanValidationContext<T> validationContext, BeanValueContext<U, Object> valueContext,
ValidationOrder validationOrder) {
if ( valueContext.getCurrentBean() == null ) {
return Collections.emptySet();
}
BeanMetaData<U> beanMetaData = valueContext.getCurrentBeanMetaData();
if ( beanMetaData.isDefaultGroupSequenceRedefined() ) {
validationOrder.assertDefaultGroupSequenceIsExpandable( beanMetaData.getDefaultGroupSequence( valueContext.getCurrentBean() ) );
}
// process first single groups. For these we can optimise object traversal by first running all validations on the current bean
// before traversing the object.
Iterator<Group> groupIterator = validationOrder.getGroupIterator();
while ( groupIterator.hasNext() ) {
Group group = groupIterator.next();
valueContext.setCurrentGroup( group.getDefiningClass() );
validateConstraintsForCurrentGroup( validationContext, valueContext );
if ( shouldFailFast( validationContext ) ) {
return validationContext.getFailingConstraints();
}
}
groupIterator = validationOrder.getGroupIterator();
while ( groupIterator.hasNext() ) {
Group group = groupIterator.next();
valueContext.setCurrentGroup( group.getDefiningClass() );
validateCascadedConstraints( validationContext, valueContext );
if ( shouldFailFast( validationContext ) ) {
return validationContext.getFailingConstraints();
}
}
// now we process sequences. For sequences I have to traverse the object graph since I have to stop processing when an error occurs.
Iterator<Sequence> sequenceIterator = validationOrder.getSequenceIterator();
while ( sequenceIterator.hasNext() ) {
Sequence sequence = sequenceIterator.next();
for ( GroupWithInheritance groupOfGroups : sequence ) {
int numberOfViolations = validationContext.getFailingConstraints().size();
for ( Group group : groupOfGroups ) {
valueContext.setCurrentGroup( group.getDefiningClass() );
validateConstraintsForCurrentGroup( validationContext, valueContext );
if ( shouldFailFast( validationContext ) ) {
return validationContext.getFailingConstraints();
}
validateCascadedConstraints( validationContext, valueContext );
if ( shouldFailFast( validationContext ) ) {
return validationContext.getFailingConstraints();
}
}
if ( validationContext.getFailingConstraints().size() > numberOfViolations ) {
break;
}
}
}
return validationContext.getFailingConstraints();
} | 3.68 |
hadoop_ActiveAuditManagerS3A_requestCreated | /**
* Forward to the wrapped span.
* {@inheritDoc}
*/
@Override
public void requestCreated(final SdkRequest.Builder builder) {
span.requestCreated(builder);
} | 3.68 |
flink_HiveParserBaseSemanticAnalyzer_unparseExprForValuesClause | // Take an expression in the values clause and turn it back into a string. This is far from
// comprehensive. At the moment it only supports:
// * literals (all types)
// * unary negatives
// * true/false
static String unparseExprForValuesClause(HiveParserASTNode expr) throws SemanticException {
switch (expr.getToken().getType()) {
case HiveASTParser.Number:
return expr.getText();
case HiveASTParser.StringLiteral:
return unescapeSQLString(expr.getText());
case HiveASTParser.KW_FALSE:
// UDFToBoolean casts any non-empty string to true, so set this to false
return "";
case HiveASTParser.KW_TRUE:
return "TRUE";
case HiveASTParser.MINUS:
return "-"
+ unparseExprForValuesClause((HiveParserASTNode) expr.getChildren().get(0));
case HiveASTParser.TOK_NULL:
return null;
default:
throw new SemanticException(
"Expression of type " + expr.getText() + " not supported in insert/values");
}
} | 3.68 |
pulsar_SecurityUtility_getProvider | /**
 * Get the Bouncy Castle provider, and call Security.addProvider(provider) on success.
 * 1. Try to get it from the classpath.
 * 2. Try to get it from the Nar.
*/
public static Provider getProvider() {
boolean isProviderInstalled =
Security.getProvider(BC) != null || Security.getProvider(BC_FIPS) != null;
if (isProviderInstalled) {
Provider provider = Security.getProvider(BC) != null
? Security.getProvider(BC)
: Security.getProvider(BC_FIPS);
if (log.isDebugEnabled()) {
log.debug("Already instantiated Bouncy Castle provider {}", provider.getName());
}
return provider;
}
// Not installed, try load from class path
try {
return getBCProviderFromClassPath();
} catch (Exception e) {
log.warn("Not able to get Bouncy Castle provider for both FIPS and Non-FIPS from class path:", e);
throw new RuntimeException(e);
}
} | 3.68 |
hbase_CatalogJanitor_main | /**
* For testing against a cluster. Doesn't have a MasterServices context so does not report on good
* vs bad servers.
*/
public static void main(String[] args) throws IOException {
checkLog4jProperties();
ReportMakingVisitor visitor = new ReportMakingVisitor(null);
Configuration configuration = HBaseConfiguration.create();
configuration.setBoolean("hbase.defaults.for.version.skip", true);
try (Connection connection = ConnectionFactory.createConnection(configuration)) {
/*
* Used to generate an overlap.
*/
Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0."));
g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
try (Table t = connection.getTable(TableName.META_TABLE_NAME)) {
Result r = t.get(g);
byte[] row = g.getRow();
row[row.length - 2] <<= row[row.length - 2];
Put p = new Put(g.getRow());
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
t.put(p);
}
MetaTableAccessor.scanMetaForTableRegions(connection, visitor, null);
CatalogJanitorReport report = visitor.getReport();
LOG.info(report != null ? report.toString() : "empty");
}
} | 3.68 |
hadoop_HttpExceptionUtils_validateResponse | /**
* Validates the status of an <code>HttpURLConnection</code> against an
* expected HTTP status code. If the current status code is not the expected
* one it throws an exception with a detail message using Server side error
* messages if available.
* <p>
* <b>NOTE:</b> this method will throw the deserialized exception even if not
* declared in the <code>throws</code> of the method signature.
*
* @param conn the <code>HttpURLConnection</code>.
* @param expectedStatus the expected HTTP status code.
* @throws IOException thrown if the current status code does not match the
* expected one.
*/
@SuppressWarnings("unchecked")
public static void validateResponse(HttpURLConnection conn,
int expectedStatus) throws IOException {
if (conn.getResponseCode() != expectedStatus) {
Exception toThrow;
InputStream es = null;
try {
es = conn.getErrorStream();
Map json = JsonSerialization.mapReader().readValue(es);
json = (Map) json.get(ERROR_JSON);
String exClass = (String) json.get(ERROR_CLASSNAME_JSON);
String exMsg = (String) json.get(ERROR_MESSAGE_JSON);
if (exClass != null) {
try {
ClassLoader cl = HttpExceptionUtils.class.getClassLoader();
Class klass = cl.loadClass(exClass);
Constructor constr = klass.getConstructor(String.class);
toThrow = (Exception) constr.newInstance(exMsg);
} catch (Exception ex) {
toThrow = new IOException(String.format(
"HTTP status [%d], exception [%s], message [%s], URL [%s]",
conn.getResponseCode(), exClass, exMsg, conn.getURL()));
}
} else {
String msg = (exMsg != null) ? exMsg : conn.getResponseMessage();
toThrow = new IOException(String.format(
"HTTP status [%d], message [%s], URL [%s]",
conn.getResponseCode(), msg, conn.getURL()));
}
} catch (Exception ex) {
toThrow = new IOException(String.format(
"HTTP status [%d], message [%s], URL [%s], exception [%s]",
conn.getResponseCode(), conn.getResponseMessage(), conn.getURL(),
ex.toString()), ex);
} finally {
if (es != null) {
try {
es.close();
} catch (IOException ex) {
//ignore
}
}
}
throwEx(toThrow);
}
} | 3.68 |
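A short sketch of calling `validateResponse` against a plain `HttpURLConnection`; the URL is a placeholder and the expected status is simply HTTP 200.

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.util.HttpExceptionUtils;

class ValidateResponseSketch {
    static void check() throws IOException {
        HttpURLConnection conn =
            (HttpURLConnection) new URL("http://example.org/api").openConnection(); // placeholder URL
        conn.connect();
        // Throws the deserialized server-side exception (or an IOException) if the status is not 200.
        HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    }
}
```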
hadoop_ErasureCoderOptions_getNumAllUnits | /**
* The number of all the involved units in the coding.
* @return count of all the data units and parity units
*/
public int getNumAllUnits() {
return numAllUnits;
} | 3.68 |
hibernate-validator_ConstraintAnnotationVisitor_visitVariableAsField | /**
* <p>
* Checks whether the given annotations are correctly specified at the given
* field. The following checks are performed:
* </p>
* <ul>
* <li>
 * Constraint annotations may only be given at non-static fields whose
* type is supported by the constraints.</li>
* <li>
* The {@code @Valid} annotation may only be given at non-static,
* non-primitive fields.</li>
* </ul>
*/
@Override
public Void visitVariableAsField(VariableElement annotatedField, List<AnnotationMirror> mirrors) {
checkConstraints( annotatedField, mirrors );
return null;
} | 3.68 |
flink_TypeExtractor_materializeTypeVariable | /**
 * Tries to find a concrete value (Class, ParameterizedType, etc.) for a TypeVariable by
 * traversing the type hierarchy downwards. If a value cannot be found, it will return the
 * bottommost type variable in the hierarchy.
*/
private static Type materializeTypeVariable(List<Type> typeHierarchy, TypeVariable<?> typeVar) {
TypeVariable<?> inTypeTypeVar = typeVar;
// iterate thru hierarchy from top to bottom until type variable gets a class assigned
for (int i = typeHierarchy.size() - 1; i >= 0; i--) {
Type curT = typeHierarchy.get(i);
// parameterized type
if (curT instanceof ParameterizedType) {
Class<?> rawType = ((Class<?>) ((ParameterizedType) curT).getRawType());
for (int paramIndex = 0;
paramIndex < rawType.getTypeParameters().length;
paramIndex++) {
TypeVariable<?> curVarOfCurT = rawType.getTypeParameters()[paramIndex];
// check if variable names match
if (sameTypeVars(curVarOfCurT, inTypeTypeVar)) {
Type curVarType =
((ParameterizedType) curT).getActualTypeArguments()[paramIndex];
// another type variable level
if (curVarType instanceof TypeVariable<?>) {
inTypeTypeVar = (TypeVariable<?>) curVarType;
}
// class
else {
return curVarType;
}
}
}
}
}
// can not be materialized, most likely due to type erasure
// return the type variable of the deepest level
return inTypeTypeVar;
} | 3.68 |
hbase_LocalHBaseCluster_waitOnMaster | /**
* Wait for the specified master to stop. Removes this thread from list of running threads.
* @return Name of master that just went down.
*/
public String waitOnMaster(JVMClusterUtil.MasterThread masterThread) {
boolean interrupted = false;
while (masterThread.isAlive()) {
try {
LOG.info("Waiting on " + masterThread.getMaster().getServerName().toString());
masterThread.join();
} catch (InterruptedException e) {
LOG.error("Interrupted while waiting for {} to finish. Retrying join",
masterThread.getName(), e);
interrupted = true;
}
}
masterThreads.remove(masterThread);
if (interrupted) {
Thread.currentThread().interrupt();
}
return masterThread.getName();
} | 3.68 |
hudi_HoodieWriteHandle_createMarkerFile | /**
* Creates an empty marker file corresponding to storage writer path.
*
* @param partitionPath Partition path
*/
protected void createMarkerFile(String partitionPath, String dataFileName) {
WriteMarkersFactory.get(config.getMarkersType(), hoodieTable, instantTime)
.create(partitionPath, dataFileName, getIOType(), config, fileId, hoodieTable.getMetaClient().getActiveTimeline());
} | 3.68 |
flink_MapView_put | /**
* Inserts a value for the given key into the map view. If the map view already contains a value
* for the key, the existing value is overwritten.
*
* @param key The key for which the value is inserted.
* @param value The value that is inserted for the key.
* @throws Exception Thrown if the system cannot put data.
*/
public void put(K key, V value) throws Exception {
map.put(key, value);
} | 3.68 |
hbase_MobFileCache_getEvictedFileCount | /**
* Gets the number of items evicted from the mob file cache.
* @return The number of items evicted from the mob file cache.
*/
public long getEvictedFileCount() {
return evictedFileCount.sum();
} | 3.68 |
morf_SchemaChangeSequence_getUpgradeSteps | /**
* @return the upgradeSteps
*/
public List<UpgradeStep> getUpgradeSteps() {
return ImmutableList.copyOf(upgradeSteps);
} | 3.68 |
hudi_PartitionFilterGenerator_compare | /**
 * As HMS only accepts DATE, INT, STRING, and BIGINT for pushed-down partition filters, we only
 * do the comparison for these types here.
*/
@Override
public int compare(String s1, String s2) {
switch (valueType.toLowerCase(Locale.ROOT)) {
case HiveSchemaUtil.INT_TYPE_NAME:
int i1 = Integer.parseInt(s1);
int i2 = Integer.parseInt(s2);
return i1 - i2;
case HiveSchemaUtil.BIGINT_TYPE_NAME:
long l1 = Long.parseLong(s1);
long l2 = Long.parseLong(s2);
return Long.signum(l1 - l2);
case HiveSchemaUtil.DATE_TYPE_NAME:
case HiveSchemaUtil.STRING_TYPE_NAME:
return s1.compareTo(s2);
default:
throw new IllegalArgumentException(String.format(UNSUPPORTED_TYPE_ERROR, valueType));
}
} | 3.68 |
zxing_MultiFormatReader_decode | /**
* Decode an image using the hints provided. Does not honor existing state.
*
* @param image The pixel data to decode
* @param hints The hints to use, clearing the previous state.
* @return The contents of the image
* @throws NotFoundException Any errors which occurred
*/
@Override
public Result decode(BinaryBitmap image, Map<DecodeHintType,?> hints) throws NotFoundException {
setHints(hints);
return decodeInternal(image);
} | 3.68 |
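A hedged end-to-end sketch of using `decode` from application code; `BufferedImageLuminanceSource` comes from ZXing's separate `javase` module, which is an assumption about the caller's dependencies.

```java
import java.awt.image.BufferedImage;

import com.google.zxing.BinaryBitmap;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.NotFoundException;
import com.google.zxing.Result;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.HybridBinarizer;

class MultiFormatDecodeSketch {
    static Result decodeImage(BufferedImage image) throws NotFoundException {
        BinaryBitmap bitmap =
            new BinaryBitmap(new HybridBinarizer(new BufferedImageLuminanceSource(image)));
        // Passing null hints clears any previously set hints, as the snippet above shows.
        return new MultiFormatReader().decode(bitmap, null);
    }
}
```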
hudi_FlinkWriteClients_getHoodieClientConfig | /**
* Mainly used for tests.
*/
public static HoodieWriteConfig getHoodieClientConfig(Configuration conf) {
return getHoodieClientConfig(conf, false, false);
} | 3.68 |
hbase_RestoreSnapshotProcedure_restoreSnapshot | /**
* Execute the on-disk Restore
* @param env MasterProcedureEnv
**/
private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
FileSystem fs = fileSystemManager.getFileSystem();
Path rootDir = fileSystemManager.getRootDir();
final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
final Configuration conf = new Configuration(env.getMasterConfiguration());
LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
try {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs, manifest,
modifiedTableDescriptor, rootDir, monitorException, getMonitorStatus());
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
regionsToRestore = metaChanges.getRegionsToRestore();
regionsToRemove = metaChanges.getRegionsToRemove();
regionsToAdd = metaChanges.getRegionsToAdd();
parentsToChildrenPairMap = metaChanges.getParentToChildrenPairMap();
} catch (IOException e) {
String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
+ " failed in on-disk restore. Try re-running the restore command.";
LOG.error(msg, e);
monitorException
.receive(new ForeignException(env.getMasterServices().getServerName().toString(), e));
throw new IOException(msg, e);
}
} | 3.68 |
framework_AbstractColorPicker_setHtmlContentAllowed | /**
* Set whether the caption text is rendered as HTML or not. You might need
 * to re-theme the component to allow higher content than the original text
* style.
*
* If set to true, the captions are passed to the browser as html and the
* developer is responsible for ensuring no harmful html is used. If set to
* false, the content is passed to the browser as plain text.
*
* @param htmlContentAllowed
* <code>true</code> if caption is rendered as HTML,
* <code>false</code> otherwise
* @deprecated as of , use {@link #setCaptionAsHtml(boolean)} instead
*/
@Deprecated
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
setCaptionAsHtml(htmlContentAllowed);
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithSum | /**
* Test the use of the sum function in a select
*/
@Test
public void testSelectWithSum() {
SelectStatement stmt = new SelectStatement(sum(new FieldReference(INT_FIELD)), sumDistinct(new FieldReference(INT_FIELD))).from(new TableReference(TEST_TABLE));
String expectedSql = "SELECT SUM(intField), SUM(DISTINCT intField) FROM " + tableName(TEST_TABLE);
assertEquals("Select with sum function", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
zxing_UPCEANReader_decodeRow | /**
* <p>Like {@link #decodeRow(int, BitArray, Map)}, but
* allows caller to inform method about where the UPC/EAN start pattern is
* found. This allows this to be computed once and reused across many implementations.</p>
*
* @param rowNumber row index into the image
* @param row encoding of the row of the barcode image
* @param startGuardRange start/end column where the opening start pattern was found
* @param hints optional hints that influence decoding
* @return {@link Result} encapsulating the result of decoding a barcode in the row
* @throws NotFoundException if no potential barcode is found
* @throws ChecksumException if a potential barcode is found but does not pass its checksum
* @throws FormatException if a potential barcode is found but format is invalid
*/
public Result decodeRow(int rowNumber,
BitArray row,
int[] startGuardRange,
Map<DecodeHintType,?> hints)
throws NotFoundException, ChecksumException, FormatException {
ResultPointCallback resultPointCallback = hints == null ? null :
(ResultPointCallback) hints.get(DecodeHintType.NEED_RESULT_POINT_CALLBACK);
int symbologyIdentifier = 0;
if (resultPointCallback != null) {
resultPointCallback.foundPossibleResultPoint(new ResultPoint(
(startGuardRange[0] + startGuardRange[1]) / 2.0f, rowNumber
));
}
StringBuilder result = decodeRowStringBuffer;
result.setLength(0);
int endStart = decodeMiddle(row, startGuardRange, result);
if (resultPointCallback != null) {
resultPointCallback.foundPossibleResultPoint(new ResultPoint(
endStart, rowNumber
));
}
int[] endRange = decodeEnd(row, endStart);
if (resultPointCallback != null) {
resultPointCallback.foundPossibleResultPoint(new ResultPoint(
(endRange[0] + endRange[1]) / 2.0f, rowNumber
));
}
// Make sure there is a quiet zone at least as big as the end pattern after the barcode. The
// spec might want more whitespace, but in practice this is the maximum we can count on.
int end = endRange[1];
int quietEnd = end + (end - endRange[0]);
if (quietEnd >= row.getSize() || !row.isRange(end, quietEnd, false)) {
throw NotFoundException.getNotFoundInstance();
}
String resultString = result.toString();
// UPC/EAN should never be less than 8 chars anyway
if (resultString.length() < 8) {
throw FormatException.getFormatInstance();
}
if (!checkChecksum(resultString)) {
throw ChecksumException.getChecksumInstance();
}
float left = (startGuardRange[1] + startGuardRange[0]) / 2.0f;
float right = (endRange[1] + endRange[0]) / 2.0f;
BarcodeFormat format = getBarcodeFormat();
Result decodeResult = new Result(resultString,
null, // no natural byte representation for these barcodes
new ResultPoint[]{
new ResultPoint(left, rowNumber),
new ResultPoint(right, rowNumber)},
format);
int extensionLength = 0;
try {
Result extensionResult = extensionReader.decodeRow(rowNumber, row, endRange[1]);
decodeResult.putMetadata(ResultMetadataType.UPC_EAN_EXTENSION, extensionResult.getText());
decodeResult.putAllMetadata(extensionResult.getResultMetadata());
decodeResult.addResultPoints(extensionResult.getResultPoints());
extensionLength = extensionResult.getText().length();
} catch (ReaderException re) {
// continue
}
int[] allowedExtensions =
hints == null ? null : (int[]) hints.get(DecodeHintType.ALLOWED_EAN_EXTENSIONS);
if (allowedExtensions != null) {
boolean valid = false;
for (int length : allowedExtensions) {
if (extensionLength == length) {
valid = true;
break;
}
}
if (!valid) {
throw NotFoundException.getNotFoundInstance();
}
}
if (format == BarcodeFormat.EAN_13 || format == BarcodeFormat.UPC_A) {
String countryID = eanManSupport.lookupCountryIdentifier(resultString);
if (countryID != null) {
decodeResult.putMetadata(ResultMetadataType.POSSIBLE_COUNTRY, countryID);
}
}
if (format == BarcodeFormat.EAN_8) {
symbologyIdentifier = 4;
}
decodeResult.putMetadata(ResultMetadataType.SYMBOLOGY_IDENTIFIER, "]E" + symbologyIdentifier);
return decodeResult;
} | 3.68 |
morf_AbstractDatabaseType_identifier | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#identifier()
*/
@Override
public final String identifier() {
return identifier;
} | 3.68 |
hbase_CloseChecker_isSizeLimit | /**
 * Check periodically to see if a system stop is requested, each time the written bytes reach the size limit.
 * @return true if the system should stop.
*/
public boolean isSizeLimit(Store store, long bytesWritten) {
if (closeCheckSizeLimit <= 0) {
return false;
}
bytesWrittenProgressForCloseCheck += bytesWritten;
if (bytesWrittenProgressForCloseCheck <= closeCheckSizeLimit) {
return false;
}
bytesWrittenProgressForCloseCheck = 0;
return !store.areWritesEnabled();
} | 3.68 |
pulsar_PulsarConnectorConfig_getZookeeperUri | /**
* @deprecated use {@link #getMetadataUrl()}
*/
@Deprecated
@NotNull
public String getZookeeperUri() {
return getMetadataUrl();
} | 3.68 |
morf_MySql_matchesProduct | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#matchesProduct(java.lang.String)
*/
@Override
public boolean matchesProduct(String product) {
return product.equalsIgnoreCase("MySQL");
} | 3.68 |
flink_DateTimeUtils_toInternal | /**
* Converts the Java type used for UDF parameters of SQL TIMESTAMP type ({@link
* java.sql.Timestamp}) to internal representation (long).
*
* <p>Converse of {@link #toSQLTimestamp(long)}.
*/
public static long toInternal(java.sql.Timestamp ts) {
long time = ts.getTime();
return time + LOCAL_TZ.getOffset(time);
} | 3.68 |
hadoop_StageConfig_getTaskAttemptDir | /**
* Task attempt directory.
* @return the task attempt dir.
*/
public Path getTaskAttemptDir() {
return taskAttemptDir;
} | 3.68 |
hadoop_OBSCommonUtils_lookupPassword | /**
* Get a password from a configuration/configured credential providers.
*
* @param conf configuration
* @param key key to look up
 * @return the password, or an empty string if none is found
* @throws IOException on any problem
*/
private static String lookupPassword(final Configuration conf,
final String key) throws IOException {
try {
final char[] pass = conf.getPassword(key);
return pass != null ? new String(pass).trim() : "";
} catch (IOException ioe) {
throw new IOException("Cannot find password option " + key, ioe);
}
} | 3.68 |
hbase_Append_add | /**
* Add column and value to this Append operation.
* @return This instance
*/
@Override
public Append add(final Cell cell) {
try {
super.add(cell);
} catch (IOException e) {
// we eat the exception of wrong row for BC..
LOG.error(e.toString(), e);
}
return this;
} | 3.68 |
rocketmq-connect_AbstractConnectController_connectorTypeForClass | /**
* Retrieves ConnectorType for the corresponding connector class
*
* @param connClass class of the connector
*/
public ConnectorType connectorTypeForClass(String connClass) {
return ConnectorType.from(plugin.newConnector(connClass).getClass());
} | 3.68 |