name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---
framework_TabSheet_isTabCaptionsAsHtml | /**
* Checks whether HTML is allowed in the tab captions.
* <p>
* The default is false, i.e. render tab captions as plain text
*
* @return true if the tab captions are rendered as HTML, false if rendered
* as plain text
* @since 7.4
*/
public boolean isTabCaptionsAsHtml() {
return getState(false).tabCaptionsAsHtml;
} | 3.68 |
morf_UnsupportedDatabaseTestRule_apply | /**
* @see org.junit.rules.TestRule#apply(org.junit.runners.model.Statement,
* org.junit.runner.Description)
*/
@Override
public Statement apply(Statement base, Description description) {
return unsupportedTests.contains(description.getMethodName()) ? new UnsupportedDatabaseTestStatement(description) : base;
} | 3.68 |
morf_DeleteStatement_getTable | /**
* Gets the table being deleted from.
*
* @return the table being deleted from
*/
public TableReference getTable() {
return table;
} | 3.68 |
flink_OperatorIDGenerator_fromUid | /**
* Generate {@link OperatorID}'s from {@code uid}'s.
*
* <p>{@link
* org.apache.flink.streaming.api.graph.StreamGraphHasherV2#traverseStreamGraphAndGenerateHashes(StreamGraph)}
*
* @param uid {@code DataStream} operator uid.
* @return corresponding {@link OperatorID}
*/
public static OperatorID fromUid(String uid) {
byte[] hash = Hashing.murmur3_128(0).newHasher().putString(uid, UTF_8).hash().asBytes();
return new OperatorID(hash);
} | 3.68 |
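A minimal usage sketch for the generator above (the uid string is an illustrative value; it assumes only the OperatorIDGenerator and OperatorID classes shown here):

OperatorID id = OperatorIDGenerator.fromUid("my-stateful-map");
// id wraps the 128-bit Murmur3 hash (seed 0) of the UTF-8 bytes of the uid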
flink_RocksDBNativeMetricOptions_enableBlockCachePinnedUsage | /** Enables monitoring of the memory size of the entries being pinned in the block cache. */
public void enableBlockCachePinnedUsage() {
this.properties.add(RocksDBProperty.BlockCachePinnedUsage.getRocksDBProperty());
} | 3.68 |
flink_MailboxProcessor_suspendDefaultAction | /**
* Calling this method signals that the mailbox-thread should (temporarily) stop invoking the
* default action, e.g. because there is currently no input available.
*/
private MailboxDefaultAction.Suspension suspendDefaultAction(
@Nullable PeriodTimer suspensionTimer) {
checkState(
mailbox.isMailboxThread(),
"Suspending must only be called from the mailbox thread!");
checkState(suspendedDefaultAction == null, "Default action has already been suspended");
if (suspendedDefaultAction == null) {
suspendedDefaultAction = new DefaultActionSuspension(suspensionTimer);
}
return suspendedDefaultAction;
} | 3.68 |
MagicPlugin_Base64Coder_decodeString | /**
* Decodes a string from Base64 format.
* No blanks or line breaks are allowed within the Base64 encoded input data.
*
* @param s A Base64 String to be decoded.
* @return A String containing the decoded data.
* @throws IllegalArgumentException If the input is not valid Base64 encoded data.
*/
public static String decodeString(String s) {
return new String(decode(s), StandardCharsets.UTF_8);
} | 3.68 |
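A short usage sketch for the decoder above; "SGVsbG8=" is the standard Base64 encoding of "Hello":

String plain = Base64Coder.decodeString("SGVsbG8=");
System.out.println(plain); // Hello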
framework_ContainerOrderedWrapper_removeContainerProperty | /**
* Removes the specified Property from the underlying container and from the
* ordering.
* <p>
* Note : The Property will be removed from all the Items in the Container.
* </p>
*
* @param propertyId
* the ID of the Property to remove.
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
* @throws UnsupportedOperationException
* if the removeContainerProperty is not supported.
*/
@Override
public boolean removeContainerProperty(Object propertyId)
throws UnsupportedOperationException {
return container.removeContainerProperty(propertyId);
} | 3.68 |
hadoop_BlockBlobAppendStream_hflush | /**
* Force all data in the output stream to be written to Azure storage.
* Wait to return until this is complete.
*/
@Override
public void hflush() throws IOException {
// when block compaction is disabled, hflush is empty function
if (compactionEnabled) {
flush();
}
} | 3.68 |
flink_PushCalcPastChangelogNormalizeRule_transformWithRemainingPredicates | /**
* Transforms the {@link RelOptRuleCall} to use {@code changelogNormalize} as the new input to
* a {@link StreamPhysicalCalc} which uses {@code predicates} for the condition.
*/
private void transformWithRemainingPredicates(
RelOptRuleCall call,
StreamPhysicalChangelogNormalize changelogNormalize,
List<RexNode> predicates,
int[] usedInputFields) {
final StreamPhysicalCalc calc = call.rel(0);
final RelBuilder relBuilder = call.builder();
final RexProgramBuilder programBuilder =
new RexProgramBuilder(changelogNormalize.getRowType(), relBuilder.getRexBuilder());
final Map<Integer, Integer> inputRefMapping = buildFieldsMapping(usedInputFields);
// add projects
for (Pair<RexLocalRef, String> ref : calc.getProgram().getNamedProjects()) {
RexNode shiftedProject =
adjustInputRef(calc.getProgram().expandLocalRef(ref.left), inputRefMapping);
programBuilder.addProject(shiftedProject, ref.right);
}
// add conditions
final List<RexNode> shiftedPredicates =
predicates.stream()
.map(p -> adjustInputRef(p, inputRefMapping))
.collect(Collectors.toList());
final RexNode condition = relBuilder.and(shiftedPredicates);
if (!condition.isAlwaysTrue()) {
programBuilder.addCondition(condition);
}
final RexProgram newProgram = programBuilder.getProgram();
if (newProgram.isTrivial()) {
call.transformTo(changelogNormalize);
} else {
final StreamPhysicalCalc newProjectedCalc =
new StreamPhysicalCalc(
changelogNormalize.getCluster(),
changelogNormalize.getTraitSet(),
changelogNormalize,
newProgram,
newProgram.getOutputRowType());
call.transformTo(newProjectedCalc);
}
} | 3.68 |
framework_HierarchicalContainer_isRoot | /*
* Is the Item corresponding to the given ID a root node? Don't add a
* JavaDoc comment here, we use the default documentation from implemented
* interface.
*/
@Override
public boolean isRoot(Object itemId) {
// If the container is filtered the itemId must be among filteredRoots
// to be a root.
if (filteredRoots != null) {
if (!filteredRoots.contains(itemId)) {
return false;
}
} else {
// Container is not filtered
if (parent.containsKey(itemId)) {
return false;
}
}
return containsId(itemId);
} | 3.68 |
framework_VCalendarPanel_focusNextYear | /**
* Moves the focused date forward by the given number of years.
*/
private void focusNextYear(int years) {
if (focusedDate == null) {
return;
}
Date nextYearDate = (Date) focusedDate.clone();
nextYearDate.setYear(nextYearDate.getYear() + years);
// Do not focus if not inside range
if (!isDateInsideRange(nextYearDate, Resolution.YEAR)) {
return;
}
// If we add one year, but have to roll back a bit, fit it
// into the calendar. Also the months have to be changed
if (!isDateInsideRange(nextYearDate, Resolution.DAY)) {
nextYearDate = adjustDateToFitInsideRange(nextYearDate);
focusedDate.setYear(nextYearDate.getYear());
focusedDate.setMonth(nextYearDate.getMonth());
focusedDate.setDate(nextYearDate.getDate());
displayedMonth.setYear(nextYearDate.getYear());
displayedMonth.setMonth(nextYearDate.getMonth());
} else {
int currentMonth = focusedDate.getMonth();
focusedDate.setYear(focusedDate.getYear() + years);
displayedMonth.setYear(displayedMonth.getYear() + years);
/*
* If the focused date was a leap day (Feb 29), the new date becomes
* Mar 1 if the new year is not also a leap year. Set it to Feb 28
* instead.
*/
if (focusedDate.getMonth() != currentMonth) {
focusedDate.setDate(0);
}
}
renderCalendar();
} | 3.68 |
hadoop_Service_toString | /**
* Get the name of a state
* @return the state's name
*/
@Override
public String toString() {
return statename;
}
}
/**
* Initialize the service.
*
* The transition MUST be from {@link STATE#NOTINITED} to {@link STATE#INITED}
* unless the operation failed and an exception was raised, in which case
* {@link #stop()} | 3.68 |
AreaShop_FileManager_checkForInactiveRegions | /**
* Check all regions and unrent/sell them if the player is inactive for too long.
*/
public void checkForInactiveRegions() {
Do.forAll(
plugin.getConfig().getInt("inactive.regionsPerTick"),
getRegions(),
GeneralRegion::checkInactive
);
} | 3.68 |
hbase_ByteBufferUtils_hashCode | /**
* @param buf ByteBuffer to hash
* @param offset offset to start from
* @param length length to hash
*/
public static int hashCode(ByteBuffer buf, int offset, int length) {
int hash = 1;
for (int i = offset; i < offset + length; i++) {
hash = (31 * hash) + (int) toByte(buf, i);
}
return hash;
} | 3.68 |
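For illustration, a hypothetical standalone equivalent of the same 31-based rolling hash over a plain byte[] slice (not part of the HBase API):

static int hashCode(byte[] buf, int offset, int length) {
    int hash = 1;
    for (int i = offset; i < offset + length; i++) {
        hash = (31 * hash) + buf[i]; // the byte is widened to int, matching toByte(buf, i)
    }
    return hash;
}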
framework_HasStyleNames_setStyleName | /**
* Adds or removes a style name. Multiple styles can be specified as a
* space-separated list of style names.
*
* If the {@code add} parameter is true, the style name is added to the
* component. If the {@code add} parameter is false, the style name is
* removed from the component.
* <p>
* Functionally this is equivalent to using {@link #addStyleName(String)} or
* {@link #removeStyleName(String)}
*
* @since 8.7
* @param style
* the style name to be added or removed
* @param add
* <code>true</code> to add the given style, <code>false</code>
* to remove it
* @see #addStyleName(String)
* @see #removeStyleName(String)
*/
default void setStyleName(String style, boolean add) {
if (add) {
addStyleName(style);
} else {
removeStyleName(style);
}
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations14 | /**
* Test for proper SQL mathematics operation generation from DSL expressions.
* <p>
* Bracket should be generated for subexpression "b+c". Even without explicit
* {@link org.alfasoftware.morf.sql.SqlUtils#bracket(MathsField)} call.
* </p>
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations14() {
String result = testDialect.getSqlFrom(field("a").plus(field("b").plus(field("c"))).divideBy(literal(2)));
assertEquals(expectedSqlForMathOperations14(), result);
} | 3.68 |
flink_GroupCombineOperatorBase_setGroupOrder | /**
* Sets the order of the elements within a reduce group.
*
* @param order The order for the elements in a reduce group.
*/
public void setGroupOrder(Ordering order) {
this.groupOrder = order;
} | 3.68 |
framework_VCalendar_isEventInDay | /*
* Check if the given event occurs at the given date.
*/
private boolean isEventInDay(Date eventWhen, Date eventTo, Date gridDate) {
return eventWhen.compareTo(gridDate) <= 0
        && eventTo.compareTo(gridDate) >= 0;
} | 3.68 |
flink_ColumnStats_getMin | /**
* Returns null if this instance is constructed by {@link ColumnStats#ColumnStats(Long, Long,
* Double, Integer, Number, Number)}.
*/
public Comparable<?> getMin() {
return min;
} | 3.68 |
hadoop_OBSCommonUtils_newAppendFileRequest | /**
* Create an appendFile request. Adds the ACL and metadata.
*
* @param owner the owner OBSFileSystem instance
* @param key key of object
* @param inputStream temp file or input stream
* @param recordPosition client record next append position
* @return the request
* @throws IOException any problem
*/
static WriteFileRequest newAppendFileRequest(final OBSFileSystem owner,
final String key, final long recordPosition,
final InputStream inputStream) throws IOException {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(inputStream);
ObsFSAttribute obsFsAttribute;
try {
GetAttributeRequest getAttributeReq = new GetAttributeRequest(
owner.getBucket(), key);
obsFsAttribute = owner.getObsClient().getAttribute(getAttributeReq);
} catch (ObsException e) {
throw translateException("GetAttributeRequest", key, e);
}
long appendPosition = Math.max(recordPosition,
obsFsAttribute.getContentLength());
if (recordPosition != obsFsAttribute.getContentLength()) {
LOG.warn("append url[{}] position[{}], file contentLength[{}] not"
+ " equal to recordPosition[{}].", key, appendPosition,
obsFsAttribute.getContentLength(), recordPosition);
}
WriteFileRequest writeFileReq = new WriteFileRequest(owner.getBucket(),
key, inputStream, appendPosition);
writeFileReq.setAcl(owner.getCannedACL());
return writeFileReq;
} | 3.68 |
hadoop_AzureBlobFileSystem_getDelegationToken | /**
* Get a delegation token from remote service endpoint if
* 'fs.azure.enable.kerberos.support' is set to 'true', and
* 'fs.azure.enable.delegation.token' is set to 'true'.
* @param renewer the account name that is allowed to renew the token.
* @return delegation token
* @throws IOException thrown when getting the current user.
*/
@Override
public synchronized Token<?> getDelegationToken(final String renewer) throws IOException {
statIncrement(CALL_GET_DELEGATION_TOKEN);
return this.delegationTokenEnabled ? this.delegationTokenManager.getDelegationToken(renewer)
: super.getDelegationToken(renewer);
} | 3.68 |
rocketmq-connect_ClusterManagementService_configure | /**
* Configure class with the given key-value pairs
*
* @param config can be DistributedConfig or StandaloneConfig
*/
default void configure(WorkerConfig config) {
} | 3.68 |
hadoop_SharedKeyCredentials_safeDecode | /**
* Performs safe decoding of the specified string, taking care to preserve each <code>+</code> character, rather
* than replacing it with a space character.
*
* @param stringToDecode A <code>String</code> that represents the string to decode.
* @return A <code>String</code> that represents the decoded string.
* @throws UnsupportedEncodingException if the named character encoding is not supported.
*/
private static String safeDecode(final String stringToDecode) throws UnsupportedEncodingException {
if (stringToDecode == null) {
return null;
}
if (stringToDecode.length() == 0) {
return "";
}
if (stringToDecode.contains(AbfsHttpConstants.PLUS)) {
final StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(URLDecoder.decode(stringToDecode.substring(startDex, m),
AbfsHttpConstants.UTF_8));
}
outBuilder.append(AbfsHttpConstants.PLUS);
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(URLDecoder.decode(stringToDecode.substring(startDex, stringToDecode.length()),
AbfsHttpConstants.UTF_8));
}
return outBuilder.toString();
} else {
return URLDecoder.decode(stringToDecode, AbfsHttpConstants.UTF_8);
}
} | 3.68 |
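A self-contained sketch of the '+'-preserving behaviour described above, using plain java.net.URLDecoder with the constants inlined (an illustration, not the actual SharedKeyCredentials code):

String encoded = "a+b%20c";
System.out.println(URLDecoder.decode(encoded, "UTF-8")); // "a b c" - '+' becomes a space
StringBuilder out = new StringBuilder();
int start = 0;
for (int m = 0; m < encoded.length(); m++) {
    if (encoded.charAt(m) == '+') {
        out.append(URLDecoder.decode(encoded.substring(start, m), "UTF-8")).append('+');
        start = m + 1;
    }
}
out.append(URLDecoder.decode(encoded.substring(start), "UTF-8"));
System.out.println(out); // "a+b c" - '+' preserved, %20 still decoded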
hbase_Constraints_serializeConstraintClass | /**
* Writes the class as a String key suitable for storing in the
* {@link TableDescriptor}.
* @param clazz Constraint class to convert to a {@link TableDescriptor} key
* @return key to store in the {@link TableDescriptor}
*/
private static String serializeConstraintClass(Class<? extends Constraint> clazz) {
String constraintClazz = clazz.getName();
return CONSTRAINT_HTD_KEY_PREFIX + constraintClazz;
} | 3.68 |
zxing_QRCodeWriter_renderResult | // Note that the input matrix uses 0 == white, 1 == black, while the output matrix uses
// 0 == black, 255 == white (i.e. an 8 bit greyscale bitmap).
private static BitMatrix renderResult(QRCode code, int width, int height, int quietZone) {
ByteMatrix input = code.getMatrix();
if (input == null) {
throw new IllegalStateException();
}
int inputWidth = input.getWidth();
int inputHeight = input.getHeight();
int qrWidth = inputWidth + (quietZone * 2);
int qrHeight = inputHeight + (quietZone * 2);
int outputWidth = Math.max(width, qrWidth);
int outputHeight = Math.max(height, qrHeight);
int multiple = Math.min(outputWidth / qrWidth, outputHeight / qrHeight);
// Padding includes both the quiet zone and the extra white pixels to accommodate the requested
// dimensions. For example, if input is 25x25 the QR will be 33x33 including the quiet zone.
// If the requested size is 200x160, the multiple will be 4, for a QR of 132x132. These will
// handle all the padding from 100x100 (the actual QR) up to 200x160.
int leftPadding = (outputWidth - (inputWidth * multiple)) / 2;
int topPadding = (outputHeight - (inputHeight * multiple)) / 2;
BitMatrix output = new BitMatrix(outputWidth, outputHeight);
for (int inputY = 0, outputY = topPadding; inputY < inputHeight; inputY++, outputY += multiple) {
// Write the contents of this row of the barcode
for (int inputX = 0, outputX = leftPadding; inputX < inputWidth; inputX++, outputX += multiple) {
if (input.get(inputX, inputY) == 1) {
output.setRegion(outputX, outputY, multiple, multiple);
}
}
}
return output;
} | 3.68 |
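Working through the numbers in the comment above: a 25x25 matrix with a quiet zone of 4 gives qrWidth = 33; for a requested 200x160 output, multiple = min(200 / 33, 160 / 33) = 4 (integer division), so the scaled modules occupy 100x100, leftPadding = (200 - 25 * 4) / 2 = 50 and topPadding = (160 - 25 * 4) / 2 = 30.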
MagicPlugin_PreLoadEvent_registerPlayerWarpManager | /**
* Register a PlayerWarpManager, for providing warps to be used in the Recall menu.
* The name of the manager as registered corresponds with the "allow_player_warps" map in the recall
* spell configuration.
*
* @param key The name of the manager
* @param manager The manager to add
*/
public void registerPlayerWarpManager(String key, PlayerWarpManager manager) {
warpManagers.put(key, manager);
} | 3.68 |
hadoop_StageConfig_withJobAttemptTaskSubDir | /**
* Set builder value.
* @param value new value
* @return the builder
*/
public StageConfig withJobAttemptTaskSubDir(Path value) {
jobAttemptTaskSubDir = value;
return this;
} | 3.68 |
framework_ResourceLoader_getResourceUrl | /**
* Gets the absolute url of the loaded resource.
*
* @return the absolute url of the loaded resource
*/
public String getResourceUrl() {
return resourceUrl;
} | 3.68 |
graphhopper_ArrayUtil_invert | /**
* Creates a new array where each element represents the index position of this element in the given array
* or is set to -1 if this element does not appear in the input array. None of the elements of the input array may
* be equal to or larger than the array's length.
*/
public static int[] invert(int[] arr) {
int[] result = new int[arr.length];
Arrays.fill(result, -1);
for (int i = 0; i < arr.length; i++)
result[arr[i]] = i;
return result;
} | 3.68 |
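A worked example of the inversion above, under the documented precondition that every value is smaller than the array length:

int[] arr = {2, 0, 1};
int[] inv = ArrayUtil.invert(arr); // {1, 2, 0}: value 0 sits at index 1, value 1 at index 2, value 2 at index 0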
pulsar_ManagedLedgerConfig_getMetadataOperationsTimeoutSeconds | /**
*
* Metadata operations (e.g. ledger create/delete) timeout.
*
* @return the metadata operations timeout in seconds
*/
public long getMetadataOperationsTimeoutSeconds() {
return metadataOperationsTimeoutSeconds;
} | 3.68 |
hbase_TableDescriptorBuilder_getFlushPolicyClassName | /**
* This gets the class associated with the flush policy which determines the stores need to be
* flushed when flushing a region. The class used by default is defined in
* org.apache.hadoop.hbase.regionserver.FlushPolicy.
* @return the class name of the flush policy for this table. If this returns null, the default
* flush policy is used.
*/
@Override
public String getFlushPolicyClassName() {
return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
} | 3.68 |
morf_SqlDialect_getCoalesceFunctionName | /**
* @return The name of the coalesce function
*/
protected String getCoalesceFunctionName() {
return "COALESCE";
} | 3.68 |
flink_ApplicationStatus_fromJobStatus | /**
* Derives the ApplicationStatus that should be used for a job that resulted in the given job
* status. If the job is not yet in a globally terminal state, this method returns {@link
* #UNKNOWN}.
*/
public static ApplicationStatus fromJobStatus(JobStatus jobStatus) {
return JOB_STATUS_APPLICATION_STATUS_BI_MAP.getOrDefault(jobStatus, UNKNOWN);
} | 3.68 |
hadoop_CommitterEventHandler_touchz | // If job commit is repeatable, then we should allow
// startCommitFile/endCommitSuccessFile/endCommitFailureFile to be written
// by other AM before.
private void touchz(Path p, boolean overwrite) throws IOException {
fs.create(p, overwrite).close();
} | 3.68 |
rocketmq-connect_AbstractKafkaSourceConnector_taskConfigs | /**
* Returns a set of configurations for Tasks based on the current configuration,
* producing at most count configurations.
*
* @param maxTasks maximum number of configurations to generate
* @return configurations for Tasks
*/
@Override
public List<KeyValue> taskConfigs(int maxTasks) {
List<Map<String, String>> groupConnectors = sourceConnector.taskConfigs(maxTasks);
List<KeyValue> configs = new ArrayList<>();
for (Map<String, String> configMaps : groupConnectors) {
KeyValue keyValue = new DefaultKeyValue();
configMaps.forEach((k, v) -> {
keyValue.put(k, v);
});
configs.add(keyValue);
}
return configs;
} | 3.68 |
flink_DateTimeUtils_toSQLTime | /**
* Converts the internal representation of a SQL TIME (int) to the Java type used for UDF
* parameters ({@link java.sql.Time}).
*/
public static java.sql.Time toSQLTime(int v) {
// note that, in this case, can't handle Daylight Saving Time
return new java.sql.Time(v - LOCAL_TZ.getOffset(v));
} | 3.68 |
dubbo_RpcStatus_getFailedAverageElapsed | /**
* get failed average elapsed.
*
* @return failed average elapsed
*/
public long getFailedAverageElapsed() {
long failed = getFailed();
if (failed == 0) {
return 0;
}
return getFailedElapsed() / failed;
} | 3.68 |
hbase_AccessController_updateACL | /**
* Writes all table ACLs for the tables in the given Map up into ZooKeeper znodes. This is called
* to synchronize ACL changes following {@code _acl_} table updates.
*/
private void updateACL(RegionCoprocessorEnvironment e, final Map<byte[], List<Cell>> familyMap) {
Set<byte[]> entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
for (Map.Entry<byte[], List<Cell>> f : familyMap.entrySet()) {
List<Cell> cells = f.getValue();
for (Cell cell : cells) {
if (CellUtil.matchingFamily(cell, PermissionStorage.ACL_LIST_FAMILY)) {
entries.add(CellUtil.cloneRow(cell));
}
}
}
Configuration conf = regionEnv.getConfiguration();
byte[] currentEntry = null;
// TODO: Here we are already on the ACL region. (And it is single
// region) We can even just get the region from the env and do get
// directly. The short circuit connection would avoid the RPC overhead
// so no socket communication, req write/read .. But we have the PB
// to and fro conversion overhead. get req is converted to PB req
// and results are converted to PB results 1st and then to POJOs
// again. We could have avoided such at least in ACL table context..
try (Table t = e.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
for (byte[] entry : entries) {
currentEntry = entry;
ListMultimap<String, UserPermission> perms =
PermissionStorage.getPermissions(conf, entry, t, null, null, null, false);
byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf);
zkPermissionWatcher.writeToZookeeper(entry, serialized);
}
} catch (IOException ex) {
LOG.error("Failed updating permissions mirror for '"
+ (currentEntry == null ? "null" : Bytes.toString(currentEntry)) + "'", ex);
}
} | 3.68 |
hbase_CompactionTool_compact | /**
* Execute the compaction on the specified path.
* @param path Directory path on which to run compaction.
* @param compactOnce Execute just a single step of compaction.
* @param major Request major compaction.
*/
public void compact(final Path path, final boolean compactOnce, final boolean major)
throws IOException {
if (isFamilyDir(fs, path)) {
Path regionDir = path.getParent();
Path tableDir = regionDir.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
} else if (isRegionDir(fs, path)) {
Path tableDir = path.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd, path, compactOnce, major);
} else if (isTableDir(fs, path)) {
compactTable(path, compactOnce, major);
} else {
throw new IOException(
"Specified path is not a table, region or family directory. path=" + path);
}
} | 3.68 |
framework_VScrollTable_getNaturalColumnWidth | /**
* Detects the natural minimum width for the column of this header cell.
* If column is resized by user or the width is defined by server the
* actual width is returned. Else the natural min width is returned.
*
* @param columnIndex
* column index hint, if -1 (unknown) it will be detected
*
*
* @return the user-resized or server-defined width if one is set, otherwise the natural minimum width (never less than the hierarchy-column indent)
public int getNaturalColumnWidth(int columnIndex) {
final int iw = columnIndex == getHierarchyColumnIndex()
? scrollBody.getMaxIndent()
: 0;
saveNaturalColumnWidthIfNotSaved(columnIndex);
if (isDefinedWidth()) {
if (iw > width) {
return iw;
}
return width;
} else {
if (iw > naturalWidth) {
return iw;
} else {
return naturalWidth;
}
}
} | 3.68 |
flink_ConfigOptions_defaultValue | /**
* Creates a ConfigOption with the given default value.
*
* @param value The default value for the config option
* @return The config option with the default value.
*/
public ConfigOption<T> defaultValue(T value) {
return new ConfigOption<>(key, clazz, ConfigOption.EMPTY_DESCRIPTION, value, false);
} | 3.68 |
hbase_RequestConverter_buildUpdateFavoredNodesRequest | /**
* Create a protocol buffer UpdateFavoredNodesRequest to update a list of favored node mappings.
* @param updateRegionInfos a list of favored node mappings
* @return a protocol buffer UpdateFavoredNodesRequest
*/
public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest(
final List<Pair<RegionInfo, List<ServerName>>> updateRegionInfos) {
UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder();
if (updateRegionInfos != null && !updateRegionInfos.isEmpty()) {
RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder();
for (Pair<RegionInfo, List<ServerName>> pair : updateRegionInfos) {
builder.setRegion(ProtobufUtil.toRegionInfo(pair.getFirst()));
for (ServerName server : pair.getSecond()) {
builder.addFavoredNodes(ProtobufUtil.toServerName(server));
}
ubuilder.addUpdateInfo(builder.build());
builder.clear();
}
}
return ubuilder.build();
} | 3.68 |
rocketmq-connect_JsonSchemaDeserializer_deserialize | /**
* Deserializes a JSON-Schema-encoded payload.
*
* @param topic the topic the payload was read from
* @param isKey whether the payload is a record key rather than a record value
* @param payload the serialized bytes: an 8-byte record id followed by the JSON document
* @return the decoded JSON value together with its schema
*/
@Override
public JsonSchemaAndValue deserialize(String topic, boolean isKey, byte[] payload) {
if (payload == null) {
return null;
}
ByteBuffer buffer = ByteBuffer.wrap(payload);
long recordId = buffer.getLong();
GetSchemaResponse response = schemaRegistryClient.getSchemaByRecordId(JsonSchemaData.NAMESPACE, topic, recordId);
int length = buffer.limit() - ID_SIZE;
int start = buffer.position() + buffer.arrayOffset();
// Return JsonNode if type is null
JsonNode value = null;
try {
value = OBJECT_MAPPER.readTree(new ByteArrayInputStream(buffer.array(), start, length));
} catch (IOException e) {
throw new RuntimeException(e);
}
// load json schema
SchemaLoader.SchemaLoaderBuilder schemaLoaderBuilder = SchemaLoader
.builder()
.useDefaults(true)
.draftV7Support();
JSONObject jsonObject = new JSONObject(response.getIdl());
schemaLoaderBuilder.schemaJson(jsonObject);
Schema schema = schemaLoaderBuilder.build().load().build();
// validate schema
if (jsonSchemaConverterConfig.validate()) {
try {
JsonSchemaUtils.validate(schema, value);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
}
return new JsonSchemaAndValue(new JsonSchema(schema), value);
} | 3.68 |
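Based on the buffer handling above, the expected payload layout is an 8-byte record id followed by the UTF-8 JSON document. A hedged sketch of producing such a payload (the record id and JSON body are illustrative values):

byte[] json = "{\"name\":\"rocketmq\"}".getBytes(StandardCharsets.UTF_8);
ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES + json.length);
buffer.putLong(42L); // record id previously registered with the schema registry
buffer.put(json);
byte[] payload = buffer.array();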
hadoop_IdentifierResolver_setOutputKeyClass | /**
* Sets the output key class.
*/
protected void setOutputKeyClass(Class outputKeyClass) {
this.outputKeyClass = outputKeyClass;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSelectModSQL | /**
* @return The expected SQL for the MOD operator.
*/
protected String expectedSelectModSQL() {
return "SELECT MOD(intField, 5) FROM " + tableName(TEST_TABLE);
} | 3.68 |
framework_AbsoluteLayout_setTopUnits | /**
* Sets the unit for the 'top' attribute.
*
* @param topUnits
* See {@link Sizeable} UNIT_SYMBOLS for a description of the
* available units.
*/
public void setTopUnits(Unit topUnits) {
this.topUnits = topUnits;
markAsDirty();
} | 3.68 |
hbase_HMaster_getBackupMasterInfoPort | /**
* @param sn is ServerName of the backup master
* @return info port of backup master or 0 if any exception occurs.
*/
public int getBackupMasterInfoPort(final ServerName sn) {
return activeMasterManager.getBackupMasterInfoPort(sn);
} | 3.68 |
hbase_IdLock_getLockEntry | /**
* Blocks until the lock corresponding to the given id is acquired.
* @param id an arbitrary number to lock on
* @return an "entry" to pass to {@link #releaseLockEntry(Entry)} to release the lock
* @throws IOException if interrupted
*/
public Entry getLockEntry(long id) throws IOException {
Thread currentThread = Thread.currentThread();
Entry entry = new Entry(id, currentThread);
Entry existing;
while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
synchronized (existing) {
if (existing.locked) {
++existing.numWaiters; // Add ourselves to waiters.
while (existing.locked) {
try {
existing.wait();
} catch (InterruptedException e) {
--existing.numWaiters; // Remove ourselves from waiters.
// HBASE-21292
// There is a rare case that interrupting and the lock owner thread call
// releaseLockEntry at the same time. Since the owner thread found there
// still one waiting, it won't remove the entry from the map. If the interrupted
// thread is the last one waiting on the lock, and since an exception is thrown,
// the 'existing' entry will stay in the map forever. Later threads which try to
// get this lock will stuck in a infinite loop because
// existing = map.putIfAbsent(entry.id, entry)) != null and existing.locked=false.
if (!existing.locked && existing.numWaiters == 0) {
map.remove(existing.id);
}
throw new InterruptedIOException("Interrupted waiting to acquire sparse lock");
}
}
--existing.numWaiters; // Remove ourselves from waiters.
existing.locked = true;
existing.holder = currentThread;
return existing;
}
// If the entry is not locked, it might already be deleted from the
// map, so we cannot return it. We need to get our entry into the map
// or get someone else's locked entry.
}
}
return entry;
} | 3.68 |
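The acquire/release pattern implied by the Javadoc above (releaseLockEntry is referenced there; the id value is arbitrary):

IdLock idLock = new IdLock();
IdLock.Entry lockEntry = idLock.getLockEntry(42L);
try {
    // ... exclusive work for this id ...
} finally {
    idLock.releaseLockEntry(lockEntry);
}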
framework_AbstractClientConnector_requestRepaintAll | /**
* @deprecated As of 7.0, use {@link #markAsDirtyRecursive()} instead
*/
@Override
@Deprecated
public void requestRepaintAll() {
markAsDirtyRecursive();
} | 3.68 |
pulsar_FunctionRuntimeManager_getCurrentAssignments | /**
* Get current assignments.
*
* @return a map of current assignments in the following format
* {workerId : {FullyQualifiedInstanceId : Assignment}}
*/
public synchronized Map<String, Map<String, Assignment>> getCurrentAssignments() {
Map<String, Map<String, Assignment>> copy = new HashMap<>();
for (Map.Entry<String, Map<String, Assignment>> entry : this.workerIdToAssignments.entrySet()) {
Map<String, Assignment> tmp = new HashMap<>();
tmp.putAll(entry.getValue());
copy.put(entry.getKey(), tmp);
}
return copy;
} | 3.68 |
flink_DynamicSinkUtils_convertSinkToRel | /**
* Converts a given {@link DynamicTableSink} to a {@link RelNode}. It adds helper projections if
* necessary.
*/
public static RelNode convertSinkToRel(
FlinkRelBuilder relBuilder,
RelNode input,
SinkModifyOperation sinkModifyOperation,
DynamicTableSink sink) {
return convertSinkToRel(
relBuilder,
input,
sinkModifyOperation.getDynamicOptions(),
sinkModifyOperation.getContextResolvedTable(),
sinkModifyOperation.getStaticPartitions(),
sinkModifyOperation.getTargetColumns(),
sinkModifyOperation.isOverwrite(),
sink);
} | 3.68 |
rocketmq-connect_DebeziumSqlServerConnector_taskClass | /**
* Returns the task implementation class used by this connector.
* @return the task implementation class
*/
@Override
public Class<? extends Task> taskClass() {
return DebeziumSqlServerSource.class;
} | 3.68 |
hbase_MetricsConnection_incrHedgedReadWin | /** Increment the number of hedged reads that returned faster than the original read. */
public void incrHedgedReadWin() {
hedgedReadWin.inc();
} | 3.68 |
hbase_CloneSnapshotProcedure_preCloneSnapshot | /**
* Action before cloning from snapshot.
* @param env MasterProcedureEnv
*/
private void preCloneSnapshot(final MasterProcedureEnv env)
throws IOException, InterruptedException {
if (!getTableName().isSystemTable()) {
// Check and update namespace quota
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
SnapshotManifest manifest =
SnapshotManifest.open(env.getMasterConfiguration(), mfs.getFileSystem(),
SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
ProcedureSyncWait.getMasterQuotaManager(env).checkNamespaceTableAndRegionQuota(getTableName(),
manifest.getRegionManifestsMap().size());
}
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preCreateTableAction(tableDescriptor, null, getUser());
}
} | 3.68 |
flink_JobResult_toJobExecutionResult | /**
* Converts the {@link JobResult} to a {@link JobExecutionResult}.
*
* @param classLoader to use for deserialization
* @return JobExecutionResult
* @throws JobCancellationException if the job was cancelled
* @throws JobExecutionException if the job execution did not succeed
* @throws IOException if the accumulator could not be deserialized
* @throws ClassNotFoundException if the accumulator could not deserialized
*/
public JobExecutionResult toJobExecutionResult(ClassLoader classLoader)
throws JobExecutionException, IOException, ClassNotFoundException {
if (applicationStatus == ApplicationStatus.SUCCEEDED) {
return new JobExecutionResult(
jobId,
netRuntime,
AccumulatorHelper.deserializeAccumulators(accumulatorResults, classLoader));
} else {
final Throwable cause;
if (serializedThrowable == null) {
cause = null;
} else {
cause = serializedThrowable.deserializeError(classLoader);
}
final JobExecutionException exception;
if (applicationStatus == ApplicationStatus.FAILED) {
exception = new JobExecutionException(jobId, "Job execution failed.", cause);
} else if (applicationStatus == ApplicationStatus.CANCELED) {
exception = new JobCancellationException(jobId, "Job was cancelled.", cause);
} else {
exception =
new JobExecutionException(
jobId,
"Job completed with illegal application status: "
+ applicationStatus
+ '.',
cause);
}
throw exception;
}
} | 3.68 |
hmily_SchemaCache_get | /**
* acquire Schema with class.
*
* @param clazz Class
* @return Schema schema
*/
public Schema<?> get(final Class<?> clazz) {
return get(clazz, cache);
} | 3.68 |
hudi_AppendWriteFunction_getWriterHelper | // -------------------------------------------------------------------------
// GetterSetter
// -------------------------------------------------------------------------
@VisibleForTesting
public BulkInsertWriterHelper getWriterHelper() {
return this.writerHelper;
} | 3.68 |
hbase_Export_main | /**
* Main entry point.
* @param args The command line parameters.
* @throws Exception When running the job fails.
*/
public static void main(String[] args) throws Exception {
int errCode = ToolRunner.run(HBaseConfiguration.create(), new Export(), args);
System.exit(errCode);
} | 3.68 |
framework_Result_error | /**
* Returns a failure result wrapping the given error message.
*
* @param <R>
* the result value type
* @param message
* the error message
* @return a failure result
*/
public static <R> Result<R> error(String message) {
Objects.requireNonNull(message, "message cannot be null");
return new SimpleResult<>(null, message);
} | 3.68 |
hbase_MasterObserver_postMove | /**
* Called after the region move has been requested.
* @param ctx the environment to interact with the framework and master
* @param region the RegionInfo
* @param srcServer the source ServerName
* @param destServer the destination ServerName
*/
default void postMove(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo region, final ServerName srcServer, final ServerName destServer)
throws IOException {
} | 3.68 |
hadoop_NativeAzureFileSystemHelper_validateReadArgs | /**
* Validation code, based on
* {@code FSInputStream.validatePositionedReadArgs()}.
* @param buffer destination buffer
* @param offset offset within the buffer
* @param length length of bytes to read
* @throws EOFException if the position is negative
* @throws IndexOutOfBoundsException if there isn't space for the amount of
* data requested.
* @throws IllegalArgumentException other arguments are invalid.
*/
static void validateReadArgs(byte[] buffer, int offset, int length)
throws EOFException {
Preconditions.checkArgument(length >= 0, "length is negative");
Preconditions.checkArgument(buffer != null, "Null buffer");
if (buffer.length - offset < length) {
throw new IndexOutOfBoundsException(
FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
+ ": request length=" + length
+ ", with offset =" + offset
+ "; buffer capacity =" + (buffer.length - offset));
}
} | 3.68 |
pulsar_BrokerService_forEachTopic | /**
* Iterates over all loaded topics in the broker.
*/
public void forEachTopic(Consumer<Topic> consumer) {
topics.forEach((n, t) -> {
Optional<Topic> topic = extractTopic(t);
topic.ifPresent(consumer::accept);
});
} | 3.68 |
pulsar_ReaderConfiguration_getReaderListener | /**
* @return the configured {@link ReaderListener} for the reader
*/
public ReaderListener<byte[]> getReaderListener() {
return readerListener;
} | 3.68 |
flink_AbstractAggregatingMetricsHandler_getAvailableMetrics | /**
* Returns the names of all metrics available in the given stores, i.e. the union of the
* stores' metric key sets.
*
* @param stores metric stores to inspect
* @return the names of all available metrics
*/
private static Collection<String> getAvailableMetrics(
Collection<? extends MetricStore.ComponentMetricStore> stores) {
Set<String> uniqueMetrics = CollectionUtil.newHashSetWithExpectedSize(32);
for (MetricStore.ComponentMetricStore store : stores) {
uniqueMetrics.addAll(store.metrics.keySet());
}
return uniqueMetrics;
} | 3.68 |
flink_ProducerMergedPartitionFileIndex_addBuffers | /**
* Add buffers to the index.
*
* @param buffers to be added. Note, the provided buffers are required to be physically
* consecutive and in the same order as in the file.
*/
void addBuffers(List<FlushedBuffer> buffers) {
if (buffers.isEmpty()) {
return;
}
Map<Integer, List<FixedSizeRegion>> convertedRegions = convertToRegions(buffers);
synchronized (lock) {
convertedRegions.forEach(indexCache::put);
}
} | 3.68 |
flink_InPlaceMutableHashTable_open | /** Initialize the hash table */
@Override
public void open() {
open(calcInitialNumBucketSegments());
} | 3.68 |
flink_LeaderInformationRegister_hasNoLeaderInformation | /**
* Checks that no non-empty {@link LeaderInformation} is stored.
*
* @return {@code true}, if there is no entry that refers to a non-empty {@code
* LeaderInformation}; otherwise {@code false} (i.e. either no information is stored under
* any {@code componentId} or there are entries for certain {@code componentId}s that refer
* to an empty {@code LeaderInformation} record).
*/
public boolean hasNoLeaderInformation() {
return leaderInformationPerComponentId.isEmpty();
} | 3.68 |
morf_OracleDialect_getSubstringFunctionName | /**
* {@inheritDoc}
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSubstringFunctionName()
*/
@Override
protected String getSubstringFunctionName() {
return "SUBSTR";
} | 3.68 |
hadoop_NMContainerStatus_getAllocationTags | /**
* Get the allocation tags associated with the container.
* @return Allocation tags.
*/
public Set<String> getAllocationTags() {
return Collections.emptySet();
} | 3.68 |
hbase_HRegion_getLoadStatistics | /** Returns statistics about the current load of the region */
public ClientProtos.RegionLoadStats getLoadStatistics() {
if (!regionStatsEnabled) {
return null;
}
ClientProtos.RegionLoadStats.Builder stats = ClientProtos.RegionLoadStats.newBuilder();
stats.setMemStoreLoad((int) (Math.min(100,
(this.memStoreSizing.getMemStoreSize().getHeapSize() * 100) / this.memstoreFlushSize)));
if (rsServices.getHeapMemoryManager() != null) {
// the HeapMemoryManager uses -0.0 to signal a problem asking the JVM,
// so we could just do the calculation below and we'll get a 0.
// treating it as a special case analogous to no HMM instead so that it can be
// programatically treated different from using <1% of heap.
final float occupancy = rsServices.getHeapMemoryManager().getHeapOccupancyPercent();
if (occupancy != HeapMemoryManager.HEAP_OCCUPANCY_ERROR_VALUE) {
stats.setHeapOccupancy((int) (occupancy * 100));
}
}
stats.setCompactionPressure((int) (rsServices.getCompactionPressure() * 100 > 100
? 100
: rsServices.getCompactionPressure() * 100));
return stats.build();
} | 3.68 |
shardingsphere-elasticjob_JobScheduleController_scheduleJob | /**
* Schedule job.
*
* @param cron CRON expression
* @param timeZone the time zone
*/
public void scheduleJob(final String cron, final String timeZone) {
try {
if (!scheduler.checkExists(jobDetail.getKey())) {
scheduler.scheduleJob(jobDetail, createCronTrigger(cron, timeZone));
}
scheduler.start();
} catch (final SchedulerException ex) {
throw new JobSystemException(ex);
}
} | 3.68 |
flink_FlinkPreparingTableBase_isKey | /**
* Returns whether the given columns are a key or a superset of a unique key of this table.
*
* <p>Note: a return value of true means TRUE, but a return value of false means FALSE or NOT KNOWN.
* It is better to use {@link org.apache.calcite.rel.metadata.RelMetadataQuery#areRowsUnique} to
* distinguish FALSE from NOT KNOWN.
*
* @param columns Ordinals of key columns
* @return If the input columns bits represents a unique column set; false if not (or if no
* metadata is available)
*/
public boolean isKey(ImmutableBitSet columns) {
return false;
} | 3.68 |
pulsar_LoadSimulationController_handleGroupChange | // Handle the command line arguments associated with the group change command.
private void handleGroupChange(final ShellArguments arguments) throws Exception {
final List<String> commandArguments = arguments.commandArguments;
// Group change expects two application arguments: tenant name and group
// name.
if (checkAppArgs(commandArguments.size() - 1, 2)) {
final String tenant = commandArguments.get(1);
final String group = commandArguments.get(2);
for (DataOutputStream outputStream : outputStreams) {
outputStream.write(LoadSimulationClient.CHANGE_GROUP_COMMAND);
outputStream.writeUTF(tenant);
outputStream.writeUTF(group);
outputStream.writeInt(arguments.size);
outputStream.writeDouble(arguments.rate);
outputStream.flush();
}
}
} | 3.68 |
hbase_BackupManager_readLogTimestampMap | /**
* Read the timestamp for each region server log after the last successful backup. Each table has
* its own set of the timestamps.
* @return the timestamp for each region server. key: tableName value:
* RegionServer,PreviousTimeStamp
* @throws IOException exception
*/
public Map<TableName, Map<String, Long>> readLogTimestampMap() throws IOException {
return systemTable.readLogTimestampMap(backupInfo.getBackupRootDir());
} | 3.68 |
hbase_StorageClusterStatusModel_setHeapSizeMB | /**
* @param heapSizeMB the current heap size, in MB
*/
public void setHeapSizeMB(int heapSizeMB) {
this.heapSizeMB = heapSizeMB;
} | 3.68 |
hbase_HttpServer_getServerName | /**
* Quote the server name so that users specifying the HOST HTTP header can't inject attacks.
*/
@Override
public String getServerName() {
return HtmlQuoting.quoteHtmlChars(rawRequest.getServerName());
} | 3.68 |
flink_CliClient_close | /** Closes the CLI instance. */
public void close() {
if (terminal != null) {
closeTerminal();
}
} | 3.68 |
hudi_AvroSchemaConverter_nullableSchema | /**
* Returns schema with nullable true.
*/
private static Schema nullableSchema(Schema schema) {
return schema.isNullable()
? schema
: Schema.createUnion(SchemaBuilder.builder().nullType(), schema);
} | 3.68 |
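An illustrative call of the helper above: a non-nullable Avro string schema becomes a ["null", "string"] union, while an already-nullable schema would be returned unchanged:

Schema stringSchema = Schema.create(Schema.Type.STRING);
Schema nullable = nullableSchema(stringSchema);
System.out.println(nullable); // ["null","string"]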
morf_ConnectionResourcesBean_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (databaseName == null ? 0 : databaseName.hashCode());
result = prime * result + (databaseType == null ? 0 : databaseType.hashCode());
result = prime * result + (hostName == null ? 0 : hostName.hashCode());
result = prime * result + (instanceName == null ? 0 : instanceName.hashCode());
result = prime * result + port;
result = prime * result + (schemaName == null ? 0 : schemaName.hashCode());
result = prime * result + statementPoolingMaxStatements;
result = prime * result + fetchSizeForBulkSelects;
result = prime * result + fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming;
result = prime * result + (userName == null ? 0 : userName.hashCode());
return result;
} | 3.68 |
framework_VScrollTable_removeCell | /**
* Remove a cell by using the columnId.
*
* @param colKey
* The columnId to remove
*/
public void removeCell(String colKey) {
final FooterCell c = getFooterCell(colKey);
remove(c);
} | 3.68 |
hbase_JvmPauseMonitor_main | /**
* Simple 'main' to facilitate manual testing of the pause monitor. This main function just leaks
* memory into a list. Running this class with a 1GB heap will very quickly go into "GC hell" and
* result in log messages about the GC pauses.
*/
public static void main(String[] args) throws Exception {
new JvmPauseMonitor(new Configuration()).start();
List<String> list = Lists.newArrayList();
int i = 0;
while (true) {
list.add(String.valueOf(i++));
}
} | 3.68 |
flink_FlinkCompletableFutureAssert_eventuallySucceeds | /**
* An equivalent of {@link #succeedsWithin(Duration)}, that doesn't rely on timeouts.
*
* @return a new assertion object on the future's result
*/
public ObjectAssert<T> eventuallySucceeds() {
final T object = assertEventuallySucceeds(info, actual);
return new ObjectAssert<>(object);
} | 3.68 |
morf_ResultSetIterator_getTable | /**
* @return the table
*/
public Table getTable() {
return table;
} | 3.68 |
hbase_Bytes_putFloat | /**
* Put a float value out to the specified byte array position.
* @param bytes byte array
* @param offset offset to write to
* @param f float value
* @return New offset in <code>bytes</code>
*/
public static int putFloat(byte[] bytes, int offset, float f) {
return putInt(bytes, offset, Float.floatToRawIntBits(f));
} | 3.68 |
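A round-trip sketch: the float is stored as its raw IEEE-754 bit pattern, so it can be recovered with Float.intBitsToFloat (Bytes.toInt is assumed here as the matching int reader):

byte[] buf = new byte[4];
Bytes.putFloat(buf, 0, 3.5f);
float back = Float.intBitsToFloat(Bytes.toInt(buf, 0)); // 3.5f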
shardingsphere-elasticjob_FailoverService_setCrashedFailoverFlag | /**
* set crashed failover flag.
*
* @param item crashed job item
*/
public void setCrashedFailoverFlag(final int item) {
if (!isFailoverAssigned(item)) {
jobNodeStorage.createJobNodeIfNeeded(FailoverNode.getItemsNode(item));
jobNodeStorage.removeJobNodeIfExisted(ShardingNode.getRunningNode(item));
}
} | 3.68 |
framework_CellReference_getColumn | /**
* Gets the column object.
*
* @return the column object
*/
public Grid.Column<?, T> getColumn() {
return column;
} | 3.68 |
hibernate-validator_AbstractMessageInterpolator_interpolateMessage | /**
* Runs the message interpolation according to algorithm specified in the Bean Validation specification.
* <p>
* Note:
* <p>
* Look-ups in user bundles is recursive whereas look-ups in default bundle are not!
*
* @param message the message to interpolate
* @param context the context for this interpolation
* @param locale the {@code Locale} to use for the resource bundle.
*
* @return the interpolated message.
*/
private String interpolateMessage(String message, Context context, Locale locale) throws MessageDescriptorFormatException {
// if the message does not contain any message parameter, we can ignore the next steps and just return
// the unescaped message. It avoids storing the message in the cache and a cache lookup.
if ( message.indexOf( '{' ) < 0 ) {
return replaceEscapedLiterals( message );
}
String resolvedMessage = null;
// either retrieve message from cache, or if message is not yet there or caching is disabled,
// perform message resolution algorithm (step 1)
if ( cachingEnabled ) {
resolvedMessage = resolvedMessages.computeIfAbsent( new LocalizedMessage( message, locale ), lm -> resolveMessage( message, locale ) );
}
else {
resolvedMessage = resolveMessage( message, locale );
}
// there's no need for steps 2-3 unless there's `{param}`/`${expr}` in the message
if ( resolvedMessage.indexOf( '{' ) > -1 ) {
// resolve parameter expressions (step 2)
resolvedMessage = interpolateExpression(
new TokenIterator( getParameterTokens( resolvedMessage, tokenizedParameterMessages, InterpolationTermType.PARAMETER ) ),
context,
locale
);
// resolve EL expressions (step 3)
// in the standard Hibernate Validator execution flow, the context is always an instance of
// HibernateMessageInterpolatorContext
// but it can be a spec Context in the Jakarta Bean Validation TCK.
if ( !( context instanceof HibernateMessageInterpolatorContext )
|| ( (HibernateMessageInterpolatorContext) context ).getExpressionLanguageFeatureLevel() != ExpressionLanguageFeatureLevel.NONE ) {
resolvedMessage = interpolateExpression(
new TokenIterator( getParameterTokens( resolvedMessage, tokenizedELMessages, InterpolationTermType.EL ) ),
context,
locale );
} | 3.68 |
morf_HumanReadableStatementHelper_generateColumnCountString | /**
* Generates a string which represents the number of columns specified.
*
* @param columnCount the number of columns
* @return a string representation of the number of columns
*/
private static String generateColumnCountString(final int columnCount) {
if (columnCount == 0) {
return "no columns";
}
if (columnCount == 1) {
return "1 column";
}
return columnCount + " columns";
} | 3.68 |
pulsar_AdditionalServletUtils_load | /**
* Load the additional servlets according to the additional servlet definition.
*
* @param metadata the additional servlet definition.
*/
public AdditionalServletWithClassLoader load(
AdditionalServletMetadata metadata, String narExtractionDirectory) throws IOException {
final File narFile = metadata.getArchivePath().toAbsolutePath().toFile();
NarClassLoader ncl = NarClassLoaderBuilder.builder()
.narFile(narFile)
.parentClassLoader(AdditionalServlet.class.getClassLoader())
.extractionDirectory(narExtractionDirectory)
.build();
AdditionalServletDefinition def = getAdditionalServletDefinition(ncl);
if (StringUtils.isBlank(def.getAdditionalServletClass())) {
throw new IOException("Additional servlets `" + def.getName() + "` does NOT provide an "
+ "additional servlets implementation");
}
try {
Class additionalServletClass = ncl.loadClass(def.getAdditionalServletClass());
Object additionalServlet = additionalServletClass.getDeclaredConstructor().newInstance();
if (!(additionalServlet instanceof AdditionalServlet)) {
throw new IOException("Class " + def.getAdditionalServletClass()
+ " does not implement additional servlet interface");
}
AdditionalServlet servlet = (AdditionalServlet) additionalServlet;
return new AdditionalServletWithClassLoader(servlet, ncl);
} catch (Throwable t) {
rethrowIOException(t);
return null;
}
} | 3.68 |
hbase_Procedure_waitInitialized | /**
* The {@link #doAcquireLock(Object, ProcedureStore)} will be split into two steps, first, it will
* call us to determine whether we need to wait for initialization, second, it will call
* {@link #acquireLock(Object)} to actually handle the lock for this procedure.
* <p/>
* This is because that when master restarts, we need to restore the lock state for all the
* procedures to not break the semantic if {@link #holdLock(Object)} is true. But the
* {@link ProcedureExecutor} will be started before the master finish initialization(as it is part
* of the initialization!), so we need to split the code into two steps, and when restore, we just
* restore the lock part and ignore the waitInitialized part. Otherwise there will be dead lock.
* @return true means we need to wait until the environment has been initialized, otherwise false.
*/
protected boolean waitInitialized(TEnvironment env) {
return false;
} | 3.68 |
hadoop_MountTableProcedure_setMountReadOnly | /**
* Enable or disable readonly of the mount point.
*
* @param mount the mount point.
* @param readOnly enable or disable readonly.
* @param conf the configuration of the router.
*/
private static void setMountReadOnly(String mount, boolean readOnly,
Configuration conf) throws IOException {
String address = conf.getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);
RouterClient rClient = new RouterClient(routerSocket, conf);
try {
MountTableManager mountTable = rClient.getMountTableManager();
MountTable originalEntry = getMountEntry(mount, mountTable);
if (originalEntry == null) {
throw new IOException("Mount table " + mount + " doesn't exist");
} else {
originalEntry.setReadOnly(readOnly);
UpdateMountTableEntryRequest updateRequest =
UpdateMountTableEntryRequest.newInstance(originalEntry);
UpdateMountTableEntryResponse response =
mountTable.updateMountTableEntry(updateRequest);
if (!response.getStatus()) {
throw new IOException(
"Failed update mount table " + mount + " with readonly="
+ readOnly);
}
rClient.getMountTableManager().refreshMountTableEntries(
RefreshMountTableEntriesRequest.newInstance());
}
} finally {
rClient.close();
}
} | 3.68 |
hadoop_OBSFileSystem_setWorkingDirectory | /**
* Set the current working directory for the file system. All relative paths
* will be resolved relative to it.
*
* @param newDir the new working directory
*/
@Override
public void setWorkingDirectory(final Path newDir) {
workingDir = newDir;
} | 3.68 |
dubbo_InmemoryConfiguration_addProperties | /**
* Add a set of properties into the store
*/
public void addProperties(Map<String, String> properties) {
if (properties != null) {
this.store.putAll(properties);
}
} | 3.68 |
hadoop_MutableQuantiles_getQuantiles | /**
* Returns the array of Quantiles declared in MutableQuantiles.
*
* @return array of Quantiles
*/
public synchronized Quantile[] getQuantiles() {
return QUANTILES;
} | 3.68 |
hbase_WhileMatchFilter_areSerializedFieldsEqual | /**
* Return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof WhileMatchFilter)) {
return false;
}
WhileMatchFilter other = (WhileMatchFilter) o;
return getFilter().areSerializedFieldsEqual(other.getFilter());
} | 3.68 |
hbase_FlushPolicy_configureForRegion | /**
* Upon construction, this method will be called with the region to be governed. It will be called
* once and only once.
*/
protected void configureForRegion(HRegion region) {
this.region = region;
} | 3.68 |
hudi_HoodieAsyncService_start | /**
* Start the service. Runs the service in a different thread and returns. Also starts a monitor thread to
* run-callbacks in case of shutdown
*
* @param onShutdownCallback callback to invoke once the service shuts down
*/
public void start(Function<Boolean, Boolean> onShutdownCallback) {
if (started) {
LOG.warn("The async service already started.");
return;
}
Pair<CompletableFuture, ExecutorService> res = startService();
future = res.getKey();
executor = res.getValue();
started = true;
shutdownCallback(onShutdownCallback);
} | 3.68 |
flink_CopyOnWriteSkipListStateMapSnapshot_writeKeyAndNamespace | /** Writes the key and namespace bytes of the given node to the output view (namespace first, then key). */
private void writeKeyAndNamespace(long nodeId, DataOutputView outputView) throws IOException {
// tuple of byte arrays for key and namespace
Tuple2<byte[], byte[]> tuple = owningStateMap.helpGetBytesForKeyAndNamespace(nodeId);
// write namespace first
outputView.write(tuple.f1);
outputView.write(tuple.f0);
} | 3.68 |
zilla_HpackContext_staticIndex5 | // Index in static table for the given name of length 5
private static int staticIndex5(DirectBuffer name)
{
switch (name.getByte(4))
{
case 'e':
if (STATIC_TABLE[50].name.equals(name)) // range
{
return 50;
}
break;
case 'h':
if (STATIC_TABLE[4].name.equals(name)) // path
{
return 4;
}
break;
case 'w':
if (STATIC_TABLE[22].name.equals(name)) // allow
{
return 22;
}
break;
}
return -1;
} | 3.68 |