name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
morf_AbstractDatabaseType_driverClassName | /**
* @return the class name of the driver associated with this database type.
*/
@Override
public final String driverClassName() {
return driverClassName;
} | 3.68 |
dubbo_DubboCertManager_scheduleRefresh | /**
* Create task to refresh cert pair for current Dubbo instance
*/
protected void scheduleRefresh() {
FrameworkExecutorRepository repository =
frameworkModel.getBeanFactory().getBean(FrameworkExecutorRepository.class);
refreshFuture = repository
.getSharedScheduledExecutor()
.scheduleAtFixedRate(
this::generateCert,
certConfig.getRefreshInterval(),
certConfig.getRefreshInterval(),
TimeUnit.MILLISECONDS);
} | 3.68 |
hudi_HoodieTableMetaClient_getCommitActionType | /**
* Gets the commit action type.
*/
public String getCommitActionType() {
return CommitUtils.getCommitActionType(this.getTableType());
} | 3.68 |
hudi_Types_fieldByName | /**
* Case-sensitive lookup of a field by name.
*/
public Field fieldByName(String name) {
if (nameToFields == null) {
nameToFields = Arrays.stream(fields)
.collect(Collectors.toMap(
Field::name,
field -> field));
}
return nameToFields.get(name);
} | 3.68 |
framework_FilesystemContainer_isRecursive | /**
* Tells whether this container iterates the filesystem recursively.
*
* @return <code>true</code> if container is recursive, <code>false</code>
* otherwise.
*/
public boolean isRecursive() {
return recursive;
} | 3.68 |
hbase_HFileBlock_blockSizeWritten | /**
* Returns the number of bytes written into the current block so far, or zero if not writing the
* block at the moment. Note that this will return zero in the "block ready" state as well.
* @return the number of bytes written
*/
public int blockSizeWritten() {
return state != State.WRITING ? 0 : this.getEncodingState().getUnencodedDataSizeWritten();
} | 3.68 |
morf_AddIndex_accept | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor)
*/
@Override
public void accept(SchemaChangeVisitor visitor) {
visitor.visit(this);
} | 3.68 |
hbase_SnapshotOfRegionAssignmentFromMeta_getRegionServerToRegionMap | /**
* Get regionserver to region map
* @return regionserver to region map
*/
public Map<ServerName, List<RegionInfo>> getRegionServerToRegionMap() {
return currentRSToRegionMap;
} | 3.68 |
pulsar_AuthorizationProvider_revokePermissionAsync | /**
* Revoke authorization-action permission on a topic from the given client.
* @param topicName
* @param role
* @return CompletableFuture<Void>
*/
default CompletableFuture<Void> revokePermissionAsync(TopicName topicName, String role) {
return FutureUtil.failedFuture(new IllegalStateException(
String.format("revokePermissionAsync on topicName %s is not supported by the Authorization",
topicName)));
} | 3.68 |
hbase_MonitoredRPCHandlerImpl_setConnection | /**
* Registers current handler client details.
* @param clientAddress the address of the current client
* @param remotePort the port from which the client connected
*/
@Override
public void setConnection(String clientAddress, int remotePort) {
this.clientAddress = clientAddress;
this.remotePort = remotePort;
} | 3.68 |
pulsar_OpenIDProviderMetadataCache_loadOpenIDProviderMetadataForIssuer | /**
* A loader for the cache that retrieves the metadata from the issuer's /.well-known/openid-configuration endpoint.
* @return the OpenID Provider Metadata retrieved from the issuer's /.well-known/openid-configuration endpoint.
* Fails with AuthenticationException if the URL is malformed or there is an exception while retrieving the metadata
*/
private CompletableFuture<OpenIDProviderMetadata> loadOpenIDProviderMetadataForIssuer(String issuer) {
String url;
// TODO URI's normalization likely follows RFC2396 (library doesn't say so explicitly), whereas the spec
// https://openid.net/specs/openid-connect-discovery-1_0.html#NormalizationSteps
// calls for normalization according to RFC3986, which is supposed to obsolete RFC2396. Is this a problem?
if (issuer.endsWith("/")) {
url = issuer + WELL_KNOWN_OPENID_CONFIG;
} else {
url = issuer + SLASH_WELL_KNOWN_OPENID_CONFIG;
}
return httpClient
.prepareGet(url)
.execute()
.toCompletableFuture()
.thenCompose(result -> {
CompletableFuture<OpenIDProviderMetadata> future = new CompletableFuture<>();
try {
OpenIDProviderMetadata openIDProviderMetadata =
reader.readValue(result.getResponseBodyAsBytes());
// We can verify this issuer once and cache the result because the issuer uniquely maps
// to the cached object.
verifyIssuer(issuer, openIDProviderMetadata, false);
future.complete(openIDProviderMetadata);
} catch (AuthenticationException e) {
incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PROVIDER_METADATA);
future.completeExceptionally(e);
} catch (Exception e) {
incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PROVIDER_METADATA);
future.completeExceptionally(new AuthenticationException(
"Error retrieving OpenID Provider Metadata at " + issuer + ": " + e.getMessage()));
}
return future;
});
} | 3.68 |
hbase_DirScanPool_onConfigurationChange | /**
* Checks if pool can be updated. If so, mark for update later.
* @param conf configuration
*/
@Override
public synchronized void onConfigurationChange(Configuration conf) {
int newSize = CleanerChore.calculatePoolSize(conf.get(dirScanPoolType.cleanerPoolSizeConfigName,
dirScanPoolType.cleanerPoolSizeConfigDefault));
if (newSize == size) {
LOG.trace("{} Cleaner Size from configuration is same as previous={}, no need to update.",
name, newSize);
return;
}
size = newSize;
// Chore is working, update it later.
reconfigNotification = true;
} | 3.68 |
morf_DataValueLookupBuilderImpl_getAndConvertByName | /**
* Fetches the value of the specified column, converting it to the target
* type using the associated {@link ValueConverter}.
*/
private final <STORED, RETURNED> RETURNED getAndConvertByName(String columnName, ValueMapper<STORED, RETURNED> mapper) {
if (metadata == null) return null;
return getAndConvertByIndex(metadata.getIndexInArray(CaseInsensitiveString.of(columnName)), mapper);
} | 3.68 |
hadoop_RouterQuotaManager_remove | /**
* Remove the entity from cache.
* @param path Mount table path.
*/
public void remove(String path) {
writeLock.lock();
try {
this.cache.remove(path);
} finally {
writeLock.unlock();
}
} | 3.68 |
hbase_MasterCoprocessorHost_postMergeRegionsCommit | /**
* Invoked after merge regions operation writes the new region to hbase:meta
* @param regionsToMerge the regions to merge
* @param mergedRegion the new merged region
* @param user the user
*/
public void postMergeRegionsCommit(final RegionInfo[] regionsToMerge,
final RegionInfo mergedRegion, final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.postMergeRegionsCommitAction(this, regionsToMerge, mergedRegion);
}
});
} | 3.68 |
framework_AbsoluteLayoutRelativeSizeContent_createFullOnFull | /**
* Creates an {@link AbsoluteLayout} of full size that contains another
* full-sized {@link AbsoluteLayout}.
*
* @return the created layout
*/
private AbsoluteLayout createFullOnFull() {
AbsoluteLayout absoluteLayout = new AbsoluteLayout();
absoluteLayout.setSizeFull();
absoluteLayout.setId("fullonfull-outer");
absoluteLayout.addStyleName("cyan");
absoluteLayout.setCaption("area with red border expected");
AbsoluteLayout absoluteLayout2 = new AbsoluteLayout();
absoluteLayout2.setSizeFull();
absoluteLayout2.setId("fullonfull-inner");
absoluteLayout2.addStyleName("redborder");
absoluteLayout.addComponent(absoluteLayout2, "top:50px;left:100px;");
return absoluteLayout;
} | 3.68 |
framework_VUpload_submit | /** For internal use only. May be removed or replaced in the future. */
public void submit() {
if (submitted || !enabled) {
getLogger()
.info("Submit cancelled (disabled or already submitted)");
return;
}
if (!hasFilename()) {
if (!allowUploadWithoutFilename) {
return;
}
getLogger().info("Submitting empty selection (no file)");
}
// flush possibly pending variable changes, so they will be handled
// before upload
client.getServerRpcQueue().flush();
// This is done as deferred because sendPendingVariableChanges is also
// deferred and we want to start the upload only after the changes have
// been sent to the server
Scheduler.get().scheduleDeferred(startUploadCmd);
} | 3.68 |
hbase_HRegion_addRegionToSnapshot | /**
* Complete taking the snapshot on the region. Writes the region info and adds references to the
* working snapshot directory. TODO for api consistency, consider adding another version with no
* {@link ForeignExceptionSnare} arg. (In the future other cancellable HRegion methods could
* eventually add a {@link ForeignExceptionSnare}, or we could do something fancier).
* @param desc snapshot description object
* @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to bail
* out. This is allowed to be null and will just be ignored in that case.
* @throws IOException if there is an external or internal error causing the snapshot to fail
*/
public void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare)
throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
SnapshotManifest manifest =
SnapshotManifest.create(conf, getFilesystem(), snapshotDir, desc, exnSnare);
manifest.addRegion(this);
} | 3.68 |
hadoop_AoclDiagnosticOutputParser_parseDiagnosticOutput | /**
* One real sample output of Intel FPGA SDK 17.0's "aocl diagnose" is as below:
* "
* aocl diagnose: Running diagnose from /home/fpga/intelFPGA_pro/17.0/hld/board/nalla_pcie/linux64/libexec
*
* ------------------------- acl0 -------------------------
* Vendor: Nallatech ltd
*
* Phys Dev Name Status Information
*
* aclnalla_pcie0Passed nalla_pcie (aclnalla_pcie0)
* PCIe dev_id = 2494, bus:slot.func = 02:00.00, Gen3 x8
* FPGA temperature = 54.4 degrees C.
* Total Card Power Usage = 31.7 Watts.
* Device Power Usage = 0.0 Watts.
*
* DIAGNOSTIC_PASSED
* ---------------------------------------------------------
* "
*
* Whereas, per Intel's guide, the output (likely outdated or from a prior SDK version) is as below:
*
* "
* aocl diagnose: Running diagnostic from ALTERAOCLSDKROOT/board/<board_name>/
* <platform>/libexec
* Verified that the kernel mode driver is installed on the host machine.
* Using board package from vendor: <board_vendor_name>
* Querying information for all supported devices that are installed on the host
* machine ...
*
* device_name Status Information
*
* acl0 Passed <descriptive_board_name>
* PCIe dev_id = <device_ID>, bus:slot.func = 02:00.00,
* at Gen 2 with 8 lanes.
* FPGA temperature=43.0 degrees C.
* acl1 Passed <descriptive_board_name>
* PCIe dev_id = <device_ID>, bus:slot.func = 03:00.00,
* at Gen 2 with 8 lanes.
* FPGA temperature = 35.0 degrees C.
*
* Found 2 active device(s) installed on the host machine, to perform a full
* diagnostic on a specific device, please run aocl diagnose <device_name>
*
* DIAGNOSTIC_PASSED
* "
* Note that this method only supports the first output format.
* */
public static List<FpgaDevice> parseDiagnosticOutput(
String output, InnerShellExecutor shellExecutor, String fpgaType) {
if (output.contains("DIAGNOSTIC_PASSED")) {
List<FpgaDevice> devices = new ArrayList<>();
Matcher headerStartMatcher = Pattern.compile("acl[0-31]")
.matcher(output);
Matcher headerEndMatcher = Pattern.compile("(?i)DIAGNOSTIC_PASSED")
.matcher(output);
int sectionStartIndex;
int sectionEndIndex;
String aliasName;
while (headerStartMatcher.find()) {
sectionStartIndex = headerStartMatcher.end();
String section = null;
aliasName = headerStartMatcher.group();
while (headerEndMatcher.find(sectionStartIndex)) {
sectionEndIndex = headerEndMatcher.start();
section = output.substring(sectionStartIndex, sectionEndIndex);
break;
}
if (section == null) {
LOG.warn("Unsupported diagnose output");
LOG.warn("aocl output is: " + output);
return Collections.emptyList();
}
// devName, \(.*\)
// busNum, bus:slot.func\s=\s.*,
// FPGA temperature\s=\s.*
// Total\sCard\sPower\sUsage\s=\s.*
String[] fieldRegexes = new String[]{"\\(.*\\)\n",
"(?i)bus:slot.func\\s=\\s.*,",
"(?i)FPGA temperature\\s=\\s.*",
"(?i)Total\\sCard\\sPower\\sUsage\\s=\\s.*"};
String[] fields = new String[4];
String tempFieldValue;
for (int i = 0; i < fieldRegexes.length; i++) {
Matcher fieldMatcher = Pattern.compile(fieldRegexes[i])
.matcher(section);
if (!fieldMatcher.find()) {
LOG.warn("Couldn't find " + fieldRegexes[i] + " pattern");
fields[i] = "";
continue;
}
tempFieldValue = fieldMatcher.group().trim();
if (i == 0) {
// special case for Device name
fields[i] = tempFieldValue.substring(1,
tempFieldValue.length() - 1);
} else {
String ss = tempFieldValue.split("=")[1].trim();
fields[i] = ss.substring(0, ss.length() - 1);
}
}
String majorMinorNumber = shellExecutor
.getMajorAndMinorNumber(fields[0]);
if (null != majorMinorNumber) {
String[] mmn = majorMinorNumber.split(":");
devices.add(new FpgaDevice(fpgaType,
Integer.parseInt(mmn[0]),
Integer.parseInt(mmn[1]),
aliasName));
} else {
LOG.warn("Failed to retrieve major/minor number for device");
}
}
return devices;
} else {
LOG.warn("The diagnostic has failed");
LOG.warn("Output of aocl is: " + output);
return Collections.emptyList();
}
} | 3.68 |
pulsar_JdbcUtils_getTableDefinition | /**
* Get the {@link TableDefinition} for the given table.
*/
public static TableDefinition getTableDefinition(
Connection connection,
TableId tableId,
List<String> keyList,
List<String> nonKeyList,
boolean excludeNonDeclaredFields
) throws Exception {
TableDefinition table = TableDefinition.of(
tableId, Lists.newArrayList(), Lists.newArrayList(), Lists.newArrayList());
keyList = keyList == null ? Collections.emptyList() : keyList;
nonKeyList = nonKeyList == null ? Collections.emptyList() : nonKeyList;
try (ResultSet rs = connection.getMetaData().getColumns(
tableId.getCatalogName(),
tableId.getSchemaName(),
tableId.getTableName(),
null
)) {
while (rs.next()) {
final String columnName = rs.getString(4);
final int sqlDataType = rs.getInt(5);
final String typeName = rs.getString(6);
final int position = rs.getInt(17);
if (log.isDebugEnabled()) {
log.debug("Get column. name: {}, data type: {}, position: {}", columnName, typeName, position);
}
ColumnId columnId = ColumnId.of(tableId, columnName, sqlDataType, typeName, position);
if (keyList.contains(columnName)) {
table.keyColumns.add(columnId);
table.columns.add(columnId);
} else if (nonKeyList.contains(columnName)) {
table.nonKeyColumns.add(columnId);
table.columns.add(columnId);
} else if (!excludeNonDeclaredFields) {
table.columns.add(columnId);
}
}
return table;
}
} | 3.68 |
morf_DeleteStatementBuilder_getLimit | /**
* Gets the limit.
*
* @return the limit on the number of deleted records.
*/
public Optional<Integer> getLimit() {
return limit;
} | 3.68 |
graphhopper_ResponsePath_setWaypoints | /**
* This method initializes this path with the snapped input points.
*/
public ResponsePath setWaypoints(PointList wpList) {
if (waypointList != PointList.EMPTY)
throw new IllegalStateException("Cannot call setWaypoints twice");
this.waypointList = wpList;
return this;
} | 3.68 |
hbase_HFileSystem_getBackingFs | /**
* Returns the underlying filesystem
* @return The underlying FileSystem for this FilterFileSystem object.
*/
public FileSystem getBackingFs() throws IOException {
return fs;
} | 3.68 |
hadoop_RemoteMethod_getTypes | /**
* Get the calling types for this method.
*
* @return An array of calling types.
*/
public Class<?>[] getTypes() {
return Arrays.copyOf(this.types, this.types.length);
} | 3.68 |
flink_BlobUtils_writeLength | /**
* Auxiliary method to write the length of an upcoming data chunk to an output stream.
*
* @param length the length of the upcoming data chunk in bytes
* @param outputStream the output stream to write the length to
* @throws IOException thrown if an I/O error occurs while writing to the output stream
*/
static void writeLength(int length, OutputStream outputStream) throws IOException {
byte[] buf = new byte[4];
buf[0] = (byte) (length & 0xff);
buf[1] = (byte) ((length >> 8) & 0xff);
buf[2] = (byte) ((length >> 16) & 0xff);
buf[3] = (byte) ((length >> 24) & 0xff);
outputStream.write(buf, 0, 4);
} | 3.68 |
flink_MapValue_containsValue | /*
* (non-Javadoc)
* @see java.util.Map#containsValue(java.lang.Object)
*/
@Override
public boolean containsValue(final Object value) {
return this.map.containsValue(value);
} | 3.68 |
AreaShop_AreaShop_message | /**
* Send a message to a target, prefixed by the default chat prefix.
* @param target The target to send the message to
* @param key The key of the language string
* @param replacements The replacements to insert in the message
*/
public void message(Object target, String key, Object... replacements) {
Message.fromKey(key).prefix().replacements(replacements).send(target);
} | 3.68 |
graphhopper_DepthFirstSearch_start | /**
* Beginning with startNode, add all following nodes to the LIFO queue. If a node has already
* been explored, skip re-exploration.
*/
@Override
public void start(EdgeExplorer explorer, int startNode) {
IntArrayDeque stack = new IntArrayDeque();
GHBitSet explored = createBitSet();
stack.addLast(startNode);
int current;
while (stack.size() > 0) {
current = stack.removeLast();
if (!explored.contains(current) && goFurther(current)) {
EdgeIterator iter = explorer.setBaseNode(current);
while (iter.next()) {
int connectedId = iter.getAdjNode();
if (checkAdjacent(iter)) {
stack.addLast(connectedId);
}
}
explored.add(current);
}
}
} | 3.68 |
framework_AbstractMedia_setLoop | /**
* Enables or disables looping.
*
* @param loop
* if true, enable looping
* @since 7.7.11
*/
public void setLoop(final boolean loop) {
getState().loop = loop;
} | 3.68 |
flink_ClockService_ofSystem | /**
* Creates a {@link ClockService} which assigns as current processing time the result of calling
* {@link System#currentTimeMillis()}.
*/
static ClockService ofSystem() {
return System::currentTimeMillis;
} | 3.68 |
framework_StringToBooleanConverter_getModelType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getModelType()
*/
@Override
public Class<Boolean> getModelType() {
return Boolean.class;
} | 3.68 |
morf_SqlDialect_buildParameterisedInsert | /**
* Creates an SQL statement to insert values with positional parameterised
* fields based on the insert statement specified.
*
* @param statement the insert statement to build an SQL query for
* @param metadata the metadata for the database
* @return a string containing a parameterised insert query for the specified
* table
*/
public String buildParameterisedInsert(InsertStatement statement, Schema metadata) {
String destinationTableName = statement.getTable().getName();
if (StringUtils.isBlank(destinationTableName)) {
throw new IllegalArgumentException("Cannot create parameterised SQL for a blank table");
}
if (metadata == null) {
throw new IllegalArgumentException("Cannot specify null for the source metadata");
}
if (!metadata.tableExists(destinationTableName)) {
throw new IllegalArgumentException("Cannot create parameterised SQL for table [" + destinationTableName
+ "] without metadata");
}
Table destinationTable = metadata.getTable(destinationTableName);
StringBuilder sqlBuilder = new StringBuilder();
StringBuilder values = new StringBuilder(") VALUES (");
// -- Work out the literal values...
//
Map<String, String> literalValueMap = new HashMap<>();
for (AliasedField f : statement.getFields()) {
literalValueMap.put(f.getAlias().toUpperCase(), literalValue(f));
}
for (Entry<String, AliasedField> value : statement.getFieldDefaults().entrySet()) {
literalValueMap.put(value.getKey().toUpperCase(), literalValue(value.getValue()));
}
// -- Add the preamble...
//
sqlBuilder.append(getSqlForInsertInto(statement));
sqlBuilder.append(tableNameWithSchemaName(statement.getTable()));
sqlBuilder.append(" (");
boolean first = true;
for (Column currentColumn : destinationTable.columns()) {
if (!first) {
sqlBuilder.append(", ");
values.append(", ");
}
first = false;
sqlBuilder.append(currentColumn.getName());
String literalValue = literalValueMap.get(currentColumn.getName().toUpperCase());
if (literalValue == null) {
values.append(getSqlFrom(new SqlParameter(currentColumn)));
} else {
values.append(literalValue);
}
}
values.append(")");
sqlBuilder.append(values);
return sqlBuilder.toString();
} | 3.68 |
rocketmq-connect_RocketMqStateManagementServiceImpl_initialize | /**
* initialize cb config
*
* @param config
*/
@Override
public void initialize(WorkerConfig config, RecordConverter converter) {
super.initialize(config, converter);
/**connector status store*/
this.connectorStatusStore = new MemoryBasedKeyValueStore<>();
/**task status store*/
this.taskStatusStore = new MemoryBasedKeyValueStore<>();
} | 3.68 |
AreaShop_FileManager_setGroupSetting | /**
* Set a setting for a group.
* @param group The group to set it for
* @param path The path to set
* @param setting The value to set
*/
public void setGroupSetting(RegionGroup group, String path, Object setting) {
groupsConfig.set(group.getName().toLowerCase() + "." + path, setting);
} | 3.68 |
querydsl_JPAExpressions_avg | /**
* Create an avg(col) expression
*
* @param col collection
* @return avg(col)
*/
public static <A extends Comparable<? super A>> ComparableExpression<A> avg(CollectionExpression<?,A> col) {
return Expressions.comparableOperation((Class) col.getParameter(0), Ops.QuantOps.AVG_IN_COL, (Expression<?>) col);
} | 3.68 |
hbase_AsyncRegionLocationCache_get | /**
* Gets the RegionLocations for a given region's startKey. This is a direct lookup; if the key
* does not exist in the cache, it will return null.
* @param startKey region start key to directly look up
*/
public RegionLocations get(byte[] startKey) {
return cache.get(startKey);
} | 3.68 |
MagicPlugin_MapController_getPlayerPortrait | /**
* Get an ItemStack that is a headshot of a player's skin.
*/
@Nullable
@Override
public ItemStack getPlayerPortrait(String worldName, String playerName, Integer priority, String photoLabel) {
photoLabel = photoLabel == null ? playerName : photoLabel;
String url = CompatibilityLib.getSkinUtils().getOnlineSkinURL(playerName);
if (url != null) {
MapView mapView = getURL(worldName, url, photoLabel, 8, 8, 40, 8, 8, 8, priority, playerName);
return getMapItem(photoLabel, mapView);
}
MapView mapView = getURL(worldName, null, photoLabel, 8, 8, 40, 8, 8, 8, priority, playerName);
return getMapItem(photoLabel, mapView);
} | 3.68 |
graphhopper_LMPreparationHandler_createPreparations | /**
* This method creates the landmark storages ready for landmark creation.
*/
List<PrepareLandmarks> createPreparations(List<LMConfig> lmConfigs, BaseGraph graph, EncodedValueLookup encodedValueLookup, LocationIndex locationIndex) {
LOGGER.info("Creating LM preparations, {}", getMemInfo());
List<LandmarkSuggestion> lmSuggestions = new ArrayList<>(lmSuggestionsLocations.size());
if (!lmSuggestionsLocations.isEmpty()) {
try {
for (String loc : lmSuggestionsLocations) {
lmSuggestions.add(LandmarkSuggestion.readLandmarks(loc, locationIndex));
}
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
List<PrepareLandmarks> preparations = new ArrayList<>();
for (LMConfig lmConfig : lmConfigs) {
Double maximumWeight = maximumWeights.get(lmConfig.getName());
if (maximumWeight == null)
throw new IllegalStateException("maximumWeight cannot be null. Default should be just negative. " +
"Couldn't find " + lmConfig.getName() + " in " + maximumWeights);
PrepareLandmarks prepareLandmarks = new PrepareLandmarks(graph.getDirectory(), graph, encodedValueLookup,
lmConfig, landmarkCount).
setLandmarkSuggestions(lmSuggestions).
setMaximumWeight(maximumWeight).
setLogDetails(logDetails);
if (minNodes > 1)
prepareLandmarks.setMinimumNodes(minNodes);
// using the area index we separate certain areas from each other but we do not change the base graph for this
// so that other algorithms still can route between these areas
if (areaIndex != null)
prepareLandmarks.setAreaIndex(areaIndex);
preparations.add(prepareLandmarks);
}
return preparations;
} | 3.68 |
hbase_ZKConfig_makeZKProps | /**
* Make a Properties object holding ZooKeeper config. Parses the corresponding config options from
* the HBase XML configs and generates the appropriate ZooKeeper properties.
* @param conf Configuration to read from.
* @return Properties holding mappings representing ZooKeeper config file.
*/
public static Properties makeZKProps(Configuration conf) {
return makeZKPropsFromHbaseConfig(conf);
} | 3.68 |
framework_LoginForm_setPasswordCaption | /**
* Set the caption of the password field. Note that the caption can only be
* set with this method before the login form has been initialized
* (attached).
* <p>
* As an alternative to calling this method, the method
* {@link #createPasswordField()} can be overridden.
*
* @param passwordCaption
* the caption for the password field
*/
public void setPasswordCaption(String passwordCaption) {
this.passwordCaption = passwordCaption;
} | 3.68 |
flink_Tuple21_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20), where the individual fields are
* the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ","
+ StringUtils.arrayAwareToString(this.f11)
+ ","
+ StringUtils.arrayAwareToString(this.f12)
+ ","
+ StringUtils.arrayAwareToString(this.f13)
+ ","
+ StringUtils.arrayAwareToString(this.f14)
+ ","
+ StringUtils.arrayAwareToString(this.f15)
+ ","
+ StringUtils.arrayAwareToString(this.f16)
+ ","
+ StringUtils.arrayAwareToString(this.f17)
+ ","
+ StringUtils.arrayAwareToString(this.f18)
+ ","
+ StringUtils.arrayAwareToString(this.f19)
+ ","
+ StringUtils.arrayAwareToString(this.f20)
+ ")";
} | 3.68 |
framework_VScrollTable_handleClickEvent | /**
* If there are registered click listeners, sends a click event and
* returns true. Otherwise, does nothing and returns false.
*
* @param event
* @param targetTdOrTr
* @param immediate
* Whether the event is sent immediately
* @return Whether a click event was sent
*/
private boolean handleClickEvent(Event event, Element targetTdOrTr,
boolean immediate) {
if (!client.hasEventListeners(VScrollTable.this,
TableConstants.ITEM_CLICK_EVENT_ID)) {
// Don't send an event if nobody is listening
return false;
}
// This row was clicked
client.updateVariable(paintableId, "clickedKey", "" + rowKey,
false);
if (getElement() == targetTdOrTr.getParentElement()) {
// A specific column was clicked
int childIndex = DOM.getChildIndex(getElement(),
targetTdOrTr);
String colKey = null;
colKey = tHead.getHeaderCell(childIndex).getColKey();
client.updateVariable(paintableId, "clickedColKey", colKey,
false);
}
MouseEventDetails details = MouseEventDetailsBuilder
.buildMouseEventDetails(event);
client.updateVariable(paintableId, "clickEvent",
details.toString(), immediate);
return true;
} | 3.68 |
flink_TriConsumerWithException_unchecked | /**
* Convert a {@link TriConsumerWithException} into a {@link TriConsumer}.
*
* @param triConsumerWithException TriConsumer with exception to convert into a {@link
* TriConsumer}.
* @param <A> first input type
* @param <B> second input type
* @param <C> third input type
* @return {@link TriConsumer} which rethrows all checked exceptions as unchecked.
*/
static <A, B, C> TriConsumer<A, B, C> unchecked(
TriConsumerWithException<A, B, C, ?> triConsumerWithException) {
return (A a, B b, C c) -> {
try {
triConsumerWithException.accept(a, b, c);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
};
} | 3.68 |
hudi_HoodieAvroUtils_fromJavaDate | /**
* Converts a Date to days since the epoch.
* <p>
* NOTE: This method should only be used in tests
*
* @VisibleForTesting
*/
public static int fromJavaDate(Date date) {
long millisUtc = date.getTime();
long millisLocal = millisUtc + TimeZone.getDefault().getOffset(millisUtc);
int julianDays = Math.toIntExact(Math.floorDiv(millisLocal, MILLIS_PER_DAY));
return julianDays;
} | 3.68 |
hadoop_XMLUtils_newSecureDocumentBuilderFactory | /**
* This method should be used if you need a {@link DocumentBuilderFactory}. Use this method
* instead of {@link DocumentBuilderFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link DocumentBuilderFactory} with secure configuration enabled
* @throws ParserConfigurationException if the {@code JAXP} parser does not support the
* secure configuration
*/
public static DocumentBuilderFactory newSecureDocumentBuilderFactory()
throws ParserConfigurationException {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
dbf.setFeature(DISALLOW_DOCTYPE_DECL, true);
dbf.setFeature(LOAD_EXTERNAL_DECL, false);
dbf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
dbf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
dbf.setFeature(CREATE_ENTITY_REF_NODES, false);
return dbf;
} | 3.68 |
flink_FromElementsFunction_checkCollection | /**
* Verifies that all elements in the collection are non-null, and are of the given class, or a
* subclass thereof.
*
* @param elements The collection to check.
* @param viewedAs The class to which the elements must be assignable to.
* @param <OUT> The generic type of the collection to be checked.
*/
public static <OUT> void checkCollection(Collection<OUT> elements, Class<OUT> viewedAs) {
checkIterable(elements, viewedAs);
} | 3.68 |
flink_ApiSpecGeneratorUtils_shouldBeDocumented | /**
* Checks whether the given endpoint should be documented.
*
* @param spec endpoint to check
* @return true if the endpoint should be documented
*/
public static boolean shouldBeDocumented(
MessageHeaders<
? extends RequestBody,
? extends ResponseBody,
? extends MessageParameters>
spec) {
return spec.getClass().getAnnotation(Documentation.ExcludeFromDocumentation.class) == null;
} | 3.68 |
flink_OptionalUtils_stream | /**
* Converts the given {@link Optional} into a {@link Stream}.
*
* <p>This is akin to {@code Optional#stream} available in JDK9+.
*/
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
public static <T> Stream<T> stream(Optional<T> opt) {
return opt.map(Stream::of).orElseGet(Stream::empty);
} | 3.68 |
flink_EvictingWindowReader_reduce | /**
* Reads window state generated using a {@link ReduceFunction}.
*
* @param uid The uid of the operator.
* @param function The reduce function used to create the window.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param reduceType The type information of the reduce function.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the reduce function.
* @param <OUT> The output type of the reduce function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException If savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataSet<OUT> reduce(
String uid,
ReduceFunction<T> function,
WindowReaderFunction<T, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<T> reduceType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator =
WindowReaderOperator.evictingWindow(
new ReduceEvictingWindowReaderFunction<>(readerFunction, function),
keyType,
windowSerializer,
reduceType,
env.getConfig());
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
hadoop_AbstractTask_setTimeout | /**
* Set Task Timeout in seconds.
* @param taskTimeout : Timeout in seconds
*/
@Override
public final void setTimeout(final long taskTimeout) {
this.timeout = taskTimeout;
} | 3.68 |
flink_PackagedProgramUtils_createJobGraph | /**
* Creates a {@link JobGraph} with a random {@link JobID} from the given {@link
* PackagedProgram}.
*
* @param packagedProgram to extract the JobGraph from
* @param configuration to use for the optimizer and job graph generator
* @param defaultParallelism for the JobGraph
* @param suppressOutput Whether to suppress stdout/stderr during interactive JobGraph creation.
* @return JobGraph extracted from the PackagedProgram
* @throws ProgramInvocationException if the JobGraph generation failed
*/
public static JobGraph createJobGraph(
PackagedProgram packagedProgram,
Configuration configuration,
int defaultParallelism,
boolean suppressOutput)
throws ProgramInvocationException {
return createJobGraph(
packagedProgram, configuration, defaultParallelism, null, suppressOutput);
} | 3.68 |
hbase_OrderedBytes_encodeNull | /**
* Encode a null value.
* @param dst The destination to which encoded digits are written.
* @param ord The {@link Order} to respect while encoding {@code val}.
* @return the number of bytes written.
*/
public static int encodeNull(PositionedByteRange dst, Order ord) {
dst.put(ord.apply(NULL));
return 1;
} | 3.68 |
flink_CheckpointConfig_setCheckpointStorage | /**
* Configures the application to write out checkpoint snapshots to the configured directory. See
* {@link FileSystemCheckpointStorage} for more details on checkpointing to a file system.
*
* @param checkpointDirectory The path to write checkpoint metadata to.
* @see #setCheckpointStorage(String)
*/
@PublicEvolving
public void setCheckpointStorage(Path checkpointDirectory) {
Preconditions.checkNotNull(checkpointDirectory, "Checkpoint directory must not be null");
this.storage = new FileSystemCheckpointStorage(checkpointDirectory);
} | 3.68 |
hbase_AccessChecker_requireAccess | /**
* Authorizes that the current user has any of the given permissions to access the table.
* @param user Active user to which authorization checks should be applied
* @param request Request type.
* @param tableName Table requested
* @param permissions Actions being requested
* @throws IOException if obtaining the current user fails
* @throws AccessDeniedException if user has no authorization
*/
public void requireAccess(User user, String request, TableName tableName, Action... permissions)
throws IOException {
AuthResult result = null;
for (Action permission : permissions) {
if (authManager.accessUserTable(user, tableName, permission)) {
result = AuthResult.allow(request, "Table permission granted", user, permission, tableName,
null, null);
break;
} else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName,
null, null);
}
}
logResult(result);
if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
}
} | 3.68 |
hadoop_SinglePendingCommit_getBucket | /** @return destination bucket. */
public String getBucket() {
return bucket;
} | 3.68 |
hbase_SegmentScanner_updateCurrent | /**
* Private internal method for iterating over the segment, skipping the cells with irrelevant MVCC
*/
protected void updateCurrent() {
Cell next = null;
try {
while (iter.hasNext()) {
next = iter.next();
if (next.getSequenceId() <= this.readPoint) {
current = next;
return;// skip irrelevant versions
}
// for backwardSeek() stay in the boundaries of a single row
if (stopSkippingKVsIfNextRow && segment.compareRows(next, stopSkippingKVsRow) > 0) {
current = null;
return;
}
} // end of while
current = null; // nothing found
} finally {
if (next != null) {
// in all cases, remember the last KV we iterated to, needed for reseek()
last = next;
}
}
} | 3.68 |
framework_ContainerOrderedWrapper_getItemIds | /*
* Gets the ID's of all Items stored in the Container Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public Collection<?> getItemIds() {
if (ordered) {
return ((Container.Ordered) container).getItemIds();
} else if (first == null) {
return new ArrayList<Object>();
} else {
List<Object> itemIds = new ArrayList<Object>();
itemIds.add(first);
Object current = first;
while (next.containsKey(current)) {
current = next.get(current);
itemIds.add(current);
}
return itemIds;
}
} | 3.68 |
hbase_MemStoreFlusher_flushRegion | /**
* Flush a region.
* @param region Region to flush.
* @param emergencyFlush Set if we are being force flushed. If true the region needs to be removed
* from the flush queue. If false, when we were called from the main flusher
* run loop and we got the entry to flush by calling poll on the flush queue
* (which removed it).
* @param families stores of region to flush.
* @return true if the region was successfully flushed, false otherwise. If false, there will be
* accompanying log messages explaining why the region was not flushed.
*/
private boolean flushRegion(HRegion region, boolean emergencyFlush, List<byte[]> families,
FlushLifeCycleTracker tracker) {
synchronized (this.regionsInQueue) {
FlushRegionEntry fqe = this.regionsInQueue.remove(region);
// Use the start time of the FlushRegionEntry if available
if (fqe != null && emergencyFlush) {
// Need to remove from region from delay queue. When NOT an
// emergencyFlush, then item was removed via a flushQueue.poll.
flushQueue.remove(fqe);
}
}
tracker.beforeExecution();
lock.readLock().lock();
final CompactSplit compactSplitThread = server.getCompactSplitThread();
try {
notifyFlushRequest(region, emergencyFlush);
FlushResult flushResult = region.flushcache(families, false, tracker);
boolean shouldCompact = flushResult.isCompactionNeeded();
// We just want to check the size
boolean shouldSplit = region.checkSplit().isPresent();
if (shouldSplit) {
compactSplitThread.requestSplit(region);
} else if (shouldCompact) {
compactSplitThread.requestSystemCompaction(region, Thread.currentThread().getName());
}
} catch (DroppedSnapshotException ex) {
// Cache flush can fail in a few places. If it fails in a critical
// section, we get a DroppedSnapshotException and a replay of wal
// is required. Currently the only way to do this is a restart of
// the server. Abort because hdfs is probably bad (HBASE-644 is a case
// where hdfs was bad but passed the hdfs check).
server.abort("Replay of WAL required. Forcing server shutdown", ex);
return false;
} catch (IOException ex) {
ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
LOG.error("Cache flush failed" + (region != null
? (" for region " + Bytes.toStringBinary(region.getRegionInfo().getRegionName()))
: ""), ex);
if (!server.checkFileSystem()) {
return false;
}
} finally {
lock.readLock().unlock();
wakeUpIfBlocking();
tracker.afterExecution();
}
return true;
} | 3.68 |
flink_PlanGenerator_registerCachedFilesWithPlan | /**
* Registers all files that were registered at this execution environment's cache registry
* with the given plan's cache registry.
*
* @param p The plan to register files at.
* @throws IOException Thrown if checks for existence and sanity fail.
*/
private void registerCachedFilesWithPlan(Plan p) throws IOException {
for (Tuple2<String, DistributedCache.DistributedCacheEntry> entry : cacheFile) {
p.registerCachedFile(entry.f0, entry.f1);
}
} | 3.68 |
hbase_DefaultHeapMemoryTuner_getTuneDirection | /**
* Determine best direction of tuning base on given context.
* @param context The tuner context.
* @return tuning direction.
*/
private StepDirection getTuneDirection(TunerContext context) {
StepDirection newTuneDirection = StepDirection.NEUTRAL;
long blockedFlushCount = context.getBlockedFlushCount();
long unblockedFlushCount = context.getUnblockedFlushCount();
long evictCount = context.getEvictCount();
long cacheMissCount = context.getCacheMissCount();
long totalFlushCount = blockedFlushCount + unblockedFlushCount;
float curMemstoreSize = context.getCurMemStoreSize();
float curBlockCacheSize = context.getCurBlockCacheSize();
StringBuilder tunerLog = new StringBuilder();
// We can consider memstore or block cache to be sufficient if
// we are using only a minor fraction of what have been already provided to it.
boolean earlyMemstoreSufficientCheck = totalFlushCount == 0
|| context.getCurMemStoreUsed() < curMemstoreSize * sufficientMemoryLevel;
boolean earlyBlockCacheSufficientCheck =
evictCount == 0 || context.getCurBlockCacheUsed() < curBlockCacheSize * sufficientMemoryLevel;
if (earlyMemstoreSufficientCheck && earlyBlockCacheSufficientCheck) {
// Both memstore and block cache memory seems to be sufficient. No operation required.
newTuneDirection = StepDirection.NEUTRAL;
tunerLog.append("Going to do nothing because no changes are needed.");
} else if (earlyMemstoreSufficientCheck) {
// Increase the block cache size and corresponding decrease in memstore size.
newTuneDirection = StepDirection.INCREASE_BLOCK_CACHE_SIZE;
tunerLog.append("Going to increase the block cache size.");
} else if (earlyBlockCacheSufficientCheck) {
// Increase the memstore size and corresponding decrease in block cache size.
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append("Going to increase the memstore size.");
} else {
// Early checks for sufficient memory failed. Tuning memory based on past statistics.
// Boolean indicator to show if we need to revert previous step or not.
boolean isReverting = false;
switch (prevTuneDirection) {
// Here we are using number of evictions rather than cache misses because it is more
// strong indicator for deficient cache size. Improving caching is what we
// would like to optimize for in steady state.
case INCREASE_BLOCK_CACHE_SIZE:
if (
(double) evictCount > rollingStatsForEvictions.getMean() || (double) totalFlushCount
> rollingStatsForFlushes.getMean() + rollingStatsForFlushes.getDeviation() / 2.00
) {
// Reverting previous step as it was not useful.
// Tuning failed to decrease evictions or tuning resulted in large number of flushes.
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append("We will revert previous tuning");
if ((double) evictCount > rollingStatsForEvictions.getMean()) {
tunerLog.append(" because we could not decrease evictions sufficiently.");
} else {
tunerLog.append(" because the number of flushes rose significantly.");
}
isReverting = true;
}
break;
case INCREASE_MEMSTORE_SIZE:
if (
(double) totalFlushCount > rollingStatsForFlushes.getMean()
|| (double) evictCount > rollingStatsForEvictions.getMean()
+ rollingStatsForEvictions.getDeviation() / 2.00
) {
// Reverting previous step as it was not useful.
// Tuning failed to decrease flushes or tuning resulted in large number of evictions.
newTuneDirection = StepDirection.INCREASE_BLOCK_CACHE_SIZE;
tunerLog.append("We will revert previous tuning");
if ((double) totalFlushCount > rollingStatsForFlushes.getMean()) {
tunerLog.append(" because we could not decrease flushes sufficiently.");
} else {
tunerLog.append(" because number of evictions rose significantly.");
}
isReverting = true;
}
break;
default:
// Last step was neutral; revert does not apply here.
break;
}
// If we are not reverting. We try to tune memory sizes by looking at cache misses / flushes.
if (!isReverting) {
// mean +- deviation*0.8 is considered to be normal
// below it is considered low and above it is considered high.
// We can safely assume that the numbers of cache misses and flushes are normally distributed over
// past periods and hence on all the above mentioned classes (normal, high and low)
// are likely to occur with probability 56%, 22%, 22% respectively. Hence there is at
// least ~10% probability that we will not fall in NEUTRAL step.
// This optimization solution is feedback based and we revert when we
// don't find our steps helpful. Hence we want to do tuning only when we have clear
// indications, because too much unnecessary tuning may affect the performance of the cluster.
if (
(double) cacheMissCount
< rollingStatsForCacheMisses.getMean()
- rollingStatsForCacheMisses.getDeviation() * 0.80
&& (double) totalFlushCount
< rollingStatsForFlushes.getMean() - rollingStatsForFlushes.getDeviation() * 0.80
) {
// Everything is fine no tuning required
newTuneDirection = StepDirection.NEUTRAL;
} else if (
(double) cacheMissCount
> rollingStatsForCacheMisses.getMean()
+ rollingStatsForCacheMisses.getDeviation() * 0.80
&& (double) totalFlushCount
< rollingStatsForFlushes.getMean() - rollingStatsForFlushes.getDeviation() * 0.80
) {
// more misses , increasing cache size
newTuneDirection = StepDirection.INCREASE_BLOCK_CACHE_SIZE;
tunerLog.append(
"Going to increase block cache size due to increase in number of cache misses.");
} else if (
(double) cacheMissCount
< rollingStatsForCacheMisses.getMean()
- rollingStatsForCacheMisses.getDeviation() * 0.80
&& (double) totalFlushCount
> rollingStatsForFlushes.getMean() + rollingStatsForFlushes.getDeviation() * 0.80
) {
// more flushes , increasing memstore size
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append("Going to increase memstore size due to increase in number of flushes.");
} else if (blockedFlushCount > 0 && prevTuneDirection == StepDirection.NEUTRAL) {
// we do not want blocked flushes
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append(
"Going to increase memstore size due to" + blockedFlushCount + " blocked flushes.");
} else {
// Default. Not enough facts to do tuning.
tunerLog.append(
"Going to do nothing because we " + "could not determine best tuning direction");
newTuneDirection = StepDirection.NEUTRAL;
}
}
}
// Log NEUTRAL decisions at DEBUG, because they are the most frequent and not that interesting.
// Log other decisions at INFO because they are making meaningful operational changes.
switch (newTuneDirection) {
case NEUTRAL:
if (LOG.isDebugEnabled()) {
LOG.debug(tunerLog.toString());
}
break;
default:
LOG.info(tunerLog.toString());
break;
}
return newTuneDirection;
} | 3.68 |
flink_EntropyInjector_createEntropyAware | /**
* Handles entropy injection across regular and entropy-aware file systems.
*
* <p>If the given file system is entropy-aware (i.e. implements {@link
* EntropyInjectingFileSystem}), then this method replaces the entropy marker in the path with
* random characters. The entropy marker is defined by {@link
* EntropyInjectingFileSystem#getEntropyInjectionKey()}.
*
* <p>If the given file system does not implement {@code EntropyInjectingFileSystem}, then this
* method delegates to {@link FileSystem#create(Path, WriteMode)} and returns the same path in
* the resulting {@code OutputStreamAndPath}.
*/
public static OutputStreamAndPath createEntropyAware(
FileSystem fs, Path path, WriteMode writeMode) throws IOException {
final Path processedPath = addEntropy(fs, path);
// create the stream on the original file system to let the safety net
// take its effect
final FSDataOutputStream out = fs.create(processedPath, writeMode);
return new OutputStreamAndPath(out, processedPath);
} | 3.68 |
hadoop_FederationStateStoreUtils_setProperty | /**
* Sets a specific value for a specific property of
* <code>HikariDataSource</code> SQL connections.
*
* @param dataSource the <code>HikariDataSource</code> connections
* @param property the property to set
* @param value the value to set
*/
public static void setProperty(HikariDataSource dataSource, String property,
String value) {
LOG.debug("Setting property {} with value {}", property, value);
if (property != null && !property.isEmpty() && value != null) {
dataSource.addDataSourceProperty(property, value);
}
} | 3.68 |
framework_VaadinSession_getNextUIid | /**
* Creates a new unique id for a UI.
*
* @return a unique UI id
*/
public int getNextUIid() {
assert hasLock();
return nextUIId++;
} | 3.68 |
morf_DatabaseMetaDataProvider_loadTableColumns | /**
* Loads the columns for the given table name.
*
* @param tableName Name of the table.
* @param primaryKey Map of respective positions by column names.
* @return List of table columns.
*/
protected List<Column> loadTableColumns(RealName tableName, Map<AName, Integer> primaryKey) {
final Collection<ColumnBuilder> originalColumns = allColumns.get().get(tableName).values();
return createColumnsFrom(originalColumns, primaryKey);
} | 3.68 |
querydsl_PointExpression_m | /**
* The m-coordinate value for this Point, if it has one. Returns NIL otherwise.
*
* @return m-coordinate
*/
public NumberExpression<Double> m() {
if (m == null) {
m = Expressions.numberOperation(Double.class, SpatialOps.M, mixin);
}
return m;
} | 3.68 |
flink_BinarySegmentUtils_getLong | /**
* get long from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static long getLong(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getLong(offset);
} else {
return getLongMultiSegments(segments, offset);
}
} | 3.68 |
hbase_MasterObserver_preGetRSGroupInfoOfServer | /**
* Called before getting region server group info of the passed server.
* @param ctx the environment to interact with the framework and master
* @param server server to get RSGroupInfo for
*/
default void preGetRSGroupInfoOfServer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final Address server) throws IOException {
} | 3.68 |
framework_VaadinPortletService_getCurrentResponse | /**
* Gets the currently processed Vaadin portlet response. The current
* response is automatically defined when the request is started. The
* current response can not be used in e.g. background threads because of
* the way server implementations reuse response instances.
*
* @return the current Vaadin portlet response instance if available,
* otherwise <code>null</code>
*
*/
public static VaadinPortletResponse getCurrentResponse() {
return (VaadinPortletResponse) VaadinService.getCurrentResponse();
} | 3.68 |
hbase_ZKUtil_updateExistingNodeData | /**
* Update the data of an existing node with the expected version to have the specified data.
* Throws an exception if there is a version mismatch or some other problem. Sets no watches under
* any conditions.
* @param zkw zk reference
* @param znode the path to the ZNode
* @param data the data to store in ZooKeeper
* @param expectedVersion the expected version
* @throws KeeperException if unexpected zookeeper exception
* @throws KeeperException.BadVersionException if version mismatch
* @deprecated Unused
*/
@Deprecated
public static void updateExistingNodeData(ZKWatcher zkw, String znode, byte[] data,
int expectedVersion) throws KeeperException {
try {
zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion);
} catch (InterruptedException ie) {
zkw.interruptedException(ie);
}
} | 3.68 |
hadoop_SuccessData_getMetrics | /**
* @return any metrics.
*/
public Map<String, Long> getMetrics() {
return metrics;
} | 3.68 |
framework_AbstractDateField_handleUnparsableDateString | /**
* This method is called to handle a non-empty date string from the client
* if the client could not parse it as a Date.
*
* By default, an error result is returned whose error message is
* {@link #getParseErrorMessage()}.
*
* This can be overridden to handle conversions, to return a result with
* {@code null} value (equivalent to empty input) or to return a custom
* error.
*
* @param dateString
* date string to handle
* @return result that contains parsed Date as a value or an error
*/
protected Result<T> handleUnparsableDateString(String dateString) {
return Result.error(getParseErrorMessage());
} | 3.68 |
hadoop_ApplicationMaster_isDataNode | /**
* Return true iff {@code containerId} represents a DataNode container.
*/
private boolean isDataNode(ContainerId containerId) {
return datanodeContainers.containsKey(containerId);
} | 3.68 |
dubbo_DubboProtocol_initClient | /**
* Create new connection
*
* @param url
*/
private ExchangeClient initClient(URL url) {
/*
* Instance of url is InstanceAddressURL, so addParameter actually adds parameters into ServiceInstance,
* which means params are shared among different services. Since client is shared among services this is currently not a problem.
*/
String str = url.getParameter(CLIENT_KEY, url.getParameter(SERVER_KEY, DEFAULT_REMOTING_CLIENT));
// BIO is not allowed since it has severe performance issue.
if (StringUtils.isNotEmpty(str)
&& !url.getOrDefaultFrameworkModel()
.getExtensionLoader(Transporter.class)
.hasExtension(str)) {
throw new RpcException("Unsupported client type: " + str + "," + " supported client type is "
+ StringUtils.join(
url.getOrDefaultFrameworkModel()
.getExtensionLoader(Transporter.class)
.getSupportedExtensions(),
" "));
}
try {
ScopeModel scopeModel = url.getScopeModel();
int heartbeat = UrlUtils.getHeartbeat(url);
// Replace InstanceAddressURL with ServiceConfigURL.
url = new ServiceConfigURL(
DubboCodec.NAME,
url.getUsername(),
url.getPassword(),
url.getHost(),
url.getPort(),
url.getPath(),
url.getAllParameters());
url = url.addParameter(CODEC_KEY, DubboCodec.NAME);
// enable heartbeat by default
url = url.addParameterIfAbsent(HEARTBEAT_KEY, Integer.toString(heartbeat));
url = url.setScopeModel(scopeModel);
// connection should be lazy
return url.getParameter(LAZY_CONNECT_KEY, false)
? new LazyConnectExchangeClient(url, requestHandler)
: Exchangers.connect(url, requestHandler);
} catch (RemotingException e) {
throw new RpcException("Fail to create remoting client for service(" + url + "): " + e.getMessage(), e);
}
} | 3.68 |
framework_MockApplicationConnection_getLastCsrfTokenReceiver | /**
* Provide the last token received from the server. <br/>
* We added this to test the changes made to CSRF token handling.
*
* @see CsrfTokenDisabled
*/
public String getLastCsrfTokenReceiver() {
return getMessageHandler().lastCsrfTokenReceiver;
} | 3.68 |
framework_BeanContainer_addItemAfter | /**
* Adds the bean after the given item id.
*
* @see Container.Ordered#addItemAfter(Object, Object)
*/
@Override
public BeanItem<BEANTYPE> addItemAfter(IDTYPE previousItemId,
IDTYPE newItemId, BEANTYPE bean) {
if (newItemId != null && bean != null) {
return super.addItemAfter(previousItemId, newItemId, bean);
} else {
return null;
}
} | 3.68 |
flink_ExecNodeBase_getContextFromAnnotation | /**
* Retrieves the default context from the {@link ExecNodeMetadata} annotation to be serialized
* into the JSON plan.
*/
@JsonProperty(value = FIELD_NAME_TYPE, access = JsonProperty.Access.READ_ONLY, index = 1)
protected final ExecNodeContext getContextFromAnnotation() {
return isCompiled ? context : ExecNodeContext.newContext(this.getClass()).withId(getId());
} | 3.68 |
morf_ValueConverters_bigDecimalValue | /**
* Naive implementation which requires a string conversion. Subtypes define more efficient implementations.
*/
@Override
public BigDecimal bigDecimalValue(T value) {
return new BigDecimal(value.toString());
} | 3.68 |
framework_FilesystemContainer_setFilter | /**
* Sets the file filter used to limit the files in this container.
*
* @param extension
* the Filename extension (w/o separator) to limit the files in
* container.
*/
public void setFilter(String extension) {
filter = new FileExtensionFilter(extension);
} | 3.68 |
hudi_BaseHoodieWriteClient_inlineScheduleCompaction | /**
* Schedules compaction inline.
* @param extraMetadata extra metadata to be used.
* @return compaction instant if scheduled.
*/
protected Option<String> inlineScheduleCompaction(Option<Map<String, String>> extraMetadata) {
return scheduleCompaction(extraMetadata);
} | 3.68 |
zxing_BitArray_getBitArray | /**
* @return underlying array of ints. The first element holds the first 32 bits, and the least
* significant bit is bit 0.
*/
public int[] getBitArray() {
return bits;
} | 3.68 |
MagicPlugin_PreLoadEvent_registerAttributeProvider | /**
* Register an AttributeProvider, for adding custom attribute support to spells and mages.
*
* @param provider The provider to add.
*/
public void registerAttributeProvider(AttributeProvider provider) {
attributeProviders.add(provider);
} | 3.68 |
framework_Upload_removeProgressListener | /**
* Removes the upload progress event listener.
*
* @param listener
* the progress listener to be removed
*/
@Deprecated
public void removeProgressListener(ProgressListener listener) {
if (progressListeners != null) {
progressListeners.remove(listener);
}
} | 3.68 |
flink_BuiltInFunctionDefinition_runtimeClass | /** Specifies the runtime class implementing this {@link BuiltInFunctionDefinition}. */
public Builder runtimeClass(String runtimeClass) {
this.runtimeClass = runtimeClass;
return this;
} | 3.68 |
flink_CatalogTable_getSnapshot | /** Returns the snapshot specified for the table, or Optional.empty() if not specified. */
default Optional<Long> getSnapshot() {
return Optional.empty();
} | 3.68 |
Activiti_ActivitiEventDispatcherImpl_extractBpmnModelFromEvent | /**
   * In case no process-context is active, this method attempts to extract a process-definition based on the event. In case it's an event related to an entity, this can be deduced by inspecting the
   * entity, without additional queries to the database.
*
* If not an entity-related event, the process-definition will be retrieved based on the processDefinitionId (if filled in). This requires an additional query to the database in case not already
* cached. However, queries will only occur when the definition is not yet in the cache, which is very unlikely to happen, unless evicted.
*
* @param event
* @return
*/
protected BpmnModel extractBpmnModelFromEvent(ActivitiEvent event) {
BpmnModel result = null;
if (result == null && event.getProcessDefinitionId() != null) {
ProcessDefinition processDefinition = ProcessDefinitionUtil.getProcessDefinition(event.getProcessDefinitionId(), true);
if (processDefinition != null) {
result = Context.getProcessEngineConfiguration().getDeploymentManager().resolveProcessDefinition(processDefinition).getBpmnModel();
}
}
return result;
} | 3.68 |
pulsar_ConsumerConfiguration_getCryptoFailureAction | /**
* @return The ConsumerCryptoFailureAction
*/
public ConsumerCryptoFailureAction getCryptoFailureAction() {
return conf.getCryptoFailureAction();
} | 3.68 |
morf_MathsField_deepCopyInternal | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
*/
@Override
protected AliasedField deepCopyInternal(DeepCopyTransformation transformer) {
return new MathsField(this.getAlias(), transformer.deepCopy(leftField), operator, transformer.deepCopy(rightField));
} | 3.68 |
querydsl_SimpleExpression_countDistinct | /**
* Get the {@code count(distinct this)} expression
*
* @return count(distinct this)
*/
public NumberExpression<Long> countDistinct() {
if (countDistinct == null) {
countDistinct = Expressions.numberOperation(Long.class, Ops.AggOps.COUNT_DISTINCT_AGG, mixin);
}
return countDistinct;
} | 3.68 |
framework_KeyMapper_key | /**
* Gets key for an object.
*
* @param o
* the object.
*/
@Override
public String key(V o) {
if (o == null) {
return "null";
}
// If the object is already mapped, use existing key
Object id = identifierGetter.apply(o);
String key = objectIdKeyMap.get(id);
if (key != null) {
return key;
}
// If the object is not yet mapped, map it
key = createKey();
objectIdKeyMap.put(id, key);
keyObjectMap.put(key, o);
return key;
} | 3.68 |
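A minimal usage sketch of the mapping behaviour above (a hypothetical illustration; it assumes Vaadin's no-argument KeyMapper constructor, which uses the object itself as its identifier):

import com.vaadin.server.KeyMapper;

KeyMapper<String> mapper = new KeyMapper<>();
String key = mapper.key("item-a");      // first call assigns and returns a new key
String sameKey = mapper.key("item-a");  // subsequent calls return the same key for the same object
String nullKey = mapper.key(null);      // null always maps to the literal "null"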
hadoop_OpenFileCtxCache_getEntryToEvict | /**
* The entry to be evicted is based on the following rules:<br>
   * 1. If the OpenFileCtx has any pending task, it will not be chosen.<br>
   * 2. If there is an inactive OpenFileCtx, the first one found is evicted. <br>
   * 3. For OpenFileCtx entries that don't belong to group 1 or 2, the idlest one
   * is selected. If it has been idle longer than OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT, it
   * will be evicted. Otherwise, the whole eviction request fails.
*/
@VisibleForTesting
Entry<FileHandle, OpenFileCtx> getEntryToEvict() {
Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
.iterator();
if (LOG.isTraceEnabled()) {
LOG.trace("openFileMap size:" + size());
}
Entry<FileHandle, OpenFileCtx> idlest = null;
while (it.hasNext()) {
Entry<FileHandle, OpenFileCtx> pairs = it.next();
OpenFileCtx ctx = pairs.getValue();
if (!ctx.getActiveState()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Got one inactive stream: " + ctx);
}
return pairs;
}
if (ctx.hasPendingWork()) {
// Always skip files with pending work.
continue;
}
if (idlest == null) {
idlest = pairs;
} else {
if (ctx.getLastAccessTime() < idlest.getValue().getLastAccessTime()) {
idlest = pairs;
}
}
}
if (idlest == null) {
LOG.warn("No eviction candidate. All streams have pending work.");
return null;
} else {
long idleTime = Time.monotonicNow()
- idlest.getValue().getLastAccessTime();
if (idleTime < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) {
if (LOG.isDebugEnabled()) {
LOG.debug("idlest stream's idle time:" + idleTime);
}
LOG.warn("All opened streams are busy, can't remove any from cache.");
return null;
} else {
return idlest;
}
}
} | 3.68 |
hadoop_AzureBlobFileSystemStore_generateContinuationTokenForXns | // generate continuation token for xns account
private String generateContinuationTokenForXns(final String firstEntryName) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(firstEntryName)
&& !firstEntryName.startsWith(AbfsHttpConstants.ROOT_PATH),
"startFrom must be a dir/file name and it can not be a full path");
StringBuilder sb = new StringBuilder();
sb.append(firstEntryName).append("#$").append("0");
CRC64 crc64 = new CRC64();
StringBuilder token = new StringBuilder();
token.append(crc64.compute(sb.toString().getBytes(StandardCharsets.UTF_8)))
.append(SINGLE_WHITE_SPACE)
.append("0")
.append(SINGLE_WHITE_SPACE)
.append(firstEntryName);
return Base64.encode(token.toString().getBytes(StandardCharsets.UTF_8));
} | 3.68 |
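A self-contained sketch of the continuation-token layout built above; java.util.zip.CRC32 stands in for the internal CRC64 helper, so the checksum value differs from the real one, but the "<checksum> <index> <name>" structure and the Base64 step match the code:

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.zip.CRC32;

public class ContinuationTokenSketch {
    public static void main(String[] args) {
        String firstEntryName = "dir1";  // hypothetical first entry name
        CRC32 crc = new CRC32();         // stand-in for the CRC64 used by the store
        crc.update((firstEntryName + "#$" + "0").getBytes(StandardCharsets.UTF_8));
        String token = crc.getValue() + " " + "0" + " " + firstEntryName;
        System.out.println(Base64.getEncoder().encodeToString(token.getBytes(StandardCharsets.UTF_8)));
    }
}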
dubbo_AsyncRpcResult_setValue | /**
 * A CompletableFuture can only be completed once, so trying to update the result of an already completed
 * CompletableFuture has no effect. To avoid this problem, we check the completion status of this future before
 * updating its value.
 * <p>
 * Note that trying to give an uncompleted CompletableFuture a new value may still face a race condition, because
 * the background thread watching the real result may also change the status of this CompletableFuture. As a
 * result, you may lose the value you expected to set.
*
* @param value
*/
@Override
public void setValue(Object value) {
try {
if (responseFuture.isDone()) {
responseFuture.get().setValue(value);
} else {
AppResponse appResponse = new AppResponse(invocation);
appResponse.setValue(value);
responseFuture.complete(appResponse);
}
} catch (Exception e) {
// This should not happen in normal request process;
logger.error(
PROXY_ERROR_ASYNC_RESPONSE,
"",
"",
"Got exception when trying to fetch the underlying result from AsyncRpcResult.");
throw new RpcException(e);
}
} | 3.68 |
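A standalone sketch of the CompletableFuture behaviour the javadoc above relies on: a future can only be completed once, so a second complete() call is ignored, which is why setValue() checks isDone() and mutates the existing AppResponse instead:

import java.util.concurrent.CompletableFuture;

public class CompleteOnceSketch {
    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();
        future.complete("first");
        boolean accepted = future.complete("second"); // false: already completed, value unchanged
        System.out.println(accepted + " / " + future.join()); // prints "false / first"
    }
}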
morf_TableHelper_indexWithName | /**
* Finds an index with the specified name on the specified table.
*
* @param table the table to find the index in.
* @param indexName the name of the index to look for
* @return the matching index, or null if one cannot be found
*/
public static Index indexWithName(Table table, String indexName) {
for (Index currentIndex : table.indexes()) {
if (currentIndex.getName().equalsIgnoreCase(indexName)) {
return currentIndex;
}
}
return null;
} | 3.68 |
framework_ComputedStyle_getHeight | /**
* Returns the current height from the DOM.
*
* @since 7.5.1
* @return the computed height
*/
public double getHeight() {
return getDoubleProperty("height");
} | 3.68 |
hadoop_CipherSuite_convert | /**
   * Converts a name to a CipherSuite. {@link #algoBlockSize} is fixed for a
   * given cipher suite, so only the name needs to be compared.
* @param name cipher suite name
* @return CipherSuite cipher suite
*/
public static CipherSuite convert(String name) {
CipherSuite[] suites = CipherSuite.values();
for (CipherSuite suite : suites) {
if (suite.getName().equals(name)) {
return suite;
}
}
throw new IllegalArgumentException("Invalid cipher suite name: " + name);
} | 3.68 |
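A hypothetical usage sketch (the suite name string is an assumption based on common Hadoop cipher suite names, not taken from this document):

// returns the matching enum constant, or throws IllegalArgumentException for an unknown name
CipherSuite suite = CipherSuite.convert("AES/CTR/NoPadding");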
hbase_Chunk_reset | /**
   * Reset the offset to UNINITIALIZED before reusing an old chunk.
*/
void reset() {
if (nextFreeOffset.get() != UNINITIALIZED) {
nextFreeOffset.set(UNINITIALIZED);
allocCount.set(0);
}
} | 3.68 |
flink_MessageParameters_resolveUrl | /**
* Resolves the given URL (e.g "jobs/:jobid") using the given path/query parameters.
*
* <p>This method will fail with an {@link IllegalStateException} if any mandatory parameter was
* not resolved.
*
* <p>Unresolved optional parameters will be ignored.
*
* @param genericUrl URL to resolve
     * @param parameters the message parameters
* @return resolved url, e.g "/jobs/1234?state=running"
* @throws IllegalStateException if any mandatory parameter was not resolved
*/
public static String resolveUrl(String genericUrl, MessageParameters parameters) {
Preconditions.checkState(
parameters.isResolved(), "Not all mandatory message parameters were resolved.");
StringBuilder path = new StringBuilder(genericUrl);
StringBuilder queryParameters = new StringBuilder();
for (MessageParameter<?> pathParameter : parameters.getPathParameters()) {
if (pathParameter.isResolved()) {
int start = path.indexOf(':' + pathParameter.getKey());
final String pathValue =
Preconditions.checkNotNull(pathParameter.getValueAsString());
// only replace path parameters if they are present
if (start != -1) {
path.replace(start, start + pathParameter.getKey().length() + 1, pathValue);
}
}
}
boolean isFirstQueryParameter = true;
for (MessageQueryParameter<?> queryParameter : parameters.getQueryParameters()) {
if (queryParameter.isResolved()) {
if (isFirstQueryParameter) {
queryParameters.append('?');
isFirstQueryParameter = false;
} else {
queryParameters.append('&');
}
queryParameters.append(queryParameter.getKey());
queryParameters.append('=');
queryParameters.append(queryParameter.getValueAsString());
}
}
path.append(queryParameters);
return path.toString();
} | 3.68 |
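A standalone sketch of the path-parameter substitution performed above (not the Flink API; the template and values are made up for illustration):

public class ResolveUrlSketch {
    public static void main(String[] args) {
        StringBuilder path = new StringBuilder("/jobs/:jobid/vertices/:vertexid");
        String key = "jobid";
        String value = "1234";
        int start = path.indexOf(":" + key);
        if (start != -1) {
            // replace ":jobid" (key length + 1 for the leading colon) with the resolved value
            path.replace(start, start + key.length() + 1, value);
        }
        // unresolved parameters such as ":vertexid" are left in place; resolved query
        // parameters would then be appended as "?state=running"
        System.out.println(path); // prints /jobs/1234/vertices/:vertexid
    }
}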
pulsar_BrokerVersionFilter_filterAsync | /**
     * From the given set of available broker candidates, filter out the brokers running older versions.
*
* @param brokers The currently available brokers that have not already been filtered.
* @param context The load manager context.
*
*/
@Override
public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(Map<String, BrokerLookupData> brokers,
ServiceUnitId serviceUnit,
LoadManagerContext context) {
ServiceConfiguration conf = context.brokerConfiguration();
if (!conf.isPreferLaterVersions() || brokers.isEmpty()) {
return CompletableFuture.completedFuture(brokers);
}
Version latestVersion;
try {
latestVersion = getLatestVersionNumber(brokers);
if (log.isDebugEnabled()) {
log.debug("Latest broker version found was [{}]", latestVersion);
}
} catch (Exception ex) {
log.warn("Disabling PreferLaterVersions feature; reason: " + ex.getMessage());
return FutureUtil.failedFuture(
new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage()));
}
int numBrokersLatestVersion = 0;
int numBrokersOlderVersion = 0;
Iterator<Map.Entry<String, BrokerLookupData>> brokerIterator = brokers.entrySet().iterator();
while (brokerIterator.hasNext()) {
Map.Entry<String, BrokerLookupData> next = brokerIterator.next();
String brokerId = next.getKey();
String version = next.getValue().brokerVersion();
Version brokerVersionVersion = Version.valueOf(version);
if (brokerVersionVersion.equals(latestVersion)) {
log.debug("Broker [{}] is running the latest version ([{}])", brokerId, version);
numBrokersLatestVersion++;
} else {
log.info("Broker [{}] is running an older version ([{}]); latest version is [{}]",
brokerId, version, latestVersion);
numBrokersOlderVersion++;
brokerIterator.remove();
}
}
if (numBrokersOlderVersion == 0) {
log.info("All {} brokers are running the latest version [{}]", numBrokersLatestVersion, latestVersion);
}
return CompletableFuture.completedFuture(brokers);
} | 3.68 |
framework_IndexedContainer_hashCode | /**
         * Calculates an integer hash-code for the Property that's unique inside
* the Item containing the Property. Two different Properties inside the
* same Item contained in the same list always have different
* hash-codes, though Properties in different Items may have identical
* hash-codes.
*
* @return A locally unique hash-code as integer
*/
@Override
public int hashCode() {
return itemId.hashCode() ^ propertyId.hashCode();
} | 3.68 |
pulsar_DispatchRateLimiter_getPolicies | /**
     * @deprecated Avoid using the deprecated blocking method
     * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)};
     * use #{@link DispatchRateLimiter#getPoliciesAsync(BrokerService, String)} instead.
*/
@Deprecated
public static Optional<Policies> getPolicies(BrokerService brokerService, String topicName) {
final NamespaceName namespace = TopicName.get(topicName).getNamespaceObject();
return brokerService.pulsar().getPulsarResources().getNamespaceResources().getPoliciesIfCached(namespace);
} | 3.68 |