name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
open-banking-gateway_Xs2aConsentInfo_isWrongScaChallenge | /**
* Returns whether the SCA challenge result that was sent to the ASPSP was wrong.
*/
public boolean isWrongScaChallenge(Xs2aContext ctx) {
return null != ctx.getWrongAuthCredentials() && ctx.getWrongAuthCredentials();
} | 3.68 |
dubbo_ModuleConfigManager_findDuplicatedInterfaceConfig | /**
* Checks for a duplicated ReferenceConfig/ServiceConfig.
*
* @param config the interface config to check
* @return the previously cached config with the same unique service name, or {@code null} if there is no duplicate
*/
private AbstractInterfaceConfig findDuplicatedInterfaceConfig(AbstractInterfaceConfig config) {
String uniqueServiceName;
Map<String, AbstractInterfaceConfig> configCache;
if (config instanceof ReferenceConfigBase) {
return null;
} else if (config instanceof ServiceConfigBase) {
ServiceConfigBase serviceConfig = (ServiceConfigBase) config;
uniqueServiceName = serviceConfig.getUniqueServiceName();
configCache = serviceConfigCache;
} else {
throw new IllegalArgumentException(
"Illegal type of parameter 'config' : " + config.getClass().getName());
}
AbstractInterfaceConfig prevConfig = configCache.putIfAbsent(uniqueServiceName, config);
if (prevConfig != null) {
if (prevConfig == config) {
return prevConfig;
}
if (prevConfig.equals(config)) {
// Is there any problem with ignoring duplicate and equivalent but different ReferenceConfig instances?
if (logger.isWarnEnabled() && duplicatedConfigs.add(config)) {
logger.warn(COMMON_UNEXPECTED_EXCEPTION, "", "", "Ignore duplicated and equal config: " + config);
}
return prevConfig;
}
String configType = config.getClass().getSimpleName();
String msg = "Found multiple " + configType + "s with unique service name [" + uniqueServiceName
+ "], previous: " + prevConfig + ", later: " + config + ". " + "There can only be one instance of "
+ configType + " with the same triple (group, interface, version). "
+ "If multiple instances are required for the same interface, please use a different group or version.";
if (logger.isWarnEnabled() && duplicatedConfigs.add(config)) {
logger.warn(COMMON_UNEXPECTED_EXCEPTION, "", "", msg);
}
if (!this.ignoreDuplicatedInterface) {
throw new IllegalStateException(msg);
}
}
return prevConfig;
} | 3.68 |
zxing_DecoderResult_getECLevel | /**
* @return name of error correction level used, or {@code null} if not applicable
*/
public String getECLevel() {
return ecLevel;
} | 3.68 |
pulsar_FunctionRuntimeManager_findAssignment | /**
* Private methods for internal use. Should not be used outside of this class
*/
private Assignment findAssignment(String tenant, String namespace, String functionName, int instanceId) {
String fullyQualifiedInstanceId =
FunctionCommon.getFullyQualifiedInstanceId(tenant, namespace, functionName, instanceId);
for (Map.Entry<String, Map<String, Assignment>> entry : this.workerIdToAssignments.entrySet()) {
Map<String, Assignment> assignmentMap = entry.getValue();
Assignment existingAssignment = assignmentMap.get(fullyQualifiedInstanceId);
if (existingAssignment != null) {
return existingAssignment;
}
}
return null;
} | 3.68 |
graphhopper_PbfBlobResult_isComplete | /**
* Gets the complete flag.
* <p>
*
* @return True if complete.
*/
public boolean isComplete() {
return complete;
} | 3.68 |
hbase_KeyValue_getFamilyOffset | /** Returns Family offset */
int getFamilyOffset(int familyLenPosition) {
return familyLenPosition + Bytes.SIZEOF_BYTE;
} | 3.68 |
druid_StringUtils_stringToInteger | /**
* Parses the given string into an {@link Integer}.
*
* @param in the string to parse, may be {@code null} or blank
* @return the parsed value, or {@code null} if the input is null, blank, or not a valid integer
*/
public static Integer stringToInteger(String in) {
if (in == null) {
return null;
}
in = in.trim();
if (in.length() == 0) {
return null;
}
try {
return Integer.parseInt(in);
} catch (NumberFormatException e) {
LOG.warn("stringToInteger fail,string=" + in, e);
return null;
}
} | 3.68 |
hbase_ExplicitColumnTracker_reset | // Called between every row.
@Override
public void reset() {
this.index = 0;
this.column = this.columns[this.index];
for (ColumnCount col : this.columns) {
col.setCount(0);
}
resetTS();
} | 3.68 |
framework_MenuBarElement_clickItem | /**
* Clicks the item specified by a full path given as variable arguments.<br>
* Fails if the given path is not full (i.e. the last submenu is already
* opened and the given path contains only the last item).
* <p>
* Example:
* </p>
*
* <pre>
* // clicks on "File" item
* menuBarElement.click("File");
* // clicks on "Copy" item in "File" top level menu.
* menuBarElement.click("File", "Copy");
* </pre>
*
* @param path
* Array of items to click through
*/
public void clickItem(String... path) {
if (path.length > 1) {
closeAll();
}
for (String itemName : path) {
clickItem(itemName);
}
} | 3.68 |
morf_CaseInsensitiveString_equals | /**
* Direct instance comparison (we ensure that instances never get duplicated).
* Use {@link #equalsString(String)} to compare with strings.
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
return this == obj;
} | 3.68 |
framework_ConnectorTracker_getConnectorString | /**
* Returns a string with the connector name and id. Useful mostly for
* debugging and logging.
*
* @param connector
* The connector
* @return A string that describes the connector
*/
private String getConnectorString(ClientConnector connector) {
if (connector == null) {
return "(null)";
}
String connectorId;
try {
connectorId = connector.getConnectorId();
} catch (RuntimeException e) {
// This happens if the connector is not attached to the application.
// SHOULD not happen in this case but theoretically can.
connectorId = "@" + Integer.toHexString(connector.hashCode());
}
return connector.getClass().getName() + "(" + connectorId + ")";
} | 3.68 |
hmily_OriginTrackedYamlLoader_get | /**
* Get node tuple.
*
* @param nodeTuple the node tuple
* @return the node tuple
*/
public static NodeTuple get(final NodeTuple nodeTuple) {
Node keyNode = nodeTuple.getKeyNode();
Node valueNode = nodeTuple.getValueNode();
return new NodeTuple(KeyScalarNode.get(keyNode), valueNode);
} | 3.68 |
flink_KubernetesStateHandleStore_releaseAndTryRemove | /**
* Removes the key from the state ConfigMap and discards the corresponding state on external
* storage, if any.
*
* @param key Key to be removed from ConfigMap
* @return True if the state handle isn't listed anymore.
* @throws Exception if removing the key or discarding the state failed
*/
@Override
public boolean releaseAndTryRemove(String key) throws Exception {
checkNotNull(key, "Key in ConfigMap.");
final AtomicReference<RetrievableStateHandle<T>> stateHandleRefer = new AtomicReference<>();
final AtomicBoolean stateHandleDoesNotExist = new AtomicBoolean(false);
return updateConfigMap(
configMap -> {
final String content = configMap.getData().get(key);
if (content != null) {
try {
final StateHandleWithDeleteMarker<T> result =
deserializeStateHandle(content);
if (!result.isMarkedForDeletion()) {
// Mark the ConfigMap entry as deleting. This basically
// starts a "removal transaction" that allows us to retry
// the removal if needed.
configMap
.getData()
.put(
key,
serializeStateHandle(result.toDeleting()));
}
stateHandleRefer.set(result.getInner());
} catch (IOException e) {
logInvalidEntry(key, configMapName, e);
// Remove entry from the config map as we can't recover from
// this (the serialization would fail on the retry as well).
Objects.requireNonNull(configMap.getData().remove(key));
}
return Optional.of(configMap);
} else {
stateHandleDoesNotExist.set(true);
}
return Optional.empty();
})
.thenCompose(
updated -> {
if (updated && stateHandleRefer.get() != null) {
try {
stateHandleRefer.get().discardState();
return updateConfigMap(
configMap -> {
// Now we can safely commit the "removal
// transaction" by removing the entry from the
// ConfigMap.
configMap.getData().remove(key);
return Optional.of(configMap);
});
} catch (Exception e) {
throw new CompletionException(e);
}
}
return CompletableFuture.completedFuture(
stateHandleDoesNotExist.get() || updated);
})
.get();
} | 3.68 |
hbase_UserQuotaState_update | /**
* Perform an update of the quota state based on the other quota state object. (This operation is
* executed by the QuotaCache)
*/
@Override
public synchronized void update(final QuotaState other) {
super.update(other);
if (other instanceof UserQuotaState) {
UserQuotaState uOther = (UserQuotaState) other;
tableLimiters = updateLimiters(tableLimiters, uOther.tableLimiters);
namespaceLimiters = updateLimiters(namespaceLimiters, uOther.namespaceLimiters);
bypassGlobals = uOther.bypassGlobals;
} else {
tableLimiters = null;
namespaceLimiters = null;
bypassGlobals = false;
}
} | 3.68 |
hudi_MarkerUtils_readMarkersFromFile | /**
* Reads the markers stored in the underlying file.
*
* @param markersFilePath File path for the markers.
* @param conf Serializable config.
* @param ignoreException Whether to ignore IOException.
* @return Markers in a {@code Set} of String.
*/
public static Set<String> readMarkersFromFile(Path markersFilePath, SerializableConfiguration conf, boolean ignoreException) {
FSDataInputStream fsDataInputStream = null;
Set<String> markers = new HashSet<>();
try {
LOG.debug("Read marker file: " + markersFilePath);
FileSystem fs = markersFilePath.getFileSystem(conf.get());
fsDataInputStream = fs.open(markersFilePath);
markers = new HashSet<>(FileIOUtils.readAsUTFStringLines(fsDataInputStream));
} catch (IOException e) {
String errorMessage = "Failed to read MARKERS file " + markersFilePath;
if (ignoreException) {
LOG.warn(errorMessage + ". Ignoring the exception and continue.", e);
} else {
throw new HoodieIOException(errorMessage, e);
}
} finally {
closeQuietly(fsDataInputStream);
}
return markers;
} | 3.68 |
streampipes_InfluxDbStreamAdapter_getNewestTimestamp | // Returns the newest timestamp in the measurement as unix timestamp in Nanoseconds.
// If no entry is found, a SpRuntimeException is thrown
String getNewestTimestamp() throws SpRuntimeException {
List<List<Object>> queryResult = influxDbClient.query("SELECT * FROM " + influxDbClient.getMeasurement()
+ " ORDER BY time DESC LIMIT 1");
if (queryResult.size() > 0) {
return InfluxDbClient.getTimestamp((String) queryResult.get(0).get(0));
} else {
throw new SpRuntimeException("No entry found in query");
}
} | 3.68 |
morf_AbstractSqlDialectTest_getColumn | /**
* Utility method to get a column from the given table based on the column name.
*/
private Column getColumn(String tableName, String columnName) {
for (Column column : metadata.getTable(tableName).columns()) {
if (column.getName().equals(columnName)) {
return column;
}
}
return null;
} | 3.68 |
flink_UserDefinedFunction_toString | /** Returns the name of the UDF that is used for plan explanation and logging. */
@Override
public String toString() {
return getClass().getSimpleName();
} | 3.68 |
flink_QueryableStateClient_getExecutionConfig | /** Gets the {@link ExecutionConfig}. */
public ExecutionConfig getExecutionConfig() {
return executionConfig;
} | 3.68 |
flink_AvroParquetWriters_forGenericRecord | /**
* Creates a ParquetWriterFactory that accepts and writes Avro generic types. The Parquet
* writers will use the given schema to build and write the columnar data.
*
* @param schema The schema of the generic type.
*/
public static ParquetWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
final String schemaString = schema.toString();
final ParquetBuilder<GenericRecord> builder =
// Must override the lambda representation because of a bug in shading lambda
// serialization, see similar issue FLINK-28043 for more details.
new ParquetBuilder<GenericRecord>() {
@Override
public ParquetWriter<GenericRecord> createWriter(OutputFile out)
throws IOException {
return createAvroParquetWriter(schemaString, GenericData.get(), out);
}
};
return new ParquetWriterFactory<>(builder);
} | 3.68 |
dubbo_ClassUtils_isPrimitive | /**
* Checks whether the specified type is a primitive type or a simple type.
*
* @param type the type to test
* @return {@code true} if the type is a primitive or simple type, {@code false} otherwise
* @deprecated as 2.7.6, use {@link Class#isPrimitive()} plus {@link #isSimpleType(Class)} instead
*/
public static boolean isPrimitive(Class<?> type) {
return type != null && (type.isPrimitive() || isSimpleType(type));
} | 3.68 |
framework_AbstractOrderedLayoutConnector_needsFixedHeight | /**
* Does the layout need a fixed height?
*/
private boolean needsFixedHeight() {
boolean isVertical = getWidget().vertical;
if (isVertical) {
// Doesn't need height fix for vertical layouts
return false;
} else if (!isUndefinedHeight()) {
// Fix not needed unless the height is undefined
return false;
} else if (!hasChildrenWithRelativeHeight
&& !hasChildrenWithMiddleAlignment) {
// Already works if there are no relative heights or middle aligned
// children
return false;
}
return true;
} | 3.68 |
hadoop_TimelineDomain_setOwner | /**
* Set the domain owner. The user doesn't need to set it; it will
* automatically be set to the user who puts the domain.
*
* @param owner the domain owner
*/
public void setOwner(String owner) {
this.owner = owner;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWhereInSubqueryWithAllFields | /**
* Tests if a select with an IN operator and a sub-select containing all (*)
* fields from sub-table throws an exception.
*/
@Test
public void testSelectWhereInSubqueryWithAllFields() {
SelectStatement inStatement = new SelectStatement()
.from(new TableReference(TEST_TABLE))
.where(isNotNull(new FieldReference(INT_FIELD)));
exception.expect(IllegalArgumentException.class);
new SelectStatement()
.from(new TableReference(ALTERNATE_TABLE))
.where(in(new FieldReference(STRING_FIELD), inStatement));
} | 3.68 |
flink_ExecutionEnvironment_getConfig | /**
* Gets the config object that defines execution parameters.
*
* @return The environment's execution configuration.
*/
public ExecutionConfig getConfig() {
return config;
} | 3.68 |
pulsar_KerberosName_getHostName | /**
* Get the second component of the name.
* @return the second section of the Kerberos principal name, and may be null
*/
public String getHostName() {
return hostName;
} | 3.68 |
hadoop_CrcComposer_digest | /**
* Returns byte representation of composed CRCs; if no stripeLength was
* specified, the digest should be of length equal to exactly one CRC.
* Otherwise, the number of CRCs in the returned array is equal to the
* total sum bytesPerCrc divided by stripeLength. If the sum of bytesPerCrc
* is not a multiple of stripeLength, then the last CRC in the array
* corresponds to totalLength % stripeLength underlying data bytes.
*
* @return byte representation of composed CRCs.
*/
public byte[] digest() {
if (curPositionInStripe > 0) {
digestOut.write(CrcUtil.intToBytes(curCompositeCrc), 0, CRC_SIZE_BYTES);
curCompositeCrc = 0;
curPositionInStripe = 0;
}
byte[] digestValue = digestOut.toByteArray();
digestOut.reset();
return digestValue;
} | 3.68 |
flink_OneInputOperatorTransformation_assignTimestamps | /**
* Assigns an event time timestamp to each record. This value will be used when performing event
* time computations such as assigning windows.
*/
public OneInputOperatorTransformation<T> assignTimestamps(TimestampAssigner<T> assigner) {
this.timestamper = new TimestampAssignerWrapper<>(assigner);
return this;
} | 3.68 |
hbase_RegionInfoDisplay_getStartKeyForDisplay | /**
* Get the start key for display. Optionally hide the real start key.
* @return the startkey
*/
public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
if (displayKey) return ri.getStartKey();
return HIDDEN_START_KEY;
} | 3.68 |
hadoop_ErrorTranslation_maybeExtractIOException | /**
* Translate an exception if it or its inner exception is an
* IOException.
* If this condition is not met, null is returned.
* @param path path of operation.
* @param thrown exception
* @return a translated exception or null.
*/
public static IOException maybeExtractIOException(String path, Throwable thrown) {
if (thrown == null) {
return null;
}
// look inside
Throwable cause = thrown.getCause();
while (cause != null && cause.getCause() != null) {
cause = cause.getCause();
}
if (!(cause instanceof IOException)) {
return null;
}
// the cause can be extracted to an IOE.
// rather than just return it, we try to preserve the stack trace
// of the outer exception.
// as a new instance is created through reflection, the
// class of the returned instance will be that of the innermost,
// unless no suitable constructor is available.
final IOException ioe = (IOException) cause;
return wrapWithInnerIOE(path, thrown, ioe);
} | 3.68 |
hbase_ZKProcedureMemberRpcs_abort | /**
* Pass along the found abort notification to the listener
* @param abortZNode full znode path to the failed procedure information
*/
protected void abort(String abortZNode) {
LOG.debug("Aborting procedure member for znode " + abortZNode);
String opName = ZKUtil.getNodeName(abortZNode);
try {
byte[] data = ZKUtil.getData(zkController.getWatcher(), abortZNode);
// figure out the data we need to pass
ForeignException ee;
try {
if (data == null || data.length == 0) {
// ignore
return;
} else if (!ProtobufUtil.isPBMagicPrefix(data)) {
String msg = "Illegally formatted data in abort node for proc " + opName
+ ". Killing the procedure.";
LOG.error(msg);
// we got a remote exception, but we can't describe it so just return exn from here
ee = new ForeignException(getMemberName(), new IllegalArgumentException(msg));
} else {
data = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length);
ee = ForeignException.deserialize(data);
}
} catch (IOException e) {
LOG.warn("Got an error notification for op:" + opName
+ " but we can't read the information. Killing the procedure.");
// we got a remote exception, but we can't describe it so just return exn from here
ee = new ForeignException(getMemberName(), e);
}
this.member.receiveAbortProcedure(opName, ee);
} catch (KeeperException e) {
member.controllerConnectionFailure(
"Failed to get data for abort znode:" + abortZNode + zkController.getAbortZnode(), e,
opName);
} catch (InterruptedException e) {
LOG.warn("abort already in progress", e);
Thread.currentThread().interrupt();
}
} | 3.68 |
hbase_HFilePrettyPrinter_evictMobFilesIfNecessary | /**
* Evicts the cached mob files if the set is larger than the limit.
*/
private void evictMobFilesIfNecessary(Set<String> mobFileNames, int limit) {
if (mobFileNames.size() < limit) {
return;
}
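// Remove the first limit/2 entries (in the set's iteration order) to make room.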
int index = 0;
int evict = limit / 2;
Iterator<String> fileNamesItr = mobFileNames.iterator();
while (index < evict && fileNamesItr.hasNext()) {
fileNamesItr.next();
fileNamesItr.remove();
index++;
}
} | 3.68 |
hudi_BucketAssigners_create | /**
* Creates a {@code BucketAssigner}.
*
* @param taskID The task ID
* @param maxParallelism The max parallelism
* @param numTasks The number of tasks
* @param ignoreSmallFiles Whether to ignore the small files
* @param tableType The table type
* @param context The engine context
* @param config The configuration
* @return the bucket assigner instance
*/
public static BucketAssigner create(
int taskID,
int maxParallelism,
int numTasks,
boolean ignoreSmallFiles,
HoodieTableType tableType,
HoodieFlinkEngineContext context,
HoodieWriteConfig config) {
boolean delta = tableType.equals(HoodieTableType.MERGE_ON_READ);
WriteProfile writeProfile = WriteProfiles.singleton(ignoreSmallFiles, delta, config, context);
return new BucketAssigner(taskID, maxParallelism, numTasks, writeProfile, config);
} | 3.68 |
flink_SyntaxHighlightStyle_getCommentStyle | /**
* Returns the style for a SQL comments, such as {@literal /* This is a comment *}{@literal /}
* or {@literal -- End of line comment}.
*
* @return Style for SQL comments
*/
public AttributedStyle getCommentStyle() {
return commentStyle;
} | 3.68 |
hadoop_DBNameNodeConnector_getNodes | /**
* getNodes function returns a list of DiskBalancerDataNodes.
*
* @return Array of DiskBalancerDataNodes
*/
@Override
public List<DiskBalancerDataNode> getNodes() throws Exception {
Preconditions.checkNotNull(this.connector);
List<DiskBalancerDataNode> nodeList = new LinkedList<>();
DatanodeStorageReport[] reports = this.connector
.getLiveDatanodeStorageReport();
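// Convert each live datanode storage report into a DiskBalancerDataNode with its volume information attached.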
for (DatanodeStorageReport report : reports) {
DiskBalancerDataNode datanode = getBalancerNodeFromDataNode(
report.getDatanodeInfo());
getVolumeInfoFromStorageReports(datanode, report.getStorageReports());
nodeList.add(datanode);
}
return nodeList;
} | 3.68 |
hadoop_CrcComposer_newCrcComposer | /**
* Returns a CrcComposer which will collapse all ingested CRCs into a single
* value.
*
* @param type type.
* @param bytesPerCrcHint bytesPerCrcHint.
* @throws IOException raised on errors performing I/O.
* @return a CrcComposer which will collapse all ingested CRCs into a single value.
*/
public static CrcComposer newCrcComposer(
DataChecksum.Type type, long bytesPerCrcHint)
throws IOException {
return newStripedCrcComposer(type, bytesPerCrcHint, Long.MAX_VALUE);
} | 3.68 |
flink_HiveSourceBuilder_setPartitions | /**
* Sets the partitions to read in batch mode. By default, batch source reads all partitions in a
* hive table.
*/
public HiveSourceBuilder setPartitions(List<HiveTablePartition> partitions) {
this.partitions = partitions;
return this;
} | 3.68 |
hbase_FullTableBackupClient_snapshotCopy | /**
* Do snapshot copy.
* @param backupInfo backup info
* @throws Exception exception
*/
protected void snapshotCopy(BackupInfo backupInfo) throws Exception {
LOG.info("Snapshot copy is starting.");
// set overall backup phase: snapshot_copy
backupInfo.setPhase(BackupPhase.SNAPSHOTCOPY);
// call ExportSnapshot to copy files based on hbase snapshot for backup
// ExportSnapshot only support single snapshot export, need loop for multiple tables case
BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf);
// number of snapshots matches number of tables
float numOfSnapshots = backupInfo.getSnapshotNames().size();
LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied.");
for (TableName table : backupInfo.getTables()) {
// Currently we simply set the sub copy tasks by counting the table snapshot number, we can
// calculate the real files' size for the percentage in the future.
// backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
int res;
ArrayList<String> argsList = new ArrayList<>();
argsList.add("-snapshot");
argsList.add(backupInfo.getSnapshotName(table));
argsList.add("-copy-to");
argsList.add(backupInfo.getTableBackupDir(table));
if (backupInfo.getBandwidth() > -1) {
argsList.add("-bandwidth");
argsList.add(String.valueOf(backupInfo.getBandwidth()));
}
if (backupInfo.getWorkers() > -1) {
argsList.add("-mappers");
argsList.add(String.valueOf(backupInfo.getWorkers()));
}
String[] args = argsList.toArray(new String[0]);
String jobname = "Full-Backup_" + backupInfo.getBackupId() + "_" + table.getNameAsString();
if (LOG.isDebugEnabled()) {
LOG.debug("Setting snapshot copy job name to : " + jobname);
}
conf.set(JOB_NAME_CONF_KEY, jobname);
LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
res = copyService.copy(backupInfo, backupManager, conf, BackupType.FULL, args);
// if one snapshot export failed, do not continue for remained snapshots
if (res != 0) {
LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3]
+ " with reason code " + res);
}
conf.unset(JOB_NAME_CONF_KEY);
LOG.info("Snapshot copy " + args[1] + " finished.");
}
} | 3.68 |
hadoop_LocalJobOutputFiles_removeAll | /** Removes all of the files related to a task. */
public void removeAll() throws IOException {
conf.deleteLocalFiles(TASKTRACKER_OUTPUT);
} | 3.68 |
hadoop_LocalSASKeyGeneratorImpl_getRelativeBlobSASUri | /**
* Implementation for generation of Relative Path Blob SAS Uri.
*/
@Override
public URI getRelativeBlobSASUri(String accountName, String container,
String relativePath) throws SASKeyGenerationException {
CloudBlobContainer sc = null;
CloudBlobClient client = null;
CachedSASKeyEntry cacheKey = null;
try {
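// Serve a previously generated SAS URI from the cache, keyed by (account, container, relative path).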
cacheKey = new CachedSASKeyEntry(accountName, container, relativePath);
URI cacheResult = cache.get(cacheKey);
if (cacheResult != null) {
return cacheResult;
}
CloudStorageAccount account =
getSASKeyBasedStorageAccountInstance(accountName);
client = account.createCloudBlobClient();
sc = client.getContainerReference(container);
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException("Encountered URISyntaxException "
+ "while getting container references for container " + container
+ " inside storage account : " + accountName, uriSyntaxEx);
} catch (StorageException stoEx) {
throw new SASKeyGenerationException("Encountered StorageException while "
+ "getting container references for container " + container
+ " inside storage account : " + accountName, stoEx);
}
CloudBlockBlob blob = null;
try {
blob = sc.getBlockBlobReference(relativePath);
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException("Encountered URISyntaxException while "
+ "getting Block Blob references for container " + container
+ " inside storage account : " + accountName, uriSyntaxEx);
} catch (StorageException stoEx) {
throw new SASKeyGenerationException("Encountered StorageException while "
+ "getting Block Blob references for container " + container
+ " inside storage account : " + accountName, stoEx);
}
try {
URI sasKey = client.getCredentials().transformUri(blob.getUri());
cache.put(cacheKey, sasKey);
return sasKey;
} catch (StorageException stoEx) {
throw new SASKeyGenerationException("Encountered StorageException while "
+ "generating SAS key for Blob: " + relativePath + " inside "
+ "container : " + container + " in Storage Account : " + accountName,
stoEx);
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException("Encountered URISyntaxException "
+ "while generating SAS key for Blob: " + relativePath + " inside "
+ "container: " + container + " in Storage Account : " + accountName,
uriSyntaxEx);
}
} | 3.68 |
streampipes_PrintDebugFilter_getInstance | /**
* Returns the default instance for {@link PrintDebugFilter}, which dumps debug information to
* <code>System.out</code>
*/
public static PrintDebugFilter getInstance() {
return INSTANCE;
} | 3.68 |
flink_SetOperationFactory_create | /**
* Creates a valid algebraic operation.
*
* @param type type of operation to create
* @param left first relational operation of the operation
* @param right second relational operation of the operation
* @param all flag defining how duplicates should be handled
* @return creates a valid algebraic operation
*/
QueryOperation create(
SetQueryOperationType type, QueryOperation left, QueryOperation right, boolean all) {
failIfStreaming(type, all);
validateSetOperation(type, left, right);
return new SetQueryOperation(left, right, type, all, createCommonTableSchema(left, right));
} | 3.68 |
hadoop_FederationStateStoreFacade_getSubCluster | /**
* Updates the cache with the central {@link FederationStateStore} and returns
* the {@link SubClusterInfo} for the specified {@link SubClusterId}.
*
* @param subClusterId the identifier of the sub-cluster
* @param flushCache flag to indicate if the cache should be flushed or not
* @return the sub cluster information
* @throws YarnException if the call to the state store is unsuccessful
*/
public SubClusterInfo getSubCluster(final SubClusterId subClusterId,
final boolean flushCache) throws YarnException {
if (flushCache && federationCache.isCachingEnabled()) {
LOG.info("Flushing subClusters from cache and rehydrating from store,"
+ " most likely on account of RM failover.");
federationCache.removeSubCluster(false);
}
return getSubCluster(subClusterId);
} | 3.68 |
morf_MySqlDialect_postInsertWithPresetAutonumStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#postInsertWithPresetAutonumStatements(Table, SqlScriptExecutor, Connection, boolean)
*/
@Override
public void postInsertWithPresetAutonumStatements(Table table, SqlScriptExecutor executor, Connection connection, boolean insertingUnderAutonumLimit) {
repairAutoNumberStartPosition(table, executor, connection);
} | 3.68 |
morf_Builder_buildAll | /**
* Convenience method to call build() on a list of Builders.
*
* @param builders a list of AliasedFieldBuilder
* @return the result of calling build() on each builder
* @param <T> The type of instance built by the builders
*/
public static <T> ImmutableList<T> buildAll(Iterable<? extends Builder<? extends T>> builders) {
return FluentIterable.from(builders)
.transform(Helper.<T>buildAll()).toList();
} | 3.68 |
hadoop_CapacityOverTimePolicy_validate | /**
* The validation algorithm walks over the RLE-encoded allocation and, at
* every transition point (when the start or end of the checking window
* encounters a value in the RLE), checks whether the computed integral
* exceeds the quota limit. Note that this might not find the exact time of
* a violation, but if a violation exists it will find it. The advantage is
* a much lower number of checks as compared to time-slot by time-slot
* checks.
*
* @param plan the plan to validate against
* @param reservation the reservation allocation to test.
* @throws PlanningException if the validation fails.
*/
@Override
public void validate(Plan plan, ReservationAllocation reservation)
throws PlanningException {
// rely on NoOverCommitPolicy to check for: 1) user-match, 2) physical
// cluster limits, and 3) maxInst (via override of available)
try {
super.validate(plan, reservation);
} catch (PlanningException p) {
//wrap it in proper quota exception
throw new PlanningQuotaException(p);
}
long checkStart = reservation.getStartTime() - validWindow;
long checkEnd = reservation.getEndTime() + validWindow;
//---- check for integral violations of capacity --------
// Gather a view of what to check (curr allocation of user, minus old
// version of this reservation, plus new version)
RLESparseResourceAllocation consumptionForUserOverTime =
plan.getConsumptionForUserOverTime(reservation.getUser(),
checkStart, checkEnd);
ReservationAllocation old =
plan.getReservationById(reservation.getReservationId());
if (old != null) {
consumptionForUserOverTime =
RLESparseResourceAllocation.merge(plan.getResourceCalculator(),
plan.getTotalCapacity(), consumptionForUserOverTime,
old.getResourcesOverTime(checkStart, checkEnd), RLEOperator.add,
checkStart, checkEnd);
}
RLESparseResourceAllocation resRLE =
reservation.getResourcesOverTime(checkStart, checkEnd);
RLESparseResourceAllocation toCheck = RLESparseResourceAllocation
.merge(plan.getResourceCalculator(), plan.getTotalCapacity(),
consumptionForUserOverTime, resRLE, RLEOperator.add, Long.MIN_VALUE,
Long.MAX_VALUE);
NavigableMap<Long, Resource> integralUp = new TreeMap<>();
NavigableMap<Long, Resource> integralDown = new TreeMap<>();
long prevTime = toCheck.getEarliestStartTime();
IntegralResource prevResource = new IntegralResource(0L, 0L);
IntegralResource runningTot = new IntegralResource(0L, 0L);
// add intermediate points
Map<Long, Resource> temp = new TreeMap<>();
for (Map.Entry<Long, Resource> pointToCheck : toCheck.getCumulative()
.entrySet()) {
Long timeToCheck = pointToCheck.getKey();
Resource resourceToCheck = pointToCheck.getValue();
Long nextPoint = toCheck.getCumulative().higherKey(timeToCheck);
if (nextPoint == null || toCheck.getCumulative().get(nextPoint) == null) {
continue;
}
for (int i = 1; i <= (nextPoint - timeToCheck) / validWindow; i++) {
temp.put(timeToCheck + (i * validWindow), resourceToCheck);
}
}
temp.putAll(toCheck.getCumulative());
// compute point-wise integral for the up-fronts and down-fronts
for (Map.Entry<Long, Resource> currPoint : temp.entrySet()) {
Long currTime = currPoint.getKey();
Resource currResource = currPoint.getValue();
//add to running total current contribution
prevResource.multiplyBy(currTime - prevTime);
runningTot.add(prevResource);
integralUp.put(currTime, normalizeToResource(runningTot, validWindow));
integralDown.put(currTime + validWindow,
normalizeToResource(runningTot, validWindow));
if (currResource != null) {
prevResource.memory = currResource.getMemorySize();
prevResource.vcores = currResource.getVirtualCores();
} else {
prevResource.memory = 0L;
prevResource.vcores = 0L;
}
prevTime = currTime;
}
// compute final integral as delta of up minus down transitions
RLESparseResourceAllocation intUp =
new RLESparseResourceAllocation(integralUp,
plan.getResourceCalculator());
RLESparseResourceAllocation intDown =
new RLESparseResourceAllocation(integralDown,
plan.getResourceCalculator());
RLESparseResourceAllocation integral = RLESparseResourceAllocation
.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), intUp,
intDown, RLEOperator.subtract, Long.MIN_VALUE, Long.MAX_VALUE);
// define over-time integral limit
// note: this is aligned with the normalization done above
NavigableMap<Long, Resource> tlimit = new TreeMap<>();
Resource maxAvgRes = Resources.multiply(plan.getTotalCapacity(), maxAvg);
tlimit.put(toCheck.getEarliestStartTime() - validWindow, maxAvgRes);
RLESparseResourceAllocation targetLimit =
new RLESparseResourceAllocation(tlimit, plan.getResourceCalculator());
// compare using merge() limit with integral
try {
RLESparseResourceAllocation.merge(plan.getResourceCalculator(),
plan.getTotalCapacity(), targetLimit, integral,
RLEOperator.subtractTestNonNegative, checkStart, checkEnd);
} catch (PlanningException p) {
throw new PlanningQuotaException(
"Integral (avg over time) quota capacity " + maxAvg
+ " over a window of " + validWindow / 1000 + " seconds, "
+ " would be exceeded by accepting reservation: " + reservation
.getReservationId(), p);
}
} | 3.68 |
morf_ViewChangesDeploymentHelper_dropViewIfExists | /**
* Creates SQL statements for dropping given view.
*
* @param view View to be dropped.
* @param dropTheView Whether to actually drop the view from the database.
* @param updateDeployedViews Whether to update the DeployedViews table.
* @return SQL statements to be run to drop the view.
* @deprecated kept to ensure backwards compatibility.
*/
@Deprecated
private List<String> dropViewIfExists(View view, boolean dropTheView, boolean updateDeployedViews) {
return dropViewIfExists(view, dropTheView, updateDeployedViews, new UpgradeSchemas(schema(), schema()));
} | 3.68 |
hibernate-validator_ComposingConstraintTree_prepareFinalConstraintViolations | /**
* Before the final constraint violations can be reported back we need to check whether we have a composing
* constraint whose result should be reported as single violation.
*
* @param validationContext meta data about top level validation
* @param valueContext meta data for currently validated value
* @param violatedConstraintValidatorContexts used to accumulate constraint validator contexts that cause constraint violations
* @param localConstraintValidatorContext an optional constraint validator context carrying the violations reported by the top level constraint
*/
private void prepareFinalConstraintViolations(ValidationContext<?> validationContext,
ValueContext<?, ?> valueContext,
Collection<ConstraintValidatorContextImpl> violatedConstraintValidatorContexts,
Optional<ConstraintValidatorContextImpl> localConstraintValidatorContext) {
if ( reportAsSingleViolation() ) {
// We clear the current violations list anyway
violatedConstraintValidatorContexts.clear();
// But then we need to distinguish whether the local ConstraintValidator has reported
// violations or not (or if there is no local ConstraintValidator at all).
// If not we create a violation
// using the error message in the annotation declaration at top level.
if ( !localConstraintValidatorContext.isPresent() ) {
violatedConstraintValidatorContexts.add(
validationContext.createConstraintValidatorContextFor(
descriptor, valueContext.getPropertyPath()
)
);
}
}
// Now, if there were some violations reported by
// the local ConstraintValidator, they need to be added to constraintViolations.
// Whether we need to report them as a single constraint or just add them to the other violations
// from the composing constraints, has been taken care of in the previous conditional block.
// This takes also care of possible custom error messages created by the constraintValidator,
// as checked in test CustomErrorMessage.java
// If no violations have been reported from the local ConstraintValidator, or no such validator exists,
// then we just add an empty list.
if ( localConstraintValidatorContext.isPresent() ) {
violatedConstraintValidatorContexts.add( localConstraintValidatorContext.get() );
}
} | 3.68 |
hudi_TableChange_addPositionChange | /**
* Add position change.
*
* @param srcName column which need to be reordered
* @param dsrName reference position
* @param orderType change types
* @return this
*/
public BaseColumnChange addPositionChange(String srcName, String dsrName, ColumnPositionChange.ColumnPositionType orderType) {
Integer srcId = findIdByFullName(srcName);
Option<Integer> dsrIdOpt = dsrName.isEmpty() ? Option.empty() : Option.of(findIdByFullName(dsrName));
Integer srcParentId = id2parent.get(srcId);
Option<Integer> dsrParentIdOpt = dsrIdOpt.map(id2parent::get);
// forbid adjust hoodie metadata columns.
switch (orderType) {
case BEFORE:
checkColModifyIsLegal(dsrName);
break;
case FIRST:
if (srcId == null || srcId == -1 || srcParentId == null || srcParentId == -1) {
throw new HoodieSchemaException("forbid adjust top-level columns position by using through first syntax");
}
break;
case AFTER:
List<String> checkColumns = HoodieRecord.HOODIE_META_COLUMNS.subList(0, HoodieRecord.HOODIE_META_COLUMNS.size() - 2);
if (checkColumns.stream().anyMatch(f -> f.equalsIgnoreCase(dsrName))) {
throw new HoodieSchemaException("forbid adjust the position of ordinary columns between meta columns");
}
break;
case NO_OPERATION:
default:
break;
}
int parentId;
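// Resolve the parent id of the columns being reordered; columns can only be reordered within the same parent.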
if (srcParentId != null && dsrParentIdOpt.isPresent() && srcParentId.equals(dsrParentIdOpt.get())) {
Types.Field parentField = internalSchema.findField(srcParentId);
if (!(parentField.type() instanceof Types.RecordType)) {
throw new HoodieSchemaException(String.format("only support reorder fields in struct type, but find: %s", parentField.type()));
}
parentId = parentField.fieldId();
} else if (srcParentId == null && !dsrParentIdOpt.isPresent()) {
parentId = -1;
} else if (srcParentId != null && !dsrParentIdOpt.isPresent() && orderType.equals(ColumnPositionChange.ColumnPositionType.FIRST)) {
parentId = srcParentId;
} else {
throw new HoodieSchemaException("cannot order position from different parent");
}
ArrayList<ColumnPositionChange> changes = positionChangeMap.getOrDefault(parentId, new ArrayList<>());
changes.add(ColumnPositionChange.get(srcId, dsrIdOpt.orElse(-1), orderType));
positionChangeMap.put(parentId, changes);
return this;
} | 3.68 |
hadoop_DelegatingSSLSocketFactory_resetDefaultFactory | /**
* For testing only: reset the socket factory.
*/
@VisibleForTesting
public static synchronized void resetDefaultFactory() {
LOG.info("Resetting default SSL Socket Factory");
instance = null;
} | 3.68 |
zxing_BarcodeValue_setValue | /**
* Add an occurrence of a value
*/
void setValue(int value) {
Integer confidence = values.get(value);
if (confidence == null) {
confidence = 0;
}
confidence++;
values.put(value, confidence);
} | 3.68 |
flink_DefaultRollingPolicy_create | /** This method is {@link Deprecated}, use {@link DefaultRollingPolicy#builder()} instead. */
@Deprecated
public static DefaultRollingPolicy.PolicyBuilder create() {
return builder();
} | 3.68 |
hbase_SizeCachedKeyValue_getSerializedSize | /**
* Override by just returning the length for saving cost of method dispatching. If not, it will
* call {@link ExtendedCell#getSerializedSize()} firstly, then forward to
* {@link SizeCachedKeyValue#getSerializedSize(boolean)}. (See HBASE-21657)
*/
@Override
public int getSerializedSize() {
return this.length;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_getAccountFromAuthority | /**
* Method to extract the account name from an Azure URI.
*
* @param uri
* -- WASB blob URI
* @return accountName -- the account name for the URI.
* @throws URISyntaxException
* if the URI does not have an authority, i.e. it is badly formed.
*/
private String getAccountFromAuthority(URI uri) throws URISyntaxException {
// Check to make sure that the authority is valid for the URI.
//
String authority = uri.getRawAuthority();
if (null == authority) {
// Badly formed or illegal URI.
//
throw new URISyntaxException(uri.toString(),
"Expected URI with a valid authority");
}
// Check if the authority contains the delimiter separating the account name from
// the container.
//
if (!authority.contains(WASB_AUTHORITY_DELIMITER)) {
return authority;
}
// Split off the container name and the authority.
//
String[] authorityParts = authority.split(WASB_AUTHORITY_DELIMITER, 2);
// Because the string contains an '@' delimiter, a container must be
// specified.
//
if (authorityParts.length < 2 || "".equals(authorityParts[0])) {
// Badly formed WASB authority since there is no container.
//
final String errMsg = String
.format(
"URI '%s' has a malformed WASB authority, expected container name. "
+ "Authority takes the form wasb://[<container name>@]<account name>",
uri.toString());
throw new IllegalArgumentException(errMsg);
}
// Return with the account name. It is possible that this name is NULL.
//
return authorityParts[1];
} | 3.68 |
hadoop_SimpleTcpClientHandler_channelRead | /**
* Shutdown connection by default. Subclass can override this method to do
* more interaction with the server.
*/
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ctx.channel().close();
} | 3.68 |
hbase_RegionSplitRestriction_create | /**
* Create the RegionSplitRestriction configured for the given table.
* @param tableDescriptor the table descriptor
* @param conf the configuration
* @return a RegionSplitRestriction instance
* @throws IOException if an error occurs
*/
public static RegionSplitRestriction create(TableDescriptor tableDescriptor, Configuration conf)
throws IOException {
String type = tableDescriptor.getValue(RESTRICTION_TYPE_KEY);
if (type == null) {
type = conf.get(RESTRICTION_TYPE_KEY, RESTRICTION_TYPE_NONE);
}
RegionSplitRestriction ret;
switch (type) {
case RESTRICTION_TYPE_NONE:
ret = new NoRegionSplitRestriction();
break;
case RESTRICTION_TYPE_KEY_PREFIX:
ret = new KeyPrefixRegionSplitRestriction();
break;
case RESTRICTION_TYPE_DELIMITED_KEY_PREFIX:
ret = new DelimitedKeyPrefixRegionSplitRestriction();
break;
default:
LOG.warn("Invalid RegionSplitRestriction type specified: {}. "
+ "Using the default RegionSplitRestriction", type);
ret = new NoRegionSplitRestriction();
break;
}
ret.initialize(tableDescriptor, conf);
return ret;
} | 3.68 |
hmily_HmilyRepositoryFacade_createHmilyParticipantUndo | /**
* Create hmily participant undo.
*
* @param undo the undo
*/
public void createHmilyParticipantUndo(final HmilyParticipantUndo undo) {
checkRows(hmilyRepository.createHmilyParticipantUndo(undo));
} | 3.68 |
framework_ComputedStyle_getHeightIncludingBorderPadding | /**
* Returns the current height, padding and border from the DOM.
*
* @return the computed height including padding and borders
*/
public double getHeightIncludingBorderPadding() {
double h = getHeight();
if (BrowserInfo.get().isIE() || isContentBox()) {
// IE11 always returns only the height without padding/border
h += getBorderHeight() + getPaddingHeight();
}
return h;
} | 3.68 |
hadoop_BalanceJournalInfoHDFS_recoverJob | /**
* Recover job from journal on HDFS.
*/
public void recoverJob(BalanceJob job) throws IOException {
FSDataInputStream in = null;
try {
Path logPath = getLatestStateJobPath(job);
FileSystem fs = FileSystem.get(workUri, conf);
in = fs.open(logPath);
job.readFields(in);
LOG.debug("Recover job={} from journal.", job);
} finally {
if (in != null) {
in.close();
}
}
} | 3.68 |
druid_StatViewServlet_getJmxResult | /**
* Gets the content returned by the JMX service for the specified url.
*
* @param connetion the JMX connection
* @param url the url to query
* @return the content returned by the JMX service
* @throws Exception the exception
*/
private String getJmxResult(MBeanServerConnection connetion, String url) throws Exception {
ObjectName name = new ObjectName(DruidStatService.MBEAN_NAME);
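// Invoke the "service" operation on the DruidStatService MBean, passing the url as the single String argument.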
String result = (String) conn.invoke(name, "service", new String[]{url},
new String[]{String.class.getName()});
return result;
} | 3.68 |
flink_RestClusterClient_pollResourceAsync | /**
* Creates a {@code CompletableFuture} that polls a {@code AsynchronouslyCreatedResource} until
* its {@link AsynchronouslyCreatedResource#queueStatus() QueueStatus} becomes {@link
* QueueStatus.Id#COMPLETED COMPLETED}. The future completes with the result of {@link
* AsynchronouslyCreatedResource#resource()}.
*
* @param resourceFutureSupplier The operation which polls for the {@code
* AsynchronouslyCreatedResource}.
* @param <R> The type of the resource.
* @param <A> The type of the {@code AsynchronouslyCreatedResource}.
* @return A {@code CompletableFuture} delivering the resource.
*/
private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync(
final Supplier<CompletableFuture<A>> resourceFutureSupplier) {
return pollResourceAsync(resourceFutureSupplier, new CompletableFuture<>(), 0);
} | 3.68 |
hadoop_LocalCacheDirectoryManager_decrementFileCountForPath | /**
* This method will reduce the file count for the directory represented by
* path. The root directory of this Local cache directory manager is
* represented by an empty string.
*/
public synchronized void decrementFileCountForPath(String relPath) {
relPath = relPath == null ? "" : relPath.trim();
Directory subDir = knownDirectories.get(relPath);
int oldCount = subDir.getCount();
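// If the count just dropped back below the per-directory limit, the directory can accept new files again.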
if (subDir.decrementAndGetCount() < perDirectoryFileLimit
&& oldCount >= perDirectoryFileLimit) {
nonFullDirectories.add(subDir);
}
} | 3.68 |
hbase_ScannerContext_getTimeScope | /** Returns {@link LimitScope} indicating scope in which the time limit is enforced */
LimitScope getTimeScope() {
return this.timeScope;
} | 3.68 |
framework_VTabsheetBase_setActiveTabIndex | /**
* For internal use only. May be removed or replaced in the future.
*
* @param activeTabIndex
* the index of the currently active tab
*/
public void setActiveTabIndex(int activeTabIndex) {
this.activeTabIndex = activeTabIndex;
} | 3.68 |
dubbo_StringUtils_isJavaIdentifier | /**
* Returns true if s is a legal Java identifier.<p>
* <a href="http://www.exampledepot.com/egs/java.lang/IsJavaId.html">more info.</a>
*/
public static boolean isJavaIdentifier(String s) {
if (isEmpty(s) || !Character.isJavaIdentifierStart(s.charAt(0))) {
return false;
}
for (int i = 1; i < s.length(); i++) {
if (!Character.isJavaIdentifierPart(s.charAt(i))) {
return false;
}
}
return true;
} | 3.68 |
hadoop_CsiGrpcClient_close | /**
* Shutdown the communication channel gracefully,
* wait for 5 seconds before it is enforced.
*/
@Override
public void close() {
try {
this.channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
LOG.error("Failed to gracefully shutdown"
+ " gRPC communication channel in 5 seconds", e);
}
} | 3.68 |
framework_CssLayout_getComponent | /**
* Returns the component at the given position.
*
* @param index
* The position of the component.
* @return The component at the given index.
* @throws IndexOutOfBoundsException
* If the index is out of range.
*/
public Component getComponent(int index) throws IndexOutOfBoundsException {
return components.get(index);
} | 3.68 |
hadoop_ResourceUsageMetrics_getCumulativeCpuUsage | /**
* Get the cumulative CPU usage.
*/
public long getCumulativeCpuUsage() {
return cumulativeCpuUsage;
} | 3.68 |
querydsl_NumberExpression_doubleValue | /**
* Create a {@code cast(this as double)} expression
*
* <p>Get the double expression of this numeric expression</p>
*
* @return this.doubleValue()
* @see java.lang.Number#doubleValue()
*/
public NumberExpression<Double> doubleValue() {
return castToNum(Double.class);
} | 3.68 |
framework_Window_isClosable | /**
* Returns the closable status of the window. If a window is closable, it
* typically shows an X in the upper right corner. Clicking on the X sends a
* close event to the server. Setting closable to false will remove the X
* from the window and prevent the user from closing the window.
*
* @return true if the window can be closed by the user.
*/
public boolean isClosable() {
return getState(false).closable;
} | 3.68 |
hbase_MemStoreCompactorSegmentsIterator_createScanner | /**
* Creates the scanner for compacting the pipeline.
* @return the scanner
*/
private InternalScanner createScanner(HStore store, List<KeyValueScanner> scanners)
throws IOException {
InternalScanner scanner = null;
boolean success = false;
try {
RegionCoprocessorHost cpHost = store.getCoprocessorHost();
ScanInfo scanInfo;
if (cpHost != null) {
scanInfo = cpHost.preMemStoreCompactionCompactScannerOpen(store);
} else {
scanInfo = store.getScanInfo();
}
scanner = new StoreScanner(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES,
store.getSmallestReadPoint(), PrivateConstants.OLDEST_TIMESTAMP);
if (cpHost != null) {
InternalScanner scannerFromCp = cpHost.preMemStoreCompactionCompact(store, scanner);
if (scannerFromCp == null) {
throw new CoprocessorException("Got a null InternalScanner when calling"
+ " preMemStoreCompactionCompact which is not acceptable");
}
success = true;
return scannerFromCp;
} else {
success = true;
return scanner;
}
} finally {
if (!success) {
Closeables.close(scanner, true);
scanners.forEach(KeyValueScanner::close);
}
}
} | 3.68 |
hbase_StripeStoreFileManager_getStripeFilesSize | /**
* Gets the total size of all files in the stripe.
* @param stripeIndex Stripe index.
* @return Size.
*/
private long getStripeFilesSize(int stripeIndex) {
long result = 0;
for (HStoreFile sf : state.stripeFiles.get(stripeIndex)) {
result += sf.getReader().length();
}
return result;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_helpGetKey | /** Returns the key of the node. */
private K helpGetKey(long node) {
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
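// The key bytes follow the level-dependent node header; read their length and offset, then deserialize.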
int level = SkipListUtils.getLevel(segment, offsetInSegment);
int keyDataLen = SkipListUtils.getKeyLen(segment, offsetInSegment);
int keyDataOffset = offsetInSegment + SkipListUtils.getKeyDataOffset(level);
return skipListKeySerializer.deserializeKey(segment, keyDataOffset, keyDataLen);
} | 3.68 |
hadoop_RoleModel_statement | /**
* Create a statement.
* If {@code isDirectory} is true, a "/" is added to the path.
* This is critical when adding wildcard permissions under
* a directory, and also needed when locking down dir-as-file
* and dir-as-directory-marker access.
* @param allow allow or deny
* @param path path
* @param isDirectory is this a directory?
* @param wildcards add a * to the tail of the key?
* @param actions action
* @return the formatted json statement
*/
public static Statement statement(
final boolean allow,
final Path path,
final boolean isDirectory,
final boolean wildcards,
final Collection<String> actions) {
return new Statement(RoleModel.effect(allow))
.addActions(actions)
.addResources(resource(path, isDirectory, wildcards));
} | 3.68 |
querydsl_GeometryExpressions_asEWKT | /**
* Return a specified ST_Geometry value from Extended Well-Known Text representation (EWKT).
*
* @param expr geometry
* @return serialized form
*/
public static StringExpression asEWKT(GeometryExpression<?> expr) {
return Expressions.stringOperation(SpatialOps.AS_EWKT, expr);
} | 3.68 |
hbase_MasterObserver_preGetConfiguredNamespacesAndTablesInRSGroup | /**
* Called before getting the configured namespaces and tables in the region server group.
* @param ctx the environment to interact with the framework and master
* @param groupName name of the region server group
*/
default void preGetConfiguredNamespacesAndTablesInRSGroup(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final String groupName)
throws IOException {
} | 3.68 |
flink_AbstractBytesHashMap_isHashSetMode | /**
* @return true when BytesHashMap's valueTypeInfos.length == 0. Any appended value will be
* ignored and replaced with a reusedValue as a present tag.
*/
@VisibleForTesting
boolean isHashSetMode() {
return hashSetMode;
} | 3.68 |
hbase_MetaFixer_getHoleCover | /**
* @return Attempts to calculate a new {@link RegionInfo} that covers the region range described
* in {@code hole}.
*/
private static Optional<RegionInfo> getHoleCover(Pair<RegionInfo, RegionInfo> hole) {
final RegionInfo left = hole.getFirst();
final RegionInfo right = hole.getSecond();
if (left.getTable().equals(right.getTable())) {
// Simple case.
if (Bytes.compareTo(left.getEndKey(), right.getStartKey()) >= 0) {
LOG.warn("Skipping hole fix; left-side endKey is not less than right-side startKey;"
+ " left=<{}>, right=<{}>", left, right);
return Optional.empty();
}
return Optional.of(buildRegionInfo(left.getTable(), left.getEndKey(), right.getStartKey()));
}
final boolean leftUndefined = left.equals(RegionInfoBuilder.UNDEFINED);
final boolean rightUndefined = right.equals(RegionInfoBuilder.UNDEFINED);
final boolean last = left.isLast();
final boolean first = right.isFirst();
if (leftUndefined && rightUndefined) {
LOG.warn("Skipping hole fix; both the hole left-side and right-side RegionInfos are "
+ "UNDEFINED; left=<{}>, right=<{}>", left, right);
return Optional.empty();
}
if (leftUndefined || last) {
return Optional
.of(buildRegionInfo(right.getTable(), HConstants.EMPTY_START_ROW, right.getStartKey()));
}
if (rightUndefined || first) {
return Optional
.of(buildRegionInfo(left.getTable(), left.getEndKey(), HConstants.EMPTY_END_ROW));
}
LOG.warn("Skipping hole fix; don't know what to do with left=<{}>, right=<{}>", left, right);
return Optional.empty();
} | 3.68 |
morf_AbstractSqlDialectTest_callPrepareStatementParameter | /**
* Calls callPrepareStatementParameter with a mock {@link PreparedStatement} and returns
* the mock for analysis.
*
* @param parameter The SQL parameter
* @param value The value to set
* @return The mocked {@link PreparedStatement}
*/
protected NamedParameterPreparedStatement callPrepareStatementParameter(SqlParameter parameter, String value) {
NamedParameterPreparedStatement mockStatement = mock(NamedParameterPreparedStatement.class);
testDialect.prepareStatementParameters(mockStatement, ImmutableList.of(parameter), statementParameters().setString(parameter.getImpliedName(), value));
return mockStatement;
} | 3.68 |
hbase_MasterObserver_postBalanceSwitch | /**
* Called after the flag to enable/disable balancing has changed.
* @param ctx the coprocessor instance's environment
* @param oldValue the previously set balanceSwitch value
* @param newValue the newly set balanceSwitch value
*/
default void postBalanceSwitch(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean oldValue, final boolean newValue) throws IOException {
} | 3.68 |
framework_DragSourceExtension_getParent | /**
* Returns the component this extension is attached to.
*
* @return Extended component.
*/
@Override
@SuppressWarnings("unchecked")
public T getParent() {
return (T) super.getParent();
} | 3.68 |
flink_Tuple22_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>
of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18,
T19 f19,
T20 f20,
T21 f21) {
return new Tuple22<>(
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18,
f19, f20, f21);
} | 3.68 |
framework_CalendarConnector_getActionIcon | /**
* Get the icon url for a context menu item.
*
* @param actionKey
* The unique action key
* @return the icon url of the action, or {@code null} if the action has no icon
*/
public String getActionIcon(String actionKey) {
return actionMap.get(actionKey + "_i");
} | 3.68 |
hbase_MetaBrowser_hasMoreResults | /**
* @return {@code true} when the underlying {@link ResultScanner} is not yet exhausted,
* {@code false} otherwise.
*/
public boolean hasMoreResults() {
return sourceIterator.hasNext();
} | 3.68 |
hbase_Query_setFilter | /**
* Apply the specified server-side filter when performing the Query. Only
* {@link Filter#filterCell(org.apache.hadoop.hbase.Cell)} is called AFTER all tests for ttl,
* column match, deletes and column family's max versions have been run.
* @param filter filter to run on the server
* @return this for invocation chaining
*/
public Query setFilter(Filter filter) {
this.filter = filter;
return this;
} | 3.68 |
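Because setFilter returns this, it slots into HBase's usual builder-style usage; a minimal sketch (assuming the HBase client API; the column family and row prefix are placeholders):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanFilterExample {
    public static Scan buildScan() {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("cf"));                           // placeholder column family
        // The filter runs server-side, after ttl/version/delete checks, as noted above.
        scan.setFilter(new PrefixFilter(Bytes.toBytes("row-prefix"))); // placeholder prefix
        return scan;
    }
}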
flink_Configuration_setFloat | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key the option specifying the key to be added
* @param value the value of the key/value pair to be added
*/
@PublicEvolving
public void setFloat(ConfigOption<Float> key, float value) {
setValueInternal(key.key(), value);
} | 3.68 |
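A short sketch of declaring a float option and setting it through this method (the option key and values are invented; assumes a recent Flink version with typed ConfigOptions builders):

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class FloatOptionExample {
    // Hypothetical option, used only to show the key/value mapping.
    private static final ConfigOption<Float> SAMPLE_RATIO =
            ConfigOptions.key("example.sample-ratio").floatType().defaultValue(0.1f);

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setFloat(SAMPLE_RATIO, 0.25f);         // stored under the option's main key
        System.out.println(conf.get(SAMPLE_RATIO)); // 0.25
    }
}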
hudi_BaseHoodieWriteClient_compact | /**
* Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
*
   * @param compactionInstantTime Compaction Instant Time
   * @param shouldComplete Whether the compaction should also be completed, or only executed
* @return Collection of Write Status
*/
protected HoodieWriteMetadata<O> compact(String compactionInstantTime, boolean shouldComplete) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
preWrite(compactionInstantTime, WriteOperationType.COMPACT, table.getMetaClient());
return tableServiceClient.compact(compactionInstantTime, shouldComplete);
} | 3.68 |
framework_CustomField_attach | /**
* Constructs the content and notifies it that the {@link CustomField} is
* attached to a window.
*
* @see com.vaadin.ui.Component#attach()
*/
@Override
public void attach() {
// First call super attach to notify all children (none if content has
// not yet been created)
super.attach();
// If the content has not yet been created, create and attach it at
// this point by calling getContent()
getContent();
} | 3.68 |
framework_LogSection_addRow | /**
* Adds a row to the log, applies the log row limit by removing old rows if
* needed, and scrolls new row into view if scroll lock is not active.
*
 * @param level
 *            The log level of the row
 * @param msg
 *            The log message, as HTML
 * @return the row element that was added
*/
private Element addRow(Level level, String msg) {
int sinceReset = VDebugWindow.getMillisSinceReset();
int sinceStart = VDebugWindow.getMillisSinceStart();
Element row = DOM.createDiv();
row.addClassName(VDebugWindow.STYLENAME + "-row");
row.addClassName(level.getName());
String inner = "<span class='" + VDebugWindow.STYLENAME + "-"
+ "'></span><span class='" + VDebugWindow.STYLENAME
+ "-time' title='"
+ VDebugWindow.getTimingTooltip(sinceStart, sinceReset) + "'>"
+ sinceReset + "ms</span><span class='" + VDebugWindow.STYLENAME
+ "-message'>" + msg + "</span>";
row.setInnerHTML(inner);
contentElement.appendChild(row);
applyLimit();
maybeScroll();
return row;
} | 3.68 |
morf_ViewChanges_correctCase | /**
* Correct case of names with respect to all the views we know about.
*
* @return equivalent collection with names case-corrected where possible.
*/
private Collection<String> correctCase(Collection<String> names) {
Map<String, String> namesMap = names.stream().collect(Collectors.toMap(String::toLowerCase, name -> name, (first, second) -> first));
namesMap.replaceAll(allViewsMap::getOrDefault);
return namesMap.values();
} | 3.68 |
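The same case-correction pattern works with plain JDK collections; a self-contained sketch (names invented) that maps lower-cased keys back to a known canonical spelling:

import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class CaseCorrectExample {
    /** Replaces each name with its canonical-case form where one is known. */
    static Collection<String> correctCase(Collection<String> names, Map<String, String> canonicalByLower) {
        Map<String, String> byLower = names.stream()
                .collect(Collectors.toMap(String::toLowerCase, Function.identity(), (first, second) -> first));
        byLower.replaceAll(canonicalByLower::getOrDefault); // unknown names keep their original spelling
        return byLower.values();
    }

    public static void main(String[] args) {
        Map<String, String> known = Map.of("invoiceview", "InvoiceView", "orderview", "OrderView");
        System.out.println(correctCase(Arrays.asList("INVOICEVIEW", "orderView", "Unknown"), known));
    }
}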
hbase_WALCellCodec_create | /**
* Create and setup a {@link WALCellCodec} from the CompressionContext. Cell Codec classname is
* read from {@link Configuration}. Fully prepares the codec for use.
* @param conf {@link Configuration} to read for the user-specified codec. If none is
* specified, uses a {@link WALCellCodec}.
* @param compression compression the codec should use
* @return a {@link WALCellCodec} ready for use.
* @throws UnsupportedOperationException if the codec cannot be instantiated
*/
public static WALCellCodec create(Configuration conf, CompressionContext compression)
throws UnsupportedOperationException {
String cellCodecClsName = getWALCellCodecClass(conf).getName();
return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName,
new Class[] { Configuration.class, CompressionContext.class },
new Object[] { conf, compression });
} | 3.68 |
flink_HighAvailabilityMode_isHighAvailabilityModeActivated | /**
* Returns true if the defined recovery mode supports high availability.
*
* @param configuration Configuration which contains the recovery mode
* @return true if high availability is supported by the recovery mode, otherwise false
*/
public static boolean isHighAvailabilityModeActivated(Configuration configuration) {
HighAvailabilityMode mode = fromConfig(configuration);
return mode.haActive;
} | 3.68 |
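A minimal check sketch (assumes flink-runtime on the classpath and Flink's standard "high-availability" option; the printed values reflect the default NONE mode versus ZooKeeper):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.HighAvailabilityOptions;
import org.apache.flink.runtime.jobmanager.HighAvailabilityMode;

public class HaCheckExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Default mode is NONE, which is not highly available.
        System.out.println(HighAvailabilityMode.isHighAvailabilityModeActivated(conf)); // false
        conf.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
        System.out.println(HighAvailabilityMode.isHighAvailabilityModeActivated(conf)); // true
    }
}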
hbase_QuotaObserverChore_getNamespaceQuotaSnapshot | /**
* Fetches the {@link SpaceQuotaSnapshot} for the given namespace from this chore.
*/
SpaceQuotaSnapshot getNamespaceQuotaSnapshot(String namespace) {
SpaceQuotaSnapshot state = this.namespaceQuotaSnapshots.get(namespace);
if (state == null) {
// No tracked state implies observance.
return QuotaSnapshotStore.NO_QUOTA;
}
return state;
} | 3.68 |
dubbo_URLParam_getAnyMethodParameter | /**
 * Get any method-related parameter that matches the given key.
 *
 * @param key the parameter key
 * @return the matched value, or {@code null} if none; when several methods match, one of them is chosen
*/
public String getAnyMethodParameter(String key) {
Map<String, String> methodMap = METHOD_PARAMETERS.get(key);
if (CollectionUtils.isNotEmptyMap(methodMap)) {
String methods = getParameter(METHODS_KEY);
if (StringUtils.isNotEmpty(methods)) {
for (String method : methods.split(",")) {
String value = methodMap.get(method);
if (StringUtils.isNotEmpty(value)) {
return value;
}
}
} else {
return methodMap.values().iterator().next();
}
}
return null;
} | 3.68 |
framework_ConnectorMap_getElement | /**
* Gets the main element for the connector with the given id. The reverse of
* {@link #getConnectorId(Element)}.
*
* @param connectorId
* the id of the widget whose element is desired
* @return the element for the connector corresponding to the id
*/
public Element getElement(String connectorId) {
ServerConnector p = getConnector(connectorId);
if (p instanceof ComponentConnector) {
return ((ComponentConnector) p).getWidget().getElement();
}
return null;
} | 3.68 |
hadoop_ItemInfo_getFile | /**
   * Returns the id of the file that needs to satisfy the policy.
*/
public long getFile() {
return fileId;
} | 3.68 |
hbase_TableDescriptorBuilder_getCoprocessorDescriptors | /**
 * Returns the list of attached co-processors, represented by their class names.
 * @return The list of co-processor class names
*/
@Override
public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
List<CoprocessorDescriptor> result = new ArrayList<>();
for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) {
String key = Bytes.toString(e.getKey().get()).trim();
if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()).ifPresent(result::add);
}
}
return result;
} | 3.68 |
framework_AbstractContainer_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addItemSetChangeListener(Container.ItemSetChangeListener)}
*/
@Deprecated
protected void addListener(Container.ItemSetChangeListener listener) {
addItemSetChangeListener(listener);
} | 3.68 |
flink_HsResultPartition_setupInternal | // Called by task thread.
@Override
protected void setupInternal() throws IOException {
if (isReleased()) {
throw new IOException("Result partition has been released.");
}
this.fileDataManager.setup();
this.memoryDataManager =
new HsMemoryDataManager(
isBroadcastOnly ? 1 : numSubpartitions,
networkBufferSize,
bufferPool,
getSpillingStrategy(hybridShuffleConfiguration),
dataIndex,
dataFilePath,
bufferCompressor,
hybridShuffleConfiguration.getBufferPoolSizeCheckIntervalMs());
} | 3.68 |
hbase_StoreUtils_getMaxSequenceIdInList | /**
* Return the highest sequence ID found across all storefiles in the given list.
*/
public static OptionalLong getMaxSequenceIdInList(Collection<HStoreFile> sfs) {
return sfs.stream().mapToLong(HStoreFile::getMaxSequenceId).max();
} | 3.68 |