name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AbfsRestOperation_hasResult | /**
* Checks if there is a non-null HTTP response.
* @return true if there is a non-null HTTP response from the ABFS call.
*/
public boolean hasResult() {
return result != null;
} | 3.68 |
hbase_AccessController_revoke | /**
* @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link Admin#revoke(UserPermission)}
* instead.
* @see Admin#revoke(UserPermission)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-21739">HBASE-21739</a>
*/
@Deprecated
@Override
public void revoke(RpcController controller, AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done) {
final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission());
AccessControlProtos.RevokeResponse response = null;
try {
// only allowed to be called on _acl_ region
if (aclRegion) {
if (!initialized) {
throw new CoprocessorException("AccessController not yet initialized");
}
User caller = RpcServer.getRequestUser().orElse(null);
if (LOG.isDebugEnabled()) {
LOG.debug("Received request from {} to revoke access permission {}",
caller.getShortName(), perm.toString());
}
preGrantOrRevoke(caller, "revoke", perm);
// regionEnv is set at #start. Hopefully not null here.
regionEnv.getConnection().getAdmin()
.revoke(new UserPermission(perm.getUser(), perm.getPermission()));
if (AUDITLOG.isTraceEnabled()) {
// audit log should record all permission changes
AUDITLOG.trace("Revoked permission " + perm.toString());
}
} else {
throw new CoprocessorException(AccessController.class,
"This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table.");
}
response = AccessControlProtos.RevokeResponse.getDefaultInstance();
} catch (IOException ioe) {
// pass exception back up
CoprocessorRpcUtils.setControllerException(controller, ioe);
}
done.run(response);
} | 3.68 |
hbase_JSONBean_dumpAllBeans | /**
* Dump out all registered mbeans as json on System.out.
*/
public static void dumpAllBeans() throws IOException, MalformedObjectNameException {
try (PrintWriter writer =
new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) {
JSONBean dumper = new JSONBean();
try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
jsonBeanWriter.write(mbeanServer, new ObjectName("*:*"), null, false);
}
}
} | 3.68 |
flink_RichAndCondition_getRight | /** @return One of the {@link IterativeCondition conditions} combined in this condition. */
public IterativeCondition<T> getRight() {
return getNestedConditions()[1];
} | 3.68 |
graphhopper_StringEncodedValue_getValues | /**
* @return an unmodifiable List of the current values
*/
public List<String> getValues() {
return Collections.unmodifiableList(values);
} | 3.68 |
morf_TruncateStatement_getTable | /**
* @return the table
*/
public TableReference getTable() {
return table;
} | 3.68 |
hbase_ZooKeeperHelper_getConnectedZooKeeper | /**
* Get a ZooKeeper instance and wait until it connected before returning.
* @param sessionTimeoutMs Used as session timeout passed to the created ZooKeeper AND as the
* timeout to wait on connection establishment.
*/
public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionTimeoutMs)
throws IOException {
ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {
});
return ensureConnectedZooKeeper(zookeeper, sessionTimeoutMs);
} | 3.68 |
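A minimal usage sketch for the helper above, assuming the class is `org.apache.hadoop.hbase.zookeeper.ZooKeeperHelper`; the quorum address, timeout, and znode path are placeholders. It shows that the returned handle is already connected before the first read.

```java
import org.apache.hadoop.hbase.zookeeper.ZooKeeperHelper;
import org.apache.zookeeper.ZooKeeper;

public class ZkHelperUsage {
  public static void main(String[] args) throws Exception {
    // Placeholder connect string and session timeout; adjust for a real quorum.
    ZooKeeper zk = ZooKeeperHelper.getConnectedZooKeeper("localhost:2181", 30_000);
    try {
      // The handle is connected, so this call does not race the session handshake.
      byte[] data = zk.getData("/hbase", false, null);
      System.out.println(data == null ? "no data" : data.length + " bytes");
    } finally {
      zk.close();
    }
  }
}
```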
hadoop_FileIoProvider_replaceFile | /**
* Move the src file to the target using
* {@link FileUtil#replaceFile(File, File)}.
*
* @param volume target volume. null if unavailable.
* @param src source path.
* @param target target path.
* @throws IOException
*/
public void replaceFile(
@Nullable FsVolumeSpi volume, File src, File target) throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, MOVE);
try {
faultInjectorEventHook.beforeMetadataOp(volume, MOVE);
FileUtil.replaceFile(src, target);
profilingEventHook.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
rocketmq-connect_RecordPositionMapSerde_serde | /**
* Creates a serde pairing the record-position-map serializer with its deserializer.
*
* @return a serde combining the serializer and deserializer
*/
public static RecordPositionMapSerde serde() {
return new RecordPositionMapSerde(new RecordPositionMapSerializer(), new RecordPositionMapDeserializer());
} | 3.68 |
flink_StateObjectCollection_hasState | /** Returns true if this contains at least one {@link StateObject}. */
public boolean hasState() {
for (StateObject state : stateObjects) {
if (state != null) {
return true;
}
}
return false;
} | 3.68 |
hbase_SaslServerAuthenticationProviders_addProviderIfNotExists | /**
* Adds the given provider into the map of providers if a mapping for the auth code does not
* already exist in the map.
*/
static void addProviderIfNotExists(SaslServerAuthenticationProvider provider,
HashMap<Byte, SaslServerAuthenticationProvider> providers) {
final byte newProviderAuthCode = provider.getSaslAuthMethod().getCode();
final SaslServerAuthenticationProvider alreadyRegisteredProvider =
providers.get(newProviderAuthCode);
if (alreadyRegisteredProvider != null) {
throw new RuntimeException("Trying to load SaslServerAuthenticationProvider "
+ provider.getClass() + ", but " + alreadyRegisteredProvider.getClass()
+ " is already registered with the same auth code");
}
providers.put(newProviderAuthCode, provider);
} | 3.68 |
querydsl_SQLExpressions_addHours | /**
* Add the given amount of hours to the date
*
* @param date datetime
* @param hours hours to add
* @return converted datetime
*/
public static <D extends Comparable> DateTimeExpression<D> addHours(DateTimeExpression<D> date, int hours) {
return Expressions.dateTimeOperation(date.getType(), Ops.DateTimeOps.ADD_HOURS, date, ConstantImpl.create(hours));
} | 3.68 |
hbase_HRegionLocation_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "region=" + (this.regionInfo == null ? "null" : this.regionInfo.getRegionNameAsString())
+ ", hostname=" + this.serverName + ", seqNum=" + seqNum;
} | 3.68 |
druid_IPRange_toString | /**
* Convert the IP Range into a string representation.
*
* @return the string representation of the IP address in the common format xxx.xxx.xxx.xxx/xx (IP
* address/extended network prefix).
*/
public String toString() {
return ipAddress.toString() + "/" + extendedNetworkPrefix;
} | 3.68 |
framework_VaadinSession_setAttribute | /**
* Stores a value in this service session. This can be used to associate
* data with the current user so that it can be retrieved at a later point
* from some other part of the application. Setting the value to
* <code>null</code> clears the stored value.
* <p>
* The fully qualified name of the type is used as the name when storing the
* value. The outcome of calling this method is thus the same as if
* calling<br />
* <br />
* <code>setAttribute(type.getName(), value);</code>
*
* @see #getAttribute(Class)
* @see #setAttribute(String, Object)
*
* @param type
* the type that the stored value represents, can not be null
* @param value
* the value to associate with the type, or <code>null</code> to
* remove a previous association.
*/
public <T> void setAttribute(Class<T> type, T value) {
assert hasLock();
if (type == null) {
throw new IllegalArgumentException("type can not be null");
}
if (value != null && !type.isInstance(value)) {
throw new IllegalArgumentException("value of type " + type.getName()
+ " expected but got " + value.getClass().getName());
}
setAttribute(type.getName(), value);
} | 3.68 |
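A short sketch of how the type-keyed attribute API above is typically used; `UserPreferences` is a hypothetical value class, and the calls are assumed to run where the session lock is already held.

```java
import com.vaadin.server.VaadinSession;

public class SessionAttributeExample {
    // Hypothetical value type, used purely for illustration.
    public static class UserPreferences {
        final String language;
        UserPreferences(String language) { this.language = language; }
    }

    // Assumed to run inside request handling where the session lock is held.
    public static void rememberPreferences(VaadinSession session, String language) {
        session.setAttribute(UserPreferences.class, new UserPreferences(language));
    }

    public static String readLanguage(VaadinSession session) {
        UserPreferences prefs = session.getAttribute(UserPreferences.class);
        return prefs == null ? "default" : prefs.language;
    }

    public static void forgetPreferences(VaadinSession session) {
        // Setting null clears the stored value, as documented above.
        session.setAttribute(UserPreferences.class, null);
    }
}
```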
framework_DataBoundTransferable_getSourceContainer | /**
* Returns the container data source from which the transfer occurs.
*
* {@link Container.Viewer#getContainerDataSource()} is used to obtain the
* underlying container of the source component.
*
* @return Container
*/
public Container getSourceContainer() {
Component sourceComponent = getSourceComponent();
if (sourceComponent instanceof Container.Viewer) {
return ((Container.Viewer) sourceComponent)
.getContainerDataSource();
} else {
// this should not happen
return null;
}
} | 3.68 |
flink_JobExecutionResult_getIntCounterResult | /**
* Gets the accumulator with the given name as an integer.
*
* @param accumulatorName Name of the counter
* @return Result of the counter, or null if the counter does not exist
* @throws java.lang.ClassCastException Thrown, if the accumulator was not aggregating a {@link
* java.lang.Integer}
* @deprecated Will be removed in future versions. Use {@link #getAccumulatorResult} instead.
*/
@Deprecated
@PublicEvolving
public Integer getIntCounterResult(String accumulatorName) {
Object result = this.accumulatorResults.get(accumulatorName).getUnchecked();
if (result == null) {
return null;
}
if (!(result instanceof Integer)) {
throw new ClassCastException(
"Requested result of the accumulator '"
+ accumulatorName
+ "' should be Integer but has type "
+ result.getClass());
}
return (Integer) result;
} | 3.68 |
flink_BeamPythonFunctionRunner_createStageBundleFactory | /** To make the error messages more user friendly, throws an exception with the boot logs. */
private StageBundleFactory createStageBundleFactory(
JobBundleFactory jobBundleFactory, RunnerApi.Environment environment) throws Exception {
try (TemporaryClassLoaderContext ignored =
TemporaryClassLoaderContext.of(getClass().getClassLoader())) {
// It loads classes using service loader under context classloader in Beam,
// make sure the classloader used to load SPI classes is the same as the class
// loader of the current class.
return jobBundleFactory.forStage(createExecutableStage(environment));
} catch (Throwable e) {
throw new RuntimeException(environmentManager.getBootLog(), e);
}
} | 3.68 |
querydsl_Expressions_comparableEntityPath | /**
* Create a new Path expression
*
* @param type type of expression
* @param metadata path metadata
* @param <T> type of expression
* @return path expression
*/
public static <T extends Comparable<?>> ComparableEntityPath<T> comparableEntityPath(Class<? extends T> type,
PathMetadata metadata) {
return new ComparableEntityPath<T>(type, metadata);
} | 3.68 |
hadoop_CommonCallableSupplier_maybeAwaitCompletion | /**
* Block awaiting completion for any non-null future passed in;
* No-op if a null arg was supplied.
* @param future future
* @throws IOException if one of the called futures raised an IOE.
* @throws RuntimeException if one of the futures raised one.
*/
public static void maybeAwaitCompletion(
@Nullable final CompletableFuture<Void> future) throws IOException {
if (future != null) {
waitForCompletion(future);
}
} | 3.68 |
hadoop_TimelineWriteResponse_addErrors | /**
* Add a list of {@link TimelineWriteError} instances into the existing list.
*
* @param writeErrors
* a list of {@link TimelineWriteError} instances
*/
public void addErrors(List<TimelineWriteError> writeErrors) {
this.errors.addAll(writeErrors);
} | 3.68 |
hbase_HMobStore_createStoreEngine | /**
* Creates the mob store engine.
*/
@Override
protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
CellComparator cellComparator) throws IOException {
MobStoreEngine engine = new MobStoreEngine();
engine.createComponentsOnce(conf, store, cellComparator);
return engine;
} | 3.68 |
hadoop_RegistryOperationsFactory_createAnonymousInstance | /**
* Create and initialize an anonymous read/write registry operations instance.
* In a secure cluster, this instance will only have read access to the
* registry.
* @param conf configuration
* @return an anonymous registry operations instance
*
* @throws ServiceStateException on any failure to initialize
*/
public static RegistryOperations createAnonymousInstance(Configuration conf) {
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_ANONYMOUS);
return createInstance("AnonymousRegistryOperations", conf);
} | 3.68 |
zxing_PDF417HighLevelEncoder_encodeMultiECIBinary | /**
* Encode all of the message using Byte Compaction as described in ISO/IEC 15438:2001(E)
*
* @param input the input
* @param startpos the start position within the message
* @param count the number of bytes to encode
* @param startmode the mode from which this method starts
* @param sb receives the encoded codewords
*/
private static void encodeMultiECIBinary(ECIInput input,
int startpos,
int count,
int startmode,
StringBuilder sb) throws WriterException {
final int end = Math.min(startpos + count, input.length());
int localStart = startpos;
while (true) {
//encode all leading ECIs and advance localStart
while (localStart < end && input.isECI(localStart)) {
encodingECI(input.getECIValue(localStart), sb);
localStart++;
}
int localEnd = localStart;
//advance end until before the next ECI
while (localEnd < end && !input.isECI(localEnd)) {
localEnd++;
}
final int localCount = localEnd - localStart;
if (localCount <= 0) {
//done
break;
} else {
//encode the segment
encodeBinary(subBytes(input, localStart, localEnd),
0, localCount, localStart == startpos ? startmode : BYTE_COMPACTION, sb);
localStart = localEnd;
}
}
} | 3.68 |
hbase_QuotaUtil_doPut | /*
* ========================================================================= HTable helpers
*/
private static void doPut(final Connection connection, final Put put) throws IOException {
try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
table.put(put);
}
} | 3.68 |
hbase_Compression_getClassLoaderForCodec | /**
* Returns the classloader to load the Codec class from.
*/
private static ClassLoader getClassLoaderForCodec() {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
cl = Compression.class.getClassLoader();
}
if (cl == null) {
cl = ClassLoader.getSystemClassLoader();
}
if (cl == null) {
throw new RuntimeException("A ClassLoader to load the Codec could not be determined");
}
return cl;
} | 3.68 |
flink_CheckpointedInputGate_close | /**
* Cleans up all internally held resources.
*
* @throws IOException Thrown if the cleanup of I/O resources failed.
*/
public void close() throws IOException {
barrierHandler.close();
} | 3.68 |
morf_OracleDialect_prepareBooleanParameter | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#prepareBooleanParameter(org.alfasoftware.morf.jdbc.NamedParameterPreparedStatement, java.lang.Boolean, org.alfasoftware.morf.sql.element.SqlParameter)
*/
@Override
protected void prepareBooleanParameter(NamedParameterPreparedStatement statement, Boolean boolVal, SqlParameter parameter) throws SQLException {
statement.setBigDecimal(
parameter(parameter.getImpliedName()).type(DECIMAL).width(1),
boolVal == null ? null : boolVal ? BigDecimal.ONE : BigDecimal.ZERO
);
} | 3.68 |
flink_ShortParser_parseField | /**
* Static utility to parse a field of type short from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
* @throws NumberFormatException Thrown when the value cannot be parsed because the text
* represents not a correct number.
*/
public static final short parseField(byte[] bytes, int startPos, int length, char delimiter) {
long val = 0;
boolean neg = false;
if (bytes[startPos] == delimiter) {
throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if (length == 0 || bytes[startPos] == delimiter) {
throw new NumberFormatException("Orphaned minus sign.");
}
}
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
return (short) (neg ? -val : val);
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
if (val > OVERFLOW_BOUND && (!neg || val > UNDERFLOW_BOUND)) {
throw new NumberFormatException("Value overflow/underflow");
}
}
return (short) (neg ? -val : val);
} | 3.68 |
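A small sketch of calling the parser above on a delimited byte record, assuming the class is `org.apache.flink.types.parser.ShortParser` as in Flink.

```java
import java.nio.charset.StandardCharsets;
import org.apache.flink.types.parser.ShortParser;

public class ShortParserUsage {
    public static void main(String[] args) {
        // A CSV-like record with two '|'-terminated fields.
        byte[] record = "123|-45|".getBytes(StandardCharsets.US_ASCII);
        // Arguments: bytes, start offset, remaining length, delimiter.
        short first = ShortParser.parseField(record, 0, record.length, '|');
        short second = ShortParser.parseField(record, 4, record.length - 4, '|');
        System.out.println(first + " " + second); // prints: 123 -45
    }
}
```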
flink_InternalSourceReaderMetricGroup_recordEmitted | /**
* Called when a new record was emitted with the given timestamp. {@link
* TimestampAssigner#NO_TIMESTAMP} should be passed to indicate that the record did not have a timestamp.
*
* <p>Note this function should be called before the actual record is emitted such that chained
* processing does not influence the statistics.
*/
public void recordEmitted(long timestamp) {
idleStartTime = ACTIVE;
lastEventTime = timestamp;
} | 3.68 |
graphhopper_PrepareLandmarks_setLMSelectionWeighting | /**
* @see LandmarkStorage#setLMSelectionWeighting(Weighting)
*/
public void setLMSelectionWeighting(Weighting w) {
lms.setLMSelectionWeighting(w);
} | 3.68 |
hbase_RemoteWithExtrasException_isDoNotRetry | /** Returns True if origin exception was a do not retry type. */
public boolean isDoNotRetry() {
return this.doNotRetry;
} | 3.68 |
flink_PushCalcPastChangelogNormalizeRule_adjustInputRef | /** Adjust the {@param expr} field indices according to the field index {@param mapping}. */
private RexNode adjustInputRef(RexNode expr, Map<Integer, Integer> mapping) {
return expr.accept(
new RexShuttle() {
@Override
public RexNode visitInputRef(RexInputRef inputRef) {
Integer newIndex = mapping.get(inputRef.getIndex());
return new RexInputRef(newIndex, inputRef.getType());
}
});
} | 3.68 |
hadoop_TimelineDomain_setWriters | /**
* Set the writer (and/or writer group) list string
*
* @param writers the writer (and/or writer group) list string
*/
public void setWriters(String writers) {
this.writers = writers;
} | 3.68 |
hadoop_ApplicationRowKey_encode | /*
* (non-Javadoc)
*
* Encodes ApplicationRowKey object into a byte array with each
* component/field in ApplicationRowKey separated by Separator#QUALIFIERS.
* This leads to an application table row key of the form
* clusterId!userName!flowName!flowRunId!appId If flowRunId in passed
* ApplicationRowKey object is null (and the fields preceding it i.e.
* clusterId, userId and flowName are not null), this returns a row key
* prefix of the form clusterId!userName!flowName! and if appId in
* ApplicationRowKey is null (other 4 components all are not null), this
* returns a row key prefix of the form
* clusterId!userName!flowName!flowRunId! flowRunId is inverted while
* encoding as it helps maintain a descending order for row keys in the
* application table.
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common
* .KeyConverter#encode(java.lang.Object)
*/
@Override
public byte[] encode(ApplicationRowKey rowKey) {
byte[] cluster =
Separator.encode(rowKey.getClusterId(), Separator.SPACE,
Separator.TAB, Separator.QUALIFIERS);
byte[] user =
Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
Separator.QUALIFIERS);
byte[] flow =
Separator.encode(rowKey.getFlowName(), Separator.SPACE,
Separator.TAB, Separator.QUALIFIERS);
byte[] first = Separator.QUALIFIERS.join(cluster, user, flow);
// Note that flowRunId is a long, so we can't encode them all at the same
// time.
if (rowKey.getFlowRunId() == null) {
return Separator.QUALIFIERS.join(first, Separator.EMPTY_BYTES);
}
byte[] second =
Bytes.toBytes(LongConverter.invertLong(
rowKey.getFlowRunId()));
if (rowKey.getAppId() == null || rowKey.getAppId().isEmpty()) {
return Separator.QUALIFIERS.join(first, second, Separator.EMPTY_BYTES);
}
byte[] third = appIDKeyConverter.encode(rowKey.getAppId());
return Separator.QUALIFIERS.join(first, second, third);
} | 3.68 |
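A minimal sketch of the descending-order trick the comment above describes, assuming the inversion is `Long.MAX_VALUE - value` (as in the timeline service's `LongConverter`); the helper here is hypothetical and only illustrates the ordering effect.

```java
public class InvertedRowKeySketch {
    // Hypothetical stand-in for LongConverter.invertLong.
    static long invert(long value) {
        return Long.MAX_VALUE - value;
    }

    public static void main(String[] args) {
        long olderRun = 1_000L; // smaller flowRunId
        long newerRun = 2_000L; // larger flowRunId
        // After inversion the newer run sorts first under ascending byte order,
        // which yields the descending row-key order mentioned above.
        System.out.println(invert(newerRun) < invert(olderRun)); // true
    }
}
```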
hbase_BufferedMutator_setOperationTimeout | /**
* Set operation timeout for this mutator instance
* @deprecated Since 3.0.0, will be removed in 4.0.0. Please set this through the
* {@link BufferedMutatorParams}.
*/
@Deprecated
default void setOperationTimeout(int timeout) {
throw new UnsupportedOperationException(
"The BufferedMutator::setOperationTimeout has not been implemented");
} | 3.68 |
hbase_MetricsHeapMemoryManager_updateBlockCacheDeltaSizeHistogram | /**
* Update the increase/decrease blockcache size histogram
* @param blockCacheDeltaSize the tuning result of blockcache.
*/
public void updateBlockCacheDeltaSizeHistogram(final int blockCacheDeltaSize) {
source.updateBlockCacheDeltaSizeHistogram(blockCacheDeltaSize);
} | 3.68 |
hadoop_BinaryEditsVisitor_close | /**
* Finish the visitor
*/
@Override
public void close(Throwable error) throws IOException {
elfos.setReadyToFlush();
elfos.flushAndSync(true);
elfos.close();
} | 3.68 |
flink_WrappingRuntimeException_unwrap | /**
* Recursively unwraps this WrappingRuntimeException and its causes, getting the first non
* wrapping exception.
*
* @return The first cause that is not a wrapping exception.
*/
public Throwable unwrap() {
Throwable cause = getCause();
return (cause instanceof WrappingRuntimeException)
? ((WrappingRuntimeException) cause).unwrap()
: cause;
} | 3.68 |
morf_DataSetProducerBuilderImpl_open | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#open()
*/
@Override
public void open() {
// Nothing to do
} | 3.68 |
flink_AbstractCheckpointStats_getLatestAckTimestamp | /**
* Returns the ack timestamp of the latest acknowledged subtask or <code>-1</code> if none was
* acknowledged yet.
*
* @return Ack timestamp of the latest acknowledged subtask or <code>-1</code>.
*/
public long getLatestAckTimestamp() {
SubtaskStateStats subtask = getLatestAcknowledgedSubtaskStats();
if (subtask != null) {
return subtask.getAckTimestamp();
} else {
return -1;
}
} | 3.68 |
hbase_MetricsRegionWrapperImpl_getReplicaId | /**
* Get the replica id of this region.
*/
@Override
public int getReplicaId() {
return region.getRegionInfo().getReplicaId();
} | 3.68 |
Activiti_NeedsActiveTaskCmd_getSuspendedTaskException | /**
* Subclasses can override this method to provide a customized exception message that will be thrown when the task is suspended.
*/
protected String getSuspendedTaskException() {
return "Cannot execute operation: task is suspended";
} | 3.68 |
hadoop_ManifestCommitter_maybeSaveSummary | /**
* Save a summary to the report dir if the config option
* is set.
* The IOStatistics of the summary will be updated to the latest
* snapshot of the committer's statistics, so the report is up
* to date.
* The report will updated with the current active stage,
* and if {@code thrown} is non-null, it will be added to the
* diagnostics (and the job tagged as a failure).
* Static for testability.
* @param activeStage active stage
* @param config configuration to use.
* @param report summary file.
* @param thrown any exception indicting failure.
* @param quiet should exceptions be swallowed.
* @param overwrite should the existing file be overwritten
* @return the path of a file, if successfully saved
* @throws IOException if a failure occurred and quiet==false
*/
private static Path maybeSaveSummary(
String activeStage,
ManifestCommitterConfig config,
ManifestSuccessData report,
Throwable thrown,
boolean quiet,
boolean overwrite) throws IOException {
Configuration conf = config.getConf();
String reportDir = conf.getTrimmed(OPT_SUMMARY_REPORT_DIR, "");
if (reportDir.isEmpty()) {
LOG.debug("No summary directory set in " + OPT_SUMMARY_REPORT_DIR);
return null;
}
LOG.debug("Summary directory set in to {}" + OPT_SUMMARY_REPORT_DIR,
reportDir);
// update to the latest statistics
report.snapshotIOStatistics(config.getIOStatistics());
Path reportDirPath = new Path(reportDir);
Path path = new Path(reportDirPath,
createJobSummaryFilename(config.getJobUniqueId()));
if (thrown != null) {
report.recordJobFailure(thrown);
}
report.putDiagnostic(STAGE, activeStage);
// the store operations here is explicitly created for the FS where
// the reports go, which may not be the target FS of the job.
final FileSystem fs = path.getFileSystem(conf);
try (ManifestStoreOperations operations = new ManifestStoreOperationsThroughFileSystem(fs)) {
if (!overwrite) {
// check for file existence so there is no need to worry about
// precisely what exception is raised when overwrite=false and dest file
// exists
try {
FileStatus st = operations.getFileStatus(path);
// get here and the file exists
LOG.debug("Report already exists: {}", st);
return null;
} catch (FileNotFoundException ignored) {
}
}
operations.save(report, path, overwrite);
LOG.info("Job summary saved to {}", path);
return path;
} catch (IOException e) {
LOG.debug("Failed to save summary to {}", path, e);
if (quiet) {
return null;
} else {
throw e;
}
}
} | 3.68 |
flink_SingleOutputStreamOperator_name | /**
* Sets the name of the current data stream. This name is used by the visualization and logging
* during runtime.
*
* @return The named operator.
*/
public SingleOutputStreamOperator<T> name(String name) {
transformation.setName(name);
return this;
} | 3.68 |
hbase_RoundRobinTableInputFormat_getSuperSplits | /**
* Call super-classes' getSplits. Have it out here as its own method so can be overridden.
*/
List<InputSplit> getSuperSplits(JobContext context) throws IOException {
return super.getSplits(context);
} | 3.68 |
framework_SQLContainer_isColumnIdentifierValid | /**
* Checks if the given column identifier is valid to be used with SQLContainer.
* Currently the only non-valid identifier is "rownum" when MSSQL or Oracle
* is used. This is due to the way the SELECT queries are constructed in
* order to implement paging in these databases.
*
* @param identifier
* Column identifier
* @return true if the identifier is valid
*/
private boolean isColumnIdentifierValid(String identifier) {
if (identifier.equalsIgnoreCase("rownum")
&& queryDelegate instanceof TableQuery) {
TableQuery tq = (TableQuery) queryDelegate;
if (tq.getSqlGenerator() instanceof MSSQLGenerator
|| tq.getSqlGenerator() instanceof OracleGenerator) {
return false;
}
}
return true;
} | 3.68 |
AreaShop_GeneralRegion_isSaveRequired | /**
* Check if a save is required.
* @return true if a save is required because some data changed, otherwise false
*/
public boolean isSaveRequired() {
return saveRequired && !isDeleted();
} | 3.68 |
hmily_HmilySQLServerDeleteStatement_getWithSegment | /**
* Get with segment.
*
* @return with segment.
*/
public Optional<HmilyWithSegment> getWithSegment() {
return Optional.ofNullable(withSegment);
} | 3.68 |
framework_GridRowDragger_setSourceDataProviderUpdater | /**
* Sets the source data provider updater, which handles removing items from
* the drag source grid.
* <p>
* By default the items are removed from any {@link ListDataProvider}. If
* another type of data provider is used, this updater should be set to
* handle updating instead.
* <p>
* If you want to skip removing items from the source, you can use
* {@link SourceDataProviderUpdater#NOOP}.
*
* @param sourceDataProviderUpdater
* the drag source data provider updater to set, or {@code null}
* to remove
*/
public void setSourceDataProviderUpdater(
SourceDataProviderUpdater<T> sourceDataProviderUpdater) {
this.sourceDataProviderUpdater = sourceDataProviderUpdater;
} | 3.68 |
flink_Configuration_setClass | /**
* Adds the given key/value pair to the configuration object. The class can be retrieved by
* invoking {@link #getClass(String, Class, ClassLoader)} if it is in the scope of the class
* loader on the caller.
*
* @param key The key of the pair to be added
* @param klazz The value of the pair to be added
* @see #getClass(String, Class, ClassLoader)
*/
public void setClass(String key, Class<?> klazz) {
setValueInternal(key, klazz.getName());
} | 3.68 |
hmily_ConfigEnv_registerConfig | /**
* Register config.
*
* @param config the config
*/
public void registerConfig(final Config config) {
if (config.getClass().getSuperclass().isAssignableFrom(AbstractConfig.class)) {
putBean(config);
}
} | 3.68 |
flink_TieredStorageConfiguration_getRemoteStorageBasePath | /**
* Get the base path on remote storage.
*
* @return string if the remote storage path is configured otherwise null.
*/
public String getRemoteStorageBasePath() {
return remoteStorageBasePath;
} | 3.68 |
hbase_Delete_addColumns | /**
* Delete all versions of the specified column with a timestamp less than or equal to the
* specified timestamp.
* @param family family name
* @param qualifier column qualifier
* @param timestamp maximum version timestamp
* @return this for invocation chaining
*/
public Delete addColumns(final byte[] family, final byte[] qualifier, final long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
List<Cell> list = getCellList(family);
list.add(new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.DeleteColumn));
return this;
} | 3.68 |
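A brief usage sketch for the method above; the column family and qualifier names are placeholders.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteColumnsExample {
    // Deletes every version of cf:col with a timestamp <= cutoff for the given row.
    static void deleteUpTo(Table table, byte[] row, long cutoff) throws IOException {
        Delete delete = new Delete(row)
            .addColumns(Bytes.toBytes("cf"), Bytes.toBytes("col"), cutoff);
        table.delete(delete);
    }
}
```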
framework_VAbstractCalendarPanel_setFocusOutListener | /**
* A focus out listener is triggered when the panel loses focus. This can
* happen either when a user clicks outside the panel or tabs out.
*
* @param listener
* The listener to trigger
*/
public void setFocusOutListener(FocusOutListener listener) {
focusOutListener = listener;
} | 3.68 |
hbase_AbstractMemStore_internalAdd | /*
* Internal version of add() that doesn't clone Cells with the allocator, and doesn't take the
* lock. Callers should ensure they already have the read lock taken
* @param toAdd the cell to add
* @param mslabUsed whether using MSLAB
* @param memstoreSizing object to accumulate changed size
*/
private void internalAdd(MutableSegment currentActive, final Cell toAdd, final boolean mslabUsed,
MemStoreSizing memstoreSizing) {
boolean sizeAddedPreOperation = sizeAddedPreOperation();
currentActive.add(toAdd, mslabUsed, memstoreSizing, sizeAddedPreOperation);
setOldestEditTimeToNow();
} | 3.68 |
framework_StringToLongConverter_getFormat | /**
* Returns the format used by
* {@link #convertToPresentation(Long, Class, Locale)} and
* {@link #convertToModel(String, Class, Locale)}.
*
* @param locale
* The locale to use
* @return A NumberFormat instance
*/
@Override
protected NumberFormat getFormat(Locale locale) {
if (locale == null) {
locale = Locale.getDefault();
}
return NumberFormat.getIntegerInstance(locale);
} | 3.68 |
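A quick demonstration of the locale sensitivity the converter above relies on; this only exercises the JDK's NumberFormat, not the Vaadin class itself.

```java
import java.text.NumberFormat;
import java.util.Locale;

public class IntegerFormatDemo {
    public static void main(String[] args) {
        NumberFormat german = NumberFormat.getIntegerInstance(Locale.GERMANY);
        NumberFormat us = NumberFormat.getIntegerInstance(Locale.US);
        System.out.println(german.format(1234567L)); // 1.234.567
        System.out.println(us.format(1234567L));     // 1,234,567
    }
}
```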
zxing_BinaryBitmap_getBlackMatrix | /**
* Converts a 2D array of luminance data to 1 bit. As above, assume this method is expensive
* and do not call it repeatedly. This method is intended for decoding 2D barcodes and may or
* may not apply sharpening. Therefore, a row from this matrix may not be identical to one
* fetched using getBlackRow(), so don't mix and match between them.
*
* @return The 2D array of bits for the image (true means black).
* @throws NotFoundException if image can't be binarized to make a matrix
*/
public BitMatrix getBlackMatrix() throws NotFoundException {
// The matrix is created on demand the first time it is requested, then cached. There are two
// reasons for this:
// 1. This work will never be done if the caller only installs 1D Reader objects, or if a
// 1D Reader finds a barcode before the 2D Readers run.
// 2. This work will only be done once even if the caller installs multiple 2D Readers.
if (matrix == null) {
matrix = binarizer.getBlackMatrix();
}
return matrix;
} | 3.68 |
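A typical decode path that exercises the cached matrix above, assuming the ZXing core and javase modules are on the classpath; the image path comes from the command line.

```java
import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;

import com.google.zxing.BinaryBitmap;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Result;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.HybridBinarizer;

public class DecodeExample {
    public static void main(String[] args) throws Exception {
        BufferedImage image = ImageIO.read(new File(args[0]));
        BinaryBitmap bitmap =
            new BinaryBitmap(new HybridBinarizer(new BufferedImageLuminanceSource(image)));
        // 2D readers call getBlackMatrix() internally; the matrix is built once and cached.
        Result result = new MultiFormatReader().decode(bitmap);
        System.out.println(result.getText());
    }
}
```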
hbase_RegionStates_include | /**
* Utility. Whether to include region in list of regions. Default is to weed out split and offline
* regions.
* @return True if we should include the <code>node</code> (do not include if split or offline
* unless <code>offline</code> is set to true).
*/
private boolean include(final RegionStateNode node, final boolean offline) {
if (LOG.isTraceEnabled()) {
LOG.trace("WORKING ON " + node + " " + node.getRegionInfo());
}
final RegionInfo hri = node.getRegionInfo();
if (node.isInState(State.SPLIT) || hri.isSplit()) {
return false;
}
if ((node.isInState(State.OFFLINE) || hri.isOffline()) && !offline) {
return false;
}
return (!hri.isOffline() && !hri.isSplit()) || ((hri.isOffline() || hri.isSplit()) && offline);
} | 3.68 |
hbase_ZKWatcher_getRecoverableZooKeeper | /**
* Get the connection to ZooKeeper.
* @return connection reference to zookeeper
*/
public RecoverableZooKeeper getRecoverableZooKeeper() {
return recoverableZooKeeper;
} | 3.68 |
hudi_HoodieBaseFileGroupRecordBuffer_getRecordsIterator | /**
* Create a record iterator for a data block. The records are filtered by a key set specified by {@code keySpecOpt}.
*
* @param dataBlock the data block to read records from
* @param keySpecOpt an optional key spec; when present, only records matching its keys are returned
* @return a pair of the record iterator and the schema of the data block
* @throws IOException if the block records cannot be read
*/
protected Pair<ClosableIterator<T>, Schema> getRecordsIterator(
HoodieDataBlock dataBlock, Option<KeySpec> keySpecOpt) throws IOException {
ClosableIterator<T> blockRecordsIterator;
if (keySpecOpt.isPresent()) {
KeySpec keySpec = keySpecOpt.get();
blockRecordsIterator = dataBlock.getEngineRecordIterator(readerContext, keySpec.getKeys(), keySpec.isFullKey());
} else {
blockRecordsIterator = dataBlock.getEngineRecordIterator(readerContext);
}
return Pair.of(blockRecordsIterator, dataBlock.getSchema());
} | 3.68 |
hudi_SecondaryIndexManager_refresh | /**
* Refresh the specific secondary index
*
* @param metaClient Hoodie table meta client
* @param indexName The target secondary index name
*/
public void refresh(HoodieTableMetaClient metaClient, String indexName) {
// TODO
} | 3.68 |
hadoop_DoubleValueSum_getReport | /**
* @return the string representation of the aggregated value
*/
public String getReport() {
return "" + sum;
} | 3.68 |
hadoop_S3ABlockManager_read | /**
* Reads into the given {@code buffer} {@code size} bytes from the underlying file
* starting at {@code startOffset}.
*
* @param buffer the buffer to read data in to.
* @param startOffset the offset at which reading starts.
* @param size the number bytes to read.
* @return number of bytes read.
*/
@Override
public int read(ByteBuffer buffer, long startOffset, int size)
throws IOException {
return reader.read(buffer, startOffset, size);
} | 3.68 |
dubbo_DynamicConfiguration_publishConfigCas | /**
* Publish config mapped to the given key and group, guarded by a stat (compare-and-swap).
*
* @param key the config key
* @param group the config group
* @param content the config content to publish
* @param ticket the state ticket (stat) used for the compare-and-swap check
* @return true if the config was published successfully, otherwise false
* @throws UnsupportedOperationException if the implementation does not support CAS publishing
*/
default boolean publishConfigCas(String key, String group, String content, Object ticket)
throws UnsupportedOperationException {
return false;
} | 3.68 |
hbase_CellChunkImmutableSegment_reinitializeCellSet | /*------------------------------------------------------------------------*/
// Create CellSet based on CellChunkMap from current ConcurrentSkipListMap based CellSet
// (without compacting iterator)
// This is a service for not-flat immutable segments
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner,
CellSet oldCellSet, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) {
Cell curCell;
Chunk[] chunks = allocIndexChunks(numOfCells);
int currentChunkIdx = 0;
int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
int numUniqueKeys = 0;
Cell prev = null;
try {
while ((curCell = segmentScanner.next()) != null) {
assert (curCell instanceof ExtendedCell);
if (((ExtendedCell) curCell).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
// CellChunkMap assumes all cells are allocated on MSLAB.
// Therefore, cells which are not allocated on MSLAB initially,
// are copied into MSLAB here.
curCell = copyCellIntoMSLAB(curCell, memstoreSizing);
}
if (offsetInCurentChunk + ClassSize.CELL_CHUNK_MAP_ENTRY > chunks[currentChunkIdx].size) {
// continue to the next metadata chunk
currentChunkIdx++;
offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
}
offsetInCurentChunk = createCellReference((ByteBufferKeyValue) curCell,
chunks[currentChunkIdx].getData(), offsetInCurentChunk);
if (action == MemStoreCompactionStrategy.Action.FLATTEN_COUNT_UNIQUE_KEYS) {
// counting number of unique keys
if (prev != null) {
if (!CellUtil.matchingRowColumn(prev, curCell)) {
numUniqueKeys++;
}
} else {
numUniqueKeys++;
}
}
prev = curCell;
}
if (action != MemStoreCompactionStrategy.Action.FLATTEN_COUNT_UNIQUE_KEYS) {
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
} catch (IOException ie) {
throw new IllegalStateException(ie);
} finally {
segmentScanner.close();
}
CellChunkMap ccm = new CellChunkMap(getComparator(), chunks, 0, numOfCells, false);
// update the CellSet of this Segment
this.setCellSet(oldCellSet, new CellSet(ccm, numUniqueKeys));
} | 3.68 |
hbase_Response_getStream | /**
* Gets the input stream instance.
* @return an instance of InputStream class.
*/
public InputStream getStream() {
return this.stream;
} | 3.68 |
framework_DateField_setDateOutOfRangeMessage | /**
* Sets the current error message if the range validation fails.
*
* @param dateOutOfRangeMessage
* - Localizable message which is shown when value (the date) is
* set outside allowed range
*/
public void setDateOutOfRangeMessage(String dateOutOfRangeMessage) {
this.dateOutOfRangeMessage = dateOutOfRangeMessage;
updateRangeValidator();
} | 3.68 |
hadoop_NodePlan_setNodeName | /**
* Sets Node Name.
*
* @param nodeName - Name
*/
public void setNodeName(String nodeName) {
this.nodeName = nodeName;
} | 3.68 |
AreaShop_GeneralRegion_getMaximum | /**
* Get the maximum number of the group that is the limiting factor, assuming actionAllowed() is false.
* @return The maximum
*/
public int getMaximum() {
return maximum;
} | 3.68 |
hbase_WALUtil_doFullMarkerAppendTransaction | /**
* A 'full' WAL transaction involves starting an mvcc transaction followed by an append, an
* optional sync, and then a call to complete the mvcc transaction. This method does it all. Good
* for case of adding a single edit or marker to the WAL.
* <p/>
* This write is for internal use only. Not for external client consumption.
* @return WALKeyImpl that was added to the WAL.
*/
private static WALKeyImpl doFullMarkerAppendTransaction(final WAL wal,
final NavigableMap<byte[], Integer> replicationScope, final RegionInfo hri, final WALEdit edit,
final MultiVersionConcurrencyControl mvcc, final Map<String, byte[]> extendedAttributes,
final boolean sync, final RegionReplicationSink sink) throws IOException {
// TODO: Pass in current time to use?
WALKeyImpl walKey = createWALKey(hri, mvcc, replicationScope, extendedAttributes);
long trx = MultiVersionConcurrencyControl.NONE;
try {
trx = wal.appendMarker(hri, walKey, edit);
WriteEntry writeEntry = walKey.getWriteEntry();
if (sink != null) {
writeEntry.attachCompletionAction(() -> sink.add(walKey, edit,
RpcServer.getCurrentServerCallWithCellScanner().orElse(null)));
}
if (sync) {
wal.sync(trx);
}
// Call complete only here because these are markers only. They are not for clients to read.
mvcc.complete(writeEntry);
} catch (IOException ioe) {
if (walKey.getWriteEntry() != null) {
mvcc.complete(walKey.getWriteEntry());
}
/**
* Here we do not abort the RegionServer for {@link WALSyncTimeoutIOException} as
* {@link HRegion#doWALAppend} does,because WAL Marker just records the internal state and
* seems it is no need to always abort the RegionServer when {@link WAL#sync} timeout,it is
* the internal state transition that determines whether RegionServer is aborted or not.
*/
throw ioe;
}
return walKey;
} | 3.68 |
hudi_HoodieBigQuerySyncClient_updateTableSchema | /**
* Updates the schema for the given table if the schema has changed. The schema passed in will not have the partition columns defined,
* so we add them back to the schema with the values read from the existing BigQuery table. This allows us to keep the partition
* field type in sync with how it is registered in BigQuery.
* @param tableName name of the table in BigQuery
* @param schema latest schema for the table
*/
public void updateTableSchema(String tableName, Schema schema, List<String> partitionFields) {
Table existingTable = bigquery.getTable(TableId.of(projectId, datasetName, tableName));
ExternalTableDefinition definition = existingTable.getDefinition();
Schema remoteTableSchema = definition.getSchema();
// Add the partition fields into the schema to avoid conflicts while updating
List<Field> updatedTableFields = remoteTableSchema.getFields().stream()
.filter(field -> partitionFields.contains(field.getName()))
.collect(Collectors.toList());
updatedTableFields.addAll(schema.getFields());
Schema finalSchema = Schema.of(updatedTableFields);
boolean sameSchema = definition.getSchema() != null && definition.getSchema().equals(finalSchema);
boolean samePartitionFilter = partitionFields.isEmpty()
|| (requirePartitionFilter == (definition.getHivePartitioningOptions().getRequirePartitionFilter() != null && definition.getHivePartitioningOptions().getRequirePartitionFilter()));
if (sameSchema && samePartitionFilter) {
return; // No need to update schema.
}
ExternalTableDefinition.Builder builder = definition.toBuilder();
builder.setSchema(finalSchema);
builder.setAutodetect(false);
if (definition.getHivePartitioningOptions() != null) {
builder.setHivePartitioningOptions(definition.getHivePartitioningOptions().toBuilder().setRequirePartitionFilter(requirePartitionFilter).build());
}
Table updatedTable = existingTable.toBuilder()
.setDefinition(builder.build())
.build();
bigquery.update(updatedTable);
} | 3.68 |
hudi_Type_equals | /**
* We need to override equals because the check {@code intType1 == intType2} can return {@code false}.
* Despite the fact that most subclasses look like singleton with static field {@code INSTANCE},
* they can still be created by deserializer.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof PrimitiveType)) {
return false;
}
PrimitiveType that = (PrimitiveType) o;
return typeId().equals(that.typeId());
} | 3.68 |
hadoop_HsController_job | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#job()
*/
@Override
public void job() {
super.job();
} | 3.68 |
hudi_HoodieAvroUtils_removeFields | /**
* Given an Avro record and list of columns to remove, this method removes the list of columns from
* the given avro record using rewriteRecord method.
* <p>
* To better understand how it removes please check {@link #rewriteRecord(GenericRecord, Schema)}
*/
public static GenericRecord removeFields(GenericRecord record, Set<String> fieldsToRemove) {
Schema newSchema = removeFields(record.getSchema(), fieldsToRemove);
return rewriteRecord(record, newSchema);
} | 3.68 |
pulsar_TxnBatchedPositionImpl_setAckSetByIndex | /**
* Build the ackSet attribute so that the bit at {@link #batchIndex} is false and all other bits are true.
*/
public void setAckSetByIndex(){
if (batchSize == 1){
return;
}
BitSetRecyclable bitSetRecyclable = BitSetRecyclable.create();
bitSetRecyclable.set(0, batchSize, true);
bitSetRecyclable.clear(batchIndex);
long[] ackSet = bitSetRecyclable.toLongArray();
bitSetRecyclable.recycle();
setAckSet(ackSet);
} | 3.68 |
flink_HiveParserSubQueryUtils_checkAggOrWindowing | /*
* is this expr a UDAF invocation; does it imply windowing
* @return
* 0 if implies neither
* 1 if implies aggregation
* 2 if implies count
* 3 if implies windowing
*/
static int checkAggOrWindowing(HiveParserASTNode expressionTree) throws SemanticException {
int exprTokenType = expressionTree.getToken().getType();
if (exprTokenType == HiveASTParser.TOK_FUNCTION
|| exprTokenType == HiveASTParser.TOK_FUNCTIONDI
|| exprTokenType == HiveASTParser.TOK_FUNCTIONSTAR) {
assert (expressionTree.getChildCount() != 0);
if (expressionTree.getChild(expressionTree.getChildCount() - 1).getType()
== HiveASTParser.TOK_WINDOWSPEC) {
return 3;
}
if (expressionTree.getChild(0).getType() == HiveASTParser.Identifier) {
String functionName =
HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
expressionTree.getChild(0).getText());
GenericUDAFResolver udafResolver =
FunctionRegistry.getGenericUDAFResolver(functionName);
if (udafResolver != null) {
// we need to know if it is COUNT since this is specialized for IN/NOT IN
// corr subqueries.
if (udafResolver instanceof GenericUDAFCount) {
return 2;
}
return 1;
}
}
}
int r = 0;
for (int i = 0; i < expressionTree.getChildCount(); i++) {
int c = checkAggOrWindowing((HiveParserASTNode) expressionTree.getChild(i));
r = Math.max(r, c);
}
return r;
} | 3.68 |
hbase_SnapshotDescriptionUtils_completeSnapshot | /**
* Commits the snapshot process by moving the working snapshot to the finalized filepath
* @param snapshotDir The file path of the completed snapshots
* @param workingDir The file path of the in progress snapshots
* @param fs The file system of the completed snapshots
* @param workingDirFs The file system of the in progress snapshots
* @param conf Configuration
* @throws SnapshotCreationException if the snapshot could not be moved
* @throws IOException the filesystem could not be reached
*/
public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs,
FileSystem workingDirFs, final Configuration conf)
throws SnapshotCreationException, IOException {
LOG.debug(
"Sentinel is done, just moving the snapshot from " + workingDir + " to " + snapshotDir);
// If the working and completed snapshot directory are on the same file system, attempt
// to rename the working snapshot directory to the completed location. If that fails,
// or the file systems differ, attempt to copy the directory over, throwing an exception
// if this fails
URI workingURI = workingDirFs.getUri();
URI rootURI = fs.getUri();
if (
(shouldSkipRenameSnapshotDirectories(workingURI, rootURI)
|| !fs.rename(workingDir, snapshotDir))
&& !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf)
) {
throw new SnapshotCreationException("Failed to copy working directory(" + workingDir
+ ") to completed directory(" + snapshotDir + ").");
}
} | 3.68 |
hbase_ZKWatcher_process | /**
* Method called from ZooKeeper for events and connection status.
* <p>
* Valid events are passed along to listeners. Connection status changes are dealt with locally.
*/
@Override
public void process(WatchedEvent event) {
LOG.debug(prefix("Received ZooKeeper Event, " + "type=" + event.getType() + ", " + "state="
+ event.getState() + ", " + "path=" + event.getPath()));
final String spanName = ZKWatcher.class.getSimpleName() + "-" + identifier;
if (!zkEventProcessor.isShutdown()) {
zkEventProcessor.execute(TraceUtil.tracedRunnable(() -> processEvent(event), spanName));
}
} | 3.68 |
hudi_SparkRDDReadClient_tagLocation | /**
* Looks up the index and tags each incoming record with a location of a file that contains the row (if it is actually
* present). Input RDD should contain no duplicates if needed.
*
* @param hoodieRecords Input RDD of Hoodie records
* @return Tagged RDD of Hoodie records
*/
public JavaRDD<HoodieRecord<T>> tagLocation(JavaRDD<HoodieRecord<T>> hoodieRecords) throws HoodieIndexException {
return HoodieJavaRDD.getJavaRDD(
index.tagLocation(HoodieJavaRDD.of(hoodieRecords), context, hoodieTable));
} | 3.68 |
framework_LegacyWindow_addListener | /**
* Adds a new {@link BrowserWindowResizeListener} to this UI. The listener
* will be notified whenever the browser window within which this UI resides
* is resized.
*
* @param resizeListener
* the listener to add
*
* @see BrowserWindowResizeListener#browserWindowResized(com.vaadin.server.Page.BrowserWindowResizeEvent)
* BrowserWindowResizeListener#browserWindowResized(BrowserWindowResizeEvent)
* @see #setResizeLazy(boolean)
*
* @deprecated As of 7.0, use the similarly named api in Page instead
*/
@Deprecated
public void addListener(BrowserWindowResizeListener resizeListener) {
getPage().addBrowserWindowResizeListener(resizeListener);
} | 3.68 |
querydsl_ArrayUtils_subarray | // copied and modified from commons-lang-2.3
// originally licensed under ASL 2.0
public static Object[] subarray(Object[] array, int startIndexInclusive, int endIndexExclusive) {
int newSize = endIndexExclusive - startIndexInclusive;
Class<?> type = array.getClass().getComponentType();
if (newSize <= 0) {
return (Object[]) Array.newInstance(type, 0);
}
Object[] subarray = (Object[]) Array.newInstance(type, newSize);
System.arraycopy(array, startIndexInclusive, subarray, 0, newSize);
return subarray;
} | 3.68 |
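A short usage sketch for the helper above, assuming it lives at `com.querydsl.core.util.ArrayUtils` as in recent Querydsl versions.

```java
import java.util.Arrays;
import com.querydsl.core.util.ArrayUtils;

public class SubarrayDemo {
    public static void main(String[] args) {
        String[] letters = {"a", "b", "c", "d", "e"};
        // Half-open range [1, 4) -> {"b", "c", "d"}; the component type is preserved.
        Object[] middle = ArrayUtils.subarray(letters, 1, 4);
        System.out.println(Arrays.toString(middle)); // [b, c, d]
        // An empty or inverted range yields a zero-length array of the same component type.
        Object[] empty = ArrayUtils.subarray(letters, 3, 3);
        System.out.println(empty.length); // 0
    }
}
```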
flink_NonBufferResponseDecoder_ensureBufferCapacity | /**
* Ensures the message header accumulation buffer has enough capacity for the current message.
*/
private void ensureBufferCapacity() {
if (messageBuffer.capacity() < messageLength) {
messageBuffer.capacity(messageLength);
}
} | 3.68 |
hbase_AccessControlUtil_toUserTablePermissions | /**
* Convert a ListMultimap<String, UserPermission> where the key is the username to a protobuf
* UsersAndPermissions message
* @param perm the list of user and table permissions
* @return the protobuf UserTablePermissions
*/
public static AccessControlProtos.UsersAndPermissions
toUserTablePermissions(ListMultimap<String, UserPermission> perm) {
AccessControlProtos.UsersAndPermissions.Builder builder =
AccessControlProtos.UsersAndPermissions.newBuilder();
for (Map.Entry<String, Collection<UserPermission>> entry : perm.asMap().entrySet()) {
AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder =
AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder();
userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey()));
for (UserPermission userPerm : entry.getValue()) {
userPermBuilder.addPermissions(toPermission(userPerm.getPermission()));
}
builder.addUserPermissions(userPermBuilder.build());
}
return builder.build();
} | 3.68 |
hbase_RowResource_append | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes Append on
* HTable.
* @param model instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response append(final CellSetModel model) {
Table table = null;
Append append = null;
try {
table = servlet.getTable(tableResource.getName());
if (model.getRows().size() != 1) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Number of rows specified is not 1." + CRLF).build();
}
RowModel rowModel = model.getRows().get(0);
byte[] key = rowModel.getKey();
if (key == null) {
key = rowspec.getRow();
}
if (key == null) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Row key found to be null." + CRLF).build();
}
append = new Append(key);
append.setReturnResults(returnResult);
int i = 0;
for (CellModel cell : rowModel.getCells()) {
byte[] col = cell.getColumn();
if (col == null) {
try {
col = rowspec.getColumns()[i++];
} catch (ArrayIndexOutOfBoundsException e) {
col = null;
}
}
if (col == null) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column found to be null." + CRLF).build();
}
byte[][] parts = CellUtil.parseColumn(col);
if (parts.length != 2) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column incorrectly specified." + CRLF).build();
}
append.addColumn(parts[0], parts[1], cell.getValue());
}
if (LOG.isDebugEnabled()) {
LOG.debug("APPEND " + append.toString());
}
Result result = table.append(append);
if (returnResult) {
if (result.isEmpty()) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT)
.entity("Append return empty." + CRLF).build();
}
CellSetModel rModel = new CellSetModel();
RowModel rRowModel = new RowModel(result.getRow());
for (Cell cell : result.listCells()) {
rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
cell.getTimestamp(), CellUtil.cloneValue(cell)));
}
rModel.addRow(rRowModel);
servlet.getMetrics().incrementSucessfulAppendRequests(1);
return Response.ok(rModel).build();
}
servlet.getMetrics().incrementSucessfulAppendRequests(1);
return Response.ok().build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table" + table.getName(), ioe);
}
}
}
} | 3.68 |
dubbo_IOUtils_getURL | /**
* Resolves a resource location into a URL, mirroring Spring's ResourceUtils.getURL behaviour.
*
* @param resourceLocation the location to resolve: a "classpath:" pseudo URL, a standard URL, or a plain file path
* @return a corresponding URL object
*/
public static URL getURL(String resourceLocation) throws FileNotFoundException {
Assert.notNull(resourceLocation, "Resource location must not be null");
if (resourceLocation.startsWith(CommonConstants.CLASSPATH_URL_PREFIX)) {
String path = resourceLocation.substring(CommonConstants.CLASSPATH_URL_PREFIX.length());
ClassLoader cl = ClassUtils.getClassLoader();
URL url = (cl != null ? cl.getResource(path) : ClassLoader.getSystemResource(path));
if (url == null) {
String description = "class path resource [" + path + "]";
throw new FileNotFoundException(description + " cannot be resolved to URL because it does not exist");
}
return url;
}
try {
// try URL
return new URL(resourceLocation);
} catch (MalformedURLException ex) {
// no URL -> treat as file path
try {
return new File(resourceLocation).toURI().toURL();
} catch (MalformedURLException ex2) {
throw new FileNotFoundException(
"Resource location [" + resourceLocation + "] is neither a URL not a well-formed file path");
}
}
} | 3.68 |
pulsar_TxnLogBufferedWriter_internalAsyncAddData | /**
* Append data to queue, if reach {@link #batchedWriteMaxRecords} or {@link #batchedWriteMaxSize}, do flush. And if
* accept a request that {@param data} is too large (larger than {@link #batchedWriteMaxSize}), then two flushes
* are executed:
* 1. Write the data cached in the queue to BK.
* 2. Direct write the large data to BK, this flush event will not record to Metrics.
* This ensures the sequential nature of multiple writes to BK.
*/
private void internalAsyncAddData(T data, AddDataCallback callback, Object ctx){
// Avoid missing callback, do failed callback when error occur before add data to the array.
if (state == State.CLOSING || state == State.CLOSED){
callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
return;
}
int dataLength;
try {
dataLength = dataSerializer.getSerializedSize(data);
} catch (Exception e){
callback.addFailed(new ManagedLedgerInterceptException(e), ctx);
return;
}
if (dataLength >= batchedWriteMaxSize){
trigFlushByLargeSingleData();
ByteBuf byteBuf = null;
try {
byteBuf = dataSerializer.serialize(data);
} catch (Exception e){
callback.addFailed(new ManagedLedgerInterceptException(e), ctx);
return;
}
managedLedger.asyncAddEntry(byteBuf, DisabledBatchCallback.INSTANCE,
AsyncAddArgs.newInstance(callback, ctx, System.currentTimeMillis(), byteBuf));
return;
}
try {
// Why should try-catch here?
// If the recycle mechanism is not executed as expected, exception occurs.
flushContext.addCallback(callback, ctx);
} catch (Exception e){
callback.addFailed(new ManagedLedgerInterceptException(e), ctx);
return;
}
dataArray.add(data);
bytesSize += dataLength;
trigFlushIfReachMaxRecordsOrMaxSize();
} | 3.68 |
framework_ComboBox_buildFilter | /**
* Constructs a filter instance to use when using a Filterable container in
* the <code>ITEM_CAPTION_MODE_PROPERTY</code> mode.
*
* Note that the client side implementation expects the filter string to
* apply to the item caption string it sees, so changing the behavior of
* this method can cause problems.
*
* @param filterString the filter string entered by the user
* @param filteringMode the filtering mode to apply
* @return a filter for the given string and mode, or null if filtering is off or the string is empty
*/
protected Filter buildFilter(String filterString,
FilteringMode filteringMode) {
Filter filter = null;
if (null != filterString && !"".equals(filterString)) {
switch (filteringMode) {
case OFF:
break;
case STARTSWITH:
filter = new SimpleStringFilter(getItemCaptionPropertyId(),
filterString, true, true);
break;
case CONTAINS:
filter = new SimpleStringFilter(getItemCaptionPropertyId(),
filterString, true, false);
break;
}
}
return filter;
} | 3.68 |
flink_DefaultRollingPolicy_build | /** Creates the actual policy. */
public <IN, BucketID> DefaultRollingPolicy<IN, BucketID> build() {
return new DefaultRollingPolicy<>(partSize, rolloverInterval, inactivityInterval);
} | 3.68 |
hibernate-validator_ConstraintValidatorContextImpl_dropLeafNodeIfRequired | /**
* In case nodes are added from within a class-level constraint, the node representing
* the constraint element will be dropped. inIterable(), getKey() etc.
*/
private void dropLeafNodeIfRequired() {
if ( propertyPath.getLeafNode().getKind() == ElementKind.BEAN ) {
propertyPath = PathImpl.createCopyWithoutLeafNode( propertyPath );
}
} | 3.68 |
framework_VNativeButton_onClick | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.ClickHandler#onClick(com.google.gwt.event
* .dom.client.ClickEvent)
*/
@Override
public void onClick(ClickEvent event) {
if (paintableId == null || client == null || cancelNextClick) {
cancelNextClick = false;
return;
}
if (BrowserInfo.get().isWebkit()) {
// Webkit does not focus non-text input elements on click
// (#11854)
setFocus(true);
}
} | 3.68 |
morf_ResultSetMetadataSorter_sortedCopy | /**
* Creates a copy of an {@link Iterable} of columns, where the copy is sorted such that the
* columns appear in the same order as they do in the supplied {@link ResultSet}.
*
* @param columns Columns expected.
* @param resultSet The result set containing the values.
* @return The sorted columns.
*/
public static Collection<Column> sortedCopy(Iterable<Column> columns, ResultSet resultSet) {
Column[] result = new Column[Iterables.size(columns)];
for (Column column : columns) {
try {
result[resultSet.findColumn(column.getName()) - 1] = column;
} catch(SQLException ex) {
throw new IllegalStateException("Could not retrieve column [" + column.getName() + "]", ex);
}
}
return Collections.unmodifiableCollection(Arrays.asList(result));
} | 3.68 |
morf_AbstractSqlDialectTest_expectedGreatest | /**
* @return The expected SQL statement when performing the ANSI GREATEST call
*/
protected String expectedGreatest() {
return "SELECT GREATEST(NULL, bob) FROM " + tableName("MyTable");
} | 3.68 |
hadoop_FileIoProvider_moveFile | /**
* Move the src file to the target using
* {@link FileUtils#moveFile(File, File)}.
*
* @param volume target volume. null if unavailable.
* @param src source path.
* @param target target path.
* @throws IOException
*/
public void moveFile(
@Nullable FsVolumeSpi volume, File src, File target)
throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, MOVE);
try {
faultInjectorEventHook.beforeMetadataOp(volume, MOVE);
FileUtils.moveFile(src, target);
profilingEventHook.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
hbase_MetaTableAccessor_getTableRegionsAndLocations | /**
* Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
* @param connection connection we're using
* @param tableName table to work with, can be null for getting all regions
* @param excludeOfflinedSplitParents don't return split parents
* @return Return list of regioninfos and server addresses.
*/
// What happens here when 1M regions in hbase:meta? This won't scale?
public static List<Pair<RegionInfo, ServerName>> getTableRegionsAndLocations(
Connection connection, @Nullable final TableName tableName,
final boolean excludeOfflinedSplitParents) throws IOException {
if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) {
throw new IOException(
"This method can't be used to locate meta regions;" + " use MetaTableLocator instead");
}
// Make a version of CollectingVisitor that collects RegionInfo and ServerAddress
ClientMetaTableAccessor.CollectRegionLocationsVisitor visitor =
new ClientMetaTableAccessor.CollectRegionLocationsVisitor(excludeOfflinedSplitParents);
scanMeta(connection,
ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION),
ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION), QueryType.REGION,
visitor);
return visitor.getResults();
} | 3.68 |
dubbo_TripleServerStream_supportContentType | /**
* must starts from application/grpc
*/
private boolean supportContentType(String contentType) {
if (contentType == null) {
return false;
}
return contentType.startsWith(TripleConstant.APPLICATION_GRPC);
} | 3.68 |
flink_DataTypeTemplate_mergeWithInnerAnnotation | /**
* Merges this template with an inner annotation. The inner annotation has highest precedence
* and definitely determines the explicit data type (if available).
*/
DataTypeTemplate mergeWithInnerAnnotation(DataTypeFactory typeFactory, DataTypeHint hint) {
final DataTypeTemplate otherTemplate = fromAnnotation(typeFactory, hint);
return new DataTypeTemplate(
otherTemplate.dataType,
rightValueIfNotNull(rawSerializer, otherTemplate.rawSerializer),
rightValueIfNotNull(inputGroup, otherTemplate.inputGroup),
rightValueIfNotNull(version, otherTemplate.version),
rightValueIfNotNull(allowRawGlobally, otherTemplate.allowRawGlobally),
rightValueIfNotNull(allowRawPattern, otherTemplate.allowRawPattern),
rightValueIfNotNull(forceRawPattern, otherTemplate.forceRawPattern),
rightValueIfNotNull(defaultDecimalPrecision, otherTemplate.defaultDecimalPrecision),
rightValueIfNotNull(defaultDecimalScale, otherTemplate.defaultDecimalScale),
rightValueIfNotNull(defaultYearPrecision, otherTemplate.defaultYearPrecision),
rightValueIfNotNull(defaultSecondPrecision, otherTemplate.defaultSecondPrecision));
} | 3.68 |
framework_AbstractColorPicker_setHSVVisibility | /**
* Set the visibility of the HSV Tab.
*
* @param visible
* The visibility
*/
public void setHSVVisibility(boolean visible) {
if (!visible && !rgbVisible && !swatchesVisible) {
throw new IllegalArgumentException("Cannot hide all tabs.");
}
hsvVisible = visible;
if (window != null) {
window.setHSVTabVisible(visible);
}
} | 3.68 |
framework_VCalendar_isDisabledOrReadOnly | /**
* Is the calendar either disabled or read-only.
*
* @return true if the calendar is disabled or read-only, otherwise false
*/
public boolean isDisabledOrReadOnly() {
return disabled || readOnly;
} | 3.68 |
flink_JobResult_isSuccess | /** Returns {@code true} if the job finished successfully. */
public boolean isSuccess() {
return applicationStatus == ApplicationStatus.SUCCEEDED
|| (applicationStatus == ApplicationStatus.UNKNOWN && serializedThrowable == null);
} | 3.68 |