name | code_snippet | score |
---|---|---|
hbase_HttpServer_hasAdministratorAccess | /**
* Does the user sending the HttpServletRequest have the administrator ACLs? If not, the
* response will be modified to send an error to the user.
* @param servletContext the {@link ServletContext} to use
* @param request the {@link HttpServletRequest} to check
* @param response used to send the error response if user does not have admin access.
* @return true if admin-authorized, false otherwise
* @throws IOException if an unauthenticated or unauthorized user tries to access the page
*/
public static boolean hasAdministratorAccess(ServletContext servletContext,
HttpServletRequest request, HttpServletResponse response) throws IOException {
Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
AccessControlList acl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL);
return hasAdministratorAccess(conf, acl, request, response);
} | 3.68 |
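A minimal usage sketch for the helper above, called from a servlet registered on the same `ServletContext`; the servlet class, the `HttpServer` import path, and the response body are assumptions for illustration only:

```java
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.hbase.http.HttpServer; // assumed import path for the class above

public class AdminOnlyServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // If the caller is not an admin, hasAdministratorAccess has already
    // written the error response, so simply return.
    if (!HttpServer.hasAdministratorAccess(getServletContext(), request, response)) {
      return;
    }
    response.setContentType("text/plain");
    response.getWriter().println("admin access granted");
  }
}
```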
shardingsphere-elasticjob_ConfigurationService_checkMaxTimeDiffSecondsTolerable | /**
* Check whether the time difference between the job server and the registry center is within the max tolerable seconds.
*
* @throws JobExecutionEnvironmentException if the time difference exceeds the max tolerable seconds
*/
public void checkMaxTimeDiffSecondsTolerable() throws JobExecutionEnvironmentException {
int maxTimeDiffSeconds = load(true).getMaxTimeDiffSeconds();
if (0 > maxTimeDiffSeconds) {
return;
}
long timeDiff = Math.abs(timeService.getCurrentMillis() - jobNodeStorage.getRegistryCenterTime());
if (timeDiff > maxTimeDiffSeconds * 1000L) {
throw new JobExecutionEnvironmentException(
"Time different between job server and register center exceed '%s' seconds, max time different is '%s' seconds.", timeDiff / 1000, maxTimeDiffSeconds);
}
} | 3.68 |
framework_RegexpValidator_getMatcher | /**
* Returns a new or reused matcher for the pattern.
*
* @param value
* the string to find matches in
* @return a matcher for the string
*/
private Matcher getMatcher(String value) {
if (matcher == null) {
matcher = pattern.matcher(value);
} else {
matcher.reset(value);
}
return matcher;
} | 3.68 |
framework_Panel_getScrollTop | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Scrollable#getScrollTop()
*/
@Override
public int getScrollTop() {
return getState(false).scrollTop;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_validateTimelineBeforeSchedulingCompaction | /**
* Validates the timeline for both main and metadata tables to ensure compaction on MDT can be scheduled.
*/
protected boolean validateTimelineBeforeSchedulingCompaction(Option<String> inFlightInstantTimestamp, String latestDeltaCommitTimeInMetadataTable) {
// we need to find if there are any inflights in data table timeline before or equal to the latest delta commit in metadata table.
// Whenever you want to change this logic, please ensure all below scenarios are considered.
// a. The latest delta commit in MDT may be committed in MDT but have failed in DT, so findInstantsBeforeOrEquals() should be employed.
// b. There could be DT inflights after the latest delta commit in MDT, and that is fine, because the contract is that the latest compaction instant time in MDT means
// every instant before it has already been synced with the metadata table.
// c. Do consider out-of-order commits. For example, c4 from DT could complete before c3, and we can't trigger compaction in MDT with c4 as the base instant time until every
// instant before c4 is synced with the metadata table.
List<HoodieInstant> pendingInstants = dataMetaClient.reloadActiveTimeline().filterInflightsAndRequested()
.findInstantsBeforeOrEquals(latestDeltaCommitTimeInMetadataTable).getInstants();
if (!pendingInstants.isEmpty()) {
checkNumDeltaCommits(metadataMetaClient, dataWriteConfig.getMetadataConfig().getMaxNumDeltacommitsWhenPending());
LOG.info(String.format(
"Cannot compact metadata table as there are %d inflight instants in data table before latest deltacommit in metadata table: %s. Inflight instants in data table: %s",
pendingInstants.size(), latestDeltaCommitTimeInMetadataTable, Arrays.toString(pendingInstants.toArray())));
return false;
}
// Check if there are any pending compaction or log compaction instants in the timeline.
// If pending compact/logCompaction operations are found abort scheduling new compaction/logCompaction operations.
Option<HoodieInstant> pendingLogCompactionInstant =
metadataMetaClient.getActiveTimeline().filterPendingLogCompactionTimeline().firstInstant();
Option<HoodieInstant> pendingCompactionInstant =
metadataMetaClient.getActiveTimeline().filterPendingCompactionTimeline().firstInstant();
if (pendingLogCompactionInstant.isPresent() || pendingCompactionInstant.isPresent()) {
LOG.warn(String.format("Not scheduling compaction or logCompaction, since a pending compaction instant %s or logCompaction %s instant is present",
pendingCompactionInstant, pendingLogCompactionInstant));
return false;
}
return true;
} | 3.68 |
open-banking-gateway_EncryptionKeySerde_fromString | /**
* Convert string to symmetric key with initialization vector.
* @param fromString String to build key from
* @return Deserialized key
*/
@SneakyThrows
public SecretKeyWithIv fromString(String fromString) {
SecretKeyWithIvContainer container = mapper.readValue(fromString, SecretKeyWithIvContainer.class);
return new SecretKeyWithIv(
container.getIv(),
new SecretKeySpec(container.getEncoded(), container.getAlgo())
);
} | 3.68 |
hudi_UtilHelpers_parseSchema | /**
* Parse Schema from file.
*
* @param fs File System
* @param schemaFile Schema File
*/
public static String parseSchema(FileSystem fs, String schemaFile) throws Exception {
// Read schema file.
Path p = new Path(schemaFile);
if (!fs.exists(p)) {
throw new Exception(String.format("Could not find - %s - schema file.", schemaFile));
}
long len = fs.getFileStatus(p).getLen();
ByteBuffer buf = ByteBuffer.allocate((int) len);
try (FSDataInputStream inputStream = fs.open(p)) {
inputStream.readFully(0, buf.array(), 0, buf.array().length);
}
return new String(buf.array());
} | 3.68 |
pulsar_ProducerConfiguration_getMessageRoutingMode | /**
* Get the message routing mode for the partitioned producer.
*
* @return message routing mode, default is round-robin routing.
* @see MessageRoutingMode#RoundRobinPartition
*/
public MessageRoutingMode getMessageRoutingMode() {
return MessageRoutingMode.valueOf(conf.getMessageRoutingMode().toString());
} | 3.68 |
flink_AsyncDataStream_orderedWaitWithRetry | /**
* Adds an AsyncWaitOperator with an AsyncRetryStrategy to support retries of the AsyncFunction. The
* order in which input records are processed is guaranteed to be the same as the input order.
*
* @param in Input {@link DataStream}
* @param func {@link AsyncFunction}
* @param timeout from first invoke to final completion of asynchronous operation, may include
* multiple retries, and will be reset in case of restart
* @param timeUnit of the given timeout
* @param capacity The max number of async i/o operation that can be triggered
* @param asyncRetryStrategy The strategy of reattempt async i/o operation that can be triggered
* @param <IN> Type of input record
* @param <OUT> Type of output record
* @return A new {@link SingleOutputStreamOperator}.
*/
public static <IN, OUT> SingleOutputStreamOperator<OUT> orderedWaitWithRetry(
DataStream<IN> in,
AsyncFunction<IN, OUT> func,
long timeout,
TimeUnit timeUnit,
int capacity,
AsyncRetryStrategy<OUT> asyncRetryStrategy) {
return addOperator(
in,
func,
timeUnit.toMillis(timeout),
capacity,
OutputMode.ORDERED,
asyncRetryStrategy);
} | 3.68 |
flink_CheckpointRequestDecider_chooseQueuedRequestToExecute | /**
* Choose one of the queued requests to execute, if any.
*
* @return request that should be executed
*/
Optional<CheckpointTriggerRequest> chooseQueuedRequestToExecute(
boolean isTriggering, long lastCompletionMs) {
Optional<CheckpointTriggerRequest> request =
chooseRequestToExecute(isTriggering, lastCompletionMs);
request.ifPresent(CheckpointRequestDecider::logInQueueTime);
return request;
} | 3.68 |
flink_TableSink_getOutputType | /**
* @deprecated This method will be removed in future versions as it uses the old type system. It
* is recommended to use {@link #getConsumedDataType()} instead which uses the new type
* system based on {@link DataTypes}. Please make sure to use either the old or the new type
* system consistently to avoid unintended behavior. See the website documentation for more
* information.
*/
@Deprecated
default TypeInformation<T> getOutputType() {
return null;
} | 3.68 |
hadoop_TypedBytesOutput_writeVector | /**
* Writes a vector as a typed bytes sequence.
*
* @param vector the vector to be written
* @throws IOException
*/
public void writeVector(ArrayList vector) throws IOException {
writeVectorHeader(vector.size());
for (Object obj : vector) {
write(obj);
}
} | 3.68 |
framework_WrappedPortletSession_getAttribute | /**
* Returns the object bound with the specified name in this session, or
* <code>null</code> if no object is bound under the name in the given
* scope.
*
* @param name
* a string specifying the name of the object
* @param scope
* session scope of this attribute
*
* @return the object with the specified name
*
* @exception java.lang.IllegalStateException
* if this method is called on an invalidated session, or the
* scope is unknown to the container.
* @exception java.lang.IllegalArgumentException
* if name is <code>null</code>.
*
* @see PortletSession#getAttribute(String, int)
* @see PortletSession#PORTLET_SCOPE
* @see PortletSession#APPLICATION_SCOPE
*
* @since 7.6
*/
public Object getAttribute(String name, int scope) {
return session.getAttribute(name, scope);
} | 3.68 |
framework_NestedMethodProperty_setInstance | /**
* Sets the instance used by this property.
* <p>
* The new instance must be of the same type as the old instance
* <p>
* To be consistent with {@link #setValue(Object)}, this method will fire a
* value change event even if the value stays the same
*
* @param instance
* the instance to use
* @since 7.7.7
*/
public void setInstance(Object instance) {
if (this.instance.getClass() != instance.getClass()) {
throw new IllegalArgumentException("The new instance is of type "
+ instance.getClass().getName()
+ " which does not match the old instance type "
+ this.instance.getClass().getName());
}
this.instance = instance;
fireValueChange();
} | 3.68 |
hbase_AccessController_hasFamilyQualifierPermission | /**
* Returns <code>true</code> if the current user is allowed the given action over at least one of
* the column qualifiers in the given column families.
*/
private boolean hasFamilyQualifierPermission(User user, Action perm,
RegionCoprocessorEnvironment env, Map<byte[], ? extends Collection<byte[]>> familyMap)
throws IOException {
RegionInfo hri = env.getRegion().getRegionInfo();
TableName tableName = hri.getTable();
if (user == null) {
return false;
}
if (familyMap != null && familyMap.size() > 0) {
// at least one family must be allowed
for (Map.Entry<byte[], ? extends Collection<byte[]>> family : familyMap.entrySet()) {
if (family.getValue() != null && !family.getValue().isEmpty()) {
for (byte[] qualifier : family.getValue()) {
if (
getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, perm)
) {
return true;
}
}
} else {
if (getAuthManager().authorizeUserFamily(user, tableName, family.getKey(), perm)) {
return true;
}
}
}
} else if (LOG.isDebugEnabled()) {
LOG.debug("Empty family map passed for permission check");
}
return false;
} | 3.68 |
flink_TableChange_getConstraintName | /** Returns the constraint name. */
public String getConstraintName() {
return constraintName;
} | 3.68 |
hbase_RegionCoprocessorHost_preMemStoreCompaction | /**
* Invoked before in-memory compaction.
*/
public void preMemStoreCompaction(HStore store) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preMemStoreCompaction(this, store);
}
});
} | 3.68 |
pulsar_NonPersistentSubscriptionStatsImpl_add | // if the stats are added for the 1st time, we will need to make a copy of these stats and add it to the current
// stats
public NonPersistentSubscriptionStatsImpl add(NonPersistentSubscriptionStatsImpl stats) {
Objects.requireNonNull(stats);
super.add(stats);
this.msgDropRate += stats.msgDropRate;
return this;
} | 3.68 |
hadoop_XException_format | /**
* Creates a message using a error message template and arguments.
* <p>
* The template must be in JDK <code>MessageFormat</code> syntax
* (using {#} positional parameters).
*
* @param error error code, to get the template from.
* @param args arguments to use for creating the message.
*
* @return the resolved error message.
*/
private static String format(ERROR error, Object... args) {
String template = error.getTemplate();
if (template == null) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < args.length; i++) {
sb.append(" {").append(i).append("}");
}
template = sb.deleteCharAt(0).toString();
}
return error + ": " + MessageFormat.format(template, args);
} | 3.68 |
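The fallback branch above synthesizes a template of positional placeholders ({0} {1} …) when the error code has none; a self-contained sketch of the same `java.text.MessageFormat` substitution, with a made-up error code and path:

```java
import java.text.MessageFormat;

public class MessageFormatDemo {
  public static void main(String[] args) {
    // Positional parameters {0}, {1}, ... are replaced in order; '' renders a literal quote.
    String template = "{0}: could not open ''{1}'' ({2} retries left)";
    String message = MessageFormat.format(template, "E0042", "/tmp/data.bin", 3);
    System.out.println(message); // E0042: could not open '/tmp/data.bin' (3 retries left)
  }
}
```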
dubbo_RpcStatus_beginCount | /**
* @param url
*/
public static boolean beginCount(URL url, String methodName, int max) {
max = (max <= 0) ? Integer.MAX_VALUE : max;
RpcStatus appStatus = getStatus(url);
RpcStatus methodStatus = getStatus(url, methodName);
if (methodStatus.active.get() == Integer.MAX_VALUE) {
return false;
}
for (int i; ; ) {
i = methodStatus.active.get();
if (i == Integer.MAX_VALUE || i + 1 > max) {
return false;
}
if (methodStatus.active.compareAndSet(i, i + 1)) {
break;
}
}
appStatus.active.incrementAndGet();
return true;
} | 3.68 |
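The for-loop above is a classic compare-and-set pattern for capping a concurrent counter; a stripped-down sketch of the same idea using only `java.util.concurrent.atomic`:

```java
import java.util.concurrent.atomic.AtomicInteger;

/** A counter that never exceeds a fixed limit, mirroring the CAS loop above. */
public class BoundedCounter {
  private final AtomicInteger active = new AtomicInteger();
  private final int max;

  public BoundedCounter(int max) {
    this.max = max;
  }

  /** Returns true if a slot was acquired, false if the limit was reached. */
  public boolean tryAcquire() {
    for (;;) {
      int current = active.get();
      if (current + 1 > max) {
        return false; // over the limit, give up without changing the count
      }
      if (active.compareAndSet(current, current + 1)) {
        return true; // we won the race for this slot
      }
      // another thread changed the value first; re-read and retry
    }
  }

  public void release() {
    active.decrementAndGet();
  }
}
```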
flink_DateTimeUtils_parseTimestampMillis | /**
* Parse a date time string to epoch milliseconds based on the given time zone and format. Throws
* {@link ParseException} if parsing fails.
*
* @param dateStr the date time string
* @param format date time string format
* @param tz the time zone
*/
private static long parseTimestampMillis(String dateStr, String format, TimeZone tz)
throws ParseException {
SimpleDateFormat formatter = FORMATTER_CACHE.get(format);
formatter.setTimeZone(tz);
return formatter.parse(dateStr).getTime();
} | 3.68 |
hbase_ColumnRangeFilter_getMinColumn | /** Returns the min column range for the filter */
public byte[] getMinColumn() {
return this.minColumn;
} | 3.68 |
flink_FailureHandlingResultSnapshot_getFailureLabels | /**
* Returns the labels future associated with the failure.
*
* @return the CompletableFuture map of String labels
*/
public CompletableFuture<Map<String, String>> getFailureLabels() {
return failureLabels;
} | 3.68 |
hbase_MobUtils_isMobRegionInfo | /**
* Gets whether the current RegionInfo is a mob one.
* @param regionInfo The current RegionInfo.
* @return If true, the current RegionInfo is a mob one.
*/
public static boolean isMobRegionInfo(RegionInfo regionInfo) {
return regionInfo == null
? false
: getMobRegionInfo(regionInfo.getTable()).getEncodedName()
.equals(regionInfo.getEncodedName());
} | 3.68 |
framework_JsonCodec_jsonEquals | /**
* Compares two json values for deep equality.
*
* This is a helper for overcoming the fact that
* {@link JsonValue#equals(Object)} only does an identity check and
* {@link JsonValue#jsEquals(JsonValue)} is defined to use JavaScript
* semantics where arrays and objects are equals only based on identity.
*
* @since 7.4
* @param a
* the first json value to check, may not be null
* @param b
* the second json value to check, may not be null
* @return <code>true</code> if both json values are the same;
* <code>false</code> otherwise
*/
public static boolean jsonEquals(JsonValue a, JsonValue b) {
assert a != null;
assert b != null;
if (a == b) {
return true;
}
JsonType type = a.getType();
if (type != b.getType()) {
return false;
}
switch (type) {
case NULL:
return true;
case BOOLEAN:
return a.asBoolean() == b.asBoolean();
case NUMBER:
return a.asNumber() == b.asNumber();
case STRING:
return a.asString().equals(b.asString());
case OBJECT:
return jsonObjectEquals((JsonObject) a, (JsonObject) b);
case ARRAY:
return jsonArrayEquals((JsonArray) a, (JsonArray) b);
default:
throw new RuntimeException("Unsupported JsonType: " + type);
}
} | 3.68 |
hbase_HFileArchiver_archiveRegions | /**
* Archive the specified regions in parallel.
* @param conf the configuration to use
* @param fs {@link FileSystem} from which to remove the region
* @param rootDir {@link Path} to the root directory where hbase files are stored (for
* building the archive path)
* @param tableDir {@link Path} to where the table is being stored (for building the archive
* path)
* @param regionDirList {@link Path} to where regions are being stored (for building the archive
* path)
* @throws IOException if the request cannot be completed
*/
public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDir, Path tableDir,
List<Path> regionDirList) throws IOException {
List<Future<Void>> futures = new ArrayList<>(regionDirList.size());
for (Path regionDir : regionDirList) {
Future<Void> future = getArchiveExecutor(conf).submit(() -> {
archiveRegion(fs, rootDir, tableDir, regionDir);
return null;
});
futures.add(future);
}
try {
for (Future<Void> future : futures) {
future.get();
}
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
throw new IOException(e.getCause());
}
} | 3.68 |
flink_HiveParserASTNode_getChildren | /*
* (non-Javadoc)
*
* @see org.apache.hadoop.hive.ql.lib.Node#getChildren()
*/
@Override
public ArrayList<Node> getChildren() {
if (super.getChildCount() == 0) {
return null;
}
ArrayList<Node> retVec = new ArrayList<>();
for (int i = 0; i < super.getChildCount(); ++i) {
retVec.add((Node) super.getChild(i));
}
return retVec;
} | 3.68 |
hadoop_TypedBytesInput_readType | /**
* Reads a type byte and returns the corresponding {@link Type}.
* @return the obtained Type or null when the end of the file is reached
* @throws IOException
*/
public Type readType() throws IOException {
int code = -1;
try {
code = in.readUnsignedByte();
} catch (EOFException eof) {
return null;
}
for (Type type : Type.values()) {
if (type.code == code) {
return type;
}
}
return null;
} | 3.68 |
flink_ResourceCounter_getTotalResourceCount | /**
* Computes the total number of resources in this counter.
*
* @return the total number of resources in this counter
*/
public int getTotalResourceCount() {
return resources.isEmpty() ? 0 : resources.values().stream().reduce(0, Integer::sum);
} | 3.68 |
hbase_RotateFile_read | /**
* Reads the content of the rotate file by selecting the winner file based on the timestamp of the
* data inside the files. It reads the content of both files and selects the one with the latest
* timestamp as the winner. If a file is incomplete or does not exist, it logs the error and moves
* on to the next file. It returns the content of the winner file as a byte array. If none of the
* files have valid data, it returns null.
* @return a byte array containing the data from the winner file, or null if no valid data is
* found.
* @throws IOException if an error occurs while reading the files.
*/
public byte[] read() throws IOException {
HBaseProtos.RotateFileData[] datas = new HBaseProtos.RotateFileData[2];
for (int i = 0; i < 2; i++) {
try {
datas[i] = read(files[i]);
} catch (FileNotFoundException e) {
LOG.debug("file {} does not exist", files[i], e);
} catch (EOFException e) {
LOG.debug("file {} is incomplete", files[i], e);
}
}
int winnerIndex = select(datas);
nextFile = 1 - winnerIndex;
if (datas[winnerIndex] != null) {
prevTimestamp = datas[winnerIndex].getTimestamp();
return datas[winnerIndex].getData().toByteArray();
} else {
return null;
}
} | 3.68 |
morf_DataValueLookupBuilderImpl_hasSameMetadata | /**
* Validation method, for testing only.
*
* @param other The other.
* @return true if equivalent metadata.
*/
@VisibleForTesting
boolean hasSameMetadata(DataValueLookupBuilderImpl other) {
return other.metadata.equals(this.metadata);
} | 3.68 |
hadoop_UnmanagedApplicationManager_shutDownConnections | /**
* Shutdown this UAM client, without killing the UAM in the YarnRM side.
*/
public void shutDownConnections() {
this.heartbeatHandler.shutdown();
this.rmProxyRelayer.shutdown();
} | 3.68 |
hbase_LockManager_requestRegionsLock | /**
* @throws IllegalArgumentException if the regions are not all from the same table.
*/
public long requestRegionsLock(final RegionInfo[] regionInfos, final String description,
final NonceKey nonceKey) throws IllegalArgumentException, IOException {
master.getMasterCoprocessorHost().preRequestLock(null, null, regionInfos, LockType.EXCLUSIVE,
description);
final LockProcedure proc = new LockProcedure(master.getConfiguration(), regionInfos,
LockType.EXCLUSIVE, description, null);
submitProcedure(proc, nonceKey);
master.getMasterCoprocessorHost().postRequestLock(null, null, regionInfos, LockType.EXCLUSIVE,
description);
return proc.getProcId();
} | 3.68 |
framework_TableSortingIndicator_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 8978;
} | 3.68 |
morf_ViewChangesDeploymentHelper_create | /**
* Creates a {@link ViewChangesDeploymentHelper} implementation for the given connection details.
* @param connectionResources connection resources for the data source.
* @return ViewChangesDeploymentHelper.
*/
public ViewChangesDeploymentHelper create(ConnectionResources connectionResources) {
return new ViewChangesDeploymentHelper(connectionResources.sqlDialect(),
createViewListenerFactory.createCreateViewListener(connectionResources),
dropViewListenerFactory.createDropViewListener(connectionResources));
} | 3.68 |
druid_MySqlStatementParser_parseLoop | /**
* parse loop statement with label
*/
public SQLLoopStatement parseLoop(String label) {
SQLLoopStatement loopStmt = new SQLLoopStatement();
loopStmt.setLabelName(label);
accept(Token.LOOP);
this.parseStatementList(loopStmt.getStatements(), -1, loopStmt);
accept(Token.END);
accept(Token.LOOP);
if (lexer.token() != Token.SEMI) {
acceptIdentifier(label);
}
accept(Token.SEMI);
loopStmt.setAfterSemi(true);
return loopStmt;
} | 3.68 |
flink_ObjectIdentifier_ofAnonymous | /**
* This method allows to create an {@link ObjectIdentifier} without catalog and database name,
* in order to propagate anonymous objects with unique identifiers throughout the stack.
*
* <p>Under no circumstances should this method be exposed to users, as it should be used only when
* creating anonymous tables with uniquely generated identifiers.
*/
static ObjectIdentifier ofAnonymous(String objectName) {
return new ObjectIdentifier(
null,
null,
Preconditions.checkNotNull(objectName, "Object name must not be null."));
} | 3.68 |
hadoop_SafeMode_setSafeMode | /**
* Enter, leave, or get safe mode.
*
* @param action One of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
* @throws IOException if set safe mode fails to proceed.
* @return true if the action is successfully accepted, otherwise false means rejected.
*/
default boolean setSafeMode(SafeModeAction action) throws IOException {
return setSafeMode(action, false);
} | 3.68 |
hbase_QuotaFilter_getUserFilter | /** Returns the User filter regex */
public String getUserFilter() {
return userRegex;
} | 3.68 |
morf_FilteredDataSetProducerAdapter_getSchema | /**
* Produce a {@link Schema} that represents the filtered view.
* @see org.alfasoftware.morf.dataset.DataSetProducer#getSchema()
*/
@Override
public Schema getSchema() {
return new SchemaAdapter(delegate.getSchema()) {
@Override
public Table getTable(String name) {
if (includeTable(name)) {
return delegate.getTable(name);
}
throw new IllegalStateException("["+name+"] has been excluded or does not exist");
}
@Override
public boolean tableExists(String name) {
if (includeTable(name)) {
return delegate.tableExists(name);
}
throw new IllegalStateException("["+name+"] has been excluded or does not exist");
}
/**
* If multiple calls to this are expected, consider caching the list of table names.
* @see org.alfasoftware.morf.dataset.SchemaAdapter#tableNames()
*/
@Override
public Collection<String> tableNames() {
return Collections2.filter(delegate.tableNames(), includingPredicate);
}
/**
* If multiple calls to this are expected, consider caching the list of tables.
* @see org.alfasoftware.morf.dataset.SchemaAdapter#tables()
*/
@Override
public Collection<Table> tables() {
return Collections2.filter(delegate.tables(), t -> includingPredicate.apply(t.getName()));
}
};
} | 3.68 |
flink_MultiShotLatch_await | /** Waits until {@link #trigger()} is called. */
public void await() throws InterruptedException {
synchronized (lock) {
while (!triggered) {
lock.wait();
}
triggered = false;
}
} | 3.68 |
flink_Tuple24_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22, f23), where the
* individual fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ","
+ StringUtils.arrayAwareToString(this.f11)
+ ","
+ StringUtils.arrayAwareToString(this.f12)
+ ","
+ StringUtils.arrayAwareToString(this.f13)
+ ","
+ StringUtils.arrayAwareToString(this.f14)
+ ","
+ StringUtils.arrayAwareToString(this.f15)
+ ","
+ StringUtils.arrayAwareToString(this.f16)
+ ","
+ StringUtils.arrayAwareToString(this.f17)
+ ","
+ StringUtils.arrayAwareToString(this.f18)
+ ","
+ StringUtils.arrayAwareToString(this.f19)
+ ","
+ StringUtils.arrayAwareToString(this.f20)
+ ","
+ StringUtils.arrayAwareToString(this.f21)
+ ","
+ StringUtils.arrayAwareToString(this.f22)
+ ","
+ StringUtils.arrayAwareToString(this.f23)
+ ")";
} | 3.68 |
graphhopper_Service_removeDays | /**
* @param service_id the service_id to assign to the newly created copy.
* @param daysToRemove the days of the week on which to deactivate service in the copy.
* @return a copy of this Service with any service on the specified days of the week deactivated.
*/
public Service removeDays(String service_id, EnumSet<DayOfWeek> daysToRemove) {
Service service = new Service(service_id);
// First, duplicate any Calendar in this Service, minus the specified days of the week.
if (this.calendar != null) {
Calendar calendar = new Calendar();
// TODO calendar.getDaysOfWeek/setDaysOfWeek which allow simplifying this section and activeOn below.
calendar.monday = daysToRemove.contains(MONDAY) ? 0 : this.calendar.monday;
calendar.tuesday = daysToRemove.contains(TUESDAY) ? 0 : this.calendar.tuesday;
calendar.wednesday = daysToRemove.contains(WEDNESDAY) ? 0 : this.calendar.wednesday;
calendar.thursday = daysToRemove.contains(THURSDAY) ? 0 : this.calendar.thursday;
calendar.friday = daysToRemove.contains(FRIDAY) ? 0 : this.calendar.friday;
calendar.saturday = daysToRemove.contains(SATURDAY) ? 0 : this.calendar.saturday;
calendar.sunday = daysToRemove.contains(SUNDAY) ? 0 : this.calendar.sunday;
// The new calendar should cover exactly the same time range as the existing one.
calendar.start_date = this.calendar.start_date;
calendar.end_date = this.calendar.end_date;
// Create the bidirectional reference between Calendar and Service.
service.calendar = calendar;
}
// Copy over all exceptions whose dates fall on days of the week that are retained.
this.calendar_dates.forEach((date, exception) -> {
DayOfWeek dow = date.getDayOfWeek();
if (!daysToRemove.contains(dow)) {
CalendarDate newException = exception.clone();
service.calendar_dates.put(date, newException);
}
});
return service;
} | 3.68 |
framework_FieldGroup_bindMemberFields | /**
* Binds member fields found in the given object.
* <p>
* This method processes all (Java) member fields whose type extends
* {@link Field} and that can be mapped to a property id. Property id
* mapping is done based on the field name or on a @{@link PropertyId}
* annotation on the field. All non-null fields for which a property id can
* be determined are bound to the property id.
* </p>
* <p>
* For example:
*
* <pre>
* public class MyForm extends VerticalLayout {
* private TextField firstName = new TextField("First name");
* @PropertyId("last")
* private TextField lastName = new TextField("Last name");
* private TextField age = new TextField("Age"); ... }
*
* MyForm myForm = new MyForm();
* ...
* fieldGroup.bindMemberFields(myForm);
* </pre>
*
* </p>
* This binds the firstName TextField to a "firstName" property in the item,
* lastName TextField to a "last" property and the age TextField to a "age"
* property.
*
* @param objectWithMemberFields
* The object that contains (Java) member fields to bind
* @throws BindException
* If there is a problem binding a field
*/
public void bindMemberFields(Object objectWithMemberFields)
throws BindException {
buildAndBindMemberFields(objectWithMemberFields, false);
} | 3.68 |
pulsar_ResourceGroupService_getRgLocalUsageMessageCount | // Visibility for testing.
protected static double getRgLocalUsageMessageCount (String rgName, String monClassName) {
return rgLocalUsageMessages.labels(rgName, monClassName).get();
} | 3.68 |
hbase_TableRecordReaderImpl_setStartRow | /**
* @param startRow the first row in the split
*/
public void setStartRow(final byte[] startRow) {
this.startRow = startRow;
} | 3.68 |
hadoop_FSTreeTraverser_traverseDirInt | /**
* Iterates the parent directory and adds direct child files to the current
* batch. If the batch size meets the configured threshold, the current batch will be
* submitted for processing.
* <p>
* Locks could be released and reacquired when a batch submission is
* finished.
*
* @param startId
* Id of the start inode.
* @return The inode which was just processed, if lock is held in the entire
* process. Null if lock is released.
* @throws IOException
* @throws InterruptedException
*/
protected INode traverseDirInt(final long startId, INode curr,
List<byte[]> startAfters, final TraverseInfo traverseInfo)
throws IOException, InterruptedException {
assert dir.hasReadLock();
assert dir.getFSNamesystem().hasReadLock();
long lockStartTime = timer.monotonicNow();
Preconditions.checkNotNull(curr, "Current inode can't be null");
checkINodeReady(startId);
final INodeDirectory parent = curr.isDirectory() ? curr.asDirectory()
: curr.getParent();
ReadOnlyList<INode> children = parent
.getChildrenList(Snapshot.CURRENT_STATE_ID);
if (LOG.isDebugEnabled()) {
LOG.debug("Traversing directory {}", parent.getFullPathName());
}
final byte[] startAfter = startAfters.get(startAfters.size() - 1);
boolean lockReleased = false;
for (int i = INodeDirectory.nextChild(children, startAfter); i < children
.size(); ++i) {
final INode inode = children.get(i);
if (!processFileInode(inode, traverseInfo)) {
// inode wasn't processed. Recurse down if it's a dir,
// skip otherwise.
if (!inode.isDirectory()) {
continue;
}
if (!canTraverseDir(inode)) {
continue;
}
// add 1 level to the depth-first search.
curr = inode;
if (!startAfters.isEmpty()) {
startAfters.remove(startAfters.size() - 1);
startAfters.add(curr.getLocalNameBytes());
}
startAfters.add(HdfsFileStatus.EMPTY_NAME);
return lockReleased ? null : curr;
}
if (shouldSubmitCurrentBatch()) {
final byte[] currentStartAfter = inode.getLocalNameBytes();
final String parentPath = parent.getFullPathName();
lockReleased = true;
readUnlock();
submitCurrentBatch(startId);
try {
throttle();
checkPauseForTesting();
} finally {
readLock();
lockStartTime = timer.monotonicNow();
}
checkINodeReady(startId);
// Things could have changed when the lock was released.
// Re-resolve the parent inode.
FSPermissionChecker pc = dir.getPermissionChecker();
INode newParent = dir
.resolvePath(pc, parentPath, FSDirectory.DirOp.READ)
.getLastINode();
if (newParent == null || !newParent.equals(parent)) {
// parent dir is deleted or recreated. We're done.
return null;
}
children = parent.getChildrenList(Snapshot.CURRENT_STATE_ID);
// -1 to counter the ++ on the for loop
i = INodeDirectory.nextChild(children, currentStartAfter) - 1;
}
if ((timer.monotonicNow()
- lockStartTime) > readLockReportingThresholdMs) {
readUnlock();
try {
throttle();
} finally {
readLock();
lockStartTime = timer.monotonicNow();
}
}
}
// Successfully finished this dir, adjust pointers to 1 level up, and
// startAfter this dir.
startAfters.remove(startAfters.size() - 1);
if (!startAfters.isEmpty()) {
startAfters.remove(startAfters.size() - 1);
startAfters.add(curr.getLocalNameBytes());
}
curr = curr.getParent();
return lockReleased ? null : curr;
} | 3.68 |
framework_AbstractSplitPanel_getOldSplitPositionUnit | /**
* Returns the position unit of the split before this change event
* occurred.
*
* @since 8.1
*
* @return the split position unit previously set to the source of this
* event
*/
public Unit getOldSplitPositionUnit() {
return oldUnit;
} | 3.68 |
hmily_HashedWheelTimer_start | /**
* Starts the timer task.
*/
public void start() {
switch (WORKER_STATE_UPDATER.get(this)) {
// Change the worker state: if the current state is INIT, switch it to STARTED;
case WORKER_STATE_INIT:
if (WORKER_STATE_UPDATER.compareAndSet(this, WORKER_STATE_INIT, WORKER_STATE_STARTED)) {
// Start the worker thread;
workerThread.start();
}
break;
case WORKER_STATE_STARTED:
break;
case WORKER_STATE_SHUTDOWN:
throw new IllegalStateException("cannot be started once stopped");
default:
throw new Error("Invalid WorkerState");
}
// Wait until the worker thread has initialized startTime;
while (startTime == 0) {
try {
startTimeInitialized.await();
} catch (InterruptedException ignore) {
// Ignore - it will be ready very soon.
}
}
} | 3.68 |
framework_RadioButtonGroup_getItemDescriptionGenerator | /**
* Gets the item description generator.
*
* @return the item description generator
*
* @since 8.2
*/
public DescriptionGenerator<T> getItemDescriptionGenerator() {
return descriptionGenerator;
} | 3.68 |
hadoop_Time_formatTime | /**
* Convert time in milliseconds to a human readable format.
*
* @param millis millisecond.
* @return a human readable string for the input time
*/
public static String formatTime(long millis) {
return DATE_FORMAT.get().format(millis);
} | 3.68 |
hbase_HRegionFileSystem_createStoreDir | /**
* Create the store directory for the specified family name
* @param familyName Column Family Name
* @return {@link Path} to the directory of the specified family
* @throws IOException if the directory creation fails.
*/
Path createStoreDir(final String familyName) throws IOException {
Path storeDir = getStoreDir(familyName);
if (!fs.exists(storeDir) && !createDir(storeDir))
throw new IOException("Failed creating " + storeDir);
return storeDir;
} | 3.68 |
hbase_BucketAllocator_wastedBytes | /**
* If {@link #bucketCapacity} is not perfectly divisible by this {@link #itemSize()}, the
* remainder will be unusable in buckets of this size. A high value here may be optimized by
* trying to choose bucket sizes which can better divide {@link #bucketCapacity}.
*/
public long wastedBytes() {
return wastedBytes;
} | 3.68 |
hadoop_AuxServiceRecord_version | /**
* Version of the service.
*/
public AuxServiceRecord version(String v) {
this.version = v;
return this;
} | 3.68 |
flink_FsCheckpointStreamFactory_flush | /** Flush buffers to file if their size is above {@link #localStateThreshold}. */
@Override
public void flush() throws IOException {
if (outStream != null || pos > localStateThreshold) {
flushToFile();
}
} | 3.68 |
hibernate-validator_ConstraintCheckFactory_getConstraintChecks | /**
* Returns those checks that have to be performed to validate the given
* annotation at the given element. In case no checks have to be performed
* (e.g. because the given annotation is no constraint annotation) an empty
* {@link ConstraintChecks} instance will be returned. It's therefore always
* safe to operate on the returned object.
*
* @param annotatedElement An annotated element, e.g. a type declaration or a method.
* @param annotation An annotation.
*
* @return The checks to be performed to validate the given annotation at
* the given element.
*/
public ConstraintChecks getConstraintChecks(Element annotatedElement, AnnotationMirror annotation) {
AnnotationType annotationType = constraintHelper.getAnnotationType( annotation );
switch ( annotatedElement.getKind() ) {
case PARAMETER:
return parameterChecks.get( annotationType );
case FIELD:
return fieldChecks.get( annotationType );
case METHOD:
return methodChecks.get( annotationType );
case ANNOTATION_TYPE:
return annotationTypeChecks.get( annotationType );
case CLASS:
case INTERFACE:
case ENUM:
return nonAnnotationTypeChecks.get( annotationType );
default:
return NULL_CHECKS;
}
} | 3.68 |
querydsl_NumberExpression_nullif | /**
* Create a {@code nullif(this, other)} expression
*
* @param other
* @return nullif(this, other)
*/
@Override
public NumberExpression<T> nullif(T other) {
return nullif(ConstantImpl.create(other));
} | 3.68 |
hbase_MetricsConnection_incrementServerOverloadedBackoffTime | /** Update the overloaded backoff time **/
public void incrementServerOverloadedBackoffTime(long time, TimeUnit timeUnit) {
overloadedBackoffTimer.update(time, timeUnit);
} | 3.68 |
dubbo_Bytes_float2bytes | /**
* to byte array.
*
* @param v value.
* @param b byte array.
* @param off array offset.
*/
public static void float2bytes(float v, byte[] b, int off) {
int i = Float.floatToIntBits(v);
b[off + 3] = (byte) i;
b[off + 2] = (byte) (i >>> 8);
b[off + 1] = (byte) (i >>> 16);
b[off + 0] = (byte) (i >>> 24);
} | 3.68 |
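A sketch of a matching decoder using only `Float.intBitsToFloat` from the JDK; it assumes the big-endian layout written by `float2bytes` above, and whether the surrounding Bytes class already ships its own reader is not shown here, so this is purely illustrative:

```java
/** Reads a big-endian float written by float2bytes(v, b, off). */
public static float bytes2float(byte[] b, int off) {
  int i = ((b[off + 0] & 0xFF) << 24)   // highest byte first, as written above
      | ((b[off + 1] & 0xFF) << 16)
      | ((b[off + 2] & 0xFF) << 8)
      | (b[off + 3] & 0xFF);            // lowest byte last
  return Float.intBitsToFloat(i);
}
```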
querydsl_Expressions_comparableOperation | /**
* Create a new Operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
public static <T extends Comparable<?>> ComparableOperation<T> comparableOperation(Class<? extends T> type,
Operator operator, Expression<?>... args) {
return new ComparableOperation<T>(type, operator, args);
} | 3.68 |
dubbo_NopDynamicConfiguration_publishConfig | /**
* @since 2.7.5
*/
@Override
public boolean publishConfig(String key, String group, String content) {
return true;
} | 3.68 |
hadoop_SQLSecretManagerRetriableHandler_execute | /**
* Executes a SQL command and raises retryable errors as
* {@link SQLSecretManagerRetriableException}s so they are recognized by the
* {@link RetryProxy}.
* @param command SQL command to execute
* @throws SQLException When SQL connection errors occur
*/
@Override
public <T> T execute(SQLCommand<T> command) throws SQLException {
try {
return command.doCall();
} catch (SQLException e) {
LOG.warn("Failed to execute SQL command", e);
throw new SQLSecretManagerRetriableException(e);
}
} | 3.68 |
pulsar_ConcurrentLongPairSet_items | /**
* @return a new list of keys with max provided numberOfItems (makes a copy)
*/
public Set<LongPair> items(int numberOfItems) {
return items(numberOfItems, (item1, item2) -> new LongPair(item1, item2));
} | 3.68 |
hadoop_Validate_checkNotNull | /**
* Validates that the given reference argument is not null.
* @param obj the argument reference to validate.
* @param argName the name of the argument being validated.
*/
public static void checkNotNull(Object obj, String argName) {
checkArgument(obj != null, "'%s' must not be null.", argName);
} | 3.68 |
hbase_ReplicationSourceManager_claimQueue | /**
* Claim a replication queue.
* <p/>
* We add a flag to indicate whether we are called by ReplicationSyncUp. For a normal claim-queue
* operation, we are the last step of a SCP, so we can assume that all the WAL files are under
* oldWALs directory. But for ReplicationSyncUp, we may want to claim the replication queue for a
* region server which has not been processed by SCP yet, so we still need to look at its WALs
* directory.
* @param queueId the replication queue id we want to claim
* @param syncUp whether we are called by ReplicationSyncUp
*/
void claimQueue(ReplicationQueueId queueId, boolean syncUp) {
// Wait a bit before transferring the queues, we may be shutting down.
// This sleep may not be enough in some cases.
try {
Thread.sleep(sleepBeforeFailover
+ (long) (ThreadLocalRandom.current().nextFloat() * sleepBeforeFailover));
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting before transferring a queue.");
Thread.currentThread().interrupt();
}
// We try to lock that rs' queue directory
if (server.isStopped()) {
LOG.info("Not transferring queue since we are shutting down");
return;
}
// After claim the queues from dead region server, we will skip to start the
// RecoveredReplicationSource if the peer has been removed. but there's possible that remove a
// peer with peerId = 2 and add a peer with peerId = 2 again during failover. So we need to get
// a copy of the replication peer first to decide whether we should start the
// RecoveredReplicationSource. If the latest peer is not the old peer, we should also skip to
// start the RecoveredReplicationSource, Otherwise the rs will abort (See HBASE-20475).
String peerId = queueId.getPeerId();
ReplicationPeerImpl oldPeer = replicationPeers.getPeer(peerId);
if (oldPeer == null) {
LOG.info("Not transferring queue since the replication peer {} for queue {} does not exist",
peerId, queueId);
return;
}
Map<String, ReplicationGroupOffset> offsets;
try {
offsets = queueStorage.claimQueue(queueId, server.getServerName());
} catch (ReplicationException e) {
LOG.error("ReplicationException: cannot claim dead region ({})'s replication queue",
queueId.getServerName(), e);
server.abort("Failed to claim queue from dead regionserver.", e);
return;
}
if (offsets.isEmpty()) {
// someone else claimed the queue
return;
}
ServerName sourceRS = queueId.getServerWALsBelongTo();
ReplicationQueueId claimedQueueId = queueId.claim(server.getServerName());
ReplicationPeerImpl peer = replicationPeers.getPeer(peerId);
if (peer == null || peer != oldPeer) {
LOG.warn("Skipping failover for peer {} of node {}, peer is null", peerId, sourceRS);
deleteQueue(claimedQueueId);
return;
}
ReplicationSourceInterface src;
try {
src =
createSource(new ReplicationQueueData(claimedQueueId, ImmutableMap.copyOf(offsets)), peer);
} catch (IOException e) {
LOG.error("Can not create replication source for peer {} and queue {}", peerId,
claimedQueueId, e);
server.abort("Failed to create replication source after claiming queue.", e);
return;
}
PriorityQueue<Path> walFiles;
try {
walFiles = getWALFilesToReplicate(sourceRS, syncUp, offsets);
} catch (IOException e) {
LOG.error("Can not list wal files for peer {} and queue {}", peerId, queueId, e);
server.abort("Can not list wal files after claiming queue.", e);
return;
}
// synchronized on oldsources to avoid adding recovered source for the to-be-removed peer
synchronized (oldsources) {
addRecoveredSource(src, oldPeer, claimedQueueId, walFiles);
}
} | 3.68 |
hbase_HBaseTestingUtility_isReadShortCircuitOn | /**
* Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property. This
* allows specifying this parameter on the command line. If not set, the default is true.
*/
public boolean isReadShortCircuitOn() {
final String propName = "hbase.tests.use.shortcircuit.reads";
String readOnProp = System.getProperty(propName);
if (readOnProp != null) {
return Boolean.parseBoolean(readOnProp);
} else {
return conf.getBoolean(propName, false);
}
} | 3.68 |
framework_SystemMessageException_getCause | /**
* @see java.lang.Throwable#getCause()
*/
@Override
public Throwable getCause() {
return cause;
} | 3.68 |
zxing_MultiFinderPatternFinder_selectMultipleBestPatterns | /**
* @return the 3 best {@link FinderPattern}s from our list of candidates. The "best" are
* those that have been detected at least 2 times, and whose module
* size differs from the average among those patterns the least
* @throws NotFoundException if 3 such finder patterns do not exist
*/
private FinderPattern[][] selectMultipleBestPatterns() throws NotFoundException {
List<FinderPattern> possibleCenters = new ArrayList<>();
for (FinderPattern fp : getPossibleCenters()) {
if (fp.getCount() >= 2) {
possibleCenters.add(fp);
}
}
int size = possibleCenters.size();
if (size < 3) {
// Couldn't find enough finder patterns
throw NotFoundException.getNotFoundInstance();
}
/*
* Begin HE modifications to safely detect multiple codes of equal size
*/
if (size == 3) {
return new FinderPattern[][] { possibleCenters.toArray(EMPTY_FP_ARRAY) };
}
// Sort by estimated module size to speed up the upcoming checks
Collections.sort(possibleCenters, new ModuleSizeComparator());
/*
* Now lets start: build a list of tuples of three finder locations that
* - feature similar module sizes
* - are placed in a distance so the estimated module count is within the QR specification
* - have similar distance between upper left/right and left top/bottom finder patterns
* - form a triangle with 90° angle (checked by comparing top right/bottom left distance
* with pythagoras)
*
* Note: we allow each point to be used for more than one code region: this might seem
* counterintuitive at first, but the performance penalty is not that big. At this point,
* we cannot make a good quality decision whether the three finders actually represent
* a QR code, or are just by chance laid out so it looks like there might be a QR code there.
* So, if the layout seems right, lets have the decoder try to decode.
*/
List<FinderPattern[]> results = new ArrayList<>(); // holder for the results
for (int i1 = 0; i1 < (size - 2); i1++) {
FinderPattern p1 = possibleCenters.get(i1);
if (p1 == null) {
continue;
}
for (int i2 = i1 + 1; i2 < (size - 1); i2++) {
FinderPattern p2 = possibleCenters.get(i2);
if (p2 == null) {
continue;
}
// Compare the expected module sizes; if they are really off, skip
float vModSize12 = (p1.getEstimatedModuleSize() - p2.getEstimatedModuleSize()) /
Math.min(p1.getEstimatedModuleSize(), p2.getEstimatedModuleSize());
float vModSize12A = Math.abs(p1.getEstimatedModuleSize() - p2.getEstimatedModuleSize());
if (vModSize12A > DIFF_MODSIZE_CUTOFF && vModSize12 >= DIFF_MODSIZE_CUTOFF_PERCENT) {
// break, since elements are ordered by the module size deviation there cannot be
// any more interesting elements for the given p1.
break;
}
for (int i3 = i2 + 1; i3 < size; i3++) {
FinderPattern p3 = possibleCenters.get(i3);
if (p3 == null) {
continue;
}
// Compare the expected module sizes; if they are really off, skip
float vModSize23 = (p2.getEstimatedModuleSize() - p3.getEstimatedModuleSize()) /
Math.min(p2.getEstimatedModuleSize(), p3.getEstimatedModuleSize());
float vModSize23A = Math.abs(p2.getEstimatedModuleSize() - p3.getEstimatedModuleSize());
if (vModSize23A > DIFF_MODSIZE_CUTOFF && vModSize23 >= DIFF_MODSIZE_CUTOFF_PERCENT) {
// break, since elements are ordered by the module size deviation there cannot be
// any more interesting elements for the given p1.
break;
}
FinderPattern[] test = {p1, p2, p3};
ResultPoint.orderBestPatterns(test);
// Calculate the distances: a = topleft-bottomleft, b=topleft-topright, c = diagonal
FinderPatternInfo info = new FinderPatternInfo(test);
float dA = ResultPoint.distance(info.getTopLeft(), info.getBottomLeft());
float dC = ResultPoint.distance(info.getTopRight(), info.getBottomLeft());
float dB = ResultPoint.distance(info.getTopLeft(), info.getTopRight());
// Check the sizes
float estimatedModuleCount = (dA + dB) / (p1.getEstimatedModuleSize() * 2.0f);
if (estimatedModuleCount > MAX_MODULE_COUNT_PER_EDGE ||
estimatedModuleCount < MIN_MODULE_COUNT_PER_EDGE) {
continue;
}
// Calculate the difference of the edge lengths in percent
float vABBC = Math.abs((dA - dB) / Math.min(dA, dB));
if (vABBC >= 0.1f) {
continue;
}
// Calculate the diagonal length by assuming a 90° angle at topleft
float dCpy = (float) Math.sqrt((double) dA * dA + (double) dB * dB);
// Compare to the real distance in %
float vPyC = Math.abs((dC - dCpy) / Math.min(dC, dCpy));
if (vPyC >= 0.1f) {
continue;
}
// All tests passed!
results.add(test);
}
}
}
if (!results.isEmpty()) {
return results.toArray(EMPTY_FP_2D_ARRAY);
}
// Nothing found!
throw NotFoundException.getNotFoundInstance();
} | 3.68 |
morf_SqlDialect_getSqlForCoalesce | /**
* Converts the coalesce function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForCoalesce(Function function) {
StringBuilder expression = new StringBuilder();
expression.append(getCoalesceFunctionName()).append('(');
boolean first = true;
for (AliasedField f : function.getArguments()) {
if (!first) {
expression.append(", ");
}
expression.append(getSqlFrom(f));
first = false;
}
expression.append(')');
return expression.toString();
} | 3.68 |
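The first-element flag above is the usual hand-rolled comma join; an equivalent sketch with `Collectors.joining`, reusing `getCoalesceFunctionName`, `getSqlFrom`, and `Function.getArguments()` from the snippet (that `getArguments()` returns a `java.util.List` is an assumption):

```java
import java.util.stream.Collectors;

protected String getSqlForCoalesce(Function function) {
  return function.getArguments().stream()
      .map(this::getSqlFrom)                        // render each argument
      .collect(Collectors.joining(", ",             // comma-separate
          getCoalesceFunctionName() + "(", ")"));   // wrap in COALESCE( ... )
}
```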
hbase_ZKConfig_standardizeZKQuorumServerString | /**
* Standardize the ZK quorum string: make it a "server:clientport" list, separated by ','
* @param quorumStringInput a string contains a list of servers for ZK quorum
* @param clientPort the default client port
* @return the string for a list of "server:port" separated by ","
*/
public static String standardizeZKQuorumServerString(String quorumStringInput,
String clientPort) {
String[] serverHosts = quorumStringInput.split(",");
return buildZKQuorumServerString(serverHosts, clientPort);
} | 3.68 |
hbase_TableRegionModel_getEndKey | /** Returns the end key */
@XmlAttribute
public byte[] getEndKey() {
return endKey;
} | 3.68 |
Activiti_DelegateHelper_createExpressionForField | /**
* Creates an {@link Expression} for the {@link FieldExtension}.
*/
public static Expression createExpressionForField(FieldExtension fieldExtension) {
if (StringUtils.isNotEmpty(fieldExtension.getExpression())) {
ExpressionManager expressionManager = Context.getProcessEngineConfiguration().getExpressionManager();
return expressionManager.createExpression(fieldExtension.getExpression());
} else {
return new FixedValue(fieldExtension.getStringValue());
}
} | 3.68 |
flink_ProjectOperator_projectTuple7 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6>
ProjectOperator<T, Tuple7<T0, T1, T2, T3, T4, T5, T6>> projectTuple7() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>> tType =
new TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>>(fTypes);
return new ProjectOperator<T, Tuple7<T0, T1, T2, T3, T4, T5, T6>>(
this.ds, this.fieldIndexes, tType);
} | 3.68 |
hbase_FavoredStochasticBalancer_getRandomGenerator | /** Returns a randomly chosen candidate generator */
@Override
protected CandidateGenerator getRandomGenerator() {
return candidateGenerators.get(ThreadLocalRandom.current().nextInt(candidateGenerators.size()));
} | 3.68 |
hbase_BucketAllocator_allocate | /**
* Allocate a block in this bucket, return the offset representing the position in physical
* space
* @return the offset in the IOEngine
*/
public long allocate() {
assert freeCount > 0; // Else should not have been called
assert sizeIndex != -1;
++usedCount;
long offset = baseOffset + (freeList[--freeCount] * itemAllocationSize);
assert offset >= 0;
return offset;
} | 3.68 |
hadoop_IteratorSelector_getPartition | /**
* The partition for this iterator selector.
* @return partition
*/
public String getPartition() {
return this.partition;
} | 3.68 |
querydsl_Expressions_dateOperation | /**
* Create a new Operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
public static <T extends Comparable<?>> DateOperation<T> dateOperation(Class<? extends T> type,
Operator operator, Expression<?>... args) {
return new DateOperation<T>(type, operator, args);
} | 3.68 |
framework_DateFieldElement_getDate | /**
* Gets the value as a LocalDate object.
*
* @return the current value as a date object, or null if a date is not set
* or if the text field contains an invalid date
*/
public LocalDate getDate() {
String value = getISOValue();
if (value == null) {
return null;
}
return LocalDate.parse(value, getISOFormatter());
} | 3.68 |
flink_WritableSavepoint_removeOperator | /**
* Drop an existing operator from the savepoint.
*
* @param uid The uid of the operator.
* @return A modified savepoint.
*/
@SuppressWarnings("unchecked")
public F removeOperator(String uid) {
metadata.removeOperator(uid);
return (F) this;
} | 3.68 |
flink_SingleInputOperator_clearInputs | /** Removes all inputs. */
public void clearInputs() {
this.input = null;
} | 3.68 |
hudi_CLIUtils_getTimelineInRange | /**
* Gets a {@link HoodieDefaultTimeline} instance containing the instants in the specified range.
*
* @param startTs Start instant time.
* @param endTs End instant time.
* @param includeArchivedTimeline Whether to include instants from the archived timeline.
* @return a {@link HoodieDefaultTimeline} instance containing the instants in the specified range.
*/
public static HoodieDefaultTimeline getTimelineInRange(String startTs, String endTs, boolean includeArchivedTimeline) {
if (isNullOrEmpty(startTs)) {
startTs = getTimeDaysAgo(10);
}
if (isNullOrEmpty(endTs)) {
endTs = getTimeDaysAgo(1);
}
checkArgument(nonEmpty(startTs), "startTs is null or empty");
checkArgument(nonEmpty(endTs), "endTs is null or empty");
HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient();
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
if (includeArchivedTimeline) {
HoodieArchivedTimeline archivedTimeline = metaClient.getArchivedTimeline();
archivedTimeline.loadInstantDetailsInMemory(startTs, endTs);
return archivedTimeline.findInstantsInRange(startTs, endTs).mergeTimeline(activeTimeline);
}
return activeTimeline;
} | 3.68 |
querydsl_Expressions_enumOperation | /**
* Create a new Enum operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @param <T> type of expression
* @return operation expression
*/
public static <T extends Enum<T>> EnumOperation<T> enumOperation(Class<? extends T> type, Operator operator,
Expression<?>... args) {
return new EnumOperation<T>(type, operator, args);
} | 3.68 |
framework_Tree_setHtmlContentAllowed | /**
* Sets whether html is allowed in the item captions. If set to
* <code>true</code>, the captions are passed to the browser as html and the
* developer is responsible for ensuring no harmful html is used. If set to
* <code>false</code>, the content is passed to the browser as plain text.
* The default setting is <code>false</code>
*
* @since 7.6
* @param htmlContentAllowed
* <code>true</code> if the captions are used as html,
* <code>false</code> if used as plain text
*/
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
this.htmlContentAllowed = htmlContentAllowed;
markAsDirty();
} | 3.68 |
flink_HardwareDescription_getNumberOfCPUCores | /**
* Returns the number of CPU cores available to the JVM on the compute node.
*
* @return the number of CPU cores available to the JVM on the compute node
*/
public int getNumberOfCPUCores() {
return this.numberOfCPUCores;
} | 3.68 |
graphhopper_VectorTile_setGeometry | /**
* <pre>
* Contains a stream of commands and parameters (vertices).
* A detailed description on geometry encoding is located in
* section 4.3 of the specification.
* </pre>
*
* <code>repeated uint32 geometry = 4 [packed = true];</code>
*/
public Builder setGeometry(
int index, int value) {
ensureGeometryIsMutable();
geometry_.set(index, value);
onChanged();
return this;
} | 3.68 |
rocketmq-connect_WorkerSinkTask_errorRecordReporter | /**
* error record reporter
*
* @return
*/
public WorkerErrorRecordReporter errorRecordReporter() {
return errorRecordReporter;
} | 3.68 |
hadoop_AbstractS3AStatisticsSource_setIOStatistics | /**
* Setter.
* This must be called in the subclass constructor with
* whatever statistics store the subclass uses.
* @param statistics statistics to set
*/
protected void setIOStatistics(final IOStatisticsStore statistics) {
this.ioStatistics = statistics;
} | 3.68 |
zxing_BitMatrix_getTopLeftOnBit | /**
* This is useful in detecting a corner of a 'pure' barcode.
*
* @return {@code x,y} coordinate of top-left-most 1 bit, or null if it is all white
*/
public int[] getTopLeftOnBit() {
int bitsOffset = 0;
while (bitsOffset < bits.length && bits[bitsOffset] == 0) {
bitsOffset++;
}
if (bitsOffset == bits.length) {
return null;
}
int y = bitsOffset / rowSize;
int x = (bitsOffset % rowSize) * 32;
int theBits = bits[bitsOffset];
int bit = 0;
while ((theBits << (31 - bit)) == 0) {
bit++;
}
x += bit;
return new int[] {x, y};
} | 3.68 |
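The inner while loop above locates the lowest set bit of `theBits`; the JDK computes the same value directly, as this small self-contained check shows (the sample word is arbitrary):

```java
public class LowestSetBitDemo {
  public static void main(String[] args) {
    int theBits = 0b1011_0000; // arbitrary sample word, lowest set bit is 4
    // Hand-rolled scan, as in getTopLeftOnBit():
    int bit = 0;
    while ((theBits << (31 - bit)) == 0) {
      bit++;
    }
    // Equivalent one-liner from the JDK:
    int viaJdk = Integer.numberOfTrailingZeros(theBits);
    System.out.println(bit + " == " + viaJdk); // prints "4 == 4"
  }
}
```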
flink_SkipListUtils_findPredecessor | /**
* Find the predecessor node for the given key at the given level. The key is in the memory
* segment positioning at the given offset.
*
* @param keySegment memory segment which contains the key.
* @param keyOffset offset of the key in the memory segment.
* @param level the level.
* @param levelIndexHeader the head level index.
* @param spaceAllocator the space allocator.
* @return node id before the key at the given level.
*/
static long findPredecessor(
MemorySegment keySegment,
int keyOffset,
int level,
@Nonnull LevelIndexHeader levelIndexHeader,
Allocator spaceAllocator) {
int currentLevel = levelIndexHeader.getLevel();
long currentNode = HEAD_NODE;
long nextNode = levelIndexHeader.getNextNode(currentLevel);
for (; ; ) {
if (nextNode != NIL_NODE) {
int c = compareSegmentAndNode(keySegment, keyOffset, nextNode, spaceAllocator);
if (c > 0) {
currentNode = nextNode;
nextNode =
helpGetNextNode(
currentNode, currentLevel, levelIndexHeader, spaceAllocator);
continue;
}
}
if (currentLevel <= level) {
return currentNode;
}
currentLevel--;
nextNode = helpGetNextNode(currentNode, currentLevel, levelIndexHeader, spaceAllocator);
}
} | 3.68 |
dubbo_ProtobufTypeBuilder_validateMapType | /**
* 1. Maps whose key type is not String are unsupported.<br/>
* Bytes is a primitive type in Proto and is transformed to ByteString.class in Java.<br/>
*
* @param fieldName
* @param typeName
*/
private void validateMapType(String fieldName, String typeName) {
Matcher matcher = MAP_PATTERN.matcher(typeName);
if (!matcher.matches()) {
throw new IllegalArgumentException("Map protobuf property " + fieldName + "of Type " + typeName
+ " can't be parsed.The type name should mathch[" + MAP_PATTERN.toString() + "].");
}
} | 3.68 |
hadoop_Duration_start | /**
* Start
* @return self
*/
public Duration start() {
start = now();
return this;
} | 3.68 |
hbase_BloomFilterChunk_writeBloom | /**
* Writes just the bloom filter to the output array
* @param out OutputStream to place bloom
* @throws IOException Error writing bloom array
*/
public void writeBloom(final DataOutput out) throws IOException {
if (!this.bloom.hasArray()) {
throw new IOException("Only writes ByteBuffer with underlying array.");
}
out.write(this.bloom.array(), this.bloom.arrayOffset(), this.bloom.limit());
} | 3.68 |
framework_VTabsheet_getLeftGap | /**
* Returns the gap between the leftmost visible tab and the tab container
* edge. By default there should be no gap at all, unless the tabs have been
* right-aligned by styling (e.g. Valo style {@code right-aligned-tabs} or
* {@code centered-tabs}).
*
* @return the left gap (in pixels), or zero if no gap
*/
private int getLeftGap() {
int firstVisibleIndex = tb.getFirstVisibleTab() < 0 ? -1
: scrollerIndex;
int gap;
if (firstVisibleIndex < 0) {
// no tabs are visible, the entire empty space is returned
// through getRightGap()
gap = 0;
} else {
Element tabContainer = tb.getElement().getParentElement();
Tab firstVisibleTab = tb.getTab(firstVisibleIndex);
gap = firstVisibleTab.getAbsoluteLeft()
- tabContainer.getAbsoluteLeft();
}
return gap > 0 ? gap : 0;
} | 3.68 |
druid_ListDG_DFS | /*
* Depth-first search traversal of the graph
*/
public void DFS() {
boolean[] visited = new boolean[mVexs.size()]; // vertex visited flags
// initialize all vertices as not visited
for (int i = 0; i < mVexs.size(); i++) {
visited[i] = false;
}
for (int i = 0; i < mVexs.size(); i++) {
if (!visited[i]) {
DFS(i, visited);
}
}
} | 3.68 |
hadoop_ActiveAuditManagerS3A_afterUnmarshalling | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void afterUnmarshalling(Context.AfterUnmarshalling context,
ExecutionAttributes executionAttributes) {
span.afterUnmarshalling(context, executionAttributes);
} | 3.68 |
flink_TimeWindow_modInverse | /** Compute the inverse of (odd) x mod 2^32. */
private int modInverse(int x) {
// Cube gives inverse mod 2^4, as x^4 == 1 (mod 2^4) for all odd x.
int inverse = x * x * x;
// Newton iteration doubles correct bits at each step.
inverse *= 2 - x * inverse;
inverse *= 2 - x * inverse;
inverse *= 2 - x * inverse;
return inverse;
} | 3.68 |
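A quick self-contained check of the Newton-iteration inverse above: for any odd `x`, the 32-bit product `x * modInverse(x)` wraps to exactly 1 (the sample values are arbitrary odd ints):

```java
public class ModInverseCheck {
  /** Same algorithm as modInverse above. */
  static int modInverse(int x) {
    int inverse = x * x * x;      // correct to 4 bits
    inverse *= 2 - x * inverse;   // 8 bits
    inverse *= 2 - x * inverse;   // 16 bits
    inverse *= 2 - x * inverse;   // 32 bits
    return inverse;
  }

  public static void main(String[] args) {
    for (int x : new int[] {1, 3, 12345, 0x7FFFFFFF}) {
      // int multiplication is already arithmetic mod 2^32, so each line ends with "== 1"
      System.out.println(x + " * " + modInverse(x) + " == " + (x * modInverse(x)));
    }
  }
}
```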
hbase_CleanerChore_sortByConsumedSpace | /**
* Sort the given list in (descending) order of the space each element takes
* @param dirs the list to sort, element in it should be directory (not file)
*/
private void sortByConsumedSpace(List<FileStatus> dirs) {
if (dirs == null || dirs.size() < 2) {
// no need to sort for empty or single directory
return;
}
dirs.sort(new Comparator<FileStatus>() {
HashMap<FileStatus, Long> directorySpaces = new HashMap<>();
@Override
public int compare(FileStatus f1, FileStatus f2) {
long f1ConsumedSpace = getSpace(f1);
long f2ConsumedSpace = getSpace(f2);
return Long.compare(f2ConsumedSpace, f1ConsumedSpace);
}
private long getSpace(FileStatus f) {
Long cached = directorySpaces.get(f);
if (cached != null) {
return cached;
}
try {
long space =
f.isDirectory() ? fs.getContentSummary(f.getPath()).getSpaceConsumed() : f.getLen();
directorySpaces.put(f, space);
return space;
} catch (IOException e) {
LOG.trace("Failed to get space consumed by path={}", f, e);
return -1;
}
}
});
} | 3.68 |
flink_InPlaceMutableHashTable_emit | /** Emits all elements currently held by the table to the collector. */
public void emit() throws IOException {
T record = buildSideSerializer.createInstance();
EntryIterator iter = getEntryIterator();
while ((record = iter.next(record)) != null && !closed) {
outputCollector.collect(record);
if (!objectReuseEnabled) {
record = buildSideSerializer.createInstance();
}
}
} | 3.68 |
flink_Path_initialize | /**
* Initializes a path object given the scheme, authority and path string.
*
* @param scheme the scheme string.
* @param authority the authority string.
* @param path the path string.
*/
private void initialize(String scheme, String authority, String path) {
try {
this.uri = new URI(scheme, authority, normalizePath(path), null, null).normalize();
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
} | 3.68 |
hbase_SnapshotDescriptionUtils_getSnapshotRootDir | /**
* Get the snapshot root directory. All the snapshots are kept under this directory, i.e.
* ${hbase.rootdir}/.snapshot
* @param rootDir hbase root directory
* @return the base directory in which all snapshots are kept
*/
public static Path getSnapshotRootDir(final Path rootDir) {
return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
} | 3.68 |