name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
flink_AllWindowedStream_allowedLateness | /**
* Sets the time by which elements are allowed to be late. Elements that arrive behind the
* watermark by more than the specified time will be dropped. By default, the allowed lateness
* is {@code 0L}.
*
* <p>Setting an allowed lateness is only valid for event-time windows.
*/
@PublicEvolving
public AllWindowedStream<T, W> allowedLateness(Time lateness) {
final long millis = lateness.toMilliseconds();
checkArgument(millis >= 0, "The allowed lateness cannot be negative.");
this.allowedLateness = millis;
return this;
} | 3.68 |
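For context, a minimal usage sketch of this setter on an event-time window. Only `allowedLateness` comes from the snippet above; the window assigner, the concrete durations, and the reduce function are illustrative assumptions.

```java
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class AllowedLatenessExample {
    public static DataStream<Long> sumPerWindow(DataStream<Long> input) {
        // Tumbling event-time windows of 10 seconds; elements arriving up to
        // 5 seconds behind the watermark still update the window result.
        return input
                .windowAll(TumblingEventTimeWindows.of(Time.seconds(10)))
                .allowedLateness(Time.seconds(5))
                .reduce(Long::sum);
    }
}
```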
hadoop_ServiceLauncher_getClassLoader | /**
* Override point: get the classloader to use.
* @return the classloader for loading a service class.
*/
protected ClassLoader getClassLoader() {
return this.getClass().getClassLoader();
} | 3.68 |
flink_FlinkSemiAntiJoinFilterTransposeRule_onMatch | // implement RelOptRule
public void onMatch(RelOptRuleCall call) {
LogicalJoin join = call.rel(0);
LogicalFilter filter = call.rel(1);
RelNode newJoin =
LogicalJoin.create(
filter.getInput(),
join.getRight(),
join.getHints(),
join.getCondition(),
join.getVariablesSet(),
join.getJoinType());
final RelFactories.FilterFactory factory = RelFactories.DEFAULT_FILTER_FACTORY;
RelNode newFilter = factory.createFilter(newJoin, filter.getCondition(), ImmutableSet.of());
call.transformTo(newFilter);
} | 3.68 |
framework_LoginForm_setUsernameCaption | /**
* Sets the caption of the user name field. Note that the caption can only
* be set with this method before the login form has been initialized
* (attached).
* <p>
* As an alternative to calling this method, the method
* {@link #createUsernameField()} can be overridden.
*
* @param usernameCaption
* the caption to set for the user name field
*/
public void setUsernameCaption(String usernameCaption) {
this.usernameCaption = usernameCaption;
} | 3.68 |
hbase_LocalHBaseCluster_getLiveMasters | /**
* @return List of running master servers (Some servers may have been killed or aborted during
* lifetime of cluster; these servers are not included in this list).
*/
public List<JVMClusterUtil.MasterThread> getLiveMasters() {
List<JVMClusterUtil.MasterThread> liveServers = new ArrayList<>();
List<JVMClusterUtil.MasterThread> list = getMasters();
for (JVMClusterUtil.MasterThread mt : list) {
if (mt.isAlive()) {
liveServers.add(mt);
}
}
return liveServers;
} | 3.68 |
pulsar_AuthorizationProvider_allowTopicOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
default Boolean allowTopicOperation(TopicName topicName,
String role,
TopicOperation operation,
AuthenticationDataSource authData) {
try {
return allowTopicOperationAsync(topicName, role, operation, authData).get();
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
zxing_AztecReader_decode | /**
 * Locates and decodes an Aztec code in an image.
 *
 * @return a String representing the content encoded by the Aztec code
 * @throws NotFoundException if an Aztec code cannot be found
 * @throws FormatException if an Aztec code cannot be decoded
*/
@Override
public Result decode(BinaryBitmap image) throws NotFoundException, FormatException {
return decode(image, null);
} | 3.68 |
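A hedged usage sketch of this reader, assuming a `java.awt.image.BufferedImage` as input; the luminance-source and binarizer helpers used to build the `BinaryBitmap` come from the zxing javase module, not from the snippet above.

```java
import java.awt.image.BufferedImage;

import com.google.zxing.BinaryBitmap;
import com.google.zxing.Result;
import com.google.zxing.aztec.AztecReader;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.HybridBinarizer;

public class AztecDecodeExample {
    public static String decodeAztec(BufferedImage image) throws Exception {
        // Wrap the image in a luminance source and binarize it before decoding.
        BinaryBitmap bitmap =
                new BinaryBitmap(new HybridBinarizer(new BufferedImageLuminanceSource(image)));
        Result result = new AztecReader().decode(bitmap);
        return result.getText();
    }
}
```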
framework_StaticSection_removeRow | /**
* Removes the given row from this section.
*
* @param row
* the row to remove, not null
* @throws IllegalArgumentException
* if this section does not contain the row
*/
public void removeRow(Object row) {
Objects.requireNonNull(row, "row cannot be null");
int index = rows.indexOf(row);
if (index < 0) {
throw new IllegalArgumentException(
"Section does not contain the given row");
}
removeRow(index);
} | 3.68 |
flink_Pool_add | /**
* Adds an entry to the pool with an optional payload. This method fails if called more often
* than the pool capacity specified during construction.
*/
public synchronized void add(T object) {
if (poolSize >= poolCapacity) {
throw new IllegalStateException("No space left in pool");
}
poolSize++;
addBack(object);
} | 3.68 |
morf_AbstractSqlDialectTest_testSpecifiedValueInsertWithTableInDifferentSchema | /**
 * Tests an insert statement where the value for each column (except the id) has been explicitly specified.
*/
@Test
public void testSpecifiedValueInsertWithTableInDifferentSchema() {
InsertStatement stmt = new InsertStatement().into(new TableReference("MYSCHEMA", TEST_TABLE)).values(
new FieldLiteral("Escap'd").as(STRING_FIELD),
new FieldLiteral(7).as(INT_FIELD),
new FieldLiteral(11.25).as(FLOAT_FIELD),
new FieldLiteral(20100405).as(DATE_FIELD),
new FieldLiteral(true).as(BOOLEAN_FIELD),
new FieldLiteral('X').as(CHAR_FIELD)
);
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertSQLEquals("Generated SQL not as expected", expectedSpecifiedValueInsertWithTableInDifferentSchema(), sql);
} | 3.68 |
hibernate-validator_ReflectionHelper_isCollection | /**
* Indicates whether the given type represents a collection of elements or not (i.e. whether it is an
* {@code Iterable}, {@code Map} or array type).
*/
public static boolean isCollection(Type type) {
return isIterable( type ) ||
isMap( type ) ||
TypeHelper.isArray( type );
} | 3.68 |
morf_SqlServerDialect_renameIndexStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#renameIndexStatements(org.alfasoftware.morf.metadata.Table, java.lang.String, java.lang.String)
*/
@Override
public Collection<String> renameIndexStatements(Table table, String fromIndexName, String toIndexName) {
return ImmutableList.of(String.format("sp_rename N'%s%s.%s', N'%s', N'INDEX'", schemaNamePrefix(), table.getName(), fromIndexName, toIndexName));
} | 3.68 |
flink_StreamExecutionEnvironment_createRemoteEnvironment | /**
* Creates a {@link RemoteStreamEnvironment}. The remote environment sends (parts of) the
* program to a cluster for execution. Note that all file paths used in the program must be
* accessible from the cluster. The execution will use the specified parallelism.
*
* @param host The host name or address of the master (JobManager), where the program should be
* executed.
* @param port The port of the master (JobManager), where the program should be executed.
* @param clientConfig The configuration used by the client that connects to the remote cluster.
* @param jarFiles The JAR files with code that needs to be shipped to the cluster. If the
* program uses user-defined functions, user-defined input formats, or any libraries, those
* must be provided in the JAR files.
* @return A remote environment that executes the program on a cluster.
*/
public static StreamExecutionEnvironment createRemoteEnvironment(
String host, int port, Configuration clientConfig, String... jarFiles) {
return new RemoteStreamEnvironment(host, port, clientConfig, jarFiles);
} | 3.68 |
flink_Tuple9_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8>
Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8> of(
T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8) {
return new Tuple9<>(f0, f1, f2, f3, f4, f5, f6, f7, f8);
} | 3.68 |
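A small example of the type inference the Javadoc describes; the field values here are arbitrary.

```java
import org.apache.flink.api.java.tuple.Tuple9;

public class Tuple9Example {
    public static void main(String[] args) {
        // The generic arguments are inferred from the values; no explicit
        // new Tuple9<Integer, String, ...>(...) is needed.
        Tuple9<Integer, String, Double, Long, Boolean, Byte, Short, Float, Character> t =
                Tuple9.of(1, "a", 2.0, 3L, true, (byte) 4, (short) 5, 6.0f, 'x');
        System.out.println(t.f1); // prints "a"
    }
}
```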
flink_PartitionTempFileManager_listTaskTemporaryPaths | /** Returns task temporary paths in this checkpoint. */
public static List<Path> listTaskTemporaryPaths(
FileSystem fs, Path basePath, BiPredicate<Integer, Integer> taskAttemptFilter)
throws Exception {
List<Path> taskTmpPaths = new ArrayList<>();
if (fs.exists(basePath)) {
for (FileStatus taskStatus : fs.listStatus(basePath)) {
final String taskDirName = taskStatus.getPath().getName();
final Matcher matcher = TASK_DIR_PATTERN.matcher(taskDirName);
if (matcher.matches()) {
final int subtaskIndex = Integer.parseInt(matcher.group(1));
final int attemptNumber = Integer.parseInt(matcher.group(2));
if (taskAttemptFilter.test(subtaskIndex, attemptNumber)) {
taskTmpPaths.add(taskStatus.getPath());
}
}
}
} else {
LOG.warn(
"The path {} doesn't exist. Maybe no data is generated in the path and the path is not created.",
basePath);
}
return taskTmpPaths;
} | 3.68 |
hbase_AuthManager_refreshTableCacheFromWritable | /**
* Update acl info for table.
* @param table name of table
* @param data updated acl data
 * @throws IOException exception when deserializing data
*/
public void refreshTableCacheFromWritable(TableName table, byte[] data) throws IOException {
if (data != null && data.length > 0) {
try {
ListMultimap<String, Permission> perms = PermissionStorage.readPermissions(data, conf);
if (perms != null) {
if (Bytes.equals(table.getName(), PermissionStorage.ACL_GLOBAL_NAME)) {
updateGlobalCache(perms);
} else {
updateTableCache(table, perms);
}
}
} catch (DeserializationException e) {
throw new IOException(e);
}
} else {
LOG.info("Skipping permission cache refresh because writable data is empty");
}
} | 3.68 |
hadoop_ErasureCodingPolicyState_write | /** Write to out. */
public void write(DataOutput out) throws IOException {
out.writeByte(ordinal());
} | 3.68 |
flink_MailboxProcessor_sendPoisonMail | /** Sends a mail with first priority for internal needs. */
private void sendPoisonMail(RunnableWithException mail) {
mailbox.runExclusively(
() -> {
// keep state check and poison mail enqueuing atomic, such that no intermediate
// #close may cause a
// MailboxStateException in #sendPriorityMail.
if (mailbox.getState() == TaskMailbox.State.OPEN) {
sendControlMail(mail, "poison mail");
}
});
} | 3.68 |
framework_AbsoluteLayout_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#readDesign(org.jsoup.nodes .Node,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
// process default attributes
super.readDesign(design, designContext);
// handle children
for (Element childComponent : design.children()) {
Attributes attr = childComponent.attributes();
Component newChild = designContext.readDesign(childComponent);
StringBuilder css = new StringBuilder();
if (attr.hasKey(ATTR_TOP)) {
css.append("top:").append(attr.get(ATTR_TOP)).append(';');
}
if (attr.hasKey(ATTR_RIGHT)) {
css.append("right:").append(attr.get(ATTR_RIGHT)).append(';');
}
if (attr.hasKey(ATTR_BOTTOM)) {
css.append("bottom:").append(attr.get(ATTR_BOTTOM)).append(';');
}
if (attr.hasKey(ATTR_LEFT)) {
css.append("left:").append(attr.get(ATTR_LEFT)).append(';');
}
if (attr.hasKey(ATTR_Z_INDEX)) {
css.append("z-index:").append(attr.get(ATTR_Z_INDEX))
.append(';');
}
addComponent(newChild, css.toString());
}
} | 3.68 |
framework_CalendarComponentEvents_getWeek | /**
 * Get the week as an integer. See {@link java.util.Calendar} for the allowed
 * values.
 *
 * @return Week as an integer.
*/
public int getWeek() {
return week;
} | 3.68 |
flink_TypeExtractor_getUnaryOperatorReturnType | /**
* Returns the unary operator's return type.
*
* <p>This method can extract a type in 4 different ways:
*
* <p>1. By using the generics of the base class like MyFunction<X, Y, Z, IN, OUT>. This is what
* outputTypeArgumentIndex (in this example "4") is good for.
*
* <p>2. By using input type inference SubMyFunction<T, String, String, String, T>. This is what
* inputTypeArgumentIndex (in this example "0") and inType is good for.
*
* <p>3. By using the static method that a compiler generates for Java lambdas. This is what
* lambdaOutputTypeArgumentIndices is good for. Given that MyFunction has the following single
* abstract method:
*
* <pre>
* <code>
* void apply(IN value, Collector<OUT> value)
* </code>
* </pre>
*
* <p>Lambda type indices allow the extraction of a type from lambdas. To extract the output
* type <b>OUT</b> from the function one should pass {@code new int[] {1, 0}}. "1" for selecting
* the parameter and 0 for the first generic in this type. Use {@code TypeExtractor.NO_INDEX}
* for selecting the return type of the lambda for extraction or if the class cannot be a lambda
* because it is not a single abstract method interface.
*
* <p>4. By using interfaces such as {@link TypeInfoFactory} or {@link ResultTypeQueryable}.
*
* <p>See also comments in the header of this class.
*
* @param function Function to extract the return type from
* @param baseClass Base class of the function
* @param inputTypeArgumentIndex Index of input generic type in the base class specification
* (ignored if inType is null)
* @param outputTypeArgumentIndex Index of output generic type in the base class specification
* @param lambdaOutputTypeArgumentIndices Table of indices of the type argument specifying the
 *     output type. See example.
* @param inType Type of the input elements (In case of an iterable, it is the element type) or
* null
* @param functionName Function name
* @param allowMissing Can the type information be missing (this generates a MissingTypeInfo for
* postponing an exception)
* @param <IN> Input type
* @param <OUT> Output type
* @return TypeInformation of the return type of the function
*/
@SuppressWarnings("unchecked")
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getUnaryOperatorReturnType(
Function function,
Class<?> baseClass,
int inputTypeArgumentIndex,
int outputTypeArgumentIndex,
int[] lambdaOutputTypeArgumentIndices,
TypeInformation<IN> inType,
String functionName,
boolean allowMissing) {
Preconditions.checkArgument(
inType == null || inputTypeArgumentIndex >= 0,
"Input type argument index was not provided");
Preconditions.checkArgument(
outputTypeArgumentIndex >= 0, "Output type argument index was not provided");
Preconditions.checkArgument(
lambdaOutputTypeArgumentIndices != null,
"Indices for output type arguments within lambda not provided");
// explicit result type has highest precedence
if (function instanceof ResultTypeQueryable) {
return ((ResultTypeQueryable<OUT>) function).getProducedType();
}
// perform extraction
try {
final LambdaExecutable exec;
try {
exec = checkAndExtractLambda(function);
} catch (TypeExtractionException e) {
throw new InvalidTypesException("Internal error occurred.", e);
}
if (exec != null) {
// parameters must be accessed from behind, since JVM can add additional parameters
// e.g. when using local variables inside lambda function
// paramLen is the total number of parameters of the provided lambda, it includes
// parameters added through closure
final int paramLen = exec.getParameterTypes().length;
final Method sam = TypeExtractionUtils.getSingleAbstractMethod(baseClass);
// number of parameters the SAM of implemented interface has; the parameter indexing
// applies to this range
final int baseParametersLen = sam.getParameterCount();
final Type output;
if (lambdaOutputTypeArgumentIndices.length > 0) {
output =
TypeExtractionUtils.extractTypeFromLambda(
baseClass,
exec,
lambdaOutputTypeArgumentIndices,
paramLen,
baseParametersLen);
} else {
output = exec.getReturnType();
TypeExtractionUtils.validateLambdaType(baseClass, output);
}
return new TypeExtractor().privateCreateTypeInfo(output, inType, null);
} else {
if (inType != null) {
validateInputType(
baseClass, function.getClass(), inputTypeArgumentIndex, inType);
}
return new TypeExtractor()
.privateCreateTypeInfo(
baseClass,
function.getClass(),
outputTypeArgumentIndex,
inType,
null);
}
} catch (InvalidTypesException e) {
if (allowMissing) {
return (TypeInformation<OUT>)
new MissingTypeInfo(
functionName != null ? functionName : function.toString(), e);
} else {
throw e;
}
}
} | 3.68 |
framework_VDateField_getResolutionVariable | /**
* Returns a resolution variable name for the given {@code resolution}.
*
* @param resolution
* the given resolution
* @return the resolution variable name
*/
public String getResolutionVariable(R resolution) {
return resolution.name().toLowerCase(Locale.ROOT);
} | 3.68 |
hbase_RateLimiter_update | /**
 * Sets the current instance of RateLimiter to new values. If the current limit is smaller than
 * the new limit, bump up the available resources. Otherwise, allow clients to use up the
 * previously available resources.
*/
public synchronized void update(final RateLimiter other) {
this.tunit = other.tunit;
if (this.limit < other.limit) {
// If avail is capped to this.limit, it will never overflow,
// otherwise, avail may overflow, just be careful here.
long diff = other.limit - this.limit;
if (this.avail <= Long.MAX_VALUE - diff) {
this.avail += diff;
this.avail = Math.min(this.avail, other.limit);
} else {
this.avail = other.limit;
}
}
this.limit = other.limit;
} | 3.68 |
hadoop_NodePlan_getNodeName | /**
* Returns the DataNode URI.
*
* @return URI
*/
public String getNodeName() {
return nodeName;
} | 3.68 |
framework_ProgressIndicator_getPollingInterval | /**
* Gets the interval that component checks for progress.
*
* @return the interval in milliseconds.
*/
public int getPollingInterval() {
return getState(false).pollingInterval;
} | 3.68 |
hadoop_StoragePolicySatisfyManager_addPathId | /**
* Adds the sps path to SPSPathIds list.
 * @param id the path id to be added
*/
public void addPathId(long id) {
synchronized (pathsToBeTraversed) {
pathsToBeTraversed.add(id);
}
} | 3.68 |
hbase_ScannerContext_incrementSizeProgress | /**
* Progress towards the size limit has been made. Increment internal tracking of size progress
*/
void incrementSizeProgress(long dataSize, long heapSize) {
if (skippingRow) {
return;
}
long curDataSize = progress.getDataSize();
progress.setDataSize(curDataSize + dataSize);
long curHeapSize = progress.getHeapSize();
progress.setHeapSize(curHeapSize + heapSize);
} | 3.68 |
hbase_TableSplit_getStartRow | /**
* Returns the start row.
* @return The start row.
*/
public byte[] getStartRow() {
return startRow;
} | 3.68 |
hbase_User_create | /**
* Wraps an underlying {@code UserGroupInformation} instance.
* @param ugi The base Hadoop user
*/
public static User create(UserGroupInformation ugi) {
if (ugi == null) {
return null;
}
return new SecureHadoopUser(ugi);
} | 3.68 |
pulsar_ManagedLedgerConfig_setRetentionTime | /**
* Set the retention time for the ManagedLedger.
* <p>
* Retention time and retention size ({@link #setRetentionSizeInMB(long)}) are together used to retain the
* ledger data when there are no cursors or when all the cursors have marked the data for deletion.
* Data will be deleted in this case when both retention time and retention size settings don't prevent deleting
* the data marked for deletion.
* <p>
 * A retention time of 0 (the default) causes data to be deleted immediately.
 * <p>
 * A retention time of -1 means unlimited retention.
*
* @param retentionTime
* duration for which messages should be retained
* @param unit
* time unit for retention time
*/
public ManagedLedgerConfig setRetentionTime(int retentionTime, TimeUnit unit) {
this.retentionTimeMs = unit.toMillis(retentionTime);
return this;
} | 3.68 |
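A minimal sketch of combining retention time with retention size, as described above. The concrete values are arbitrary; `setRetentionSizeInMB` is the companion setter referenced in the Javadoc.

```java
import java.util.concurrent.TimeUnit;

import org.apache.bookkeeper.mledger.ManagedLedgerConfig;

public class RetentionConfigExample {
    public static ManagedLedgerConfig retentionConfig() {
        ManagedLedgerConfig config = new ManagedLedgerConfig();
        // Keep consumed data for up to 24 hours, but never more than 512 MB;
        // data is deleted only once both limits allow it.
        config.setRetentionTime(24, TimeUnit.HOURS);
        config.setRetentionSizeInMB(512);
        return config;
    }
}
```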
druid_Lexer_token | /**
* Return the current token, set by nextToken().
*/
public final Token token() {
return token;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectSimpleJoinScript | /**
* Tests a select with a simple join.
*/
@Test
public void testSelectSimpleJoinScript() {
SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE))
.innerJoin(new TableReference(ALTERNATE_TABLE),
eq(new FieldReference(new TableReference(TEST_TABLE), STRING_FIELD),
new FieldReference(new TableReference(ALTERNATE_TABLE), STRING_FIELD))
);
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " INNER JOIN " + tableName(ALTERNATE_TABLE) + " ON (Test.stringField = Alternate.stringField)";
assertEquals("Select with simple join", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_CatalogManager_resolveCatalogBaseTable | /** Resolves a {@link CatalogBaseTable} to a validated {@link ResolvedCatalogBaseTable}. */
public ResolvedCatalogBaseTable<?> resolveCatalogBaseTable(CatalogBaseTable baseTable) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (baseTable instanceof CatalogTable) {
return resolveCatalogTable((CatalogTable) baseTable);
} else if (baseTable instanceof CatalogView) {
return resolveCatalogView((CatalogView) baseTable);
}
throw new IllegalArgumentException(
"Unknown kind of catalog base table: " + baseTable.getClass());
} | 3.68 |
flink_PythonConfigUtil_alignTransformation | /**
* Configure the {@link AbstractExternalOneInputPythonFunctionOperator} to be chained with the
* upstream/downstream operator by setting their parallelism, slot sharing group, co-location
* group to be the same, and applying a {@link ForwardPartitioner}. 1. operator with name
* "_keyed_stream_values_operator" should align with its downstream operator. 2. operator with
* name "_stream_key_by_map_operator" should align with its upstream operator.
*/
private static void alignTransformation(Transformation<?> transformation)
throws NoSuchFieldException, IllegalAccessException {
String transformName = transformation.getName();
if (transformation.getInputs().isEmpty()) {
return;
}
Transformation<?> inputTransformation = transformation.getInputs().get(0);
String inputTransformName = inputTransformation.getName();
if (inputTransformName.equals(KEYED_STREAM_VALUE_OPERATOR_NAME)) {
chainTransformation(inputTransformation, transformation);
configForwardPartitioner(inputTransformation, transformation);
}
if (transformName.equals(STREAM_KEY_BY_MAP_OPERATOR_NAME)
|| transformName.equals(STREAM_PARTITION_CUSTOM_MAP_OPERATOR_NAME)) {
chainTransformation(transformation, inputTransformation);
configForwardPartitioner(inputTransformation, transformation);
}
} | 3.68 |
pulsar_ModularLoadManagerImpl_updateLoadBalancingBundlesMetrics | /**
* As any broker, update its bundle metrics.
*
* @param bundlesData
*/
private void updateLoadBalancingBundlesMetrics(Map<String, NamespaceBundleStats> bundlesData) {
List<Metrics> metrics = new ArrayList<>();
for (Map.Entry<String, NamespaceBundleStats> entry: bundlesData.entrySet()) {
final String bundle = entry.getKey();
final NamespaceBundleStats stats = entry.getValue();
Map<String, String> dimensions = new HashMap<>();
dimensions.put("broker", pulsar.getAdvertisedAddress());
dimensions.put("bundle", bundle);
dimensions.put("metric", "bundle");
Metrics m = Metrics.create(dimensions);
m.put("brk_bundle_msg_rate_in", stats.msgRateIn);
m.put("brk_bundle_msg_rate_out", stats.msgRateOut);
m.put("brk_bundle_topics_count", stats.topics);
m.put("brk_bundle_consumer_count", stats.consumerCount);
m.put("brk_bundle_producer_count", stats.producerCount);
m.put("brk_bundle_msg_throughput_in", stats.msgThroughputIn);
m.put("brk_bundle_msg_throughput_out", stats.msgThroughputOut);
metrics.add(m);
}
this.bundleMetrics.set(metrics);
} | 3.68 |
AreaShop_SignsFeature_getSignsRef | /**
* Get the signs of this region.
* @return Map with signs: locationString -> RegionSign
*/
Map<String, RegionSign> getSignsRef() {
return signs;
} | 3.68 |
hbase_ScannerModel_getJasonProvider | /**
 * Get the <code>JacksonJaxbJsonProvider</code> instance.
* @return A <code>JacksonJaxbJsonProvider</code>.
*/
private static JacksonJaxbJsonProvider getJasonProvider() {
return JaxbJsonProviderHolder.INSTANCE;
} | 3.68 |
hbase_CleanerChore_deleteAction | /**
* Perform a delete on a specified type.
* @param deletion a delete
* @param type possible values are 'files', 'subdirs', 'dirs'
* @return true if it deleted successfully, false otherwise
*/
private boolean deleteAction(Action<Boolean> deletion, String type, Path dir) {
boolean deleted;
try {
LOG.trace("Start deleting {} under {}", type, dir);
deleted = deletion.act();
} catch (PathIsNotEmptyDirectoryException exception) {
// N.B. HDFS throws this exception when we try to delete a non-empty directory, but
// LocalFileSystem throws a bare IOException. So some test code will get the verbose
// message below.
LOG.debug("Couldn't delete '{}' yet because it isn't empty w/exception.", dir, exception);
deleted = false;
} catch (IOException ioe) {
if (LOG.isTraceEnabled()) {
LOG.trace("Could not delete {} under {}; will retry. If it keeps happening, "
+ "quote the exception when asking on mailing list.", type, dir, ioe);
} else {
LOG.info(
"Could not delete {} under {} because {}; will retry. If it keeps happening, enable"
+ "TRACE-level logging and quote the exception when asking on mailing list.",
type, dir, ioe.getMessage());
}
deleted = false;
} catch (Exception e) {
LOG.info("unexpected exception: ", e);
deleted = false;
}
LOG.trace("Finish deleting {} under {}, deleted = {}", type, dir, deleted);
return deleted;
} | 3.68 |
framework_DragSourceExtension_clearDataTransferData | /**
* Clears all data for this drag source element.
*/
public void clearDataTransferData() {
getState().types.clear();
getState().data.clear();
} | 3.68 |
dubbo_AbstractJSONImpl_getObject | /**
* Gets an object from an object for the given key. If the key is not present, this returns null.
* If the value is not a Map, throws an exception.
*/
@SuppressWarnings("unchecked")
@Override
public Map<String, ?> getObject(Map<String, ?> obj, String key) {
assert obj != null;
assert key != null;
if (!obj.containsKey(key)) {
return null;
}
Object value = obj.get(key);
if (!(value instanceof Map)) {
throw new ClassCastException(
String.format("value '%s' for key '%s' in '%s' is not object", value, key, obj));
}
return (Map<String, ?>) value;
} | 3.68 |
hbase_NamedQueueRecorder_persistAll | /**
 * Adds all in-memory queue records to the system table. Implementors can use a system table,
 * a direct HDFS file, or ZK as the persistence system.
*/
public void persistAll(NamedQueuePayload.NamedQueueEvent namedQueueEvent, Connection connection) {
if (this.logEventHandler != null) {
this.logEventHandler.persistAll(namedQueueEvent, connection);
}
} | 3.68 |
flink_UploadThrottle_hasCapacity | /** Test whether some capacity is available. */
public boolean hasCapacity() {
return inFlightBytesCounter < maxBytesInFlight;
} | 3.68 |
flink_BeamPythonFunctionRunner_notifyNoMoreResults | /** Interrupts the progress of takeResult. */
public void notifyNoMoreResults() {
resultBuffer.add(Tuple2.of(null, new byte[0]));
} | 3.68 |
morf_SchemaChangeSequence_addTableFrom | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#addTableFrom(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.sql.SelectStatement)
*/
@Override
public void addTableFrom(Table table, SelectStatement select) {
// track added tables...
tableAdditions.add(table.getName());
AddTable addTable = new AddTableFrom(table, select);
visitor.visit(addTable);
schemaAndDataChangeVisitor.visit(addTable);
select.accept(schemaAndDataChangeVisitor);
} | 3.68 |
hbase_MetricsUserAggregateImpl_getActiveUser | /**
* Returns the active user to which authorization checks should be applied. If we are in the
* context of an RPC call, the remote user is used, otherwise the currently logged in user is
* used.
*/
private String getActiveUser() {
Optional<User> user = RpcServer.getRequestUser();
if (!user.isPresent()) {
// for non-rpc handling, fallback to system user
try {
user = Optional.of(userProvider.getCurrent());
} catch (IOException ignore) {
}
}
return user.map(User::getShortName).orElse(null);
} | 3.68 |
hbase_KeyValue_getDelimiter | /**
* Find index of passed delimiter walking from start of buffer forwards.
* @param b the kv serialized byte[] to process
 * @param delimiter input delimiter to fetch index from start
* @return Index of delimiter having started from start of <code>b</code> moving rightward.
*/
public static int getDelimiter(final byte[] b, int offset, final int length,
final int delimiter) {
if (b == null) {
throw new IllegalArgumentException("Passed buffer is null");
}
int result = -1;
for (int i = offset; i < length + offset; i++) {
if (b[i] == delimiter) {
result = i;
break;
}
}
return result;
} | 3.68 |
framework_Tree_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addItemClickListener(ItemClickListener)}
*/
@Override
@Deprecated
public void addListener(ItemClickListener listener) {
addItemClickListener(listener);
} | 3.68 |
hadoop_IdentifierResolver_getOutputReaderClass | /**
* Returns the resolved {@link OutputReader} class.
*/
public Class<? extends OutputReader> getOutputReaderClass() {
return outputReaderClass;
} | 3.68 |
druid_CharsetConvert_encode | /**
 * Encodes a string from the client encoding to the server encoding.
*
* @param s String
* @return String
* @throws UnsupportedEncodingException
*/
public String encode(String s) throws UnsupportedEncodingException {
if (enable && !isEmpty(s)) {
s = new String(s.getBytes(clientEncoding), serverEncoding);
}
return s;
} | 3.68 |
hudi_HoodieLogBlock_getLogMetadata | /**
 * Convert bytes to LogMetadata, following the same order as {@link HoodieLogBlock#getLogMetadataBytes}.
*/
public static Map<HeaderMetadataType, String> getLogMetadata(DataInputStream dis) throws IOException {
Map<HeaderMetadataType, String> metadata = new HashMap<>();
// 1. Read the metadata written out
int metadataCount = dis.readInt();
try {
while (metadataCount > 0) {
int metadataEntryIndex = dis.readInt();
int metadataEntrySize = dis.readInt();
byte[] metadataEntry = new byte[metadataEntrySize];
dis.readFully(metadataEntry, 0, metadataEntrySize);
metadata.put(HeaderMetadataType.values()[metadataEntryIndex], new String(metadataEntry));
metadataCount--;
}
return metadata;
} catch (EOFException eof) {
throw new IOException("Could not read metadata fields ", eof);
}
} | 3.68 |
pulsar_IScheduler_rebalance | /**
* Rebalances function instances scheduled to workers.
*
* @param currentAssignments
* current assignments
* @param workers
* current list of active workers
* @return
* A list of new assignments
*/
default List<Function.Assignment> rebalance(List<Function.Assignment> currentAssignments, Set<String> workers) {
return Collections.emptyList();
} | 3.68 |
framework_DragEndEvent_isCanceled | /**
* Returns whether the drag event was cancelled. This is a shorthand for
* {@code dropEffect == NONE}.
*
 * @return {@code true} if the drag event was cancelled, {@code false}
* otherwise.
*/
public boolean isCanceled() {
return getDropEffect() == DropEffect.NONE;
} | 3.68 |
hbase_Get_getTimeRange | /**
* Method for retrieving the get's TimeRange
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.68 |
morf_Function_randomString | /**
* Helper method to create a function for generating random strings via SQL.
*
* @param length The length of the generated string
* @return an instance of the randomString function.
*/
public static Function randomString(AliasedField length) {
return new Function(FunctionType.RANDOM_STRING, length);
} | 3.68 |
framework_VScrollTable_addAndRemoveRows | /**
* Inserts rows in the table body or removes them from the table body based
* on the commands in the UIDL.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param partialRowAdditions
* the UIDL containing row updates.
*/
public void addAndRemoveRows(UIDL partialRowAdditions) {
if (partialRowAdditions == null) {
return;
}
if (partialRowAdditions.hasAttribute("hide")) {
scrollBody.unlinkAndReindexRows(
partialRowAdditions.getIntAttribute("firstprowix"),
partialRowAdditions.getIntAttribute("numprows"));
scrollBody.ensureCacheFilled();
} else {
if (partialRowAdditions.hasAttribute("delbelow")) {
scrollBody.insertRowsDeleteBelow(partialRowAdditions,
partialRowAdditions.getIntAttribute("firstprowix"),
partialRowAdditions.getIntAttribute("numprows"));
} else {
scrollBody.insertAndReindexRows(partialRowAdditions,
partialRowAdditions.getIntAttribute("firstprowix"),
partialRowAdditions.getIntAttribute("numprows"));
}
}
discardRowsOutsideCacheWindow();
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_placePrimaryRSAsRoundRobin | // Place the regions round-robin across the racks picking one server from each
// rack at a time. Start with a random rack, and a random server from every rack.
// If a rack doesn't have enough servers it will go to the next rack and so on.
// for choosing a primary.
// For example, if 4 racks (r1 .. r4) with 8 servers (s1..s8) each, one possible
// placement could be r2:s5, r3:s5, r4:s5, r1:s5, r2:s6, r3:s6..
// If there were fewer servers in one rack, say r3, which had 3 servers, one possible
// placement could be r2:s5, <skip-r3>, r4:s5, r1:s5, r2:s6, <skip-r3> ...
// The regions should be distributed proportionately to the racksizes
public void placePrimaryRSAsRoundRobin(Map<ServerName, List<RegionInfo>> assignmentMap,
Map<RegionInfo, ServerName> primaryRSMap, List<RegionInfo> regions) {
List<String> rackList = new ArrayList<>(rackToRegionServerMap.size());
rackList.addAll(rackToRegionServerMap.keySet());
int rackIndex = ThreadLocalRandom.current().nextInt(rackList.size());
int maxRackSize = 0;
for (Map.Entry<String, List<ServerName>> r : rackToRegionServerMap.entrySet()) {
if (r.getValue().size() > maxRackSize) {
maxRackSize = r.getValue().size();
}
}
int numIterations = 0;
// Initialize the current processing host index.
int serverIndex = ThreadLocalRandom.current().nextInt(maxRackSize);
for (RegionInfo regionInfo : regions) {
List<ServerName> currentServerList;
String rackName;
while (true) {
rackName = rackList.get(rackIndex);
numIterations++;
// Get the server list for the current rack
currentServerList = rackToRegionServerMap.get(rackName);
if (serverIndex >= currentServerList.size()) { // not enough machines in this rack
if (numIterations % rackList.size() == 0) {
if (++serverIndex >= maxRackSize) serverIndex = 0;
}
if (++rackIndex >= rackList.size()) {
rackIndex = 0; // reset the rack index to 0
}
} else break;
}
// Get the current process region server
ServerName currentServer = currentServerList.get(serverIndex);
// Place the current region with the current primary region server
primaryRSMap.put(regionInfo, currentServer);
if (assignmentMap != null) {
List<RegionInfo> regionsForServer = assignmentMap.get(currentServer);
if (regionsForServer == null) {
regionsForServer = new ArrayList<>();
assignmentMap.put(currentServer, regionsForServer);
}
regionsForServer.add(regionInfo);
}
// Set the next processing index
if (numIterations % rackList.size() == 0) {
++serverIndex;
}
if (++rackIndex >= rackList.size()) {
rackIndex = 0; // reset the rack index to 0
}
}
} | 3.68 |
hbase_ReplicationPeerConfigUtil_convert2Map | /**
* Convert tableCFs Object to Map.
*/
public static Map<TableName, List<String>> convert2Map(ReplicationProtos.TableCF[] tableCFs) {
if (tableCFs == null || tableCFs.length == 0) {
return null;
}
Map<TableName, List<String>> tableCFsMap = new HashMap<>();
for (int i = 0, n = tableCFs.length; i < n; i++) {
ReplicationProtos.TableCF tableCF = tableCFs[i];
List<String> families = new ArrayList<>();
for (int j = 0, m = tableCF.getFamiliesCount(); j < m; j++) {
families.add(tableCF.getFamilies(j).toStringUtf8());
}
if (families.size() > 0) {
tableCFsMap.put(ProtobufUtil.toTableName(tableCF.getTableName()), families);
} else {
tableCFsMap.put(ProtobufUtil.toTableName(tableCF.getTableName()), null);
}
}
return tableCFsMap;
} | 3.68 |
flink_DynamicTableFactory_getPrimaryKeyIndexes | /**
* Returns the primary key indexes, if any, otherwise returns an empty array. A factory can
* use it to compute the schema projection of the key fields with {@code
* Projection.of(ctx.getPrimaryKeyIndexes()).project(dataType)}.
*
* <p>Shortcut for {@code getCatalogTable().getResolvedSchema().getPrimaryKeyIndexes()}.
*
* @see ResolvedSchema#getPrimaryKeyIndexes()
*/
default int[] getPrimaryKeyIndexes() {
return getCatalogTable().getResolvedSchema().getPrimaryKeyIndexes();
} | 3.68 |
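A hedged sketch of the projection usage mentioned in the Javadoc, wrapped in a hypothetical helper method; `getPhysicalRowDataType` and `Projection` follow the usual Flink table API but are assumptions relative to the snippet above.

```java
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.factories.DynamicTableFactory;
import org.apache.flink.table.types.DataType;

public class PrimaryKeyProjectionExample {
    /** Derives the data type of the primary key fields from the table's physical row type. */
    public static DataType primaryKeyDataType(DynamicTableFactory.Context context) {
        int[] primaryKeyIndexes = context.getPrimaryKeyIndexes();
        DataType physicalRowType = context.getPhysicalRowDataType();
        // Project the physical row type down to the key columns only.
        return Projection.of(primaryKeyIndexes).project(physicalRowType);
    }
}
```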
hbase_MasterObserver_postCompletedMergeRegionsAction | /**
 * Called after the regions merge.
* @param ctx the environment to interact with the framework and master
*/
default void postCompletedMergeRegionsAction(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final RegionInfo[] regionsToMerge,
final RegionInfo mergedRegion) throws IOException {
} | 3.68 |
querydsl_GeometryExpression_difference | /**
* Returns a geometric object that represents the Point
* set difference of this geometric object with anotherGeometry.
*
* @param geometry other geometry
* @return difference between this and the other geometry
*/
public GeometryExpression<Geometry> difference(Expression<? extends Geometry> geometry) {
return GeometryExpressions.geometryOperation(SpatialOps.DIFFERENCE, mixin, geometry);
} | 3.68 |
hadoop_HadoopLogsAnalyzer_main | /**
* @param args
*
* Last arg is the input file. That file can be a directory, in which
* case you get all the files in sorted order. We will decompress
 *          files whose names end in .gz.
*
* switches: -c collect line types.
*
* -d debug mode
*
* -delays print out the delays [interval between job submit time and
* launch time]
*
* -runtimes print out the job runtimes
*
* -spreads print out the ratio of 10%ile and 90%ile, of both the
 *          successful map task attempt run times and the successful
* reduce task attempt run times
*
* -tasktimes prints out individual task time distributions
*
* collects all the line types and prints the first example of each
* one
*/
public static void main(String[] args) {
try {
HadoopLogsAnalyzer analyzer = new HadoopLogsAnalyzer();
int result = ToolRunner.run(analyzer, args);
if (result == 0) {
return;
}
System.exit(result);
} catch (FileNotFoundException e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(1);
} catch (IOException e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(2);
} catch (Exception e) {
LOG.error("", e);
e.printStackTrace(staticDebugOutput);
System.exit(3);
}
} | 3.68 |
flink_TestcontainersSettings_baseImage | /**
* Sets the {@code baseImage} and returns a reference to this Builder enabling method
* chaining.
*
* @param baseImage The {@code baseImage} to set.
* @return A reference to this Builder.
*/
public Builder baseImage(String baseImage) {
this.baseImage = baseImage;
return this;
} | 3.68 |
framework_BootstrapHandler_getApplicationParameters | /**
* Gets the application parameters specified by the BootstrapHandler.
*
* @return the application parameters which will be written on the page
*/
public JsonObject getApplicationParameters() {
if (applicationParameters == null) {
applicationParameters = BootstrapHandler.this
.getApplicationParameters(this);
}
return applicationParameters;
} | 3.68 |
flink_Execution_sendOperatorEvent | /**
* Sends the operator event to the Task on the Task Executor.
*
 * @return True if the message was sent, false if the task is currently not running.
*/
public CompletableFuture<Acknowledge> sendOperatorEvent(
OperatorID operatorId, SerializedValue<OperatorEvent> event) {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
if (slot != null && (getState() == RUNNING || getState() == INITIALIZING)) {
final TaskExecutorOperatorEventGateway eventGateway = slot.getTaskManagerGateway();
return eventGateway.sendOperatorEventToTask(getAttemptId(), operatorId, event);
} else {
return FutureUtils.completedExceptionally(
new TaskNotRunningException(
'"'
+ vertex.getTaskNameWithSubtaskIndex()
+ "\" is not running, but in state "
+ getState()));
}
} | 3.68 |
hudi_CleanPlanner_getFilesToCleanKeepingLatestHours | /**
* This method finds the files to be cleaned based on the number of hours. If {@code config.getCleanerHoursRetained()} is set to 5,
 * all the files with a commit time older than 5 hours will be removed. Also, the latest file for any file group is retained.
* This policy gives much more flexibility to users for retaining data for running incremental queries as compared to
* KEEP_LATEST_COMMITS cleaning policy. The default number of hours is 5.
* @param partitionPath partition path to check
* @param earliestCommitToRetain earliest commit to retain
* @return list of files to clean
*/
private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestHours(String partitionPath, Option<HoodieInstant> earliestCommitToRetain) {
return getFilesToCleanKeepingLatestCommits(partitionPath, 0, earliestCommitToRetain, HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS);
} | 3.68 |
hudi_DagScheduler_schedule | /**
* Method to start executing workflow DAGs.
*
* @throws Exception Thrown if schedule failed.
*/
public void schedule() throws Exception {
ExecutorService service = Executors.newFixedThreadPool(2);
try {
execute(service, workflowDag);
service.shutdown();
} finally {
if (!service.isShutdown()) {
log.info("Forcing shutdown of executor service, this might kill running tasks");
service.shutdownNow();
}
}
} | 3.68 |
dubbo_DubboBootstrap_isStarting | /**
* @return true if the dubbo application is starting.
* @see #isStarted()
*/
public boolean isStarting() {
return applicationDeployer.isStarting();
} | 3.68 |
hadoop_SinglePendingCommit_getSaved | /**
* When was the upload saved?
* @return timestamp
*/
public long getSaved() {
return saved;
} | 3.68 |
framework_PureGWTTestApplication_addChildMenu | /**
* Adds a child menu entry to this menu. The title for this entry is
* taken from the Menu object argument.
*
* @param m
* another Menu object
*/
public void addChildMenu(Menu m) {
menubar.addItem(m.title, m.menubar);
children.add(m);
} | 3.68 |
hbase_Query_setACL | /**
* Set the ACL for the operation.
* @param perms A map of permissions for a user or users
*/
public Query setACL(Map<String, Permission> perms) {
ListMultimap<String, Permission> permMap = ArrayListMultimap.create();
for (Map.Entry<String, Permission> entry : perms.entrySet()) {
permMap.put(entry.getKey(), entry.getValue());
}
setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL,
AccessControlUtil.toUsersAndPermissions(permMap).toByteArray());
return this;
} | 3.68 |
framework_FilesystemContainer_addItemIds | /**
* Internal recursive method to add the files under the specified directory
* to the collection.
*
* @param col
* the collection where the found items are added
* @param f
* the root file where to start adding files
*/
private void addItemIds(Collection<File> col, File f) {
File[] l;
if (filter != null) {
l = f.listFiles(filter);
} else {
l = f.listFiles();
}
if (l == null) {
// File.listFiles returns null if File does not exist or if there
// was an IO error (permission denied)
return;
}
final List<File> ll = Arrays.asList(l);
Collections.sort(ll);
for (final File lf : ll) {
col.add(lf);
if (lf.isDirectory()) {
addItemIds(col, lf);
}
}
} | 3.68 |
dubbo_StringUtils_splitToList | /**
* Splits String around matches of the given character.
* <p>
* Note: Compare with {@link StringUtils#split(String, char)}, this method reduce memory copy.
*/
public static List<String> splitToList(String str, char ch) {
if (isEmpty(str)) {
return Collections.emptyList();
}
return splitToList0(str, ch);
} | 3.68 |
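A tiny illustrative call; the values are arbitrary.

```java
import java.util.List;

import org.apache.dubbo.common.utils.StringUtils;

public class SplitToListExample {
    public static void main(String[] args) {
        // Splits around ',' without the extra copying of split(String, char).
        List<String> parts = StringUtils.splitToList("a,b,c", ',');
        System.out.println(parts); // [a, b, c]
    }
}
```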
MagicPlugin_MapController_getMapItem | /**
* A helper function to get an ItemStack from a MapView.
*
 * @param name The display name to give the new item. Optional.
 * @param mapId The id of the map to create the item for.
 * @return The new map ItemStack.
 */
public ItemStack getMapItem(String name, int mapId) {
ItemStack newMapItem = createMap(mapId);
if (name != null) {
ItemMeta meta = newMapItem.getItemMeta();
meta.setDisplayName(name);
newMapItem.setItemMeta(meta);
}
return newMapItem;
} | 3.68 |
hbase_ClientUtil_calculateTheClosestNextRowKeyForPrefix | /**
* <p>
 * When scanning for a prefix, the scan should stop immediately after the last row that has the
* specified prefix. This method calculates the closest next rowKey immediately following the
* given rowKeyPrefix.
* </p>
* <p>
* <b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.
* </p>
* <p>
* If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can simply
* increment the last byte of the array. But if your application uses real binary rowids you may
* run into the scenario that your prefix is something like:
* </p>
* <b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
* Then this stopRow needs to be fed into the actual scan<br/>
* <b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
* This method calculates the correct stop row value for this usecase.
* @param rowKeyPrefix the rowKey<u>Prefix</u>.
* @return the closest next rowKey immediately following the given rowKeyPrefix.
*/
public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
// Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
// Search for the place where the trailing 0xFFs start
int offset = rowKeyPrefix.length;
while (offset > 0) {
if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
break;
}
offset--;
}
if (offset == 0) {
// We got an 0xFFFF... (only FFs) stopRow value which is
// the last possible prefix before the end of the table.
// So set it to stop at the 'end of the table'
return HConstants.EMPTY_END_ROW;
}
// Copy the right length of the original
byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
// And increment the last one
newStopRow[newStopRow.length - 1]++;
return newStopRow;
} | 3.68 |
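A short sketch reproducing the worked example from the Javadoc; only `calculateTheClosestNextRowKeyForPrefix` comes from the snippet, the surrounding main method is illustrative.

```java
import java.util.Arrays;

import org.apache.hadoop.hbase.client.ClientUtil;

public class PrefixStopRowExample {
    public static void main(String[] args) {
        byte[] prefix = {0x12, 0x23, (byte) 0xFF, (byte) 0xFF};
        byte[] stopRow = ClientUtil.calculateTheClosestNextRowKeyForPrefix(prefix);
        // Trailing 0xFF bytes are dropped and the preceding byte is incremented.
        System.out.println(Arrays.toString(stopRow)); // [18, 36]  i.e. {0x12, 0x24}
    }
}
```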
framework_ConnectorTracker_markDirty | /**
* Mark the connector as dirty and notifies any marked as dirty listeners.
* This should not be done while the response is being written.
*
* @see #getDirtyConnectors()
* @see #isWritingResponse()
*
* @param connector
 *            The connector that should be marked dirty.
*/
public void markDirty(ClientConnector connector) {
if (isWritingResponse()) {
throw new IllegalStateException(
"A connector should not be marked as dirty while a response is being written.");
}
if (fineLogging && !isDirty(connector)) {
getLogger().log(Level.FINE, "{0} is now dirty",
getConnectorAndParentInfo(connector));
}
if (!isDirty(connector)) {
notifyMarkedAsDirtyListeners(connector);
}
dirtyConnectors.add(connector);
} | 3.68 |
hbase_KeyValueUtil_keyLength | /**
* Returns number of bytes this cell's key part would have been used if serialized as in
* {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type.
* @return the key length
*/
public static int keyLength(final Cell cell) {
return keyLength(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength());
} | 3.68 |
hadoop_FileSystemReleaseFilter_setFileSystem | /**
* Static method that sets the <code>FileSystem</code> to release back to
* the {@link FileSystemAccess} service on servlet request completion.
*
* @param fs a filesystem instance.
*/
public static void setFileSystem(FileSystem fs) {
FILE_SYSTEM_TL.set(fs);
} | 3.68 |
hudi_AvroSchemaCompatibility_getWriterFragment | /**
* Returns the fragment of the writer schema that failed compatibility check.
*
* @return a Schema instance (fragment of the writer schema).
*/
public Schema getWriterFragment() {
return mWriterFragment;
} | 3.68 |
hadoop_MultipleOutputFormat_generateActualValue | /**
* Generate the actual value from the given key and value. The default behavior is that
 * the actual value is equal to the given value.
*
* @param key
* the key of the output data
* @param value
* the value of the output data
* @return the actual value derived from the given key/value
*/
protected V generateActualValue(K key, V value) {
return value;
} | 3.68 |
hbase_ServerManager_getAverageLoad | /**
* Compute the average load across all region servers. Currently, this uses a very naive
* computation - just uses the number of regions being served, ignoring stats about number of
* requests.
* @return the average load
*/
public double getAverageLoad() {
int totalLoad = 0;
int numServers = 0;
for (ServerMetrics sl : this.onlineServers.values()) {
numServers++;
totalLoad += sl.getRegionMetrics().size();
}
return numServers == 0 ? 0 : (double) totalLoad / (double) numServers;
} | 3.68 |
zxing_ITFReader_decodeStart | /**
* Identify where the start of the middle / payload section starts.
*
* @param row row of black/white values to search
* @return Array, containing index of start of 'start block' and end of
* 'start block'
*/
private int[] decodeStart(BitArray row) throws NotFoundException {
int endStart = skipWhiteSpace(row);
int[] startPattern = findGuardPattern(row, endStart, START_PATTERN);
// Determine the width of a narrow line in pixels. We can do this by
// getting the width of the start pattern and dividing by 4 because its
// made up of 4 narrow lines.
this.narrowLineWidth = (startPattern[1] - startPattern[0]) / 4;
validateQuietZone(row, startPattern[0]);
return startPattern;
} | 3.68 |
hbase_ReportMakingVisitor_checkServer | /**
* Run through referenced servers and save off unknown and the dead.
*/
private void checkServer(RegionLocations locations) {
if (this.services == null) {
// Can't do this test if no services.
return;
}
if (locations == null) {
return;
}
if (locations.getRegionLocations() == null) {
return;
}
// Check referenced servers are known/online. Here we are looking
// at both the default replica -- the main replica -- and then replica
// locations too.
for (HRegionLocation location : locations.getRegionLocations()) {
if (location == null) {
continue;
}
ServerName sn = location.getServerName();
if (sn == null) {
continue;
}
if (location.getRegion() == null) {
LOG.warn("Empty RegionInfo in {}", location);
// This should never happen but if it does, will mess up below.
continue;
}
RegionInfo ri = location.getRegion();
// Skip split parent region
if (ri.isSplitParent()) {
continue;
}
// skip the offline regions which belong to disabled table.
if (isTableDisabled(ri)) {
continue;
}
RegionState rs = this.services.getAssignmentManager().getRegionStates().getRegionState(ri);
if (rs == null || rs.isClosedOrAbnormallyClosed()) {
// If closed against an 'Unknown Server', that is should be fine.
continue;
}
ServerManager.ServerLiveState state =
this.services.getServerManager().isServerKnownAndOnline(sn);
switch (state) {
case UNKNOWN:
this.report.unknownServers.add(new Pair<>(ri, sn));
break;
default:
break;
}
}
} | 3.68 |
hadoop_ReconfigurationTaskStatus_stopped | /**
* Return true if the latest reconfiguration task has finished and there is
 * no other active task running.
* @return true if endTime > 0; false if not.
*/
public boolean stopped() {
return endTime > 0;
} | 3.68 |
graphhopper_Downloader_fetch | /**
* This method initiates a connect call of the provided connection and returns the response
* stream. It only returns the error stream if it is available and readErrorStreamNoException is
 * true; otherwise it throws an IOException if an error happens. Furthermore, it wraps the stream
* to decompress it if the connection content encoding is specified.
*/
public InputStream fetch(HttpURLConnection connection, boolean readErrorStreamNoException) throws IOException {
// create connection but before reading get the correct inputstream based on the compression and if error
connection.connect();
InputStream is;
if (readErrorStreamNoException && connection.getResponseCode() >= 400 && connection.getErrorStream() != null)
is = connection.getErrorStream();
else
is = connection.getInputStream();
if (is == null)
throw new IOException("Stream is null. Message:" + connection.getResponseMessage());
// wrap
try {
String encoding = connection.getContentEncoding();
if (encoding != null && encoding.equalsIgnoreCase("gzip"))
is = new GZIPInputStream(is);
else if (encoding != null && encoding.equalsIgnoreCase("deflate"))
is = new InflaterInputStream(is, new Inflater(true));
} catch (IOException ex) {
}
return is;
} | 3.68 |
framework_Table_handleClickEvent | /**
 * Handles item, header and footer click events.
 *
 * @param variables the variable map received from the client
*/
private void handleClickEvent(Map<String, Object> variables) {
// Item click event
if (variables.containsKey("clickEvent")) {
String key = (String) variables.get("clickedKey");
Object itemId = itemIdMapper.get(key);
Object propertyId = null;
String colkey = (String) variables.get("clickedColKey");
// click is not necessary on a property
if (colkey != null) {
propertyId = columnIdMap.get(colkey);
}
MouseEventDetails evt = MouseEventDetails
.deSerialize((String) variables.get("clickEvent"));
Item item = getItem(itemId);
if (item != null) {
fireEvent(new ItemClickEvent(this, item, itemId, propertyId,
evt));
}
} else if (
// Header click event
variables.containsKey("headerClickEvent")) {
MouseEventDetails details = MouseEventDetails
.deSerialize((String) variables.get("headerClickEvent"));
Object cid = variables.get("headerClickCID");
Object propertyId = null;
if (cid != null) {
propertyId = columnIdMap.get(cid.toString());
}
fireEvent(new HeaderClickEvent(this, propertyId, details));
} else if (
// Footer click event
variables.containsKey("footerClickEvent")) {
MouseEventDetails details = MouseEventDetails
.deSerialize((String) variables.get("footerClickEvent"));
Object cid = variables.get("footerClickCID");
Object propertyId = null;
if (cid != null) {
propertyId = columnIdMap.get(cid.toString());
}
fireEvent(new FooterClickEvent(this, propertyId, details));
}
} | 3.68 |
framework_Escalator_setScrollLeft | /**
 * Sets the logical horizontal scroll offset. Note that this will not necessarily
 * become the same as the {@code scrollLeft} attribute in the DOM.
*
* @param scrollLeft
* the number of pixels to scroll horizontally
*/
public void setScrollLeft(final double scrollLeft) {
horizontalScrollbar.setScrollPos(scrollLeft);
} | 3.68 |
morf_AbstractSqlDialectTest_expectedHints3 | /**
* @return The expected SQL for the {@link UpdateStatement#useParallelDml()} directive.
*/
protected String expectedHints3() {
return "UPDATE " + tableName("Foo") + " SET a = b";
} | 3.68 |
dubbo_DubboBootstrap_isStarted | /**
* @return true if the dubbo application has been started.
* @see #start()
* @see #isStarting()
*/
public boolean isStarted() {
return applicationDeployer.isStarted();
} | 3.68 |
hmily_TransactionManagerImpl_initialized | /**
* Initialized.
*/
public void initialized() {
        // initialization is delegated to HmilyXaTransactionManager; see that class
hmilyXaTransactionManager = HmilyXaTransactionManager.initialized();
} | 3.68 |
hbase_RawBytesTerminated_decode | /**
* Read a {@code byte[]} from the buffer {@code src}.
*/
public byte[] decode(PositionedByteRange src, int length) {
return ((RawBytes) wrapped).decode(src, length);
} | 3.68 |
hadoop_Trash_moveToAppropriateTrash | /**
 * In case of symlinks or mount points, the path has to be moved to the
 * appropriate trash bin in the actual volume of the path p being deleted.
 *
 * Hence we get the file system of the fully-qualified resolved path and
 * then move the path p to the trash bin in that volume.
* @param fs - the filesystem of path p
* @param p - the path being deleted - to be moved to trash
* @param conf - configuration
* @return false if the item is already in the trash or trash is disabled
* @throws IOException on error
*/
public static boolean moveToAppropriateTrash(FileSystem fs, Path p,
Configuration conf) throws IOException {
Path fullyResolvedPath = fs.resolvePath(p);
FileSystem fullyResolvedFs =
FileSystem.get(fullyResolvedPath.toUri(), conf);
// If the trash interval is configured server side then clobber this
// configuration so that we always respect the server configuration.
try {
long trashInterval = fullyResolvedFs.getServerDefaults(
fullyResolvedPath).getTrashInterval();
if (0 != trashInterval) {
Configuration confCopy = new Configuration(conf);
confCopy.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY,
trashInterval);
conf = confCopy;
}
} catch (Exception e) {
// If we can not determine that trash is enabled server side then
// bail rather than potentially deleting a file when trash is enabled.
LOG.warn("Failed to get server trash configuration", e);
throw new IOException("Failed to get server trash configuration", e);
}
/*
* In HADOOP-18144, we changed getTrashRoot() in ViewFileSystem to return a
* viewFS path, instead of a targetFS path. moveToTrash works for
* ViewFileSystem now. ViewFileSystem will do path resolution internally by
* itself.
*
* When localized trash flag is enabled:
* 1). if fs is a ViewFileSystem, we can initialize Trash() with a
* ViewFileSystem object;
* 2). When fs is not a ViewFileSystem, the only place we would need to
* resolve a path is for symbolic links. However, symlink is not
* enabled in Hadoop due to the complexity to support it
* (HADOOP-10019).
*/
if (conf.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
Trash trash = new Trash(fs, conf);
return trash.moveToTrash(p);
}
Trash trash = new Trash(fullyResolvedFs, conf);
return trash.moveToTrash(fullyResolvedPath);
} | 3.68 |
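A minimal usage sketch; the path string is hypothetical and only the static helper comes from the snippet above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class MoveToTrashExample {
    public static boolean deleteViaTrash(Configuration conf, String pathToDelete) throws Exception {
        Path path = new Path(pathToDelete);
        FileSystem fs = path.getFileSystem(conf);
        // Returns false if trash is disabled or the item is already in the trash.
        return Trash.moveToAppropriateTrash(fs, path, conf);
    }
}
```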
morf_HumanReadableStatementHelper_generateDeleteStatementString | /**
* Generates a human-readable description of a data delete operation.
*
* @param statement the data upgrade statement to describe.
* @return a string containing the human-readable description of the operation.
*/
private static String generateDeleteStatementString(final DeleteStatement statement) {
if (statement.getWhereCriterion() == null) {
// When no where clause, use the same text as truncation operations
return generateTruncateStatementString(statement.getTable());
} else {
return String.format("Delete records in %s%s", statement.getTable().getName(), generateWhereClause(statement.getWhereCriterion()));
}
} | 3.68 |
framework_VCalendarPanel_buildTime | /**
 * Constructs the ListBoxes and updates their values.
 */
private void buildTime() {
clear();
hours = createListBox();
if (getDateTimeService().isTwelveHourClock()) {
hours.addItem("12");
for (int i = 1; i < 12; i++) {
hours.addItem((i < 10) ? "0" + i : "" + i);
}
} else {
for (int i = 0; i < 24; i++) {
hours.addItem((i < 10) ? "0" + i : "" + i);
}
}
hours.addChangeHandler(this);
if (getDateTimeService().isTwelveHourClock()) {
ampm = createListBox();
final String[] ampmText = getDateTimeService().getAmPmStrings();
ampm.addItem(ampmText[0]);
ampm.addItem(ampmText[1]);
ampm.addChangeHandler(this);
}
if (getResolution().getCalendarField() >= Resolution.MINUTE
.getCalendarField()) {
mins = createListBox();
for (int i = 0; i < 60; i++) {
mins.addItem((i < 10) ? "0" + i : "" + i);
}
mins.addChangeHandler(this);
}
if (getResolution().getCalendarField() >= Resolution.SECOND
.getCalendarField()) {
sec = createListBox();
for (int i = 0; i < 60; i++) {
sec.addItem((i < 10) ? "0" + i : "" + i);
}
sec.addChangeHandler(this);
}
final String delimiter = getDateTimeService().getClockDelimeter();
if (isReadonly()) {
int h = 0;
if (value != null) {
h = value.getHours();
}
if (getDateTimeService().isTwelveHourClock()) {
h -= h < 12 ? 0 : 12;
}
add(new VLabel(h < 10 ? "0" + h : "" + h));
} else {
add(hours);
}
if (getResolution().getCalendarField() >= Resolution.MINUTE
.getCalendarField()) {
add(new VLabel(delimiter));
if (isReadonly()) {
final int m = mins.getSelectedIndex();
add(new VLabel(m < 10 ? "0" + m : "" + m));
} else {
add(mins);
}
}
if (getResolution().getCalendarField() >= Resolution.SECOND
.getCalendarField()) {
add(new VLabel(delimiter));
if (isReadonly()) {
final int s = sec.getSelectedIndex();
add(new VLabel(s < 10 ? "0" + s : "" + s));
} else {
add(sec);
}
}
if (getResolution() == Resolution.HOUR) {
add(new VLabel(delimiter + "00")); // o'clock
}
if (getDateTimeService().isTwelveHourClock()) {
add(new VLabel(" "));
if (isReadonly()) {
int i = 0;
if (value != null) {
i = (value.getHours() < 12) ? 0 : 1;
}
add(new VLabel(ampm.getItemText(i)));
} else {
add(ampm);
}
}
if (isReadonly()) {
return;
}
// Update times
updateTimes();
ListBox lastDropDown = getLastDropDown();
lastDropDown.addKeyDownHandler(new KeyDownHandler() {
@Override
public void onKeyDown(KeyDownEvent event) {
boolean shiftKey = event.getNativeEvent().getShiftKey();
if (shiftKey) {
return;
} else {
int nativeKeyCode = event.getNativeKeyCode();
if (nativeKeyCode == KeyCodes.KEY_TAB) {
onTabOut(event);
}
}
}
});
} | 3.68 |
hudi_HoodieTableFactory_checkPreCombineKey | /**
* Validate pre_combine key.
*/
private void checkPreCombineKey(Configuration conf, ResolvedSchema schema) {
List<String> fields = schema.getColumnNames();
String preCombineField = conf.get(FlinkOptions.PRECOMBINE_FIELD);
if (!fields.contains(preCombineField)) {
if (OptionsResolver.isDefaultHoodieRecordPayloadClazz(conf)) {
throw new HoodieValidationException("Option '" + FlinkOptions.PRECOMBINE_FIELD.key()
+ "' is required for payload class: " + DefaultHoodieRecordPayload.class.getName());
}
if (preCombineField.equals(FlinkOptions.PRECOMBINE_FIELD.defaultValue())) {
conf.setString(FlinkOptions.PRECOMBINE_FIELD, FlinkOptions.NO_PRE_COMBINE);
} else if (!preCombineField.equals(FlinkOptions.NO_PRE_COMBINE)) {
throw new HoodieValidationException("Field " + preCombineField + " does not exist in the table schema."
+ "Please check '" + FlinkOptions.PRECOMBINE_FIELD.key() + "' option.");
}
}
} | 3.68 |
flink_BinaryStringData_startsWith | /**
* Tests if this BinaryStringData starts with the specified prefix.
*
* @param prefix the prefix.
* @return {@code true} if the bytes represented by the argument is a prefix of the bytes
* represented by this string; {@code false} otherwise. Note also that {@code true} will be
* returned if the argument is an empty BinaryStringData or is equal to this {@code
* BinaryStringData} object as determined by the {@link #equals(Object)} method.
*/
public boolean startsWith(final BinaryStringData prefix) {
ensureMaterialized();
prefix.ensureMaterialized();
return matchAt(prefix, 0);
} | 3.68 |
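A brief illustrative check, assuming `BinaryStringData.fromString` for constructing instances; the string values are arbitrary.

```java
import org.apache.flink.table.data.binary.BinaryStringData;

public class StartsWithExample {
    public static void main(String[] args) {
        BinaryStringData value = BinaryStringData.fromString("flink-1.17");
        BinaryStringData prefix = BinaryStringData.fromString("flink");
        // An empty prefix (or the string itself) would also return true.
        System.out.println(value.startsWith(prefix)); // true
    }
}
```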
hbase_DeadServer_cleanPreviousInstance | /**
* Handles restart of a server. The new server instance has a different start code. The new start
* code should be greater than the old one. We don't check that here. Removes the old server from
* deadserver list.
* @param newServerName Servername as either <code>host:port</code> or
* <code>host,port,startcode</code>.
* @return true if this server was dead before and coming back alive again
*/
synchronized boolean cleanPreviousInstance(final ServerName newServerName) {
Iterator<ServerName> it = deadServers.keySet().iterator();
while (it.hasNext()) {
if (cleanOldServerName(newServerName, it)) {
return true;
}
}
return false;
} | 3.68 |
hudi_AbstractColumnReader_supportLazyDecode | /**
 * Supports lazy dictionary id decoding. See more in {@link ParquetDictionary}.
 * If this returns false, all the data will be decoded first.
*/
protected boolean supportLazyDecode() {
return true;
} | 3.68 |
framework_AbstractRemoteDataSource_getRequestedAvailability | /**
* Gets the row index range that was requested by the previous call to
* {@link #ensureAvailability(int, int)}.
*
* @return the requested availability range
*/
public Range getRequestedAvailability() {
return requestedAvailability;
} | 3.68 |
hadoop_AbstractStoreOperation_getAuditSpan | /**
* Get the audit span this object was created with.
* @return the current span or null
*/
public AuditSpan getAuditSpan() {
return auditSpan;
} | 3.68 |
flink_WebMonitorUtils_resolveFileLocation | /**
* Verify log file location.
*
* @param logFilePath Path to log file
* @return File or null if not a valid log file
*/
private static File resolveFileLocation(String logFilePath) {
File logFile = new File(logFilePath);
return (logFile.exists() && logFile.canRead()) ? logFile : null;
} | 3.68 |