name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_FSBuilder_mustDouble | /**
* Set mandatory double parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B mustDouble(@Nonnull String key, double value) {
return must(key, Double.toString(value));
} | 3.68 |
dubbo_DefaultServiceRestMetadataResolver_supportsPathVariableType | /**
 * Determines whether the type of parameter is supported, based on {@link Converter}'s conversion feature.
 *
 * @param parameterType the type of parameter
 * @return <code>true</code> if the type is supported, otherwise <code>false</code>
*/
private boolean supportsPathVariableType(TypeMirror parameterType) {
String className = parameterType.toString();
ClassLoader classLoader = getClass().getClassLoader();
boolean supported;
try {
Class<?> targetType = forName(className, classLoader);
supported = FrameworkModel.defaultModel()
.getBeanFactory()
.getBean(ConverterUtil.class)
.getConverter(String.class, targetType)
!= null;
} catch (ClassNotFoundException e) {
supported = false;
}
return supported;
} | 3.68 |
hadoop_StringValueMax_addNextValue | /**
* add a value to the aggregator
*
* @param val
* a string.
*
*/
public void addNextValue(Object val) {
String newVal = val.toString();
if (this.maxVal == null || this.maxVal.compareTo(newVal) < 0) {
this.maxVal = newVal;
}
} | 3.68 |
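As an illustration of the aggregator above, here is a minimal, hypothetical usage sketch; it assumes the Hadoop class `org.apache.hadoop.mapreduce.lib.aggregate.StringValueMax` and its `getReport()` accessor, which returns the current maximum as a string.

```java
import org.apache.hadoop.mapreduce.lib.aggregate.StringValueMax;

public class StringValueMaxDemo {
    public static void main(String[] args) {
        StringValueMax agg = new StringValueMax();
        // addNextValue keeps whichever value is lexicographically largest
        agg.addNextValue("apple");
        agg.addNextValue("pear");
        agg.addNextValue("banana");
        System.out.println(agg.getReport()); // prints "pear"
    }
}
```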
hbase_Client_getHttpClient | /** Returns the wrapped HttpClient */
public HttpClient getHttpClient() {
return httpClient;
} | 3.68 |
pulsar_KubernetesSecretsProviderConfigurator_doAdmissionChecks | // The secret object should be of type Map<String, String> and it should contain "id" and "key"
@Override
public void doAdmissionChecks(AppsV1Api appsV1Api, CoreV1Api coreV1Api, String jobNamespace, String jobName,
Function.FunctionDetails functionDetails) {
if (!StringUtils.isEmpty(functionDetails.getSecretsMap())) {
Type type = new TypeToken<Map<String, Object>>() {
}.getType();
Map<String, Object> secretsMap = new Gson().fromJson(functionDetails.getSecretsMap(), type);
for (Object object : secretsMap.values()) {
if (object instanceof Map) {
Map<String, String> kubernetesSecret = (Map<String, String>) object;
if (kubernetesSecret.size() < 2) {
throw new IllegalArgumentException("Kubernetes Secret should contain id and key");
}
if (!kubernetesSecret.containsKey(idKey)) {
throw new IllegalArgumentException("Kubernetes Secret should contain id information");
}
if (!kubernetesSecret.containsKey(keyKey)) {
throw new IllegalArgumentException("Kubernetes Secret should contain key information");
}
} else {
throw new IllegalArgumentException("Kubernetes Secret should be a Map containing id/key pairs");
}
}
}
} | 3.68 |
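For context, a sketch of a secrets map that would pass the admission check above, assuming the configurator's `idKey`/`keyKey` resolve to the literal field names "id" and "key" (the exact names depend on the provider configuration):

```java
import com.google.gson.Gson;
import java.util.HashMap;
import java.util.Map;

public class SecretsMapShape {
    public static void main(String[] args) {
        // Each entry value must itself be a map carrying the secret name ("id")
        // and the key inside that Kubernetes Secret ("key").
        Map<String, Map<String, String>> secrets = new HashMap<>();
        Map<String, String> dbSecret = new HashMap<>();
        dbSecret.put("id", "db-credentials");
        dbSecret.put("key", "password");
        secrets.put("database", dbSecret);
        // This JSON string is the shape expected in FunctionDetails.getSecretsMap()
        System.out.println(new Gson().toJson(secrets));
        // {"database":{"id":"db-credentials","key":"password"}}
    }
}
```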
flink_PythonEnvUtils_createSymbolicLink | /**
 * Creates a symbolic link in the working directory for the pyflink lib.
*
* @param libPath the pyflink lib file path.
* @param symbolicLinkPath the symbolic link to pyflink lib.
*/
private static void createSymbolicLink(
java.nio.file.Path libPath, java.nio.file.Path symbolicLinkPath) throws IOException {
try {
Files.createSymbolicLink(symbolicLinkPath, libPath);
} catch (IOException e) {
LOG.warn(
"Create symbol link from {} to {} failed and copy instead.",
symbolicLinkPath,
libPath,
e);
Files.copy(libPath, symbolicLinkPath);
}
} | 3.68 |
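The same link-or-copy fallback can be reproduced outside Flink with plain `java.nio.file`; a minimal sketch with made-up paths and names:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class LinkOrCopy {
    /** Create a symbolic link at {@code link} pointing to {@code target}; copy the file if linking fails. */
    static void linkOrCopy(Path target, Path link) throws IOException {
        try {
            Files.createSymbolicLink(link, target);
        } catch (IOException | UnsupportedOperationException e) {
            // e.g. missing privileges on Windows, or a file system without symlink support
            Files.copy(target, link, StandardCopyOption.REPLACE_EXISTING);
        }
    }

    public static void main(String[] args) throws IOException {
        Path lib = Files.createTempFile("pyflink-lib", ".zip");   // stand-in for the real lib
        Path workDir = Files.createTempDirectory("work");
        linkOrCopy(lib, workDir.resolve("pyflink-lib.zip"));
        System.out.println(Files.exists(workDir.resolve("pyflink-lib.zip"))); // true
    }
}
```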
hadoop_FederationUtil_getVersion | /**
* Fetch the Hadoop version string for this jar.
*
* @return Hadoop version string, e.g., 3.0.1.
*/
public static String getVersion() {
return VersionInfo.getVersion();
} | 3.68 |
framework_SpacerVisibilityChangedEvent_isVisible | /**
* Gets whether the spacer element is displayed.
*
* @return {@code true} if the spacer element is shown, {@code false} if the
* spacer element is hidden
*/
public boolean isVisible() {
return visible;
} | 3.68 |
starts_Attribute_put | /**
* Writes all the attributes of this attribute list in the given byte
* vector.
*
* @param cw
* the class writer to be used to convert the attributes into
* byte arrays, with the {@link #write write} method.
* @param code
* the bytecode of the method corresponding to these code
* attributes, or <code>null</code> if these attributes are not code
* attributes.
* @param len
* the length of the bytecode of the method corresponding to
* these code attributes, or <code>null</code> if these attributes
* are not code attributes.
* @param maxStack
* the maximum stack size of the method corresponding to these
* code attributes, or -1 if these attributes are not code
* attributes.
* @param maxLocals
* the maximum number of local variables of the method
* corresponding to these code attributes, or -1 if these
* attributes are not code attributes.
* @param out
* where the attributes must be written.
*/
final void put(final ClassWriter cw, final byte[] code, final int len,
final int maxStack, final int maxLocals, final ByteVector out) {
Attribute attr = this;
while (attr != null) {
ByteVector b = attr.write(cw, code, len, maxStack, maxLocals);
out.putShort(cw.newUTF8(attr.type)).putInt(b.length);
out.putByteArray(b.data, 0, b.length);
attr = attr.next;
}
} | 3.68 |
framework_VTooltip_getUniqueId | /**
* Returns the unique id of the tooltip element.
*
* @return String containing the unique id of the tooltip, which always has
* a value
*/
public String getUniqueId() {
return uniqueId;
} | 3.68 |
hadoop_BondedS3AStatisticsContext_incrementGauge | /**
* Increment a specific gauge.
* <p>
* No-op if not defined.
* @param op operation
* @param count increment value
* @throws ClassCastException if the metric is of the wrong type
*/
@Override
public void incrementGauge(Statistic op, long count) {
getInstrumentation().incrementGauge(op, count);
} | 3.68 |
framework_ApplicationConnection_getHeartbeat | /**
 * Returns the heartbeat instance.
*/
public Heartbeat getHeartbeat() {
return heartbeat;
} | 3.68 |
flink_SingleInputUdfOperator_withForwardedFields | /**
* Adds semantic information about forwarded fields of the user-defined function. The forwarded
* fields information declares fields which are never modified by the function and which are
* forwarded at the same position to the output or unchanged copied to another position in the
* output.
*
* <p>Fields that are forwarded at the same position are specified by their position. The
* specified position must be valid for the input and output data type and have the same type.
* For example <code>withForwardedFields("f2")</code> declares that the third field of a Java
* input tuple is copied to the third field of an output tuple.
*
* <p>Fields which are unchanged copied to another position in the output are declared by
* specifying the source field reference in the input and the target field reference in the
* output. {@code withForwardedFields("f0->f2")} denotes that the first field of the Java input
* tuple is unchanged copied to the third field of the Java output tuple. When using a wildcard
* ("*") ensure that the number of declared fields and their types in input and output type
* match.
*
* <p>Multiple forwarded fields can be annotated in one ({@code withForwardedFields("f2; f3->f0;
* f4")}) or separate Strings ({@code withForwardedFields("f2", "f3->f0", "f4")}). Please refer
* to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or Flink's
* documentation for details on field references such as nested fields and wildcard.
*
* <p>It is not possible to override existing semantic information about forwarded fields which
* was for example added by a {@link
* org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields} class annotation.
*
* <p><b>NOTE: Adding semantic information for functions is optional! If used correctly,
* semantic information can help the Flink optimizer to generate more efficient execution plans.
* However, incorrect semantic information can cause the optimizer to generate incorrect
* execution plans which compute wrong results! So be careful when adding semantic information.
* </b>
*
* @param forwardedFields A list of field forward expressions.
* @return This operator with annotated forwarded field information.
* @see org.apache.flink.api.java.functions.FunctionAnnotation
* @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields
*/
public O withForwardedFields(String... forwardedFields) {
if (this.udfSemantics == null) {
// extract semantic properties from function annotations
setSemanticProperties(extractSemanticAnnotations(getFunction().getClass()));
}
if (this.udfSemantics == null
|| this.analyzedUdfSemantics) { // discard analyzed semantic properties
setSemanticProperties(new SingleInputSemanticProperties());
SemanticPropUtil.getSemanticPropsSingleFromString(
this.udfSemantics,
forwardedFields,
null,
null,
this.getInputType(),
this.getResultType());
} else {
if (udfWithForwardedFieldsAnnotation(getFunction().getClass())) {
// refuse semantic information as it would override the function annotation
throw new SemanticProperties.InvalidSemanticAnnotationException(
"Forwarded field information "
+ "has already been added by a function annotation for this operator. "
+ "Cannot overwrite function annotations.");
} else {
SemanticPropUtil.getSemanticPropsSingleFromString(
this.udfSemantics,
forwardedFields,
null,
null,
this.getInputType(),
this.getResultType());
}
}
@SuppressWarnings("unchecked")
O returnType = (O) this;
return returnType;
} | 3.68 |
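A minimal sketch of how the declaration is used with the (legacy) DataSet API; the pipeline and tuple fields are made up for illustration:

```java
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple3;

public class ForwardedFieldsDemo {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple3<Integer, String, Long>> input =
                env.fromElements(Tuple3.of(1, "a", 10L), Tuple3.of(2, "b", 20L));

        // f0 and f2 are copied unchanged to the same positions, so they can be
        // declared as forwarded; f1 is modified and must not be declared.
        DataSet<Tuple3<Integer, String, Long>> result = input
                .map(new MapFunction<Tuple3<Integer, String, Long>, Tuple3<Integer, String, Long>>() {
                    @Override
                    public Tuple3<Integer, String, Long> map(Tuple3<Integer, String, Long> t) {
                        return Tuple3.of(t.f0, t.f1.toUpperCase(), t.f2);
                    }
                })
                .withForwardedFields("f0", "f2");

        result.print();
    }
}
```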
flink_Time_milliseconds | /** Creates a new {@link Time} that represents the given number of milliseconds. */
public static Time milliseconds(long milliseconds) {
return of(milliseconds, TimeUnit.MILLISECONDS);
} | 3.68 |
dubbo_InjvmExporterListener_unexported | /**
* Overrides the unexported method to remove the given exporter from the exporters ConcurrentHashMap,
* <p>
* and to notify all registered ExporterChangeListeners of the unexport event.
*
* @param exporter The Exporter instance that has been unexported.
* @throws RpcException If there is an error during the unexport process.
*/
@Override
public void unexported(Exporter<?> exporter) throws RpcException {
String serviceKey = exporter.getInvoker().getUrl().getServiceKey();
exporters.remove(serviceKey, exporter);
Set<ExporterChangeListener> listeners = exporterChangeListeners.get(serviceKey);
if (!CollectionUtils.isEmpty(listeners)) {
for (ExporterChangeListener listener : listeners) {
listener.onExporterChangeUnExport(exporter);
}
}
super.unexported(exporter);
} | 3.68 |
hbase_RegexStringComparator_setCharset | /**
* Specifies the {@link Charset} to use to convert the row key to a String.
* <p>
* The row key needs to be converted to a String in order to be matched against the regular
* expression. This method controls which charset is used to do this conversion.
* <p>
* If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} is recommended.
* @param charset The charset to use.
*/
public void setCharset(final Charset charset) {
engine.setCharset(charset.name());
} | 3.68 |
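A hypothetical filter built with this comparator, assuming the HBase 2.x client API where `RowFilter` takes a `CompareOperator`:

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;

public class RegexRowFilterSketch {
    static RowFilter rowsStartingWithUserId() {
        // Match row keys of the form "user|<digits>".
        RegexStringComparator comparator = new RegexStringComparator("^user\\|[0-9]+$");
        // Row keys are arbitrary bytes, so decode them as ISO-8859-1 before matching,
        // as recommended in the javadoc above.
        comparator.setCharset(StandardCharsets.ISO_8859_1);
        return new RowFilter(CompareOperator.EQUAL, comparator);
    }
}
```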
rocketmq-connect_PositionStorageWriter_call | /**
* Computes a result, or throws an exception if unable to do so.
*
* @return computed result
* @throws Exception if unable to compute a result
*/
@Override
public Void call() {
try {
// has been canceled
if (flushId != currentFlushId) {
return null;
}
positionManagementService.putPosition(toFlush);
log.debug("Submitting {} entries to backing store. The offsets are: {}", data.size(), toFlush);
positionManagementService.persist();
positionManagementService.synchronize(true);
// persist finished
toFlush = null;
currentFlushId++;
} catch (Throwable throwable) {
// rollback
cancelFlush();
this.callback.onCompletion(throwable, null, null);
}
return null;
} | 3.68 |
hbase_RequestConverter_buildGetRequest | /**
* Create a protocol buffer GetRequest for a client Get
* @param regionName the name of the region to get
* @param get the client Get
* @return a protocol buffer GetRequest
*/
public static GetRequest buildGetRequest(final byte[] regionName, final Get get)
throws IOException {
GetRequest.Builder builder = GetRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setGet(ProtobufUtil.toGet(get));
return builder.build();
} | 3.68 |
graphhopper_OSMNodeData_getNodeTagCapacity | /**
* @return the number of nodes for which we store tags
*/
public long getNodeTagCapacity() {
return nodeKVStorage.getCapacity();
} | 3.68 |
hudi_CleanPlanner_getDeletePaths | /**
* Returns files to be cleaned for the given partitionPath based on cleaning policy.
*/
public Pair<Boolean, List<CleanFileInfo>> getDeletePaths(String partitionPath, Option<HoodieInstant> earliestCommitToRetain) {
HoodieCleaningPolicy policy = config.getCleanerPolicy();
Pair<Boolean, List<CleanFileInfo>> deletePaths;
if (policy == HoodieCleaningPolicy.KEEP_LATEST_COMMITS) {
deletePaths = getFilesToCleanKeepingLatestCommits(partitionPath, earliestCommitToRetain);
} else if (policy == HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS) {
deletePaths = getFilesToCleanKeepingLatestVersions(partitionPath);
} else if (policy == HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS) {
deletePaths = getFilesToCleanKeepingLatestHours(partitionPath, earliestCommitToRetain);
} else {
throw new IllegalArgumentException("Unknown cleaning policy : " + policy.name());
}
LOG.info(deletePaths.getValue().size() + " patterns used to delete in partition path:" + partitionPath);
if (deletePaths.getKey()) {
LOG.info("Partition " + partitionPath + " to be deleted");
}
return deletePaths;
} | 3.68 |
morf_AbstractSqlDialectTest_testCastStringLiteralToInteger | /**
 * Tests the output of a cast from a string literal to an integer.
*/
@Test
public void testCastStringLiteralToInteger() {
String result = testDialect.getSqlFrom(new Cast(new FieldLiteral("1234567890"), DataType.INTEGER, 10));
assertEquals(expectedStringLiteralToIntegerCast(), result);
} | 3.68 |
flink_BasicTypeInfo_shouldAutocastTo | /**
 * Returns whether this type should be automatically cast to the target type in an arithmetic
* operation.
*/
@PublicEvolving
public boolean shouldAutocastTo(BasicTypeInfo<?> to) {
for (Class<?> possibleTo : possibleCastTargetTypes) {
if (possibleTo.equals(to.getTypeClass())) {
return true;
}
}
return false;
} | 3.68 |
framework_AbstractInMemoryContainer_addListener | // ItemSetChangeNotifier
/**
* @deprecated As of 7.0, replaced by
* {@link #addItemSetChangeListener(Container.ItemSetChangeListener)}
*/
@Deprecated
@Override
public void addListener(Container.ItemSetChangeListener listener) {
addItemSetChangeListener(listener);
} | 3.68 |
hadoop_RenameFilesStage_getFilesCommitted | /**
* Get the list of files committed.
* Access is not synchronized.
* @return direct access to the list of files.
*/
public synchronized List<FileEntry> getFilesCommitted() {
return filesCommitted;
} | 3.68 |
flink_ResolvedSchema_of | /** Shortcut for a resolved schema of only columns. */
public static ResolvedSchema of(Column... columns) {
return ResolvedSchema.of(Arrays.asList(columns));
} | 3.68 |
dubbo_StringUtils_isNotBlank | /**
* is not blank string.
*
* @param cs source string.
* @return is not blank.
*/
public static boolean isNotBlank(CharSequence cs) {
return !isBlank(cs);
} | 3.68 |
hudi_DataSourceUtils_dropDuplicates | /**
* Drop records already present in the dataset.
*
* @param jssc JavaSparkContext
* @param incomingHoodieRecords HoodieRecords to deduplicate
* @param writeConfig HoodieWriteConfig
*/
@SuppressWarnings("unchecked")
public static JavaRDD<HoodieRecord> dropDuplicates(JavaSparkContext jssc, JavaRDD<HoodieRecord> incomingHoodieRecords,
HoodieWriteConfig writeConfig) {
try {
SparkRDDReadClient client = new SparkRDDReadClient<>(new HoodieSparkEngineContext(jssc), writeConfig);
return client.tagLocation(incomingHoodieRecords)
.filter(r -> !((HoodieRecord<HoodieRecordPayload>) r).isCurrentLocationKnown());
} catch (TableNotFoundException e) {
// this will be executed when there is no hoodie table yet
// so no dups to drop
return incomingHoodieRecords;
}
} | 3.68 |
hadoop_ProducerConsumer_run | /**
* The worker continuously gets an item from input queue, process it and
* then put the processed result into output queue. It waits to get an item
* from input queue if there's none.
*/
public void run() {
while (true) {
WorkRequest<T> work;
try {
work = inputQueue.take();
} catch (InterruptedException e) {
// It is assumed that if an interrupt occurs while taking a work
// out from input queue, the interrupt is likely triggered by
// ProducerConsumer.shutdown(). Therefore, exit the thread.
LOG.debug("Interrupted while waiting for requests from inputQueue.");
return;
}
boolean isDone = false;
while (!isDone) {
try {
// if the interrupt happens while the work is being processed,
// go back to process the same work again.
WorkReport<R> result = processor.processItem(work);
outputQueue.put(result);
isDone = true;
} catch (InterruptedException ie) {
LOG.debug("Worker thread was interrupted while processing an item,"
+ " or putting into outputQueue. Retrying...");
}
}
}
} | 3.68 |
hbase_ArrayBackedTag_getValueLength | /** Returns Length of actual tag bytes within the backed buffer */
@Override
public int getValueLength() {
return this.length - INFRASTRUCTURE_SIZE;
} | 3.68 |
pulsar_AuthorizationService_allowTopicOperationAsync | /**
 * Check whether the given role is allowed to perform the given operation on the topic.
 *
 * @param topicName
 * @param operation
 * @param role
 * @param authData
 * additional authdata in json for targeted authorization provider
 * @return a future that completes with {@code true} if the operation is allowed, {@code false} otherwise
*/
public CompletableFuture<Boolean> allowTopicOperationAsync(TopicName topicName,
TopicOperation operation,
String role,
AuthenticationDataSource authData) {
if (log.isDebugEnabled()) {
log.debug("Check if role {} is allowed to execute topic operation {} on topic {}",
role, operation, topicName);
}
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
CompletableFuture<Boolean> allowFuture =
provider.allowTopicOperationAsync(topicName, role, operation, authData);
if (log.isDebugEnabled()) {
return allowFuture.whenComplete((allowed, exception) -> {
if (exception == null) {
if (allowed) {
log.debug("Topic operation {} on topic {} is allowed: role = {}",
operation, topicName, role);
} else {
log.debug("Topic operation {} on topic {} is NOT allowed: role = {}",
operation, topicName, role);
}
} else {
log.debug("Failed to check if topic operation {} on topic {} is allowed:"
+ " role = {}",
operation, topicName, role, exception);
}
});
} else {
return allowFuture;
}
} | 3.68 |
flink_StreamExecutionEnvironment_createLocalEnvironment | /**
* Creates a {@link LocalStreamEnvironment}. The local execution environment will run the
* program in a multi-threaded fashion in the same JVM as the environment was created in.
*
* @param configuration Pass a custom configuration into the cluster
* @return A local execution environment with the specified parallelism.
*/
public static LocalStreamEnvironment createLocalEnvironment(Configuration configuration) {
if (configuration.getOptional(CoreOptions.DEFAULT_PARALLELISM).isPresent()) {
return new LocalStreamEnvironment(configuration);
} else {
Configuration copyOfConfiguration = new Configuration();
copyOfConfiguration.addAll(configuration);
copyOfConfiguration.set(CoreOptions.DEFAULT_PARALLELISM, defaultLocalParallelism);
return new LocalStreamEnvironment(copyOfConfiguration);
}
} | 3.68 |
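A short usage sketch (the chosen parallelism and job name are illustrative): when `DEFAULT_PARALLELISM` is present in the passed configuration it is kept, otherwise the method above injects the static default.

```java
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class LocalEnvDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(CoreOptions.DEFAULT_PARALLELISM, 2); // explicitly chosen parallelism
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(conf);
        env.fromSequence(1, 10).print();
        env.execute("local-demo");
    }
}
```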
hbase_TBoundedThreadPoolServer_shutdownServer | /**
* Loop until {@link ExecutorService#awaitTermination} finally does return without an interrupted
* exception. If we don't do this, then we'll shut down prematurely. We want to let the executor
* service clear its task queue, closing client sockets appropriately.
*/
private void shutdownServer() {
executorService.shutdown();
long msLeftToWait = serverOptions.stopTimeoutUnit.toMillis(serverOptions.stopTimeoutVal);
long timeMillis = EnvironmentEdgeManager.currentTime();
LOG
.info("Waiting for up to " + msLeftToWait + " ms to finish processing" + " pending requests");
boolean interrupted = false;
while (msLeftToWait >= 0) {
try {
executorService.awaitTermination(msLeftToWait, TimeUnit.MILLISECONDS);
break;
} catch (InterruptedException ix) {
long timePassed = EnvironmentEdgeManager.currentTime() - timeMillis;
msLeftToWait -= timePassed;
timeMillis += timePassed;
interrupted = true;
}
}
LOG.info("Interrupting all worker threads and waiting for " + TIME_TO_WAIT_AFTER_SHUTDOWN_MS
+ " ms longer");
// This will interrupt all the threads, even those running a task.
executorService.shutdownNow();
Threads.sleepWithoutInterrupt(TIME_TO_WAIT_AFTER_SHUTDOWN_MS);
// Preserve the interrupted status.
if (interrupted) {
Thread.currentThread().interrupt();
}
LOG.info("Thrift server shutdown complete");
} | 3.68 |
framework_HierarchicalContainer_addFilteredChild | /**
 * Adds the given childItemId to the filteredChildren of the parentItemId and
 * sets its filteredParent.
*
* @param parentItemId
* @param childItemId
*/
private void addFilteredChild(Object parentItemId, Object childItemId) {
LinkedList<Object> parentToChildrenList = filteredChildren
.get(parentItemId);
if (parentToChildrenList == null) {
parentToChildrenList = new LinkedList<Object>();
filteredChildren.put(parentItemId, parentToChildrenList);
}
filteredParent.put(childItemId, parentItemId);
parentToChildrenList.add(childItemId);
} | 3.68 |
framework_Escalator_getHeader | /**
* Returns the row container for the header in this Escalator.
*
* @return the header. Never <code>null</code>
*/
public RowContainer getHeader() {
return header;
} | 3.68 |
flink_TableChange_getNewColumnName | /** Returns the new column name after renaming the column name. */
public String getNewColumnName() {
return newColumn.getName();
} | 3.68 |
hbase_KeyValueUtil_write | /**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
* @return Length written on stream
* @see #create(DataInput) for the inverse function
*/
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writables write used to serialize KVs. Need to figure
// way to make it
// work for all implementations.
int length = kv.getLength();
out.writeInt(length);
out.write(kv.getBuffer(), kv.getOffset(), length);
return (long) length + Bytes.SIZEOF_INT;
} | 3.68 |
dubbo_AbstractCluster_doInvoke | /**
 * The only purpose is to build an interceptor chain, so the cluster-related logic doesn't matter.
* Use ClusterInvoker<T> to replace AbstractClusterInvoker<T> in the future.
*/
@Override
protected Result doInvoke(Invocation invocation, List<Invoker<T>> invokers, LoadBalance loadbalance)
throws RpcException {
return null;
} | 3.68 |
hudi_BaseHoodieTableServiceClient_getPendingRollbackInfos | /**
* Fetch map of pending commits to be rolled-back to {@link HoodiePendingRollbackInfo}.
*
* @param metaClient instance of {@link HoodieTableMetaClient} to use.
* @return map of pending commits to be rolled-back instants to Rollback Instant and Rollback plan Pair.
*/
protected Map<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInfos(HoodieTableMetaClient metaClient, boolean ignoreCompactionAndClusteringInstants) {
List<HoodieInstant> instants = metaClient.getActiveTimeline().filterPendingRollbackTimeline().getInstants();
Map<String, Option<HoodiePendingRollbackInfo>> infoMap = new HashMap<>();
for (HoodieInstant rollbackInstant : instants) {
HoodieRollbackPlan rollbackPlan;
try {
rollbackPlan = RollbackUtils.getRollbackPlan(metaClient, rollbackInstant);
} catch (Exception e) {
if (rollbackInstant.isRequested()) {
LOG.warn("Fetching rollback plan failed for " + rollbackInstant + ", deleting the plan since it's in REQUESTED state", e);
try {
metaClient.getActiveTimeline().deletePending(rollbackInstant);
} catch (HoodieIOException he) {
LOG.warn("Cannot delete " + rollbackInstant, he);
continue;
}
} else {
// Here we assume that if the rollback is inflight, the rollback plan is intact
// in instant.rollback.requested. The exception here can be due to other reasons.
LOG.warn("Fetching rollback plan failed for " + rollbackInstant + ", skip the plan", e);
}
continue;
}
try {
String action = rollbackPlan.getInstantToRollback().getAction();
String instantToRollback = rollbackPlan.getInstantToRollback().getCommitTime();
if (ignoreCompactionAndClusteringInstants) {
if (!HoodieTimeline.COMPACTION_ACTION.equals(action)) {
boolean isClustering = HoodieTimeline.REPLACE_COMMIT_ACTION.equals(action)
&& ClusteringUtils.getClusteringPlan(metaClient, new HoodieInstant(true, action, instantToRollback)).isPresent();
if (!isClustering) {
infoMap.putIfAbsent(instantToRollback, Option.of(new HoodiePendingRollbackInfo(rollbackInstant, rollbackPlan)));
}
}
} else {
infoMap.putIfAbsent(instantToRollback, Option.of(new HoodiePendingRollbackInfo(rollbackInstant, rollbackPlan)));
}
} catch (Exception e) {
LOG.warn("Processing rollback plan failed for " + rollbackInstant + ", skip the plan", e);
}
}
return infoMap;
} | 3.68 |
hadoop_ListResultEntrySchema_contentLength | /**
* Get the contentLength value.
*
* @return the contentLength value
*/
public Long contentLength() {
return contentLength;
} | 3.68 |
hadoop_YarnRegistryViewForProviders_deleteComponent | /**
* Delete a component.
* @param containerId component name
* @throws IOException
*/
public void deleteComponent(ComponentInstanceId instanceId,
String containerId) throws IOException {
String path = RegistryUtils.componentPath(
user, serviceClass, instanceName,
containerId);
LOG.info(instanceId + ": Deleting registry path " + path);
registryOperations.delete(path, false);
} | 3.68 |
flink_StreamExecutionEnvironment_registerTypeWithKryoSerializer | /**
* Registers the given Serializer via its class as a serializer for the given type at the
* KryoSerializer.
*
* @param type The class of the types serialized with the given serializer.
* @param serializerClass The class of the serializer to use.
*/
@SuppressWarnings("rawtypes")
public void registerTypeWithKryoSerializer(
Class<?> type, Class<? extends Serializer> serializerClass) {
config.registerTypeWithKryoSerializer(type, serializerClass);
} | 3.68 |
dubbo_RestRPCInvocationUtil_createParseContext | /**
* create parseMethodArgs context
*
* @param request
* @param originRequest
* @param originResponse
* @param restMethodMetadata
* @return
*/
private static ProviderParseContext createParseContext(
RequestFacade request, Object originRequest, Object originResponse, RestMethodMetadata restMethodMetadata) {
ProviderParseContext parseContext = new ProviderParseContext(request);
parseContext.setResponse(originResponse);
parseContext.setRequest(originRequest);
Object[] objects = new Object[restMethodMetadata.getArgInfos().size()];
parseContext.setArgs(Arrays.asList(objects));
parseContext.setArgInfos(restMethodMetadata.getArgInfos());
return parseContext;
} | 3.68 |
hadoop_Chain_joinAllThreads | // wait till all threads finish
void joinAllThreads() throws IOException, InterruptedException {
for (Thread thread : threads) {
thread.join();
}
Throwable th = getThrowable();
if (th != null) {
if (th instanceof IOException) {
throw (IOException) th;
} else if (th instanceof InterruptedException) {
throw (InterruptedException) th;
} else {
throw new RuntimeException(th);
}
}
} | 3.68 |
hadoop_LoadManifestsStage_coalesceDirectories | /**
* Coalesce all directories and clear the entry in the manifest.
 * There's only ever one writer at a time, which it is hoped reduces
 * contention. The filtering is done before the lock is acquired: if there
 * are no new directories, the write lock is never needed.
 * @param manifest manifest to process
 * @return the number of directories created
*/
@VisibleForTesting
int coalesceDirectories(final TaskManifest manifest) {
// build a list of dirs to create.
// this scans the map
final List<DirEntry> toCreate = manifest.getDestDirectories().stream()
.filter(e -> !directories.containsKey(e))
.collect(Collectors.toList());
if (!toCreate.isEmpty()) {
// need to add more directories;
// still a possibility that they may be created between the
// filtering and this thread having the write lock.
synchronized (directories) {
toCreate.forEach(entry -> {
directories.putIfAbsent(entry.getDir(), entry);
});
}
}
return toCreate.size();
} | 3.68 |
framework_GridLayout_writeEmptyColsAndRows | /**
* Fills in the design with rows and empty columns. This needs to be done
* for empty {@link GridLayout}, because there's no other way to serialize
* info about number of columns and rows if there are absolutely no
* components in the {@link GridLayout}
*
* @param design
*/
private void writeEmptyColsAndRows(Element design) {
int rowCount = getState(false).rows;
int colCount = getState(false).columns;
// only write cols and rows tags if size is not 1x1
if (rowCount == 1 && colCount == 1) {
return;
}
for (int i = 0; i < rowCount; i++) {
Element row = design.appendElement("row");
for (int j = 0; j < colCount; j++) {
row.appendElement("column");
}
}
} | 3.68 |
framework_LegacyLocatorStrategy_getElementByPathStartingAt | /**
* {@inheritDoc}
*/
@Override
public Element getElementByPathStartingAt(String path,
Element baseElement) {
/*
* Path is of type "targetWidgetPath#componentPart" or
* "targetWidgetPath".
*/
String[] parts = path.split(LegacyLocatorStrategy.SUBPART_SEPARATOR, 2);
String widgetPath = parts[0];
// Note that this only works if baseElement can be mapped to a
// widget to which the path is relative. Otherwise, the current
// implementation simply interprets the path as if baseElement was
// null.
Widget baseWidget = WidgetUtil.findWidget(baseElement);
Widget w = getWidgetFromPath(widgetPath, baseWidget);
if (w == null || !WidgetUtil.isAttachedAndDisplayed(w)) {
return null;
}
if (parts.length == 1) {
int pos = widgetPath.indexOf("domChild");
if (pos == -1) {
return w.getElement();
}
// Contains dom reference to a sub element of the widget
String subPath = widgetPath.substring(pos);
return getElementByDOMPath(w.getElement(), subPath);
} else if (parts.length == 2) {
if (w instanceof SubPartAware) {
return ((SubPartAware) w).getSubPartElement(parts[1]);
}
}
return null;
} | 3.68 |
hbase_MiniZooKeeperCluster_setupTestEnv | // / XXX: From o.a.zk.t.ClientBase
private static void setupTestEnv() {
// during the tests we run with 100K prealloc in the logs.
// on windows systems prealloc of 64M was seen to take ~15seconds
// resulting in test failure (client timeout on first session).
// set env and directly in order to handle static init/gc issues
System.setProperty("zookeeper.preAllocSize", "100");
FileTxnLog.setPreallocSize(100 * 1024);
// allow all 4 letter words
System.setProperty("zookeeper.4lw.commands.whitelist", "*");
} | 3.68 |
hbase_ClusterId_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return this.id;
} | 3.68 |
morf_ColumnTypeBean_isNullable | /**
* @return the nullable
*/
@Override
public boolean isNullable() {
return nullable;
} | 3.68 |
hbase_UserProvider_getCurrent | /**
* Return the current user within the current execution context
* @throws IOException if the user cannot be loaded
*/
public User getCurrent() throws IOException {
return User.getCurrent();
} | 3.68 |
framework_VComboBox_setSuggestions | /**
* Sets the suggestions rendered in the menu.
*
* @param suggestions
* The suggestions to be rendered in the menu
*/
public void setSuggestions(Collection<ComboBoxSuggestion> suggestions) {
if (enableDebug) {
debug("VComboBox.SM: setSuggestions(" + suggestions + ")");
}
clearItems();
boolean isFirstIteration = true;
for (final ComboBoxSuggestion suggestion : suggestions) {
final MenuItem mi = new MenuItem(suggestion.getDisplayString(),
true, suggestion);
String style = suggestion.getStyle();
if (style != null) {
mi.addStyleName("v-filterselect-item-" + style);
}
Roles.getListitemRole().set(mi.getElement());
WidgetUtil.sinkOnloadForImages(mi.getElement());
this.addItem(mi);
// By default, first item on the list is always highlighted,
// unless adding new items is allowed.
if (isFirstIteration && !allowNewItems) {
selectItem(mi);
}
if (currentSuggestion != null && suggestion.getOptionKey()
.equals(currentSuggestion.getOptionKey())) {
// Refresh also selected caption and icon in case they have
// been updated on the server, e.g. just the item has been
// updated, but selection (from state) has stayed the same.
// FIXME need to update selected item caption separately, if
// the selected item is not in "active data range" that is
// being sent to the client. Then this can be removed.
if (currentSuggestion.getReplacementString()
.equals(tb.getText())) {
currentSuggestion = suggestion;
selectItem(mi);
setSelectedCaption(
currentSuggestion.getReplacementString());
setSelectedItemIcon(currentSuggestion.getIconUri());
}
}
isFirstIteration = false;
}
} | 3.68 |
hadoop_OBSFileSystem_getFileStatus | /**
* Return a file status object that represents the path.
*
* @param f the path we want information from
* @return a FileStatus object
* @throws FileNotFoundException when the path does not exist
* @throws IOException on other problems
*/
@Override
public FileStatus getFileStatus(final Path f)
throws FileNotFoundException, IOException {
for (int retryTime = 1;
retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
return innerGetFileStatus(f);
} catch (FileNotFoundException | FileConflictException e) {
throw e;
} catch (IOException e) {
LOG.warn("Failed to get file status for [{}], retry time [{}], "
+ "exception [{}]", f, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
return innerGetFileStatus(f);
} | 3.68 |
hadoop_Lz4Codec_getDefaultExtension | /**
* Get the default filename extension for this kind of compression.
*
* @return <code>.lz4</code>.
*/
@Override
public String getDefaultExtension() {
return CodecConstants.LZ4_CODEC_EXTENSION;
} | 3.68 |
hbase_ParseFilter_checkForAnd | /**
* Checks if the current index of filter string we are on is the beginning of the keyword 'AND'
* <p>
* @param filterStringAsByteArray filter string given by the user
* @param indexOfAnd index at which an 'A' was read
* @return true if the keyword 'AND' is at the current index
*/
public static boolean checkForAnd(byte[] filterStringAsByteArray, int indexOfAnd)
throws CharacterCodingException {
try {
if (
filterStringAsByteArray[indexOfAnd] == ParseConstants.A
&& filterStringAsByteArray[indexOfAnd + 1] == ParseConstants.N
&& filterStringAsByteArray[indexOfAnd + 2] == ParseConstants.D
&& (filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.WHITESPACE
|| filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.RPAREN)
&& (filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.WHITESPACE
|| filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.LPAREN)
) {
return true;
} else {
return false;
}
} catch (ArrayIndexOutOfBoundsException e) {
return false;
}
} | 3.68 |
flink_OptimizerNode_computeOutputEstimates | /**
* Causes this node to compute its output estimates (such as number of rows, size in bytes)
* based on the inputs and the compiler hints. The compiler hints are instantiated with
* conservative default values which are used if no other values are provided. Nodes may access
* the statistics to determine relevant information.
*
* @param statistics The statistics object which may be accessed to get statistical information.
* The parameter may be null, if no statistics are available.
*/
public void computeOutputEstimates(DataStatistics statistics) {
// sanity checking
for (DagConnection c : getIncomingConnections()) {
if (c.getSource() == null) {
throw new CompilerException(
"Bug: Estimate computation called before inputs have been set.");
}
}
// let every operator do its computation
computeOperatorSpecificDefaultEstimates(statistics);
if (this.estimatedOutputSize < 0) {
this.estimatedOutputSize = -1;
}
if (this.estimatedNumRecords < 0) {
this.estimatedNumRecords = -1;
}
// overwrite default estimates with hints, if given
if (getOperator() == null || getOperator().getCompilerHints() == null) {
return;
}
CompilerHints hints = getOperator().getCompilerHints();
if (hints.getOutputSize() >= 0) {
this.estimatedOutputSize = hints.getOutputSize();
}
if (hints.getOutputCardinality() >= 0) {
this.estimatedNumRecords = hints.getOutputCardinality();
}
if (hints.getFilterFactor() >= 0.0f) {
if (this.estimatedNumRecords >= 0) {
this.estimatedNumRecords =
(long) (this.estimatedNumRecords * hints.getFilterFactor());
if (this.estimatedOutputSize >= 0) {
this.estimatedOutputSize =
(long) (this.estimatedOutputSize * hints.getFilterFactor());
}
} else if (this instanceof SingleInputNode) {
OptimizerNode pred = ((SingleInputNode) this).getPredecessorNode();
if (pred != null && pred.getEstimatedNumRecords() >= 0) {
this.estimatedNumRecords =
(long) (pred.getEstimatedNumRecords() * hints.getFilterFactor());
}
}
}
// use the width to infer the cardinality (given size) and vice versa
if (hints.getAvgOutputRecordSize() >= 1) {
// the estimated number of rows based on size
if (this.estimatedNumRecords == -1 && this.estimatedOutputSize >= 0) {
this.estimatedNumRecords =
(long) (this.estimatedOutputSize / hints.getAvgOutputRecordSize());
} else if (this.estimatedOutputSize == -1 && this.estimatedNumRecords >= 0) {
this.estimatedOutputSize =
(long) (this.estimatedNumRecords * hints.getAvgOutputRecordSize());
}
}
} | 3.68 |
framework_ServiceInitEvent_getAddedDependencyFilters | /**
* Gets an unmodifiable list of all dependency filters that have been added
* for the service.
*
* @return the current list of added dependency filters.
*
* @since 8.1
*/
public List<DependencyFilter> getAddedDependencyFilters() {
return Collections.unmodifiableList(addedDependencyFilters);
} | 3.68 |
flink_RocksDBKeyedStateBackend_dispose | /** Should only be called by one thread, and only after all accesses to the DB happened. */
@Override
public void dispose() {
if (this.disposed) {
return;
}
super.dispose();
// This call will block until all clients that still acquire access to the RocksDB instance
// have released it,
// so that we cannot release the native resources while clients are still working with it in
// parallel.
rocksDBResourceGuard.close();
// IMPORTANT: null reference to signal potential async checkpoint workers that the db was
// disposed, as
// working on the disposed object results in SEGFAULTS.
if (db != null) {
IOUtils.closeQuietly(writeBatchWrapper);
// Metric collection occurs on a background thread. When this method returns
// it is guaranteed that the RocksDB reference has been invalidated
// and no more metric collection will be attempted against the database.
if (nativeMetricMonitor != null) {
nativeMetricMonitor.close();
}
List<ColumnFamilyOptions> columnFamilyOptions =
new ArrayList<>(kvStateInformation.values().size());
// RocksDB's native memory management requires that *all* CFs (including default) are
// closed before the
// DB is closed. See:
// https://github.com/facebook/rocksdb/wiki/RocksJava-Basics#opening-a-database-with-column-families
// Start with default CF ...
RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(
columnFamilyOptions, defaultColumnFamily);
IOUtils.closeQuietly(defaultColumnFamily);
// ... continue with the ones created by Flink...
for (RocksDbKvStateInfo kvStateInfo : kvStateInformation.values()) {
RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(
columnFamilyOptions, kvStateInfo.columnFamilyHandle);
IOUtils.closeQuietly(kvStateInfo.columnFamilyHandle);
}
// ... and finally close the DB instance ...
IOUtils.closeQuietly(db);
columnFamilyOptions.forEach(IOUtils::closeQuietly);
IOUtils.closeQuietly(optionsContainer);
ttlCompactFiltersManager.disposeAndClearRegisteredCompactionFactories();
kvStateInformation.clear();
cleanInstanceBasePath();
}
IOUtils.closeQuietly(checkpointSnapshotStrategy);
this.disposed = true;
} | 3.68 |
framework_AbstractColorPicker_setRGBVisibility | /**
* Set the visibility of the RGB Tab.
*
* @param visible
* The visibility
*/
public void setRGBVisibility(boolean visible) {
if (!visible && !hsvVisible && !swatchesVisible) {
throw new IllegalArgumentException("Cannot hide all tabs.");
}
rgbVisible = visible;
if (window != null) {
window.setRGBTabVisible(visible);
}
} | 3.68 |
pulsar_TopicName_getPartitionIndex | /**
* @return partition index of the completeTopicName.
* It returns -1 if the completeTopicName (topic) is not partitioned.
*/
public static int getPartitionIndex(String topic) {
int partitionIndex = -1;
if (topic.contains(PARTITIONED_TOPIC_SUFFIX)) {
try {
String idx = StringUtils.substringAfterLast(topic, PARTITIONED_TOPIC_SUFFIX);
partitionIndex = Integer.parseInt(idx);
if (partitionIndex < 0) {
// for the "topic-partition--1"
partitionIndex = -1;
} else if (StringUtils.length(idx) != String.valueOf(partitionIndex).length()) {
// for the "topic-partition-01"
partitionIndex = -1;
}
} catch (NumberFormatException nfe) {
// ignore exception
}
}
return partitionIndex;
} | 3.68 |
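Assuming Pulsar's partition suffix constant resolves to `-partition-`, the method behaves as in this illustrative sketch:

```java
import org.apache.pulsar.common.naming.TopicName;

public class PartitionIndexDemo {
    public static void main(String[] args) {
        // A regular partition suffix yields its index.
        System.out.println(TopicName.getPartitionIndex(
                "persistent://tenant/ns/orders-partition-3"));  // 3
        // A non-partitioned topic yields -1.
        System.out.println(TopicName.getPartitionIndex(
                "persistent://tenant/ns/orders"));              // -1
        // A zero-padded index like "-partition-01" is rejected and also yields -1.
        System.out.println(TopicName.getPartitionIndex(
                "persistent://tenant/ns/orders-partition-01")); // -1
    }
}
```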
hbase_ScanQueryMatcher_compareKeyForNextRow | /**
* @param nextIndexed the key of the next entry in the block index (if any)
* @param currentCell The Cell we're using to calculate the seek key
* @return result of the compare between the indexed key and the key portion of the passed cell
*/
public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) {
return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0,
null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
} | 3.68 |
morf_AliasedField_lessThanOrEqualTo | /**
* @param value object to compare to (right hand side)
* @return a {@link Criterion} for a less than or equal to expression of this field.
*/
public Criterion lessThanOrEqualTo(Object value) {
return Criterion.lessThanOrEqualTo(this, value);
} | 3.68 |
morf_Criterion_isNotNull | /**
* Helper method to create a new "IS NOT NULL" expression.
*
* <blockquote><pre>
* Criterion.isNotNull(new Field("agreementflag"));</pre></blockquote>
*
* @param field the field to check if not null
* @return a new Criterion object
*/
public static Criterion isNotNull(AliasedField field) {
return new Criterion(Operator.ISNOTNULL, field, null);
} | 3.68 |
hadoop_TaskPool_run | /**
* Execute the task across the data.
* @param task task to execute
* @param <E> exception which may be raised in execution.
* @return true if the operation executed successfully
* @throws E any exception raised.
* @throws IOException IOExceptions raised by remote iterator or in execution.
*/
public <E extends Exception> boolean run(Task<I, E> task) throws E, IOException {
requireNonNull(items, "items");
if (!items.hasNext()) {
// if there are no items, return without worrying about
// execution pools, errors etc.
return true;
}
if (service != null) {
// thread pool, so run in parallel
return runParallel(task);
} else {
// single threaded execution.
return runSingleThreaded(task);
}
} | 3.68 |
flink_HiveASTParseDriver_create | /**
* Creates an HiveParserASTNode for the given token. The HiveParserASTNode is a
* wrapper around antlr's CommonTree class that implements the Node interface.
*
* @param payload The token.
* @return Object (which is actually an HiveParserASTNode) for the token.
*/
@Override
public Object create(Token payload) {
return new HiveParserASTNode(payload);
} | 3.68 |
hbase_FIFOCompactionPolicy_isEmptyStoreFile | /**
 * The FIFOCompactionPolicy only chooses the TTL-expired store files as the compaction candidates.
 * If all the store files are TTL expired, then the compaction will generate a new empty file,
 * whose max timestamp will be Long.MAX_VALUE. If not considered separately, that store file
 * will never be archived because its TTL will never expire. So we check the empty store
 * file separately (See HBASE-21504).
*/
private boolean isEmptyStoreFile(HStoreFile sf) {
return sf.getReader().getEntries() == 0;
} | 3.68 |
hbase_CellFlatMap_put | // -------------------------------- Updates --------------------------------
// All updating methods below are unsupported.
// Assuming an array of Cells will be allocated externally,
// filled with Cells, and provided at construction time.
// Later the structure is immutable.
@Override
public Cell put(Cell k, Cell v) {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_IndividualBytesFieldCell_getValueArray | // 7) Value
@Override
public byte[] getValueArray() {
// Value could be null
return (value == null) ? HConstants.EMPTY_BYTE_ARRAY : value;
} | 3.68 |
hadoop_MappableBlockLoaderFactory_createCacheLoader | /**
* Create a specific cache loader according to the configuration.
* If persistent memory volume is not configured, return a cache loader
* for DRAM cache. Otherwise, return a cache loader for pmem cache.
*/
public static MappableBlockLoader createCacheLoader(DNConf conf) {
if (conf.getPmemVolumes() == null || conf.getPmemVolumes().length == 0) {
return new MemoryMappableBlockLoader();
}
if (NativeIO.isAvailable() && NativeIO.POSIX.isPmdkAvailable()) {
return new NativePmemMappableBlockLoader();
}
return new PmemMappableBlockLoader();
} | 3.68 |
hbase_LocalHBaseCluster_getRegionServers | /** Returns Read-only list of region server threads. */
public List<JVMClusterUtil.RegionServerThread> getRegionServers() {
return Collections.unmodifiableList(this.regionThreads);
} | 3.68 |
hadoop_AssumedRoleCredentialProvider_sanitize | /**
* Build a session name from the string, sanitizing it for the permitted
* characters.
* @param session source session
* @return a string for use in role requests.
*/
@VisibleForTesting
static String sanitize(String session) {
StringBuilder r = new StringBuilder(session.length());
for (char c: session.toCharArray()) {
if ("abcdefghijklmnopqrstuvwxyz0123456789,.@-".contains(
Character.toString(c).toLowerCase(Locale.ENGLISH))) {
r.append(c);
} else {
r.append('-');
}
}
return r.toString();
} | 3.68 |
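Since `sanitize` is package-private (it is only `@VisibleForTesting`), here is a self-contained copy of the same rule, with a usage example showing its effect on a sample session name:

```java
import java.util.Locale;

public class SessionNameSanitizer {
    // Same rule as the snippet above: keep [a-z0-9,.@-] (case-insensitive), replace the rest with '-'.
    static String sanitize(String session) {
        StringBuilder r = new StringBuilder(session.length());
        for (char c : session.toCharArray()) {
            if ("abcdefghijklmnopqrstuvwxyz0123456789,.@-".contains(
                    Character.toString(c).toLowerCase(Locale.ENGLISH))) {
                r.append(c);
            } else {
                r.append('-');
            }
        }
        return r.toString();
    }

    public static void main(String[] args) {
        System.out.println(sanitize("alice@example.com/Session 1")); // alice@example.com-Session-1
    }
}
```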
querydsl_JPAExpressions_select | /**
* Create a new detached JPQLQuery instance with the given projection
*
* @param exprs projection
* @return select(exprs)
*/
public static JPQLQuery<Tuple> select(Expression<?>... exprs) {
return new JPASubQuery<Void>().select(exprs);
} | 3.68 |
flink_CatalogManager_close | /**
* Closes the catalog manager and releases its resources.
*
* <p>This method closes all initialized catalogs and the catalog store.
*
* @throws CatalogException if an error occurs while closing the catalogs or the catalog store
*/
public void close() throws CatalogException {
// close the initialized catalogs
List<Throwable> errors = new ArrayList<>();
for (Map.Entry<String, Catalog> entry : catalogs.entrySet()) {
String catalogName = entry.getKey();
Catalog catalog = entry.getValue();
try {
catalog.close();
} catch (Throwable e) {
LOG.error(
String.format(
"Failed to close catalog %s: %s", catalogName, e.getMessage()),
e);
errors.add(e);
}
}
// close the catalog store holder
try {
catalogStoreHolder.close();
} catch (Throwable e) {
errors.add(e);
LOG.error(String.format("Failed to close catalog store holder: %s", e.getMessage()), e);
}
if (!errors.isEmpty()) {
CatalogException exception = new CatalogException("Failed to close catalog manager");
for (Throwable e : errors) {
exception.addSuppressed(e);
}
throw exception;
}
} | 3.68 |
framework_VMenuBar_clearItems | /**
* Remove all the items in this menu.
*/
public void clearItems() {
for (CustomMenuItem child : items) {
remove(child);
}
items.clear();
} | 3.68 |
framework_DesignContext_getCustomAttributes | /**
* Gets the attributes that the component did not handle.
*
* @since 7.7
* @param component
* the component to get the attributes for
* @return map of the attributes which were not recognized by the component
*/
public Map<String, String> getCustomAttributes(Component component) {
return customAttributes.get(component);
} | 3.68 |
AreaShop_RegionEvent_getRegion | /**
* Get the region of this event.
* @return The region the event is about
*/
public T getRegion() {
return region;
} | 3.68 |
flink_RegisteredBroadcastStateBackendMetaInfo_deepCopy | /** Creates a deep copy of itself. */
@Nonnull
public RegisteredBroadcastStateBackendMetaInfo<K, V> deepCopy() {
return new RegisteredBroadcastStateBackendMetaInfo<>(this);
} | 3.68 |
hadoop_ReplicaUnderConstruction_getExpectedStorageLocation | /**
* Expected block replica location as assigned when the block was allocated.
* This defines the pipeline order.
* It is not guaranteed, but expected, that the data-node actually has
* the replica.
*/
DatanodeStorageInfo getExpectedStorageLocation() {
return expectedLocation;
} | 3.68 |
framework_Form_removeItemProperty | /**
* Removes the property and corresponding field from the form.
*
* @see Item#removeItemProperty(Object)
*/
@Override
public boolean removeItemProperty(Object id) {
ownProperties.remove(id);
final Field<?> field = fields.get(id);
if (field != null) {
propertyIds.remove(id);
fields.remove(id);
detachField(field);
field.removeListener(fieldValueChangeListener);
return true;
}
return false;
} | 3.68 |
flink_TimestampedValue_getValue | /** @return The value wrapped in this {@link TimestampedValue}. */
public T getValue() {
return value;
} | 3.68 |
framework_VDebugWindow_activateSection | /**
* Activates the given {@link Section}
*
* @param section
*/
void activateSection(Section section) {
if (section != null && section != activeSection) {
Highlight.hideAll();
// remove old stuff
if (activeSection != null) {
activeSection.hide();
content.remove(activeSection.getContent());
sectionHead.remove(activeSection.getControls());
}
// update tab styles
for (int i = 0; i < tabs.getWidgetCount(); i++) {
Widget tab = tabs.getWidget(i);
tab.setStyleDependentName(STYLENAME_SELECTED,
tab == section.getTabButton());
}
// add new stuff
content.add(section.getContent());
sectionHead.add(section.getControls());
activeSection = section;
activeSection.show();
}
} | 3.68 |
framework_BasicDateClickHandler_dateClick | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.ui.CalendarComponentEvents.DateClickHandler
* #dateClick
* (com.vaadin.addon.calendar.ui.CalendarComponentEvents.DateClickEvent)
*/
@Override
public void dateClick(DateClickEvent event) {
Date clickedDate = event.getDate();
Calendar javaCalendar = event.getComponent().getInternalCalendar();
javaCalendar.setTime(clickedDate);
// as times are expanded, this is all that is needed to show one day
Date start = javaCalendar.getTime();
Date end = javaCalendar.getTime();
setDates(event, start, end);
} | 3.68 |
zxing_OneDReader_decode | // Note that we don't try rotation without the try harder flag, even if rotation was supported.
@Override
public Result decode(BinaryBitmap image,
Map<DecodeHintType,?> hints) throws NotFoundException, FormatException {
try {
return doDecode(image, hints);
} catch (NotFoundException nfe) {
boolean tryHarder = hints != null && hints.containsKey(DecodeHintType.TRY_HARDER);
if (tryHarder && image.isRotateSupported()) {
BinaryBitmap rotatedImage = image.rotateCounterClockwise();
Result result = doDecode(rotatedImage, hints);
// Record that we found it rotated 90 degrees CCW / 270 degrees CW
Map<ResultMetadataType,?> metadata = result.getResultMetadata();
int orientation = 270;
if (metadata != null && metadata.containsKey(ResultMetadataType.ORIENTATION)) {
// But if we found it reversed in doDecode(), add in that result here:
orientation = (orientation +
(Integer) metadata.get(ResultMetadataType.ORIENTATION)) % 360;
}
result.putMetadata(ResultMetadataType.ORIENTATION, orientation);
// Update result points
ResultPoint[] points = result.getResultPoints();
if (points != null) {
int height = rotatedImage.getHeight();
for (int i = 0; i < points.length; i++) {
points[i] = new ResultPoint(height - points[i].getY() - 1, points[i].getX());
}
}
return result;
} else {
throw nfe;
}
}
} | 3.68 |
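The rotation retry above only happens when the caller passes the TRY_HARDER hint. A minimal sketch using ZXing's javase helpers (the `BufferedImageLuminanceSource` class lives in the optional `javase` module; the image path is supplied by the caller):

```java
import java.awt.image.BufferedImage;
import java.io.File;
import java.util.EnumMap;
import java.util.Map;
import javax.imageio.ImageIO;

import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Result;
import com.google.zxing.ResultMetadataType;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.HybridBinarizer;

public class TryHarderDecode {
    public static void main(String[] args) throws Exception {
        BufferedImage img = ImageIO.read(new File(args[0]));
        BinaryBitmap bitmap = new BinaryBitmap(
                new HybridBinarizer(new BufferedImageLuminanceSource(img)));

        Map<DecodeHintType, Object> hints = new EnumMap<>(DecodeHintType.class);
        hints.put(DecodeHintType.TRY_HARDER, Boolean.TRUE); // enables the rotated retry

        Result result = new MultiFormatReader().decode(bitmap, hints);
        System.out.println(result.getText());
        Map<ResultMetadataType, Object> metadata = result.getResultMetadata();
        if (metadata != null && metadata.containsKey(ResultMetadataType.ORIENTATION)) {
            System.out.println("orientation: " + metadata.get(ResultMetadataType.ORIENTATION));
        }
    }
}
```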
flink_PlannerContext_getBuiltinSqlOperatorTable | /** Returns the builtin operator table and the external operator table for this environment. */
private SqlOperatorTable getBuiltinSqlOperatorTable() {
return SqlOperatorTables.chain(
new FunctionCatalogOperatorTable(
context.getFunctionCatalog(),
context.getCatalogManager().getDataTypeFactory(),
typeFactory,
context.getRexFactory()),
FlinkSqlOperatorTable.instance(context.isBatchMode()));
} | 3.68 |
hadoop_AzureNativeFileSystemStore_getDataLength | /**
* Return the actual data length of the blob with the specified properties.
* If it is a page blob, you can't rely on the length from the properties
* argument and you must get it from the file. Otherwise, you can.
*/
private long getDataLength(CloudBlobWrapper blob, BlobProperties properties)
throws AzureException {
if (blob instanceof CloudPageBlobWrapper) {
try {
return PageBlobInputStream.getPageBlobDataSize((CloudPageBlobWrapper) blob,
getInstrumentedContext(
isConcurrentOOBAppendAllowed()));
} catch (Exception e) {
throw new AzureException(
"Unexpected exception getting page blob actual data size.", e);
}
}
return properties.getLength();
} | 3.68 |
graphhopper_VectorTile_getDoubleValue | /**
* <code>optional double double_value = 3;</code>
*/
public double getDoubleValue() {
return doubleValue_;
} | 3.68 |
hudi_DirectMarkerBasedDetectionStrategy_checkMarkerConflict | /**
* We need to do list operation here.
* In order to reduce the list pressure as much as possible, first we build path prefix in advance:
* '$base_path/.temp/instant_time/partition_path', and only list these specific partition_paths
* we need instead of list all the '$base_path/.temp/'
*
* @param basePath Base path of the table.
* @param maxAllowableHeartbeatIntervalInMs Heartbeat timeout.
 * @return true if the current fileID already exists under .temp/instant_time/partition_path/..
* @throws IOException upon errors.
*/
public boolean checkMarkerConflict(String basePath, long maxAllowableHeartbeatIntervalInMs) throws IOException {
String tempFolderPath = basePath + Path.SEPARATOR + HoodieTableMetaClient.TEMPFOLDER_NAME;
List<String> candidateInstants = MarkerUtils.getCandidateInstants(activeTimeline, Arrays.stream(fs.listStatus(new Path(tempFolderPath))).map(FileStatus::getPath).collect(Collectors.toList()),
instantTime, maxAllowableHeartbeatIntervalInMs, fs, basePath);
long res = candidateInstants.stream().flatMap(currentMarkerDirPath -> {
try {
Path markerPartitionPath;
if (StringUtils.isNullOrEmpty(partitionPath)) {
markerPartitionPath = new Path(currentMarkerDirPath);
} else {
markerPartitionPath = new Path(currentMarkerDirPath, partitionPath);
}
if (!StringUtils.isNullOrEmpty(partitionPath) && !fs.exists(markerPartitionPath)) {
return Stream.empty();
} else {
return Arrays.stream(fs.listStatus(markerPartitionPath)).parallel()
.filter((path) -> path.toString().contains(fileId));
}
} catch (IOException e) {
throw new HoodieIOException("IOException occurs during checking marker file conflict");
}
}).count();
if (res != 0L) {
LOG.warn("Detected conflict marker files: " + partitionPath + "/" + fileId + " for " + instantTime);
return true;
}
return false;
} | 3.68 |
hadoop_LocalJobOutputFiles_getOutputIndexFile | /**
* Return the path to a local map output index file created earlier
*/
public Path getOutputIndexFile() throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, TASKTRACKER_OUTPUT);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.68 |
graphhopper_VectorTile_addAllKeys | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public Builder addAllKeys(
java.lang.Iterable<java.lang.String> values) {
ensureKeysIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, keys_);
onChanged();
return this;
} | 3.68 |
hbase_CompactionRequestImpl_getSize | /** Gets the total size of all StoreFiles in compaction */
@Override
public long getSize() {
return totalSize;
} | 3.68 |
hudi_InternalSchemaMerger_mergeSchema | /**
* Create final read schema to read avro/parquet file.
*
* @return read schema to read avro/parquet file.
*/
public InternalSchema mergeSchema() {
Types.RecordType record = (Types.RecordType) mergeType(querySchema.getRecord(), 0);
return new InternalSchema(record);
} | 3.68 |
framework_CalendarConnector_getActionStartDate | /**
* Get the start date for an action item.
*
* @param actionKey
* The unique action key
* @return
* @throws ParseException
*/
public Date getActionStartDate(String actionKey) throws ParseException {
String dateStr = actionMap.get(actionKey + "_s");
DateTimeFormat formatter = DateTimeFormat
.getFormat(DateConstants.ACTION_DATE_FORMAT_PATTERN);
return formatter.parse(dateStr);
} | 3.68 |
hudi_BaseHoodieTableServiceClient_completeLogCompaction | /**
* Commit Log Compaction and track metrics.
*/
protected void completeLogCompaction(HoodieCommitMetadata metadata, HoodieTable table, String logCompactionCommitTime) {
this.context.setJobStatus(this.getClass().getSimpleName(), "Collect log compaction write status and commit compaction");
List<HoodieWriteStat> writeStats = metadata.getWriteStats();
handleWriteErrors(writeStats, TableServiceType.LOG_COMPACT);
final HoodieInstant logCompactionInstant = new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.LOG_COMPACTION_ACTION, logCompactionCommitTime);
try {
this.txnManager.beginTransaction(Option.of(logCompactionInstant), Option.empty());
preCommit(metadata);
finalizeWrite(table, logCompactionCommitTime, writeStats);
// commit to data table after committing to metadata table.
writeTableMetadata(table, logCompactionCommitTime, metadata, context.emptyHoodieData());
LOG.info("Committing Log Compaction " + logCompactionCommitTime + ". Finished with result " + metadata);
CompactHelpers.getInstance().completeInflightLogCompaction(table, logCompactionCommitTime, metadata);
} finally {
this.txnManager.endTransaction(Option.of(logCompactionInstant));
}
WriteMarkersFactory.get(config.getMarkersType(), table, logCompactionCommitTime)
.quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism());
if (logCompactionTimer != null) {
long durationInMs = metrics.getDurationInMs(logCompactionTimer.stop());
HoodieActiveTimeline.parseDateFromInstantTimeSafely(logCompactionCommitTime).ifPresent(parsedInstant ->
metrics.updateCommitMetrics(parsedInstant.getTime(), durationInMs, metadata, HoodieActiveTimeline.LOG_COMPACTION_ACTION)
);
}
LOG.info("Log Compacted successfully on commit " + logCompactionCommitTime);
} | 3.68 |
flink_DeduplicateFunctionHelper_processFirstRowOnProcTime | /**
 * Processes an element to deduplicate on keys with processing-time semantics, emitting the
 * current element only if it is the first row.
*
* @param currentRow latest row received by deduplicate function
* @param state state of function
* @param out underlying collector
*/
static void processFirstRowOnProcTime(
RowData currentRow, ValueState<Boolean> state, Collector<RowData> out)
throws Exception {
checkInsertOnly(currentRow);
// ignore record if it is not first row
if (state.value() != null) {
return;
}
state.update(true);
// emit the first row which is INSERT message
out.collect(currentRow);
} | 3.68 |
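A hedged sketch of the keep-first-row pattern this helper implements, written as a standalone keyed function so it does not depend on Flink-internal callers; the class name, the state name and the omitted imports are assumptions, and it mirrors rather than calls the package-private helper above.

public class KeepFirstRowFunction extends KeyedProcessFunction<RowData, RowData, RowData> {
  private transient ValueState<Boolean> seenState;

  @Override
  public void open(Configuration parameters) {
    // One Boolean flag per key: set once the first row has been emitted.
    seenState = getRuntimeContext().getState(
        new ValueStateDescriptor<>("first-row-seen", Types.BOOLEAN));
  }

  @Override
  public void processElement(RowData row, Context ctx, Collector<RowData> out) throws Exception {
    if (seenState.value() != null) {
      return; // a row for this key was already emitted; drop the duplicate
    }
    seenState.update(true);
    out.collect(row); // only the first row per key reaches downstream
  }
}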
morf_GraphBasedUpgradeSchemaChangeVisitor_startStep | /**
* Set the current {@link GraphBasedUpgradeNode} which is being processed.
*
* @param upgradeClass upgrade which is currently being processed
*/
@Override
public void startStep(Class<? extends UpgradeStep> upgradeClass) {
currentNode = upgradeNodes.get(upgradeClass.getName());
if (currentNode == null) {
throw new IllegalStateException("UpgradeNode: " + upgradeClass.getName() + " doesn't exist.");
}
} | 3.68 |
hadoop_MutableRatesWithAggregation_aggregateLocalStatesToGlobalMetrics | /**
 * Aggregates the thread's local samples into the global metrics. The caller
 * must ensure thread safety.
*/
private void aggregateLocalStatesToGlobalMetrics(
final ConcurrentMap<String, ThreadSafeSampleStat> localStats) {
for (Map.Entry<String, ThreadSafeSampleStat> entry : localStats
.entrySet()) {
String name = entry.getKey();
MutableRate globalMetric = addMetricIfNotExists(name);
entry.getValue().snapshotInto(globalMetric);
}
} | 3.68 |
flink_LatencyTrackingStateFactory_createStateAndWrapWithLatencyTrackingIfEnabled | /** Create latency tracking state if enabled. */
public static <K, N, V, S extends State>
InternalKvState<K, N, ?> createStateAndWrapWithLatencyTrackingIfEnabled(
InternalKvState<K, N, ?> kvState,
StateDescriptor<S, V> stateDescriptor,
LatencyTrackingStateConfig latencyTrackingStateConfig)
throws Exception {
if (latencyTrackingStateConfig.isEnabled()) {
return new LatencyTrackingStateFactory<>(
kvState, stateDescriptor, latencyTrackingStateConfig)
.createState();
}
return kvState;
} | 3.68 |
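A minimal call-site sketch; the surrounding generic helper and variable names are assumptions, while the factory call itself is exactly the method shown above.

static <K, N, V, S extends State> InternalKvState<K, N, ?> createTrackedState(
    InternalKvState<K, N, ?> rawKvState,
    StateDescriptor<S, V> descriptor,
    LatencyTrackingStateConfig config) throws Exception {
  // Returns rawKvState unchanged when latency tracking is disabled, so the
  // caller can use the result unconditionally.
  return LatencyTrackingStateFactory.createStateAndWrapWithLatencyTrackingIfEnabled(
      rawKvState, descriptor, config);
}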
morf_TableDataHomology_compareTable | /**
* Compare all the records for this table.
*
* @param table the active {@link Table}
* @param records1 the first set of records
* @param records2 the second set of records
*/
public void compareTable(final Table table, Iterable<Record> records1, Iterable<Record> records2) {
Iterator<Record> iterator1;
Iterator<Record> iterator2;
if (orderComparator == null) {
// no comparator - just compare the results in the order they arrive.
iterator1 = records1.iterator();
iterator2 = records2.iterator();
} else {
// There is a comparator. Sort the rows before comparison
iterator1 = copyAndSort(table, records1, orderComparator).iterator();
iterator2 = copyAndSort(table, records2, orderComparator).iterator();
}
int recordNumber = 0;
List<Column> primaryKeys = primaryKeysForTable(table);
List<Column> primaryKeysForComparison = FluentIterable.from(primaryKeys).filter(excludingExcludedColumns()).toList();
Optional<Record> next1 = optionalNext(iterator1);
Optional<Record> next2 = optionalNext(iterator2);
while (moreRecords(table, next1, next2, primaryKeys)) {
int compareResult = primaryKeysForComparison.isEmpty() ? 0 : compareKeys(next1, next2, primaryKeysForComparison);
if (compareResult > 0) {
differences.add(String.format("Table [%s]: Dataset1 is missing %s (Dataset2=%s)", table.getName(), keyColumnsIds(next2.get(), primaryKeysForComparison), RecordHelper.joinRecordValues(table.columns(), next2.get(), ",", "null")));
next2 = optionalNext(iterator2);
} else if (compareResult < 0) {
differences.add(String.format("Table [%s]: Dataset2 is missing %s (Dataset1=%s)", table.getName(), keyColumnsIds(next1.get(), primaryKeysForComparison), RecordHelper.joinRecordValues(table.columns(), next1.get(), ",", "null")));
next1 = optionalNext(iterator1);
} else {
compareRecords(table, recordNumber++, next1.get(), next2.get(), primaryKeys);
next1 = optionalNext(iterator1);
next2 = optionalNext(iterator2);
}
}
} | 3.68 |
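A heavily hedged usage sketch: the no-argument TableDataHomology constructor is an assumption (the real class may require an order comparator or excluded columns), and the accumulated differences are read through whatever accessor the class exposes, which is not reproduced here.

// Hypothetical helper: compares two snapshots of the same table's data.
static void compareSnapshots(Table table, Iterable<Record> before, Iterable<Record> after) {
  TableDataHomology homology = new TableDataHomology(); // assumed constructor
  homology.compareTable(table, before, after);
}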
hudi_SchemaChangeUtils_applyTableChanges2Schema | /**
* Apply all the DDL update operations to internalSchema to produce a new internalSchema.
*
* @param internalSchema origin internalSchema.
* @param updates a wrapper class for all the DDL update operations.
* @return a new internalSchema.
*/
public static InternalSchema applyTableChanges2Schema(InternalSchema internalSchema, TableChanges.ColumnUpdateChange updates) {
Types.RecordType newType = (Types.RecordType)applyTableChange2Type(internalSchema.getRecord(), updates);
// deal with root level changes
List<Types.Field> newFields = TableChangesHelper.applyAddChange2Fields(newType.fields(),
new ArrayList<>(), updates.getPositionChangeMap().get(-1));
return new InternalSchema(Types.RecordType.get(newFields, newType.name()));
} | 3.68 |
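A hedged sketch of applying a column-type update; the ColumnUpdateChange.get(...) factory, the updateColumnType(...) method and the column name are assumptions not taken from this row.

static InternalSchema widenPriceColumn(InternalSchema schema) {
  // Assumed API: build an update-change against the current schema, then
  // widen the hypothetical "price" column before applying the change.
  TableChanges.ColumnUpdateChange updates = TableChanges.ColumnUpdateChange.get(schema);
  updates.updateColumnType("price", Types.DoubleType.get());
  return SchemaChangeUtils.applyTableChanges2Schema(schema, updates);
}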
hbase_StorageClusterStatusModel_setStorefileSizeMB | /**
* @param storefileSizeMB total size of store files, in MB
*/
public void setStorefileSizeMB(int storefileSizeMB) {
this.storefileSizeMB = storefileSizeMB;
} | 3.68 |
hbase_TestingHBaseClusterOption_convert | /**
 * Converts to the internal option. Not for public use, so package-private.
*/
StartTestingClusterOption convert() {
return StartTestingClusterOption.builder().numMasters(numMasters)
.numAlwaysStandByMasters(numAlwaysStandByMasters).numRegionServers(numRegionServers)
.rsPorts(rsPorts).numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
.numZkServers(numZkServers).createRootDir(createRootDir).createWALDir(createWALDir).build();
} | 3.68 |