name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
framework_VMenuBar_getItems | /**
* Returns a list of items in this menu.
*/
public List<CustomMenuItem> getItems() {
return items;
} | 3.68 |
flink_OperatorChain_flushOutputs | /**
* This method should be called before finishing the record emission, to make sure any data that
* is still buffered will be sent. It also ensures that all data sending related exceptions are
* recognized.
*
* @throws IOException Thrown, if the buffered data cannot be pushed into the output streams.
*/
public void flushOutputs() throws IOException {
for (RecordWriterOutput<?> streamOutput : getStreamOutputs()) {
streamOutput.flush();
}
} | 3.68 |
hbase_HBaseTestingUtility_setReplicas | /**
* Set the number of Region replicas.
*/
public static void setReplicas(Admin admin, TableName table, int replicaCount)
throws IOException, InterruptedException {
TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
.setRegionReplication(replicaCount).build();
admin.modifyTable(desc);
} | 3.68 |
framework_DateTimeField_setPlaceholder | /**
* Sets the placeholder text. The placeholder is text that is displayed when
* the field would otherwise be empty, to prompt the user for input.
*
* @param placeholder
* the placeholder text to set
*/
public void setPlaceholder(String placeholder) {
getState().placeholder = placeholder;
} | 3.68 |
AreaShop_RegionSign_getStringChunk | /**
* Chunk string to be used as key in maps.
* @return Chunk string
*/
public String getStringChunk() {
return SignsFeature.chunkToString(getLocation());
} | 3.68 |
morf_ViewChanges_dropNode | /**
* Add a node to the drop set
*
* @param node the node to drop.
*/
private void dropNode(String node) {
dropSet.add(node);
if (knownSet.contains(node)) {
if (log.isDebugEnabled()) log.debug("Expanding views to deploy to include " + node + " because it is now dropped and exists in all views");
deploySet.add(node);
}
} | 3.68 |
querydsl_SimpleExpression_eq | /**
* Create a {@code this == right} expression
*
* @param right rhs of the comparison
* @return this == right
*/
public BooleanExpression eq(Expression<? super T> right) {
return Expressions.booleanOperation(Ops.EQ, mixin, right);
} | 3.68 |
framework_VPopupView_showPopup | /**
* Determines the correct position for a popup and displays the popup at
* that position.
*
* By default, the popup is shown centered relative to its host component,
* ensuring it is visible on the screen if possible.
*
* Can be overridden to customize the popup position.
*
* @param popup
* the popup whose position should be updated
*/
public void showPopup(final CustomPopup popup) {
popup.setPopupPosition(0, 0);
} | 3.68 |
querydsl_MetaDataExporter_patternAsList | /**
* Splits the input on ',' if it is non-null and a ',' is present.
* Otherwise returns a singleton list containing the input (which may be null).
*/
private List<String> patternAsList(@Nullable String input) {
if (input != null && input.contains(",")) {
return Arrays.asList(input.split(","));
} else {
return Collections.singletonList(input);
}
} | 3.68 |
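The split-versus-singleton behaviour above is easy to misread, so here is a standalone restatement of the same logic with the three interesting inputs shown side by side; this is an illustrative sketch with a hypothetical class name, not querydsl code.

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Illustrative sketch mirroring patternAsList (hypothetical class, not part of querydsl).
public class PatternAsListSketch {
    static List<String> patternAsList(String input) {
        if (input != null && input.contains(",")) {
            return Arrays.asList(input.split(","));   // "a,b" -> [a, b]
        }
        return Collections.singletonList(input);      // "a" -> [a], null -> [null]
    }

    public static void main(String[] args) {
        System.out.println(patternAsList("foo,bar")); // [foo, bar]
        System.out.println(patternAsList("foo"));     // [foo]
        System.out.println(patternAsList(null));      // [null]
    }
}
```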
framework_ViewChangeListener_getParameters | /**
* Returns the parameters for the view being activated.
*
* @return navigation parameters (potentially bookmarkable) for the new
* view
*/
public String getParameters() {
return parameters;
} | 3.68 |
hadoop_GPGPolicyFacade_setPolicyManager | /**
* Provides a utility for the policy generator to write a policy manager
* into the FederationStateStore. The facade keeps a cache and will only write
* into the FederationStateStore if the policy configuration has changed.
*
* @param policyManager The policy manager we want to update into the state
* store. It contains policy information as well as
* the queue name we will update for.
* @throws YarnException exceptions from yarn servers.
*/
public void setPolicyManager(FederationPolicyManager policyManager)
throws YarnException {
if (policyManager == null) {
LOG.warn("Attempting to set null policy manager");
return;
}
// Extract the configuration from the policy manager
String queue = policyManager.getQueue();
SubClusterPolicyConfiguration conf;
try {
conf = policyManager.serializeConf();
} catch (FederationPolicyInitializationException e) {
LOG.warn("Error serializing policy for queue {}", queue);
throw e;
}
if (conf == null) {
// State store does not currently support setting a policy back to null
// because it reads the queue name to set from the policy!
LOG.warn("Skip setting policy to null for queue {} into state store",
queue);
return;
}
// Compare with configuration cache, if different, write the conf into
// store and update our conf and manager cache
if (!confCacheEqual(queue, conf)) {
try {
if (readOnly) {
LOG.info("[read-only] Skipping policy update for queue {}", queue);
return;
}
LOG.info("Updating policy for queue {} into state store", queue);
stateStore.setPolicyConfiguration(conf);
policyConfMap.put(queue, conf);
policyManagerMap.put(queue, policyManager);
} catch (YarnException e) {
LOG.warn("Error writing SubClusterPolicyConfiguration to state "
+ "store for queue: {}", queue);
throw e;
}
} else {
LOG.info("Setting unchanged policy - state store write skipped");
}
} | 3.68 |
dubbo_ServiceModel_getReferenceConfig | /**
* ServiceModel should be decoupled from AbstractInterfaceConfig and removed in a future version
* @return the {@link ReferenceConfigBase} this model wraps, or {@code null} if no config is set
*/
@Deprecated
public ReferenceConfigBase<?> getReferenceConfig() {
if (config == null) {
return null;
}
if (config instanceof ReferenceConfigBase) {
return (ReferenceConfigBase<?>) config;
} else {
throw new IllegalArgumentException("Current ServiceModel is not a ConsumerModel");
}
} | 3.68 |
hbase_BloomFilterUtil_idealMaxKeys | /**
* The maximum number of keys we can put into a Bloom filter of a certain size to maintain the
* given error rate, assuming the number of hash functions is chosen optimally and does not even
* have to be an integer (hence the "ideal" in the function name).
* @return maximum number of keys that can be inserted into the Bloom filter
* @see #computeMaxKeys(long, double, int) for a more precise estimate
*/
public static long idealMaxKeys(long bitSize, double errorRate) {
// The reason we need to use floor here is that otherwise we might put
// more keys in a Bloom filter than is allowed by the target error rate.
return (long) (bitSize * (LOG2_SQUARED / -Math.log(errorRate)));
} | 3.68 |
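The expression in the snippet is the textbook optimum n = m * (ln 2)^2 / (-ln e) for a Bloom filter with m bits and target false-positive rate e. Below is a minimal standalone sketch of that computation, assuming LOG2_SQUARED is (ln 2)^2 (that constant is not shown in the row); it is not the HBase class itself.

```java
// Minimal standalone sketch of the "ideal max keys" computation above.
// Assumes LOG2_SQUARED is (ln 2)^2; this is not the HBase class itself.
public class IdealMaxKeysSketch {
    private static final double LOG2_SQUARED = Math.log(2) * Math.log(2);

    // n = m * (ln 2)^2 / (-ln(errorRate)) for an m-bit filter with an
    // optimal (possibly fractional) number of hash functions.
    static long idealMaxKeys(long bitSize, double errorRate) {
        return (long) (bitSize * (LOG2_SQUARED / -Math.log(errorRate)));
    }

    public static void main(String[] args) {
        long bits = 8L * 1024 * 1024;                 // a 1 MiB filter
        System.out.println(idealMaxKeys(bits, 0.01)); // roughly 875,000 keys at a 1% error rate
    }
}
```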
flink_HadoopInputs_createHadoopInput | /**
* Creates a Flink {@link InputFormat} that wraps the given Hadoop {@link
* org.apache.hadoop.mapreduce.InputFormat}.
*
* @return A Flink InputFormat that wraps the Hadoop InputFormat.
*/
public static <K, V>
org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> createHadoopInput(
org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat,
Class<K> key,
Class<V> value,
Job job) {
return new org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<>(
mapreduceInputFormat, key, value, job);
} | 3.68 |
framework_SelectorPredicate_getValue | /**
* @return the value
*/
public String getValue() {
return value;
} | 3.68 |
framework_VaadinSession_getRequestHandlers | /**
* Gets the request handlers that are registered to the session. The
* iteration order of the returned collection is the same as the order in
* which the request handlers will be invoked when a request is handled.
*
* @return a collection of request handlers, with the iteration order
* according to the order they would be invoked
*
* @see #addRequestHandler(RequestHandler)
* @see #removeRequestHandler(RequestHandler)
*
* @since 7.0
*/
public Collection<RequestHandler> getRequestHandlers() {
assert hasLock();
return Collections.unmodifiableCollection(requestHandlers);
} | 3.68 |
flink_StreamOperatorWrapper_finish | /**
* Finishes the wrapped operator and propagates the finish operation to the next wrapper that
* the {@link #next} points to.
*
* <p>Note that this method must be called in the task thread, because we need to call {@link
* MailboxExecutor#yield()} to take the mails of closing operator and running timers and run
* them.
*/
public void finish(StreamTaskActionExecutor actionExecutor, StopMode stopMode)
throws Exception {
if (!isHead && stopMode == StopMode.DRAIN) {
// NOTE: This is only done for the case where the operator is a one-input operator. At present,
// any non-head operator on the operator chain is a one-input operator.
actionExecutor.runThrowing(() -> endOperatorInput(1));
}
quiesceTimeServiceAndFinishOperator(actionExecutor, stopMode);
// propagate the close operation to the next wrapper
if (next != null) {
next.finish(actionExecutor, stopMode);
}
} | 3.68 |
pulsar_ClientConfiguration_setConnectionsPerBroker | /**
* Sets the max number of connections that the client library will open to a single broker.
* <p>
* By default, the connection pool will use a single connection for all the producers and consumers. Increasing this
* parameter may improve throughput when using many producers over a high latency connection.
* <p>
*
* @param connectionsPerBroker
* max number of connections per broker (needs to be greater than 0)
*/
public void setConnectionsPerBroker(int connectionsPerBroker) {
checkArgument(connectionsPerBroker > 0, "Connections per broker need to be greater than 0");
confData.setConnectionsPerBroker(connectionsPerBroker);
} | 3.68 |
hbase_CompactionProgress_getCurrentCompactedKvs | /** Returns the completed count of key values in currently running compaction */
public long getCurrentCompactedKvs() {
return currentCompactedKVs;
} | 3.68 |
hudi_InLineFSUtils_getInlineFilePath | /**
* Get the InlineFS Path for a given scheme and its Path.
* <p>
* Examples:
* Input Path: s3a://file1, origScheme: file, startOffset = 20, length = 40
* Output: "inlinefs://file1/s3a/?start_offset=20&length=40"
*
* @param outerPath The outer file Path
* @param origScheme The file scheme
* @param inLineStartOffset Start offset for the inline file
* @param inLineLength Length for the inline file
* @return InlineFS Path for the requested outer path and scheme
*/
public static Path getInlineFilePath(Path outerPath, String origScheme, long inLineStartOffset, long inLineLength) {
final String subPath = new File(outerPath.toString().substring(outerPath.toString().indexOf(":") + 1)).getPath();
return new Path(
InLineFileSystem.SCHEME + SCHEME_SEPARATOR + PATH_SEPARATOR + subPath + PATH_SEPARATOR + origScheme
+ PATH_SEPARATOR + "?" + START_OFFSET_STR + EQUALS_STR + inLineStartOffset
+ "&" + LENGTH_STR + EQUALS_STR + inLineLength
);
} | 3.68 |
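The query-string layout documented above (start_offset and length parameters on an inlinefs URI) can be read back with plain java.net.URI. The following is a hypothetical reader used purely to illustrate the format, not Hudi code; it parses the exact example URI given in the Javadoc.

```java
import java.net.URI;

// Hypothetical reader for the inlinefs URI format shown in the Javadoc above; not Hudi code.
public class InlinePathQuerySketch {
    public static void main(String[] args) {
        URI uri = URI.create("inlinefs://file1/s3a/?start_offset=20&length=40"); // example from the Javadoc
        long start = 0, length = 0;
        for (String kv : uri.getQuery().split("&")) {
            String[] parts = kv.split("=", 2);
            if (parts[0].equals("start_offset")) {
                start = Long.parseLong(parts[1]);
            } else if (parts[0].equals("length")) {
                length = Long.parseLong(parts[1]);
            }
        }
        System.out.println("start_offset=" + start + ", length=" + length); // start_offset=20, length=40
    }
}
```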
flink_FileDataIndexCache_handleRemove | // This is a callback after internal cache removed an entry from itself.
private void handleRemove(RemovalNotification<CachedRegionKey, Object> removedEntry) {
CachedRegionKey removedKey = removedEntry.getKey();
// remove the corresponding region from memory.
T removedRegion =
subpartitionFirstBufferIndexRegions
.get(removedKey.getSubpartition())
.remove(removedKey.getFirstBufferIndex());
// write this region to file. After that, no strong reference points to this region, so it can
// be safely released by GC.
writeRegion(removedKey.getSubpartition(), removedRegion);
} | 3.68 |
rocketmq-connect_WorkerTask_recordCommitSuccess | /**
* record commit success
*
* @param duration
*/
protected void recordCommitSuccess(long duration) {
taskMetricsGroup.recordCommit(duration, true);
} | 3.68 |
flink_MapValue_clear | /*
* (non-Javadoc)
* @see java.util.Map#clear()
*/
@Override
public void clear() {
this.map.clear();
} | 3.68 |
graphhopper_GraphHopperConfig_putObject | // We can add explicit configuration properties to GraphHopperConfig (for example to allow lists or nested objects),
// everything else is stored in a HashMap
@JsonAnySetter
public GraphHopperConfig putObject(String key, Object value) {
map.putObject(key, value);
return this;
} | 3.68 |
flink_AbstractInvokable_getIndexInSubtaskGroup | /**
* Returns the index of this subtask in the subtask group.
*
* @return the index of this subtask in the subtask group
*/
public int getIndexInSubtaskGroup() {
return this.environment.getTaskInfo().getIndexOfThisSubtask();
} | 3.68 |
morf_UnionSetOperator_accept | /**
* @see org.alfasoftware.morf.sql.SchemaAndDataChangeVisitable#accept(org.alfasoftware.morf.upgrade.SchemaAndDataChangeVisitor)
*/
@Override
public void accept(SchemaAndDataChangeVisitor visitor) {
visitor.visit(this);
selectStatement.accept(visitor);
} | 3.68 |
hadoop_FindOptions_setFollowArgLink | /**
* Sets flag indicating whether command line symbolic links should be
* followed.
*
* @param followArgLink true indicates follow links
*/
public void setFollowArgLink(boolean followArgLink) {
this.followArgLink = followArgLink;
} | 3.68 |
flink_AbstractPythonStreamAggregateOperator_getUserDefinedFunctionsProto | /**
* Gets the proto representation of the Python user-defined aggregate functions to be executed.
*/
protected FlinkFnApi.UserDefinedAggregateFunctions getUserDefinedFunctionsProto() {
FlinkFnApi.UserDefinedAggregateFunctions.Builder builder =
FlinkFnApi.UserDefinedAggregateFunctions.newBuilder();
builder.setMetricEnabled(config.get(PYTHON_METRIC_ENABLED));
builder.setProfileEnabled(config.get(PYTHON_PROFILE_ENABLED));
builder.addAllGrouping(Arrays.stream(grouping).boxed().collect(Collectors.toList()));
builder.setGenerateUpdateBefore(generateUpdateBefore);
builder.setIndexOfCountStar(indexOfCountStar);
builder.setKeyType(toProtoType(getKeyType()));
builder.setStateCacheSize(stateCacheSize);
builder.setMapStateReadCacheSize(mapStateReadCacheSize);
builder.setMapStateWriteCacheSize(mapStateWriteCacheSize);
for (int i = 0; i < aggregateFunctions.length; i++) {
DataViewSpec[] specs = null;
if (i < dataViewSpecs.length) {
specs = dataViewSpecs[i];
}
builder.addUdfs(
ProtoUtils.createUserDefinedAggregateFunctionProto(
aggregateFunctions[i], specs));
}
builder.addAllJobParameters(
getRuntimeContext().getExecutionConfig().getGlobalJobParameters().toMap().entrySet()
.stream()
.map(
entry ->
FlinkFnApi.JobParameter.newBuilder()
.setKey(entry.getKey())
.setValue(entry.getValue())
.build())
.collect(Collectors.toList()));
return builder.build();
} | 3.68 |
hadoop_GetApplicationAttemptsResponsePBImpl_initLocalApplicationAttemptsList | // Once this is called, applicationAttemptList will never be null - until a getProto
// is called.
private void initLocalApplicationAttemptsList() {
if (this.applicationAttemptList != null) {
return;
}
GetApplicationAttemptsResponseProtoOrBuilder p = viaProto ? proto : builder;
List<ApplicationAttemptReportProto> list = p.getApplicationAttemptsList();
applicationAttemptList = new ArrayList<ApplicationAttemptReport>();
for (ApplicationAttemptReportProto a : list) {
applicationAttemptList.add(convertFromProtoFormat(a));
}
} | 3.68 |
framework_PushConfiguration_setPushMode | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.PushConfiguration#setPushMode(com.vaadin.shared.
* communication .PushMode)
*/
@Override
public void setPushMode(PushMode pushMode) {
if (pushMode == null) {
throw new IllegalArgumentException("Push mode cannot be null");
}
VaadinSession session = ui.getSession();
if (session == null) {
throw new UIDetachedException(
"Cannot set the push mode for a detached UI");
}
assert session.hasLock();
if (pushMode.isEnabled()
&& !session.getService().ensurePushAvailable()) {
throw new IllegalStateException(
"Push is not available. See previous log messages for more information.");
}
PushMode oldMode = getState().mode;
if (oldMode != pushMode) {
getState().mode = pushMode;
if (!oldMode.isEnabled() && pushMode.isEnabled()) {
// The push connection is initially in a disconnected state;
// the client will establish the connection
ui.setPushConnection(new AtmospherePushConnection(ui));
}
// Nothing to do here if disabling push;
// the client will close the connection
}
} | 3.68 |
hbase_ExceptionUtil_asInterrupt | /** Returns an InterruptedIOException if t was an interruption, null otherwise */
public static InterruptedIOException asInterrupt(Throwable t) {
if (t instanceof SocketTimeoutException) {
return null;
}
if (t instanceof InterruptedIOException) {
return (InterruptedIOException) t;
}
if (t instanceof InterruptedException || t instanceof ClosedByInterruptException) {
InterruptedIOException iie =
new InterruptedIOException("Origin: " + t.getClass().getSimpleName());
iie.initCause(t);
return iie;
}
return null;
} | 3.68 |
dubbo_SharedClientsProvider_checkClientCanUse | /**
* Check whether all clients in the list are available.
*
* @param referenceCountExchangeClients the clients to check
* @return true if all clients are available, false otherwise
*/
private boolean checkClientCanUse(List<ReferenceCountExchangeClient> referenceCountExchangeClients) {
if (CollectionUtils.isEmpty(referenceCountExchangeClients)) {
return false;
}
// As long as one client is not available, you need to replace the unavailable client with the available one.
return referenceCountExchangeClients.stream()
.noneMatch(referenceCountExchangeClient -> referenceCountExchangeClient == null
|| referenceCountExchangeClient.getCount() <= 0
|| referenceCountExchangeClient.isClosed());
} | 3.68 |
hbase_LocalHBaseCluster_join | /**
* Wait for Mini HBase Cluster to shut down. Presumes you've already called {@link #shutdown()}.
*/
public void join() {
if (this.regionThreads != null) {
for (Thread t : this.regionThreads) {
if (t.isAlive()) {
try {
Threads.threadDumpingIsAlive(t);
} catch (InterruptedException e) {
LOG.debug("Interrupted", e);
}
}
}
}
if (this.masterThreads != null) {
for (Thread t : this.masterThreads) {
if (t.isAlive()) {
try {
Threads.threadDumpingIsAlive(t);
} catch (InterruptedException e) {
LOG.debug("Interrupted", e);
}
}
}
}
} | 3.68 |
hadoop_MetricsLoggerTask_run | /**
* Write metrics to the metrics appender when invoked.
*/
@Override
public void run() {
// Skip querying metrics if there are no known appenders.
if (!metricsLog.isInfoEnabled() || !hasAppenders(metricsLog)
|| objectName == null) {
return;
}
metricsLog.info(" >> Begin " + nodeName + " metrics dump");
final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
// Iterate over each MBean.
for (final ObjectName mbeanName : server.queryNames(objectName, null)) {
try {
MBeanInfo mBeanInfo = server.getMBeanInfo(mbeanName);
final String mBeanNameName = MBeans.getMbeanNameName(mbeanName);
final Set<String> attributeNames = getFilteredAttributes(mBeanInfo);
final AttributeList attributes = server.getAttributes(mbeanName,
attributeNames.toArray(new String[attributeNames.size()]));
for (Object o : attributes) {
final Attribute attribute = (Attribute) o;
final Object value = attribute.getValue();
final String valueStr = (value != null) ? value.toString() : "null";
// Truncate the value if it is too long
metricsLog.info(mBeanNameName + ":" + attribute.getName() + "="
+ trimLine(valueStr));
}
} catch (Exception e) {
metricsLog.error("Failed to get " + nodeName + " metrics for mbean "
+ mbeanName.toString(), e);
}
}
metricsLog.info(" << End " + nodeName + " metrics dump");
} | 3.68 |
hbase_MemorySizeUtil_getGlobalMemStoreSize | /** Returns Pair of global memstore size and memory type(ie. on heap or off heap). */
public static Pair<Long, MemoryType> getGlobalMemStoreSize(Configuration conf) {
long offheapMSGlobal = conf.getLong(OFFHEAP_MEMSTORE_SIZE_KEY, 0);// Size in MBs
if (offheapMSGlobal > 0) {
// Off heap memstore size has no relevance when MSLAB is turned OFF. We will go with splitting
// this entire size into chunks and pooling them in the MemStoreLAB pool. We don't want to
// create so many on-demand off heap chunks. In fact, when this off heap size is configured, we
// will go with 100% of this size as the pool size.
if (MemStoreLAB.isEnabled(conf)) {
// We are in offheap Memstore use
long globalMemStoreLimit = (long) (offheapMSGlobal * 1024 * 1024); // Size in bytes
return new Pair<>(globalMemStoreLimit, MemoryType.NON_HEAP);
} else {
// Off heap max memstore size is configured with turning off MSLAB. It makes no sense. Do a
// warn log and go with on heap memstore percentage. By default it will be 40% of Xmx
LOG.warn("There is no relevance of configuring '" + OFFHEAP_MEMSTORE_SIZE_KEY + "' when '"
+ MemStoreLAB.USEMSLAB_KEY + "' is turned off."
+ " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')");
}
}
return new Pair<>(getOnheapGlobalMemStoreSize(conf), MemoryType.HEAP);
} | 3.68 |
AreaShop_GeneralRegion_hasRegionsInLimitGroup | /**
* Get the number of regions a player has matching a certain limits group (config.yml -- limitGroups)
* @param player The player to check the amount for
* @param limitGroup The group to check
* @param regions All the regions a player has bought or rented
* @param exclude Exclude this region from the count
* @return The number of regions that the player has bought or rented matching the limit group (worlds and groups filters)
*/
public int hasRegionsInLimitGroup(OfflinePlayer player, String limitGroup, List<? extends GeneralRegion> regions, GeneralRegion exclude) {
int result = 0;
for(GeneralRegion region : regions) {
if(region.getBooleanSetting("general.countForLimits")
&& region.isOwner(player)
&& region.matchesLimitGroup(limitGroup)
&& (exclude == null || !exclude.getName().equals(region.getName()))) {
result++;
}
}
return result;
} | 3.68 |
framework_TreeFilesystem_populateNode | /**
* Populates files into the tree as items. In this example items are of String
* type and consist of file paths. New items are added to the tree and each item's
* parent and children properties are updated.
*
* @param file
* path whose contents are added to the tree
* @param parent
* parent for the added nodes; if null, new nodes are added to the root node
*/
private void populateNode(String file, Object parent) {
final File subdir = new File(file);
final File[] files = subdir.listFiles();
for (int x = 0; x < files.length; x++) {
try {
// add new item (String) to tree
final String path = files[x].getCanonicalPath();
tree.addItem(path);
// set parent if this item has one
if (parent != null) {
tree.setParent(path, parent);
}
// check if item is a directory and read access exists
if (files[x].isDirectory() && files[x].canRead()) {
// yes, children may therefore exist
tree.setChildrenAllowed(path, true);
} else {
// no, children therefore do not exist
tree.setChildrenAllowed(path, false);
}
} catch (final Exception e) {
throw new RuntimeException(e);
}
}
} | 3.68 |
morf_UpgradeGraph_orderedSteps | /**
* @return total ordering of upgrade steps.
*/
public Collection<Class<? extends UpgradeStep>> orderedSteps() {
return Collections.unmodifiableCollection(orderedSteps.values());
} | 3.68 |
flink_PlanNode_setCosts | /**
* Sets the basic cost for this node to the given value, and sets the cumulative costs to those
* costs plus the cost shares of all inputs (regular and broadcast).
*
* @param nodeCosts The already known costs for this node (these costs are produced by a concrete
* {@code OptimizerNode} subclass).
*/
public void setCosts(Costs nodeCosts) {
// set the node costs
this.nodeCosts = nodeCosts;
// the cumulative costs are the node costs plus the costs of all inputs
this.cumulativeCosts = nodeCosts.clone();
// add all the normal inputs
for (PlanNode pred : getPredecessors()) {
Costs parentCosts = pred.getCumulativeCostsShare();
if (parentCosts != null) {
this.cumulativeCosts.addCosts(parentCosts);
} else {
throw new CompilerException(
"Trying to set the costs of an operator before the predecessor costs are computed.");
}
}
// add all broadcast variable inputs
if (this.broadcastInputs != null) {
for (NamedChannel nc : this.broadcastInputs) {
Costs bcInputCost = nc.getSource().getCumulativeCostsShare();
if (bcInputCost != null) {
this.cumulativeCosts.addCosts(bcInputCost);
} else {
throw new CompilerException(
"Trying to set the costs of an operator before the broadcast input costs are computed.");
}
}
}
} | 3.68 |
hbase_DirectMemoryUtils_getDirectMemoryUsage | /** Returns the current amount of direct memory used. */
public static long getDirectMemoryUsage() {
if (BEAN_SERVER == null || NIO_DIRECT_POOL == null || !HAS_MEMORY_USED_ATTRIBUTE) return 0;
try {
Long value = (Long) BEAN_SERVER.getAttribute(NIO_DIRECT_POOL, MEMORY_USED);
return value == null ? 0 : value;
} catch (JMException e) {
// should print further diagnostic information?
return 0;
}
} | 3.68 |
pulsar_FieldParser_stringToInteger | /**
* Converts String to Integer.
*
* @param val
* The String to be converted.
* @return The converted Integer value.
*/
public static Integer stringToInteger(String val) {
String v = trim(val);
if (io.netty.util.internal.StringUtil.isNullOrEmpty(v)) {
return null;
} else {
return Integer.valueOf(v);
}
} | 3.68 |
hadoop_OutputReader_initialize | /**
* Initializes the OutputReader. This method has to be called before
* calling any of the other methods.
*/
public void initialize(PipeMapRed pipeMapRed) throws IOException {
// nothing here yet, but that might change in the future
} | 3.68 |
querydsl_PathMetadataFactory_forVariable | /**
* Create a new PathMetadata instance for a variable
*
* @param variable variable name
* @return variable path
*/
public static PathMetadata forVariable(String variable) {
return new PathMetadata(null, variable, PathType.VARIABLE);
} | 3.68 |
hadoop_FSDataOutputStreamBuilder_isRecursive | /**
* Return whether the parent directories should be created if they do not exist.
*
* @return true if missing parent directories should be created, false otherwise.
*/
protected boolean isRecursive() {
return recursive;
} | 3.68 |
hadoop_TFile_writeValue | /**
* Writing the value to the output stream. This method avoids copying
* value data from Scanner into user buffer, then writing to the output
* stream. It does not require the value length to be known.
*
* @param out
* The output stream
* @return the length of the value
* @throws IOException raised on errors performing I/O.
*/
public long writeValue(OutputStream out) throws IOException {
DataInputStream dis = getValueStream();
long size = 0;
try {
int chunkSize;
while ((chunkSize = valueBufferInputStream.getRemain()) > 0) {
chunkSize = Math.min(chunkSize, MAX_VAL_TRANSFER_BUF_SIZE);
valTransferBuffer.setSize(chunkSize);
dis.readFully(valTransferBuffer.getBytes(), 0, chunkSize);
out.write(valTransferBuffer.getBytes(), 0, chunkSize);
size += chunkSize;
}
return size;
} finally {
dis.close();
}
} | 3.68 |
morf_InsertStatementBuilder_getFromTable | /**
* Gets the table to select from. This is a short-hand for "SELECT * FROM [Table]".
*
* @return the table to select from.
*/
TableReference getFromTable() {
return fromTable;
} | 3.68 |
hadoop_AllocateRequest_resourceBlacklistRequest | /**
* Set the <code>resourceBlacklistRequest</code> of the request.
* @see AllocateRequest#setResourceBlacklistRequest(
* ResourceBlacklistRequest)
* @param resourceBlacklistRequest
* <code>resourceBlacklistRequest</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Stable
public AllocateRequestBuilder resourceBlacklistRequest(
ResourceBlacklistRequest resourceBlacklistRequest) {
allocateRequest.setResourceBlacklistRequest(resourceBlacklistRequest);
return this;
} | 3.68 |
hbase_Result_getValueAsByteBuffer | /**
* Returns the value wrapped in a new <code>ByteBuffer</code>.
* @param family family name
* @param foffset family offset
* @param flength family length
* @param qualifier column qualifier
* @param qoffset qualifier offset
* @param qlength qualifier length
* @return the latest version of the column, or <code>null</code> if none found
*/
public ByteBuffer getValueAsByteBuffer(byte[] family, int foffset, int flength, byte[] qualifier,
int qoffset, int qlength) {
Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength);
if (kv == null) {
return null;
}
return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())
.asReadOnlyBuffer();
} | 3.68 |
framework_Overlay_needsShimElement | /**
* Returns true if we should add a shim iframe below the overlay to deal
* with zindex issues with PDFs and applets. Can be overridden to disable
* shim iframes if they are not needed.
*
* @return true if a shim iframe should be added, false otherwise
*/
protected boolean needsShimElement() {
BrowserInfo info = BrowserInfo.get();
return info.isIE() && info.isBrowserVersionNewerOrEqual(8, 0);
} | 3.68 |
morf_Upgrade_getUpgradeAuditRecords | /**
* This method queries the database for upgrade audit information, including
* upgrade UUIDs and their corresponding descriptions.
*
* @return A Map<String, String> containing upgrade audit information.
* The keys are upgrade descriptions and the values are corresponding UUIDs.
* If an error occurs during the retrieval, an empty map is returned.
*/
private Map<String, String> getUpgradeAuditRecords() {
Map<String, String> upgradeAuditMap = new HashMap<>();
try {
TableReference upgradeAuditTable = tableRef(DatabaseUpgradeTableContribution.UPGRADE_AUDIT_NAME);
SelectStatement selectStatement = select(
upgradeAuditTable.field("upgradeUUID"),
upgradeAuditTable.field("description")
)
.from(upgradeAuditTable)
.build();
SqlScriptExecutorProvider sqlScriptExecutorProvider = new SqlScriptExecutorProvider(connectionResources);
upgradeAuditMap = sqlScriptExecutorProvider.get().executeQuery(
connectionResources.sqlDialect().convertStatementToSQL(selectStatement),
resultSetProcessor()
);
} catch (Exception e) {
log.warn("Unable to read from UpgradeAudit table", e);
}
return upgradeAuditMap;
} | 3.68 |
morf_OracleMetaDataProvider_determineDefaultValue | /**
* Sets the default value to an empty string for any column other than version. Database-schema level default values are
* not supported by ALFA's domain model hence we don't want to include a default value in the xml definition of a table.
*
* @param columnName the name of the column
* @return the default value
*/
private String determineDefaultValue(String columnName) {
if (columnName.equals("VERSION")) {
return "0";
}
return "";
} | 3.68 |
framework_VaadinPortletSession_getPortletSession | /**
* Returns the underlying portlet session.
*
* @return portlet session
*/
public PortletSession getPortletSession() {
WrappedSession wrappedSession = getSession();
PortletSession session = ((WrappedPortletSession) wrappedSession)
.getPortletSession();
return session;
} | 3.68 |
framework_VAccordion_onSelectTab | /**
* Handle stack item selection.
*
* @param item
* the selected stack item
*/
public void onSelectTab(StackItem item) {
final int index = getWidgetIndex(item);
if (index != activeTabIndex && !disabled && !readonly
&& !disabledTabKeys.contains(tabKeys.get(index))) {
addStyleDependentName("loading");
connector.getRpcProxy(TabsheetServerRpc.class)
.setSelected(tabKeys.get(index));
}
} | 3.68 |
pulsar_AbstractTopic_enableProducerReadForPublishRateLimiting | /**
* It sets the cnx auto-readable if a producer's cnx is disabled due to publish-throttling.
*/
protected void enableProducerReadForPublishRateLimiting() {
if (producers != null) {
producers.values().forEach(producer -> {
producer.getCnx().cancelPublishRateLimiting();
producer.getCnx().enableCnxAutoRead();
});
}
} | 3.68 |
hbase_Scan_setReversed | /**
* Set whether this scan is a reversed one
* <p>
* This is false by default which means forward(normal) scan.
* @param reversed if true, scan will be backward order
*/
public Scan setReversed(boolean reversed) {
this.reversed = reversed;
return this;
} | 3.68 |
streampipes_TextMiningUtil_extractSpans | /*
* Given an array of spans and an array of tokens, it extracts and merges the tokens
* specified in the spans and adds them to a list. This list is returned
*/
public static List<String> extractSpans(Span[] spans, String[] tokens) throws SpRuntimeException {
List<String> list = new ArrayList<>();
for (Span span : spans) {
StringBuilder stringBuilder = new StringBuilder();
for (int i = span.getStart(); i < span.getEnd(); i++) {
if (i >= tokens.length) {
throw new SpRuntimeException("token list does not fit spans (token list lenght: " + tokens.length
+ ", span: [" + span.getStart() + ", " + span.getEnd() + "))");
}
stringBuilder.append(tokens[i]).append(' ');
}
// Removing the last space
stringBuilder.setLength(Math.max(stringBuilder.length() - 1, 0));
list.add(stringBuilder.toString());
}
return list;
} | 3.68 |
hadoop_TimelineEntity_getOtherInfo | /**
* Get the other information of the entity
*
* @return the other information of the entity
*/
public Map<String, Object> getOtherInfo() {
return otherInfo;
} | 3.68 |
hadoop_SimpleUdpServer_getBoundPort | // boundPort will be set only after server starts
public int getBoundPort() {
return this.boundPort;
} | 3.68 |
hudi_AbstractTableFileSystemView_filterUncommittedFiles | /**
* Ignores the uncommitted base and log files.
*
* @param fileSlice File Slice
* @param includeEmptyFileSlice include empty file-slice
*/
private Stream<FileSlice> filterUncommittedFiles(FileSlice fileSlice, boolean includeEmptyFileSlice) {
Option<HoodieBaseFile> committedBaseFile = fileSlice.getBaseFile().isPresent() && completionTimeQueryView.isCompleted(fileSlice.getBaseInstantTime()) ? fileSlice.getBaseFile() : Option.empty();
List<HoodieLogFile> committedLogFiles = fileSlice.getLogFiles().filter(logFile -> completionTimeQueryView.isCompleted(logFile.getDeltaCommitTime())).collect(Collectors.toList());
if ((fileSlice.getBaseFile().isPresent() && !committedBaseFile.isPresent())
|| committedLogFiles.size() != fileSlice.getLogFiles().count()) {
LOG.debug("File Slice (" + fileSlice + ") has uncommitted files.");
// A file is filtered out of the file-slice if the corresponding
// instant has not completed yet.
FileSlice transformed = new FileSlice(fileSlice.getPartitionPath(), fileSlice.getBaseInstantTime(), fileSlice.getFileId());
committedBaseFile.ifPresent(transformed::setBaseFile);
committedLogFiles.forEach(transformed::addLogFile);
if (transformed.isEmpty() && !includeEmptyFileSlice) {
return Stream.of();
}
return Stream.of(transformed);
}
return Stream.of(fileSlice);
} | 3.68 |
dubbo_DubboBootstrapApplicationListener_isOriginalEventSource | /**
* Whether the original {@link ApplicationContext} is the event source
*
* @param event {@link ApplicationEvent}
* @return if original, return <code>true</code>, or <code>false</code>
*/
private boolean isOriginalEventSource(ApplicationEvent event) {
boolean originalEventSource = nullSafeEquals(getApplicationContext(), event.getSource());
return originalEventSource;
} | 3.68 |
flink_ResourceProfile_isMatching | /**
* Check whether required resource profile can be matched.
*
* @param required the required resource profile
* @return true if the requirement is matched, otherwise false
*/
public boolean isMatching(final ResourceProfile required) {
checkNotNull(required, "Cannot check matching with null resources");
throwUnsupportedOperationExceptionIfUnknown();
if (this.equals(ANY)) {
return true;
}
if (this.equals(required)) {
return true;
}
if (required.equals(UNKNOWN)) {
return true;
}
return false;
} | 3.68 |
flink_UserDefinedFunctionHelper_createSpecializedFunction | /**
* Creates the runtime implementation of a {@link FunctionDefinition} as an instance of {@link
* UserDefinedFunction}.
*
* @see SpecializedFunction
*/
public static UserDefinedFunction createSpecializedFunction(
String functionName,
FunctionDefinition definition,
CallContext callContext,
ClassLoader builtInClassLoader,
@Nullable ReadableConfig configuration,
@Nullable ExpressionEvaluatorFactory evaluatorFactory) {
if (definition instanceof SpecializedFunction) {
final SpecializedFunction specialized = (SpecializedFunction) definition;
final SpecializedContext specializedContext =
new SpecializedContext() {
@Override
public CallContext getCallContext() {
return callContext;
}
@Override
public ReadableConfig getConfiguration() {
if (configuration == null) {
throw new TableException(
"Access to configuration is currently not supported for all kinds of calls.");
}
return configuration;
}
@Override
public ClassLoader getBuiltInClassLoader() {
return builtInClassLoader;
}
@Override
public ExpressionEvaluator createEvaluator(
Expression expression,
DataType outputDataType,
DataTypes.Field... args) {
if (evaluatorFactory == null) {
throw new TableException(
"Access to expression evaluation is currently not supported "
+ "for all kinds of calls.");
}
return evaluatorFactory.createEvaluator(
expression, outputDataType, args);
}
@Override
public ExpressionEvaluator createEvaluator(
String sqlExpression,
DataType outputDataType,
DataTypes.Field... args) {
if (evaluatorFactory == null) {
throw new TableException(
"Access to expression evaluation is currently not supported "
+ "for all kinds of calls.");
}
return evaluatorFactory.createEvaluator(
sqlExpression, outputDataType, args);
}
@Override
public ExpressionEvaluator createEvaluator(
BuiltInFunctionDefinition function,
DataType outputDataType,
DataType... args) {
if (evaluatorFactory == null) {
throw new TableException(
"Access to expression evaluation is currently not supported "
+ "for all kinds of calls.");
}
return evaluatorFactory.createEvaluator(function, outputDataType, args);
}
};
final UserDefinedFunction udf = specialized.specialize(specializedContext);
checkState(
udf.getKind() == definition.getKind(),
"Function kind must not change during specialization.");
return udf;
} else if (definition instanceof UserDefinedFunction) {
return (UserDefinedFunction) definition;
} else {
throw new TableException(
String.format(
"Could not find a runtime implementation for function definition '%s'.",
functionName));
}
} | 3.68 |
framework_BootstrapPageResponse_setHeader | /**
* Sets a header value that will be added to the HTTP response. If the
* header had already been set, the new value overwrites the previous one.
*
* @see VaadinResponse#setHeader(String, String)
*
* @param name
* the name of the header
* @param value
* the header value
*/
public void setHeader(String name, String value) {
headers.put(name, value);
} | 3.68 |
flink_SqlFunctionUtils_tanh | /** Calculates the hyperbolic tangent of a big decimal number. */
public static double tanh(DecimalData a) {
return Math.tanh(doubleValue(a));
} | 3.68 |
hbase_SplitTableRegionProcedure_createDaughterRegions | /**
* Create daughter regions
*/
public void createDaughterRegions(final MasterProcedureEnv env) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName());
final FileSystem fs = mfs.getFileSystem();
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
env.getMasterConfiguration(), fs, tabledir, getParentRegion(), false);
regionFs.createSplitsDir(daughterOneRI, daughterTwoRI);
Pair<List<Path>, List<Path>> expectedReferences = splitStoreFiles(env, regionFs);
assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(),
regionFs.getSplitsDir(daughterOneRI));
regionFs.commitDaughterRegion(daughterOneRI, expectedReferences.getFirst(), env);
assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(),
new Path(tabledir, daughterOneRI.getEncodedName()));
assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(),
regionFs.getSplitsDir(daughterTwoRI));
regionFs.commitDaughterRegion(daughterTwoRI, expectedReferences.getSecond(), env);
assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(),
new Path(tabledir, daughterTwoRI.getEncodedName()));
} | 3.68 |
hadoop_GlobExpander_expandLeftmost | /**
* Expand the leftmost outer curly bracket pair containing a
* slash character ("/") in <code>filePattern</code>.
* @param filePatternWithOffset
* @return expanded file patterns
* @throws IOException
*/
private static List<StringWithOffset> expandLeftmost(StringWithOffset
filePatternWithOffset) throws IOException {
String filePattern = filePatternWithOffset.string;
int leftmost = leftmostOuterCurlyContainingSlash(filePattern,
filePatternWithOffset.offset);
if (leftmost == -1) {
return null;
}
int curlyOpen = 0;
StringBuilder prefix = new StringBuilder(filePattern.substring(0, leftmost));
StringBuilder suffix = new StringBuilder();
List<String> alts = new ArrayList<String>();
StringBuilder alt = new StringBuilder();
StringBuilder cur = prefix;
for (int i = leftmost; i < filePattern.length(); i++) {
char c = filePattern.charAt(i);
if (cur == suffix) {
cur.append(c);
} else if (c == '\\') {
i++;
if (i >= filePattern.length()) {
throw new IOException("Illegal file pattern: "
+ "An escaped character does not present for glob "
+ filePattern + " at " + i);
}
c = filePattern.charAt(i);
cur.append(c);
} else if (c == '{') {
if (curlyOpen++ == 0) {
alt.setLength(0);
cur = alt;
} else {
cur.append(c);
}
} else if (c == '}' && curlyOpen > 0) {
if (--curlyOpen == 0) {
alts.add(alt.toString());
alt.setLength(0);
cur = suffix;
} else {
cur.append(c);
}
} else if (c == ',') {
if (curlyOpen == 1) {
alts.add(alt.toString());
alt.setLength(0);
} else {
cur.append(c);
}
} else {
cur.append(c);
}
}
List<StringWithOffset> exp = new ArrayList<StringWithOffset>();
for (String string : alts) {
exp.add(new StringWithOffset(prefix + string + suffix, prefix.length()));
}
return exp;
} | 3.68 |
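For readers unfamiliar with curly-brace globs, the core idea above is to rewrite one {a,b,...} group into one pattern per alternative. The sketch below is a deliberately simplified, single-level version that ignores escaping, nesting and the slash-containment rule the Hadoop code enforces; it is not the Hadoop implementation.

```java
import java.util.ArrayList;
import java.util.List;

// Deliberately simplified one-level brace expansion; not the Hadoop implementation.
public class BraceExpandSketch {
    static List<String> expandFirst(String pattern) {
        List<String> out = new ArrayList<>();
        int open = pattern.indexOf('{');
        int close = pattern.indexOf('}', open);
        if (open < 0 || close < 0) {            // nothing to expand
            out.add(pattern);
            return out;
        }
        String prefix = pattern.substring(0, open);
        String suffix = pattern.substring(close + 1);
        for (String alt : pattern.substring(open + 1, close).split(",", -1)) {
            out.add(prefix + alt + suffix);     // one pattern per alternative
        }
        return out;
    }

    public static void main(String[] args) {
        // [/data/2023/logs, /data/2024/logs]
        System.out.println(expandFirst("/data/{2023,2024}/logs"));
    }
}
```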
hadoop_TokenIdentifier_getBytes | /**
* Get the bytes for the token identifier
* @return the bytes of the identifier
*/
public byte[] getBytes() {
DataOutputBuffer buf = new DataOutputBuffer(4096);
try {
this.write(buf);
} catch (IOException ie) {
throw new RuntimeException("i/o error in getBytes", ie);
}
return Arrays.copyOf(buf.getData(), buf.getLength());
} | 3.68 |
hudi_DagUtils_convertYamlToDag | /**
* Converts a YAML representation to {@link WorkflowDag}.
*/
public static WorkflowDag convertYamlToDag(String yaml) throws IOException {
int dagRounds = DEFAULT_DAG_ROUNDS;
int intermittentDelayMins = DEFAULT_INTERMITTENT_DELAY_MINS;
String dagName = DEFAULT_DAG_NAME;
Map<String, DagNode> allNodes = new HashMap<>();
final ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory());
final JsonNode jsonNode = yamlReader.readTree(yaml);
Iterator<Entry<String, JsonNode>> itr = jsonNode.fields();
while (itr.hasNext()) {
Entry<String, JsonNode> dagNode = itr.next();
String key = dagNode.getKey();
switch (key) {
case DAG_NAME:
dagName = dagNode.getValue().asText();
break;
case DAG_ROUNDS:
dagRounds = dagNode.getValue().asInt();
break;
case DAG_INTERMITTENT_DELAY_MINS:
intermittentDelayMins = dagNode.getValue().asInt();
break;
case DAG_CONTENT:
JsonNode dagContent = dagNode.getValue();
Iterator<Entry<String, JsonNode>> contentItr = dagContent.fields();
while (contentItr.hasNext()) {
Entry<String, JsonNode> dagContentNode = contentItr.next();
allNodes.put(dagContentNode.getKey(), convertJsonToDagNode(allNodes, dagContentNode.getKey(), dagContentNode.getValue()));
}
break;
default:
break;
}
}
return new WorkflowDag(dagName, dagRounds, intermittentDelayMins, findRootNodes(allNodes));
} | 3.68 |
graphhopper_HmmProbabilities_emissionLogProbability | /**
* Returns the logarithmic emission probability density.
*
* @param distance Absolute distance [m] between GPS measurement and map
* matching candidate.
*/
public double emissionLogProbability(double distance) {
return Distributions.logNormalDistribution(sigma, distance);
} | 3.68 |
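The snippet delegates to Distributions.logNormalDistribution. Under the usual Newson-Krumm map-matching model this is the log-density of a zero-mean normal distribution over the GPS-to-candidate distance, i.e. log p(d) = -log(sqrt(2*pi) * sigma) - d^2 / (2 * sigma^2). The sketch below writes out that formula; treating it as what the helper computes is an assumption, not a copy of the library code.

```java
// Sketch of the assumed emission model: log-density of a zero-mean normal
// distribution with standard deviation sigma, evaluated at the GPS error distance.
public class EmissionProbabilitySketch {
    static double emissionLogProbability(double sigma, double distance) {
        return Math.log(1.0 / (Math.sqrt(2.0 * Math.PI) * sigma))
                - 0.5 * (distance / sigma) * (distance / sigma);
    }

    public static void main(String[] args) {
        // With sigma = 50 m, a candidate 10 m away scores higher than one 100 m away.
        System.out.println(emissionLogProbability(50.0, 10.0));   // ~ -4.85
        System.out.println(emissionLogProbability(50.0, 100.0));  // ~ -6.83
    }
}
```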
morf_DatabaseMetaDataProvider_loadTablePrimaryKey | /**
* Loads the primary key column names for the given table name,
* as a map of case-agnostic names and respective positions within the key.
*
* @param tableName Name of the table.
* @return Map of respective positions by column names.
*/
protected Map<AName, Integer> loadTablePrimaryKey(RealName tableName) {
final ImmutableMap.Builder<AName, Integer> columns = ImmutableMap.builder();
try {
final DatabaseMetaData databaseMetaData = connection.getMetaData();
try (ResultSet primaryKeyResultSet = databaseMetaData.getPrimaryKeys(null, schemaName, tableName.getDbName())) {
while (primaryKeyResultSet.next()) {
int sequenceNumber = primaryKeyResultSet.getShort(PRIMARY_KEY_SEQ) - 1;
String columnName = primaryKeyResultSet.getString(PRIMARY_COLUMN_NAME);
columns.put(named(columnName), sequenceNumber);
}
if (log.isDebugEnabled()) {
log.debug("Found primary key [" + columns.build() + "] on table [" + tableName + "]");
}
return columns.build();
}
}
catch (SQLException e) {
throw new RuntimeSqlException("Error reading primary keys for table [" + tableName + "]", e);
}
} | 3.68 |
shardingsphere-elasticjob_ZookeeperProperties_toZookeeperConfiguration | /**
* Create ZooKeeper configuration.
*
* @return instance of ZooKeeper configuration
*/
public ZookeeperConfiguration toZookeeperConfiguration() {
ZookeeperConfiguration result = new ZookeeperConfiguration(serverLists, namespace);
result.setBaseSleepTimeMilliseconds(baseSleepTimeMilliseconds);
result.setMaxSleepTimeMilliseconds(maxSleepTimeMilliseconds);
result.setMaxRetries(maxRetries);
result.setSessionTimeoutMilliseconds(sessionTimeoutMilliseconds);
result.setConnectionTimeoutMilliseconds(connectionTimeoutMilliseconds);
result.setDigest(digest);
return result;
} | 3.68 |
querydsl_LiteralExpression_castToNum | /**
* Create a cast expression to the given numeric type
*
* @param <A> numeric type
* @param type numeric type
* @return cast expression
*/
public <A extends Number & Comparable<? super A>> NumberExpression<A> castToNum(Class<A> type) {
return Expressions.numberOperation(type, Ops.NUMCAST, mixin, ConstantImpl.create(type));
} | 3.68 |
hadoop_CreateFlag_validateForAppend | /**
* Validate the CreateFlag for the append operation. The flag must contain
* APPEND, and cannot contain OVERWRITE.
*
* @param flag enum set flag.
*/
public static void validateForAppend(EnumSet<CreateFlag> flag) {
validate(flag);
if (!flag.contains(APPEND)) {
throw new HadoopIllegalArgumentException(flag
+ " does not contain APPEND");
}
} | 3.68 |
hadoop_Find_registerExpressions | /** Register the expressions with the expression factory. */
private static void registerExpressions(ExpressionFactory factory) {
for (Class<? extends Expression> exprClass : EXPRESSIONS) {
factory.registerExpression(exprClass);
}
} | 3.68 |
flink_FlinkImageBuilder_copyFile | /** Copies file into the image. */
public FlinkImageBuilder copyFile(Path localPath, Path containerPath) {
filesToCopy.put(localPath, containerPath);
return this;
} | 3.68 |
hudi_TimelineUtils_getCommitMetadata | /**
* Returns the commit metadata of the given instant.
*
* @param instant The hoodie instant
* @param timeline The timeline
* @return the commit metadata
*/
public static HoodieCommitMetadata getCommitMetadata(
HoodieInstant instant,
HoodieTimeline timeline) throws IOException {
byte[] data = timeline.getInstantDetails(instant).get();
if (instant.getAction().equals(REPLACE_COMMIT_ACTION)) {
return HoodieReplaceCommitMetadata.fromBytes(data, HoodieReplaceCommitMetadata.class);
} else {
return HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class);
}
} | 3.68 |
starts_Handle_getOwner | /**
* Returns the internal name of the class that owns the field or method
* designated by this handle.
*
* @return the internal name of the class that owns the field or method
* designated by this handle.
*/
public String getOwner() {
return owner;
} | 3.68 |
framework_DownloadStream_setContentType | /**
* Sets stream content type.
*
* @param contentType
* the contentType to set
*/
public void setContentType(String contentType) {
this.contentType = contentType;
} | 3.68 |
hbase_QuotaTableUtil_getSnapshots | /**
* Fetches all {@link SpaceQuotaSnapshot} objects from the {@code hbase:quota} table.
* @param conn The HBase connection
* @return A map of table names and their computed snapshot.
*/
public static Map<TableName, SpaceQuotaSnapshot> getSnapshots(Connection conn)
throws IOException {
Map<TableName, SpaceQuotaSnapshot> snapshots = new HashMap<>();
try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME);
ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) {
for (Result r : rs) {
extractQuotaSnapshot(r, snapshots);
}
}
return snapshots;
} | 3.68 |
hadoop_ConsistentHashRing_getLocation | /**
* Return location (owner) of specified item. Owner is the next
* entry on the hash ring (with a hash value > hash value of item).
* @param item Item to look for.
* @return The location of the item.
*/
public String getLocation(String item) {
readLock.lock();
try {
if (ring.isEmpty()) {
return null;
}
String hash = getHash(item);
if (!ring.containsKey(hash)) {
SortedMap<String, String> tailMap = ring.tailMap(hash);
hash = tailMap.isEmpty() ? ring.firstKey() : tailMap.firstKey();
}
String virtualNode = ring.get(hash);
int index = virtualNode.lastIndexOf(SEPARATOR);
if (index >= 0) {
return virtualNode.substring(0, index);
} else {
return virtualNode;
}
} finally {
readLock.unlock();
}
} | 3.68 |
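The lookup pattern above (hash the item, take the next entry clockwise on the ring, wrap to the first key when the tail map is empty) can be reproduced with a plain TreeMap. The sketch below uses MD5 hex strings as ring keys and a simple virtual-node naming scheme; both are assumptions and simplifications, not the Hadoop class.

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.SortedMap;
import java.util.TreeMap;

// Simplified consistent-hash lookup; hash function and virtual-node naming are assumptions.
public class RingLookupSketch {
    private final TreeMap<String, String> ring = new TreeMap<>();

    void addNode(String node, int virtualNodes) {
        for (int i = 0; i < virtualNodes; i++) {
            ring.put(hash(node + "/" + i), node);   // many virtual nodes smooth the distribution
        }
    }

    String locate(String item) {
        if (ring.isEmpty()) {
            return null;
        }
        String h = hash(item);
        SortedMap<String, String> tail = ring.tailMap(h);
        String key = tail.isEmpty() ? ring.firstKey() : tail.firstKey(); // wrap around the ring
        return ring.get(key);
    }

    private static String hash(String s) {
        try {
            byte[] digest = MessageDigest.getInstance("MD5").digest(s.getBytes(StandardCharsets.UTF_8));
            StringBuilder sb = new StringBuilder();
            for (byte b : digest) {
                sb.append(String.format("%02x", b));
            }
            return sb.toString();
        } catch (NoSuchAlgorithmException e) {
            throw new IllegalStateException(e);
        }
    }

    public static void main(String[] args) {
        RingLookupSketch sketch = new RingLookupSketch();
        sketch.addNode("nodeA", 100);
        sketch.addNode("nodeB", 100);
        System.out.println(sketch.locate("/some/path")); // stable owner for this item
    }
}
```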
hadoop_JobTokenIdentifier_getJobId | /**
* Get the jobid
* @return the jobid
*/
public Text getJobId() {
return jobid;
} | 3.68 |
hbase_SplitLogWorker_stop | /**
* stop the SplitLogWorker thread
*/
public void stop() {
coordination.stopProcessingTasks();
stopTask();
} | 3.68 |
flink_ResourceProfile_getManagedMemory | /**
* Get the managed memory needed.
*
* @return The managed memory
*/
public MemorySize getManagedMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return managedMemory;
} | 3.68 |
morf_InsertStatement_insert | /**
* Creates a new insert statement. See class-level documentation for usage instructions.
*
* @return A builder.
*/
public static InsertStatementBuilder insert() {
return new InsertStatementBuilder();
} | 3.68 |
graphhopper_MapMatching_setMeasurementErrorSigma | /**
* Standard deviation of the normal distribution [m] used for modeling the
* GPS error.
*/
public void setMeasurementErrorSigma(double measurementErrorSigma) {
this.measurementErrorSigma = measurementErrorSigma;
} | 3.68 |
hudi_DiskMap_close | /**
* Close and cleanup the Map.
*/
public void close() {
cleanup(false);
} | 3.68 |
pulsar_ConsumerInterceptor_onPartitionsChange | /**
* This method is called when partitions of the topic (partitioned-topic) changes.
*
* @param topicName topic name
* @param partitions new updated number of partitions
*/
default void onPartitionsChange(String topicName, int partitions) {
} | 3.68 |
hbase_RegionServerObserver_postClearCompactionQueues | /**
* This will be called after clearing compaction queues
* @param ctx the environment to interact with the framework and region server.
*/
default void postClearCompactionQueues(
final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
} | 3.68 |
hbase_MasterObserver_preAbortProcedure | /**
* Called before an abortProcedure request has been processed.
* @param ctx the environment to interact with the framework and master
* @param procId the Id of the procedure
*/
default void preAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx,
final long procId) throws IOException {
} | 3.68 |
flink_BinaryStringData_fromBytes | /**
* Creates a {@link BinaryStringData} instance from the given UTF-8 bytes with offset and number
* of bytes.
*/
public static BinaryStringData fromBytes(byte[] bytes, int offset, int numBytes) {
return new BinaryStringData(
new MemorySegment[] {MemorySegmentFactory.wrap(bytes)}, offset, numBytes);
} | 3.68 |
hadoop_RecordComparator_define | /**
* Register an optimized comparator for a {@link Record} implementation.
*
* @param c record class for which a raw comparator is provided
* @param comparator Raw comparator instance for class c
*/
public static synchronized void define(Class c, RecordComparator comparator) {
WritableComparator.define(c, comparator);
} | 3.68 |
flink_ConfigurationUtils_filterPrefixMapKey | /** Filter condition for prefix map keys. */
public static boolean filterPrefixMapKey(String key, String candidate) {
final String prefixKey = key + ".";
return candidate.startsWith(prefixKey);
} | 3.68 |
framework_VAbstractCalendarPanel_setDateStyles | /**
* Sets the style names for dates.
*
* @param dateStyles
* the map of date string to style name
*
* @since 8.3
*/
public void setDateStyles(Map<String, String> dateStyles) {
this.dateStyles.clear();
if (dateStyles != null) {
this.dateStyles.putAll(dateStyles);
}
} | 3.68 |
dubbo_NacosRegistry_getServiceNamesForOps | /**
* Get the service names for Dubbo OPS
*
* @param url {@link URL}
* @return non-null
*/
private Set<String> getServiceNamesForOps(URL url) {
Set<String> serviceNames = getAllServiceNames();
filterServiceNames(serviceNames, url);
return serviceNames;
} | 3.68 |
hbase_ScanWildcardColumnTracker_checkColumn | /**
* {@inheritDoc} This receives puts *and* deletes.
*/
@Override
public MatchCode checkColumn(Cell cell, byte type) throws IOException {
return MatchCode.INCLUDE;
} | 3.68 |
morf_SqlServerDialect_getDatabaseType | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getDatabaseType()
*/
@Override
public DatabaseType getDatabaseType() {
return DatabaseType.Registry.findByIdentifier(SqlServer.IDENTIFIER);
} | 3.68 |
hadoop_RenameOperation_completeActiveCopies | /**
* Wait for the active copies to complete then reset the list.
* @param reason for messages
* @throws IOException if one of the called futures raised an IOE.
* @throws RuntimeException if one of the futures raised one.
*/
@Retries.OnceTranslated
private void completeActiveCopies(String reason) throws IOException {
LOG.debug("Waiting for {} active copies to complete: {}",
activeCopies.size(), reason);
waitForCompletion(activeCopies);
activeCopies.clear();
} | 3.68 |
AreaShop_FileManager_preUpdateFiles | /**
* Checks for old file formats and converts them to the latest format.
* After conversion the region files need to be loaded.
*/
@SuppressWarnings("unchecked")
private void preUpdateFiles() {
Integer fileStatus = versions.get(AreaShop.versionFiles);
// If the files are already at the current version
if(fileStatus != null && fileStatus == AreaShop.versionFilesCurrent) {
return;
}
AreaShop.info("Updating AreaShop data to the latest format:");
// Update to YAML based format
if(fileStatus == null || fileStatus < 2) {
String rentPath = plugin.getDataFolder() + File.separator + "rents";
String buyPath = plugin.getDataFolder() + File.separator + "buys";
File rentFile = new File(rentPath);
File buyFile = new File(buyPath);
String oldFolderPath = plugin.getDataFolder() + File.separator + "#old" + File.separator;
File oldFolderFile = new File(oldFolderPath);
// Convert old rent files
boolean buyFileFound = false, rentFileFound = false;
if(rentFile.exists()) {
rentFileFound = true;
if(!oldFolderFile.exists() & !oldFolderFile.mkdirs()) {
AreaShop.warn("Could not create directory: " + oldFolderFile.getAbsolutePath());
}
versions.putIfAbsent("rents", -1);
HashMap<String, HashMap<String, String>> rents = null;
try {
ObjectInputStream input = new ObjectInputStream(new FileInputStream(rentPath));
rents = (HashMap<String, HashMap<String, String>>)input.readObject();
input.close();
} catch(IOException | ClassNotFoundException | ClassCastException e) {
AreaShop.warn(" Error: Something went wrong reading file: " + rentPath);
}
// Delete the file if it is totally wrong
if(rents == null) {
try {
if(!rentFile.delete()) {
AreaShop.warn("Could not delete file: " + rentFile.getAbsolutePath());
}
} catch(Exception e) {
AreaShop.warn("Could not delete file: " + rentFile.getAbsolutePath());
}
} else {
// Move old file
try {
Files.move(new File(rentPath), new File(oldFolderPath + "rents"));
} catch(Exception e) {
AreaShop.warn(" Could not create a backup of '" + rentPath + "', check the file permissions (conversion to next version continues)");
}
// Check if conversion is needed
if(versions.get("rents") < 1) {
// Upgrade the rent to the latest version
if(versions.get("rents") < 0) {
for(String rentName : rents.keySet()) {
HashMap<String, String> rent = rents.get(rentName);
// Save the rentName in the hashmap and use a small caps rentName as key
if(rent.get("name") == null) {
rent.put("name", rentName);
rents.remove(rentName);
rents.put(rentName.toLowerCase(), rent);
}
// Save the default setting for region restoring
rent.putIfAbsent("restore", "general");
// Save the default setting for the region restore profile
rent.putIfAbsent("profile", "default");
// Change to version 0
versions.put("rents", 0);
}
AreaShop.info(" Updated version of '" + buyPath + "' from -1 to 0 (switch to using lowercase region names, adding default schematic enabling and profile)");
}
if(versions.get("rents") < 1) {
for(String rentName : rents.keySet()) {
HashMap<String, String> rent = rents.get(rentName);
if(rent.get("player") != null) {
@SuppressWarnings("deprecation") // Fake deprecation by Bukkit to inform developers, method will stay
OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(rent.get("player"));
rent.put("playeruuid", offlinePlayer.getUniqueId().toString());
rent.remove("player");
}
// Change version to 1
versions.put("rents", 1);
}
AreaShop.info(" Updated version of '" + rentPath + "' from 0 to 1 (switch to UUID's for player identification)");
}
}
// Save rents to new format
File regionsFile = new File(regionsPath);
					if(!regionsFile.exists() && !regionsFile.mkdirs()) {
AreaShop.warn("Could not create directory: " + regionsFile.getAbsolutePath());
return;
}
for(HashMap<String, String> rent : rents.values()) {
YamlConfiguration regionConfig = new YamlConfiguration();
regionConfig.set("general.name", rent.get("name").toLowerCase());
regionConfig.set("general.type", "rent");
regionConfig.set("general.world", rent.get("world"));
regionConfig.set("general.signs.0.location.world", rent.get("world"));
regionConfig.set("general.signs.0.location.x", Double.parseDouble(rent.get("x")));
regionConfig.set("general.signs.0.location.y", Double.parseDouble(rent.get("y")));
regionConfig.set("general.signs.0.location.z", Double.parseDouble(rent.get("z")));
regionConfig.set("rent.price", Double.parseDouble(rent.get("price")));
regionConfig.set("rent.duration", rent.get("duration"));
if(rent.get("restore") != null && !rent.get("restore").equals("general")) {
regionConfig.set("general.enableRestore", rent.get("restore"));
}
if(rent.get("profile") != null && !rent.get("profile").equals("default")) {
regionConfig.set("general.schematicProfile", rent.get("profile"));
}
if(rent.get("tpx") != null) {
regionConfig.set("general.teleportLocation.world", rent.get("world"));
regionConfig.set("general.teleportLocation.x", Double.parseDouble(rent.get("tpx")));
regionConfig.set("general.teleportLocation.y", Double.parseDouble(rent.get("tpy")));
regionConfig.set("general.teleportLocation.z", Double.parseDouble(rent.get("tpz")));
regionConfig.set("general.teleportLocation.yaw", rent.get("tpyaw"));
regionConfig.set("general.teleportLocation.pitch", rent.get("tppitch"));
}
if(rent.get("playeruuid") != null) {
regionConfig.set("rent.renter", rent.get("playeruuid"));
regionConfig.set("rent.renterName", Utils.toName(rent.get("playeruuid")));
regionConfig.set("rent.rentedUntil", Long.parseLong(rent.get("rented")));
}
try {
regionConfig.save(new File(regionsPath + File.separator + rent.get("name").toLowerCase() + ".yml"));
} catch(IOException e) {
AreaShop.warn(" Error: Could not save region file while converting: " + regionsPath + File.separator + rent.get("name").toLowerCase() + ".yml");
}
}
AreaShop.info(" Updated rent regions to new .yml format (check the /regions folder)");
}
// Change version number
versions.remove("rents");
versions.put(AreaShop.versionFiles, AreaShop.versionFilesCurrent);
saveVersions();
}
if(buyFile.exists()) {
buyFileFound = true;
				if(!oldFolderFile.exists() && !oldFolderFile.mkdirs()) {
AreaShop.warn("Could not create directory: " + oldFolderFile.getAbsolutePath());
return;
}
versions.putIfAbsent("buys", -1);
HashMap<String, HashMap<String, String>> buys = null;
				try(ObjectInputStream input = new ObjectInputStream(new FileInputStream(buyPath))) {
					buys = (HashMap<String, HashMap<String, String>>)input.readObject();
				} catch(IOException | ClassNotFoundException | ClassCastException e) {
					AreaShop.warn(" Something went wrong reading file: " + buyPath);
				}
// Delete the file if it is totally wrong
if(buys == null) {
try {
if(!buyFile.delete()) {
AreaShop.warn("Could not delete file: " + buyFile.getAbsolutePath());
}
} catch(Exception e) {
AreaShop.warn("Could not delete file: " + buyFile.getAbsolutePath());
}
} else {
// Backup current file
try {
Files.move(new File(buyPath), new File(oldFolderPath + "buys"));
} catch(Exception e) {
AreaShop.warn(" Could not create a backup of '" + buyPath + "', check the file permissions (conversion to next version continues)");
}
// Check if conversion is needed
if(versions.get("buys") < 1) {
// Upgrade the buy to the latest version
if(versions.get("buys") < 0) {
							// Iterate over a copy of the keys because entries are re-keyed inside the loop
							for(String buyName : new HashSet<>(buys.keySet())) {
								HashMap<String, String> buy = buys.get(buyName);
								// Save the buyName in the hashmap and use a lowercase buyName as key
if(buy.get("name") == null) {
buy.put("name", buyName);
buys.remove(buyName);
buys.put(buyName.toLowerCase(), buy);
}
// Save the default setting for region restoring
buy.putIfAbsent("restore", "general");
// Save the default setting for the region restore profile
buy.putIfAbsent("profile", "default");
// Change to version 0
versions.put("buys", 0);
}
AreaShop.info(" Updated version of '" + buyPath + "' from -1 to 0 (switch to using lowercase region names, adding default schematic enabling and profile)");
}
if(versions.get("buys") < 1) {
for(String buyName : buys.keySet()) {
HashMap<String, String> buy = buys.get(buyName);
if(buy.get("player") != null) {
@SuppressWarnings("deprecation") // Fake deprecation by Bukkit to inform developers, method will stay
OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(buy.get("player"));
buy.put("playeruuid", offlinePlayer.getUniqueId().toString());
buy.remove("player");
}
// Change version to 1
versions.put("buys", 1);
}
							AreaShop.info(" Updated version of '" + buyPath + "' from 0 to 1 (switch to UUIDs for player identification)");
}
}
// Save buys to new format
File regionsFile = new File(regionsPath);
					if(!regionsFile.exists() && !regionsFile.mkdirs()) {
AreaShop.warn("Could not create directory: " + regionsFile.getAbsolutePath());
}
for(HashMap<String, String> buy : buys.values()) {
YamlConfiguration regionConfig = new YamlConfiguration();
regionConfig.set("general.name", buy.get("name").toLowerCase());
regionConfig.set("general.type", "buy");
regionConfig.set("general.world", buy.get("world"));
regionConfig.set("general.signs.0.location.world", buy.get("world"));
regionConfig.set("general.signs.0.location.x", Double.parseDouble(buy.get("x")));
regionConfig.set("general.signs.0.location.y", Double.parseDouble(buy.get("y")));
regionConfig.set("general.signs.0.location.z", Double.parseDouble(buy.get("z")));
regionConfig.set("buy.price", Double.parseDouble(buy.get("price")));
if(buy.get("restore") != null && !buy.get("restore").equals("general")) {
regionConfig.set("general.enableRestore", buy.get("restore"));
}
if(buy.get("profile") != null && !buy.get("profile").equals("default")) {
regionConfig.set("general.schematicProfile", buy.get("profile"));
}
if(buy.get("tpx") != null) {
regionConfig.set("general.teleportLocation.world", buy.get("world"));
regionConfig.set("general.teleportLocation.x", Double.parseDouble(buy.get("tpx")));
regionConfig.set("general.teleportLocation.y", Double.parseDouble(buy.get("tpy")));
regionConfig.set("general.teleportLocation.z", Double.parseDouble(buy.get("tpz")));
regionConfig.set("general.teleportLocation.yaw", buy.get("tpyaw"));
regionConfig.set("general.teleportLocation.pitch", buy.get("tppitch"));
}
if(buy.get("playeruuid") != null) {
regionConfig.set("buy.buyer", buy.get("playeruuid"));
regionConfig.set("buy.buyerName", Utils.toName(buy.get("playeruuid")));
}
try {
regionConfig.save(new File(regionsPath + File.separator + buy.get("name").toLowerCase() + ".yml"));
} catch(IOException e) {
AreaShop.warn(" Error: Could not save region file while converting: " + regionsPath + File.separator + buy.get("name").toLowerCase() + ".yml");
}
}
AreaShop.info(" Updated buy regions to new .yml format (check the /regions folder)");
}
// Change version number
versions.remove("buys");
}
// Separate try-catch blocks to try them all individually (don't stop after 1 has failed)
try {
Files.move(new File(rentPath + ".old"), new File(oldFolderPath + "rents.old"));
} catch(Exception e) {
// Ignore
}
try {
Files.move(new File(buyPath + ".old"), new File(oldFolderPath + "buys.old"));
} catch(Exception e) {
// Ignore
}
if(buyFileFound || rentFileFound) {
try {
Files.move(new File(plugin.getDataFolder() + File.separator + "config.yml"), new File(oldFolderPath + "config.yml"));
} catch(Exception e) {
// Ignore
}
}
// Update versions file to 2
versions.put(AreaShop.versionFiles, 2);
saveVersions();
if(buyFileFound || rentFileFound) {
AreaShop.info(" Updated to YAML based storage (v1 to v2)");
}
}
} | 3.68 |
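A minimal standalone sketch (not taken from AreaShop) of the conversion performed above: it maps a single legacy rent entry, stored as a flat String-to-String map, onto the per-region YAML layout that preUpdateFiles writes out. The class name, method name and the target-folder parameter are illustrative assumptions; only the YamlConfiguration keys mirror the code above.

import java.io.File;
import java.io.IOException;
import java.util.Map;

import org.bukkit.configuration.file.YamlConfiguration;

public class LegacyRentConverter {

	/**
	 * Writes <regionsFolder>/<name>.yml for a single legacy rent entry (illustrative helper).
	 */
	public static void convertLegacyRent(Map<String, String> rent, File regionsFolder) throws IOException {
		String name = rent.get("name").toLowerCase();
		YamlConfiguration config = new YamlConfiguration();
		config.set("general.name", name);
		config.set("general.type", "rent");
		config.set("general.world", rent.get("world"));
		config.set("general.signs.0.location.world", rent.get("world"));
		config.set("general.signs.0.location.x", Double.parseDouble(rent.get("x")));
		config.set("general.signs.0.location.y", Double.parseDouble(rent.get("y")));
		config.set("general.signs.0.location.z", Double.parseDouble(rent.get("z")));
		config.set("rent.price", Double.parseDouble(rent.get("price")));
		config.set("rent.duration", rent.get("duration"));
		if(rent.get("playeruuid") != null) {
			config.set("rent.renter", rent.get("playeruuid"));
			config.set("rent.rentedUntil", Long.parseLong(rent.get("rented")));
		}
		config.save(new File(regionsFolder, name + ".yml"));
	}
}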
morf_SchemaUtils_versionColumn | /**
* Creates a Column that defines the standard version column.
*
* @return a version Column.
*/
public static Column versionColumn() {
return new ColumnBean("version", DataType.INTEGER, 0, 0, true, "0");
} | 3.68 |
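A hypothetical usage sketch for the builder above: it shows how versionColumn() would typically sit alongside other column definitions when declaring a table. The table(), idColumn() and column() builder calls are assumptions about the surrounding SchemaUtils API and are not part of the snippet above.

import static org.alfasoftware.morf.metadata.SchemaUtils.column;
import static org.alfasoftware.morf.metadata.SchemaUtils.idColumn;
import static org.alfasoftware.morf.metadata.SchemaUtils.table;
import static org.alfasoftware.morf.metadata.SchemaUtils.versionColumn;

import org.alfasoftware.morf.metadata.DataType;
import org.alfasoftware.morf.metadata.Table;

/**
 * Illustrative only: a table definition that includes the standard version column.
 */
public static Table exampleTableWithVersion() {
  return table("Example")
    .columns(
      idColumn(),        // assumed standard id column helper
      versionColumn(),   // the column created by the method above
      column("name", DataType.STRING, 100)
    );
}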
hudi_LSMTimelineWriter_compactAndClean | /**
* Compacts the small parquet files.
*
* <p>The parquet naming convention is:
*
* <pre>${min_instant}_${max_instant}_${level}.parquet</pre>
*
* <p>The 'min_instant' and 'max_instant' represent the instant time range of the parquet file.
* The 'level' represents the number of the level where the file is located, currently we
* have no limit for the number of layers.
*
   * <p>These parquet files compose an LSM-tree layout: each parquet file contains
   * instant metadata entries with consecutive timestamps. Different parquet files may have
   * overlapping instant time ranges.
*
* <pre>
* t1_t2_0.parquet, t3_t4_0.parquet, ... t5_t6_0.parquet L0 layer
* \ /
* \ /
* |
* V
* t3_t6_1.parquet L1 layer
* </pre>
*
   * <p>Compaction and cleaning: once the number of files exceeds a threshold N (currently a constant 10),
   * the oldest N files are replaced with a compacted file in the next layer.
* A cleaning action is triggered right after the compaction.
*
* @param context HoodieEngineContext
*/
@VisibleForTesting
public void compactAndClean(HoodieEngineContext context) throws IOException {
// 1. List all the latest snapshot files
HoodieLSMTimelineManifest latestManifest = LSMTimeline.latestSnapshotManifest(metaClient);
int layer = 0;
// 2. triggers the compaction for L0
Option<String> compactedFileName = doCompact(latestManifest, layer);
while (compactedFileName.isPresent()) {
// 3. once a compaction had been executed for the current layer,
// continues to trigger compaction for the next layer.
latestManifest.addFile(getFileEntry(compactedFileName.get()));
compactedFileName = doCompact(latestManifest, ++layer);
}
// cleaning
clean(context, layer);
} | 3.68 |
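A standalone sketch of the naming convention described in the Javadoc above: it parses ${min_instant}_${max_instant}_${level}.parquet names and, for a given level, selects the oldest N files as a compaction candidate set. This illustrates the layout only and is not Hudi's actual compaction logic; the class name, record and threshold constant are assumptions.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class LsmLayoutSketch {

  static final int COMPACTION_THRESHOLD = 10; // assumed threshold, mirrors the "constant 10" above

  // One parsed parquet file name: ${min_instant}_${max_instant}_${level}.parquet
  record FileEntry(String minInstant, String maxInstant, int level, String fileName) {}

  static FileEntry parse(String fileName) {
    String base = fileName.substring(0, fileName.length() - ".parquet".length());
    String[] parts = base.split("_");
    return new FileEntry(parts[0], parts[1], Integer.parseInt(parts[2]), fileName);
  }

  /**
   * Returns the oldest COMPACTION_THRESHOLD files of the given level,
   * or an empty list if the level has not reached the threshold yet.
   */
  static List<FileEntry> compactionCandidates(List<String> fileNames, int level) {
    List<FileEntry> inLevel = new ArrayList<>();
    for (String name : fileNames) {
      FileEntry entry = parse(name);
      if (entry.level() == level) {
        inLevel.add(entry);
      }
    }
    if (inLevel.size() < COMPACTION_THRESHOLD) {
      return List.of();
    }
    inLevel.sort(Comparator.comparing(FileEntry::minInstant));
    return inLevel.subList(0, COMPACTION_THRESHOLD);
  }
}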