name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hudi_ConfigProperty_markAdvanced | /**
* Marks the config as an advanced config.
*/
public ConfigProperty<T> markAdvanced() {
return new ConfigProperty<>(key, defaultValue, docOnDefaultValue, doc, sinceVersion, deprecatedVersion, inferFunction, validValues, true, alternatives);
} | 3.68 |
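A minimal usage sketch for the method above. The config key, default value, and the `key()`/`defaultValue()`/`withDocumentation()` builder calls are assumptions for illustration; only `markAdvanced()` is taken from the snippet.

```java
// Hypothetical sketch only: the key name and default value are invented, and the
// key()/defaultValue()/withDocumentation() chain is assumed to exist alongside markAdvanced().
public static final ConfigProperty<Boolean> EXAMPLE_ADVANCED_FLAG = ConfigProperty
    .key("hoodie.example.advanced.flag")
    .defaultValue(false)
    .markAdvanced()
    .withDocumentation("Illustrative flag that is hidden from the basic configuration listing.");
```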
flink_KubernetesUtils_getServiceAccount | /**
* Gets the service account from the given pod; if it is not specified, the pod's
* service account name is used instead.
*
* @param flinkPod the Flink pod to parse the service account
* @return the parsed service account
*/
@Nullable
public static String getServiceAccount(FlinkPod flinkPod) {
final String serviceAccount =
flinkPod.getPodWithoutMainContainer().getSpec().getServiceAccount();
if (serviceAccount == null) {
return flinkPod.getPodWithoutMainContainer().getSpec().getServiceAccountName();
}
return serviceAccount;
} | 3.68 |
hbase_ZKUtil_getData | /**
* Get znode data. Does not set a watcher.
* @return ZNode data, null if the node does not exist or if there is an error.
*/
public static byte[] getData(ZKWatcher zkw, String znode)
throws KeeperException, InterruptedException {
try {
byte[] data = zkw.getRecoverableZooKeeper().getData(znode, null, null);
logRetrievedMsg(zkw, znode, data, false);
return data;
} catch (KeeperException.NoNodeException e) {
LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " "
+ "because node does not exist (not an error)"));
return null;
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e);
zkw.keeperException(e);
return null;
}
} | 3.68 |
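A hedged usage sketch of the call above; the znode path is made up and an already-connected `ZKWatcher` is assumed, so this is illustrative rather than the project's own example.

```java
// Illustrative only: assumes an already-connected ZKWatcher and an arbitrary znode path.
static void printZnodeSize(ZKWatcher watcher) throws KeeperException, InterruptedException {
    byte[] data = ZKUtil.getData(watcher, "/hbase/example-znode");
    if (data == null) {
        // Either the znode does not exist or a KeeperException was logged and swallowed.
        System.out.println("no data available");
    } else {
        System.out.println("read " + data.length + " bytes");
    }
}
```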
framework_TabSheet_fireSelectedTabChange | /**
* Sends an event that the currently selected tab has changed.
*
* @param userOriginated
* <code>true</code> if the event originates from the client
* side, <code>false</code> otherwise
* @since 8.1
*/
protected void fireSelectedTabChange(boolean userOriginated) {
fireEvent(new SelectedTabChangeEvent(this, userOriginated));
} | 3.68 |
flink_Tuple24_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple24)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple24 tuple = (Tuple24) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) {
return false;
}
if (f20 != null ? !f20.equals(tuple.f20) : tuple.f20 != null) {
return false;
}
if (f21 != null ? !f21.equals(tuple.f21) : tuple.f21 != null) {
return false;
}
if (f22 != null ? !f22.equals(tuple.f22) : tuple.f22 != null) {
return false;
}
if (f23 != null ? !f23.equals(tuple.f23) : tuple.f23 != null) {
return false;
}
return true;
} | 3.68 |
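Each `f != null ? !f.equals(tuple.f) : tuple.f != null` check above is the standard null-safe inequality test. A small standalone sketch of the same pattern using `java.util.Objects.equals`, with a two-field class standing in for the 24 tuple fields:

```java
import java.util.Objects;

// Standalone illustration of the null-safe field-by-field equality pattern
// used by Tuple24.equals(); a two-field class stands in for the 24 fields.
final class Pair {
    final Object f0;
    final Object f1;

    Pair(Object f0, Object f1) {
        this.f0 = f0;
        this.f1 = f1;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof Pair)) {
            return false;
        }
        Pair other = (Pair) o;
        // Objects.equals(a, b) is equivalent to: a != null ? a.equals(b) : b == null
        return Objects.equals(f0, other.f0) && Objects.equals(f1, other.f1);
    }

    @Override
    public int hashCode() {
        return Objects.hash(f0, f1);
    }
}
```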
framework_VTabsheet_scrolledOutOfView | /**
* Checks if the tab with the selected index has been scrolled out of the
* view (on the left side).
*
* @param index
* the index of the tab to check
* @return {@code true} if the index is smaller than the first visible tab's
* index, {@code false} otherwise
*/
private boolean scrolledOutOfView(int index) {
return scrollerIndex > index;
} | 3.68 |
flink_BooleanParser_byteArrayEquals | /**
* Checks if a part of a byte array matches another byte array with chars (case-insensitive).
*
* @param source The source byte array.
* @param start The offset into the source byte array.
* @param length The length of the match.
* @param other The byte array which is fully compared to the part of the source array.
* @return true if other can be found in the specified part of source, false otherwise.
*/
private static boolean byteArrayEquals(byte[] source, int start, int length, byte[] other) {
if (length != other.length) {
return false;
}
for (int i = 0; i < other.length; i++) {
if (Character.toLowerCase(source[i + start]) != other[i]) {
return false;
}
}
return true;
} | 3.68 |
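The method lower-cases only the source bytes, so it implicitly assumes the `other` array already holds lower-case ASCII (such as the parser's `"true"`/`"false"` constants). A standalone sketch of the same case-insensitive region match with a tiny driver:

```java
import java.nio.charset.StandardCharsets;

// Standalone sketch of the case-insensitive region match; assumes the
// pattern ("other") is already lower-case ASCII, as in BooleanParser.
public final class RegionMatch {

    static boolean regionMatchesIgnoreCase(byte[] source, int start, int length, byte[] lowerCasePattern) {
        if (length != lowerCasePattern.length) {
            return false;
        }
        for (int i = 0; i < lowerCasePattern.length; i++) {
            if (Character.toLowerCase(source[start + i]) != lowerCasePattern[i]) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        byte[] field = "TRUE,1,foo".getBytes(StandardCharsets.US_ASCII);
        byte[] pattern = "true".getBytes(StandardCharsets.US_ASCII);
        System.out.println(regionMatchesIgnoreCase(field, 0, 4, pattern)); // true
    }
}
```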
streampipes_SpTrajectoryBuilder_addPointToTrajectory | /**
* Adds a point to the trajectory object and removes the oldest point
* if the {@link #numberSubPoints} threshold is exceeded.
*
* @param point {@link org.locationtech.jts.geom.Point}
* @param m stores an extra double value to the subpoint of a
* trajectory {@link org.locationtech.jts.geom.CoordinateXYM#M}
*/
public void addPointToTrajectory(Point point, Double m) {
coordinateList.add(createSingleTrajectoryCoordinate(point, m));
if (coordinateList.size() > numberSubPoints) {
removeOldestPoint();
}
} | 3.68 |
flink_QueryableStateClient_setUserClassLoader | /**
* Replaces the existing {@link ClassLoader} (possibly {@code null}) with the provided one.
*
* @param userClassLoader The new {@code userClassLoader}.
* @return The old classloader, or {@code null} if none was specified.
*/
public ClassLoader setUserClassLoader(ClassLoader userClassLoader) {
ClassLoader prev = this.userClassLoader;
this.userClassLoader = userClassLoader;
return prev;
} | 3.68 |
framework_LayoutDependencyTree_getVerticalLayoutTargets | /**
* @return array of managed layouts waiting for vertical layouting
* @deprecated As of 7.0.1, use {@link #getVerticalLayoutTargetsJsArray()}
* for improved performance.
*/
@Deprecated
public ManagedLayout[] getVerticalLayoutTargets() {
return asManagedLayoutArray(getVerticalLayoutTargetsJsArray());
} | 3.68 |
framework_GridSingleSelect_addSingleSelectionListener | /**
* Adds a single selection listener that is called when the value of this
* select is changed either by the user or programmatically.
*
* @param listener
* the value change listener, not {@code null}
* @return a registration for the listener
*/
public Registration addSingleSelectionListener(
SingleSelectionListener<T> listener) {
return model.addSingleSelectionListener(listener);
} | 3.68 |
flink_ObjectIdentifier_toList | /** List of the component names of this object identifier. */
public List<String> toList() {
if (catalogName == null) {
return Collections.singletonList(getObjectName());
}
return Arrays.asList(getCatalogName(), getDatabaseName(), getObjectName());
} | 3.68 |
hadoop_AzureBlobFileSystemStore_populateAbfsOutputStreamContext | /**
* Method to populate AbfsOutputStreamContext with different parameters to
* be used to construct {@link AbfsOutputStream}.
*
* @param isAppendBlob is Append blob support enabled?
* @param lease instance of AbfsLease for this AbfsOutputStream.
* @param client AbfsClient.
* @param statistics FileSystem statistics.
* @param path Path for AbfsOutputStream.
* @param position Position or offset of the file being opened, set to 0
* when creating a new file, but needs to be set for APPEND
* calls on the same file.
* @param tracingContext instance of TracingContext for this AbfsOutputStream.
* @return AbfsOutputStreamContext instance with the desired parameters.
*/
private AbfsOutputStreamContext populateAbfsOutputStreamContext(
boolean isAppendBlob,
AbfsLease lease,
AbfsClient client,
FileSystem.Statistics statistics,
String path,
long position,
TracingContext tracingContext) {
int bufferSize = abfsConfiguration.getWriteBufferSize();
if (isAppendBlob && bufferSize > FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE) {
bufferSize = FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE;
}
return new AbfsOutputStreamContext(abfsConfiguration.getSasTokenRenewPeriodForStreamsInSeconds())
.withWriteBufferSize(bufferSize)
.enableExpectHeader(abfsConfiguration.isExpectHeaderEnabled())
.enableFlush(abfsConfiguration.isFlushEnabled())
.enableSmallWriteOptimization(abfsConfiguration.isSmallWriteOptimizationEnabled())
.disableOutputStreamFlush(abfsConfiguration.isOutputStreamFlushDisabled())
.withStreamStatistics(new AbfsOutputStreamStatisticsImpl())
.withAppendBlob(isAppendBlob)
.withWriteMaxConcurrentRequestCount(abfsConfiguration.getWriteMaxConcurrentRequestCount())
.withMaxWriteRequestsToQueue(abfsConfiguration.getMaxWriteRequestsToQueue())
.withLease(lease)
.withBlockFactory(getBlockFactory())
.withBlockOutputActiveBlocks(blockOutputActiveBlocks)
.withClient(client)
.withPosition(position)
.withFsStatistics(statistics)
.withPath(path)
.withExecutorService(new SemaphoredDelegatingExecutor(boundedThreadPool,
blockOutputActiveBlocks, true))
.withTracingContext(tracingContext)
.withAbfsBackRef(fsBackRef)
.build();
} | 3.68 |
framework_VScrollTable_setContentWidth | /**
* Helper to set the pixel width of the header, body and footer parts.
*
* @param pixels the width in pixels
*/
private void setContentWidth(int pixels) {
tHead.setWidth(pixels + "px");
scrollBodyPanel.setWidth(pixels + "px");
tFoot.setWidth(pixels + "px");
} | 3.68 |
hbase_ServerMetrics_getVersionNumber | /** Returns the version number of a regionserver. */
default int getVersionNumber() {
return 0;
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_processDataBlock | /**
* Iterate over the GenericRecord in the block, read the hoodie key and partition path and call subclass processors to
* handle it.
*/
private void processDataBlock(HoodieDataBlock dataBlock, Option<KeySpec> keySpecOpt) throws Exception {
checkState(partitionNameOverrideOpt.isPresent() || partitionPathFieldOpt.isPresent(),
"Either partition-name override or partition-path field had to be present");
Option<Pair<String, String>> recordKeyPartitionPathFieldPair = populateMetaFields
? Option.empty()
: Option.of(Pair.of(recordKeyField, partitionPathFieldOpt.orElse(null)));
Pair<ClosableIterator<HoodieRecord>, Schema> recordsIteratorSchemaPair =
getRecordsIterator(dataBlock, keySpecOpt);
try (ClosableIterator<HoodieRecord> recordIterator = recordsIteratorSchemaPair.getLeft()) {
while (recordIterator.hasNext()) {
HoodieRecord completedRecord = recordIterator.next()
.wrapIntoHoodieRecordPayloadWithParams(recordsIteratorSchemaPair.getRight(),
hoodieTableMetaClient.getTableConfig().getProps(),
recordKeyPartitionPathFieldPair,
this.withOperationField,
this.partitionNameOverrideOpt,
populateMetaFields,
Option.empty());
processNextRecord(completedRecord);
totalLogRecords.incrementAndGet();
}
}
} | 3.68 |
hbase_CompactedHFilesDischarger_setUseExecutor | /**
* CompactedHFilesDischarger runs asynchronously by default using the hosting RegionServer's
* Executor. In tests it can be useful to force a synchronous cleanup. Use this method to set
* no-executor before you call run.
* @return The old setting for <code>useExecutor</code>
*/
boolean setUseExecutor(final boolean useExecutor) {
boolean oldSetting = this.useExecutor;
this.useExecutor = useExecutor;
return oldSetting;
} | 3.68 |
hadoop_FileSystemReleaseFilter_destroy | /**
* Destroys the filter.
* <p>
* This implementation is a NOP.
*/
@Override
public void destroy() {
} | 3.68 |
hadoop_Error_toIndentedString | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n    ");
} | 3.68 |
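A standalone demonstration of the indentation trick above, replacing every newline with a newline plus four spaces so nested `toString()` output lines up under its parent; the sample strings are made up:

```java
// Standalone demo of the "\n" -> "\n    " indentation trick; the sample data is made up.
public final class IndentDemo {

    static String toIndentedString(Object o) {
        if (o == null) {
            return "null";
        }
        return o.toString().replace("\n", "\n    ");
    }

    public static void main(String[] args) {
        String nested = "class Inner {\nfield: 1\nother: 2\n}";
        System.out.println("class Outer {\n    inner: " + toIndentedString(nested) + "\n}");
    }
}
```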
hbase_LogRollBackupSubprocedure_insideBarrier | /**
* Performs a log roll.
* @return always null; no payload is returned
*/
@Override
public byte[] insideBarrier() throws ForeignException {
rolllog();
return null;
} | 3.68 |
hbase_ChunkCreator_createChunk | /**
* Creates the chunk either onheap or offheap
* @param pool indicates whether the chunk being created will be used by the Pool
* @param chunkType whether the requested chunk is data chunk or index chunk.
* @param size the size of the chunk to be allocated, in bytes
* @return the chunk
*/
private Chunk createChunk(boolean pool, ChunkType chunkType, int size) {
Chunk chunk = null;
int id = chunkID.getAndIncrement();
assert id > 0;
// do not create offheap chunk on demand
if (pool && this.offheap) {
chunk = new OffheapChunk(size, id, chunkType, pool);
} else {
chunk = new OnheapChunk(size, id, chunkType, pool);
}
/**
* Here we always put the chunk into the {@link ChunkCreator#chunkIdMap} no matter whether the
* chunk is pooled or not. <br/>
* For {@link CompactingMemStore},because the chunk could only be acquired from
* {@link ChunkCreator} through {@link MemStoreLABImpl}, and
* {@link CompactingMemStore#indexType} could only be {@link IndexType.CHUNK_MAP} when using
* {@link MemStoreLABImpl}, so we must put chunk into this {@link ChunkCreator#chunkIdMap} to
* make sure the chunk could be got by chunkId.
* <p>
* For {@link DefaultMemStore},it is also reasonable to put the chunk in
* {@link ChunkCreator#chunkIdMap} because: <br/>
* 1.When the {@link MemStoreLAB} which created the chunk is not closed, this chunk is used by
* the {@link Segment} which references this {@link MemStoreLAB}, so this chunk certainly should
* not be GC-ed, putting the chunk in {@link ChunkCreator#chunkIdMap} does not prevent useless
* chunk to be GC-ed. <br/>
* 2.When the {@link MemStoreLAB} which created the chunk is closed, and if the chunk is not
* pooled, {@link ChunkCreator#removeChunk} is invoked to remove the chunk from this
* {@link ChunkCreator#chunkIdMap}, so there is no memory leak.
*/
this.chunkIdMap.put(chunk.getId(), chunk);
return chunk;
} | 3.68 |
hadoop_Hash_getInstance | /**
* Get a singleton instance of hash function of a type
* defined in the configuration.
* @param conf current configuration
* @return defined hash type, or null if type is invalid
*/
public static Hash getInstance(Configuration conf) {
int type = getHashType(conf);
return getInstance(type);
} | 3.68 |
hbase_MetaTableLocator_blockUntilAvailable | /**
* Wait until the meta region is available and is not in transition.
* @param zkw reference to the {@link ZKWatcher} which also contains configuration and
* constants
* @param replicaId the ID of the replica
* @param timeout maximum time to wait in millis
* @return ServerName or null if we timed out.
* @throws InterruptedException if waiting for the socket operation fails
*/
private static ServerName blockUntilAvailable(final ZKWatcher zkw, int replicaId,
final long timeout) throws InterruptedException {
if (timeout < 0) {
throw new IllegalArgumentException();
}
if (zkw == null) {
throw new IllegalArgumentException();
}
long startTime = EnvironmentEdgeManager.currentTime();
ServerName sn = null;
while (true) {
sn = getMetaRegionLocation(zkw, replicaId);
if (
sn != null || (EnvironmentEdgeManager.currentTime() - startTime)
> timeout - HConstants.SOCKET_RETRY_WAIT_MS
) {
break;
}
Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
}
return sn;
} | 3.68 |
hbase_WALPrettyPrinter_setRowPrefixFilter | /**
* Sets the row key prefix by which output will be filtered. When not null, it serves as a
* filter: only log entries whose rows have this prefix will be printed.
*/
public void setRowPrefixFilter(String rowPrefix) {
this.rowPrefix = rowPrefix;
} | 3.68 |
hudi_HoodieRealtimeRecordReaderUtils_arrayWritableToString | /**
* Returns a JSON representation of the ArrayWritable for easier debugging.
*/
public static String arrayWritableToString(ArrayWritable writable) {
if (writable == null) {
return "null";
}
Random random = new Random(2);
StringBuilder builder = new StringBuilder();
Writable[] values = writable.get();
builder.append("\"values_" + random.nextDouble() + "_" + values.length + "\": {");
int i = 0;
for (Writable w : values) {
if (w instanceof ArrayWritable) {
builder.append(arrayWritableToString((ArrayWritable) w)).append(",");
} else {
builder.append("\"value" + i + "\":\"" + w + "\"").append(",");
if (w == null) {
builder.append("\"type" + i + "\":\"unknown\"").append(",");
} else {
builder.append("\"type" + i + "\":\"" + w.getClass().getSimpleName() + "\"").append(",");
}
}
i++;
}
builder.deleteCharAt(builder.length() - 1);
builder.append("}");
return builder.toString();
} | 3.68 |
framework_DesignContext_shouldWriteData | /**
* Determines whether the container data of a component should be written
* out by delegating to a {@link ShouldWriteDataDelegate}. The default
* delegate assumes that all component data is provided by a data provider
* connected to a back end system and that the data should thus not be
* written.
*
* @since 7.5.0
* @see #setShouldWriteDataDelegate(ShouldWriteDataDelegate)
* @param component
* the component to check
* @return <code>true</code> if container data should be written out for the
* provided component; otherwise <code>false</code>.
*/
public boolean shouldWriteData(Component component) {
return getShouldWriteDataDelegate().shouldWriteData(component);
} | 3.68 |
hbase_Increment_getFamilyMapOfLongs | /**
* Before 0.95, when you called Increment#getFamilyMap(), you got back a map of families to a list
* of Longs. Now, {@link #getFamilyCellMap()} returns a map of families to lists of Cells. This method has
* been added so you can have the old behavior.
* @return Map of families to a Map of qualifiers and their Long increments.
* @since 0.95.0
*/
public Map<byte[], NavigableMap<byte[], Long>> getFamilyMapOfLongs() {
NavigableMap<byte[], List<Cell>> map = super.getFamilyCellMap();
Map<byte[], NavigableMap<byte[], Long>> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], List<Cell>> entry : map.entrySet()) {
NavigableMap<byte[], Long> longs = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Cell cell : entry.getValue()) {
longs.put(CellUtil.cloneQualifier(cell),
Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
}
results.put(entry.getKey(), longs);
}
return results;
} | 3.68 |
hadoop_HostnameFilter_get | /**
* Returns the requester hostname.
*
* @return the requester hostname.
*/
public static String get() {
return HOSTNAME_TL.get();
} | 3.68 |
dubbo_Application_main | /**
* In order to make sure the multicast registry works, you need to specify
* '-Djava.net.preferIPv4Stack=true' before launching the application.
*/
public static void main(String[] args) throws Exception {
ClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext("spring/dubbo-consumer.xml");
context.start();
DemoService demoService = context.getBean("demoService", DemoService.class);
GreetingService greetingService = context.getBean("greetingService", GreetingService.class);
RestDemoService restDemoService = context.getBean("restDemoService", RestDemoService.class);
TripleService tripleService = context.getBean("tripleService", TripleService.class);
new Thread(() -> {
while (true) {
try {
String greetings = greetingService.hello();
System.out.println(greetings + " from separated thread.");
} catch (Exception e) {
// e.printStackTrace();
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
})
.start();
new Thread(() -> {
while (true) {
try {
Object restResult = restDemoService.sayHello("rest");
System.out.println(restResult + " from separated thread.");
restResult = restDemoService.testBody5(TestPO.getInstance());
System.out.println(restResult + " from separated thread.");
restResult = restDemoService.hello(1, 2);
System.out.println(restResult + " from separated thread.");
String form1 = restDemoService.testForm1("form1");
System.out.println(form1);
MultivaluedHashMap multivaluedHashMap = new MultivaluedHashMap();
multivaluedHashMap.put("1", Arrays.asList("1"));
multivaluedHashMap.put("2", Arrays.asList("2"));
MultivaluedMap form2 = restDemoService.testForm2(multivaluedHashMap);
System.out.println(form2);
} catch (Exception e) {
e.printStackTrace();
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
})
.start();
while (true) {
try {
CompletableFuture<String> hello = demoService.sayHelloAsync("world");
System.out.println("result: " + hello.get());
String greetings = greetingService.hello();
System.out.println("result: " + greetings);
} catch (Exception e) {
// e.printStackTrace();
}
Thread.sleep(5000);
}
} | 3.68 |
Activiti_BpmnDeploymentHelper_addAuthorizationsForNewProcessDefinition | /**
* Adds candidate-starter authorizations for the given process definition.
*
* @param process the BPMN process to read candidate starter users and groups from
* @param processDefinition the process definition entity to add authorizations for
*/
public void addAuthorizationsForNewProcessDefinition(Process process, ProcessDefinitionEntity processDefinition) {
CommandContext commandContext = Context.getCommandContext();
if (process != null) {
addAuthorizationsFromIterator(commandContext, process.getCandidateStarterUsers(), processDefinition, ExpressionType.USER);
addAuthorizationsFromIterator(commandContext, process.getCandidateStarterGroups(), processDefinition, ExpressionType.GROUP);
}
} | 3.68 |
hbase_HRegionServer_closeUserRegions | /**
* Schedule closes on all user regions. Should be safe calling multiple times because it won't
* close regions that are already closed or that are closing.
* @param abort Whether we're running an abort.
*/
private void closeUserRegions(final boolean abort) {
this.onlineRegionsLock.writeLock().lock();
try {
for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
HRegion r = e.getValue();
if (!r.getRegionInfo().isMetaRegion() && r.isAvailable()) {
// Don't update zk with this close transition; pass false.
closeRegionIgnoreErrors(r.getRegionInfo(), abort);
}
}
} finally {
this.onlineRegionsLock.writeLock().unlock();
}
} | 3.68 |
morf_SelectFirstStatement_toString | /**
* @see org.alfasoftware.morf.sql.AbstractSelectStatement#toString()
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder("SQL SELECT FIRST ");
result.append(super.toString());
return result.toString();
} | 3.68 |
framework_VCalendar_setEventMoveAllowed | /**
* Sets whether moving an event is allowed.
*
* @param eventMoveAllowed
* True if moving is allowed, false if not
*/
public void setEventMoveAllowed(boolean eventMoveAllowed) {
this.eventMoveAllowed = eventMoveAllowed;
} | 3.68 |
dubbo_StringUtils_isEquals | /**
* Null-safe equals: if s1 and s2 are both null, returns true; if only one is null, returns false;
* otherwise compares them with equals().
*
* @param s1 str1
* @param s2 str2
* @return equals
*/
public static boolean isEquals(String s1, String s2) {
if (s1 == null && s2 == null) {
return true;
}
if (s1 == null || s2 == null) {
return false;
}
return s1.equals(s2);
} | 3.68 |
hbase_PrefixFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof PrefixFilter)) {
return false;
}
PrefixFilter other = (PrefixFilter) o;
return Bytes.equals(this.getPrefix(), other.getPrefix());
} | 3.68 |
framework_AbsoluteLayout_iterator | /**
* Gets an iterator for going through all components enclosed in the
* absolute layout.
*/
@Override
public Iterator<Component> iterator() {
return Collections
.unmodifiableCollection(componentToCoordinates.keySet())
.iterator();
} | 3.68 |
querydsl_ComparableExpression_goe | /**
* Create a {@code this >= right} expression
*
* @param right rhs of the comparison
* @return this >= right
* @see java.lang.Comparable#compareTo(Object)
*/
public BooleanExpression goe(Expression<T> right) {
return Expressions.booleanOperation(Ops.GOE, mixin, right);
} | 3.68 |
hadoop_SingleFilePerBlockCache_getIntList | /**
* Produces a human-readable list of blocks for logging purposes.
* This method minimizes the length of the returned list by converting
* a contiguous run of blocks into a range.
* For example, 1, 3, 4, 5, 6, 8 becomes 1, 3~6, 8.
*/
private String getIntList(Iterable<Integer> nums) {
List<String> numList = new ArrayList<>();
List<Integer> numbers = new ArrayList<Integer>();
for (Integer n : nums) {
numbers.add(n);
}
Collections.sort(numbers);
int index = 0;
while (index < numbers.size()) {
int start = numbers.get(index);
int prev = start;
int end = start;
while ((++index < numbers.size()) && ((end = numbers.get(index)) == prev + 1)) {
prev = end;
}
if (start == prev) {
numList.add(Integer.toString(start));
} else {
numList.add(String.format("%d~%d", start, prev));
}
}
return String.join(", ", numList);
} | 3.68 |
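A standalone, simplified re-implementation of the contiguous-run compression above, reproducing the example from the comment (1, 3, 4, 5, 6, 8 becomes `1, 3~6, 8`); it is a sketch, not the class's own test:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Simplified re-implementation of the contiguous-run compression used by getIntList().
public final class RangeCompressDemo {

    static String compress(List<Integer> nums) {
        List<Integer> numbers = new ArrayList<>(nums);
        Collections.sort(numbers);
        List<String> parts = new ArrayList<>();
        int index = 0;
        while (index < numbers.size()) {
            int start = numbers.get(index);
            int prev = start;
            // Extend the run while the next number is exactly prev + 1.
            while (++index < numbers.size() && numbers.get(index) == prev + 1) {
                prev = numbers.get(index);
            }
            parts.add(start == prev ? Integer.toString(start) : String.format("%d~%d", start, prev));
        }
        return String.join(", ", parts);
    }

    public static void main(String[] args) {
        System.out.println(compress(Arrays.asList(1, 3, 4, 5, 6, 8))); // 1, 3~6, 8
    }
}
```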
flink_FlinkRexBuilder_toSarg | /**
* Converts a list of expressions to a search argument, or returns null if not possible.
*
* <p>Copied from the {@link RexBuilder} to fix the {@link RexBuilder#makeIn}.
*/
@SuppressWarnings("UnstableApiUsage")
private static <C extends Comparable<C>> Sarg<C> toSarg(
Class<C> clazz, List<? extends RexNode> ranges, boolean containsNull) {
if (ranges.isEmpty()) {
// Cannot convert an empty list to a Sarg (by this interface, at least)
// because we use the type of the first element.
return null;
}
final com.google.common.collect.RangeSet<C> rangeSet =
com.google.common.collect.TreeRangeSet.create();
for (RexNode range : ranges) {
final C value = toComparable(clazz, range);
if (value == null) {
return null;
}
rangeSet.add(com.google.common.collect.Range.singleton(value));
}
return Sarg.of(containsNull, rangeSet);
} | 3.68 |
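Setting the Calcite-specific `Sarg.of(...)` wrapping aside, the core step above is collecting point values into a Guava `RangeSet` of singleton ranges. A minimal standalone sketch of just that part, with made-up values:

```java
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

// Minimal sketch of the RangeSet-of-singletons collection step; the values are made up,
// and the Calcite Sarg.of(...) wrapping is intentionally left out.
public final class SingletonRangeSetDemo {
    public static void main(String[] args) {
        RangeSet<Integer> rangeSet = TreeRangeSet.create();
        for (int value : new int[] {1, 2, 3, 7}) {
            // Each value is stored as a closed singleton range [v..v].
            rangeSet.add(Range.singleton(value));
        }
        System.out.println(rangeSet.contains(2)); // true
        System.out.println(rangeSet.contains(5)); // false
    }
}
```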
framework_Result_map | /**
* If this Result has a value, returns a Result of applying the given
* function to the value. Otherwise, returns a Result bearing the same error
* as this one. Note that any exceptions thrown by the mapping function are
* not wrapped but allowed to propagate.
*
* @param <S>
* the type of the mapped value
* @param mapper
* the mapping function
* @return the mapped result
*/
public default <S> Result<S> map(SerializableFunction<R, S> mapper) {
return flatMap(value -> ok(mapper.apply(value)));
} | 3.68 |
hadoop_WriteOperationHelper_getConf | /**
* Get the configuration of this instance; essentially the owning
* filesystem configuration.
* @return the configuration.
*/
public Configuration getConf() {
return conf;
} | 3.68 |
flink_QuickSort_sort | /**
* Sort the given range of items using quick sort. {@inheritDoc} If the recursion depth falls
* below {@link #getMaxDepth}, then switch to {@link HeapSort}.
*/
public void sort(final IndexedSortable s, int p, int r) {
int recordsPerSegment = s.recordsPerSegment();
int recordSize = s.recordSize();
int maxOffset = recordSize * (recordsPerSegment - 1);
int pN = p / recordsPerSegment;
int pO = (p % recordsPerSegment) * recordSize;
int rN = r / recordsPerSegment;
int rO = (r % recordsPerSegment) * recordSize;
sortInternal(
s,
recordsPerSegment,
recordSize,
maxOffset,
p,
pN,
pO,
r,
rN,
rO,
getMaxDepth(r - p));
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_convert | // temporary type-safe casting
private static Map<HiveParserASTNode, ExprNodeDesc> convert(Map<Node, Object> outputs) {
Map<HiveParserASTNode, ExprNodeDesc> converted = new LinkedHashMap<>();
for (Map.Entry<Node, Object> entry : outputs.entrySet()) {
if (entry.getKey() instanceof HiveParserASTNode
&& (entry.getValue() == null || entry.getValue() instanceof ExprNodeDesc)) {
converted.put((HiveParserASTNode) entry.getKey(), (ExprNodeDesc) entry.getValue());
} else {
LOG.warn("Invalid type entry " + entry);
}
}
return converted;
} | 3.68 |
hbase_ScannerModel_getFilter | /** Returns the filter specification */
@XmlElement
public String getFilter() {
return filter;
} | 3.68 |
querydsl_BeanMap_getReadMethod | /**
* Returns the accessor for the property with the given name.
*
* @param name the name of the property
* @return the accessor method for the property, or null
*/
public Method getReadMethod(String name) {
return readMethods.get(name);
} | 3.68 |
streampipes_AbstractProcessingElementBuilder_setStream2 | /**
* @deprecated Use {@link #requiredStream(CollectedStreamRequirements)} instead
*/
@Deprecated(since = "0.90.0", forRemoval = true)
public K setStream2() {
stream2 = true;
return me();
} | 3.68 |
flink_GSCommitRecoverableSerializer_deserializeCommitRecoverable | /**
* Deserializes a commit recoverable from the input stream.
*
* @param dataInputStream The input stream
* @return The commit recoverable
* @throws IOException On underlying failure
*/
static GSCommitRecoverable deserializeCommitRecoverable(DataInputStream dataInputStream)
throws IOException {
// finalBlobId
String finalBucketName = dataInputStream.readUTF();
String finalObjectName = dataInputStream.readUTF();
GSBlobIdentifier finalBlobIdentifier =
new GSBlobIdentifier(finalBucketName, finalObjectName);
// componentObjectIds
ArrayList<UUID> componentObjectIds = new ArrayList<>();
int count = dataInputStream.readInt();
for (int i = 0; i < count; i++) {
long msbValue = dataInputStream.readLong();
long lsbValue = dataInputStream.readLong();
UUID componentObjectId = new UUID(msbValue, lsbValue);
componentObjectIds.add(componentObjectId);
}
GSCommitRecoverable recoverable =
new GSCommitRecoverable(finalBlobIdentifier, componentObjectIds);
LOGGER.trace("Deserialized commit recoverable {}", recoverable);
return recoverable;
} | 3.68 |
hadoop_QuorumException_create | /**
* Create a QuorumException instance with a descriptive message detailing
* the underlying exceptions, as well as any successful responses which
* were returned.
* @param <K> the keys for the quorum calls
* @param <V> the success response type
* @param successes any successful responses returned
* @param exceptions the exceptions returned
*/
public static <K, V> QuorumException create(
String simpleMsg,
Map<K, V> successes,
Map<K, Throwable> exceptions) {
Preconditions.checkArgument(!exceptions.isEmpty(),
"Must pass exceptions");
StringBuilder msg = new StringBuilder();
msg.append(simpleMsg).append(". ");
if (!successes.isEmpty()) {
msg.append(successes.size()).append(" successful responses:\n");
Joiner.on("\n")
.useForNull("null [success]")
.withKeyValueSeparator(": ")
.appendTo(msg, successes);
msg.append("\n");
}
msg.append(exceptions.size() + " exceptions thrown:\n");
boolean isFirst = true;
for (Map.Entry<K, Throwable> e : exceptions.entrySet()) {
if (!isFirst) {
msg.append("\n");
}
isFirst = false;
msg.append(e.getKey()).append(": ");
if (e.getValue() instanceof RuntimeException) {
msg.append(StringUtils.stringifyException(e.getValue()));
} else if (e.getValue().getLocalizedMessage() != null) {
msg.append(e.getValue().getLocalizedMessage());
} else {
msg.append(StringUtils.stringifyException(e.getValue()));
}
}
return new QuorumException(msg.toString());
} | 3.68 |
hbase_HFileBlockIndex_getRootBlockCount | /** Returns the number of root-level blocks in this block index */
public int getRootBlockCount() {
return rootCount;
} | 3.68 |
framework_ColumnProperty_isPersistent | /**
* Return whether the value of this property should be persisted to the
* database.
*
* @return true if the value should be written to the database, false
* otherwise.
*/
public boolean isPersistent() {
if (isVersionColumn()) {
return false;
}
return isReadOnlyChangeAllowed() && !isReadOnly();
} | 3.68 |
hbase_MasterObserver_postMergeRegions | /**
* called after merge regions request.
* @param c coprocessor environment
* @param regionsToMerge regions to be merged
*/
default void postMergeRegions(final ObserverContext<MasterCoprocessorEnvironment> c,
final RegionInfo[] regionsToMerge) throws IOException {
} | 3.68 |
hbase_QuotaObserverChore_getNumReportedRegions | /**
* Computes the number of regions reported for a table.
*/
int getNumReportedRegions(TableName table, QuotaSnapshotStore<TableName> tableStore)
throws IOException {
return Iterables.size(tableStore.filterBySubject(table));
} | 3.68 |
dubbo_AsyncRpcResult_newDefaultAsyncResult | /**
* Some utility methods used to quickly generate default AsyncRpcResult instance.
*/
public static AsyncRpcResult newDefaultAsyncResult(AppResponse appResponse, Invocation invocation) {
return new AsyncRpcResult(CompletableFuture.completedFuture(appResponse), invocation);
} | 3.68 |
morf_Upgrade_performUpgrade | /**
* Static convenience method which takes the specified database and upgrades it to the target
* schema, using the upgrade steps supplied which have not already been applied.
* <b>This static context does not support Graph Based Upgrade.</b>
*
* @param targetSchema The target database schema.
* @param upgradeSteps All upgrade steps which should be deemed to have already run.
* @param connectionResources Connection details for the database.
* @param viewDeploymentValidator External view deployment validator.
*/
public static void performUpgrade(Schema targetSchema, Collection<Class<? extends UpgradeStep>> upgradeSteps, ConnectionResources connectionResources, ViewDeploymentValidator viewDeploymentValidator) {
SqlScriptExecutorProvider sqlScriptExecutorProvider = new SqlScriptExecutorProvider(connectionResources);
UpgradeStatusTableService upgradeStatusTableService = new UpgradeStatusTableServiceImpl(sqlScriptExecutorProvider, connectionResources.sqlDialect());
DatabaseUpgradePathValidationService databaseUpgradePathValidationService = new DatabaseUpgradePathValidationServiceImpl(connectionResources, upgradeStatusTableService);
try {
UpgradePath path = Upgrade.createPath(targetSchema, upgradeSteps, connectionResources, upgradeStatusTableService, viewDeploymentValidator, databaseUpgradePathValidationService);
if (path.hasStepsToApply()) {
sqlScriptExecutorProvider.get(new LoggingSqlScriptVisitor()).execute(path.getSql());
}
} finally {
upgradeStatusTableService.tidyUp(connectionResources.getDataSource());
}
}
/**
* Static convenience method which creates the required {@link UpgradePath} | 3.68 |
querydsl_JTSGeometryExpressions_ymin | /**
* Returns Y minima of a bounding box 2d or 3d or a geometry.
*
* @param expr geometry
* @return y minima
*/
public static NumberExpression<Double> ymin(JTSGeometryExpression<?> expr) {
return Expressions.numberOperation(Double.class, SpatialOps.YMIN, expr);
} | 3.68 |
framework_FilesystemContainer_getFilter | /**
* Returns the file filter used to limit the files in this container.
*
* @return Used filter instance or null if no filter is assigned.
*/
public FilenameFilter getFilter() {
return filter;
} | 3.68 |
framework_Notification_close | /**
* Closes (hides) the notification.
* <p>
* If the notification is not shown, does nothing.
*
* @param userOriginated
* <code>true</code> if the notification was closed because the
* user clicked on it, <code>false</code> if the notification was
* closed from the server
* @since 8.4
*/
protected void close(boolean userOriginated) {
if (!isAttached()) {
return;
}
remove();
fireEvent(new CloseEvent(this, userOriginated));
} | 3.68 |
framework_VComboBox_setPagingEnabled | /**
* Sets whether paging should be enabled. If paging is enabled, only a certain
* number of items are visible at a time, and a scrollbar or buttons are
* visible to change the page. If paging is turned off, all options are
* rendered into the popup menu.
*
* @param paging
* Should the paging be turned on?
*/
public void setPagingEnabled(boolean paging) {
debug("VComboBox.SP: setPagingEnabled(" + paging + ")");
if (isPagingEnabled == paging) {
return;
}
if (paging) {
down.getStyle().clearDisplay();
up.getStyle().clearDisplay();
status.getStyle().clearDisplay();
} else {
down.getStyle().setDisplay(Display.NONE);
up.getStyle().setDisplay(Display.NONE);
status.getStyle().setDisplay(Display.NONE);
}
isPagingEnabled = paging;
} | 3.68 |
hadoop_StagingCommitter_getJobPath | /**
* Compute the path under which all job attempts will be placed.
* @return the path to store job attempt data.
*/
protected Path getJobPath() {
return getPendingJobAttemptsPath(commitsDirectory);
} | 3.68 |
morf_DatabaseMetaDataProvider_tableTypesForTables | /**
* Types for {@link DatabaseMetaData#getTables(String, String, String, String[])}
* used by {@link #loadAllTableNames()}.
*
* @return Array of relevant JDBC types.
*/
protected String[] tableTypesForTables() {
return new String[] { "TABLE" };
} | 3.68 |
flink_SignalHandler_register | /**
* Register some signal handlers.
*
* @param LOG The slf4j logger
*/
public static void register(final Logger LOG) {
synchronized (SignalHandler.class) {
if (registered) {
return;
}
registered = true;
final String[] SIGNALS =
OperatingSystem.isWindows()
? new String[] {"TERM", "INT"}
: new String[] {"TERM", "HUP", "INT"};
StringBuilder bld = new StringBuilder();
bld.append("Registered UNIX signal handlers for [");
String separator = "";
for (String signalName : SIGNALS) {
try {
new Handler(signalName, LOG);
bld.append(separator);
bld.append(signalName);
separator = ", ";
} catch (Exception e) {
LOG.info("Error while registering signal handler", e);
}
}
bld.append("]");
LOG.info(bld.toString());
}
} | 3.68 |
framework_WrappedHttpSession_enumerationToSet | // Helper shared with WrappedPortletSession
static <T> Set<T> enumerationToSet(Enumeration<T> values) {
HashSet<T> set = new HashSet<>();
while (values.hasMoreElements()) {
set.add(values.nextElement());
}
return Collections.unmodifiableSet(set);
} | 3.68 |
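A standalone sketch mirroring the helper above; `java.util.Collections.list(...)` would also work, but the explicit loop matches the original. The `Vector` is only a convenient source of an `Enumeration` for the demo:

```java
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Set;
import java.util.Vector;

// Standalone sketch mirroring enumerationToSet(); the Vector just supplies an Enumeration.
public final class EnumerationToSetDemo {

    static <T> Set<T> enumerationToSet(Enumeration<T> values) {
        Set<T> set = new HashSet<>();
        while (values.hasMoreElements()) {
            set.add(values.nextElement());
        }
        return Collections.unmodifiableSet(set);
    }

    public static void main(String[] args) {
        Vector<String> names = new Vector<>();
        names.add("sessionAttr1");
        names.add("sessionAttr2");
        Set<String> set = enumerationToSet(names.elements());
        System.out.println(set.size()); // 2
    }
}
```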
framework_GlobalResourceHandler_getUri | /**
* Gets a global URI for a resource if it's registered with this handler.
*
* @param connector
* the connector for which the uri should be generated.
* @param resource
* the resource for which the uri should be generated.
* @return an URI string, or <code>null</code> if the resource is not
* registered.
*/
public String getUri(ClientConnector connector,
ConnectorResource resource) {
// app://APP/global/[ui]/[type]/[id]
String uri = legacyResourceKeys.get(resource);
if (uri != null && !uri.isEmpty()) {
return ApplicationConstants.APP_PROTOCOL_PREFIX
+ ApplicationConstants.APP_PATH + '/'
+ RESOURCE_REQUEST_PATH + connector.getUI().getUIId() + '/'
+ uri;
} else {
return null;
}
} | 3.68 |
hadoop_OBSObjectBucketUtils_renameBasedOnObject | /**
* The inner rename operation.
*
* @param owner OBS File System instance
* @param src path to be renamed
* @param dst new path after rename
* @return boolean
* @throws RenameFailedException if some criteria for a state changing rename
* was not met. This means work didn't happen;
* it's not something which is reported upstream
* to the FileSystem APIs, for which the
* semantics of "false" are pretty vague.
* @throws FileNotFoundException there's no source file.
* @throws IOException on IO failure.
* @throws ObsException on failures inside the OBS SDK
*/
static boolean renameBasedOnObject(final OBSFileSystem owner,
final Path src, final Path dst) throws RenameFailedException,
FileNotFoundException, IOException,
ObsException {
String srcKey = OBSCommonUtils.pathToKey(owner, src);
String dstKey = OBSCommonUtils.pathToKey(owner, dst);
if (srcKey.isEmpty()) {
LOG.error("rename: src [{}] is root directory", src);
throw new IOException(src + " is root directory");
}
// get the source file status; this raises a FNFE if there is no source
// file.
FileStatus srcStatus = owner.getFileStatus(src);
FileStatus dstStatus;
try {
dstStatus = owner.getFileStatus(dst);
// if there is no destination entry, an exception is raised.
// hence this code sequence can assume that there is something
// at the end of the path; the only detail being what it is and
// whether or not it can be the destination of the rename.
if (dstStatus.isDirectory()) {
String newDstKey = OBSCommonUtils.maybeAddTrailingSlash(dstKey);
String filename = srcKey.substring(
OBSCommonUtils.pathToKey(owner, src.getParent()).length()
+ 1);
newDstKey = newDstKey + filename;
dstKey = newDstKey;
dstStatus = owner.getFileStatus(
OBSCommonUtils.keyToPath(dstKey));
if (dstStatus.isDirectory()) {
throw new RenameFailedException(src, dst,
"new destination is an existed directory")
.withExitCode(false);
} else {
throw new RenameFailedException(src, dst,
"new destination is an existed file")
.withExitCode(false);
}
} else {
if (srcKey.equals(dstKey)) {
LOG.warn(
"rename: src and dest refer to the same file or"
+ " directory: {}",
dst);
return true;
} else {
throw new RenameFailedException(src, dst,
"destination is an existed file")
.withExitCode(false);
}
}
} catch (FileNotFoundException e) {
LOG.debug("rename: destination path {} not found", dst);
// Parent must exist
checkDestinationParent(owner, src, dst);
}
if (dstKey.startsWith(srcKey)
&& dstKey.charAt(srcKey.length()) == Path.SEPARATOR_CHAR) {
LOG.error("rename: dest [{}] cannot be a descendant of src [{}]",
dst, src);
return false;
}
// Ok! Time to start
if (srcStatus.isFile()) {
LOG.debug("rename: renaming file {} to {}", src, dst);
renameFile(owner, srcKey, dstKey, srcStatus);
} else {
LOG.debug("rename: renaming directory {} to {}", src, dst);
// This is a directory to directory copy
dstKey = OBSCommonUtils.maybeAddTrailingSlash(dstKey);
srcKey = OBSCommonUtils.maybeAddTrailingSlash(srcKey);
renameFolder(owner, srcKey, dstKey);
}
if (src.getParent() != dst.getParent()) {
// deleteUnnecessaryFakeDirectories(dst.getParent());
createFakeDirectoryIfNecessary(owner, src.getParent());
}
return true;
} | 3.68 |
streampipes_PipelineApi_all | /**
* Retrieves all pipelines owned by the current user.
*
* @return a list of all {@link org.apache.streampipes.model.pipeline.Pipeline} instances
*/
@Override
public List<Pipeline> all() {
return getAll(getBaseResourcePath());
} | 3.68 |
framework_VaadinService_storeSession | /**
* Called when the VaadinSession should be stored.
* <p>
* By default stores the VaadinSession in the underlying HTTP session.
*
* @since 7.6
* @param session
* the VaadinSession to store
* @param wrappedSession
* the underlying HTTP session
*/
protected void storeSession(VaadinSession session,
WrappedSession wrappedSession) {
assert VaadinSession.hasLock(this, wrappedSession);
writeToHttpSession(wrappedSession, session);
session.refreshTransients(wrappedSession, this);
} | 3.68 |
hbase_Bytes_iterateOnSplits | /**
* Iterate over keys within the passed range.
*/
public static Iterable<byte[]> iterateOnSplits(final byte[] a, final byte[] b, boolean inclusive,
final int num) {
byte[] aPadded;
byte[] bPadded;
if (a.length < b.length) {
aPadded = padTail(a, b.length - a.length);
bPadded = b;
} else if (b.length < a.length) {
aPadded = a;
bPadded = padTail(b, a.length - b.length);
} else {
aPadded = a;
bPadded = b;
}
if (compareTo(aPadded, bPadded) >= 0) {
throw new IllegalArgumentException("b <= a");
}
if (num <= 0) {
throw new IllegalArgumentException("num cannot be <= 0");
}
byte[] prependHeader = { 1, 0 };
final BigInteger startBI = new BigInteger(add(prependHeader, aPadded));
final BigInteger stopBI = new BigInteger(add(prependHeader, bPadded));
BigInteger diffBI = stopBI.subtract(startBI);
if (inclusive) {
diffBI = diffBI.add(BigInteger.ONE);
}
final BigInteger splitsBI = BigInteger.valueOf(num + 1);
// when diffBI < splitBI, use an additional byte to increase diffBI
if (diffBI.compareTo(splitsBI) < 0) {
byte[] aPaddedAdditional = new byte[aPadded.length + 1];
byte[] bPaddedAdditional = new byte[bPadded.length + 1];
for (int i = 0; i < aPadded.length; i++) {
aPaddedAdditional[i] = aPadded[i];
}
for (int j = 0; j < bPadded.length; j++) {
bPaddedAdditional[j] = bPadded[j];
}
aPaddedAdditional[aPadded.length] = 0;
bPaddedAdditional[bPadded.length] = 0;
return iterateOnSplits(aPaddedAdditional, bPaddedAdditional, inclusive, num);
}
final BigInteger intervalBI;
try {
intervalBI = diffBI.divide(splitsBI);
} catch (Exception e) {
LOG.error("Exception caught during division", e);
return null;
}
final Iterator<byte[]> iterator = new Iterator<byte[]>() {
private int i = -1;
@Override
public boolean hasNext() {
return i < num + 1;
}
@Override
public byte[] next() {
i++;
if (i == 0) return a;
if (i == num + 1) return b;
BigInteger curBI = startBI.add(intervalBI.multiply(BigInteger.valueOf(i)));
byte[] padded = curBI.toByteArray();
if (padded[1] == 0) padded = tail(padded, padded.length - 2);
else padded = tail(padded, padded.length - 1);
return padded;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
return new Iterable<byte[]>() {
@Override
public Iterator<byte[]> iterator() {
return iterator;
}
};
} | 3.68 |
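A hedged usage sketch: generating evenly spaced split keys between two row keys, e.g. when pre-splitting a table. The keys, split count, and the `Bytes.toBytes`/`Bytes.toStringBinary` helpers are assumptions for illustration and not part of the snippet above:

```java
// Illustrative only: assumes the HBase Bytes utility (including toBytes and toStringBinary)
// is available; the keys and split count are made up. The iterator yields the start key,
// `num` intermediate keys, and the stop key.
byte[] start = Bytes.toBytes("row-aaaa");
byte[] stop = Bytes.toBytes("row-zzzz");
for (byte[] splitKey : Bytes.iterateOnSplits(start, stop, false, 9)) {
    System.out.println(Bytes.toStringBinary(splitKey));
}
```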
flink_ExecNodeContext_getId | /** The unique identifier for each ExecNode in the JSON plan. */
int getId() {
return checkNotNull(id);
} | 3.68 |
hudi_SerializationUtils_deserialize | /**
* <p>
* Deserializes a single {@code Object} from an array of bytes.
* </p>
*
* <p>
* If the call site incorrectly types the return value, a {@link ClassCastException} is thrown from the call site.
* Without Generics in this declaration, the call site must type cast and can cause the same ClassCastException. Note
* that in both cases, the ClassCastException is in the call site, not in this method.
* </p>
*
* @param <T> the object type to be deserialized
* @param objectData the serialized object, must not be null
* @return the deserialized object
* @throws IllegalArgumentException if {@code objectData} is {@code null}
*/
public static <T> T deserialize(final byte[] objectData) {
if (objectData == null) {
throw new IllegalArgumentException("The byte[] must not be null");
}
return (T) SERIALIZER_REF.get().deserialize(objectData);
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_readAheadBytesRead | /**
* Total bytes read from readAhead buffer during a read operation.
*
* @param bytes the bytes to be incremented.
*/
@Override
public void readAheadBytesRead(long bytes) {
ioStatisticsStore.incrementCounter(StreamStatisticNames.READ_AHEAD_BYTES_READ, bytes);
} | 3.68 |
hadoop_ServletUtil_initHTML | /**
* Initial HTML header.
*
* @param response response.
* @param title title.
* @throws IOException raised on errors performing I/O.
* @return PrintWriter.
*/
public static PrintWriter initHTML(ServletResponse response, String title
) throws IOException {
response.setContentType("text/html");
PrintWriter out = response.getWriter();
out.println("<html>\n"
+ "<link rel='stylesheet' type='text/css' href='/static/hadoop.css'>\n"
+ "<title>" + title + "</title>\n"
+ "<body>\n"
+ "<h1>" + title + "</h1>\n");
return out;
} | 3.68 |
flink_MasterHooks_restoreMasterHooks | /**
* Calls the restore method given checkpoint master hooks and passes the given master state to
* them where state with a matching name is found.
*
* <p>If state is found and no hook with the same name is found, the method throws an exception,
* unless the {@code allowUnmatchedState} flag is set.
*
* @param masterHooks The hooks to call restore on
* @param states The state to pass to the hooks
* @param checkpointId The checkpoint ID of the restored checkpoint
* @param allowUnmatchedState If false, the method fails if not all states are picked up by a
*     hook.
* @param log The logger for log messages
* @throws FlinkException Thrown if the hooks throw an exception, or if the state
*     deserialization fails.
*/
public static void restoreMasterHooks(
final Map<String, MasterTriggerRestoreHook<?>> masterHooks,
final Collection<MasterState> states,
final long checkpointId,
final boolean allowUnmatchedState,
final Logger log)
throws FlinkException {
// early out
if (states == null || states.isEmpty() || masterHooks == null || masterHooks.isEmpty()) {
log.info("No master state to restore");
return;
}
log.info("Calling master restore hooks");
// collect the hooks
final LinkedHashMap<String, MasterTriggerRestoreHook<?>> allHooks =
new LinkedHashMap<>(masterHooks);
// first, deserialize all hook state
final ArrayList<Tuple2<MasterTriggerRestoreHook<?>, Object>> hooksAndStates =
new ArrayList<>();
for (MasterState state : states) {
if (state != null) {
final String name = state.name();
final MasterTriggerRestoreHook<?> hook = allHooks.remove(name);
if (hook != null) {
log.debug("Found state to restore for hook '{}'", name);
Object deserializedState = deserializeState(state, hook);
hooksAndStates.add(new Tuple2<>(hook, deserializedState));
} else if (!allowUnmatchedState) {
throw new IllegalStateException(
"Found state '" + state.name() + "' which is not resumed by any hook.");
} else {
log.info("Dropping unmatched state from '{}'", name);
}
}
}
// now that all is deserialized, call the hooks
for (Tuple2<MasterTriggerRestoreHook<?>, Object> hookAndState : hooksAndStates) {
restoreHook(hookAndState.f1, hookAndState.f0, checkpointId);
}
// trigger the remaining hooks without checkpointed state
for (MasterTriggerRestoreHook<?> hook : allHooks.values()) {
restoreHook(null, hook, checkpointId);
}
} | 3.68 |
hadoop_RegistryTypeUtils_ipcEndpoint | /**
* Create an IPC endpoint
* @param api API
* @param address the address as a tuple of (hostname, port)
* @return the new endpoint
*/
public static Endpoint ipcEndpoint(String api, InetSocketAddress address) {
return new Endpoint(api,
ADDRESS_HOSTNAME_AND_PORT,
ProtocolTypes.PROTOCOL_HADOOP_IPC,
address== null ? null: hostnamePortPair(address));
} | 3.68 |
hadoop_SuccessData_load | /**
* Load an instance from a file, then validate it.
* @param fs filesystem
* @param path path
* @return the loaded instance
* @throws IOException IO failure
* @throws ValidationFailure if the data is invalid
*/
public static SuccessData load(FileSystem fs, Path path)
throws IOException {
LOG.debug("Reading success data from {}", path);
SuccessData instance = serializer().load(fs, path);
instance.validate();
return instance;
} | 3.68 |
hadoop_CommonAuditContext_removeGlobalContextEntry | /**
* Remove a global entry.
* @param key key to clear.
*/
public static void removeGlobalContextEntry(String key) {
GLOBAL_CONTEXT_MAP.remove(key);
} | 3.68 |
querydsl_MathExpressions_sinh | /**
* Create a {@code sinh(num)} expression
*
* <p>Returns the hyperbolic sine of num radians.</p>
*
* @param num numeric expression
* @return sinh(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> sinh(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.SINH, num);
} | 3.68 |
hudi_UtilHelpers_getJDBCSchema | /***
* Calls the Spark function to get the schema through JDBC.
* The logic follows the corresponding implementations in Spark 2.4.x and Spark 3.x.
* @param options
* @return
* @throws Exception
*/
public static Schema getJDBCSchema(Map<String, String> options) {
Connection conn;
String url;
String table;
boolean tableExists;
try {
conn = createConnectionFactory(options);
url = options.get(JDBCOptions.JDBC_URL());
table = options.get(JDBCOptions.JDBC_TABLE_NAME());
tableExists = tableExists(conn, options);
} catch (Exception e) {
throw new HoodieSchemaFetchException("Failed to connect to jdbc", e);
}
if (!tableExists) {
throw new HoodieSchemaFetchException(String.format("%s table does not exists!", table));
}
try {
JdbcDialect dialect = JdbcDialects.get(url);
try (PreparedStatement statement = conn.prepareStatement(dialect.getSchemaQuery(table))) {
statement.setQueryTimeout(Integer.parseInt(options.get("queryTimeout")));
try (ResultSet rs = statement.executeQuery()) {
StructType structType;
if (Boolean.parseBoolean(options.get("nullable"))) {
structType = SparkJdbcUtils.getSchema(rs, dialect, true);
} else {
structType = SparkJdbcUtils.getSchema(rs, dialect, false);
}
return AvroConversionUtils.convertStructTypeToAvroSchema(structType, table, "hoodie." + table);
}
}
} catch (HoodieException e) {
throw e;
} catch (Exception e) {
throw new HoodieSchemaFetchException(String.format("Unable to fetch schema from %s table", table), e);
}
} | 3.68 |
querydsl_DateExpression_coalesce | /**
* Create a {@code coalesce(this, args...)} expression
*
* @param args additional arguments
* @return coalesce
*/
@Override
@SuppressWarnings("unchecked")
public DateExpression<T> coalesce(T... args) {
Coalesce<T> coalesce = new Coalesce<T>(getType(), mixin);
for (T arg : args) {
coalesce.add(arg);
}
return coalesce.asDate();
} | 3.68 |
hbase_ReversedMobStoreScanner_next | /**
* First reads the cells from HBase. If a cell is a reference cell (which has the
* reference tag), the scanner needs to seek that cell in the mob file and use the cell
* found there as the result.
*/
@Override
public boolean next(List<Cell> outResult, ScannerContext ctx) throws IOException {
boolean result = super.next(outResult, ctx);
if (!rawMobScan) {
// retrieve the mob data
if (outResult.isEmpty()) {
return result;
}
long mobKVCount = 0;
long mobKVSize = 0;
for (int i = 0; i < outResult.size(); i++) {
Cell cell = outResult.get(i);
if (MobUtils.isMobReferenceCell(cell)) {
MobCell mobCell =
mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss);
mobKVCount++;
mobKVSize += mobCell.getCell().getValueLength();
outResult.set(i, mobCell.getCell());
// Keep the MobCell here unless we shipped the RPC or close the scanner.
referencedMobCells.add(mobCell);
}
}
mobStore.updateMobScanCellsCount(mobKVCount);
mobStore.updateMobScanCellsSize(mobKVSize);
}
return result;
} | 3.68 |
framework_InlineDateFieldElement_getFocusElement | /**
* Returns the element which receives focus when the component is focused.
*
* @return the element which receives focus when the component is focused
* @since 8.1.1
*/
public WebElement getFocusElement() {
return findElement(By.tagName("table"));
} | 3.68 |
hbase_MetricsBalancer_initSource | /**
* A function to instantiate the metrics source. This function can be overridden in subclasses
* to provide extended sources
*/
protected void initSource() {
source = CompatibilitySingletonFactory.getInstance(MetricsBalancerSource.class);
} | 3.68 |
hadoop_AllocateResponse_collectorInfo | /**
* Set the <code>collectorInfo</code> of the response.
* @see AllocateResponse#setCollectorInfo(CollectorInfo)
* @param collectorInfo <code>collectorInfo</code> of the response which
* contains collector address, RM id, version and collector token.
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder collectorInfo(
CollectorInfo collectorInfo) {
allocateResponse.setCollectorInfo(collectorInfo);
return this;
} | 3.68 |
framework_StaticSection_setDescription | /**
* Sets the tooltip for the cell to be shown with the given content
* mode.
*
* @see ContentMode
* @param description
* the tooltip to show when hovering the cell
* @param descriptionContentMode
* the content mode to use for the tooltip (HTML or plain
* text)
* @since 8.4
*/
public void setDescription(String description,
ContentMode descriptionContentMode) {
setDescription(description);
setDescriptionContentMode(descriptionContentMode);
} | 3.68 |
flink_ZooKeeperStateHandleStore_clearEntries | /**
* Recursively deletes all children.
*
* @throws Exception ZK errors
*/
@Override
public void clearEntries() throws Exception {
final String path = "/" + client.getNamespace();
LOG.info("Removing {} from ZooKeeper", path);
ZKPaths.deleteChildren(client.getZookeeperClient().getZooKeeper(), path, true);
} | 3.68 |
hbase_Response_setBody | /**
* @param body the response body
*/
public void setBody(byte[] body) {
this.body = body;
} | 3.68 |
flink_FlinkJoinToMultiJoinRule_combineJoinFilters | /**
* Combines the join filters from the left and right inputs (if they are MultiJoinRels) with the
* join filter in the joinrel into a single AND'd join filter, unless the inputs correspond to
* null generating inputs in an outer join.
*
* @param join Join
* @param left Left input of the join
* @param right Right input of the join
* @return combined join filters AND-ed together
*/
private List<RexNode> combineJoinFilters(
Join join, RelNode left, RelNode right, List<Boolean> inputNullGenFieldList) {
JoinRelType joinType = join.getJoinType();
JoinInfo joinInfo = join.analyzeCondition();
ImmutableIntList leftKeys = joinInfo.leftKeys;
ImmutableIntList rightKeys = joinInfo.rightKeys;
// AND the join condition if this isn't a left or right outer join; In those cases, the
// outer join condition is already tracked separately.
final List<RexNode> filters = new ArrayList<>();
if ((joinType != JoinRelType.LEFT) && (joinType != JoinRelType.RIGHT)) {
filters.add(join.getCondition());
}
if (canCombine(
left,
leftKeys,
joinType,
joinType.generatesNullsOnLeft(),
true,
inputNullGenFieldList,
0)) {
filters.add(((MultiJoin) left).getJoinFilter());
}
// Need to adjust the RexInputs of the right child, since those need to shift over to the
// right.
if (canCombine(
right,
rightKeys,
joinType,
joinType.generatesNullsOnRight(),
false,
inputNullGenFieldList,
left.getRowType().getFieldCount())) {
MultiJoin multiJoin = (MultiJoin) right;
filters.add(shiftRightFilter(join, left, multiJoin, multiJoin.getJoinFilter()));
}
return filters;
} | 3.68 |
framework_FieldGroup_clear | /**
* Clears the value of all fields.
*
* @since 7.4
*/
public void clear() {
for (Field<?> f : getFields()) {
if (f instanceof AbstractField) {
((AbstractField) f).clear();
}
}
} | 3.68 |
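A short usage sketch of the method above with Vaadin 7 style data binding; the Person bean is a hypothetical helper, while the FieldGroup and bind calls are the regular API.

import com.vaadin.data.fieldgroup.FieldGroup;
import com.vaadin.data.util.BeanItem;
import com.vaadin.ui.TextField;

TextField nameField = new TextField("Name");
FieldGroup fieldGroup = new FieldGroup(new BeanItem<>(new Person("Alice")));  // Person is hypothetical
fieldGroup.bind(nameField, "name");

// Later, e.g. from a "Reset" button handler:
fieldGroup.clear();   // empties every bound field; with buffering on, the item is untouched until commit()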
pulsar_ManagedCursorImpl_setAcknowledgedPosition | /**
*
* @param newMarkDeletePosition
* the new acknowledged position
* @return the previous acknowledged position
*/
PositionImpl setAcknowledgedPosition(PositionImpl newMarkDeletePosition) {
if (newMarkDeletePosition.compareTo(markDeletePosition) < 0) {
throw new MarkDeletingMarkedPosition(
"Mark deleting an already mark-deleted position. Current mark-delete: " + markDeletePosition
+ " -- attempted mark delete: " + newMarkDeletePosition);
}
PositionImpl oldMarkDeletePosition = markDeletePosition;
if (!newMarkDeletePosition.equals(oldMarkDeletePosition)) {
long skippedEntries = 0;
if (newMarkDeletePosition.getLedgerId() == oldMarkDeletePosition.getLedgerId()
&& newMarkDeletePosition.getEntryId() == oldMarkDeletePosition.getEntryId() + 1) {
// Mark-deleting the position next to current one
skippedEntries = individualDeletedMessages.contains(newMarkDeletePosition.getLedgerId(),
newMarkDeletePosition.getEntryId()) ? 0 : 1;
} else {
skippedEntries = getNumberOfEntries(Range.openClosed(oldMarkDeletePosition, newMarkDeletePosition));
}
PositionImpl positionAfterNewMarkDelete = ledger.getNextValidPosition(newMarkDeletePosition);
            // Sometimes ranges are connected but belong to different ledgers, so they are placed sequentially,
            // e.g. (2:10..3:15] can be returned as (2:10..2:15],[3:0..3:15]. Iterate over the connected ranges
            // and find the last non-connected range, which gives the new markDeletePosition.
while (positionAfterNewMarkDelete.compareTo(ledger.lastConfirmedEntry) <= 0) {
if (individualDeletedMessages.contains(positionAfterNewMarkDelete.getLedgerId(),
positionAfterNewMarkDelete.getEntryId())) {
Range<PositionImpl> rangeToBeMarkDeleted = individualDeletedMessages.rangeContaining(
positionAfterNewMarkDelete.getLedgerId(), positionAfterNewMarkDelete.getEntryId());
newMarkDeletePosition = rangeToBeMarkDeleted.upperEndpoint();
positionAfterNewMarkDelete = ledger.getNextValidPosition(newMarkDeletePosition);
// check if next valid position is also deleted and part of the deleted-range
continue;
}
break;
}
if (log.isDebugEnabled()) {
log.debug("[{}] Moved ack position from: {} to: {} -- skipped: {}", ledger.getName(),
oldMarkDeletePosition, newMarkDeletePosition, skippedEntries);
}
MSG_CONSUMED_COUNTER_UPDATER.addAndGet(this, skippedEntries);
}
        // Advance the markDelete position and clear out the individually deleted messages up to it
markDeletePosition = newMarkDeletePosition;
individualDeletedMessages.removeAtMost(markDeletePosition.getLedgerId(), markDeletePosition.getEntryId());
READ_POSITION_UPDATER.updateAndGet(this, currentReadPosition -> {
if (currentReadPosition.compareTo(markDeletePosition) <= 0) {
// If the position that is mark-deleted is past the read position, it
// means that the client has skipped some entries. We need to move
// read position forward
PositionImpl newReadPosition = ledger.getNextValidPosition(markDeletePosition);
if (log.isDebugEnabled()) {
log.debug("[{}] Moved read position from: {} to: {}, and new mark-delete position {}",
ledger.getName(), currentReadPosition, newReadPosition, markDeletePosition);
}
ledger.onCursorReadPositionUpdated(ManagedCursorImpl.this, newReadPosition);
return newReadPosition;
} else {
return currentReadPosition;
}
});
return newMarkDeletePosition;
} | 3.68 |
hudi_AbstractTableFileSystemView_fetchAllFileSlices | /**
* Default implementation for fetching all file-slices for a partition-path.
*
* @param partitionPath Partition path
* @return file-slice stream
*/
Stream<FileSlice> fetchAllFileSlices(String partitionPath) {
return fetchAllStoredFileGroups(partitionPath).map(this::addBootstrapBaseFileIfPresent)
.flatMap(HoodieFileGroup::getAllFileSlices);
} | 3.68 |
flink_MapView_remove | /**
* Deletes the value for the given key.
*
* @param key The key for which the value is deleted.
* @throws Exception Thrown if the system cannot access the map.
*/
public void remove(K key) throws Exception {
map.remove(key);
} | 3.68 |
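A hedged sketch of where remove(...) is typically called: a Table API aggregate function whose accumulator holds a MapView. The function itself is illustrative, not from the source, and assumes a Flink version where the no-argument MapView constructor is available.

import org.apache.flink.table.api.dataview.MapView;
import org.apache.flink.table.functions.AggregateFunction;

public class DistinctCount extends AggregateFunction<Long, DistinctCount.Acc> {

    public static class Acc {
        public MapView<String, Integer> counts = new MapView<>();  // backed by state at runtime
        public long distinct = 0L;
    }

    @Override
    public Acc createAccumulator() {
        return new Acc();
    }

    public void accumulate(Acc acc, String value) throws Exception {
        Integer c = acc.counts.get(value);
        if (c == null) {
            acc.counts.put(value, 1);
            acc.distinct++;
        } else {
            acc.counts.put(value, c + 1);
        }
    }

    public void retract(Acc acc, String value) throws Exception {
        Integer c = acc.counts.get(value);
        if (c == null) {
            return;
        }
        if (c == 1) {
            acc.counts.remove(value);   // the method documented above
            acc.distinct--;
        } else {
            acc.counts.put(value, c - 1);
        }
    }

    @Override
    public Long getValue(Acc acc) {
        return acc.distinct;
    }
}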
morf_SchemaUtils_autoNumbersForTable | /**
* List auto-numbered columns for a given table.
*
* @param table The table
* @return The auto-numbered columns
*/
public static List<Column> autoNumbersForTable(Table table) {
return table.columns().stream().filter(Column::isAutoNumbered).collect(Collectors.toList());
} | 3.68 |
hudi_HoodieRowCreateHandle_write | /**
 * Writes an {@link InternalRow} to the underlying HoodieInternalRowFileWriter. Before writing, values for the meta columns are computed as required
 * and wrapped in a {@link HoodieInternalRow}. The {@link HoodieInternalRow} is what gets written to the HoodieInternalRowFileWriter.
*
* @param row instance of {@link InternalRow} that needs to be written to the fileWriter.
* @throws IOException
*/
public void write(InternalRow row) throws IOException {
if (populateMetaFields) {
writeRow(row);
} else {
writeRowNoMetaFields(row);
}
} | 3.68 |
hbase_SegmentScanner_seekToLastRow | /**
 * Seek the scanner to the first KeyValue of the last row
* @return true if scanner has values left, false if the underlying data is empty
*/
@Override
public boolean seekToLastRow() throws IOException {
if (closed) {
return false;
}
Cell higherCell = segment.isEmpty() ? null : segment.last();
if (higherCell == null) {
return false;
}
Cell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(higherCell);
if (seek(firstCellOnLastRow)) {
return true;
} else {
return seekToPreviousRow(higherCell);
}
} | 3.68 |
hadoop_FederationBlock_initLocalClusterPage | /**
* Initialize the Federation page of the local-cluster.
*
* @param tbody HTML tbody.
* @param lists subCluster page data list.
*/
private void initLocalClusterPage(TBODY<TABLE<Hamlet>> tbody, List<Map<String, String>> lists) {
Configuration config = this.router.getConfig();
SubClusterInfo localCluster = getSubClusterInfoByLocalCluster(config);
if (localCluster != null) {
try {
initSubClusterPageItem(tbody, localCluster, lists);
} catch (Exception e) {
LOG.error("init LocalCluster = {} page data error.", localCluster, e);
}
}
} | 3.68 |
hadoop_Lz4Codec_createInputStream | /**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}.
*
* @param in the stream to read compressed bytes from
* @param decompressor decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException raised on errors performing I/O.
*/
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
return new BlockDecompressorStream(in, decompressor, conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT));
} | 3.68 |
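A hedged end-to-end sketch of using the codec to read an LZ4-compressed file through the method above; the file path is illustrative.

import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.Lz4Codec;

public class ReadLz4File {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Lz4Codec codec = new Lz4Codec();
        codec.setConf(conf);  // buffer size is read from the configuration keys referenced above

        Decompressor decompressor = codec.createDecompressor();
        try (InputStream raw = fs.open(new Path("/data/events.lz4"));
             CompressionInputStream in = codec.createInputStream(raw, decompressor)) {
            // Copy the decompressed bytes to stdout; production code would typically
            // borrow the decompressor from CodecPool and return it afterwards.
            IOUtils.copyBytes(in, System.out, 4096, false);
        }
    }
}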
hadoop_NamenodeStatusReport_getNumBlocksMissing | /**
* Get the number of missing blocks.
*
* @return Number of missing blocks.
*/
public long getNumBlocksMissing() {
return this.numOfBlocksMissing;
} | 3.68 |
MagicPlugin_PreLoadEvent_registerRequirementsProcessor | /**
* Register a RequirementsProcessor for handling a specific type of requirement.
*
 * <p>Requirement types are 1:1 with processors; each type may only have one processor associated with it.
*
* <p>Processors must be re-registered with each load.
*
* <p>Example requirement block, which might appear in a spell, Selector or other config:
*
* <code>
* requirements:
* - type: skillapi
* skill: enchanting
* - type: avengers
* power: hulkout
* character: Hulk
* </code>
*
* @param requirementType The type of requirements this processor handles
* @param processor The processor to register
*/
public void registerRequirementsProcessor(String requirementType, RequirementsProcessor processor) {
if (requirementProcessors.containsKey(requirementType)) {
            controller.getLogger().warning("Tried to register RequirementsProcessor twice for the same type: " + requirementType);
}
requirementProcessors.put(requirementType, processor);
} | 3.68 |
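A hedged sketch of registering a processor from another Bukkit plugin. The Bukkit listener wiring is standard; the PreLoadEvent import path and the SkillRequirementsProcessor class are assumptions, with only registerRequirementsProcessor itself taken from the API above.

import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.plugin.java.JavaPlugin;

import com.elmakers.mine.bukkit.api.event.PreLoadEvent;  // package path assumed

public class SkillIntegrationPlugin extends JavaPlugin implements Listener {

    @Override
    public void onEnable() {
        getServer().getPluginManager().registerEvents(this, this);
    }

    @EventHandler
    public void onMagicPreLoad(PreLoadEvent event) {
        // Re-register on every load, as the javadoc above requires.
        // SkillRequirementsProcessor is a hypothetical class implementing RequirementsProcessor.
        event.registerRequirementsProcessor("skillapi", new SkillRequirementsProcessor());
    }
}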
hbase_WALFactory_createTailingReader | /**
* Create a tailing reader for the given path. Mainly used in replication.
*/
public static WALTailingReader createTailingReader(FileSystem fs, Path path, Configuration conf,
long startPosition) throws IOException {
ProtobufWALTailingReader reader = new ProtobufWALTailingReader();
reader.init(fs, path, conf, startPosition);
return reader;
} | 3.68 |
hadoop_BlockManagerParameters_withBlockData | /**
* Sets the object holding blocks data info for the underlying file.
*
* @param data The block data object.
* @return The builder.
*/
public BlockManagerParameters withBlockData(
final BlockData data) {
this.blockData = data;
return this;
} | 3.68 |
hbase_PrivateCellUtil_compareValue | /**
 * Compare a cell's value against the given comparator
 * @param cell the cell to use for comparison
 * @param comparator the {@link ByteArrayComparable} to use for comparison
 * @return the result of comparing the cell's value
*/
public static int compareValue(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferExtendedCell) {
return comparator.compareTo(((ByteBufferExtendedCell) cell).getValueByteBuffer(),
((ByteBufferExtendedCell) cell).getValuePosition(), cell.getValueLength());
}
return comparator.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.68 |
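A hedged sketch of the typical call pattern for the method above. PrivateCellUtil is HBase-internal, so this would normally appear inside HBase code; the cell construction and expected value are only illustrative.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.util.Bytes;

// Inside HBase-internal code:
Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
    .setRow(Bytes.toBytes("row1"))
    .setFamily(Bytes.toBytes("f"))
    .setQualifier(Bytes.toBytes("q"))
    .setTimestamp(System.currentTimeMillis())
    .setType(Cell.Type.Put)
    .setValue(Bytes.toBytes("expected"))
    .build();

// 0 means the cell's value equals "expected"; otherwise the comparator defines the ordering.
int cmp = PrivateCellUtil.compareValue(cell, new BinaryComparator(Bytes.toBytes("expected")));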