name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
morf_DirectoryDataSet_openInputStreamForTable | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlInputStreamProvider#openInputStreamForTable(java.lang.String)
*/
@Override
public InputStream openInputStreamForTable(String tableName) {
try {
return new FileInputStream(new File(directory, fileNameForTable(tableName)));
} catch (FileNotFoundException e) {
throw new RuntimeException("Error opening input stream", e);
}
} | 3.68 |
morf_XmlDataSetProducer_getDefaultValue | /**
* @see org.alfasoftware.morf.metadata.Column#getDefaultValue()
*/
@Override
public String getDefaultValue() {
return defaultValue;
} | 3.68 |
hbase_RegionCoprocessorHost_preWALRestore | /**
* Supports Coprocessor 'bypass'.
* @return true if default behavior should be bypassed, false otherwise
* @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with
* something that doesn't expose InterfaceAudience.Private classes.
*/
@Deprecated
public boolean preWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit)
throws IOException {
return execOperation(
coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(true) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preWALRestore(this, info, logKey, logEdit);
}
});
} | 3.68 |
pulsar_AuthorizationProvider_getPermissionsAsync | /**
* Get authorization-action permissions on a namespace.
* @param namespaceName
* @return CompletableFuture<Map<String, Set<AuthAction>>>
*/
default CompletableFuture<Map<String, Set<AuthAction>>> getPermissionsAsync(NamespaceName namespaceName) {
return FutureUtil.failedFuture(new IllegalStateException(
String.format("getPermissionsAsync on namespaceName %s is not supported by the Authorization",
namespaceName)));
} | 3.68 |
flink_LeaderElectionUtils_convertToString | /**
* Converts the passed {@link LeaderInformation} into a human-readable representation that can
* be used in log messages.
*/
public static String convertToString(LeaderInformation leaderInformation) {
return leaderInformation.isEmpty()
? "<no leader>"
: convertToString(
leaderInformation.getLeaderSessionID(),
leaderInformation.getLeaderAddress());
} | 3.68 |
dubbo_CollectionUtils_isEmpty | /**
* Return {@code true} if the supplied Collection is {@code null} or empty.
* Otherwise, return {@code false}.
*
* @param collection the Collection to check
* @return whether the given Collection is empty
*/
public static boolean isEmpty(Collection<?> collection) {
return collection == null || collection.isEmpty();
} | 3.68 |
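A minimal, standalone usage sketch of the null-safe contract above (class and method names here are illustrative, not part of Dubbo):

import java.util.Collection;
import java.util.Collections;
import java.util.List;

public class IsEmptyDemo {
    // Same null-safe contract as the snippet above.
    static boolean isEmpty(Collection<?> c) {
        return c == null || c.isEmpty();
    }

    public static void main(String[] args) {
        List<String> none = null;
        System.out.println(isEmpty(none));                    // true: null is treated as empty
        System.out.println(isEmpty(Collections.emptyList())); // true
        System.out.println(isEmpty(List.of("a")));            // false
    }
}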
querydsl_BeanPath_createList | /**
* Create a new List typed path
*
* @param <A>
* @param <E>
* @param property property name
* @param type property type
* @param queryType expression type
* @return property path
*/
@SuppressWarnings("unchecked")
protected <A, E extends SimpleExpression<? super A>> ListPath<A, E> createList(String property, Class<? super A> type, Class<? super E> queryType, PathInits inits) {
return add(new ListPath<A, E>(type, (Class) queryType, forProperty(property), inits));
} | 3.68 |
hbase_HRegionServer_getWriteRequestCount | /** Returns Current write count for all online regions. */
private long getWriteRequestCount() {
long writeCount = 0;
for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
writeCount += e.getValue().getWriteRequestsCount();
}
return writeCount;
} | 3.68 |
flink_AvroSchemaConverter_convertToTypeInfo | /**
* Converts an Avro schema string into a nested row structure with deterministic field order and
* data types that are compatible with Flink's Table & SQL API.
*
* @param avroSchemaString Avro schema definition string
* @return type information matching the schema
*/
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> convertToTypeInfo(String avroSchemaString) {
Preconditions.checkNotNull(avroSchemaString, "Avro schema must not be null.");
final Schema schema;
try {
schema = new Schema.Parser().parse(avroSchemaString);
} catch (SchemaParseException e) {
throw new IllegalArgumentException("Could not parse Avro schema string.", e);
}
return (TypeInformation<T>) convertToTypeInfo(schema);
} | 3.68 |
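A hedged usage sketch: the record schema below is made up, and the import path of AvroSchemaConverter is assumed to be its usual flink-avro location; for RECORD schemas the converter is expected to produce Row-typed information.

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter; // assumed package
import org.apache.flink.types.Row;

public class AvroTypeInfoDemo {
    public static void main(String[] args) {
        // Illustrative Avro record schema with two fields.
        String schemaJson =
            "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                + "{\"name\":\"id\",\"type\":\"long\"},"
                + "{\"name\":\"name\",\"type\":\"string\"}]}";
        // For a RECORD schema the result is expected to describe a Row (assumption).
        TypeInformation<Row> info = AvroSchemaConverter.convertToTypeInfo(schemaJson);
        System.out.println(info); // e.g. Row(id: Long, name: String) -- exact rendering may differ
    }
}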
hadoop_AMRMProxyService_authorizeAndGetInterceptorChain | /**
* Authorizes the request and returns the application specific request
* processing pipeline.
*
* @return the interceptor wrapper instance
* @throws YarnException if fails
*/
private RequestInterceptorChainWrapper authorizeAndGetInterceptorChain()
throws YarnException {
AMRMTokenIdentifier tokenIdentifier =
YarnServerSecurityUtils.authorizeRequest();
return getInterceptorChain(tokenIdentifier);
} | 3.68 |
flink_SubsequenceInputTypeStrategy_argument | /** Defines that we expect a single argument at the next position. */
public SubsequenceStrategyBuilder argument(ArgumentTypeStrategy argumentTypeStrategy) {
SequenceInputTypeStrategy singleArgumentStrategy =
new SequenceInputTypeStrategy(
Collections.singletonList(argumentTypeStrategy), null);
argumentsSplits.add(
new ArgumentsSplit(currentPos, currentPos + 1, singleArgumentStrategy));
currentPos += 1;
return this;
} | 3.68 |
framework_CvalChecker_getFirstLaunch | /*
* Get the GWT firstLaunch timestamp.
*/
String getFirstLaunch() {
try {
Class<?> clz = Class
.forName("com.google.gwt.dev.shell.CheckForUpdates");
return Preferences.userNodeForPackage(clz).get("firstLaunch",
"-");
} catch (ClassNotFoundException e) {
return "-";
}
} | 3.68 |
hbase_DefaultMobStoreFlusher_flushSnapshot | /**
* Flushes the snapshot of the MemStore. If this store is not a mob store, flush the cells in the
* snapshot to store files of HBase. If the store is a mob one, the flusher flushes the MemStore
* into two places. One is the store files of HBase, the other is the mob files.
* <ol>
* <li>Cells that are not PUT type or have the delete mark will be directly flushed to HBase.</li>
* <li>If the size of a cell value is larger than a threshold, it'll be flushed to a mob file,
* another cell with the path of this file will be flushed to HBase.</li>
* <li>If the size of a cell value is smaller than or equal to the threshold, it'll be flushed to
* HBase directly.</li>
* </ol>
*/
@Override
public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
MonitoredTask status, ThroughputController throughputController, FlushLifeCycleTracker tracker,
Consumer<Path> writerCreationTracker) throws IOException {
ArrayList<Path> result = new ArrayList<>();
long cellsCount = snapshot.getCellsCount();
if (cellsCount == 0) return result; // don't flush if there are no entries
// Use a store scanner to find which rows to flush.
InternalScanner scanner = createScanner(snapshot.getScanners(), tracker);
StoreFileWriter writer;
try {
// TODO: We can fail in the below block before we complete adding this flush to
// list of store files. Add cleanup of anything put on filesystem if we fail.
synchronized (flushLock) {
status.setStatus("Flushing " + store + ": creating writer");
// Write the map out to the disk
writer = createWriter(snapshot, true, writerCreationTracker);
IOException e = null;
try {
// It's a mob store, flush the cells in a mob way. This is the difference of flushing
// between a normal and a mob store.
performMobFlush(snapshot, cacheFlushId, scanner, writer, status, throughputController,
writerCreationTracker);
} catch (IOException ioe) {
e = ioe;
// throw the exception out
throw ioe;
} finally {
if (e != null) {
writer.close();
} else {
finalizeWriter(writer, cacheFlushId, status);
}
}
}
} finally {
scanner.close();
}
LOG.info("Mob store is flushed, sequenceid=" + cacheFlushId + ", memsize="
+ StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getDataSize(), "", 1)
+ ", hasBloomFilter=" + writer.hasGeneralBloom() + ", into tmp file " + writer.getPath());
result.add(writer.getPath());
return result;
} | 3.68 |
hmily_BindData_of | /**
* Of bind data.
*
* @param <T> the type parameter
* @param type the type
* @param value the value
* @return the bind data
*/
public static <T> BindData<T> of(final DataType type, final Supplier<T> value) {
return new BindData<>(type, value);
} | 3.68 |
pulsar_LedgerMetadataUtils_buildMetadataForSchema | /**
* Build additional metadata for a Schema.
*
* @param schemaId id of the schema
* @return an immutable map which describes the schema
*/
public static Map<String, byte[]> buildMetadataForSchema(String schemaId) {
return Map.of(
METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR,
METADATA_PROPERTY_COMPONENT, METADATA_PROPERTY_COMPONENT_SCHEMA,
METADATA_PROPERTY_SCHEMAID, schemaId.getBytes(StandardCharsets.UTF_8)
);
} | 3.68 |
framework_SelectorPredicate_setWildcard | /**
* @param wildcard
* the wildcard to set
*/
public void setWildcard(boolean wildcard) {
this.wildcard = wildcard;
} | 3.68 |
hadoop_TFile_getRecordNumNear | /**
* Get the RecordNum for the first key-value pair in a compressed block
* whose byte offset in the TFile is greater than or equal to the specified
* offset.
*
* @param offset
* the user supplied offset.
* @return the RecordNum to the corresponding entry. If no such entry
* exists, it returns the total entry count.
* @throws IOException raised on errors performing I/O.
*/
public long getRecordNumNear(long offset) throws IOException {
return getRecordNumByLocation(getLocationNear(offset));
} | 3.68 |
hadoop_AdlFsInputStream_getPos | /**
* Return the current offset from the start of the file.
*/
@Override
public synchronized long getPos() throws IOException {
return in.getPos();
} | 3.68 |
flink_MemorySegment_swapBytes | /**
* Swaps bytes between two memory segments, using the given auxiliary buffer.
*
* @param tempBuffer The auxiliary buffer in which to put data during triangle swap.
* @param seg2 Segment to swap bytes with
* @param offset1 Offset of this segment to start swapping
* @param offset2 Offset of seg2 to start swapping
* @param len Length of the swapped memory region
*/
public void swapBytes(
byte[] tempBuffer, MemorySegment seg2, int offset1, int offset2, int len) {
if ((offset1 | offset2 | len | (tempBuffer.length - len)) >= 0) {
final long thisPos = this.address + offset1;
final long otherPos = seg2.address + offset2;
if (thisPos <= this.addressLimit - len && otherPos <= seg2.addressLimit - len) {
// this -> temp buffer
UNSAFE.copyMemory(
this.heapMemory, thisPos, tempBuffer, BYTE_ARRAY_BASE_OFFSET, len);
// other -> this
UNSAFE.copyMemory(seg2.heapMemory, otherPos, this.heapMemory, thisPos, len);
// temp buffer -> other
UNSAFE.copyMemory(
tempBuffer, BYTE_ARRAY_BASE_OFFSET, seg2.heapMemory, otherPos, len);
return;
} else if (this.address > this.addressLimit) {
throw new IllegalStateException("this memory segment has been freed.");
} else if (seg2.address > seg2.addressLimit) {
throw new IllegalStateException("other memory segment has been freed.");
}
}
// index is in fact invalid
throw new IndexOutOfBoundsException(
String.format(
"offset1=%d, offset2=%d, len=%d, bufferSize=%d, address1=%d, address2=%d",
offset1, offset2, len, tempBuffer.length, this.address, seg2.address));
} | 3.68 |
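The heart of the method is a three-way ("triangle") copy through the auxiliary buffer; a plain byte-array sketch of the same idea, without Unsafe or the bounds checks:

import java.util.Arrays;

public class TriangleSwapDemo {
    // Swap len bytes between a[offA..] and b[offB..] using temp as scratch space.
    static void swapBytes(byte[] temp, byte[] a, byte[] b, int offA, int offB, int len) {
        System.arraycopy(a, offA, temp, 0, len); // a -> temp
        System.arraycopy(b, offB, a, offA, len); // b -> a
        System.arraycopy(temp, 0, b, offB, len); // temp -> b
    }

    public static void main(String[] args) {
        byte[] a = {1, 2, 3, 4};
        byte[] b = {9, 8, 7, 6};
        swapBytes(new byte[2], a, b, 1, 0, 2);
        System.out.println(Arrays.toString(a)); // [1, 9, 8, 4]
        System.out.println(Arrays.toString(b)); // [2, 3, 7, 6]
    }
}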
framework_VCalendar_setLastDayNumber | /**
* Set the day number on which the week ends.
*
* @param dayNumber
* The number of the day
*/
public void setLastDayNumber(int dayNumber) {
assert (dayNumber >= 1 && dayNumber <= 7);
lastDay = dayNumber;
} | 3.68 |
hbase_WALEntryStream_next | /**
* Returns the next WAL entry in this stream and advance the stream. Will throw
* {@link IllegalStateException} if you do not call {@link #hasNext()} before calling this method.
* Please see the javadoc of {@link #peek()} method to see why we need this.
* @throws IllegalStateException Every time you want to call this method, please call
* {@link #hasNext()} first, otherwise a
* {@link IllegalStateException} will be thrown.
* @see #hasNext()
* @see #peek()
*/
public Entry next() {
if (currentEntry == null) {
throw new IllegalStateException("Call hasNext first");
}
Entry save = peek();
currentPositionOfEntry = currentPositionOfReader;
currentEntry = null;
state = null;
return save;
} | 3.68 |
flink_SubsequenceInputTypeStrategy_finish | /** Constructs the given strategy. */
public InputTypeStrategy finish() {
return new SubsequenceInputTypeStrategy(
argumentsSplits, ConstantArgumentCount.of(currentPos));
} | 3.68 |
hadoop_AllocateResponse_getRejectedSchedulingRequests | /**
* Get a list of all SchedulingRequests that the RM has rejected between
* this allocate call and the previous one.
* @return List of RejectedSchedulingRequests.
*/
@Public
@Unstable
public List<RejectedSchedulingRequest> getRejectedSchedulingRequests() {
return Collections.emptyList();
} | 3.68 |
dubbo_StringUtils_parseQueryString | /**
* parse query string to Parameters.
*
* @param qs query string.
* @return map of parsed key-value pairs.
*/
public static Map<String, String> parseQueryString(String qs) {
if (isEmpty(qs)) {
return new HashMap<String, String>();
}
return parseKeyValuePair(qs, "\\&");
} | 3.68 |
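A standalone sketch of the key/value split the method delegates to; the real parseKeyValuePair (not shown above) may handle more edge cases, so this is illustrative only:

import java.util.HashMap;
import java.util.Map;

public class QueryStringDemo {
    // Simplified stand-in for parseKeyValuePair(qs, "\\&"): split on '&', then on the first '='.
    static Map<String, String> parseQueryString(String qs) {
        Map<String, String> result = new HashMap<>();
        if (qs == null || qs.isEmpty()) {
            return result;
        }
        for (String pair : qs.split("\\&")) {
            int i = pair.indexOf('=');
            if (i > 0) {
                result.put(pair.substring(0, i), pair.substring(i + 1));
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(parseQueryString("version=1.0.0&side=provider"));
        // e.g. {side=provider, version=1.0.0}
    }
}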
hbase_RestoreSnapshotHelper_hasRegionsToRestore | /** Returns true if there are regions to restore */
public boolean hasRegionsToRestore() {
return this.regionsToRestore != null && this.regionsToRestore.size() > 0;
} | 3.68 |
hadoop_Paths_getRelativePath | /**
* Using {@code URI#relativize()}, build the relative path from the
* base path to the full path.
* If {@code fullPath} is not a child of {@code basePath} the outcome
* is undefined.
* @param basePath base path
* @param fullPath full path under the base path.
* @return the relative path
*/
public static String getRelativePath(Path basePath,
Path fullPath) {
return basePath.toUri().relativize(fullPath.toUri()).getPath();
} | 3.68 |
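The behaviour follows directly from java.net.URI#relativize, so a dependency-free sketch shows both the normal case and why the javadoc calls the non-child case undefined:

import java.net.URI;

public class RelativePathDemo {
    public static void main(String[] args) {
        URI base = URI.create("/data/output/");
        URI full = URI.create("/data/output/year=2021/part-0000");
        System.out.println(base.relativize(full).getPath()); // year=2021/part-0000

        // When fullPath is not under basePath, relativize() returns the given URI unchanged,
        // which is why the outcome is described as undefined for that case.
        URI other = URI.create("/elsewhere/file");
        System.out.println(base.relativize(other).getPath()); // /elsewhere/file
    }
}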
flink_FutureUtils_runAsync | /**
* Returns a future which is completed when {@link RunnableWithException} is finished.
*
* @param runnable represents the task
* @param executor to execute the runnable
* @return Future which is completed when runnable is finished
*/
public static CompletableFuture<Void> runAsync(
RunnableWithException runnable, Executor executor) {
return CompletableFuture.runAsync(
() -> {
try {
runnable.run();
} catch (Throwable e) {
throw new CompletionException(e);
}
},
executor);
} | 3.68 |
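A JDK-only sketch of the same pattern, showing how a checked-exception runnable is surfaced through the returned future (names here are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class RunAsyncDemo {
    interface RunnableWithException { void run() throws Exception; }

    // Same shape as the helper above: checked exceptions are re-thrown
    // as CompletionException so they complete the future exceptionally.
    static CompletableFuture<Void> runAsync(RunnableWithException task, ExecutorService executor) {
        return CompletableFuture.runAsync(() -> {
            try {
                task.run();
            } catch (Throwable t) {
                throw new CompletionException(t);
            }
        }, executor);
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CompletableFuture<Void> f = runAsync(() -> { throw new Exception("boom"); }, pool);
        f.whenComplete((v, t) ->
            System.out.println(t == null ? "completed" : "failed with: " + t.getCause()));
        f.exceptionally(t -> null).join(); // wait for completion without re-throwing
        pool.shutdown();
    }
}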
hadoop_ChangeDetectionPolicy_getPolicy | /**
* Reads the change detection policy from Configuration.
*
* @param configuration the configuration
* @return the policy
*/
public static ChangeDetectionPolicy getPolicy(Configuration configuration) {
Mode mode = Mode.fromConfiguration(configuration);
Source source = Source.fromConfiguration(configuration);
boolean requireVersion = configuration.getBoolean(
CHANGE_DETECT_REQUIRE_VERSION, CHANGE_DETECT_REQUIRE_VERSION_DEFAULT);
return createPolicy(mode, source, requireVersion);
} | 3.68 |
hbase_FavoredStochasticBalancer_generateFavoredNodesForDaughter | /**
* Generate Favored Nodes for daughters during region split.
* <p/>
* If the parent does not have FN, regenerates them for the daughters.
* <p/>
* If the parent has FN, inherit two FN from parent for each daughter and generate the remaining.
* The primary FN for both the daughters should be the same as parent. Inherit the secondary FN
* from the parent but keep it different for each daughter. Choose the remaining FN randomly. This
* would give us better distribution over a period of time after enough splits.
*/
@Override
public void generateFavoredNodesForDaughter(List<ServerName> servers, RegionInfo parent,
RegionInfo regionA, RegionInfo regionB) throws IOException {
Map<RegionInfo, List<ServerName>> result = new HashMap<>();
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
helper.initialize();
List<ServerName> parentFavoredNodes = fnm.getFavoredNodes(parent);
if (parentFavoredNodes == null) {
LOG.debug("Unable to find favored nodes for parent, " + parent
+ " generating new favored nodes for daughter");
result.put(regionA, helper.generateFavoredNodes(regionA));
result.put(regionB, helper.generateFavoredNodes(regionB));
} else {
// Lets get the primary and secondary from parent for regionA
Set<ServerName> regionAFN =
getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY);
result.put(regionA, Lists.newArrayList(regionAFN));
// Lets get the primary and tertiary from parent for regionB
Set<ServerName> regionBFN =
getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY);
result.put(regionB, Lists.newArrayList(regionBFN));
}
fnm.updateFavoredNodes(result);
} | 3.68 |
framework_TableSqlContainer_insertTestData | /**
* Adds test data to the test table
*
* @param connectionPool
* @throws SQLException
*/
private void insertTestData(JDBCConnectionPool connectionPool)
throws SQLException {
Connection conn = null;
try {
conn = connectionPool.reserveConnection();
Statement statement = conn.createStatement();
statement.executeUpdate(
"INSERT INTO mytable VALUES(1, '2013-05-24', 'A0')");
statement.executeUpdate(
"INSERT INTO mytable VALUES(2, '2013-04-26', 'A1')");
statement.executeUpdate(
"INSERT INTO mytable VALUES(3, '2013-05-27', 'B0')");
statement.executeUpdate(
"INSERT INTO mytable VALUES(4, '2013-04-28', 'B1')");
statement.close();
conn.commit();
} catch (SQLException e) {
e.printStackTrace();
} finally {
connectionPool.releaseConnection(conn);
}
} | 3.68 |
framework_VaadinFinderLocatorStrategy_getElementsByPathStartingAt | /**
* {@inheritDoc}
*/
@Override
public List<Element> getElementsByPathStartingAt(String path,
Element root) {
List<SelectorPredicate> postFilters = SelectorPredicate
.extractPostFilterPredicates(path);
if (!postFilters.isEmpty()) {
path = path.substring(1, path.lastIndexOf(')'));
}
final ComponentConnector searchRoot = Util.findPaintable(client, root);
List<Element> elements = getElementsByPathStartingAtConnector(path,
searchRoot, root);
for (SelectorPredicate p : postFilters) {
// Post filtering supports only indexes and follows instruction
// blindly. Index that is outside of our list results into an empty
// list and multiple indexes are likely to ruin a search completely
if (p.getIndex() >= 0) {
if (p.getIndex() >= elements.size()) {
elements.clear();
} else {
Element e = elements.get(p.getIndex());
elements.clear();
elements.add(e);
}
}
}
return elements;
} | 3.68 |
framework_SQLContainer_hasContainerFilters | /**
* Returns true if any filters have been applied to the container.
*
* @return true if the container has filters applied, false otherwise
* @since 7.1
*/
public boolean hasContainerFilters() {
return !getContainerFilters().isEmpty();
} | 3.68 |
morf_InsertStatementBuilder_withDefaults | /**
* Specifies the defaults to use when inserting new fields.
*
* @param defaultValues the list of values to use as defaults
* @return this, for method chaining.
*/
public InsertStatementBuilder withDefaults(List<AliasedFieldBuilder> defaultValues) {
for(AliasedField currentValue : Builder.Helper.buildAll(defaultValues)) {
if (StringUtils.isBlank(currentValue.getAlias())) {
throw new IllegalArgumentException("Cannot specify a blank alias for a field default");
}
fieldDefaults.put(currentValue.getAlias(), currentValue);
}
return this;
} | 3.68 |
MagicPlugin_ActionFactory_registerResolver | /**
* Registers an action resolver.
*
* @param actionResolver
* The action resolver to register.
* @param highPriority
* When this is set to true, the resolver is registered such that
* it is used before any of the currently registered resolvers.
* @throws NullPointerException
* When actionResolver is null.
*/
public static void registerResolver(ActionResolver actionResolver,
boolean highPriority) {
Preconditions.checkNotNull(actionResolver);
if (!resolvers.contains(actionResolver)) {
if (highPriority) {
resolvers.add(0, actionResolver);
} else {
resolvers.add(actionResolver);
}
}
} | 3.68 |
hbase_StreamSlowMonitor_checkProcessTimeAndSpeed | /**
* Check if the packet process time shows that the relevant datanode is a slow node.
* @param datanodeInfo the datanode that processed the packet
* @param packetDataLen the data length of the packet (in bytes)
* @param processTimeMs the process time (in ms) of the packet on the datanode
* @param lastAckTimestamp the last acked timestamp of the packet on another datanode
* @param unfinished the number of replicas to which the packet has not yet been flushed
*/
public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen,
long processTimeMs, long lastAckTimestamp, int unfinished) {
long current = EnvironmentEdgeManager.currentTime();
// Here are two conditions used to determine whether a datanode is slow,
// 1. For small packet, we just have a simple time limit, without considering
// the size of the packet.
// 2. For large packet, we will calculate the speed, and check if the speed is too slow.
boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
|| (packetDataLen > minLengthForSpeedCheck
&& (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
if (slow) {
// Check if large diff ack timestamp between replicas,
// should try to avoid misjudgments that caused by GC STW.
if (
(lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2)
|| (lastAckTimestamp <= 0 && unfinished == 0)
) {
LOG.info(
"Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
+ "lastAckTimestamp={}, monitor name: {}",
datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name);
if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) {
excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack");
}
}
}
} | 3.68 |
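The two-branch slowness condition can be isolated into a small predicate; the thresholds below are illustrative, not HBase defaults:

public class SlowPacketCheckDemo {
    // Same shape as the condition above: small packets use a pure time limit,
    // large packets use a minimum throughput (bytes/ms, roughly KB/s).
    static boolean isSlow(long packetDataLen, long processTimeMs,
                          long minLengthForSpeedCheck, long slowPacketAckMs,
                          double minPacketFlushSpeedKBs) {
        return (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
            || (packetDataLen > minLengthForSpeedCheck
                && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
    }

    public static void main(String[] args) {
        // Illustrative thresholds: speed check above 64 KB, 100 ms ack limit, 20 KB/s floor.
        System.out.println(isSlow(1_000, 150, 64 * 1024, 100, 20));         // true: small packet, slow ack
        System.out.println(isSlow(1_000, 50, 64 * 1024, 100, 20));          // false: small packet, fast ack
        System.out.println(isSlow(1_000_000, 200_000, 64 * 1024, 100, 20)); // true: ~5 bytes/ms < 20
        System.out.println(isSlow(1_000_000, 10_000, 64 * 1024, 100, 20));  // false: ~100 bytes/ms
    }
}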
graphhopper_BaseGraph_edge | /**
* Create edge between nodes a and b
*
* @return EdgeIteratorState of newly created edge
*/
@Override
public EdgeIteratorState edge(int nodeA, int nodeB) {
if (isFrozen())
throw new IllegalStateException("Cannot create edge if graph is already frozen");
if (nodeA == nodeB)
// Loop edges would only make sense if their attributes were the same for both 'directions',
// because for routing algorithms (which ignore the way geometry) loop edges do not even
// have a well-defined 'direction'. So we either need to make sure the attributes
// are the same for both directions, or reject loop edges altogether. Since we currently
// don't know any use-case for loop edges in road networks (there is one for PT),
// we reject them here.
throw new IllegalArgumentException("Loop edges are not supported, got: " + nodeA + " - " + nodeB);
int edgeId = store.edge(nodeA, nodeB);
EdgeIteratorStateImpl edge = new EdgeIteratorStateImpl(this);
boolean valid = edge.init(edgeId, nodeB);
assert valid;
return edge;
} | 3.68 |
hbase_MasterObserver_postDeleteSnapshot | /**
* Called after the delete snapshot operation has been requested. Called as part of deleteSnapshot
* RPC call.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor of the snapshot to delete
*/
default void postDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot) throws IOException {
} | 3.68 |
framework_VConsole_setImplementation | /**
* Used by ApplicationConfiguration to initialize VConsole.
*
* @param console
*/
static void setImplementation(VDebugWindow console) {
impl = console;
} | 3.68 |
flink_HiveParallelismInference_limit | /**
* Apply limit to calculate the parallelism. Here limit is the limit in query <code>
* SELECT * FROM xxx LIMIT [limit]</code>.
*/
int limit(Long limit) {
if (!infer) {
return parallelism;
}
if (limit != null) {
parallelism = Math.min(parallelism, (int) (limit / 1000));
}
// make sure that parallelism is at least 1
return Math.max(1, parallelism);
} | 3.68 |
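A standalone sketch of the arithmetic: parallelism is capped at roughly one task per 1000 rows of the LIMIT and never drops below 1:

public class LimitParallelismDemo {
    // Same arithmetic as the snippet above.
    static int limit(boolean infer, int parallelism, Long limit) {
        if (!infer) {
            return parallelism;
        }
        if (limit != null) {
            parallelism = Math.min(parallelism, (int) (limit / 1000));
        }
        return Math.max(1, parallelism);
    }

    public static void main(String[] args) {
        System.out.println(limit(true, 32, 5_000L)); // 5   (LIMIT 5000 -> at most 5 tasks)
        System.out.println(limit(true, 32, 500L));   // 1   (500/1000 == 0, clamped to 1)
        System.out.println(limit(true, 32, null));   // 32  (no LIMIT, keep inferred value)
        System.out.println(limit(false, 4, 5_000L)); // 4   (inference disabled)
    }
}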
flink_JobEdge_setForward | /** Sets whether the edge is a forward edge. */
public void setForward(boolean forward) {
isForward = forward;
} | 3.68 |
morf_DeleteStatement_getLimit | /**
* Gets the limit.
*
* @return the limit on the number of deleted records.
*/
public Optional<Integer> getLimit() {
return limit;
} | 3.68 |
framework_SQLContainer_getPageLength | /**
* Returns the currently set page length.
*
* @return current page length
*/
public int getPageLength() {
return pageLength;
} | 3.68 |
framework_AbsoluteLayoutResizeComponents_addStartWithFullWidth | /**
* Build test layout for #8255
*/
private void addStartWithFullWidth(AbsoluteLayout layout) {
final Panel full = new Panel(
new CssLayout(new Label("Start Width 100%")));
full.setWidth("100%");
full.setId("expanding-panel");
layout.addComponent(full, "right:0;top:10px;");
layout.addComponent(expandButton(full), "left: 10px; top: 50px;");
} | 3.68 |
hadoop_JsonSerialization_fromBytes | /**
* Deserialize from a byte array.
* @param bytes byte array
* @throws IOException IO problems
* @throws EOFException not enough data
* @return the deserialized instance.
*/
public T fromBytes(byte[] bytes) throws IOException {
return fromJson(new String(bytes, 0, bytes.length, UTF_8));
} | 3.68 |
framework_VaadinService_verifyNoOtherSessionLocked | /**
* Checks that another {@link VaadinSession} instance is not locked. This is
* internally used by {@link VaadinSession#accessSynchronously(Runnable)}
* and {@link UI#accessSynchronously(Runnable)} to help avoid causing
* deadlocks.
*
* @since 7.1
* @param session
* the session that is being locked
* @throws IllegalStateException
* if the current thread holds the lock for another session
*/
public static void verifyNoOtherSessionLocked(VaadinSession session) {
if (isOtherSessionLocked(session)) {
throw new IllegalStateException(
"Can't access session while another session is locked by the same thread. This restriction is intended to help avoid deadlocks.");
}
} | 3.68 |
morf_JdbcUrlElements_getDatabaseName | /**
* @return the database name. The meaning of this varies between database types.
*/
public String getDatabaseName() {
return databaseName;
} | 3.68 |
flink_SubtaskStateStats_getSyncCheckpointDuration | /**
* @return Duration of the synchronous part of the checkpoint or <code>-1</code> if the runtime
* did not report this.
*/
public long getSyncCheckpointDuration() {
return syncCheckpointDuration;
} | 3.68 |
framework_VComboBox_selectLastItem | /**
* @deprecated use {@link SuggestionPopup#selectLastItem()} instead.
*/
@Deprecated
public void selectLastItem() {
debug("VComboBox.SM: selectLastItem()");
List<MenuItem> items = getItems();
MenuItem lastItem = items.get(items.size() - 1);
selectItem(lastItem);
} | 3.68 |
flink_StreamSource_markCanceledOrStopped | /**
* Marks this source as canceled or stopped.
*
* <p>This indicates that any exit of the {@link #run(Object, Output, OperatorChain)} method
* cannot be interpreted as the result of a finite source.
*/
protected void markCanceledOrStopped() {
this.canceledOrStopped = true;
} | 3.68 |
hadoop_ValueAggregatorBaseDescriptor_generateValueAggregator | /**
*
* @param type the aggregation type
* @return a value aggregator of the given type.
*/
static public ValueAggregator generateValueAggregator(String type) {
ValueAggregator retv = null;
if (type.compareToIgnoreCase(LONG_VALUE_SUM) == 0) {
retv = new LongValueSum();
} else if (type.compareToIgnoreCase(LONG_VALUE_MAX) == 0) {
retv = new LongValueMax();
} else if (type.compareToIgnoreCase(LONG_VALUE_MIN) == 0) {
retv = new LongValueMin();
} else if (type.compareToIgnoreCase(STRING_VALUE_MAX) == 0) {
retv = new StringValueMax();
} else if (type.compareToIgnoreCase(STRING_VALUE_MIN) == 0) {
retv = new StringValueMin();
} else if (type.compareToIgnoreCase(DOUBLE_VALUE_SUM) == 0) {
retv = new DoubleValueSum();
} else if (type.compareToIgnoreCase(UNIQ_VALUE_COUNT) == 0) {
retv = new UniqValueCount(maxNumItems);
} else if (type.compareToIgnoreCase(VALUE_HISTOGRAM) == 0) {
retv = new ValueHistogram();
}
return retv;
} | 3.68 |
morf_SqlDialect_getSqlForFloor | /**
* Converts the FLOOR function into SQL.
*
* @param function the function to convert.
* @return a string representation of the SQL.
* @see org.alfasoftware.morf.sql.element.Function#floor(AliasedField)
*/
protected String getSqlForFloor(Function function) {
return "FLOOR(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
flink_SSLUtils_createRestClientSSLEngineFactory | /**
* Creates a {@link SSLHandlerFactory} to be used by the REST Clients.
*
* @param config The application configuration.
*/
public static SSLHandlerFactory createRestClientSSLEngineFactory(final Configuration config)
throws Exception {
ClientAuth clientAuth =
SecurityOptions.isRestSSLAuthenticationEnabled(config)
? ClientAuth.REQUIRE
: ClientAuth.NONE;
SslContext sslContext = createRestNettySSLContext(config, true, clientAuth);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for REST endpoints.");
}
return new SSLHandlerFactory(sslContext, -1, -1);
} | 3.68 |
hadoop_WriteOperationHelper_newUploadPartRequestBuilder | /**
* Create and initialize a part request builder of a multipart upload.
* The part number must be less than 10000.
* Retry policy is once-translated; anything more would be too much effort.
* @param destKey destination key of ongoing operation
* @param uploadId ID of ongoing upload
* @param partNumber current part number of the upload
* @param size amount of data
* @return the request builder.
* @throws IllegalArgumentException if the parameters are invalid.
* @throws PathIOException if the part number is out of range.
*/
@Override
@Retries.OnceTranslated
public UploadPartRequest.Builder newUploadPartRequestBuilder(
String destKey,
String uploadId,
int partNumber,
long size) throws IOException {
return once("upload part request", destKey,
withinAuditSpan(getAuditSpan(), () ->
getRequestFactory().newUploadPartRequestBuilder(
destKey,
uploadId,
partNumber,
size)));
} | 3.68 |
hbase_FileArchiverNotifierImpl_getLastFullCompute | /**
* Returns a strictly-increasing measure of time extracted by {@link System#nanoTime()}.
*/
long getLastFullCompute() {
return lastFullCompute;
} | 3.68 |
hbase_AbstractFSWAL_getFiles | /**
* Get the backing files associated with this WAL.
* @return may be null if there are no files.
*/
FileStatus[] getFiles() throws IOException {
return CommonFSUtils.listStatus(fs, walDir, ourFiles);
} | 3.68 |
framework_JsonCodec_encodeObject | /*
* Loops through the fields of value and encodes them.
*/
private static EncodeResult encodeObject(Object value, Class<?> valueType,
JsonObject referenceValue, ConnectorTracker connectorTracker) {
JsonObject encoded = Json.createObject();
JsonObject diff = Json.createObject();
try {
for (BeanProperty property : getProperties(valueType)) {
String fieldName = property.getName();
// We can't use PropertyDescriptor.getPropertyType() as it does
// not support generics
Type fieldType = property.getType();
Object fieldValue = property.getValue(value);
if (encoded.hasKey(fieldName)) {
throw new RuntimeException("Can't encode "
+ valueType.getName()
+ " as it has multiple properties with the name "
+ fieldName.toLowerCase(Locale.ROOT)
+ ". This can happen if there are getters and setters for a public field (the framework can't know which to ignore) or if there are properties with only casing distinguishing between the names (e.g. getFoo() and getFOO())");
}
JsonValue fieldReference;
if (referenceValue != null) {
fieldReference = referenceValue.get(fieldName);
if (fieldReference instanceof JsonNull) {
fieldReference = null;
}
} else {
fieldReference = null;
}
EncodeResult encodeResult = encode(fieldValue, fieldReference,
fieldType, connectorTracker);
encoded.put(fieldName, encodeResult.getEncodedValue());
if (valueChanged(encodeResult.getEncodedValue(),
fieldReference)) {
diff.put(fieldName, encodeResult.getDiffOrValue());
}
}
} catch (Exception e) {
// TODO: Should exceptions be handled in a different way?
throw new RuntimeException(e);
}
return new EncodeResult(encoded, diff);
} | 3.68 |
hbase_ServerCrashProcedure_isMatchingRegionLocation | /**
* Moved out here so it can be overridden by the HBCK fix-up SCP to be less strict about what it will
* tolerate as a 'match'.
* @return True if the region location in <code>rsn</code> matches that of this crashed server.
*/
protected boolean isMatchingRegionLocation(RegionStateNode rsn) {
return this.serverName.equals(rsn.getRegionLocation());
} | 3.68 |
morf_ViewBean_getSelectStatement | /**
* @see org.alfasoftware.morf.metadata.View#getSelectStatement()
*/
@Override
public SelectStatement getSelectStatement() {
if (!knowsSelectStatement)
throw new UnsupportedOperationException("Unable to return select statement for view [" + name + "]");
return selectStatement;
} | 3.68 |
rocketmq-connect_JsonSchemaConverterConfig_decimalFormat | /**
* decimal format
*
* @return the configured decimal format, or the default when it is not set
*/
public DecimalFormat decimalFormat() {
return props.containsKey(DECIMAL_FORMAT_CONFIG) ?
DecimalFormat.valueOf(props.get(DECIMAL_FORMAT_CONFIG).toString().toUpperCase(Locale.ROOT)) : DECIMAL_FORMAT_DEFAULT;
} | 3.68 |
hbase_Mutation_toCellVisibility | /**
* Convert protocol buffer CellVisibility bytes to a client CellVisibility
* @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(byte[] protoBytes)
throws DeserializationException {
if (protoBytes == null) return null;
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
ClientProtos.CellVisibility proto = null;
try {
ProtobufUtil.mergeFrom(builder, protoBytes);
proto = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return toCellVisibility(proto);
} | 3.68 |
framework_Escalator_moveAndUpdateEscalatorRows | /**
* Move escalator rows around, and make sure everything gets
* appropriately repositioned and repainted.
*
* @param visualSourceRange
* the range of rows to move to a new place
* @param visualTargetIndex
* the visual index where the rows will be placed to
* @param logicalTargetIndex
* the logical index to be assigned to the first moved row
*/
private void moveAndUpdateEscalatorRows(final Range visualSourceRange,
final int visualTargetIndex, final int logicalTargetIndex)
throws IllegalArgumentException {
if (visualSourceRange.isEmpty()) {
return;
}
assert visualSourceRange.getStart() >= 0 : "Visual source start "
+ "must be 0 or greater (was "
+ visualSourceRange.getStart() + ")";
assert logicalTargetIndex >= 0 : "Logical target must be 0 or "
+ "greater (was " + logicalTargetIndex + ")";
assert visualTargetIndex >= 0 : "Visual target must be 0 or greater (was "
+ visualTargetIndex + ")";
assert visualTargetIndex <= getDomRowCount() : "Visual target "
+ "must not be greater than the number of escalator rows (was "
+ visualTargetIndex + ", escalator rows " + getDomRowCount()
+ ")";
assert logicalTargetIndex
+ visualSourceRange.length() <= getRowCount() : "Logical "
+ "target leads to rows outside of the data range ("
+ Range.withLength(logicalTargetIndex,
visualSourceRange.length())
+ " goes beyond "
+ Range.withLength(0, getRowCount()) + ")";
/*
* Since we move a range into another range, the indices might move
* about. Having 10 rows, if we move 0..1 to index 10 (to the end of
* the collection), the target range will end up being 8..9, instead
* of 10..11.
*
* This applies only if we move elements forward in the collection,
* not backward.
*/
final int adjustedVisualTargetIndex;
if (visualSourceRange.getStart() < visualTargetIndex) {
adjustedVisualTargetIndex = visualTargetIndex
- visualSourceRange.length();
} else {
adjustedVisualTargetIndex = visualTargetIndex;
}
if (visualSourceRange.getStart() != adjustedVisualTargetIndex) {
/*
* Reorder the rows to their correct places within
* visualRowOrder (unless rows are moved back to their original
* places)
*/
/*
* TODO [[optimize]]: move whichever set is smaller: the ones
* explicitly moved, or the others. So, with 10 escalator rows,
* if we are asked to move idx[0..8] to the end of the list,
* it's faster to just move idx[9] to the beginning.
*/
final List<TableRowElement> removedRows = new ArrayList<TableRowElement>(
visualSourceRange.length());
for (int i = 0; i < visualSourceRange.length(); i++) {
final TableRowElement tr = visualRowOrder
.remove(visualSourceRange.getStart());
removedRows.add(tr);
}
visualRowOrder.addAll(adjustedVisualTargetIndex, removedRows);
}
{ // Refresh the contents of the affected rows
final ListIterator<TableRowElement> iter = visualRowOrder
.listIterator(adjustedVisualTargetIndex);
for (int logicalIndex = logicalTargetIndex; logicalIndex < logicalTargetIndex
+ visualSourceRange.length(); logicalIndex++) {
final TableRowElement tr = iter.next();
refreshRow(tr, logicalIndex);
}
}
{ // Reposition the rows that were moved
double newRowTop = getRowTop(logicalTargetIndex);
final ListIterator<TableRowElement> iter = visualRowOrder
.listIterator(adjustedVisualTargetIndex);
for (int i = 0; i < visualSourceRange.length(); i++) {
final TableRowElement tr = iter.next();
setRowPosition(tr, 0, newRowTop);
newRowTop += getDefaultRowHeight();
newRowTop += spacerContainer
.getSpacerHeight(logicalTargetIndex + i);
}
}
} | 3.68 |
hbase_ZNodePaths_isMetaZNodePath | /** Returns true if the fully qualified path is for the meta location */
public boolean isMetaZNodePath(String path) {
int prefixLen = baseZNode.length() + 1;
return path.length() > prefixLen && isMetaZNodePrefix(path.substring(prefixLen));
} | 3.68 |
hadoop_BlockBlobAppendStream_flush | /**
* Flushes this output stream and forces any buffered output bytes to be
* written out. If any data remains in the payload it is committed to the
* service. Data is queued for writing and forced out to the service
* before the call returns.
*/
@Override
public void flush() throws IOException {
if (closed) {
// calling close() after the stream is closed starts with call to flush()
return;
}
addBlockUploadCommand();
if (committedBlobLength.get() < blobLength) {
try {
// wait until the block list is committed
addFlushCommand().await();
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
} | 3.68 |
framework_RowVisibilityChangeEvent_dispatch | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.shared.GwtEvent#dispatch(com.google.gwt.event.shared
* .EventHandler)
*/
@Override
protected void dispatch(RowVisibilityChangeHandler handler) {
handler.onRowVisibilityChange(this);
} | 3.68 |
flink_Tuple7_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple7)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple7 tuple = (Tuple7) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
return true;
} | 3.68 |
flink_Time_hours | /** Creates a new {@link Time} that represents the given number of hours. */
public static Time hours(long hours) {
return of(hours, TimeUnit.HOURS);
} | 3.68 |
hadoop_ServiceLauncher_setService | /**
* Setter is to give subclasses the ability to manipulate the service.
* @param s the new service
*/
protected void setService(S s) {
this.service = s;
} | 3.68 |
hbase_MasterObserver_postListTablesInRSGroup | /**
* Called after listing all tables in the region server group.
* @param ctx the environment to interact with the framework and master
* @param groupName name of the region server group
*/
default void postListTablesInRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String groupName) throws IOException {
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_channelHandlerContext | /**
* {@inheritDoc}
* <p>
* Any queued {@link FlowControlled} objects will be sent.
*/
@Override
public void channelHandlerContext(ChannelHandlerContext ctx) throws Http2Exception {
this.ctx = checkNotNull(ctx, "ctx");
// Writing the pending bytes will not check for a writability change; instead, a writability change
// notification is provided by an explicit call.
channelWritabilityChanged();
// Don't worry about cleaning up queued frames here if ctx is null. It is expected that all streams will be
// closed and the queue cleanup will occur when the stream state transitions occur.
// If any frames have been queued up, we should send them now that we have a channel context.
if (isChannelWritable()) {
writePendingBytes();
}
} | 3.68 |
framework_CustomizedSystemMessages_setInternalErrorNotificationEnabled | /**
* Enables or disables the notification. If disabled, the set URL (or
* current) is loaded directly.
*
* @param internalErrorNotificationEnabled
* true = enabled, false = disabled
*/
public void setInternalErrorNotificationEnabled(
boolean internalErrorNotificationEnabled) {
this.internalErrorNotificationEnabled = internalErrorNotificationEnabled;
} | 3.68 |
framework_DefaultFieldGroupFieldFactory_anyField | /**
* @since 7.4
* @param fieldType
* the type of the field
* @return true if any AbstractField can be assigned to the field
*/
protected boolean anyField(Class<?> fieldType) {
return fieldType == Field.class || fieldType == AbstractField.class;
} | 3.68 |
hmily_HmilyRepositoryStorage_updateHmilyParticipantStatus | /**
* Update hmily participant status.
*
* @param hmilyParticipant the hmily participant
*/
public static void updateHmilyParticipantStatus(final HmilyParticipant hmilyParticipant) {
if (Objects.nonNull(hmilyParticipant)) {
PUBLISHER.publishEvent(hmilyParticipant, EventTypeEnum.UPDATE_HMILY_PARTICIPANT_STATUS.getCode());
}
} | 3.68 |
flink_AbstractParameterTool_getUnrequestedParameters | /**
* Returns the set of parameter names which have not been requested with {@link #has(String)} or
* one of the {@code get} methods. Access to the map returned by {@link #toMap()} is not
* tracked.
*/
@PublicEvolving
public Set<String> getUnrequestedParameters() {
return Collections.unmodifiableSet(unrequestedParameters);
} | 3.68 |
flink_SourceTestSuiteBase_checkSourceMetrics | /** Check that the source's aggregated numRecordsIn metric matches the expected record count. */
private boolean checkSourceMetrics(
MetricQuerier queryRestClient,
TestEnvironment testEnv,
JobID jobId,
String sourceName,
long allRecordSize)
throws Exception {
Double sumNumRecordsIn =
queryRestClient.getAggregatedMetricsByRestAPI(
testEnv.getRestEndpoint(),
jobId,
sourceName,
MetricNames.IO_NUM_RECORDS_IN,
null);
return Precision.equals(allRecordSize, sumNumRecordsIn);
} | 3.68 |
graphhopper_InstructionsOutgoingEdges_getAllowedTurns | /**
* This method calculates the number of allowed outgoing edges, which could be considered the number of possible
* roads one might take at the intersection. This excludes the road you are coming from and inaccessible roads.
*/
public int getAllowedTurns() {
return 1 + allowedAlternativeTurns.size();
} | 3.68 |
hadoop_FileBasedCopyListing_getNumberOfPaths | /** {@inheritDoc} */
@Override
protected long getNumberOfPaths() {
return globbedListing.getNumberOfPaths();
} | 3.68 |
hadoop_S3ListResult_v2 | /**
* Restricted constructors to ensure v1 or v2, not both.
* @param result v2 result
* @return new list result container
*/
public static S3ListResult v2(ListObjectsV2Response result) {
return new S3ListResult(null, requireNonNull(result));
} | 3.68 |
hbase_TableMapReduceUtil_addDependencyJarsForClasses | /**
* Add the jars containing the given classes to the job's configuration such that JobClient will
* ship them to the cluster and add them to the DistributedCache. N.B. that this method at most
* adds one jar per class given. If there is more than one jar available containing a class with
* the same name as a given class, we don't define which of those jars might be chosen.
* @param conf The Hadoop Configuration to modify
* @param classes will add just those dependencies needed to find the given classes
* @throws IOException if an underlying library call fails.
*/
@InterfaceAudience.Private
public static void addDependencyJarsForClasses(Configuration conf, Class<?>... classes)
throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Set<String> jars = new HashSet<>();
// Add jars that are already in the tmpjars variable
jars.addAll(conf.getStringCollection("tmpjars"));
// add jars as we find them to a map of contents jar name so that we can avoid
// creating new jars for classes that have already been packaged.
Map<String, String> packagedClasses = new HashMap<>();
// Add jars containing the specified classes
for (Class<?> clazz : classes) {
if (clazz == null) continue;
Path path = findOrCreateJar(clazz, localFs, packagedClasses);
if (path == null) {
LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster.");
continue;
}
if (!localFs.exists(path)) {
LOG.warn("Could not validate jar file " + path + " for class " + clazz);
continue;
}
jars.add(path.toString());
}
if (jars.isEmpty()) return;
conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()])));
} | 3.68 |
hadoop_AbfsStatistic_getStatDescription | /**
* Getter for statistic description.
*
* @return Description of statistic.
*/
public String getStatDescription() {
return statDescription;
} | 3.68 |
graphhopper_GTFSFeed_getShape | /** Get the shape for the given shape ID */
public Shape getShape (String shape_id) {
Shape shape = new Shape(this, shape_id);
return shape.shape_dist_traveled.length > 0 ? shape : null;
} | 3.68 |
graphhopper_TranslationMap_get | /**
* Returns the Translation object for the specified locale and returns null if not found.
*/
public Translation get(String locale) {
locale = locale.replace("-", "_");
Translation tr = translations.get(locale);
if (locale.contains("_") && tr == null)
tr = translations.get(locale.substring(0, 2));
return tr;
} | 3.68 |
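A standalone sketch of the fallback order (exact locale first, then the two-letter language code); the bundle contents are made up:

import java.util.HashMap;
import java.util.Map;

public class LocaleFallbackDemo {
    static final Map<String, String> translations = new HashMap<>();

    // Same lookup order as the snippet: exact locale first, then the language code.
    static String get(String locale) {
        locale = locale.replace("-", "_");
        String tr = translations.get(locale);
        if (locale.contains("_") && tr == null) {
            tr = translations.get(locale.substring(0, 2));
        }
        return tr;
    }

    public static void main(String[] args) {
        translations.put("de", "German bundle");
        System.out.println(get("de-DE")); // German bundle: falls back from de_DE to de
        System.out.println(get("fr"));    // null: no bundle registered
    }
}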
zxing_MinimalEncoder_getB256Size | // does not count beyond 250
int getB256Size() {
int cnt = 0;
Edge current = this;
while (current != null && current.mode == Mode.B256 && cnt <= 250) {
cnt++;
current = current.previous;
}
return cnt;
} | 3.68 |
flink_CalciteParser_parseIdentifier | /**
* Parses a SQL string as an identifier into a {@link SqlIdentifier}.
*
* @param identifier a sql string to parse as an identifier
* @return a parsed sql node
* @throws SqlParserException if an exception is thrown when parsing the identifier
*/
public SqlIdentifier parseIdentifier(String identifier) throws SqlParserException {
try {
SqlAbstractParserImpl flinkParser = createFlinkParser(identifier);
if (flinkParser instanceof FlinkSqlParserImpl) {
return ((FlinkSqlParserImpl) flinkParser).TableApiIdentifier();
} else {
throw new IllegalArgumentException(
"Unrecognized sql parser type " + flinkParser.getClass().getName());
}
} catch (Exception e) {
throw new SqlParserException(
String.format("Invalid SQL identifier %s.", identifier), e);
}
} | 3.68 |
hadoop_ResourceEstimatorService_getHistoryResourceSkyline | /**
* Get history {@link ResourceSkyline} from {@link SkylineStore}. This
* function supports the following special wildcard operations regarding
* {@link RecurrenceId}: If the {@code pipelineId} is "*", it will return all
* entries in the store; else, if the {@code runId} is "*", it will return all
* {@link ResourceSkyline}s belonging to the {@code pipelineId}; else, it will
* return all {@link ResourceSkyline}s belonging to the {{@code pipelineId},
* {@code runId}}. If the {@link RecurrenceId} does not exist, it will not do
* anything.
*
* @param pipelineId pipelineId of the history run.
* @param runId runId of the history run.
* @return Json format of history {@link ResourceSkyline}s.
* @throws SkylineStoreException if fails to getHistory
* {@link ResourceSkyline} from {@link SkylineStore}.
*/
@GET @Path("/skylinestore/history/{pipelineId}/{runId}")
@Produces(MediaType.APPLICATION_JSON)
public String getHistoryResourceSkyline(
@PathParam("pipelineId") String pipelineId,
@PathParam("runId") String runId) throws SkylineStoreException {
RecurrenceId recurrenceId = new RecurrenceId(pipelineId, runId);
Map<RecurrenceId, List<ResourceSkyline>> jobHistory =
skylineStore.getHistory(recurrenceId);
final String skyline = gson.toJson(jobHistory, skylineStoreType);
LOGGER
.debug("Query the skyline store for recurrenceId: {}." + recurrenceId);
return skyline;
} | 3.68 |
rocketmq-connect_AbstractConnectController_deleteConnectorConfig | /**
* Remove the connector with the specified connector name in the cluster.
*
* @param connectorName
*/
public void deleteConnectorConfig(String connectorName) {
configManagementService.deleteConnectorConfig(connectorName);
} | 3.68 |
flink_OperatingSystem_isWindows | /**
* Checks whether the operating system this JVM runs on is Windows.
*
* @return <code>true</code> if the operating system this JVM runs on is Windows, <code>false
* </code> otherwise
*/
public static boolean isWindows() {
return getCurrentOperatingSystem() == WINDOWS;
} | 3.68 |
flink_ResultPartitionType_canBePipelinedConsumed | /** Returns whether this partition's upstream and downstream support being scheduled at the same time. */
public boolean canBePipelinedConsumed() {
return consumingConstraint == ConsumingConstraint.CAN_BE_PIPELINED
|| consumingConstraint == ConsumingConstraint.MUST_BE_PIPELINED;
} | 3.68 |
zxing_BitSource_getBitOffset | /**
* @return index of next bit in current byte which would be read by the next call to {@link #readBits(int)}.
*/
public int getBitOffset() {
return bitOffset;
} | 3.68 |
hbase_CommonFSUtils_getRegionDir | /**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the region directory under
* path rootdir
* @param rootdir qualified path of HBase root directory
* @param tableName name of table
* @param regionName The encoded region name
* @return {@link org.apache.hadoop.fs.Path} for region
*/
public static Path getRegionDir(Path rootdir, TableName tableName, String regionName) {
return new Path(getTableDir(rootdir, tableName), regionName);
} | 3.68 |
flink_BloomFilter_mergeSerializedBloomFilters | /** Merge the bf2 bytes to bf1. After merge completes, the contents of bf1 will be changed. */
private static byte[] mergeSerializedBloomFilters(
byte[] bf1Bytes,
int bf1Start,
int bf1Length,
byte[] bf2Bytes,
int bf2Start,
int bf2Length) {
if (bf1Length != bf2Length) {
throw new IllegalArgumentException(
String.format(
"bf1Length %s does not match bf2Length %s when merging",
bf1Length, bf2Length));
}
// Validation on hash functions
if (UNSAFE.getByte(bf1Bytes, BYTE_ARRAY_BASE_OFFSET + bf1Start)
!= UNSAFE.getByte(bf2Bytes, BYTE_ARRAY_BASE_OFFSET + bf2Start)) {
throw new IllegalArgumentException(
"bf1 numHashFunctions does not match bf2 when merging");
}
for (int idx = 8 + BYTE_ARRAY_BASE_OFFSET;
idx < bf1Length + BYTE_ARRAY_BASE_OFFSET;
idx += 1) {
byte l1 = UNSAFE.getByte(bf1Bytes, bf1Start + idx);
byte l2 = UNSAFE.getByte(bf2Bytes, bf2Start + idx);
UNSAFE.putByte(bf1Bytes, bf1Start + idx, (byte) (l1 | l2));
}
return bf1Bytes;
} | 3.68 |
hbase_StoreUtils_hasReferences | /**
* Determines whether any files in the collection are references.
* @param files The files.
*/
public static boolean hasReferences(Collection<HStoreFile> files) {
// TODO: make sure that we won't pass null here in the future.
return files != null && files.stream().anyMatch(HStoreFile::isReference);
} | 3.68 |
hadoop_FederationUtil_updateMountPointStatus | /**
* Add the number of children for an existing HdfsFileStatus object.
* @param dirStatus HdfsfileStatus object.
* @param children number of children to be added.
* @return HdfsFileStatus with the number of children specified.
*/
public static HdfsFileStatus updateMountPointStatus(HdfsFileStatus dirStatus,
int children) {
// Get flags to set in new FileStatus.
EnumSet<HdfsFileStatus.Flags> flags =
DFSUtil.getFlags(dirStatus.isEncrypted(), dirStatus.isErasureCoded(),
dirStatus.isSnapshotEnabled(), dirStatus.hasAcl());
EnumSet.noneOf(HdfsFileStatus.Flags.class);
return new HdfsFileStatus.Builder().atime(dirStatus.getAccessTime())
.blocksize(dirStatus.getBlockSize()).children(children)
.ecPolicy(dirStatus.getErasureCodingPolicy())
.feInfo(dirStatus.getFileEncryptionInfo()).fileId(dirStatus.getFileId())
.group(dirStatus.getGroup()).isdir(dirStatus.isDir())
.length(dirStatus.getLen()).mtime(dirStatus.getModificationTime())
.owner(dirStatus.getOwner()).path(dirStatus.getLocalNameInBytes())
.perm(dirStatus.getPermission()).replication(dirStatus.getReplication())
.storagePolicy(dirStatus.getStoragePolicy())
.symlink(dirStatus.getSymlinkInBytes()).flags(flags).build();
} | 3.68 |
hadoop_CommitContext_buildThreadPool | /**
* Returns an {@link ExecutorService} for parallel tasks. The number of
* threads in the thread-pool is set by fs.s3a.committer.threads.
* If num-threads is 0, this will raise an exception.
* The threads have a lifespan set by
* {@link InternalCommitterConstants#THREAD_KEEP_ALIVE_TIME}.
* When the thread pool is full, the caller-runs
* policy takes over.
* @param numThreads thread count, may be negative.
* @return an {@link ExecutorService} for the number of threads
*/
private ExecutorService buildThreadPool(
int numThreads) {
if (numThreads < 0) {
// a negative number means "multiple of available processors"
numThreads = numThreads * -Runtime.getRuntime().availableProcessors();
}
Preconditions.checkArgument(numThreads > 0,
"Cannot create a thread pool with no threads");
LOG.debug("creating thread pool of size {}", numThreads);
final ThreadFactory factory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(THREAD_PREFIX + jobId + "-%d")
.build();
return new HadoopThreadPoolExecutor(numThreads, numThreads,
THREAD_KEEP_ALIVE_TIME,
TimeUnit.SECONDS,
new LinkedBlockingQueue<>(),
factory,
new ThreadPoolExecutor.CallerRunsPolicy());
} | 3.68 |
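A standalone sketch of the thread-count convention above, where a negative value means a multiple of the available processors:

public class ThreadCountDemo {
    // Same convention as the snippet: a negative value is a multiple of available processors.
    static int resolveThreadCount(int numThreads) {
        if (numThreads < 0) {
            numThreads = numThreads * -Runtime.getRuntime().availableProcessors();
        }
        if (numThreads <= 0) {
            throw new IllegalArgumentException("Cannot create a thread pool with no threads");
        }
        return numThreads;
    }

    public static void main(String[] args) {
        int cores = Runtime.getRuntime().availableProcessors();
        System.out.println(resolveThreadCount(8));  // 8
        System.out.println(resolveThreadCount(-2)); // 2 * cores, e.g. 16 on an 8-core machine
        System.out.println("cores = " + cores);
    }
}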
hadoop_RegistryPathUtils_join | /**
* Join two paths, guaranteeing that there will be exactly
* one separator between the two, and exactly one at the front
* of the path. There will be no trailing "/" except for the special
* case that this is the root path
* @param base base path
* @param path second path to add
* @return a combined path.
*/
public static String join(String base, String path) {
Preconditions.checkArgument(path != null, "null path");
Preconditions.checkArgument(base != null, "null path");
StringBuilder fullpath = new StringBuilder();
if (!base.startsWith("/")) {
fullpath.append('/');
}
fullpath.append(base);
// guarantee a trailing /
if (!fullpath.toString().endsWith("/")) {
fullpath.append("/");
}
// strip off any at the beginning
if (path.startsWith("/")) {
// path starts with /, so append all other characters -if present
if (path.length() > 1) {
fullpath.append(path.substring(1));
}
} else {
fullpath.append(path);
}
//here there may be a trailing "/"
String finalpath = fullpath.toString();
if (finalpath.endsWith("/") && !"/".equals(finalpath)) {
finalpath = finalpath.substring(0, finalpath.length() - 1);
}
return finalpath;
} | 3.68 |
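A hedged usage sketch; the expected outputs follow from the implementation above, and the import path is assumed to be the helper's usual location in the Hadoop registry module:

import org.apache.hadoop.registry.client.binding.RegistryPathUtils; // assumed package

public class RegistryJoinDemo {
    public static void main(String[] args) {
        // Expected results implied by the implementation above:
        System.out.println(RegistryPathUtils.join("base", "path"));     // /base/path
        System.out.println(RegistryPathUtils.join("/base/", "/path/")); // /base/path
        System.out.println(RegistryPathUtils.join("/", "/"));           // /
    }
}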
dubbo_MeshRuleRouter_getDubboRoute | /**
* Match virtual service (by serviceName)
*/
protected DubboRoute getDubboRoute(VirtualServiceRule virtualServiceRule, Invocation invocation) {
String serviceName = invocation.getServiceName();
VirtualServiceSpec spec = virtualServiceRule.getSpec();
List<DubboRoute> dubboRouteList = spec.getDubbo();
if (CollectionUtils.isNotEmpty(dubboRouteList)) {
for (DubboRoute dubboRoute : dubboRouteList) {
List<StringMatch> stringMatchList = dubboRoute.getServices();
if (CollectionUtils.isEmpty(stringMatchList)) {
return dubboRoute;
}
for (StringMatch stringMatch : stringMatchList) {
if (stringMatch.isMatch(serviceName)) {
return dubboRoute;
}
}
}
}
return null;
} | 3.68 |
flink_StreamingJobGraphGenerator_setVertexParallelismsForDynamicGraphIfNecessary | /**
* This method is used to reset or set job vertices' parallelism for dynamic graph:
*
* <p>1. Reset parallelism for job vertices whose parallelism is not configured.
*
* <p>2. Set parallelism and maxParallelism for job vertices in forward group, to ensure the
* parallelism and maxParallelism of vertices in the same forward group to be the same; set the
* parallelism at early stage if possible, to avoid invalid partition reuse.
*/
private void setVertexParallelismsForDynamicGraphIfNecessary() {
// Note that the jobVertices are reverse topological order
final List<JobVertex> topologicalOrderVertices =
IterableUtils.toStream(jobVertices.values()).collect(Collectors.toList());
Collections.reverse(topologicalOrderVertices);
// reset parallelism for job vertices whose parallelism is not configured
jobVertices.forEach(
(startNodeId, jobVertex) -> {
final OperatorChainInfo chainInfo = chainInfos.get(startNodeId);
if (!jobVertex.isParallelismConfigured()
&& streamGraph.isAutoParallelismEnabled()) {
jobVertex.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT);
chainInfo
.getAllChainedNodes()
.forEach(
n ->
n.setParallelism(
ExecutionConfig.PARALLELISM_DEFAULT,
false));
}
});
final Map<JobVertex, Set<JobVertex>> forwardProducersByJobVertex = new HashMap<>();
jobVertices.forEach(
(startNodeId, jobVertex) -> {
Set<JobVertex> forwardConsumers =
chainInfos.get(startNodeId).getTransitiveOutEdges().stream()
.filter(
edge ->
edge.getPartitioner()
instanceof ForwardPartitioner)
.map(StreamEdge::getTargetId)
.map(jobVertices::get)
.collect(Collectors.toSet());
for (JobVertex forwardConsumer : forwardConsumers) {
forwardProducersByJobVertex.compute(
forwardConsumer,
(ignored, producers) -> {
if (producers == null) {
producers = new HashSet<>();
}
producers.add(jobVertex);
return producers;
});
}
});
// compute forward groups
final Map<JobVertexID, ForwardGroup> forwardGroupsByJobVertexId =
ForwardGroupComputeUtil.computeForwardGroups(
topologicalOrderVertices,
jobVertex ->
forwardProducersByJobVertex.getOrDefault(
jobVertex, Collections.emptySet()));
jobVertices.forEach(
(startNodeId, jobVertex) -> {
ForwardGroup forwardGroup = forwardGroupsByJobVertexId.get(jobVertex.getID());
// set parallelism for vertices in forward group
if (forwardGroup != null && forwardGroup.isParallelismDecided()) {
jobVertex.setParallelism(forwardGroup.getParallelism());
jobVertex.setParallelismConfigured(true);
chainInfos
.get(startNodeId)
.getAllChainedNodes()
.forEach(
streamNode ->
streamNode.setParallelism(
forwardGroup.getParallelism(), true));
}
// set max parallelism for vertices in forward group
if (forwardGroup != null && forwardGroup.isMaxParallelismDecided()) {
jobVertex.setMaxParallelism(forwardGroup.getMaxParallelism());
chainInfos
.get(startNodeId)
.getAllChainedNodes()
.forEach(
streamNode ->
streamNode.setMaxParallelism(
forwardGroup.getMaxParallelism()));
}
});
} | 3.68 |
framework_PointerEvent_getHeight | /**
* Gets the height of the contact geometry of the pointer in CSS pixels.
*
* @return height in CSS pixels.
*/
public final int getHeight() {
return getHeight(getNativeEvent());
} | 3.68 |
hadoop_ManifestCommitterSupport_manifestPathForTask | /**
* Get the path in the job attempt dir for a manifest for a task.
* @param manifestDir manifest directory
* @param taskId taskID.
* @return the final path to rename the manifest file to
*/
public static Path manifestPathForTask(Path manifestDir, String taskId) {
return new Path(manifestDir, taskId + MANIFEST_SUFFIX);
} | 3.68 |
hudi_FutureUtils_allOf | /**
* Similar to {@link CompletableFuture#allOf(CompletableFuture[])} with a few important
* differences:
*
* <ol>
* <li>Completes successfully as soon as *all* of the futures complete successfully</li>
* <li>Completes exceptionally as soon as *any* of the futures complete exceptionally</li>
* <li>In case it's completed exceptionally all the other futures not completed yet, will be
* cancelled</li>
* </ol>
*
* @param futures list of {@link CompletableFuture}s
*/
public static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
CompletableFuture<Void> union = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
futures.forEach(future -> {
// NOTE: We add a callback to every future, to cancel all the other not yet completed futures,
// which will be providing for an early termination semantic: whenever any of the futures
// fail other futures will be cancelled and the exception will be returned as a result
future.whenComplete((ignored, throwable) -> {
if (throwable != null) {
futures.forEach(f -> f.cancel(true));
union.completeExceptionally(throwable);
}
});
});
return union.thenApply(aVoid ->
futures.stream()
// NOTE: This join wouldn't block, since all the
// futures are completed at this point.
.map(CompletableFuture::join)
.collect(Collectors.toList()));
} | 3.68 |
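A JDK-only sketch of the same fail-fast idea (not the Hudi implementation itself); here the union is failed before cancelling the remaining futures so the original cause wins deterministically:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class FailFastAllOfDemo {
    // Minimal re-wiring of the fail-fast idea above: any failure completes the union
    // immediately and cancels the futures that are still running.
    static <T> CompletableFuture<Void> failFastAllOf(List<CompletableFuture<T>> futures) {
        CompletableFuture<Void> union =
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
        futures.forEach(f -> f.whenComplete((ignored, t) -> {
            if (t != null) {
                union.completeExceptionally(t);               // fail the union first...
                futures.forEach(other -> other.cancel(true)); // ...then cancel the rest
            }
        }));
        return union;
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<String> slow = new CompletableFuture<>();    // never completed here
        CompletableFuture<String> failing = new CompletableFuture<>();
        CompletableFuture<Void> union = failFastAllOf(List.of(slow, failing));

        failing.completeExceptionally(new RuntimeException("boom"));
        // The union fails right away instead of waiting for the slow future.
        union.exceptionally(t -> { System.out.println("union failed: " + t.getMessage()); return null; })
             .get(1, TimeUnit.SECONDS);
        System.out.println("slow future cancelled: " + slow.isCancelled()); // true
    }
}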
hadoop_LpSolver_getJobLen | /**
* Get the job length of recurring pipeline.
*
* @param resourceSkylines the history ResourceSkylines allocated to the
* recurring pipeline.
* @param numJobs number of history runs of the recurring pipeline.
* @return length of (discretized time intervals of) the recurring pipeline.
*/
private int getJobLen(final List<ResourceSkyline> resourceSkylines,
final int numJobs) {
int curLen = 0;
int jobLen = 0;
for (int indexJobI = 0; indexJobI < numJobs; indexJobI++) {
curLen = (int) (resourceSkylines.get(indexJobI).getSkylineList()
.getLatestNonNullTime() - resourceSkylines.get(indexJobI)
.getSkylineList().getEarliestStartTime() + timeInterval - 1)
/ timeInterval; // for round up
if (jobLen < curLen) {
jobLen = curLen;
}
}
return jobLen;
} | 3.68 |