name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68) |
---|---|---|
framework_AbsoluteLayout_getLeftUnits | /**
* Gets the unit for the 'left' attribute.
*
* @return See {@link Sizeable} UNIT_SYMBOLS for a description of the
* available units.
*/
public Unit getLeftUnits() {
return leftUnits;
} | 3.68 |
framework_AbsoluteLayoutResizeComponents_expandButton | /**
 * Creates a size change button for a component.
 *
 * @param component
 *            the component to control with the button
 * @return the created expand button
*/
private Button expandButton(Component component) {
Button button = new Button("Change Size",
clickEvent -> resizeComponent(component));
button.setId(component.getId() + "-button");
return button;
} | 3.68 |
querydsl_BeanPath_createMap | /**
* Create a new Map typed path
*
 * @param <K> map key type
 * @param <V> map value type
 * @param <E> value expression type
* @param property property name
* @param key key type
* @param value value type
* @param queryType expression type
* @return property path
*/
@SuppressWarnings("unchecked")
protected <K, V, E extends SimpleExpression<? super V>> MapPath<K, V, E> createMap(String property, Class<? super K> key, Class<? super V> value, Class<? super E> queryType) {
return add(new MapPath<K, V, E>(key, value, (Class) queryType, forProperty(property)));
} | 3.68 |
hbase_MasterRpcServices_snapshot | /**
* Triggers an asynchronous attempt to take a snapshot. {@inheritDoc}
*/
@Override
public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request)
throws ServiceException {
try {
server.checkInitialized();
server.snapshotManager.checkSnapshotSupport();
LOG.info(server.getClientIdAuditPrefix() + " snapshot request for:"
+ ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
// get the snapshot information
SnapshotDescription snapshot =
SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration());
// send back the max amount of time the client should wait for the snapshot to complete
long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(),
snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
SnapshotResponse.Builder builder = SnapshotResponse.newBuilder().setExpectedTimeout(waitTime);
      // If the snapshot request carries a nonce group and nonce, the client can handle the
      // snapshot procedure's procId. If the snapshot procedure is enabled, we do the snapshot
      // work with proc-v2; otherwise we fall back to the zk-based procedure.
if (
request.hasNonceGroup() && request.hasNonce()
&& server.snapshotManager.snapshotProcedureEnabled()
) {
long nonceGroup = request.getNonceGroup();
long nonce = request.getNonce();
long procId = server.snapshotManager.takeSnapshot(snapshot, nonceGroup, nonce);
return builder.setProcId(procId).build();
} else {
server.snapshotManager.takeSnapshot(snapshot);
return builder.build();
}
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
morf_ChangeIndex_getToIndex | /**
* Gets the index after the change
*
* @return the index after the change
*/
public Index getToIndex() {
return toIndex;
} | 3.68 |
hudi_SerializableSchema_readObjectFrom | // public read method exposed for unit tests
public void readObjectFrom(ObjectInputStream in) throws IOException {
try {
schema = new Schema.Parser().parse(in.readObject().toString());
} catch (ClassNotFoundException e) {
throw new IOException("unable to parse schema", e);
}
} | 3.68 |
hbase_SnapshotRegionProcedure_complete | // keep retrying until success
private void complete(MasterProcedureEnv env, Throwable error) {
if (isFinished()) {
      LOG.info("This procedure {} is already finished, skip the remaining steps", this.getProcId());
return;
}
if (event == null) {
      LOG.warn("procedure event for {} is null, maybe the procedure was created during recovery",
getProcId());
return;
}
if (error == null) {
LOG.info("finish snapshot {} on region {}", snapshot.getName(), region.getEncodedName());
succ = true;
}
event.wake(env.getProcedureScheduler());
event = null;
} | 3.68 |
hibernate-validator_AnnotationApiHelper_isInterface | /**
 * Tests whether the given {@link TypeMirror} represents an interface.
*/
public boolean isInterface(TypeMirror typeMirror) {
return TypeKind.DECLARED.equals( typeMirror.getKind() ) && ( (DeclaredType) typeMirror ).asElement().getKind().isInterface();
} | 3.68 |
hudi_BaseHoodieDateTimeParser_getOutputDateFormat | /**
* Returns the output date format in which the partition paths will be created for the hudi dataset.
*/
public String getOutputDateFormat() {
return getStringWithAltKeys(config, TIMESTAMP_OUTPUT_DATE_FORMAT);
} | 3.68 |
flink_CrossOperator_projectTuple24 | /**
 * Projects a pair of crossed elements to a {@link Tuple} with the previously selected
 * fields.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>
        ProjectCross<I1, I2, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>
        projectTuple24() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> tType =
            new TupleTypeInfo<>(fTypes);
    return new ProjectCross<>(
            this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hbase_MiniZooKeeperCluster_waitForServerUp | // XXX: From o.a.zk.t.ClientBase. It's in the test jar, but we don't depend on the zk test jar.
  // We removed the SSL/secure bit; it is not used here.
private static boolean waitForServerUp(int port, long timeout) throws IOException {
long start = EnvironmentEdgeManager.currentTime();
while (true) {
try {
String result = send4LetterWord(HOST, port, "stat", false, (int) timeout);
if (result.startsWith("Zookeeper version:") && !result.contains("READ-ONLY")) {
return true;
} else {
LOG.debug("Read {}", result);
}
} catch (ConnectException e) {
// ignore as this is expected, do not log stacktrace
LOG.info("{}:{} not up: {}", HOST, port, e.toString());
} catch (IOException | X509Exception.SSLContextException e) {
// ignore as this is expected
LOG.info("{}:{} not up", HOST, port, e);
}
if (EnvironmentEdgeManager.currentTime() > start + timeout) {
break;
}
try {
Thread.sleep(TIMEOUT);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
}
return false;
} | 3.68 |
framework_GridSelectionAllowedEvent_isSelectionAllowed | /**
 * Gets whether selection is allowed.
*
* @return {@code true} if selection is allowed, {@code false} otherwise
*/
public boolean isSelectionAllowed() {
return isSelectionAllowed;
} | 3.68 |
morf_ExecuteStatement_apply | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema apply(Schema schema) {
return schema;
} | 3.68 |
flink_ProjectOperator_projectTuple16 | /**
 * Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
 *
 * @return The projected DataSet.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>
        ProjectOperator<T, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>
        projectTuple16() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> tType =
            new TupleTypeInfo<>(fTypes);
    return new ProjectOperator<>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
hadoop_BCFile_getBlockIndexNear | /**
* Find the smallest Block index whose starting offset is greater than or
* equal to the specified offset.
*
* @param offset
* User-specific offset.
* @return the index to the data Block if such block exists; or -1
* otherwise.
*/
public int getBlockIndexNear(long offset) {
ArrayList<BlockRegion> list = dataIndex.getBlockRegionList();
int idx =
Utils
.lowerBound(list, new ScalarLong(offset), new ScalarComparator());
if (idx == list.size()) {
return -1;
}
return idx;
} | 3.68 |
hadoop_GangliaConf_getUnits | /**
* @return the units
*/
String getUnits() {
return units;
} | 3.68 |
flink_IntParser_parseField | /**
* Static utility to parse a field of type int from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
 * @throws NumberFormatException Thrown when the value cannot be parsed because the text
 *     does not represent a correct number.
*/
public static final int parseField(byte[] bytes, int startPos, int length, char delimiter) {
long val = 0;
boolean neg = false;
if (bytes[startPos] == delimiter) {
throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if (length == 0 || bytes[startPos] == delimiter) {
throw new NumberFormatException("Orphaned minus sign.");
}
}
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
return (int) (neg ? -val : val);
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
if (val > OVERFLOW_BOUND && (!neg || val > UNDERFLOW_BOUND)) {
throw new NumberFormatException("Value overflow/underflow");
}
}
return (int) (neg ? -val : val);
} | 3.68 |
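A minimal usage sketch for the parser above; the byte buffer contents and the '|' delimiter are illustrative assumptions, not taken from the Flink sources.
// Hypothetical '|'-delimited record; parseField stops at the delimiter and returns 1234.
byte[] record = "1234|abc".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
int value = IntParser.parseField(record, 0, record.length, '|');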
hadoop_BaseRecord_like | /**
* Check if this record matches a partial record.
*
* @param other Partial record.
* @return If this record matches.
*/
public boolean like(BaseRecord other) {
if (other == null) {
return false;
}
Map<String, String> thisKeys = this.getPrimaryKeys();
Map<String, String> otherKeys = other.getPrimaryKeys();
if (thisKeys == null) {
return otherKeys == null;
}
return thisKeys.equals(otherKeys);
} | 3.68 |
streampipes_Formats_thriftFormat | /**
* Defines the transport format Apache Thrift used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type Thrift.
*/
public static TransportFormat thriftFormat() {
return new TransportFormat(MessageFormat.THRIFT);
} | 3.68 |
framework_VAbsoluteLayout_setStylePrimaryName | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.user.client.ui.UIObject#setStylePrimaryName(java.lang.
* String)
*/
@Override
public void setStylePrimaryName(String style) {
updateStylenames(style);
} | 3.68 |
framework_VAbstractCalendarPanel_focusPreviousDay | /**
* Moves the focus backward the given number of days.
*/
private void focusPreviousDay(int days) {
focusNextDay(-days);
} | 3.68 |
querydsl_Expressions_set | /**
* Combine the given expressions into a set expression
*
 * @param exprs set elements
 * @return set expression
*/
public static Expression<Tuple> set(Expression<?>... exprs) {
return set(Tuple.class, exprs);
} | 3.68 |
hadoop_FilterFileSystem_getWorkingDirectory | /**
* Get the current working directory for the given file system
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
} | 3.68 |
rocketmq-connect_ExpressionBuilder_quote | /**
* Get a {@link Transform} that will surround the inputs with quotes.
*
* @return the transform; never null
*/
public static Transform<String> quote() {
return (builder, input) -> builder.appendColumnName(input);
}
/**
* Get a {@link Transform} that will quote just the column names.
*
* @return the transform; never null
*/
// public static Transform<ColumnId> columnNames() {
// return (builder, input) -> builder.appendColumnName(input.name());
// }
/**
* Get a {@link Transform} that will quote just the column names and append the given string.
*
* @param appended the string to append after the quoted column names
* @return the transform; never null
*/
// public static Transform<ColumnId> columnNamesWith(final String appended) {
// return (builder, input) -> {
// builder.appendColumnName(input.name());
// builder.append(appended);
// };
// }
/**
* Get a {@link Transform} that will append a placeholder rather than each of the column names.
*
* @param str the string to output instead the each column name
* @return the transform; never null
*/
// public static Transform<ColumnId> placeholderInsteadOfColumnNames(final String str) {
// return (builder, input) -> builder.append(str);
// }
/**
* Get a {@link Transform} that will append the prefix and then the quoted column name.
*
* @param prefix the string to output before the quoted column names
* @return the transform; never null
*/
// public static Transform<ColumnId> columnNamesWithPrefix(final String prefix) {
// return (builder, input) -> {
// builder.append(prefix);
// builder.appendColumnName(input.name());
// };
// } | 3.68 |
hadoop_MutableGaugeLong_incr | /**
 * Increment by delta.
 * @param delta the amount to increment by
*/
public void incr(long delta) {
value.addAndGet(delta);
setChanged();
} | 3.68 |
framework_ValueChangeHandler_setValueChangeTimeout | /**
* Sets the value change timeout to use.
*
* @see ValueChangeMode
*
* @param valueChangeTimeout
* the value change timeout
*/
public void setValueChangeTimeout(int valueChangeTimeout) {
this.valueChangeTimeout = valueChangeTimeout;
} | 3.68 |
flink_ExternalResourceUtils_generateExternalResourcesString | /** Generate the string expression of the given external resources. */
public static String generateExternalResourcesString(
Collection<ExternalResource> extendedResources) {
return extendedResources.stream()
.map(resource -> resource.getName() + "=" + resource.getValue())
.collect(Collectors.joining(", "));
} | 3.68 |
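For illustration only, the same name=value joining idiom applied to a plain map; the resource names and amounts below are hypothetical, and java.util plus java.util.stream imports are assumed.
// Produces "gpu=2, fpga=1" (LinkedHashMap preserves insertion order).
Map<String, Long> resources = new LinkedHashMap<>();
resources.put("gpu", 2L);
resources.put("fpga", 1L);
String rendered = resources.entrySet().stream()
        .map(e -> e.getKey() + "=" + e.getValue())
        .collect(Collectors.joining(", "));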
hbase_HMaster_getClusterMetrics | /** Returns cluster status */
public ClusterMetrics getClusterMetrics() throws IOException {
return getClusterMetrics(EnumSet.allOf(Option.class));
} | 3.68 |
flink_FileSystemSafetyNet_initializeSafetyNetForThread | /**
* Activates the safety net for a thread. {@link FileSystem} instances obtained by the thread
* that called this method will be guarded, meaning that their created streams are tracked and
* can be closed via the safety net closing hook.
*
* <p>This method should be called at the beginning of a thread that should be guarded.
*
* @throws IllegalStateException Thrown, if a safety net was already registered for the thread.
*/
@Internal
public static void initializeSafetyNetForThread() {
SafetyNetCloseableRegistry oldRegistry = REGISTRIES.get();
checkState(
null == oldRegistry,
"Found an existing FileSystem safety net for this thread: %s "
                "Found an existing FileSystem safety net for this thread: %s "
                        + "This may indicate an accidental repeated initialization, or a leak of the "
+ "(Inheritable)ThreadLocal through a ThreadPool.",
oldRegistry);
SafetyNetCloseableRegistry newRegistry = new SafetyNetCloseableRegistry();
REGISTRIES.set(newRegistry);
} | 3.68 |
hbase_PrivateCellUtil_compareKeyIgnoresMvcc | /**
 * Compares only the key portion of a cell. It does not include the sequence id/mvcc of the cell.
 * @return an int greater than 0 if left is greater than right, less than 0 if left is less than
 *         right, and 0 if left is equal to right
*/
public static final int compareKeyIgnoresMvcc(CellComparator comparator, Cell left, Cell right) {
return ((CellComparatorImpl) comparator).compare(left, right, true);
} | 3.68 |
zxing_AddressBookParsedResult_getPhoneTypes | /**
 * @return optional descriptions of the type of each phone number. It could be like "HOME", but
 * there is no guaranteed or standard format.
*/
public String[] getPhoneTypes() {
return phoneTypes;
} | 3.68 |
hbase_SyncTable_nextCellInRow | /**
* Returns the next Cell in the current row or null iff none remain.
*/
public Cell nextCellInRow() {
if (currentRowResult == null) {
// nothing left in current row
return null;
}
Cell nextCell = currentRowResult.rawCells()[nextCellInRow];
nextCellInRow++;
if (nextCellInRow == currentRowResult.size()) {
if (results.hasNext()) {
Result result = results.next();
Cell cell = result.rawCells()[0];
if (
Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(),
cell.getRowOffset(), cell.getRowLength())
) {
// result is part of current row
currentRowResult = result;
nextCellInRow = 0;
} else {
// result is part of next row, cache it
nextRowResult = result;
// current row is complete
currentRowResult = null;
}
} else {
// end of data
currentRowResult = null;
}
}
return nextCell;
} | 3.68 |
framework_HasHierarchicalDataProvider_setItems | /**
* Sets the data items of this listing.
* <p>
* The provided items are wrapped into a {@link TreeDataProvider} backed by
* a flat {@link TreeData} structure. The data provider instance is used as
* a parameter for the {@link #setDataProvider(DataProvider)} method. It
* means that the items collection can be accessed later on via
* {@link #getTreeData()}:
*
* <pre>
* <code>
* TreeGrid<String> treeGrid = new TreeGrid<>();
* treeGrid.setItems("a","b");
* ...
*
* TreeData<String> data = treeGrid.getTreeData();
* </code>
* </pre>
* <p>
* The returned {@link TreeData} instance may be used as-is to add, remove
* or modify items in the hierarchy. These modifications to the object are
* not automatically reflected back to the TreeGrid. Items modified should
* be refreshed with {@link HierarchicalDataProvider#refreshItem(Object)}
* and when adding or removing items
* {@link HierarchicalDataProvider#refreshAll()} should be called.
*
* @param items
* the data items to display, not {@code null}
*/
@Override
public default void setItems(@SuppressWarnings("unchecked") T... items) {
Objects.requireNonNull(items, "Given items may not be null");
setItems(Arrays.asList(items));
} | 3.68 |
hadoop_BlockData_getStateString | // Debug helper.
public String getStateString() {
StringBuilder sb = new StringBuilder();
int blockNumber = 0;
while (blockNumber < numBlocks) {
State tstate = getState(blockNumber);
int endBlockNumber = blockNumber;
while ((endBlockNumber < numBlocks) && (getState(endBlockNumber)
== tstate)) {
endBlockNumber++;
}
sb.append(
String.format("[%03d ~ %03d] %s%n", blockNumber, endBlockNumber - 1,
tstate));
blockNumber = endBlockNumber;
}
return sb.toString();
} | 3.68 |
hbase_FileCleanerDelegate_postClean | /**
* Will be called after cleaner run.
*/
default void postClean() {
} | 3.68 |
hadoop_OSSListRequest_v1 | /**
* Restricted constructors to ensure v1 or v2, not both.
* @param request v1 request
* @return new list request container
*/
public static OSSListRequest v1(ListObjectsRequest request) {
return new OSSListRequest(request, null);
} | 3.68 |
hbase_VersionModel_getServerVersion | /** Returns the servlet container version */
@XmlAttribute(name = "Server")
public String getServerVersion() {
return serverVersion;
} | 3.68 |
hbase_Addressing_inetSocketAddress2String | /**
 * Given an InetSocketAddress object, returns a String representation of it. This is a util
 * method for Java 17. The toString() of InetSocketAddress flags an unresolved address with a
 * marker substring, which can lead to unexpected problems. Use this util method to get the
 * string when it is not known whether the input address is resolved or not.
 * @param address address to convert to a "host:port" String.
 * @return the String representation of the given address, like "foo:1234".
*/
public static String inetSocketAddress2String(InetSocketAddress address) {
return address.isUnresolved()
? address.toString().replace("/<unresolved>", "")
: address.toString();
} | 3.68 |
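A small illustrative check of the behaviour described above, assuming a JDK where toString() of an unresolved address contains the "/<unresolved>" marker.
// createUnresolved("foo", 1234).toString() may be "foo/<unresolved>:1234" on newer JDKs;
// inetSocketAddress2String strips the marker and yields "foo:1234".
InetSocketAddress unresolved = InetSocketAddress.createUnresolved("foo", 1234);
String hostPort = Addressing.inetSocketAddress2String(unresolved);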
hmily_HmilyTimer_addRemovalListener | /**
 * Adds a removal listener that is invoked when a cached entry is removed.
 *
 * @param listener the removal listener
*/
public void addRemovalListener(final TimerRemovalListener<V> listener) {
this.timerRemovalListener = listener;
} | 3.68 |
flink_HiveParserCalcitePlanner_genLogicalPlan | // Given an AST, generate and return the RelNode plan. Returns null if nothing needs to be done.
public RelNode genLogicalPlan(HiveParserASTNode ast) throws SemanticException {
LOG.info("Starting generating logical plan");
HiveParserPreCboCtx cboCtx = new HiveParserPreCboCtx();
// change the location of position alias process here
processPositionAlias(ast, semanticAnalyzer.getConf());
if (!semanticAnalyzer.genResolvedParseTree(ast, cboCtx)) {
return null;
}
// flink requires orderBy removed from sub-queries, otherwise it can fail to generate the
// plan
for (String alias : semanticAnalyzer.getQB().getSubqAliases()) {
removeOBInSubQuery(semanticAnalyzer.getQB().getSubqForAlias(alias));
}
HiveParserASTNode queryForCbo = ast;
if (cboCtx.type == HiveParserPreCboCtx.Type.CTAS
|| cboCtx.type == HiveParserPreCboCtx.Type.VIEW) {
queryForCbo = cboCtx.nodeOfInterest; // nodeOfInterest is the query
}
verifyCanHandleAst(queryForCbo, getQB(), semanticAnalyzer.getQueryProperties());
semanticAnalyzer.disableJoinMerge = true;
return logicalPlan();
} | 3.68 |
framework_VCaption_getOwner | /**
 * Returns the connector that this caption belongs to.
 *
 * @return the owner connector
*/
public ComponentConnector getOwner() {
return owner;
} | 3.68 |
flink_ArrowUtils_createRowDataArrowWriter | /** Creates an {@link ArrowWriter} for the specified {@link VectorSchemaRoot}. */
public static ArrowWriter<RowData> createRowDataArrowWriter(
VectorSchemaRoot root, RowType rowType) {
ArrowFieldWriter<RowData>[] fieldWriters =
new ArrowFieldWriter[root.getFieldVectors().size()];
List<FieldVector> vectors = root.getFieldVectors();
for (int i = 0; i < vectors.size(); i++) {
FieldVector vector = vectors.get(i);
vector.allocateNew();
fieldWriters[i] = createArrowFieldWriterForRow(vector, rowType.getTypeAt(i));
}
return new ArrowWriter<>(root, fieldWriters);
} | 3.68 |
framework_StringToShortConverter_getFormat | /**
* Returns the format used by
* {@link #convertToPresentation(Short, Class, Locale)} and
* {@link #convertToModel(String, Class, Locale)}.
*
* @param locale
* The locale to use
* @return A NumberFormat instance
*/
@Override
protected NumberFormat getFormat(Locale locale) {
if (locale == null) {
locale = Locale.getDefault();
}
return NumberFormat.getIntegerInstance(locale);
} | 3.68 |
hbase_MasterWalManager_getFailedServersFromLogFolders | /**
* Inspect the log directory to find dead servers which need recovery work
* @return A set of ServerNames which aren't running but still have WAL files left in file system
* @deprecated With proc-v2, we can record the crash server with procedure store, so do not need
* to scan the wal directory to find out the splitting wal directory any more. Leave
* it here only because {@code RecoverMetaProcedure}(which is also deprecated) uses
* it.
*/
@Deprecated
public Set<ServerName> getFailedServersFromLogFolders() throws IOException {
boolean retrySplitting =
!conf.getBoolean(WALSplitter.SPLIT_SKIP_ERRORS_KEY, WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
Set<ServerName> serverNames = new HashSet<>();
Path logsDirPath = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME);
do {
if (services.isStopped()) {
LOG.warn("Master stopped while trying to get failed servers.");
break;
}
try {
if (!this.fs.exists(logsDirPath)) return serverNames;
FileStatus[] logFolders = CommonFSUtils.listStatus(this.fs, logsDirPath, null);
// Get online servers after getting log folders to avoid log folder deletion of newly
        // checked-in region servers. See HBASE-5916.
Set<ServerName> onlineServers = services.getServerManager().getOnlineServers().keySet();
if (logFolders == null || logFolders.length == 0) {
LOG.debug("No log files to split, proceeding...");
return serverNames;
}
for (FileStatus status : logFolders) {
FileStatus[] curLogFiles = CommonFSUtils.listStatus(this.fs, status.getPath(), null);
if (curLogFiles == null || curLogFiles.length == 0) {
// Empty log folder. No recovery needed
continue;
}
final ServerName serverName =
AbstractFSWALProvider.getServerNameFromWALDirectoryName(status.getPath());
if (null == serverName) {
LOG.warn("Log folder " + status.getPath() + " doesn't look like its name includes a "
+ "region server name; leaving in place. If you see later errors about missing "
+ "write ahead logs they may be saved in this location.");
} else if (!onlineServers.contains(serverName)) {
LOG.info("Log folder " + status.getPath() + " doesn't belong "
+ "to a known region server, splitting");
serverNames.add(serverName);
} else {
LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
}
}
retrySplitting = false;
} catch (IOException ioe) {
LOG.warn("Failed getting failed servers to be recovered.", ioe);
if (!checkFileSystem()) {
LOG.warn("Bad Filesystem, exiting");
Runtime.getRuntime().halt(1);
}
try {
if (retrySplitting) {
Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
}
} catch (InterruptedException e) {
LOG.warn("Interrupted, aborting since cannot return w/o splitting");
Thread.currentThread().interrupt();
retrySplitting = false;
Runtime.getRuntime().halt(1);
}
}
} while (retrySplitting);
return serverNames;
} | 3.68 |
flink_StreamProjection_projectTuple9 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8>
SingleOutputStreamOperator<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> projectTuple9() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> tType =
new TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<IN, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
framework_VLoadingIndicator_getElement | /**
* Returns the root element of the loading indicator.
*
* @return The loading indicator DOM element
*/
public com.google.gwt.user.client.Element getElement() {
if (element == null) {
element = DOM.createDiv();
element.getStyle().setPosition(Position.ABSOLUTE);
getConnection().getUIConnector().getWidget().getElement()
.appendChild(element);
}
return DOM.asOld(element);
} | 3.68 |
hadoop_AzureNativeFileSystemStore_trim | /**
* Trims a suffix/prefix from the given string. For example if
* s is given as "/xy" and toTrim is "/", this method returns "xy"
*/
private static String trim(String s, String toTrim) {
return StringUtils.removeEnd(StringUtils.removeStart(s, toTrim),
toTrim);
} | 3.68 |
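A quick illustration of the commons-lang calls used above, matching the "/xy" example from the comment (assuming org.apache.commons.lang3.StringUtils).
// removeStart("/xy", "/") -> "xy"; removeEnd("xy", "/") -> "xy"
String trimmed = StringUtils.removeEnd(StringUtils.removeStart("/xy", "/"), "/");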
hbase_BlockType_readAndCheck | /**
* Reads a magic record of the length {@link #MAGIC_LENGTH} from the given byte buffer and expects
* it to match this block type.
*/
public void readAndCheck(ByteBuffer in) throws IOException {
byte[] buf = new byte[MAGIC_LENGTH];
in.get(buf);
if (Bytes.compareTo(buf, magic) != 0) {
throw new IOException("Invalid magic: expected " + Bytes.toStringBinary(magic) + ", got "
+ Bytes.toStringBinary(buf));
}
} | 3.68 |
pulsar_PulsarLedgerIdGenerator_formatHalfId | /**
 * Formats half an ID as a 10-character, 0-padded string.
* @param i - 32 bits of the ID to format
* @return a 10-character 0-padded string.
*/
private String formatHalfId(int i) {
return String.format("%010d", i);
} | 3.68 |
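For reference, the "%010d" format simply zero-pads to ten digits; the input value below is arbitrary.
String padded = String.format("%010d", 123); // "0000000123"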
pulsar_BrokerInterceptor_producerCreated | /**
 * Called by the broker when a new producer is created.
*/
default void producerCreated(ServerCnx cnx, Producer producer,
Map<String, String> metadata){
} | 3.68 |
hbase_AccessControlFilter_parseFrom | /**
* @param pbBytes A pb serialized {@link AccessControlFilter} instance
* @return An instance of {@link AccessControlFilter} made from <code>bytes</code>
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray()
*/
public static AccessControlFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
// no implementation, server-side use only
throw new UnsupportedOperationException(
"Serialization not supported. Intended for server-side use only.");
} | 3.68 |
mutate-test-kata_CompanyFixed_employeeWithLargestSalary | /**
* find the employee with the largest salary
* @return the employee with the largest salary
* @throws NoSuchElementException if there are no employees at the company
*/
public EmployeeFixed employeeWithLargestSalary()
{
return this.employees
.stream()
.max(Comparator.comparing(EmployeeFixed::getSalary))
.orElseThrow(NoSuchElementException::new);
} | 3.68 |
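The same Stream.max idiom on plain numbers, showing the empty-stream behaviour; the values and the java.util imports are assumptions for illustration.
List<Integer> salaries = Arrays.asList(30_000, 45_000, 52_000);
int top = salaries.stream()
        .max(Comparator.naturalOrder())
        .orElseThrow(NoSuchElementException::new); // 52_000
// On an empty list, orElseThrow raises NoSuchElementException, mirroring the method above.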
flink_MethodlessRouter_route | /** @return {@code null} if no match */
public RouteResult<T> route(
String uri,
String decodedPath,
Map<String, List<String>> queryParameters,
String[] pathTokens) {
// Optimize: reuse requestPathTokens and pathParams in the loop
Map<String, String> pathParams = new HashMap<>();
for (Entry<PathPattern, T> entry : routes.entrySet()) {
PathPattern pattern = entry.getKey();
if (pattern.match(pathTokens, pathParams)) {
T target = entry.getValue();
return new RouteResult<T>(uri, decodedPath, pathParams, queryParameters, target);
}
// Reset for the next try
pathParams.clear();
}
return null;
} | 3.68 |
hmily_HmilyRepositoryFacade_removeHmilyTransaction | /**
* Remove hmily transaction.
*
* @param transId the trans id
*/
public void removeHmilyTransaction(final Long transId) {
if (hmilyConfig.isPhyDeleted()) {
checkRows(hmilyRepository.removeHmilyTransaction(transId));
} else {
updateHmilyTransactionStatus(transId, HmilyActionEnum.DELETE.getCode());
}
} | 3.68 |
flink_EnvironmentInformation_logEnvironmentInfo | /**
* Logs information about the environment, like code revision, current user, Java version, and
* JVM parameters.
*
* @param log The logger to log the information to.
* @param componentName The component name to mention in the log.
* @param commandLineArgs The arguments accompanying the starting the component.
*/
public static void logEnvironmentInfo(
Logger log, String componentName, String[] commandLineArgs) {
if (log.isInfoEnabled()) {
RevisionInformation rev = getRevisionInformation();
String version = getVersion();
String scalaVersion = getScalaVersion();
String jvmVersion = getJvmVersion();
String[] options = getJvmStartupOptionsArray();
String javaHome = System.getenv("JAVA_HOME");
String inheritedLogs = System.getenv("FLINK_INHERITED_LOGS");
String arch = System.getProperty("os.arch");
long maxHeapMegabytes = getMaxJvmHeapMemory() >>> 20;
if (inheritedLogs != null) {
log.info(
"--------------------------------------------------------------------------------");
log.info(" Preconfiguration: ");
log.info(inheritedLogs);
}
log.info(
"--------------------------------------------------------------------------------");
log.info(
" Starting "
+ componentName
+ " (Version: "
+ version
+ ", Scala: "
+ scalaVersion
+ ", "
+ "Rev:"
+ rev.commitId
+ ", "
+ "Date:"
+ rev.commitDate
+ ")");
log.info(" OS current user: " + System.getProperty("user.name"));
log.info(" Current Hadoop/Kerberos user: " + getHadoopUser());
log.info(" JVM: " + jvmVersion);
log.info(" Arch: " + arch);
log.info(" Maximum heap size: " + maxHeapMegabytes + " MiBytes");
log.info(" JAVA_HOME: " + (javaHome == null ? "(not set)" : javaHome));
String hadoopVersionString = getHadoopVersionString();
if (hadoopVersionString != null) {
log.info(" Hadoop version: " + hadoopVersionString);
} else {
log.info(" No Hadoop Dependency available");
}
if (options.length == 0) {
log.info(" JVM Options: (none)");
} else {
log.info(" JVM Options:");
for (String s : options) {
log.info(" " + s);
}
}
if (commandLineArgs == null || commandLineArgs.length == 0) {
log.info(" Program Arguments: (none)");
} else {
log.info(" Program Arguments:");
for (String s : commandLineArgs) {
if (GlobalConfiguration.isSensitive(s)) {
log.info(
" "
+ GlobalConfiguration.HIDDEN_CONTENT
+ " (sensitive information)");
} else {
log.info(" " + s);
}
}
}
log.info(" Classpath: " + System.getProperty("java.class.path"));
log.info(
"--------------------------------------------------------------------------------");
}
} | 3.68 |
hmily_HmilyRepositoryFacade_removeHmilyParticipant | /**
* Remove hmily participant.
*
* @param participantId the participant id
*/
public void removeHmilyParticipant(final Long participantId) {
if (hmilyConfig.isPhyDeleted()) {
checkRows(hmilyRepository.removeHmilyParticipant(participantId));
} else {
updateHmilyParticipantStatus(participantId, HmilyActionEnum.DELETE.getCode());
}
} | 3.68 |
morf_SchemaUtils_unique | /**
* @see org.alfasoftware.morf.metadata.SchemaUtils.IndexBuilder#unique()
*/
@Override
public IndexBuilder unique() {
return new IndexBuilderImpl(getName(), true, columnNames());
} | 3.68 |
framework_FileDownloader_getFileDownloadResource | /**
* Gets the resource set for download.
*
* @return the resource that will be downloaded if clicking the extended
* component
*/
public Resource getFileDownloadResource() {
return getResource("dl");
} | 3.68 |
hbase_RegionStates_getAssignmentsForBalancer | /**
   * This is an EXPENSIVE clone. Cloning though is the safest thing to do. We can't hand out the
   * original since it can change, and at least the load balancer wants to iterate over this
   * exported list. We need
* to synchronize on regions since all access to this.servers is under a lock on this.regions.
* @return A clone of current open or opening assignments.
*/
public Map<TableName, Map<ServerName, List<RegionInfo>>>
getAssignmentsForBalancer(TableStateManager tableStateManager, List<ServerName> onlineServers) {
final Map<TableName, Map<ServerName, List<RegionInfo>>> result = new HashMap<>();
for (RegionStateNode node : regionsMap.values()) {
// DisableTableProcedure first sets the table state to DISABLED and then force unassigns
// the regions in a loop. The balancer should ignore all regions for tables in DISABLED
// state because even if still currently open we expect them to be offlined very soon.
if (isTableDisabled(tableStateManager, node.getTable())) {
if (LOG.isTraceEnabled()) {
LOG.trace("Ignoring {} because table is disabled", node);
}
continue;
}
// When balancing, we are only interested in OPEN or OPENING regions. These can be
// expected to remain online until the next balancer iteration or unless the balancer
// decides to move it. Regions in other states are not eligible for balancing, because
// they are closing, splitting, merging, or otherwise already in transition.
if (!node.isInState(State.OPEN, State.OPENING)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Ignoring {} because region is not OPEN or OPENING", node);
}
continue;
}
Map<ServerName, List<RegionInfo>> tableResult =
result.computeIfAbsent(node.getTable(), t -> new HashMap<>());
final ServerName serverName = node.getRegionLocation();
// A region in ONLINE or OPENING state should have a location.
if (serverName == null) {
LOG.warn("Skipping, no server for {}", node);
continue;
}
List<RegionInfo> serverResult =
tableResult.computeIfAbsent(serverName, s -> new ArrayList<>());
serverResult.add(node.getRegionInfo());
}
// Add online servers with no assignment for the table.
for (Map<ServerName, List<RegionInfo>> table : result.values()) {
for (ServerName serverName : onlineServers) {
table.computeIfAbsent(serverName, key -> new ArrayList<>());
}
}
return result;
} | 3.68 |
framework_AbstractOrderedLayoutConnector_getTooltipInfo | /*
* (non-Javadoc)
*
* @see
* com.vaadin.client.ui.AbstractComponentConnector#getTooltipInfo(com.google
* .gwt.dom.client.Element)
*/
@Override
public TooltipInfo getTooltipInfo(
com.google.gwt.dom.client.Element element) {
if (element != getWidget().getElement()) {
Slot slot = WidgetUtil.findWidget(element, Slot.class);
if (slot != null && slot.getCaptionElement() != null
&& slot.getParent() == getWidget()
&& slot.getCaptionElement().isOrHasChild(element)) {
ComponentConnector connector = Util
.findConnectorFor(slot.getWidget());
if (connector != null) {
return connector.getTooltipInfo(element);
}
}
}
return super.getTooltipInfo(element);
} | 3.68 |
Activiti_SimpleContext_setELResolver | /**
* Set our resolver.
*
* @param resolver
*/
public void setELResolver(ELResolver resolver) {
this.resolver = resolver;
} | 3.68 |
hadoop_ProxyUtils_sendRedirect | /**
* Handle redirects with a status code that can in future support verbs other
* than GET, thus supporting full REST functionality.
* <p>
* The target URL is included in the redirect text returned
* <p>
* At the end of this method, the output stream is closed.
*
* @param request request (hence: the verb and any other information
* relevant to a redirect)
* @param response the response
* @param target the target URL -unencoded
*
*/
public static void sendRedirect(HttpServletRequest request,
HttpServletResponse response,
String target)
throws IOException {
LOG.debug("Redirecting {} {} to {}",
request.getMethod(),
request.getRequestURI(),
target);
String location = response.encodeRedirectURL(target);
response.setStatus(HttpServletResponse.SC_FOUND);
response.setHeader(LOCATION, location);
response.setContentType(MimeType.HTML);
PrintWriter writer = response.getWriter();
Page p = new Page(writer);
p.html()
.head().title("Moved").__()
.body()
.h1("Moved")
.div()
.__("Content has moved ")
.a(location, "here").__()
.__().__();
writer.close();
} | 3.68 |
querydsl_SQLExpressions_addMinutes | /**
* Add the given amount of minutes to the date
*
* @param date datetime
* @param minutes minutes to add
* @return converted datetime
*/
public static <D extends Comparable> DateTimeExpression<D> addMinutes(DateTimeExpression<D> date, int minutes) {
return Expressions.dateTimeOperation(date.getType(), Ops.DateTimeOps.ADD_MINUTES, date, ConstantImpl.create(minutes));
} | 3.68 |
framework_AbstractInMemoryContainer_getFirstVisibleItem | /**
* Returns the item id of the first visible item after filtering. 'Null' is
* returned if there is no visible items.
* <p>
* For internal use only.
*
* @since 7.4
*
* @return item id of the first visible item
*/
protected ITEMIDTYPE getFirstVisibleItem() {
if (!getVisibleItemIds().isEmpty()) {
return getVisibleItemIds().get(0);
}
return null;
} | 3.68 |
hudi_CompactionUtils_buildHoodieCompactionOperation | /**
* Build Avro generated Compaction operation payload from compaction operation POJO for serialization.
*/
public static HoodieCompactionOperation buildHoodieCompactionOperation(CompactionOperation op) {
return HoodieCompactionOperation.newBuilder().setFileId(op.getFileId()).setBaseInstantTime(op.getBaseInstantTime())
.setPartitionPath(op.getPartitionPath())
.setBootstrapFilePath(op.getBootstrapFilePath().orElse(null))
.setDataFilePath(op.getDataFileName().isPresent() ? op.getDataFileName().get() : null)
.setDeltaFilePaths(op.getDeltaFileNames()).setMetrics(op.getMetrics()).build();
} | 3.68 |
dubbo_ServiceAnnotationPostProcessor_buildServiceBeanDefinition | /**
 * Build the {@link AbstractBeanDefinition Bean Definition}.
 *
 * @param serviceAnnotationAttributes the resolved attributes of the service annotation
 * @param serviceInterface the name of the exported service interface
 * @param refServiceBeanName the bean name of the annotated service implementation
 * @return the {@link ServiceBean} bean definition
* @since 2.7.3
*/
private AbstractBeanDefinition buildServiceBeanDefinition(
Map<String, Object> serviceAnnotationAttributes, String serviceInterface, String refServiceBeanName) {
BeanDefinitionBuilder builder = rootBeanDefinition(ServiceBean.class);
AbstractBeanDefinition beanDefinition = builder.getBeanDefinition();
beanDefinition.setAutowireMode(AbstractBeanDefinition.AUTOWIRE_CONSTRUCTOR);
MutablePropertyValues propertyValues = beanDefinition.getPropertyValues();
String[] ignoreAttributeNames = ObjectUtils.of(
"provider",
"monitor",
"application",
"module",
"registry",
"protocol",
"methods",
"interfaceName",
"parameters",
"executor");
propertyValues.addPropertyValues(
new AnnotationPropertyValuesAdapter(serviceAnnotationAttributes, environment, ignoreAttributeNames));
// set config id, for ConfigManager cache key
// builder.addPropertyValue("id", beanName);
// References "ref" property to annotated-@Service Bean
addPropertyReference(builder, "ref", refServiceBeanName);
// Set interface
builder.addPropertyValue("interface", serviceInterface);
// Convert parameters into map
builder.addPropertyValue("parameters", DubboAnnotationUtils.convertParameters((String[])
serviceAnnotationAttributes.get("parameters")));
// Add methods parameters
List<MethodConfig> methodConfigs = convertMethodConfigs(serviceAnnotationAttributes.get("methods"));
if (!methodConfigs.isEmpty()) {
builder.addPropertyValue("methods", methodConfigs);
}
// convert provider to providerIds
String providerConfigId = (String) serviceAnnotationAttributes.get("provider");
if (StringUtils.hasText(providerConfigId)) {
addPropertyValue(builder, "providerIds", providerConfigId);
}
// Convert registry[] to registryIds
String[] registryConfigIds = (String[]) serviceAnnotationAttributes.get("registry");
if (registryConfigIds != null && registryConfigIds.length > 0) {
resolveStringArray(registryConfigIds);
builder.addPropertyValue("registryIds", StringUtils.join(registryConfigIds, ','));
}
// Convert protocol[] to protocolIds
String[] protocolConfigIds = (String[]) serviceAnnotationAttributes.get("protocol");
if (protocolConfigIds != null && protocolConfigIds.length > 0) {
resolveStringArray(protocolConfigIds);
builder.addPropertyValue("protocolIds", StringUtils.join(protocolConfigIds, ','));
}
        // TODO Could we ignore these attributes: application/monitor/module? Use global config
// monitor reference
String monitorConfigId = (String) serviceAnnotationAttributes.get("monitor");
if (StringUtils.hasText(monitorConfigId)) {
addPropertyReference(builder, "monitor", monitorConfigId);
}
// module reference
String moduleConfigId = (String) serviceAnnotationAttributes.get("module");
if (StringUtils.hasText(moduleConfigId)) {
addPropertyReference(builder, "module", moduleConfigId);
}
String executorBeanName = (String) serviceAnnotationAttributes.get("executor");
if (StringUtils.hasText(executorBeanName)) {
addPropertyReference(builder, "executor", executorBeanName);
}
return builder.getBeanDefinition();
} | 3.68 |
flink_HandlerRequest_create | /**
* Creates a new {@link HandlerRequest}. The given {@link MessageParameters} are expected to be
* resolved.
*/
@VisibleForTesting
public static <R extends RequestBody, M extends MessageParameters> HandlerRequest<R> create(
R requestBody, M messageParameters, Collection<File> uploadedFiles) {
return new HandlerRequest<R>(
requestBody,
mapParameters(messageParameters.getPathParameters()),
mapParameters(messageParameters.getQueryParameters()),
uploadedFiles);
} | 3.68 |
rocketmq-connect_ColumnDefinition_isAutoIncrement | /**
* Indicates whether the column is automatically numbered.
*
* @return <code>true</code> if so; <code>false</code> otherwise
*/
public boolean isAutoIncrement() {
return autoIncremented;
} | 3.68 |
hadoop_DomainNameResolverFactory_newInstance | /**
* This function gets the instance based on the config.
*
* @param conf Configuration
* @param configKey config key name.
* @return Domain name resolver.
*/
public static DomainNameResolver newInstance(
Configuration conf, String configKey) {
Class<? extends DomainNameResolver> resolverClass = conf.getClass(
configKey,
DNSDomainNameResolver.class,
DomainNameResolver.class);
return ReflectionUtils.newInstance(resolverClass, conf);
} | 3.68 |
framework_QuerySortOrder_getSorted | /**
* Gets sorting information.
*
* @return sorting entity, usually field id
*/
@Override
public String getSorted() {
return super.getSorted();
} | 3.68 |
framework_IndexedContainer_addItemProperty | /**
* IndexedContainerItem does not support adding new properties. Add
* properties at container level. See
* {@link IndexedContainer#addContainerProperty(Object, Class, Object)}
*
* @see Item#addProperty(Object, Property)
*/
@Override
public boolean addItemProperty(Object id, Property property)
throws UnsupportedOperationException {
throw new UnsupportedOperationException("Indexed container item "
+ "does not support adding new properties");
} | 3.68 |
hadoop_RegistryPathStatus_hashCode | /**
* The hash code is derived from the path.
* @return hash code for storing the path in maps.
*/
@Override
public int hashCode() {
return path != null ? path.hashCode() : 0;
} | 3.68 |
hbase_MetricRegistryInfo_getMetricsJmxContext | /**
* Get the name of the context in JMX that this source will be exposed through. This is in
* ObjectName format. With the default context being Hadoop -> HBase
*/
public String getMetricsJmxContext() {
return metricsJmxContext;
} | 3.68 |
hbase_ProcedureUtil_convertToProcedure | /**
* Helper to convert the protobuf procedure.
* <p/>
* Used by ProcedureStore implementations.
* <p/>
* TODO: OPTIMIZATION: some of the field never change during the execution (e.g. className,
* procId, parentId, ...). We can split in 'data' and 'state', and the store may take advantage of
* it by storing the data only on insert().
*/
public static Procedure<?> convertToProcedure(ProcedureProtos.Procedure proto)
throws IOException {
// Procedure from class name
Procedure<?> proc = newProcedure(proto.getClassName());
// set fields
proc.setProcId(proto.getProcId());
proc.setState(proto.getState());
proc.setSubmittedTime(proto.getSubmittedTime());
proc.setLastUpdate(proto.getLastUpdate());
if (proto.hasParentId()) {
proc.setParentProcId(proto.getParentId());
}
if (proto.hasOwner()) {
proc.setOwner(proto.getOwner());
}
if (proto.hasTimeout()) {
proc.setTimeout(proto.getTimeout());
}
if (proto.getStackIdCount() > 0) {
proc.setStackIndexes(proto.getStackIdList());
}
if (proto.hasException()) {
assert proc.getState() == ProcedureProtos.ProcedureState.FAILED
|| proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK
: "The procedure must be failed (waiting to rollback) or rolledback";
proc.setFailure(RemoteProcedureException.fromProto(proto.getException()));
}
if (proto.hasResult()) {
proc.setResult(proto.getResult().toByteArray());
}
if (proto.getNonce() != HConstants.NO_NONCE) {
proc.setNonceKey(new NonceKey(proto.getNonceGroup(), proto.getNonce()));
}
if (proto.getLocked()) {
proc.lockedWhenLoading();
}
if (proto.getBypass()) {
proc.bypass(null);
}
ProcedureStateSerializer serializer = null;
if (proto.getStateMessageCount() > 0) {
serializer = new StateSerializer(proto.toBuilder());
} else if (proto.hasStateData()) {
InputStream inputStream = proto.getStateData().newInput();
serializer = new CompatStateSerializer(inputStream);
}
if (serializer != null) {
proc.deserializeStateData(serializer);
}
return proc;
} | 3.68 |
hadoop_EntityTypeReader_readEntityTypes | /**
* Reads a set of timeline entity types from the HBase storage for the given
* context.
*
* @param hbaseConf HBase Configuration.
* @param conn HBase Connection.
* @return a set of <cite>TimelineEntity</cite> objects, with only type field
* set.
* @throws IOException if any exception is encountered while reading entities.
*/
public Set<String> readEntityTypes(Configuration hbaseConf,
Connection conn) throws IOException {
validateParams();
augmentParams(hbaseConf, conn);
Set<String> types = new TreeSet<>();
TimelineReaderContext context = getContext();
EntityRowKeyPrefix prefix = new EntityRowKeyPrefix(context.getClusterId(),
context.getUserId(), context.getFlowName(), context.getFlowRunId(),
context.getAppId());
byte[] currRowKey = prefix.getRowKeyPrefix();
byte[] nextRowKey = prefix.getRowKeyPrefix();
nextRowKey[nextRowKey.length - 1]++;
FilterList typeFilterList = new FilterList();
typeFilterList.addFilter(new FirstKeyOnlyFilter());
typeFilterList.addFilter(new KeyOnlyFilter());
typeFilterList.addFilter(new PageFilter(1));
LOG.debug("FilterList created for scan is - {}", typeFilterList);
int counter = 0;
while (true) {
try (ResultScanner results =
getResult(hbaseConf, conn, typeFilterList, currRowKey, nextRowKey)) {
TimelineEntity entity = parseEntityForType(results.next());
if (entity == null) {
break;
}
++counter;
if (!types.add(entity.getType())) {
LOG.warn("Failed to add type " + entity.getType()
+ " to the result set because there is a duplicated copy. ");
}
String currType = entity.getType();
if (LOG.isDebugEnabled()) {
LOG.debug("Current row key: " + Arrays.toString(currRowKey));
LOG.debug("New entity type discovered: " + currType);
}
currRowKey = getNextRowKey(prefix.getRowKeyPrefix(), currType);
}
}
LOG.debug("Scanned {} records for {} types", counter, types.size());
return types;
} | 3.68 |
hbase_Mutation_getCellList | /**
* Creates an empty list if one doesn't exist for the given column family or else it returns the
* associated list of Cell objects.
* @param family column family
* @return a list of Cell objects, returns an empty list if one doesn't exist.
*/
List<Cell> getCellList(byte[] family) {
List<Cell> list = getFamilyCellMap().get(family);
if (list == null) {
list = new ArrayList<>();
getFamilyCellMap().put(family, list);
}
return list;
} | 3.68 |
hibernate-validator_NodeImpl_includeTypeParameterInformation | // TODO: this is used to reduce the number of differences until we agree on the string representation
// it introduces some inconsistent behavior e.g. you get '<V>' for a Multimap but not for a Map
private static boolean includeTypeParameterInformation(Class<?> containerClass, Integer typeArgumentIndex) {
if ( containerClass == null || typeArgumentIndex == null ) {
return false;
}
if ( containerClass.getTypeParameters().length < 2 ) {
return false;
}
if ( Map.class.isAssignableFrom( containerClass ) && typeArgumentIndex == 1 ) {
return false;
}
return true;
} | 3.68 |
flink_Configuration_keySet | /**
* Returns the keys of all key/value pairs stored inside this configuration object.
*
* @return the keys of all key/value pairs stored inside this configuration object
*/
public Set<String> keySet() {
synchronized (this.confData) {
return new HashSet<>(this.confData.keySet());
}
} | 3.68 |
flink_TypeExtractor_getTypeInfoFactory | /** Returns the type information factory for an annotated field. */
@Internal
@SuppressWarnings("unchecked")
public static <OUT> TypeInfoFactory<OUT> getTypeInfoFactory(Field field) {
if (!isClassType(field.getType()) || !field.isAnnotationPresent(TypeInfo.class)) {
return null;
}
Class<?> factoryClass = field.getAnnotation(TypeInfo.class).value();
// check for valid factory class
if (!TypeInfoFactory.class.isAssignableFrom(factoryClass)) {
throw new InvalidTypesException(
"TypeInfo annotation does not specify a valid TypeInfoFactory.");
}
return (TypeInfoFactory<OUT>) InstantiationUtil.instantiate(factoryClass);
} | 3.68 |
pulsar_ReaderConfiguration_setCryptoFailureAction | /**
* Sets the ConsumerCryptoFailureAction to the value specified.
*
* @param action
* The action to take when the decoding fails
*/
public void setCryptoFailureAction(ConsumerCryptoFailureAction action) {
conf.setCryptoFailureAction(action);
} | 3.68 |
hbase_RegionReplicationFlushRequester_recordFlush | /**
* Record that we have already finished a flush with the given {@code sequenceId}.
* <p/>
* We can cancel the pending flush request if the failed sequence id is less than the given
* {@code sequenceId}.
*/
synchronized void recordFlush(long sequenceId) {
this.lastFlushedSequenceId = sequenceId;
// cancel the pending flush request if it is necessary, i.e, we have already finished a flush
// with higher sequence id.
if (sequenceId > pendingFlushRequestSequenceId && pendingFlushRequest != null) {
pendingFlushRequest.cancel();
pendingFlushRequest = null;
}
} | 3.68 |
flink_KeyGroupRange_getEndKeyGroup | /** @return The last key-group in the range. */
public int getEndKeyGroup() {
return endKeyGroup;
} | 3.68 |
flink_DefaultRollingPolicy_withRolloverInterval | /**
* Sets the max time a part file can stay open before having to roll. The frequency at which
* this is checked is controlled by the {@link
* org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.RowFormatBuilder#withBucketCheckInterval(long)}
* setting.
*
* @param interval the desired rollover interval.
*/
public DefaultRollingPolicy.PolicyBuilder withRolloverInterval(final Duration interval) {
Preconditions.checkNotNull(interval, "Rolling policy rollover interval cannot be null");
return new PolicyBuilder(partSize, interval.toMillis(), inactivityInterval);
} | 3.68 |
hadoop_ZKDelegationTokenSecretManagerImpl_rebuildTokenCache | /**
* This function will rebuild local token cache from zk storage.
* It is first called when the secret manager is initialized and
* then regularly at a configured interval.
*
* @param initial whether this is called during initialization
* @throws IOException
*/
private void rebuildTokenCache(boolean initial) throws IOException {
localTokenCache.clear();
// Use bare zookeeper client to get all children since curator will
// wrap the same API with a sorting process. This is time consuming given
// millions of tokens
List<String> zkTokens;
try {
zkTokens = zookeeper.getChildren(TOKEN_PATH, false);
} catch (KeeperException | InterruptedException e) {
throw new IOException("Tokens cannot be fetched from path "
+ TOKEN_PATH, e);
}
byte[] data;
for (String tokenPath : zkTokens) {
try {
data = zkClient.getData().forPath(
ZK_DTSM_TOKENS_ROOT + "/" + tokenPath);
} catch (KeeperException.NoNodeException e) {
LOG.debug("No node in path [" + tokenPath + "]");
continue;
} catch (Exception ex) {
throw new IOException(ex);
}
// Store data to currentTokenMap
AbstractDelegationTokenIdentifier ident = processTokenAddOrUpdate(data);
// Store data to localTokenCache for sync
localTokenCache.add(ident);
}
if (!initial) {
// Sync zkTokens with local cache, specifically
// 1) add/update tokens to local cache from zk, which is done through
// processTokenAddOrUpdate above
// 2) remove tokens in local cache but not in zk anymore
for (AbstractDelegationTokenIdentifier ident : currentTokens.keySet()) {
if (!localTokenCache.contains(ident)) {
currentTokens.remove(ident);
}
}
}
syncTokenOwnerStats();
} | 3.68 |
hbase_MasterCoprocessorHost_preTruncateRegion | /**
* Invoked just before calling the truncate region procedure
* @param regionInfo region being truncated
*/
public void preTruncateRegion(RegionInfo regionInfo) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
@Override
public void call(MasterObserver observer) {
observer.preTruncateRegion(this, regionInfo);
}
});
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperations1 | /**
* @return expected SQL for math operation 1
*/
protected String expectedSqlForMathOperations1() {
return "a / b + c";
} | 3.68 |
flink_DataStream_keyBy | /**
* Partitions the operator state of a {@link DataStream} using field expressions. A field
* expression is either the name of a public field or a getter method with parentheses of the
* {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in
* {@code "field1.getInnerField2()" }.
*
* @deprecated Use {@link DataStream#keyBy(KeySelector)}.
* @param fields One or more field expressions on which the state of the {@link DataStream}
* operators will be partitioned.
* @return The {@link DataStream} with partitioned state (i.e. KeyedStream)
*/
@Deprecated
public KeyedStream<T, Tuple> keyBy(String... fields) {
return keyBy(new Keys.ExpressionKeys<>(fields, getType()));
} | 3.68 |
flink_HandlerRequestUtils_fromRequestBodyOrQueryParameter | /**
* Returns {@code requestValue} if it is not null, otherwise returns the query parameter value
* if it is not null, otherwise returns the default value.
*/
public static <T> T fromRequestBodyOrQueryParameter(
T requestValue,
SupplierWithException<T, RestHandlerException> queryParameterExtractor,
T defaultValue,
Logger log)
throws RestHandlerException {
if (requestValue != null) {
return requestValue;
} else {
T queryParameterValue = queryParameterExtractor.get();
if (queryParameterValue != null) {
log.warn(
"Configuring the job submission via query parameters is deprecated."
+ " Please migrate to submitting a JSON request instead.");
return queryParameterValue;
} else {
return defaultValue;
}
}
} | 3.68 |
hbase_HBackupFileSystem_getLogBackupDir | /**
* Given the backup root dir and the backup id, return the log file location for an incremental
* backup.
* @param backupRootDir backup root directory
* @param backupId backup id
 * @return logBackupDir, e.g. ".../user/biadmin/backup/backup_1396650096738/WALs"
*/
public static String getLogBackupDir(String backupRootDir, String backupId) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ HConstants.HREGION_LOGDIR_NAME;
} | 3.68 |
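A quick sketch of the path this helper builds, using illustrative arguments and assuming the hbase-backup artifact is on the classpath; HConstants.HREGION_LOGDIR_NAME resolves to "WALs" in recent HBase releases.

import org.apache.hadoop.hbase.backup.HBackupFileSystem;

public class LogBackupDirExample {
    public static void main(String[] args) {
        // The root directory and backup id are illustrative values.
        String logDir = HBackupFileSystem.getLogBackupDir("/user/biadmin/backup", "backup_1396650096738");
        System.out.println(logDir); // /user/biadmin/backup/backup_1396650096738/WALs
    }
}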
morf_SchemaChangeSequence_addTable | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#addTable(org.alfasoftware.morf.metadata.Table)
*/
@Override
public void addTable(Table definition) {
// track added tables...
tableAdditions.add(definition.getName());
AddTable addTable = new AddTable(definition);
visitor.visit(addTable);
schemaAndDataChangeVisitor.visit(addTable);
} | 3.68 |
pulsar_AdminResource_validateBundleOwnership | // This is a stub method for Mockito
@Override
protected void validateBundleOwnership(String property, String cluster, String namespace, boolean authoritative,
boolean readOnly, NamespaceBundle bundle) {
super.validateBundleOwnership(property, cluster, namespace, authoritative, readOnly, bundle);
} | 3.68 |
hbase_RpcServer_call | /**
   * This is a server-side method, invoked over RPC. On success the returned response carries the
   * protobuf response payload. On failure, the exception name and the stack trace are returned in
   * the protobuf response.
*/
@Override
public Pair<Message, CellScanner> call(RpcCall call, MonitoredRPCHandler status)
throws IOException {
try {
MethodDescriptor md = call.getMethod();
Message param = call.getParam();
status.setRPC(md.getName(), new Object[] { param }, call.getReceiveTime());
// TODO: Review after we add in encoded data blocks.
status.setRPCPacket(param);
status.resume("Servicing call");
// get an instance of the method arg type
HBaseRpcController controller = new HBaseRpcControllerImpl(call.getCellScanner());
controller.setCallTimeout(call.getTimeout());
Message result = call.getService().callBlockingMethod(md, controller, param);
long receiveTime = call.getReceiveTime();
long startTime = call.getStartTime();
long endTime = EnvironmentEdgeManager.currentTime();
int processingTime = (int) (endTime - startTime);
int qTime = (int) (startTime - receiveTime);
int totalTime = (int) (endTime - receiveTime);
if (LOG.isTraceEnabled()) {
LOG.trace(
"{}, response: {}, receiveTime: {}, queueTime: {}, processingTime: {}, totalTime: {}",
CurCall.get().toString(), TextFormat.shortDebugString(result),
CurCall.get().getReceiveTime(), qTime, processingTime, totalTime);
}
// Use the raw request call size for now.
long requestSize = call.getSize();
long responseSize = result.getSerializedSize();
long responseBlockSize = call.getBlockBytesScanned();
if (call.isClientCellBlockSupported()) {
// Include the payload size in HBaseRpcController
responseSize += call.getResponseCellSize();
}
metrics.dequeuedCall(qTime);
metrics.processedCall(processingTime);
metrics.totalCall(totalTime);
metrics.receivedRequest(requestSize);
metrics.sentResponse(responseSize);
// log any RPC responses that are slower than the configured warn
// response time or larger than configured warning size
boolean tooSlow = isTooSlow(call, processingTime);
boolean tooLarge = isTooLarge(call, responseSize, responseBlockSize);
if (tooSlow || tooLarge) {
final String userName = call.getRequestUserName().orElse(StringUtils.EMPTY);
      // when tagging, we let TooLarge trump TooSlow to keep output simple
// note that large responses will often also be slow.
logResponse(param, md.getName(), md.getName() + "(" + param.getClass().getName() + ")",
tooLarge, tooSlow, status.getClient(), startTime, processingTime, qTime, responseSize,
responseBlockSize, userName);
if (this.namedQueueRecorder != null && this.isOnlineLogProviderEnabled) {
// send logs to ring buffer owned by slowLogRecorder
final String className =
server == null ? StringUtils.EMPTY : server.getClass().getSimpleName();
this.namedQueueRecorder.addRecord(new RpcLogDetails(call, param, status.getClient(),
responseSize, responseBlockSize, className, tooSlow, tooLarge));
}
}
return new Pair<>(result, controller.cellScanner());
} catch (Throwable e) {
// The above callBlockingMethod will always return a SE. Strip the SE wrapper before
// putting it on the wire. Its needed to adhere to the pb Service Interface but we don't
// need to pass it over the wire.
if (e instanceof ServiceException) {
if (e.getCause() == null) {
LOG.debug("Caught a ServiceException with null cause", e);
} else {
e = e.getCause();
}
}
// increment the number of requests that were exceptions.
metrics.exception(e);
if (e instanceof LinkageError) throw new DoNotRetryIOException(e);
if (e instanceof IOException) throw (IOException) e;
LOG.error("Unexpected throwable object ", e);
throw new IOException(e.getMessage(), e);
}
} | 3.68 |
framework_Resolution_getResolutionsHigherOrEqualTo | /**
 * Returns the resolutions that are higher than or equal to the given
 * resolution, starting from the given resolution. In other words, passing
 * DAY to this method returns DAY, MONTH, YEAR.
*
* @param r
* The resolution to start from
* @return An iterable for the resolutions higher or equal to r
*/
public static Iterable<Resolution> getResolutionsHigherOrEqualTo(
Resolution r) {
List<Resolution> resolutions = new ArrayList<Resolution>();
Resolution[] values = Resolution.values();
for (int i = r.ordinal(); i < values.length; i++) {
resolutions.add(values[i]);
}
return resolutions;
} | 3.68 |
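A small usage sketch for the method above; it assumes the Resolution enum declares its constants in ascending order (SECOND, MINUTE, HOUR, DAY, MONTH, YEAR), which is what the javadoc example implies.

// Iterating from DAY upwards prints DAY, MONTH and YEAR, in that order.
for (Resolution r : Resolution.getResolutionsHigherOrEqualTo(Resolution.DAY)) {
    System.out.println(r);
}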
flink_MessageParameter_resolveFromString | /**
* Resolves this parameter for the given string value representation.
*
* @param value string representation of value to resolve this parameter with
*/
public final void resolveFromString(String value) throws ConversionException {
resolve(convertFromString(value));
} | 3.68 |
hbase_ScannerContext_hasTimeLimit | /** Returns true if the time limit can be enforced in the checker's scope */
boolean hasTimeLimit(LimitScope checkerScope) {
return limits.canEnforceTimeLimitFromScope(checkerScope)
&& (limits.getTime() > 0 || returnImmediately);
} | 3.68 |
flink_IterableUtils_toStream | /**
* Convert the given {@link Iterable} to a {@link Stream}.
*
* @param iterable to convert to a stream
* @param <E> type of the elements of the iterable
* @return stream converted from the given {@link Iterable}
*/
public static <E> Stream<E> toStream(Iterable<E> iterable) {
checkNotNull(iterable);
return iterable instanceof Collection
? ((Collection<E>) iterable).stream()
: StreamSupport.stream(iterable.spliterator(), false);
} | 3.68 |
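A short, self-contained sketch of the conversion above, assuming the utility lives at org.apache.flink.util.IterableUtils.

import java.util.Arrays;
import org.apache.flink.util.IterableUtils;

public class ToStreamExample {
    public static void main(String[] args) {
        Iterable<String> names = Arrays.asList("ada", "grace", "alan");
        // Lists are Collections, so this takes the direct stream() path; other
        // Iterables fall back to StreamSupport.stream(spliterator, false).
        long startsWithA = IterableUtils.toStream(names)
                .filter(n -> n.startsWith("a"))
                .count();
        System.out.println(startsWithA); // 2
    }
}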
flink_ResultPartitionType_isBlockingOrBlockingPersistentResultPartition | /**
     * {@link #isBlockingOrBlockingPersistentResultPartition()} returns whether this is the
     * {@link #BLOCKING} or {@link #BLOCKING_PERSISTENT} result partition type.
     *
     * <p>This method is suitable for checks that depend on the specific implementation of
     * {@link ResultPartitionType}.
     *
     * <p>It is not related to data consumption or partition release. For partition-release logic
     * use {@link #isReleaseByScheduler()} instead, and for the consumption type use
     * {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead.
*/
public boolean isBlockingOrBlockingPersistentResultPartition() {
return this == BLOCKING || this == BLOCKING_PERSISTENT;
} | 3.68 |
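A trivial check illustrating the predicate above; BLOCKING and BLOCKING_PERSISTENT come from the javadoc, while PIPELINED is referenced as an example of a non-blocking type.

// True only for the two blocking variants, false for every other partition type.
boolean blockingLike = ResultPartitionType.BLOCKING.isBlockingOrBlockingPersistentResultPartition();  // true
boolean pipelined = ResultPartitionType.PIPELINED.isBlockingOrBlockingPersistentResultPartition();    // false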
flink_ReOpenableMutableHashTable_storeInitialHashTable | /**
     * This method stores the initial hash table's contents on disk if the hash join needs the
     * memory for further partition processing. The initial hash table is rebuilt before a new
     * secondary input is opened.
*
* <p>For the sake of simplicity we iterate over all in-memory elements and store them in one
* file. The file is hashed into memory upon opening a new probe input.
*
* @throws IOException
*/
void storeInitialHashTable() throws IOException {
if (spilled) {
            return; // we create the initialHashTable only once; later calls are caused by deeper
            // recursion levels
}
spilled = true;
for (int partIdx = 0; partIdx < initialPartitions.size(); partIdx++) {
final ReOpenableHashPartition<BT, PT> p =
(ReOpenableHashPartition<BT, PT>) initialPartitions.get(partIdx);
if (p.isInMemory()) { // write memory resident partitions to disk
this.writeBehindBuffersAvailable +=
p.spillInMemoryPartition(
spilledInMemoryPartitions.next(), ioManager, writeBehindBuffers);
}
}
} | 3.68 |
graphhopper_PbfFieldDecoder_decodeString | /**
     * Decodes a raw string id into a String.
     * <p>
     *
     * @param rawString the index of the string within the PBF string table.
     * @return the decoded String.
*/
public String decodeString(int rawString) {
return strings[rawString];
} | 3.68 |
framework_DragSourceExtensionConnector_onDragStart | /**
     * Event handler for the {@code dragstart} event. Called when a {@code
     * dragstart} event occurs.
*
* @param event
* browser event to be handled
*/
protected void onDragStart(Event event) {
// Convert elemental event to have access to dataTransfer
NativeEvent nativeEvent = (NativeEvent) event;
// Do not allow drag starts from native Android Chrome, since it doesn't
// work properly (doesn't fire dragend reliably)
if (isAndoidChrome() && isNativeDragEvent(nativeEvent)) {
event.preventDefault();
event.stopPropagation();
return;
}
// Set effectAllowed parameter
if (getState().effectAllowed != null) {
setEffectAllowed(nativeEvent.getDataTransfer(),
getState().effectAllowed.getValue());
}
// Set drag image
setDragImage(nativeEvent);
// Create drag data
Map<String, String> dataMap = createDataTransferData(nativeEvent);
if (dataMap != null) {
// Always set something as the text data, or DnD won't work in FF !
dataMap.putIfAbsent(DragSourceState.DATA_TYPE_TEXT, "");
if (!BrowserInfo.get().isIE11()) {
// Set data to the event's data transfer
dataMap.forEach((type, data) -> nativeEvent.getDataTransfer()
.setData(type, data));
} else {
// IE11 accepts only data with type "text"
nativeEvent.getDataTransfer().setData(
DragSourceState.DATA_TYPE_TEXT,
dataMap.get(DragSourceState.DATA_TYPE_TEXT));
}
// Set style to indicate the element being dragged
addDraggedStyle(nativeEvent);
// Initiate firing server side dragstart event when there is a
// DragStartListener attached on the server side
if (hasEventListener(DragSourceState.EVENT_DRAGSTART)) {
sendDragStartEventToServer(nativeEvent);
}
} else {
// If returned data map is null, cancel drag event
nativeEvent.preventDefault();
}
// Stop event bubbling
nativeEvent.stopPropagation();
} | 3.68 |