name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
flink_SegmentsUtil_copyFromBytes | /**
* Copy target segments from source byte[].
*
* @param segments target segments.
* @param offset target segments offset.
* @param bytes source byte[].
* @param bytesOffset source byte[] offset.
* @param numBytes the number of bytes to copy.
*/
public static void copyFromBytes(
MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
if (segments.length == 1) {
segments[0].put(offset, bytes, bytesOffset, numBytes);
} else {
copyMultiSegmentsFromBytes(segments, offset, bytes, bytesOffset, numBytes);
}
} | 3.68 |
flink_CopyOnWriteSkipListStateMapSnapshot_getSnapshotVersion | /** Returns the internal version of the state map when this snapshot was created. */
int getSnapshotVersion() {
return snapshotVersion;
} | 3.68 |
dubbo_RpcContext_getResponse | /**
* Get the response object of the underlying RPC protocol, e.g. HttpServletResponse
*
* @return null if the underlying protocol doesn't provide support for getting the response or the response is not of the specified type
*/
@SuppressWarnings("unchecked")
public <T> T getResponse(Class<T> clazz) {
return newRpcContext.getResponse(clazz);
} | 3.68 |
flink_HiveParserUnparseTranslator_addIdentifierTranslation | /** Register a translation for an identifier. */
public void addIdentifierTranslation(HiveParserASTNode identifier) {
if (!enabled) {
return;
}
assert (identifier.getToken().getType() == HiveASTParser.Identifier);
String replacementText = identifier.getText();
replacementText = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(replacementText);
replacementText = HiveUtils.unparseIdentifier(replacementText, conf);
addTranslation(identifier, replacementText);
} | 3.68 |
framework_VCalendarPanel_getResetKey | /**
* Returns the reset key which will reset the calendar to the previous
* selection. By default this is backspace but it can be overridden to
* change the key to whatever you want.
*
* @return the key code of the reset key
*/
protected int getResetKey() {
return KeyCodes.KEY_BACKSPACE;
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations13 | /**
* Test for proper SQL mathematics operation generation from DSL expressions.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations13() {
String result = testDialect.getSqlFrom(field("a").plus(field("b")).plus(field("c")).divideBy(literal(2)));
assertEquals(expectedSqlForMathOperations13(), result);
} | 3.68 |
flink_MetricStore_getJobManager | /** @deprecated Use semantically equivalent {@link #getJobManagerMetricStore()}. */
@Deprecated
public synchronized ComponentMetricStore getJobManager() {
return ComponentMetricStore.unmodifiable(jobManager);
} | 3.68 |
flink_AbstractStreamOperator_getExecutionConfig | /**
* Gets the execution config defined on the execution environment of the job to which this
* operator belongs.
*
* @return The job's execution config.
*/
public ExecutionConfig getExecutionConfig() {
return container.getExecutionConfig();
} | 3.68 |
flink_DualInputOperator_setFirstInputs | /**
* Sets the first input to the union of the given operators.
*
* @param inputs The operator(s) that form the first inputs.
* @deprecated This method will be removed in future versions. Use the {@link Union} operator
* instead.
*/
@Deprecated
public void setFirstInputs(List<Operator<IN1>> inputs) {
this.input1 = Operator.createUnionCascade(inputs);
} | 3.68 |
hadoop_LocalIdentityTransformer_transformIdentityForGetRequest | /**
* Perform identity transformation for the Get request results.
* @param originalIdentity the original user or group in the get request results: FileStatus, AclStatus.
* @param isUserName indicates whether the input originalIdentity is an owner name or an owning group name.
* @param localIdentity the local user or group, should be parsed from UserGroupInformation.
* @return local identity.
*/
@Override
public String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity)
throws IOException {
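// Look up the AAD identity mapped to the local user or group name; if no mapping exists,
// fall back to the default transformation implemented by the superclass.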
String localIdentityForOrig = isUserName ? localToAadIdentityLookup.lookupForLocalUserIdentity(originalIdentity)
: localToAadIdentityLookup.lookupForLocalGroupIdentity(originalIdentity);
if (localIdentityForOrig == null || localIdentityForOrig.isEmpty()) {
return super.transformIdentityForGetRequest(originalIdentity, isUserName, localIdentity);
}
return localIdentityForOrig;
} | 3.68 |
shardingsphere-elasticjob_ZookeeperElectionService_start | /**
* Start election.
*/
public void start() {
log.debug("Elastic job: {} start to elect leadership", leaderSelector.getId());
leaderSelector.start();
} | 3.68 |
flink_CatalogManager_listSchemas | /**
* Lists all available schemas in the given catalog. This is not equivalent to listing the
* databases within the given catalog, as it also includes the database parts of temporary
* object identifiers.
*
* <p><b>NOTE:</b> It is primarily used for interacting with Calcite's schema.
*
* @param catalogName filter for the catalog part of the schema
* @return list of schemas with the given prefix
*/
public Set<String> listSchemas(String catalogName) {
return Stream.concat(
getCatalog(catalogName).map(Catalog::listDatabases)
.orElse(Collections.emptyList()).stream(),
temporaryTables.keySet().stream()
.filter(i -> i.getCatalogName().equals(catalogName))
.map(ObjectIdentifier::getDatabaseName))
.collect(Collectors.toSet());
} | 3.68 |
flink_ManuallyTriggeredScheduledExecutorService_numQueuedRunnables | /** Gets the number of Runnables currently queued. */
public int numQueuedRunnables() {
synchronized (queuedRunnables) {
return queuedRunnables.size();
}
} | 3.68 |
hbase_CommonFSUtils_deleteDirectory | /**
* Delete if exists.
* @param fs filesystem object
* @param dir directory to delete
* @return True if deleted <code>dir</code>
* @throws IOException e
*/
public static boolean deleteDirectory(final FileSystem fs, final Path dir) throws IOException {
return fs.exists(dir) && fs.delete(dir, true);
} | 3.68 |
hbase_HandlerUtil_getRetryCounter | /**
* Get an exponential backoff retry counter. The base unit is 100 milliseconds, and the max
* backoff time is 30 seconds.
*/
public static RetryCounter getRetryCounter() {
return new RetryCounterFactory(
new RetryCounter.RetryConfig().setBackoffPolicy(new RetryCounter.ExponentialBackoffPolicy())
.setSleepInterval(100).setMaxSleepTime(30000).setMaxAttempts(Integer.MAX_VALUE)
.setTimeUnit(TimeUnit.MILLISECONDS).setJitter(0.01f)).create();
} | 3.68 |
framework_VTabsheet_recalculateCaptionWidth | /**
* Recalculates the required caption width and sets it as the new width.
* Also updates the tab width bookkeeping of the tab bar if needed. The
* default implementation for the bookkeeping logic attempts to account
* for different margins and paddings in the first tab element and its
* caption element versus the same values in the next visible tab.
*/
public void recalculateCaptionWidth() {
boolean visible = isVisible();
boolean first = td.hasClassName(Tab.TD_FIRST_CLASSNAME);
if (visible && !tabBar.firstAdjusted) {
if (first) {
tabBar.pendingTab = this;
} else if (tabBar.pendingTab != null) {
// the first visible tab usually has different styling than
// the rest, compare the styles against the second visible
// tab in order to adjust the saved width for the first tab
ComputedStyle tabStyle = new ComputedStyle(getElement());
ComputedStyle captionStyle = new ComputedStyle(
tabCaption.getElement());
ComputedStyle pendingTabStyle = new ComputedStyle(
tabBar.pendingTab.getElement());
ComputedStyle pendingCaptionStyle = new ComputedStyle(
tabBar.pendingTab.tabCaption.getElement());
double tabPadding = tabStyle.getPaddingWidth();
double tabMargin = tabStyle.getMarginWidth();
double captionPadding = captionStyle.getPaddingWidth();
double captionMargin = captionStyle.getMarginWidth();
double pendingTabPadding = pendingTabStyle
.getPaddingWidth();
double pendingTabMargin = pendingTabStyle.getMarginWidth();
double pendingCaptionPadding = pendingCaptionStyle
.getPaddingWidth();
double pendingCaptionMargin = pendingCaptionStyle
.getMarginWidth();
// update the adjuster
tabBar.firstTabWidthAdjuster = (int) Math.ceil(tabPadding
+ tabMargin + captionPadding + captionMargin
- pendingTabPadding - pendingTabMargin
- pendingCaptionPadding - pendingCaptionMargin);
// update the pending tab
tabBar.tabWidths.put(tabBar.pendingTab,
tabBar.pendingTab.getOffsetWidth()
+ tabBar.firstTabWidthAdjuster);
// mark adjusting done
tabBar.firstAdjusted = true;
tabBar.pendingTab = null;
}
}
tabCaption.setWidth(tabCaption.getRequiredWidth() + "px");
if (visible) {
if (first) {
tabBar.tabWidths.put(this,
getOffsetWidth() + tabBar.firstTabWidthAdjuster);
} else {
tabBar.tabWidths.put(this, getOffsetWidth());
}
}
} | 3.68 |
hadoop_NativeTaskOutputFiles_getSpillIndexFileForWrite | /**
* Create a local map spill index file name.
*
* @param spillNumber the spill number
* @param size the size of the file
*/
public Path getSpillIndexFileForWrite(int spillNumber, long size) throws IOException {
String path = String
.format(SPILL_INDEX_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.68 |
hadoop_AbfsCountersImpl_formString | /**
* {@inheritDoc}
*
* Method to aggregate all the counters in the MetricRegistry and form a
* string with prefix, separator and suffix.
*
* @param prefix string that would be before metric.
* @param separator string that would be between metric name and value.
* @param suffix string that would be after metric value.
* @param all gets all the values even if unchanged.
* @return a String with all the metrics and their values.
*/
@Override
public String formString(String prefix, String separator, String suffix,
boolean all) {
MetricStringBuilder metricStringBuilder = new MetricStringBuilder(null,
prefix, separator, suffix);
registry.snapshot(metricStringBuilder, all);
return metricStringBuilder.toString();
} | 3.68 |
morf_SqlDialect_prepareStatementParameters | /**
* Sets up parameters on a {@link NamedParameterPreparedStatement} with a set of values.
*
* @param statement The {@link PreparedStatement} to set up
* @param parameters The parameters.
* @param values The values.
* @throws RuntimeException if a data type is not supported or if a
* supplied string value cannot be converted to the column data type.
*/
public void prepareStatementParameters(NamedParameterPreparedStatement statement, Iterable<SqlParameter> parameters, DataValueLookup values) {
parameters.forEach(parameter -> {
try {
prepareStatementParameters(statement, values, parameter);
} catch (Exception e) {
throw new RuntimeException(String.format("Error setting parameter value, column [%s], value [%s] on prepared statement",
parameter.getMetadata().getName(), values.getObject(parameter.getMetadata())), e);
}
});
}
/**
* Sets up a parameter on {@link NamedParameterPreparedStatement} | 3.68 |
flink_VertexThreadInfoStats_getSamplesBySubtask | /**
* Returns a map of thread info samples by subtask (execution ID).
*
* @return Map of thread info samples by task (execution ID)
*/
public Map<ExecutionAttemptID, Collection<ThreadInfoSample>> getSamplesBySubtask() {
return samplesBySubtask;
} | 3.68 |
morf_SqlDateUtils_castAsDateCaseStatement | /**
* Returns the replacement value (cast as a date) if the criterion is true,
* otherwise returns the expression cast as a date.
*
* @param expression the field to be cast as DATE
* @param criterion the criterion to evaluate
* @param replace the replacement value
* @return expression or replacement value cast as a date
*/
private static AliasedField castAsDateCaseStatement(AliasedField expression, Criterion criterion, AliasedField replace) {
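// If the replacement is already a DATE literal, only the fallback expression needs the cast;
// otherwise the result of the whole CASE expression is cast to a date.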
if (replace instanceof FieldLiteral &&
((FieldLiteral)replace).getDataType() == DataType.DATE) {
return caseStatement(when(criterion).then(replace)).otherwise(castAsDate(expression));
}
else {
return castAsDate(caseStatement(when(criterion).then(replace)).otherwise(expression));
}
} | 3.68 |
graphhopper_AlternativeRoute_isBestPath | // returns true if fromSPTEntry is identical to the specified best path
boolean isBestPath(SPTEntry fromSPTEntry) {
if (traversalMode.isEdgeBased()) {
if (GHUtility.getEdgeFromEdgeKey(startTID.get()) == fromSPTEntry.edge) {
if (fromSPTEntry.parent == null)
throw new IllegalStateException("best path must have no parent but was non-null: " + fromSPTEntry);
if (bestEntry.get() != null && bestEntry.get().edge != fromSPTEntry.edge)
throw new IllegalStateException("there can be only one best entry but was " + fromSPTEntry + " vs old: " + bestEntry.get()
+ " " + graph.getEdgeIteratorState(fromSPTEntry.edge, fromSPTEntry.adjNode).fetchWayGeometry(FetchMode.ALL));
bestEntry.set(fromSPTEntry);
return true;
}
} else if (fromSPTEntry.parent == null) {
if (startTID.get() != fromSPTEntry.adjNode)
throw new IllegalStateException("Start traversal ID has to be identical to root edge entry "
+ "which is the plateau start of the best path but was: " + startTID + " vs. adjNode: " + fromSPTEntry.adjNode);
if (bestEntry.get() != null)
throw new IllegalStateException("there can be only one best entry but was " + fromSPTEntry + " vs old: " + bestEntry.get()
+ " " + graph.getEdgeIteratorState(fromSPTEntry.edge, fromSPTEntry.adjNode).fetchWayGeometry(FetchMode.ALL));
bestEntry.set(fromSPTEntry);
return true;
}
return false;
} | 3.68 |
framework_PublishedFileHandler_handleRequest | /**
* Writes the connector resource identified by the request URI to the
* response. If a published resource corresponding to the URI path is not
* found, writes an HTTP Not Found error to the response.
*/
@Override
public boolean handleRequest(VaadinSession session, VaadinRequest request,
VaadinResponse response) throws IOException {
if (!ServletPortletHelper.isPublishedFileRequest(request)) {
return false;
}
String pathInfo = request.getPathInfo();
// + 2 to also remove beginning and ending slashes
String fileName = pathInfo.substring(
ApplicationConstants.PUBLISHED_FILE_PATH.length() + 2);
final String mimetype = response.getService().getMimeType(fileName);
// Security check: avoid accidentally serving from the root of the
// classpath instead of relative to the context class
if (fileName.startsWith("/")) {
getLogger()
.warning("Published file request starting with / rejected: "
+ fileName);
response.sendError(HttpServletResponse.SC_NOT_FOUND, fileName);
return true;
}
// Check that the resource name has been registered
session.lock();
Class<?> context;
try {
context = session.getCommunicationManager().getDependencies()
.get(fileName);
} finally {
session.unlock();
}
// Security check: don't serve resource if the name hasn't been
// registered in the map
if (context == null) {
getLogger().warning(
"Rejecting published file request for file that has not been published: "
+ fileName);
response.sendError(HttpServletResponse.SC_NOT_FOUND, fileName);
return true;
}
// Resolve file relative to the location of the context class
InputStream in = context.getResourceAsStream(fileName);
if (in == null) {
getLogger().warning(fileName + " published by " + context.getName()
+ " not found. Verify that the file "
+ context.getPackage().getName().replace('.', '/') + '/'
+ fileName + " is available on the classpath.");
response.sendError(HttpServletResponse.SC_NOT_FOUND, fileName);
return true;
}
// Set caching for the published file
String cacheControl = "public, max-age=0, must-revalidate";
int resourceCacheTime = request.getService()
.getDeploymentConfiguration().getResourceCacheTime();
if (resourceCacheTime > 0) {
cacheControl = "max-age=" + String.valueOf(resourceCacheTime);
}
response.setHeader("Cache-Control", cacheControl);
OutputStream out = null;
try {
if (mimetype != null) {
response.setContentType(mimetype);
}
out = response.getOutputStream();
final byte[] buffer = new byte[Constants.DEFAULT_BUFFER_SIZE];
int bytesRead = 0;
while ((bytesRead = in.read(buffer)) > 0) {
out.write(buffer, 0, bytesRead);
}
out.flush();
} finally {
try {
in.close();
} catch (Exception e) {
// Do nothing
}
if (out != null) {
try {
out.close();
} catch (Exception e) {
// Do nothing
}
}
}
return true;
} | 3.68 |
querydsl_AbstractSQLServerQuery_tableHints | /**
* Set the table hints
*
* @param tableHints table hints
* @return the current object
*/
public C tableHints(SQLServerTableHints... tableHints) {
if (tableHints.length > 0) {
String hints = SQLServerGrammar.tableHints(tableHints);
addJoinFlag(hints, JoinFlag.Position.BEFORE_CONDITION);
}
return (C) this;
} | 3.68 |
hbase_RequestConverter_buildIsCatalogJanitorEnabledRequest | /**
* Creates a request for querying the master whether the catalog janitor is enabled
* @return A {@link IsCatalogJanitorEnabledRequest}
*/
public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() {
return IsCatalogJanitorEnabledRequest.getDefaultInstance();
} | 3.68 |
framework_InMemoryDataProvider_setFilter | /**
* Sets a filter for an item property. The filter replaces any filter that
* has been set or added previously.
*
* @see #setFilter(SerializablePredicate)
* @see #setFilterByValue(ValueProvider, Object)
* @see #addFilter(ValueProvider, SerializablePredicate)
*
* @param valueProvider
* value provider that gets the property value, not
* <code>null</code>
* @param valueFilter
* filter for testing the property value, not <code>null</code>
*/
public default <V> void setFilter(ValueProvider<T, V> valueProvider,
SerializablePredicate<V> valueFilter) {
setFilter(InMemoryDataProviderHelpers
.createValueProviderFilter(valueProvider, valueFilter));
} | 3.68 |
flink_BufferBuilder_markFinished | /**
* Marks this position as finished and returns the current position.
*
* @return current position as of {@link #getCached()}
*/
public int markFinished() {
int currentPosition = getCached();
int newValue = -currentPosition;
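// The position is stored negated to mark it as finished; position 0 cannot be negated to a
// distinct value, so a dedicated FINISHED_EMPTY sentinel is used for a finished empty buffer.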
if (newValue == 0) {
newValue = FINISHED_EMPTY;
}
set(newValue);
return currentPosition;
} | 3.68 |
framework_Label_getProperty | /**
* Gets the Property that has been modified.
*
* @see Property.ValueChangeEvent#getProperty()
*/
@Override
public Property getProperty() {
return (Property) getSource();
} | 3.68 |
hbase_SnapshotInfo_printSchema | /**
* Dump the {@link org.apache.hadoop.hbase.client.TableDescriptor}
*/
private void printSchema() {
System.out.println("Table Descriptor");
System.out.println("----------------------------------------");
System.out.println(snapshotManifest.getTableDescriptor().toString());
System.out.println();
} | 3.68 |
framework_DefaultItemSorter_compareProperty | /**
* Compares the property indicated by <code>propertyId</code> in the items
* indicated by <code>item1</code> and <code>item2</code> for order. Returns
* a negative integer, zero, or a positive integer as the property value in
* the first item is less than, equal to, or greater than the property value
* in the second item. If the <code>sortDirection</code> is false the
* returned value is negated.
* <p>
* The comparator set for this <code>DefaultItemSorter</code> is used for
* comparing the two property values.
*
* @param propertyId
* The property id for the property that is used for comparison.
* @param sortDirection
* The direction of the sort. A false value negates the result.
* @param item1
* The first item to compare.
* @param item2
* The second item to compare.
* @return a negative, zero, or positive integer if the property value in
* the first item is less than, equal to, or greater than the
* property value in the second item. Negated if
* {@code sortDirection} is false.
*/
protected int compareProperty(Object propertyId, boolean sortDirection,
Item item1, Item item2) {
// Get the properties to compare
final Property<?> property1 = item1.getItemProperty(propertyId);
final Property<?> property2 = item2.getItemProperty(propertyId);
// Get the values to compare
final Object value1 = (property1 == null) ? null : property1.getValue();
final Object value2 = (property2 == null) ? null : property2.getValue();
// Result of the comparison
int r = 0;
if (sortDirection) {
r = propertyValueComparator.compare(value1, value2);
} else {
r = propertyValueComparator.compare(value2, value1);
}
return r;
} | 3.68 |
hbase_RegionInfo_parseFrom | /**
* Parses a RegionInfo instance from the passed in stream. Presumes the RegionInfo was serialized
* to the stream with {@link #toDelimitedByteArray(RegionInfo)}.
* @return An instance of RegionInfo.
*/
static RegionInfo parseFrom(final DataInputStream in) throws IOException {
// I need to be able to move back in the stream if this is not a pb
// serialization so I can do the Writable decoding instead.
int pblen = ProtobufUtil.lengthOfPBMagic();
byte[] pbuf = new byte[pblen];
if (in.markSupported()) { // read it with mark()
in.mark(pblen);
}
// assumption: if Writable serialization, it should be longer than pblen.
IOUtils.readFully(in, pbuf, 0, pblen);
if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
return ProtobufUtil.toRegionInfo(HBaseProtos.RegionInfo.parseDelimitedFrom(in));
} else {
throw new IOException("PB encoded RegionInfo expected");
}
} | 3.68 |
framework_ColumnVisibilityChangeEvent_isUserOriginated | /**
* Is the visibility change triggered by user.
*
* @return <code>true</code> if the change was triggered by user,
* <code>false</code> if not
*/
public boolean isUserOriginated() {
return userOriginated;
} | 3.68 |
flink_TaskIOMetricGroup_getTaskInitializationDuration | /**
* Returns the duration of time required for a task's restoring/initialization, which reaches
* its maximum when the task begins running and remains constant throughout the task's running.
* Return 0 when the task is not in initialization/running status.
*/
@VisibleForTesting
public long getTaskInitializationDuration() {
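// Not initialized yet: 0; still initializing (no start time yet): elapsed time so far;
// already running: the fixed duration between initialization and task start.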
if (taskInitializeTime == INVALID_TIMESTAMP) {
return 0L;
} else if (taskStartTime == INVALID_TIMESTAMP) {
return clock.absoluteTimeMillis() - taskInitializeTime;
} else {
return taskStartTime - taskInitializeTime;
}
} | 3.68 |
hadoop_SingleFilePerBlockCache_getCacheFilePath | /**
* Return temporary file created based on the file path retrieved from local dir allocator.
*
* @param conf The configuration object.
* @param localDirAllocator Local dir allocator instance.
* @return Path of the temporary file created.
* @throws IOException if IO error occurs while local dir allocator tries to retrieve path
* from local FS or file creation fails or permission set fails.
*/
protected Path getCacheFilePath(final Configuration conf,
final LocalDirAllocator localDirAllocator)
throws IOException {
return getTempFilePath(conf, localDirAllocator);
} | 3.68 |
morf_AbstractSqlDialectTest_testDecimalFormatter | /**
* Tests formatting of numerical values in a {@link Record}.
*
* @throws SQLException when a database access error occurs
*/
@Test
public void testDecimalFormatter() throws SQLException {
assertEquals("Do nothing if no trailing zeroes", "123.123", checkDatabaseSafeStringToRecordValue(DataType.DECIMAL, "123.123"));
assertEquals("Remove trailing zeroes from genuine decimal", "123.123", checkDatabaseSafeStringToRecordValue(DataType.DECIMAL, "123.12300"));
assertEquals("Ignore zeroes that are not trailing", "0.00003", checkDatabaseSafeStringToRecordValue(DataType.DECIMAL, "000.00003"));
assertEquals("Remove trailing zeroes from zero value decimal", "0", checkDatabaseSafeStringToRecordValue(DataType.DECIMAL, "0.0000"));
assertNull("Nulls get passed through even for BigDecimals", checkDatabaseSafeStringToRecordValue(DataType.DECIMAL, null));
assertEquals("Do nothing to zero value integer", "0", checkDatabaseSafeStringToRecordValue(DataType.DECIMAL, "0"));
assertEquals("Do nothing to zero ending integer", "200", checkDatabaseSafeStringToRecordValue(DataType.DECIMAL, "200"));
assertEquals("Boolean: 0 --> false", "false", checkDatabaseSafeStringToRecordValue(DataType.BOOLEAN, "0"));
assertEquals("Boolean: 1 --> true", "true", checkDatabaseSafeStringToRecordValue(DataType.BOOLEAN, "1"));
assertEquals("Boolean: null --> null", null, checkDatabaseSafeStringToRecordValue(DataType.BOOLEAN, null));
} | 3.68 |
framework_UIConnector_replaceTheme | /**
* Loads the new theme and removes references to the old theme.
*
* @since 7.4.3
* @param oldTheme
* The name of the old theme
* @param newTheme
* The name of the new theme
* @param oldThemeUrl
* The url of the old theme
* @param newThemeUrl
* The url of the new theme
*/
protected void replaceTheme(final String oldTheme, final String newTheme,
String oldThemeUrl, final String newThemeUrl) {
LinkElement tagToReplace = null;
if (oldTheme != null) {
tagToReplace = findStylesheetTag(oldThemeUrl);
if (tagToReplace == null) {
getLogger()
.warning("Did not find the link tag for the old theme ("
+ oldThemeUrl
+ "), adding a new stylesheet for the new theme ("
+ newThemeUrl + ")");
}
}
if (newTheme != null) {
loadTheme(newTheme, newThemeUrl, tagToReplace);
} else {
if (tagToReplace != null) {
tagToReplace.getParentElement().removeChild(tagToReplace);
}
activateTheme(null);
}
} | 3.68 |
hbase_StreamUtils_readRawVarint32 | /**
* Reads a varInt value stored in an array.
* @param input input array where the varInt is available
* @param offset offset in the input array where the varInt is available
* @return A pair of integers in which the first value is the actual decoded varInt value and the
* second value is the number of bytes taken by this varInt for its storage in the input array.
* @throws IOException When the varint is malformed and not able to be read correctly
*/
public static Pair<Integer, Integer> readRawVarint32(byte[] input, int offset)
throws IOException {
int newOffset = offset;
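// Each varint byte carries 7 data bits; a set high bit (negative byte value) means another byte follows.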
byte tmp = input[newOffset++];
if (tmp >= 0) {
return new Pair<>((int) tmp, newOffset - offset);
}
int result = tmp & 0x7f;
tmp = input[newOffset++];
if (tmp >= 0) {
result |= tmp << 7;
} else {
result |= (tmp & 0x7f) << 7;
tmp = input[newOffset++];
if (tmp >= 0) {
result |= tmp << 14;
} else {
result |= (tmp & 0x7f) << 14;
tmp = input[newOffset++];
if (tmp >= 0) {
result |= tmp << 21;
} else {
result |= (tmp & 0x7f) << 21;
tmp = input[newOffset++];
result |= tmp << 28;
if (tmp < 0) {
// Discard upper 32 bits.
for (int i = 0; i < 5; i++) {
tmp = input[newOffset++];
if (tmp >= 0) {
return new Pair<>(result, newOffset - offset);
}
}
throw new IOException("Malformed varint");
}
}
}
}
return new Pair<>(result, newOffset - offset);
} | 3.68 |
flink_HandlerUtils_sendErrorResponse | /**
* Sends the given error response and status code to the given channel.
*
* @param channelHandlerContext identifying the open channel
* @param keepAlive If the connection should be kept alive.
* @param errorMessage which should be sent
* @param statusCode of the message to send
* @param headers additional header values
*/
public static CompletableFuture<Void> sendErrorResponse(
ChannelHandlerContext channelHandlerContext,
boolean keepAlive,
ErrorResponseBody errorMessage,
HttpResponseStatus statusCode,
Map<String, String> headers) {
StringWriter sw = new StringWriter();
try {
mapper.writeValue(sw, errorMessage);
} catch (IOException e) {
// this should never happen
LOG.error("Internal server error. Could not map error response to JSON.", e);
return sendResponse(
channelHandlerContext,
keepAlive,
"Internal server error. Could not map error response to JSON.",
HttpResponseStatus.INTERNAL_SERVER_ERROR,
headers);
}
return sendResponse(channelHandlerContext, keepAlive, sw.toString(), statusCode, headers);
} | 3.68 |
flink_BlockStatementGrouper_groupBlock | // Groups a continuous block of statements together. If a statement is an IF/ELSE/WHILE,
// its body can be further grouped by a recursive call to the visitStatement method.
private void groupBlock(
StatementContext ctx, String context, TokenStreamRewriter rewriter) {
int localGroupCodeLength = 0;
List<LocalGroupElement> localGroup = new ArrayList<>();
for (BlockStatementContext bsc : ctx.block().blockStatement()) {
StatementContext statement = bsc.statement();
if (statement.IF() != null
|| statement.ELSE() != null
|| statement.WHILE() != null) {
String localContext = context + "_rewriteGroup" + this.counter++;
CommonTokenStream tokenStream =
new CommonTokenStream(
new JavaLexer(
CharStreams.fromString(
CodeSplitUtil.getContextString(statement))));
TokenStreamRewriter localRewriter = new TokenStreamRewriter(tokenStream);
JavaParser javaParser = new JavaParser(tokenStream);
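// Use ANTLR's faster SLL prediction mode when parsing the extracted statement.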
javaParser.getInterpreter().setPredictionMode(PredictionMode.SLL);
visitStatement(javaParser.statement(), localContext, localRewriter);
localGroup.add(new RewriteContextGroupElement(statement, localRewriter));
// Add the new method call length to localGroupCodeLength. The "3" accounts for the two
// brackets around the parameters and the semicolon at the end of the method call.
localGroupCodeLength += 3 + localContext.length() + parameters.length();
} else {
if (localGroupCodeLength + 1 + bsc.getText().length() <= maxMethodLength) {
localGroup.add(new ContextGroupElement(bsc));
localGroupCodeLength += bsc.getText().length();
} else {
if (addLocalGroup(localGroup, context, rewriter)) {
localGroup = new ArrayList<>();
localGroupCodeLength = 0;
}
localGroupCodeLength += bsc.getText().length();
localGroup.add(new ContextGroupElement(bsc));
}
}
}
// Groups that have only one statement that is "single line statement" such as
// "a[2] += b[2];" will not be extracted.
addLocalGroup(localGroup, context, rewriter);
} | 3.68 |
hbase_ReopenTableRegionsProcedure_setTimeoutFailure | /**
* At end of timeout, wake ourselves up so we run again.
*/
@Override
protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) {
setState(ProcedureProtos.ProcedureState.RUNNABLE);
env.getProcedureScheduler().addFront(this);
return false; // 'false' means that this procedure handled the timeout
} | 3.68 |
hbase_ZKNodeTracker_getData | /**
* Gets the data of the node.
* <p>
* If the node is currently available, the most up-to-date known version of the data is returned.
* If the node is not currently available, null is returned.
* @param refresh whether to refresh the data by calling ZK directly.
* @return data of the node, null if unavailable
*/
public synchronized byte[] getData(boolean refresh) {
if (refresh) {
try {
this.data = ZKUtil.getDataAndWatch(watcher, node);
} catch (KeeperException e) {
abortable.abort("Unexpected exception handling getData", e);
}
}
return this.data;
} | 3.68 |
flink_TypeMappingUtils_computePhysicalIndices | /**
* Computes indices of physical fields corresponding to the selected logical fields of a {@link
* TableSchema}.
*
* @param logicalColumns Logical columns that describe the physical type.
* @param physicalType Physical type to retrieve indices from.
* @param nameRemapping Additional remapping of a logical to a physical field name.
* TimestampExtractor works with logical names, but accesses physical fields
* @return Physical indices of logical fields selected with {@code projectedLogicalFields} mask.
*/
public static int[] computePhysicalIndices(
List<TableColumn> logicalColumns,
DataType physicalType,
Function<String, String> nameRemapping) {
Map<TableColumn, Integer> physicalIndexLookup =
computePhysicalIndices(logicalColumns.stream(), physicalType, nameRemapping);
return logicalColumns.stream().mapToInt(physicalIndexLookup::get).toArray();
} | 3.68 |
hbase_KeyValueHeap_next | /**
* Gets the next row of keys from the top-most scanner.
* <p>
* This method takes care of updating the heap.
* <p>
* This can ONLY be called when you are using Scanners that implement InternalScanner as well as
* KeyValueScanner (a {@link StoreScanner}).
* @return true if more rows exist after this one, false if scanner is done
*/
@Override
public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
if (this.current == null) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
InternalScanner currentAsInternal = (InternalScanner) this.current;
boolean moreCells = currentAsInternal.next(result, scannerContext);
Cell pee = this.current.peek();
/*
* By definition, any InternalScanner must return false only when it has no further rows to be
* fetched. So, we can close a scanner if it returns false. All existing implementations seem to
* be fine with this. It is much more efficient to close scanners which are not needed than keep
* them in the heap. This is also required for certain optimizations.
*/
if (pee == null || !moreCells) {
// add the scanner that is to be closed
this.scannersForDelayedClose.add(this.current);
} else {
this.heap.add(this.current);
}
this.current = null;
this.current = pollRealKV();
if (this.current == null) {
moreCells = scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
}
return moreCells;
} | 3.68 |
hudi_AbstractTableFileSystemView_listPartition | /**
* Return all the files from the partition.
*
* @param partitionPath The absolute path of the partition
* @throws IOException
*/
protected FileStatus[] listPartition(Path partitionPath) throws IOException {
try {
return metaClient.getFs().listStatus(partitionPath);
} catch (IOException e) {
// Create the path if it does not exist already
if (!metaClient.getFs().exists(partitionPath)) {
metaClient.getFs().mkdirs(partitionPath);
return new FileStatus[0];
} else {
// in case the partition path was created by another caller
return metaClient.getFs().listStatus(partitionPath);
}
}
} | 3.68 |
graphhopper_EdgeBasedTarjanSCC_findComponentsRecursive | /**
* Runs Tarjan's algorithm in a recursive way. Doing it like this requires a large stack size for large graphs,
* which can be set like `-Xss1024M`. Usually the version using an explicit stack ({@link #findComponents()}) should be
* preferred. However, this recursive implementation is easier to understand.
*
* @see #findComponents(Graph, EdgeTransitionFilter, boolean)
*/
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents).findComponentsRecursive();
} | 3.68 |
AreaShop_RentRegion_getMoneyBackPercentage | /**
* Get the moneyBack percentage.
* @return The % of money the player will get back when unrenting
*/
public double getMoneyBackPercentage() {
return Utils.evaluateToDouble(getStringSetting("rent.moneyBack"), this);
} | 3.68 |
querydsl_DefaultQueryMetadata_noValidate | /**
* Disable validation
*
* @return the current object
*/
public DefaultQueryMetadata noValidate() {
validate = false;
return this;
} | 3.68 |
hbase_QuotaState_getGlobalLimiter | /**
* Return the limiter associated with this quota.
* @return the quota limiter
*/
public synchronized QuotaLimiter getGlobalLimiter() {
lastQuery = EnvironmentEdgeManager.currentTime();
return globalLimiter;
} | 3.68 |
framework_Calendar_setContainerDataSource | /**
* Sets a container as a data source for the events in the calendar.
* Equivalent for doing
* <code>Calendar.setEventProvider(new ContainerEventProvider(container))</code>
*
* Please note that the container must be sorted by date!
*
* @param container
* The container to use as a data source
* @param captionProperty
* The property that has the caption, null if no caption property
* is present
* @param descriptionProperty
* The property that has the description, null if no description
* property is present
* @param startDateProperty
* The property that has the starting date
* @param endDateProperty
* The property that has the ending date
* @param styleNameProperty
* The property that has the stylename, null if no stylename
* property is present
*/
public void setContainerDataSource(Container.Indexed container,
Object captionProperty, Object descriptionProperty,
Object startDateProperty, Object endDateProperty,
Object styleNameProperty) {
ContainerEventProvider provider = new ContainerEventProvider(container);
provider.setCaptionProperty(captionProperty);
provider.setDescriptionProperty(descriptionProperty);
provider.setStartDateProperty(startDateProperty);
provider.setEndDateProperty(endDateProperty);
provider.setStyleNameProperty(styleNameProperty);
provider.addEventSetChangeListener(
new CalendarEventProvider.EventSetChangeListener() {
@Override
public void eventSetChange(
EventSetChangeEvent changeEvent) {
// Repaint if events change
markAsDirty();
}
});
provider.addEventChangeListener(new EventChangeListener() {
@Override
public void eventChange(EventChangeEvent changeEvent) {
// Repaint if event changes
markAsDirty();
}
});
setEventProvider(provider);
} | 3.68 |
AreaShop_BuyRegion_disableReselling | /**
* Stop this region from being in resell mode.
*/
public void disableReselling() {
setSetting("buy.resellMode", null);
setSetting("buy.resellPrice", null);
} | 3.68 |
AreaShop_GeneralRegion_restrictedToRegion | /**
* Check if for renting this region you should be inside of it.
* @return true if you need to be inside, otherwise false
*/
public boolean restrictedToRegion() {
return getBooleanSetting("general.restrictedToRegion");
} | 3.68 |
hudi_InternalSchemaChangeApplier_applyRenameChange | /**
* Rename col name for hudi table.
*
* @param colName col name to be renamed. If we want to rename a col from a nested field, the fullName should be specified
* @param newName new name for the current col. No need to specify the fullName.
*/
public InternalSchema applyRenameChange(String colName, String newName) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
updateChange.renameColumn(colName, newName);
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
} | 3.68 |
querydsl_AbstractPostgreSQLQuery_forShare | /**
* FOR SHARE causes the rows retrieved by the SELECT statement to be locked as though for update.
*
* @return the current object
*/
public C forShare() {
// global forShare support was added later, delegating to super implementation
return super.forShare();
} | 3.68 |
rocketmq-connect_AbstractConfigManagementService_processTaskConfigRecord | /**
* process task config record
*
* @param taskId
* @param schemaAndValue
*/
private void processTaskConfigRecord(ConnectorTaskId taskId, SchemaAndValue schemaAndValue) {
// No-op [Wait for implementation]
} | 3.68 |
morf_AbstractSqlDialectTest_testUpper | /**
* Tests that UPPER functionality works.
*/
@Test
public void testUpper() {
SelectStatement statement = new SelectStatement(upperCase(new FieldReference("field1"))).from(new TableReference(
"schedule"));
String actual = testDialect.convertStatementToSQL(statement);
assertEquals("UpperCase script should match expected", expectedUpper(), actual);
} | 3.68 |
hbase_RegionStates_getRegionsOfTableForReopen | /**
* Get the regions to be reopened when modifying a table.
* <p/>
* Notice that the {@code openSeqNum} in the returned HRegionLocation is also used to indicate the
* state of this region, positive means the region is in {@link State#OPEN}, -1 means
* {@link State#OPENING}. And for regions in other states we do not need to reopen them.
*/
public List<HRegionLocation> getRegionsOfTableForReopen(TableName tableName) {
return getTableRegionStateNodes(tableName).stream().map(this::createRegionForReopen)
.filter(r -> r != null).collect(Collectors.toList());
} | 3.68 |
framework_VaadinService_createRequestHandlers | /**
* Called during initialization to add the request handlers for the service.
* Note that the returned list will be reversed so the last handler will be
* called first. This enables overriding this method and using add on the
* returned list to add a custom request handler which overrides any
* predefined handler.
*
* @return The list of request handlers used by this service.
* @throws ServiceException
* if a problem occurs when creating the request handlers
*/
protected List<RequestHandler> createRequestHandlers()
throws ServiceException {
List<RequestHandler> handlers = new ArrayList<>();
handlers.add(new SessionRequestHandler());
handlers.add(new PublishedFileHandler());
handlers.add(new HeartbeatHandler());
handlers.add(new FileUploadHandler());
handlers.add(new UidlRequestHandler());
handlers.add(new UnsupportedBrowserHandler());
handlers.add(new ConnectorResourceHandler());
return handlers;
} | 3.68 |
hadoop_DiskBalancerWorkItem_setTolerancePercent | /**
* Sets the tolerance percentage.
*
* @param tolerancePercent - tolerance.
*/
public void setTolerancePercent(long tolerancePercent) {
this.tolerancePercent = tolerancePercent;
} | 3.68 |
dubbo_ServiceConfigBase_export | /**
* export service and auto start application instance
*/
public final void export() {
export(RegisterTypeEnum.AUTO_REGISTER);
} | 3.68 |
framework_Validator_getErrorMessage | // Intentional change in compatibility package
@Override
public ErrorMessage getErrorMessage() {
UserError error = new UserError(getHtmlMessage(), ContentMode.HTML,
ErrorLevel.ERROR);
for (Validator.InvalidValueException nestedException : getCauses()) {
error.addCause(AbstractErrorMessage
.getErrorMessageForException(nestedException));
}
return error;
} | 3.68 |
morf_AbstractSqlDialectTest_testConcatenateWithOneField | /**
* Check that we get an illegal argument exception when we try to concatenate
* a single field.
*/
@Test
public void testConcatenateWithOneField() {
try {
new SelectStatement(new ConcatenatedField(new FieldReference("field1")).as("test")).from(new TableReference("schedule"));
fail("Should have thrown an exception on construction");
} catch (IllegalArgumentException e) {
// Should have thrown an exception on construction
}
} | 3.68 |
hbase_HMaster_isNormalizerOn | /**
* Queries the state of the {@link RegionNormalizerStateStore}. If it's not initialized, false is
* returned.
*/
public boolean isNormalizerOn() {
return !isInMaintenanceMode() && getRegionNormalizerManager().isNormalizerOn();
} | 3.68 |
framework_Form_setItemDataSource | /**
* Set the item datasource for the form, but limit the form contents to
* specified properties of the item.
*
* <p>
* Setting the item datasource clears any fields the form might contain and
* adds the specified properties as fields to the form, in the specified
* order.
* </p>
*
* @see Item.Viewer#setItemDataSource(Item)
*/
public void setItemDataSource(Item newDataSource,
Collection<?> propertyIds) {
if (getLayout() instanceof GridLayout) {
GridLayout gl = (GridLayout) getLayout();
if (gridlayoutCursorX == -1) {
// first setItemDataSource, remember initial cursor
gridlayoutCursorX = gl.getCursorX();
gridlayoutCursorY = gl.getCursorY();
} else {
// restore initial cursor
gl.setCursorX(gridlayoutCursorX);
gl.setCursorY(gridlayoutCursorY);
}
}
// Removes all fields first from the form
removeAllProperties();
// Sets the datasource
itemDatasource = newDataSource;
// If the new datasource is null, just set null datasource
if (itemDatasource == null) {
markAsDirty();
return;
}
// Adds all the properties to this form
for (final Object id : propertyIds) {
final Property<?> property = itemDatasource.getItemProperty(id);
if (id != null && property != null) {
final Field<?> f = fieldFactory.createField(itemDatasource, id,
this);
if (f != null) {
bindPropertyToField(id, property, f);
addField(id, f);
}
}
}
} | 3.68 |
flink_OSSTestCredentials_getOSSSecretKey | /**
* Get OSS secret key.
*
* @return OSS secret key
*/
public static String getOSSSecretKey() {
if (SECRET_KEY != null) {
return SECRET_KEY;
} else {
throw new IllegalStateException("OSS secret key is not available");
}
} | 3.68 |
hadoop_TypedBytesOutput_writeLong | /**
* Writes a long as a typed bytes sequence.
*
* @param l the long to be written
* @throws IOException
*/
public void writeLong(long l) throws IOException {
out.write(Type.LONG.code);
out.writeLong(l);
} | 3.68 |
hudi_TableSchemaResolver_readSchemaFromLastCompaction | /**
* Read schema from a data file from the last compaction commit done.
*
* @deprecated please use {@link #getTableAvroSchema(HoodieInstant, boolean)} instead
*/
public MessageType readSchemaFromLastCompaction(Option<HoodieInstant> lastCompactionCommitOpt) throws Exception {
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
HoodieInstant lastCompactionCommit = lastCompactionCommitOpt.orElseThrow(() -> new Exception(
"Could not read schema from last compaction, no compaction commits found on path " + metaClient));
// Read from the file written by the compaction
HoodieCommitMetadata compactionMetadata = HoodieCommitMetadata
.fromBytes(activeTimeline.getInstantDetails(lastCompactionCommit).get(), HoodieCommitMetadata.class);
String filePath = compactionMetadata.getFileIdAndFullPaths(metaClient.getBasePathV2()).values().stream().findAny()
.orElseThrow(() -> new IllegalArgumentException("Could not find any data file written for compaction "
+ lastCompactionCommit + ", could not get schema for table " + metaClient.getBasePath()));
return readSchemaFromBaseFile(filePath);
} | 3.68 |
hbase_HRegion_sawFailedSanityCheck | /**
* Records that a {@link FailedSanityCheckException} has been observed.
*/
void sawFailedSanityCheck() {
failedSanityCheck = true;
} | 3.68 |
querydsl_GeometryExpressions_pointOperation | /**
* Create a new Point operation expression
*
* @param op operator
* @param args arguments
* @return operation expression
*/
public static PointExpression<Point> pointOperation(Operator op, Expression<?>... args) {
return new PointOperation<Point>(Point.class, op, args);
} | 3.68 |
graphhopper_RouterConfig_setCalcPoints | /**
* This method enables GPS point calculation. If disabled, only the distance will be calculated.
*/
public void setCalcPoints(boolean calcPoints) {
this.calcPoints = calcPoints;
} | 3.68 |
framework_WindowsCssTest_createWindowWith | /**
*
* @param caption
* @param primaryStyleName
* - the style defined styleName
* @param styleName
* - the user defined styleName
*/
private void createWindowWith(String caption, String primaryStyleName,
String styleName) {
Window window = new Window();
VerticalLayout layout = new VerticalLayout();
layout.setMargin(true);
window.setContent(layout);
layout.addComponent(new Label("Some content"));
if (caption != null) {
window.setCaption(caption);
}
if (primaryStyleName != null) {
window.addStyleName(primaryStyleName);
}
if (styleName != null) {
window.addStyleName(styleName);
}
parent.getUI().addWindow(window);
} | 3.68 |
hbase_Reference_createTopReference | /** Returns a {@link Reference} that points at the top half of an hfile */
public static Reference createTopReference(final byte[] splitRow) {
return new Reference(splitRow, Range.top);
} | 3.68 |
hadoop_IOStatisticsSupport_stubDurationTrackerFactory | /**
* Return a stub duration tracker factory whose returned trackers
* are always no-ops.
*
* As singletons are returned, this is very low-cost to use.
* @return a duration tracker factory.
*/
public static DurationTrackerFactory stubDurationTrackerFactory() {
return StubDurationTrackerFactory.STUB_DURATION_TRACKER_FACTORY;
} | 3.68 |
hbase_ReplicationLoad_sourceToString | /**
* sourceToString
* @return a string that contains sourceReplicationLoad information
*/
public String sourceToString() {
StringBuilder sb = new StringBuilder();
for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceEntries) {
sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID());
sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp());
sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue());
sb = Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp",
(new Date(rls.getTimeStampOfLastShippedOp()).toString()));
sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag());
}
return sb.toString();
} | 3.68 |
pulsar_KeyStoreSSLContext_createServerSslContext | // the web server only uses this method to get the SSLContext, it won't use this to configure the engine
// no need for ciphers and protocols
public static SSLContext createServerSslContext(String sslProviderString,
String keyStoreTypeString,
String keyStorePath,
String keyStorePassword,
boolean allowInsecureConnection,
String trustStoreTypeString,
String trustStorePath,
String trustStorePassword,
boolean requireTrustedClientCertOnConnect)
throws GeneralSecurityException, IOException {
return createServerKeyStoreSslContext(
sslProviderString,
keyStoreTypeString,
keyStorePath,
keyStorePassword,
allowInsecureConnection,
trustStoreTypeString,
trustStorePath,
trustStorePassword,
requireTrustedClientCertOnConnect,
null,
null).getSslContext();
} | 3.68 |
hmily_HmilyTacParticipantCoordinator_beginParticipant | /**
* Begin hmily transaction.
*
* @param context the context
* @param point the point
* @return the hmily transaction
*/
public HmilyParticipant beginParticipant(final HmilyTransactionContext context, final ProceedingJoinPoint point) {
// Create the global transaction and create a participant
final HmilyParticipant hmilyParticipant = buildHmilyParticipant(point, context.getParticipantId(), context.getParticipantRefId(), context.getTransId());
HmilyParticipantCacheManager.getInstance().cacheHmilyParticipant(hmilyParticipant);
HmilyRepositoryStorage.createHmilyParticipant(hmilyParticipant);
context.setRole(HmilyRoleEnum.PARTICIPANT.getCode());
HmilyContextHolder.set(context);
log.debug("TAC-participate-join ::: {}", hmilyParticipant);
return hmilyParticipant;
} | 3.68 |
hudi_HoodieMetadataTableValidator_readConfigFromFileSystem | /**
* Reads config from the file system.
*
* @param jsc {@link JavaSparkContext} instance.
* @param cfg {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs)
.getProps(true);
} | 3.68 |
hbase_HFileBlock_finishBlockAndWriteHeaderAndData | /**
* Writes the header and the compressed data of this block (or uncompressed data when not using
* compression) into the given stream. Can be called in the "writing" state or in the "block
* ready" state. If called in the "writing" state, transitions the writer to the "block ready"
* state.
* @param out the output stream to write the block to
*/
protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) throws IOException {
ensureBlockReady();
long startTime = EnvironmentEdgeManager.currentTime();
out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size());
out.write(onDiskChecksum);
HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
} | 3.68 |
flink_ClassLoadingUtils_withContextClassLoader | /**
* Wraps the given executor such that all submitted runnables are run in a {@link
* TemporaryClassLoaderContext} based on the given classloader.
*
* @param executor executor to wrap
* @param contextClassLoader class loader that should be set as the context class loader
* @return wrapped executor
*/
public static Executor withContextClassLoader(
Executor executor, ClassLoader contextClassLoader) {
return new ContextClassLoaderSettingExecutor(executor, contextClassLoader);
} | 3.68 |
hbase_AsyncBufferedMutatorBuilder_setWriteBufferPeriodicFlush | /**
* Set the periodical flush interval. If the data in the buffer has not been flushed for a long
* time, i.e, reach this timeout limit, we will flush it automatically.
* <p/>
* Notice that setting the timeout to 0 or a negative value means disabling the periodical flush, not
* 'flush immediately'. If you want to flush immediately then you should not use this class, as it
* is designed to be 'buffered'.
*/
default AsyncBufferedMutatorBuilder setWriteBufferPeriodicFlush(long timeout, TimeUnit unit) {
throw new UnsupportedOperationException("Not implemented");
} | 3.68 |
hadoop_StoreContext_getActiveAuditSpan | /**
* Return the active audit span.
* This is thread local - it MUST be passed into workers.
* To ensure the correct span is used, it SHOULD be
* collected as early as possible, ideally during construction
* or service init/start.
* @return active audit span.
*/
@Override
public AuditSpan getActiveAuditSpan() {
return contextAccessors.getActiveAuditSpan();
} | 3.68 |
framework_GridElement_getHorizontalScroller | /**
* Get the horizontal scroll element.
*
* @return The element representing the horizontal scrollbar
*/
public TestBenchElement getHorizontalScroller() {
List<WebElement> rootElements = findElements(By.xpath("./div"));
return (TestBenchElement) rootElements.get(1);
} | 3.68 |
hadoop_FederationStateStoreFacade_deleteReservationHomeSubCluster | /**
* Delete the home {@link SubClusterId} for the specified
* {@link ReservationId}.
*
* @param reservationId the identifier of the reservation
* @throws YarnException if the call to the state store is unsuccessful
*/
public void deleteReservationHomeSubCluster(ReservationId reservationId) throws YarnException {
DeleteReservationHomeSubClusterRequest request =
DeleteReservationHomeSubClusterRequest.newInstance(reservationId);
stateStore.deleteReservationHomeSubCluster(request);
} | 3.68 |
zxing_ITFReader_validateQuietZone | /**
* The start & end patterns must be pre/post fixed by a quiet zone. This
* zone must be at least 10 times the width of a narrow line. Scan back until
* we either get to the start of the barcode or match the necessary number of
* quiet zone pixels.
*
* Note: It's assumed the row is reversed when using this method to find
* quiet zone after the end pattern.
*
* ref: http://www.barcode-1.net/i25code.html
*
* @param row bit array representing the scanned barcode.
* @param startPattern index into row of the start or end pattern.
* @throws NotFoundException if the quiet zone cannot be found
*/
private void validateQuietZone(BitArray row, int startPattern) throws NotFoundException {
int quietCount = this.narrowLineWidth * 10; // expect to find this many pixels of quiet zone
// if there are not so many pixel at all let's try as many as possible
quietCount = Math.min(quietCount, startPattern);
for (int i = startPattern - 1; quietCount > 0 && i >= 0; i--) {
if (row.get(i)) {
break;
}
quietCount--;
}
if (quietCount != 0) {
// Unable to find the necessary number of quiet zone pixels.
throw NotFoundException.getNotFoundInstance();
}
} | 3.68 |
hbase_StoreFileInfo_getModificationTime | /** Returns Get the modification time of the file. */
public long getModificationTime() throws IOException {
return getFileStatus().getModificationTime();
} | 3.68 |
hudi_AvroInternalSchemaConverter_buildTypeFromAvroSchema | /**
* Build hudi type from avro schema.
*
* @param schema an avro schema.
* @return a hudi type.
*/
public static Type buildTypeFromAvroSchema(Schema schema) {
// set flag to check this has not been visited.
Deque<String> visited = new LinkedList();
AtomicInteger nextId = new AtomicInteger(1);
return visitAvroSchemaToBuildType(schema, visited, true, nextId);
} | 3.68 |
flink_DataSink_setParallelism | /**
* Sets the parallelism for this data sink. The degree must be 1 or more.
*
* @param parallelism The parallelism for this data sink. A value equal to {@link
* ExecutionConfig#PARALLELISM_DEFAULT} will use the system default.
* @return This data sink with set parallelism.
*/
public DataSink<T> setParallelism(int parallelism) {
OperatorValidationUtils.validateParallelism(parallelism);
this.parallelism = parallelism;
return this;
} | 3.68 |
morf_SqlParameter_deepCopyInternal | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
*/
@Override
protected SqlParameter deepCopyInternal(DeepCopyTransformation transformer) {
return new SqlParameter(name, type, width, scale);
} | 3.68 |
flink_StreamProjection_projectTuple13 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>
SingleOutputStreamOperator<
Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>
projectTuple13() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> tType =
new TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(
fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
morf_AliasedField_greaterThan | /**
* @param value object to compare to (right hand side)
* @return a {@link Criterion} for a greater than expression of this field.
*/
public Criterion greaterThan(Object value) {
return Criterion.greaterThan(this, value);
} | 3.68 |
flink_HighAvailabilityServicesUtils_getWebMonitorAddress | /**
* Get address of web monitor from configuration.
*
* @param configuration Configuration contains those for WebMonitor.
* @param resolution Whether to try address resolution of the given hostname or not. This allows
* to fail fast in case that the hostname cannot be resolved.
* @return Address of WebMonitor.
*/
public static String getWebMonitorAddress(
Configuration configuration, AddressResolution resolution) throws UnknownHostException {
final String address =
checkNotNull(
configuration.getString(RestOptions.ADDRESS),
"%s must be set",
RestOptions.ADDRESS.key());
if (resolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
// Fail fast if the hostname cannot be resolved
//noinspection ResultOfMethodCallIgnored
InetAddress.getByName(address);
}
final int port = configuration.getInteger(RestOptions.PORT);
final boolean enableSSL = SecurityOptions.isRestSSLEnabled(configuration);
final String protocol = enableSSL ? "https://" : "http://";
return String.format("%s%s:%s", protocol, address, port);
} | 3.68 |
hbase_ScheduledChore_getMaximumAllowedTimeBetweenRuns | /** Returns max allowed time in millis between runs. */
private double getMaximumAllowedTimeBetweenRuns() {
// Threshold used to determine if the Chore's current run started too late
return 1.5 * timeUnit.toMillis(period);
} | 3.68 |
framework_VaadinSession_getAllSessions | /**
* Retrieves all {@link VaadinSession}s which are stored in the given HTTP
* session.
*
* @since 7.2
* @param httpSession
* the HTTP session
* @return the found VaadinSessions
*/
public static Collection<VaadinSession> getAllSessions(
HttpSession httpSession) {
Set<VaadinSession> sessions = new HashSet<>();
Enumeration<String> attributeNames = httpSession.getAttributeNames();
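// Each VaadinSession is stored as an HTTP session attribute whose name starts with the
// fully qualified VaadinSession class name followed by a dot.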
while (attributeNames.hasMoreElements()) {
String attributeName = attributeNames.nextElement();
if (attributeName.startsWith(VaadinSession.class.getName() + ".")) {
Object value = httpSession.getAttribute(attributeName);
if (value instanceof VaadinSession) {
sessions.add((VaadinSession) value);
}
}
}
return sessions;
} | 3.68 |
flink_UserDefinedFunctionHelper_getAccumulatorTypeOfAggregateFunction | /**
* Tries to infer the TypeInformation of an AggregateFunction's accumulator type.
*
* @param aggregateFunction The AggregateFunction for which the accumulator type is inferred.
* @param scalaType The implicitly inferred type of the accumulator type.
* @return The inferred accumulator type of the AggregateFunction.
*/
public static <T, ACC> TypeInformation<ACC> getAccumulatorTypeOfAggregateFunction(
ImperativeAggregateFunction<T, ACC> aggregateFunction, TypeInformation<ACC> scalaType) {
TypeInformation<ACC> userProvidedType = aggregateFunction.getAccumulatorType();
if (userProvidedType != null) {
return userProvidedType;
} else if (scalaType != null) {
return scalaType;
} else {
return TypeExtractor.createTypeInfo(
aggregateFunction,
ImperativeAggregateFunction.class,
aggregateFunction.getClass(),
1);
}
} | 3.68 |
morf_Oracle_matchesProduct | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#matchesProduct(java.lang.String)
*/
@Override
public boolean matchesProduct(String product) {
return product.equalsIgnoreCase("Oracle");
} | 3.68 |
framework_BasicEvent_setDescription | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.event.CalendarEventEditor#setDescription(java
* .lang.String)
*/
@Override
public void setDescription(String description) {
this.description = description;
fireEventChange();
} | 3.68 |
hbase_ChecksumUtil_generateExceptionForChecksumFailureForTest | /**
* Mechanism to throw an exception in case of hbase checksum failure. This is used by unit tests
* only.
* @param value Setting this to true will cause hbase checksum verification failures to generate
* exceptions.
*/
public static void generateExceptionForChecksumFailureForTest(boolean value) {
generateExceptions = value;
} | 3.68 |
hbase_ConnectionUtils_createClosestRowAfter | /**
* Create the closest row after the specified row
*/
static byte[] createClosestRowAfter(byte[] row) {
return Arrays.copyOf(row, row.length + 1);
} | 3.68 |
framework_PushRequestHandler_getPreInitializedAtmosphere | /**
* Returns an AtmosphereFramework instance which was initialized in the
* servlet context init phase by {@link JSR356WebsocketInitializer}, if such
* exists
*/
private AtmosphereFramework getPreInitializedAtmosphere(
ServletConfig vaadinServletConfig) {
String attributeName = JSR356WebsocketInitializer
.getAttributeName(vaadinServletConfig.getServletName());
Object framework = vaadinServletConfig.getServletContext()
.getAttribute(attributeName);
if (framework instanceof AtmosphereFramework) {
return (AtmosphereFramework) framework;
}
return null;
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterStringColumn | /**
* Test altering a string column.
*/
@Test
public void testAlterStringColumn() {
testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, STRING_FIELD), column(STRING_FIELD, DataType.STRING, 6).nullable(), expectedAlterTableAlterStringColumnStatement());
} | 3.68 |
hudi_FlinkInMemoryStateIndex_isGlobal | /**
* Only looks up by recordKey.
*/
@Override
public boolean isGlobal() {
return true;
} | 3.68 |