name | code_snippet | score |
---|---|---|
framework_DesignAttributeHandler_writeAlignment | /**
* Writes the alignment to the given child element attributes.
*
* @since 7.6.4
* @param childElement
* the child element
* @param alignment
* the component alignment
*/
public static void writeAlignment(Element childElement,
Alignment alignment) {
if (alignment.isMiddle()) {
childElement.attr(":middle", true);
} else if (alignment.isBottom()) {
childElement.attr(":bottom", true);
}
if (alignment.isCenter()) {
childElement.attr(":center", true);
} else if (alignment.isRight()) {
childElement.attr(":right", true);
}
} | 3.68 |
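For orientation, a minimal usage sketch of the snippet above. It assumes Jsoup's `Element`/`Tag` and Vaadin's `Alignment` are on the classpath; the import paths and element name are best-effort assumptions, not taken from the snippet.

```java
import org.jsoup.nodes.Element;
import org.jsoup.parser.Tag;
import com.vaadin.ui.Alignment;
import com.vaadin.ui.declarative.DesignAttributeHandler;

class WriteAlignmentSketch {
    static Element example() {
        // MIDDLE_CENTER is both "middle" and "center", so both attributes are written.
        Element child = new Element(Tag.valueOf("vaadin-button"), "");
        DesignAttributeHandler.writeAlignment(child, Alignment.MIDDLE_CENTER);
        return child; // now carries the boolean attributes ":middle" and ":center"
    }
}
```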
framework_AbstractComponentContainer_addComponentDetachListener | /* documented in interface */
@Override
public Registration addComponentDetachListener(
ComponentDetachListener listener) {
return addListener(ComponentDetachEvent.class, listener,
ComponentDetachListener.detachMethod);
} | 3.68 |
hadoop_HdfsFileStatus_length | /**
* Set the length of the entity (default = 0).
* @param length Entity length
* @return This Builder instance
*/
public Builder length(long length) {
this.length = length;
return this;
} | 3.68 |
hbase_ByteBufferUtils_toStringBinary | // For testing purposes
public static String toStringBinary(final ByteBuffer b, int off, int len) {
StringBuilder result = new StringBuilder();
// Just in case we are passed a 'len' that is > buffer length...
if (off >= b.capacity()) {
return result.toString();
}
if (off + len > b.capacity()) {
len = b.capacity() - off;
}
for (int i = off; i < off + len; ++i) {
int ch = b.get(i) & 0xFF;
if (
(ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z')
|| " `~!@#$%^&*()-_=+[]{}|;:'\",.<>/?".indexOf(ch) >= 0
) {
result.append((char) ch);
} else {
result.append(String.format("\\x%02X", ch));
}
}
return result.toString();
} | 3.68 |
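A small illustrative call (input bytes chosen arbitrarily): printable ASCII passes through unchanged, everything else is rendered as a `\xNN` escape.

```java
ByteBuffer buf = ByteBuffer.wrap(new byte[] { 'k', 'e', 'y', 0x00, (byte) 0xFF });
// Printable bytes are appended as characters, the rest are hex-escaped.
String rendered = ByteBufferUtils.toStringBinary(buf, 0, 5);  // "key\x00\xFF"
```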
framework_AbstractSelect_fireItemSetChange | /**
* Fires the item set change event.
*/
protected void fireItemSetChange() {
if (itemSetEventListeners != null && !itemSetEventListeners.isEmpty()) {
final Container.ItemSetChangeEvent event = new ItemSetChangeEvent(
this);
for (Object l : itemSetEventListeners.toArray()) {
((Container.ItemSetChangeListener) l)
.containerItemSetChange(event);
}
}
markAsDirty();
} | 3.68 |
framework_BeanValidator_getJavaxBeanValidator | /**
* Returns a shared JSR-303 validator instance to use.
*
* @return the validator to use
*/
public javax.validation.Validator getJavaxBeanValidator() {
return getJavaxBeanValidatorFactory().getValidator();
} | 3.68 |
flink_JsonRowDeserializationSchema_setFailOnMissingField | /** @deprecated Use the provided {@link Builder} instead. */
@Deprecated
public void setFailOnMissingField(boolean failOnMissingField) {
// TODO make this class immutable once we drop this method
this.failOnMissingField = failOnMissingField;
this.runtimeConverter = createConverter(this.typeInfo);
} | 3.68 |
hudi_TypeUtils_getValueToEnumMap | /**
 * Maps the string values extracted by the provided {@code valueMapper} to the corresponding
 * constants of the provided Enum's {@link Class}
*/
public static <EnumT extends Enum<EnumT>> Map<String, EnumT> getValueToEnumMap(
@Nonnull Class<EnumT> klass,
@Nonnull Function<EnumT, String> valueMapper
) {
return Arrays.stream(klass.getEnumConstants())
.collect(Collectors.toMap(valueMapper, Function.identity()));
} | 3.68 |
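A short illustrative use; the `Op` enum and its `symbol` field are hypothetical, introduced only for this example.

```java
enum Op {
    ADD("+"), SUB("-");
    final String symbol;
    Op(String symbol) { this.symbol = symbol; }
}

// Reverse lookup from the mapped value back to the enum constant.
Map<String, Op> bySymbol = TypeUtils.getValueToEnumMap(Op.class, op -> op.symbol);
// bySymbol.get("+") == Op.ADD
```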
pulsar_PulsarProtobufNativeRowDecoder_decodeRow | /**
 * Decodes a {@link ByteBuf} using the {@link org.apache.pulsar.client.api.schema.GenericSchema}.
 * @param byteBuf the serialized record payload
 * @return the decoded field values keyed by column handle
*/
@Override
public Optional<Map<DecoderColumnHandle, FieldValueProvider>> decodeRow(ByteBuf byteBuf) {
DynamicMessage dynamicMessage;
try {
GenericProtobufNativeRecord record = (GenericProtobufNativeRecord) genericProtobufNativeSchema
.decode(byteBuf);
dynamicMessage = record.getProtobufRecord();
} catch (Exception e) {
log.error(e);
throw new TrinoException(GENERIC_INTERNAL_ERROR, "Decoding protobuf record failed.", e);
}
return Optional.of(columnDecoders.entrySet().stream()
.collect(toImmutableMap(
Map.Entry::getKey,
entry -> entry.getValue().decodeField(dynamicMessage))));
} | 3.68 |
cron-utils_FieldDayOfWeekDefinitionBuilder_withIntMapping | /**
* Defines mapping between integer values with equivalent meaning.
*
* @param source - higher value
* @param dest - lower value with equivalent meaning to source
* @return this FieldDayOfWeekDefinitionBuilder instance
*/
@Override
public FieldDayOfWeekDefinitionBuilder withIntMapping(final int source, final int dest) {
super.withIntMapping(source, dest);
return this;
} | 3.68 |
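A sketch of where this builder method typically sits, assuming the usual cron-utils `CronDefinitionBuilder` fluent API (the surrounding calls are assumptions, not shown in the snippet); mapping 7 to 0 lets both integers denote Sunday.

```java
CronDefinition definition = CronDefinitionBuilder.defineCron()
        .withDayOfWeek()
            .withIntMapping(7, 0)   // treat 7 as an alias for 0 (Sunday)
            .and()
        .instance();
```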
hbase_MergeTableRegionsProcedure_postMergeRegionsCommit | /**
* Post merge region action
* @param env MasterProcedureEnv
**/
private void postMergeRegionsCommit(final MasterProcedureEnv env) throws IOException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.postMergeRegionsCommit(regionsToMerge, mergedRegion, getUser());
}
} | 3.68 |
hmily_HmilyXaResource_rollback | /**
* Rollback.
*
* @throws XAException the xa exception
*/
public void rollback() throws XAException {
this.rollback(this.xid);
} | 3.68 |
AreaShop_FastAsyncWorldEditWorldGuardHandler_buildDomain | /**
* Build a DefaultDomain from a RegionAccessSet.
* @param regionAccessSet RegionAccessSet to read
* @return DefaultDomain containing the entities from the RegionAccessSet
*/
private DefaultDomain buildDomain(RegionAccessSet regionAccessSet) {
DefaultDomain owners = new DefaultDomain();
for(String playerName : regionAccessSet.getPlayerNames()) {
owners.addPlayer(playerName);
}
for(UUID uuid : regionAccessSet.getPlayerUniqueIds()) {
owners.addPlayer(uuid);
}
for(String group : regionAccessSet.getGroupNames()) {
owners.addGroup(group);
}
return owners;
} | 3.68 |
flink_GivenJavaClasses_noJavaClassesThat | /** Equivalent of {@link ArchRuleDefinition#noClasses()}, but only for Java classes. */
public static GivenClassesConjunction noJavaClassesThat(
DescribedPredicate<JavaClass> predicate) {
return noClasses().that(areJavaClasses()).and(predicate);
} | 3.68 |
morf_ViewChanges_isEmpty | /**
* @return true if both sets are empty, false otherwise.
*/
public boolean isEmpty() {
return dropSet.isEmpty() && deploySet.isEmpty();
} | 3.68 |
hadoop_WritableFactories_newInstance | /**
* Create a new instance of a class with a defined factory.
 * @param c the Writable class to instantiate.
* @return a new instance of a class with a defined factory.
*/
public static Writable newInstance(Class<? extends Writable> c) {
return newInstance(c, null);
} | 3.68 |
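A sketch of the factory registration this method relies on; `MyRecord` is a hypothetical `Writable` with a no-argument constructor, and the reflective fallback when no factory is registered is an assumption based on typical Hadoop behaviour, not shown above.

```java
// Register a factory so newInstance can construct the type without reflection.
WritableFactories.setFactory(MyRecord.class, MyRecord::new);
Writable record = WritableFactories.newInstance(MyRecord.class);
```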
flink_Description_text | /**
* Creates a simple block of text.
*
* @param text a simple block of text
* @return block of text
*/
public DescriptionBuilder text(String text) {
blocks.add(TextElement.text(text));
return this;
} | 3.68 |
flink_CsvBulkWriter_forSchema | /**
* Builds a writer with Jackson schema and a type converter.
*
* @param mapper The specialized mapper for producing CSV.
 * @param schema The schema that defines the mapping properties.
* @param converter The type converter that converts incoming elements of type {@code <T>} into
* elements of type JsonNode.
* @param stream The output stream.
* @param <T> The type of the elements accepted by this writer.
* @param <C> The type of the converter context.
* @param <R> The type of the elements produced by this writer.
*/
static <T, R, C> CsvBulkWriter<T, R, C> forSchema(
CsvMapper mapper,
CsvSchema schema,
Converter<T, R, C> converter,
@Nullable C converterContext,
FSDataOutputStream stream) {
return new CsvBulkWriter<>(mapper, schema, converter, converterContext, stream);
} | 3.68 |
streampipes_SpTrajectoryBuilder_removeOldestPoint | /**
* removes the oldest point (Index 0) from the CoordinateList object.
*/
private void removeOldestPoint() {
coordinateList.remove(0);
} | 3.68 |
dubbo_ReflectUtils_findHierarchicalTypes | /**
* Find the hierarchical types from the source {@link Class class} by specified {@link Class type}.
*
* @param sourceClass the source {@link Class class}
* @param matchType the type to match
* @param <T> the type to match
* @return non-null read-only {@link Set}
* @since 2.7.5
*/
public static <T> Set<Class<T>> findHierarchicalTypes(Class<?> sourceClass, Class<T> matchType) {
if (sourceClass == null) {
return Collections.emptySet();
}
Set<Class<T>> hierarchicalTypes = new LinkedHashSet<>();
if (matchType.isAssignableFrom(sourceClass)) {
hierarchicalTypes.add((Class<T>) sourceClass);
}
// Find all super classes
hierarchicalTypes.addAll(findHierarchicalTypes(sourceClass.getSuperclass(), matchType));
return unmodifiableSet(hierarchicalTypes);
} | 3.68 |
hbase_ServerName_getHostNameMinusDomain | /**
* @param hostname the hostname string to get the actual hostname from
* @return hostname minus the domain, if there is one (will do pass-through on ip addresses)
*/
private static String getHostNameMinusDomain(final String hostname) {
if (InetAddresses.isInetAddress(hostname)) {
return hostname;
}
List<String> parts = Splitter.on('.').splitToList(hostname);
if (parts.size() == 0) {
return hostname;
}
Iterator<String> i = parts.iterator();
return i.next();
} | 3.68 |
hbase_BlockingRpcCallback_run | /**
* Called on completion of the RPC call with the response object, or {@code null} in the case of
* an error.
* @param parameter the response object or {@code null} if an error occurred
*/
@Override
public void run(R parameter) {
synchronized (this) {
result = parameter;
resultSet = true;
this.notifyAll();
}
} | 3.68 |
hbase_ExceptionUtil_isInterrupt | /** Returns true if the throwable comes from an interruption, false otherwise. */
public static boolean isInterrupt(Throwable t) {
if (t instanceof InterruptedException) {
return true;
}
if (t instanceof SocketTimeoutException) {
return false;
}
return (t instanceof InterruptedIOException || t instanceof ClosedByInterruptException);
} | 3.68 |
hbase_StoreFileInfo_isMobRefFile | /**
* Checks if the file is a MOB reference file, created by snapshot
* @param path path to a file
* @return true, if - yes, false otherwise
*/
public static boolean isMobRefFile(final Path path) {
String fileName = path.getName();
int lastIndex = fileName.lastIndexOf(MobUtils.SEP);
if (lastIndex < 0) {
return false;
}
String[] parts = new String[2];
parts[0] = fileName.substring(0, lastIndex);
parts[1] = fileName.substring(lastIndex + 1);
String name = parts[0] + "." + parts[1];
Matcher m = REF_NAME_PATTERN.matcher(name);
return m.matches() && m.groupCount() > 1;
} | 3.68 |
flink_TaskExecutorMemoryConfiguration_getJvmMetaspace | /** Returns the maximum Metaspace size allowed for the task manager. */
public Long getJvmMetaspace() {
return jvmMetaspace;
} | 3.68 |
hbase_PrivateCellUtil_getValueAsDouble | /**
* Converts the value bytes of the given cell into a double value
* @return value as double
*/
public static double getValueAsDouble(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toDouble(((ByteBufferExtendedCell) cell).getValueByteBuffer(),
((ByteBufferExtendedCell) cell).getValuePosition());
}
return Bytes.toDouble(cell.getValueArray(), cell.getValueOffset());
} | 3.68 |
hudi_AppendWriteFunction_endInput | /**
* End input action for batch source.
*/
public void endInput() {
super.endInput();
flushData(true);
this.writeStatuses.clear();
} | 3.68 |
hbase_ParseFilter_parseFilterString | /**
* Parses the filterString and constructs a filter using it
* <p>
* @param filterStringAsByteArray filter string given by the user
* @return filter object we constructed
*/
public Filter parseFilterString(byte[] filterStringAsByteArray) throws CharacterCodingException {
// stack for the operators and parenthesis
Stack<ByteBuffer> operatorStack = new Stack<>();
// stack for the filter objects
Stack<Filter> filterStack = new Stack<>();
Filter filter = null;
for (int i = 0; i < filterStringAsByteArray.length; i++) {
if (filterStringAsByteArray[i] == ParseConstants.LPAREN) {
// LPAREN found
operatorStack.push(ParseConstants.LPAREN_BUFFER);
} else if (
filterStringAsByteArray[i] == ParseConstants.WHITESPACE
|| filterStringAsByteArray[i] == ParseConstants.TAB
) {
// WHITESPACE or TAB found
continue;
} else if (checkForOr(filterStringAsByteArray, i)) {
// OR found
i += ParseConstants.OR_ARRAY.length - 1;
reduce(operatorStack, filterStack, ParseConstants.OR_BUFFER);
operatorStack.push(ParseConstants.OR_BUFFER);
} else if (checkForAnd(filterStringAsByteArray, i)) {
// AND found
i += ParseConstants.AND_ARRAY.length - 1;
reduce(operatorStack, filterStack, ParseConstants.AND_BUFFER);
operatorStack.push(ParseConstants.AND_BUFFER);
} else if (checkForSkip(filterStringAsByteArray, i)) {
// SKIP found
i += ParseConstants.SKIP_ARRAY.length - 1;
reduce(operatorStack, filterStack, ParseConstants.SKIP_BUFFER);
operatorStack.push(ParseConstants.SKIP_BUFFER);
} else if (checkForWhile(filterStringAsByteArray, i)) {
// WHILE found
i += ParseConstants.WHILE_ARRAY.length - 1;
reduce(operatorStack, filterStack, ParseConstants.WHILE_BUFFER);
operatorStack.push(ParseConstants.WHILE_BUFFER);
} else if (filterStringAsByteArray[i] == ParseConstants.RPAREN) {
// RPAREN found
if (operatorStack.empty()) {
throw new IllegalArgumentException("Mismatched parenthesis");
}
ByteBuffer argumentOnTopOfStack = operatorStack.peek();
if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
operatorStack.pop();
continue;
}
while (!argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
filterStack.push(popArguments(operatorStack, filterStack));
if (operatorStack.empty()) {
throw new IllegalArgumentException("Mismatched parenthesis");
}
argumentOnTopOfStack = operatorStack.pop();
}
} else {
// SimpleFilterExpression found
byte[] filterSimpleExpression = extractFilterSimpleExpression(filterStringAsByteArray, i);
i += (filterSimpleExpression.length - 1);
filter = parseSimpleFilterExpression(filterSimpleExpression);
filterStack.push(filter);
}
}
// Finished parsing filterString
while (!operatorStack.empty()) {
filterStack.push(popArguments(operatorStack, filterStack));
}
if (filterStack.empty()) {
throw new IllegalArgumentException("Incorrect Filter String");
}
filter = filterStack.pop();
if (!filterStack.empty()) {
throw new IllegalArgumentException("Incorrect Filter String");
}
return filter;
} | 3.68 |
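A brief usage sketch with HBase's filter-language syntax; the filters named below are standard HBase filters and the expression itself is illustrative.

```java
ParseFilter parseFilter = new ParseFilter();
// Mismatched parentheses raise IllegalArgumentException (see the checks above);
// the byte-to-string decoding can throw CharacterCodingException.
Filter filter = parseFilter.parseFilterString(
    Bytes.toBytes("(PrefixFilter ('row1') AND KeyOnlyFilter ()) OR FirstKeyOnlyFilter ()"));
```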
graphhopper_PointList_makeImmutable | /**
* Once immutable, there is no way to make this object mutable again. This is done to ensure the consistency of
* shallow copies. If you need to modify this object again, you have to create a deep copy of it.
*/
public PointList makeImmutable() {
this.isImmutable = true;
return this;
} | 3.68 |
dubbo_AbstractReferenceConfig_setGeneric | /**
* @deprecated Replace to {@link AbstractReferenceConfig#setGeneric(String)}
*/
@Deprecated
public void setGeneric(Boolean generic) {
if (generic != null) {
this.generic = generic.toString();
}
} | 3.68 |
framework_AbstractOrderedLayoutConnector_onConnectorHierarchyChange | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.AbstractComponentContainerConnector#
* onConnectorHierarchyChange
* (com.vaadin.client.ConnectorHierarchyChangeEvent)
*/
@Override
public void onConnectorHierarchyChange(
ConnectorHierarchyChangeEvent event) {
Profiler.enter("AOLC.onConnectorHierarchyChange");
List<ComponentConnector> previousChildren = event.getOldChildren();
int currentIndex = 0;
VAbstractOrderedLayout layout = getWidget();
// remove spacing as it is exists as separate elements that cannot be
// removed easily after reordering the contents
Profiler.enter(
"AOLC.onConnectorHierarchyChange temporarily remove spacing");
layout.setSpacing(false);
Profiler.leave(
"AOLC.onConnectorHierarchyChange temporarily remove spacing");
// first remove extra components to avoid extra detaches and attaches
for (ComponentConnector child : previousChildren) {
Profiler.enter("AOLC.onConnectorHierarchyChange remove children");
if (child.getParent() != this) {
Slot slot = layout.getSlot(child.getWidget());
slot.setWidgetResizeListener(null);
if (slot.hasCaption()) {
slot.setCaptionResizeListener(null);
}
slot.setSpacingResizeListener(null);
child.removeStateChangeHandler(childStateChangeHandler);
layout.removeWidget(child.getWidget());
}
Profiler.leave("AOLC.onConnectorHierarchyChange remove children");
}
Profiler.leave("AOLC.onConnectorHierarchyChange");
// reorder remaining components and add any new components
for (ComponentConnector child : getChildComponents()) {
Profiler.enter("AOLC.onConnectorHierarchyChange add children");
Slot slot = layout.getSlot(child.getWidget());
if (slot.getParent() != layout) {
Profiler.enter(
"AOLC.onConnectorHierarchyChange add state change handler");
child.addStateChangeHandler(childStateChangeHandler);
Profiler.leave(
"AOLC.onConnectorHierarchyChange add state change handler");
}
Profiler.enter("AOLC.onConnectorHierarchyChange addOrMoveSlot");
layout.addOrMoveSlot(slot, currentIndex++, false);
Profiler.leave("AOLC.onConnectorHierarchyChange addOrMoveSlot");
Profiler.leave("AOLC.onConnectorHierarchyChange add children");
}
// re-add spacing for the elements that should have it
Profiler.enter("AOLC.onConnectorHierarchyChange setSpacing");
// spacings were removed above
if (getState().spacing) {
layout.setSpacing(true);
}
Profiler.leave("AOLC.onConnectorHierarchyChange setSpacing");
updateInternalState();
} | 3.68 |
framework_ContainerHierarchicalWrapper_addPropertySetChangeListener | /*
* Registers a new Property set change listener for this Container. Don't
* add a JavaDoc comment here, we use the default documentation from
* implemented interface.
*/
@Override
public void addPropertySetChangeListener(
Container.PropertySetChangeListener listener) {
if (container instanceof Container.PropertySetChangeNotifier) {
((Container.PropertySetChangeNotifier) container)
.addPropertySetChangeListener(
new PiggybackListener(listener));
}
} | 3.68 |
framework_Page_getBrowserWindowHeight | /**
* Gets the last known height of the browser window in which this UI
* resides.
*
* @return the browser window height in pixels
*/
public int getBrowserWindowHeight() {
return browserWindowHeight;
} | 3.68 |
hudi_HoodieColumnProjectionUtils_supportTimestamp | /**
 * If the schema contains timestamp columns, this method is used for compatibility when there are no timestamp fields.
*
* <p>We expect to use parquet-avro reader {@link org.apache.hudi.hadoop.avro.HoodieAvroParquetReader} to read
* timestamp column when read columns contain timestamp type.
*/
public static boolean supportTimestamp(Configuration conf) {
List<String> readCols = Arrays.asList(getReadColumnNames(conf));
if (readCols.isEmpty()) {
return false;
}
String colTypes = conf.get(IOConstants.COLUMNS_TYPES, "");
if (colTypes == null || colTypes.isEmpty()) {
return false;
}
ArrayList<TypeInfo> types = TypeInfoUtils.getTypeInfosFromTypeString(colTypes);
List<String> names = getIOColumns(conf);
return IntStream.range(0, names.size()).filter(i -> readCols.contains(names.get(i)))
.anyMatch(i -> typeContainsTimestamp(types.get(i)));
} | 3.68 |
hmily_OriginTrackedPropertiesLoader_load | /**
* Load map.
*
* @param expandLists the expand lists
* @return the map
* @throws IOException the io exception
*/
public Map<String, Object> load(final boolean expandLists) throws IOException {
try (CharacterReader reader = new CharacterReader(this.resource)) {
Map<String, Object> result = new LinkedHashMap<>();
StringBuilder buffer = new StringBuilder();
while (reader.read()) {
String key = loadKey(buffer, reader).trim();
if (expandLists && key.endsWith("[]")) {
key = key.substring(0, key.length() - 2);
int index = 0;
do {
Object value = loadValue(buffer, reader, true);
put(result, key + "[" + (index++) + "]", value);
if (!reader.isEndOfLine()) {
reader.read();
}
}
while (!reader.isEndOfLine());
} else {
Object value = loadValue(buffer, reader, false);
put(result, key, value);
}
}
return result;
}
} | 3.68 |
hadoop_NameValuePair_getValue | /**
* Get the value.
* @return The value.
*/
public Object getValue() {
return value;
} | 3.68 |
framework_DateField_changeVariables | /*
* Invoked when a variable of the component changes. Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public void changeVariables(Object source, Map<String, Object> variables) {
if (!isReadOnly() && (variables.containsKey("year")
|| variables.containsKey("month")
|| variables.containsKey("day") || variables.containsKey("hour")
|| variables.containsKey("min") || variables.containsKey("sec")
|| variables.containsKey("msec")
|| variables.containsKey("dateString"))) {
// Old and new dates
final Date oldDate = getValue();
Date newDate = null;
// this enables analyzing invalid input on the server
final String newDateString = (String) variables.get("dateString");
dateString = newDateString;
// Gets the new date in parts
boolean hasChanges = false;
Map<Resolution, Integer> calendarFieldChanges = new HashMap<Resolution, Integer>();
for (Resolution r : Resolution
.getResolutionsHigherOrEqualTo(resolution)) {
// Only handle what the client is allowed to send. The same
// resolutions that are painted
String variableName = variableNameForResolution.get(r);
if (variables.containsKey(variableName)) {
Integer value = (Integer) variables.get(variableName);
if (r == Resolution.MONTH) {
// Calendar MONTH is zero based
value--;
}
if (value >= 0) {
hasChanges = true;
calendarFieldChanges.put(r, value);
}
}
}
// If no new variable values were received, use the previous value
if (!hasChanges) {
newDate = null;
} else {
// Clone the calendar for date operation
final Calendar cal = getCalendar();
// Update the value based on the received info
// Must set in this order to avoid invalid dates (or wrong
// dates if lenient is true) in calendar
for (int r = Resolution.YEAR.ordinal(); r >= 0; r--) {
Resolution res = Resolution.values()[r];
if (calendarFieldChanges.containsKey(res)) {
// Field resolution should be included. Others are
// skipped so that client can not make unexpected
// changes (e.g. day change even though resolution is
// year).
Integer newValue = calendarFieldChanges.get(res);
cal.set(res.getCalendarField(), newValue);
}
}
newDate = cal.getTime();
}
if (newDate == null && dateString != null
&& !"".equals(dateString)) {
try {
Date parsedDate = handleUnparsableDateString(dateString);
setValue(parsedDate, true);
/*
* Ensure the value is sent to the client if the value is
* set to the same as the previous (#4304). Does not repaint
* if handleUnparsableDateString throws an exception. In
* this case the invalid text remains in the DateField.
*/
markAsDirty();
} catch (Converter.ConversionException e) {
/*
 * Datefield now contains some text that couldn't be parsed
 * into a date. ValueChangeEvent is fired after the value is
* changed and the flags are set
*/
if (oldDate != null) {
/*
* Set the logic value to null without firing the
* ValueChangeEvent
*/
preventValueChangeEvent = true;
try {
setValue(null);
} finally {
preventValueChangeEvent = false;
}
/*
* Reset the dateString (overridden to null by setValue)
*/
dateString = newDateString;
}
/*
* Saves the localized message of parse error. This can be
* overridden in handleUnparsableDateString. The message
* will later be used to show a validation error.
*/
currentParseErrorMessage = e.getLocalizedMessage();
/*
* The value of the DateField should be null if an invalid
* value has been given. Not using setValue() since we do
* not want to cause the client side value to change.
*/
uiHasValidDateString = false;
/*
* If value was changed fire the ValueChangeEvent
*/
if (oldDate != null) {
fireValueChange(false);
}
/*
* Because of our custom implementation of isValid(), that
* also checks the parsingSucceeded flag, we must also
* notify the form (if this is used in one) that the
* validity of this field has changed.
*
* Normally fields validity doesn't change without value
* change and form depends on this implementation detail.
*/
notifyFormOfValidityChange();
markAsDirty();
}
} else if (newDate != oldDate
&& (newDate == null || !newDate.equals(oldDate))) {
// Don't require a repaint, client updates itself
setValue(newDate, true);
} else if (!uiHasValidDateString) { // oldDate ==
// newDate == null
// Empty value set, previously contained unparsable date string,
// clear related internal fields
setValue(null);
}
}
if (variables.containsKey(FocusEvent.EVENT_ID)) {
fireEvent(new FocusEvent(this));
}
if (variables.containsKey(BlurEvent.EVENT_ID)) {
fireEvent(new BlurEvent(this));
}
} | 3.68 |
hibernate-validator_BaseHibernateValidatorConfiguration_locales | /**
* Allows setting the list of the locales supported by this ValidatorFactory.
* <p>
* Can be used for advanced locale resolution and/or to force the initialization of the resource bundles at
* bootstrap.
* <p>
* If not set, defaults to a singleton containing {@link Locale#getDefault()}.
*
* @since 6.1.1
*/
@Incubating
default S locales(Locale... locales) {
return locales( new HashSet<>( Arrays.asList( locales ) ) );
} | 3.68 |
framework_ConnectorFocusAndBlurHandler_addHandlers | /**
* Add focus/blur handlers to the widget and a state change handler for the
* {@code connector}.
*
* @param connector
* connector to register state change handler
* @param widget
* widget to register focus/blur handler
* @return ConnectorFocusAndBlurHandler instance to remove all registered
* handlers
*/
public static ConnectorFocusAndBlurHandler addHandlers(
AbstractComponentConnector connector, Widget widget) {
ConnectorFocusAndBlurHandler handler = new ConnectorFocusAndBlurHandler(
connector, widget);
handler.stateChangeRegistration = connector
.addStateChangeHandler("registeredEventListeners", handler);
return handler;
} | 3.68 |
framework_Slot_setRelativeHeight | /**
* Set if the slot has a relative height.
*
* @param relativeHeight
* True if the slot uses a relative height, false if the slot has
* a static height
*/
public void setRelativeHeight(boolean relativeHeight) {
this.relativeHeight = relativeHeight;
updateRelativeSize(relativeHeight, "height");
} | 3.68 |
morf_Function_substring | /**
* Helper method to create an instance of the "substring" SQL function.
*
* @param expression the expression to evaluate
* @param start the start point in the substring
* @param length the length of the substring
* @return an instance of the substring function
*/
public static Function substring(AliasedField expression, AliasedField start, AliasedField length) {
return new Function(FunctionType.SUBSTRING, expression, start, length);
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_getCurrentShardingTotalCount | /**
* Get sharding total count which running on current job server.
*
* @param jobName job name
* @return sharding total count which running on current job server
*/
public int getCurrentShardingTotalCount(final String jobName) {
return currentShardingTotalCountMap.getOrDefault(jobName, 0);
} | 3.68 |
hbase_HBaseCluster_restoreInitialStatus | /**
 * Restores the cluster to its initial state if this is a real cluster, otherwise does nothing.
* This is a best effort restore. If the servers are not reachable, or insufficient permissions,
* etc. restoration might be partial.
* @return whether restoration is complete
*/
public boolean restoreInitialStatus() throws IOException {
return restoreClusterMetrics(getInitialClusterMetrics());
} | 3.68 |
flink_TimeWindowUtil_getShiftTimeZone | /**
 * Gets the shifted timezone of the window if the time attribute type is TIMESTAMP_LTZ; always
 * returns the UTC timezone if the time attribute type is TIMESTAMP, which means no shift is applied.
*/
public static ZoneId getShiftTimeZone(LogicalType timeAttributeType, ZoneId zoneFromConfig) {
boolean needShiftTimeZone = timeAttributeType instanceof LocalZonedTimestampType;
return needShiftTimeZone ? zoneFromConfig : UTC_ZONE_ID;
} | 3.68 |
flink_TableDescriptor_option | /**
* Sets the given option on the table.
*
* <p>Option keys must be fully specified. When defining options for a {@link Format
* format}, use {@link #format(FormatDescriptor)} instead.
*
* <p>Example:
*
* <pre>{@code
* TableDescriptor.forConnector("kafka")
* .option("scan.startup.mode", "latest-offset")
* .build();
* }</pre>
*/
public Builder option(String key, String value) {
Preconditions.checkNotNull(key, "Key must not be null.");
Preconditions.checkNotNull(value, "Value must not be null.");
options.put(key, value);
return this;
} | 3.68 |
streampipes_PipelineElementMigrationManager_handleFailedMigrations | /**
 * Takes care of the failed migrations of pipeline elements.
* This includes the following steps:
* <ul>
* <li> logging of failed pipeline elements
* <li> setting migration results as pipeline notifications
* <li> updating pipeline health status
* <li> stopping the pipeline
* </ul>
*
* @param pipeline the pipeline affected by failed migrations
* @param failedMigrations the list of failed migrations
*/
protected void handleFailedMigrations(Pipeline pipeline, List<MigrationResult<?>> failedMigrations) {
LOG.error("Failures in migration detected - The following pipeline elements could to be migrated:\n"
+ StringUtils.join(failedMigrations.stream().map(Record::toString).toList()), "\n");
pipeline.setPipelineNotifications(failedMigrations.stream().map(
failedMigration -> "Failed migration of pipeline element: %s".formatted(failedMigration.message())
).toList());
pipeline.setHealthStatus(PipelineHealthStatus.REQUIRES_ATTENTION);
pipelineStorage.updatePipeline(pipeline);
// get updated version of pipeline after modification
pipeline = pipelineStorage.getPipeline(pipeline.getPipelineId());
stopPipeline(pipeline);
} | 3.68 |
hbase_SyncTable_nextRow | /**
* Advance to the next row and return its row key. Returns null iff there are no more rows.
*/
public byte[] nextRow() {
if (nextRowResult == null) {
// no cached row - check scanner for more
while (results.hasNext()) {
nextRowResult = results.next();
Cell nextCell = nextRowResult.rawCells()[0];
if (
currentRow == null || !Bytes.equals(currentRow, 0, currentRow.length,
nextCell.getRowArray(), nextCell.getRowOffset(), nextCell.getRowLength())
) {
// found next row
break;
} else {
// found another result from current row, keep scanning
nextRowResult = null;
}
}
if (nextRowResult == null) {
// end of data, no more rows
currentRowResult = null;
currentRow = null;
return null;
}
}
// advance to cached result for next row
currentRowResult = nextRowResult;
nextCellInRow = 0;
currentRow = currentRowResult.getRow();
nextRowResult = null;
return currentRow;
} | 3.68 |
hbase_ReportMakingVisitor_isTableTransition | /** Returns True iff first row in hbase:meta or if we've broached a new table in hbase:meta */
private boolean isTableTransition(RegionInfo ri) {
return this.previous == null || !this.previous.getTable().equals(ri.getTable());
} | 3.68 |
hudi_HoodieSparkQuickstart_deleteByPartition | /**
* Delete the data of the first partition.
*/
public static void deleteByPartition(SparkSession spark, String tablePath, String tableName) {
Dataset<Row> df = spark.emptyDataFrame();
df.write().format("hudi")
.options(QuickstartUtils.getQuickstartWriteConfigs())
.option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts")
.option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid")
.option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath")
.option(TBL_NAME.key(), tableName)
.option("hoodie.datasource.write.operation", WriteOperationType.DELETE_PARTITION.value())
.option("hoodie.datasource.write.partitions.to.delete", HoodieExampleDataGenerator.DEFAULT_FIRST_PARTITION_PATH)
.mode(Append)
.save(tablePath);
} | 3.68 |
hadoop_CommitUtilsWithMR_getConfigurationOption | /**
* Get a configuration option, with any value in the job configuration
* taking priority over that in the filesystem.
* This allows for per-job override of FS parameters.
*
* Order is: job context, filesystem config, default value
*
* @param context job/task context
* @param fsConf filesystem configuration. Get this from the FS to guarantee
* per-bucket parameter propagation
* @param key key to look for
* @param defVal default value
* @return the configuration option.
*/
public static String getConfigurationOption(
JobContext context,
Configuration fsConf,
String key,
String defVal) {
return context.getConfiguration().getTrimmed(key,
fsConf.getTrimmed(key, defVal));
} | 3.68 |
hadoop_SysInfoWindows_getCpuUsagePercentage | /** {@inheritDoc} */
@Override
public synchronized float getCpuUsagePercentage() {
refreshIfNeeded();
float ret = cpuUsage;
if (ret != -1) {
ret = ret / numProcessors;
}
return ret;
} | 3.68 |
rocketmq-connect_WorkerTask_run | /**
* do execute data
*/
@Override
public void run() {
ClassLoader savedLoader = Plugin.compareAndSwapLoaders(loader);
String savedName = Thread.currentThread().getName();
try {
Thread.currentThread().setName(THREAD_NAME_PREFIX + id);
doRun();
} catch (InterruptedException e) {
// set interrupted flag to caller
Thread.currentThread().interrupt();
} catch (Throwable t) {
onFailure(t);
throw t;
} finally {
Thread.currentThread().setName(savedName);
Plugin.compareAndSwapLoaders(savedLoader);
shutdownLatch.countDown();
}
} | 3.68 |
flink_TestingSourceSettings_getCheckpointingMode | /** Checkpointing mode required for the source. */
public CheckpointingMode getCheckpointingMode() {
return checkpointingMode;
} | 3.68 |
flink_SymbolUtil_commonToCalcite | /**
* Converts from a common to a Calcite symbol. The common symbol can be a publicly exposed one
 * such as {@link TimeIntervalUnit} or an internal one such as {@link DateTimeUtils.TimeUnitRange}.
*/
public static Enum<?> commonToCalcite(Enum<?> commonSymbol) {
checkCommonSymbol(commonSymbol);
Enum<?> calciteSymbol = commonToCalcite.get(commonSymbol);
if (calciteSymbol == null) {
calciteSymbol = internalCommonToCalcite.get(commonSymbol);
if (calciteSymbol == null) {
throw new UnsupportedOperationException(
String.format("Cannot map '%s' to an internal symbol.", commonSymbol));
}
}
return calciteSymbol;
} | 3.68 |
flink_DataStream_broadcast | /**
* Sets the partitioning of the {@link DataStream} so that the output elements are broadcasted
 * to every parallel instance of the next operation. In addition, it implicitly creates as many
 * {@link org.apache.flink.api.common.state.BroadcastState broadcast states} as the specified
 * descriptors, which can be used to store the elements of the stream.
*
* @param broadcastStateDescriptors the descriptors of the broadcast states to create.
* @return A {@link BroadcastStream} which can be used in the {@link #connect(BroadcastStream)}
* to create a {@link BroadcastConnectedStream} for further processing of the elements.
*/
@PublicEvolving
public BroadcastStream<T> broadcast(
final MapStateDescriptor<?, ?>... broadcastStateDescriptors) {
Preconditions.checkNotNull(broadcastStateDescriptors);
final DataStream<T> broadcastStream = setConnectionType(new BroadcastPartitioner<>());
return new BroadcastStream<>(environment, broadcastStream, broadcastStateDescriptors);
} | 3.68 |
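A minimal sketch of the broadcast-state pattern this method enables; `rules`, `events` and `MyBroadcastProcessFunction` are illustrative names rather than anything defined in the snippet.

```java
MapStateDescriptor<String, String> rulesDescriptor =
        new MapStateDescriptor<>("rulesState", Types.STRING, Types.STRING);

// Broadcast the low-throughput stream and connect it to the main stream.
BroadcastStream<String> broadcastRules = rules.broadcast(rulesDescriptor);
events.connect(broadcastRules)
      .process(new MyBroadcastProcessFunction()); // extends BroadcastProcessFunction
```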
framework_VAbstractTextualDate_buildDate | /**
* Updates the text field according to the current date (provided by
* {@link #getDate()}). Takes care of updating text, enabling and disabling
* the field, setting/removing readonly status and updating readonly styles.
* <p>
* For internal use only. May be removed or replaced in the future.
* <p>
* TODO: Split part of this into a method that only updates the text as this
* is what usually is needed except for updateFromUIDL.
*/
public void buildDate() {
removeStyleName(getStylePrimaryName() + PARSE_ERROR_CLASSNAME);
// Create the initial text for the textfield
String dateText;
Date currentDate = getDate();
// Always call this to ensure the format ends up in the element
String formatString = getFormatString();
if (currentDate != null) {
dateText = getDateTimeService().formatDate(currentDate,
formatString, timeZone);
} else {
dateText = "";
}
setText(dateText);
text.setEnabled(enabled);
text.setReadOnly(readonly);
if (readonly) {
text.addStyleName("v-readonly");
Roles.getTextboxRole().setAriaReadonlyProperty(text.getElement(),
true);
} else {
text.removeStyleName("v-readonly");
Roles.getTextboxRole()
.removeAriaReadonlyProperty(text.getElement());
}
} | 3.68 |
flink_CheckpointConfig_getCheckpointIntervalDuringBacklog | /**
* Gets the interval in which checkpoints are periodically scheduled during backlog.
*
* <p>This setting defines the base interval. Checkpoint triggering may be delayed by the
* settings {@link #getMaxConcurrentCheckpoints()} and {@link #getMinPauseBetweenCheckpoints()}.
*
* <p>If not explicitly configured, checkpoint interval during backlog will be the same as that
* in normal situation(see {@link #getCheckpointInterval()}). If the return value is zero, it
* means that checkpoints would be disabled during backlog.
*
* @return The checkpoint interval, in milliseconds.
*/
public long getCheckpointIntervalDuringBacklog() {
long intervalDuringBacklog =
configuration
.getOptional(
ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG)
.map(Duration::toMillis)
.orElseGet(this::getCheckpointInterval);
if (intervalDuringBacklog < MINIMAL_CHECKPOINT_TIME) {
intervalDuringBacklog = CheckpointCoordinatorConfiguration.DISABLED_CHECKPOINT_INTERVAL;
}
long checkpointInterval = getCheckpointInterval();
if (checkpointInterval < MINIMAL_CHECKPOINT_TIME) {
checkpointInterval = CheckpointCoordinatorConfiguration.DISABLED_CHECKPOINT_INTERVAL;
}
if (intervalDuringBacklog < checkpointInterval) {
throw new IllegalArgumentException(
"Checkpoint interval during backlog must "
+ "be larger than or equal to that in normal situation.");
}
return intervalDuringBacklog;
} | 3.68 |
hbase_MasterProcedureUtil_checkGroupNotEmpty | /**
 * Do not allow creating new tables/namespaces which have an empty rs group, except the default rs
* group. Notice that we do not check for online servers, as this is not stable because region
* servers can die at any time.
*/
public static void checkGroupNotEmpty(RSGroupInfo rsGroupInfo, Supplier<String> forWhom)
throws ConstraintException {
if (rsGroupInfo == null || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
// we do not have a rs group config or we explicitly set the rs group to default, then no need
// to check.
return;
}
if (rsGroupInfo.getServers().isEmpty()) {
throw new ConstraintException(
"No servers in the rsgroup " + rsGroupInfo.getName() + " for " + forWhom.get());
}
} | 3.68 |
hadoop_OBSPosixBucketUtils_innerFsGetObjectStatus | // Used to get the status of a file or folder in a file-gateway bucket.
static OBSFileStatus innerFsGetObjectStatus(final OBSFileSystem owner,
final Path f) throws IOException {
final Path path = OBSCommonUtils.qualify(owner, f);
String key = OBSCommonUtils.pathToKey(owner, path);
LOG.debug("Getting path status for {} ({})", path, key);
if (key.isEmpty()) {
LOG.debug("Found root directory");
return new OBSFileStatus(path, owner.getUsername());
}
try {
final GetAttributeRequest getAttrRequest = new GetAttributeRequest(
owner.getBucket(), key);
ObsFSAttribute meta = owner.getObsClient()
.getAttribute(getAttrRequest);
owner.getSchemeStatistics().incrementReadOps(1);
if (fsIsFolder(meta)) {
LOG.debug("Found file (with /): fake directory");
return new OBSFileStatus(path,
OBSCommonUtils.dateToLong(meta.getLastModified()),
owner.getUsername());
} else {
LOG.debug(
"Found file (with /): real file? should not happen: {}",
key);
return new OBSFileStatus(
meta.getContentLength(),
OBSCommonUtils.dateToLong(meta.getLastModified()),
path,
owner.getDefaultBlockSize(path),
owner.getUsername());
}
} catch (ObsException e) {
if (e.getResponseCode() == OBSCommonUtils.NOT_FOUND_CODE) {
LOG.debug("Not Found: {}", path);
throw new FileNotFoundException(
"No such file or directory: " + path);
}
if (e.getResponseCode() == OBSCommonUtils.CONFLICT_CODE) {
throw new FileConflictException(
"file conflicts: " + e.getResponseStatus());
}
throw OBSCommonUtils.translateException("getFileStatus", path, e);
}
} | 3.68 |
flink_LocalProperties_getOrdering | /**
* Gets the key order.
*
* @return The key order, or <code>null</code> if nothing is ordered.
*/
public Ordering getOrdering() {
return ordering;
} | 3.68 |
dubbo_DubboBootstrap_reference | // {@link Reference} correlative methods
public <S> Module reference(Consumer<ReferenceBuilder<S>> consumerBuilder) {
return reference(null, consumerBuilder);
} | 3.68 |
hadoop_ActiveAuditManagerS3A_extractAndActivateSpanFromRequest | /**
* Get the active span from the execution attributes,
* falling back to the active thread span if there
* is nothing in the attributes.
* Provided the span is a wrapped span, the span is
* activated.
* @param request request
* @param executionAttributes the execution attributes
* @return the active span
*/
private AuditSpanS3A extractAndActivateSpanFromRequest(
final SdkRequest request,
final ExecutionAttributes executionAttributes) {
AuditSpanS3A span = retrieveAttachedSpan(executionAttributes);
if (span == null) {
// no span is attached. Not unusual for the copy operations,
// or for calls to GetBucketLocation made by the AWS client
LOG.debug("No audit span attached to request {}",
request);
// fall back to the active thread span.
// this will be the unbonded span if the thread is unbonded.
span = getActiveAuditSpan();
} else {
if (span instanceof WrappingAuditSpan) {
switchToActiveSpan((WrappingAuditSpan) span);
} else {
// warn/log and continue without switching.
WARN_OF_SPAN_TYPE.warn(NOT_A_WRAPPED_SPAN + ": {}", span);
LOG.debug(NOT_A_WRAPPED_SPAN + ": {}", span);
}
}
return span;
} | 3.68 |
hadoop_AbfsOutputStreamStatisticsImpl_getIOStatistics | /**
* {@inheritDoc}
*
* A getter for IOStatisticsStore instance which extends IOStatistics.
*
* @return IOStatisticsStore instance.
*/
@Override
public IOStatistics getIOStatistics() {
return ioStatisticsStore;
} | 3.68 |
morf_UpgradePath_getGraphBasedUpgradeUpgrade | /**
* @return {@link GraphBasedUpgrade} instance if it's available - may return
* null
*/
public GraphBasedUpgrade getGraphBasedUpgradeUpgrade() {
return graphBasedUpgradeSupplier.get();
} | 3.68 |
flink_StreamNonDeterministicPhysicalPlanResolver_resolvePhysicalPlan | /**
* Try to resolve the NDU problem if configured {@link
* OptimizerConfigOptions#TABLE_OPTIMIZER_NONDETERMINISTIC_UPDATE_STRATEGY} is in `TRY_RESOLVE`
* mode. Will raise an error if the NDU issues in the given plan can not be completely solved.
*/
public static List<RelNode> resolvePhysicalPlan(
List<RelNode> expanded, TableConfig tableConfig) {
OptimizerConfigOptions.NonDeterministicUpdateStrategy handling =
tableConfig
.getConfiguration()
.get(
OptimizerConfigOptions
.TABLE_OPTIMIZER_NONDETERMINISTIC_UPDATE_STRATEGY);
if (handling == OptimizerConfigOptions.NonDeterministicUpdateStrategy.TRY_RESOLVE) {
Preconditions.checkArgument(
expanded.stream().allMatch(rel -> rel instanceof StreamPhysicalRel));
StreamNonDeterministicUpdatePlanVisitor planResolver =
new StreamNonDeterministicUpdatePlanVisitor();
return expanded.stream()
.map(rel -> (StreamPhysicalRel) rel)
.map(planResolver::visit)
.collect(Collectors.toList());
}
// do nothing, return original relNodes
return expanded;
} | 3.68 |
hbase_HRegion_replayWALMetaEdit | /**
 * Replay the meta edits, i.e., flush marker, compaction marker, bulk load marker, region event
* marker, etc.
* <p/>
* For all events other than start flush, we will just call {@link #refreshStoreFiles()} as the
* logic is straight-forward and robust. For start flush, we need to snapshot the memstore, so
* later {@link #refreshStoreFiles()} call could drop the snapshot, otherwise we may run out of
* memory.
*/
private void replayWALMetaEdit(Cell cell) throws IOException {
startRegionOperation(Operation.REPLAY_EVENT);
try {
FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
if (flushDesc != null) {
switch (flushDesc.getAction()) {
case START_FLUSH:
// for start flush, we need to take a snapshot of the current memstore
synchronized (writestate) {
if (!writestate.flushing) {
this.writestate.flushing = true;
} else {
// usually this should not happen but let's make the code more robust, it is not a
// big deal to just ignore it, the refreshStoreFiles call should have the ability to
// clean up the inconsistent state.
LOG.debug("NOT flushing {} as already flushing", getRegionInfo());
break;
}
}
MonitoredTask status =
TaskMonitor.get().createStatus("Preparing flush " + getRegionInfo());
Collection<HStore> storesToFlush = getStoresToFlush(flushDesc);
try {
PrepareFlushResult prepareResult =
internalPrepareFlushCache(null, flushDesc.getFlushSequenceNumber(), storesToFlush,
status, false, FlushLifeCycleTracker.DUMMY);
if (prepareResult.result == null) {
// save the PrepareFlushResult so that we can use it later from commit flush
this.prepareFlushResult = prepareResult;
status.markComplete("Flush prepare successful");
if (LOG.isDebugEnabled()) {
LOG.debug("{} prepared flush with seqId: {}", getRegionInfo(),
flushDesc.getFlushSequenceNumber());
}
} else {
// special case empty memstore. We will still save the flush result in this case,
// since our memstore is empty, but the primary is still flushing
if (
prepareResult.getResult().getResult()
== FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY
) {
this.prepareFlushResult = prepareResult;
if (LOG.isDebugEnabled()) {
LOG.debug("{} prepared empty flush with seqId: {}", getRegionInfo(),
flushDesc.getFlushSequenceNumber());
}
}
status.abort("Flush prepare failed with " + prepareResult.result);
// nothing much to do. prepare flush failed because of some reason.
}
} finally {
status.cleanup();
}
break;
case ABORT_FLUSH:
// do nothing, an abort flush means the source region server will crash itself, after
// the primary region online, it will send us an open region marker, then we can clean
// up the memstore.
synchronized (writestate) {
writestate.flushing = false;
}
break;
case COMMIT_FLUSH:
case CANNOT_FLUSH:
// just call refreshStoreFiles
refreshStoreFiles();
logRegionFiles();
synchronized (writestate) {
writestate.flushing = false;
}
break;
default:
LOG.warn("{} received a flush event with unknown action: {}", getRegionInfo(),
TextFormat.shortDebugString(flushDesc));
}
} else {
// for all other region events, we will do a refreshStoreFiles
refreshStoreFiles();
logRegionFiles();
}
} finally {
closeRegionOperation(Operation.REPLAY_EVENT);
}
} | 3.68 |
hibernate-validator_MessagerAdapter_reportWarnings | /**
* Reports the given warnings against the underlying {@link Messager} using
* the specified {@link Kind}.
*
* @param warnings A set with errors to report. May be empty but must not be
* null.
*/
public void reportWarnings(Collection<ConstraintCheckIssue> warnings) {
for ( ConstraintCheckIssue warning : warnings ) {
reportWarning( warning );
}
} | 3.68 |
flink_CachedDataStream_invalidate | /**
* Invalidate the cache intermediate result of this DataStream to release the physical
* resources. Users are not required to invoke this method to release physical resources unless
* they want to. Cache will be recreated if it is used after invalidated.
*/
public void invalidate() throws Exception {
final CacheTransformation<T> t = (CacheTransformation<T>) this.getTransformation();
environment.invalidateClusterDataset(t.getDatasetId());
} | 3.68 |
hbase_RowMutations_getMutations | /** Returns An unmodifiable list of the current mutations. */
public List<Mutation> getMutations() {
return Collections.unmodifiableList(mutations);
} | 3.68 |
morf_DataSetProducerAdapter_records | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#records(java.lang.String)
*/
@Override
public Iterable<Record> records(String tableName) {
return delegate.records(tableName);
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getRecordReader | /**
 * Create a generic Hive RecordReader that can iterate over all chunks in a CombinedFileSplit.
*/
@Override
public RecordReader getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
if (!(split instanceof CombineHiveInputSplit)) {
return super.getRecordReader(split, job, reporter);
}
CombineHiveInputSplit hsplit = (CombineHiveInputSplit) split;
String inputFormatClassName = null;
Class<?> inputFormatClass;
try {
inputFormatClassName = hsplit.inputFormatClassName();
inputFormatClass = job.getClassByName(inputFormatClassName);
} catch (Exception e) {
throw new IOException("cannot find class " + inputFormatClassName);
}
pushProjectionsAndFilters(job, inputFormatClass, hsplit.getPath(0));
if (inputFormatClass.getName().equals(getParquetRealtimeInputFormatClassName())) {
HoodieCombineFileInputFormatShim shims = createInputFormatShim();
IOContextMap.get(job).setInputPath(((CombineHiveInputSplit) split).getPath(0));
return shims.getRecordReader(job, ((CombineHiveInputSplit) split).getInputSplitShim(),
reporter, CombineHiveRecordReader.class);
} else {
return ShimLoader.getHadoopShims().getCombineFileInputFormat().getRecordReader(job, (CombineFileSplit) split,
reporter, CombineHiveRecordReader.class);
}
} | 3.68 |
hbase_RegionReplicaUtil_isDefaultReplica | /** Returns true if this region is a default replica for the region */
public static boolean isDefaultReplica(RegionInfo hri) {
return hri.getReplicaId() == DEFAULT_REPLICA_ID;
} | 3.68 |
zxing_ResultHandler_getDisplayContents | /**
* Create a possibly styled string for the contents of the current barcode.
*
* @return The text to be displayed.
*/
public CharSequence getDisplayContents() {
String contents = result.getDisplayResult();
return contents.replace("\r", "");
} | 3.68 |
hbase_HRegionServer_keepLooping | /**
* @return True if we should break loop because cluster is going down or this server has been
* stopped or hdfs has gone bad.
*/
private boolean keepLooping() {
return !this.stopped && isClusterUp();
} | 3.68 |
hudi_BaseHoodieWriteClient_updateColumnNullability | /**
* update col nullable attribute for hudi table.
*
 * @param colName col name to be changed. If we want to change a col from a nested field, the fullName should be specified
 * @param nullable whether the column should be nullable
*/
public void updateColumnNullability(String colName, boolean nullable) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyColumnNullabilityChange(colName, nullable);
commitTableChange(newSchema, pair.getRight());
} | 3.68 |
framework_VTabsheet_showTabs | /**
* Makes tab bar visible.
*
* @since 7.2
*/
public void showTabs() {
tb.setVisible(true);
removeStyleName(CLASSNAME + "-hidetabs");
tb.recalculateCaptionWidths();
} | 3.68 |
pulsar_ManagedLedgerConfig_getMaxBacklogBetweenCursorsForCaching | /**
 * Max backlog gap allowed between backlogged cursors when caching, to avoid caching an entry that can be
 * invalidated before another backlogged cursor can reuse it from the cache.
*
* @return
*/
public int getMaxBacklogBetweenCursorsForCaching() {
return maxBacklogBetweenCursorsForCaching;
} | 3.68 |
framework_VScrollTable_disableAutoColumnWidthCalculation | /**
* Disables the automatic calculation of all column widths by forcing
* the widths to be "defined" thus turning off expand ratios and such.
*/
public void disableAutoColumnWidthCalculation(HeaderCell source) {
for (HeaderCell cell : availableCells.values()) {
cell.disableAutoWidthCalculation();
}
// fire column resize events for all columns but the source of the
// resize action, since an event will fire separately for this.
List<HeaderCell> columns = new ArrayList<HeaderCell>(
availableCells.values());
columns.remove(source);
sendColumnWidthUpdates(columns);
forceRealignColumnHeaders();
} | 3.68 |
hbase_Result_getCursor | /**
* Return the cursor if this Result is a cursor result. {@link Scan#setNeedCursorResult(boolean)}
* {@link Cursor} {@link #isCursor()}
*/
public Cursor getCursor() {
return cursor;
} | 3.68 |
hadoop_RollingWindow_incAt | /**
* When an event occurs at the specified time, this method reflects that in
* the rolling window.
* <p>
*
* @param time the time at which the event occurred
* @param delta the delta that will be added to the window
*/
public void incAt(long time, long delta) {
int bi = computeBucketIndex(time);
Bucket bucket = buckets[bi];
// If the last time the bucket was updated is out of the scope of the
// rolling window, reset the bucket.
if (bucket.isStaleNow(time)) {
bucket.safeReset(time);
}
bucket.inc(delta);
} | 3.68 |
hbase_HFileReaderImpl__readMvccVersion | /**
* Actually do the mvcc read. Does no checks.
*/
private void _readMvccVersion(int offsetFromPos) {
// This is Bytes#bytesToVint inlined so can save a few instructions in this hot method; i.e.
// previous if one-byte vint, we'd redo the vint call to find int size.
// Also the method is kept small so can be inlined.
byte firstByte = blockBuffer.getByteAfterPosition(offsetFromPos);
int len = WritableUtils.decodeVIntSize(firstByte);
if (len == 1) {
this.currMemstoreTS = firstByte;
} else {
int remaining = len - 1;
long i = 0;
offsetFromPos++;
if (remaining >= Bytes.SIZEOF_INT) {
// The int read has to be converted to unsigned long so the & op
i = (blockBuffer.getIntAfterPosition(offsetFromPos) & 0x00000000ffffffffL);
remaining -= Bytes.SIZEOF_INT;
offsetFromPos += Bytes.SIZEOF_INT;
}
if (remaining >= Bytes.SIZEOF_SHORT) {
short s = blockBuffer.getShortAfterPosition(offsetFromPos);
i = i << 16;
i = i | (s & 0xFFFF);
remaining -= Bytes.SIZEOF_SHORT;
offsetFromPos += Bytes.SIZEOF_SHORT;
}
for (int idx = 0; idx < remaining; idx++) {
byte b = blockBuffer.getByteAfterPosition(offsetFromPos + idx);
i = i << 8;
i = i | (b & 0xFF);
}
currMemstoreTS = (WritableUtils.isNegativeVInt(firstByte) ? ~i : i);
}
this.currMemstoreTSLen = len;
} | 3.68 |
pulsar_AuthenticationDataHttp_hasSubscription | /*
* Subscription
*/
@Override
public boolean hasSubscription() {
return this.subscription != null;
} | 3.68 |
hbase_BufferedDataBlockEncoder_afterEncodingKeyValue | /** Returns unencoded size added */
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out,
HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
int size = 0;
if (encodingCtx.getHFileContext().isIncludesTags()) {
int tagsLength = cell.getTagsLength();
ByteBufferUtils.putCompressedInt(out, tagsLength);
// There are some tags to be written
if (tagsLength > 0) {
TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
// When tag compression is enabled, tagCompressionContext will have a not null value. Write
// the tags using Dictionary compression in such a case
if (tagCompressionContext != null) {
// Not passing tagsLength considering that parsing of the tagsLength is not costly
PrivateCellUtil.compressTags(out, cell, tagCompressionContext);
} else {
PrivateCellUtil.writeTags(out, cell, tagsLength);
}
}
size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
}
if (encodingCtx.getHFileContext().isIncludesMvcc()) {
// Copy memstore timestamp from the byte buffer to the output stream.
long memstoreTS = cell.getSequenceId();
WritableUtils.writeVLong(out, memstoreTS);
// TODO use a writeVLong which returns the #bytes written so that 2 time parsing can be
// avoided.
size += WritableUtils.getVIntSize(memstoreTS);
}
return size;
} | 3.68 |
hbase_MetricsAssignmentManager_updateRITCountOverThreshold | /**
 * Update the count of RITs that have been in this state for longer than the threshold defined by the property
* rit.metrics.threshold.time.
*/
public void updateRITCountOverThreshold(final int ritCountOverThreshold) {
assignmentManagerSource.setRITCountOverThreshold(ritCountOverThreshold);
} | 3.68 |
flink_HybridShuffleConfiguration_getMaxBuffersReadAhead | /**
* Determine how many buffers to read ahead at most for each subpartition to prevent other
* consumers from starving.
*/
public int getMaxBuffersReadAhead() {
return maxBuffersReadAhead;
} | 3.68 |
framework_MultiSelectionEvent_getRemovedSelection | /**
* Gets the items that were removed from selection.
* <p>
* This is just a convenience method for checking what was previously
* selected in {@link #getOldSelection()} but not selected anymore in
* {@link #getNewSelection()}.
*
* @return the items that were removed from selection
*/
public Set<T> getRemovedSelection() {
LinkedHashSet<T> copy = new LinkedHashSet<>(getOldValue());
copy.removeAll(getNewSelection());
return copy;
} | 3.68 |
morf_SpreadsheetDataSetProducer_allBlank | /**
* Determines if the given cells are all blank or not.
* @param cells to check if they are blank or not
* @return true if all the cells are blank, otherwise false.
*/
private boolean allBlank(final Cell... cells) {
for (Cell cell : cells) {
if (cell.getContents().length() != 0) {
return false;
}
}
return true;
} | 3.68 |
graphhopper_GpxConversions_calcDirection | /**
* Return the direction like 'NE' based on the first tracksegment of the instruction. If
* Instruction does not contain enough coordinate points, an empty string will be returned.
*/
public static String calcDirection(Instruction instruction, Instruction nextI) {
double azimuth = calcAzimuth(instruction, nextI);
if (Double.isNaN(azimuth))
return "";
return AC.azimuth2compassPoint(azimuth);
} | 3.68 |
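A rough standalone sketch of the azimuth-to-compass-point mapping such a helper is expected to perform; GraphHopper's actual AC.azimuth2compassPoint may differ in details such as the number of sectors, so treat this as an assumption:

public class CompassDemo {
    private static final String[] POINTS = {"N", "NE", "E", "SE", "S", "SW", "W", "NW"};

    // Maps an azimuth in degrees (0 = north, clockwise) to one of eight compass points.
    static String azimuthToCompassPoint(double azimuth) {
        if (Double.isNaN(azimuth))
            return "";
        // each sector covers 45 degrees; shift by half a sector so e.g. 350..10 maps to N
        int idx = (int) Math.floor(((azimuth % 360 + 360) % 360 + 22.5) / 45.0) % 8;
        return POINTS[idx];
    }

    public static void main(String[] args) {
        System.out.println(azimuthToCompassPoint(40));  // NE
        System.out.println(azimuthToCompassPoint(350)); // N
    }
}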
hbase_RegionLocations_updateLocation | /**
 * Updates the location with the new one only if the new location has a higher seqNum than the
 * old one, or if force is true.
* @param location the location to add or update
* @param checkForEquals whether to update the location if seqNums for the HRegionLocations for
* the old and new location are the same
* @param force whether to force update
 * @return a RegionLocations object with updated locations, or the same object if nothing is
 *         updated
*/
@SuppressWarnings("ReferenceEquality")
public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals,
boolean force) {
assert location != null;
int replicaId = location.getRegion().getReplicaId();
HRegionLocation oldLoc = getRegionLocation(location.getRegion().getReplicaId());
HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, checkForEquals, force);
if (selectedLoc == oldLoc) {
return this;
}
HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId + 1)];
System.arraycopy(locations, 0, newLocations, 0, locations.length);
newLocations[replicaId] = location;
// ensure that all replicas share the same start code. Otherwise delete them
for (int i = 0; i < newLocations.length; i++) {
if (newLocations[i] != null) {
if (
!RegionReplicaUtil.isReplicasForSameRegion(location.getRegion(),
newLocations[i].getRegion())
) {
newLocations[i] = null;
}
}
}
return new RegionLocations(newLocations);
} | 3.68 |
hbase_ColumnCount_getBuffer | /** Returns the buffer */
public byte[] getBuffer() {
return this.bytes;
} | 3.68 |
rocketmq-connect_JsonSchemaData_fromConnectData | /**
 * Converts a Connect value, described by the given schema, into its JSON representation.
 *
 * @param schema the Connect schema of the value, may be null
 * @param logicalValue the Connect value to convert, may be null
 * @return the corresponding JsonNode, or null if there is no value and no usable default
*/
public JsonNode fromConnectData(Schema schema, Object logicalValue) {
if (logicalValue == null) {
if (schema == null) {
// Any schema is valid and we don't have a default, so treat this as an optional schema
return null;
}
if (schema.getDefaultValue() != null) {
return fromConnectData(schema, schema.getDefaultValue());
}
if (schema.isOptional()) {
return JSON_NODE_FACTORY.nullNode();
}
return null;
}
Object value = logicalValue;
if (schema != null && schema.getName() != null) {
ConnectToJsonLogicalTypeConverter logicalConverter =
TO_JSON_LOGICAL_CONVERTERS.get(schema.getName());
if (logicalConverter != null) {
return logicalConverter.convert(schema, logicalValue, config);
}
}
try {
final FieldType schemaType;
if (schema == null) {
schemaType = Schema.schemaType(value.getClass());
if (schemaType == null) {
throw new ConnectException("Java class "
+ value.getClass()
+ " does not have corresponding schema type.");
}
} else {
schemaType = schema.getFieldType();
}
switch (schemaType) {
case INT8:
// Use shortValue to create a ShortNode, otherwise an IntNode will be created
return JSON_NODE_FACTORY.numberNode(((Byte) value).shortValue());
case INT16:
return JSON_NODE_FACTORY.numberNode((Short) value);
case INT32:
return JSON_NODE_FACTORY.numberNode((Integer) value);
case INT64:
return JSON_NODE_FACTORY.numberNode((Long) value);
case FLOAT32:
return JSON_NODE_FACTORY.numberNode((Float) value);
case FLOAT64:
return JSON_NODE_FACTORY.numberNode((Double) value);
case BOOLEAN:
return JSON_NODE_FACTORY.booleanNode((Boolean) value);
case STRING:
CharSequence charSeq = (CharSequence) value;
return JSON_NODE_FACTORY.textNode(charSeq.toString());
case BYTES:
if (value instanceof byte[]) {
return JSON_NODE_FACTORY.binaryNode((byte[]) value);
} else if (value instanceof ByteBuffer) {
return JSON_NODE_FACTORY.binaryNode(((ByteBuffer) value).array());
} else if (value instanceof BigDecimal) {
return JSON_NODE_FACTORY.numberNode((BigDecimal) value);
} else {
throw new ConnectException("Invalid type for bytes type: " + value.getClass());
}
case ARRAY: {
Collection collection = (Collection) value;
ArrayNode list = JSON_NODE_FACTORY.arrayNode();
for (Object elem : collection) {
Schema valueSchema = schema == null ? null : schema.getValueSchema();
JsonNode fieldValue = fromConnectData(valueSchema, elem);
list.add(fieldValue);
}
return list;
}
case MAP: {
Map<?, ?> map = (Map<?, ?>) value;
// If true, using string keys and JSON object; if false, using non-string keys and
// Array-encoding
boolean objectMode;
if (schema == null) {
objectMode = true;
for (Map.Entry<?, ?> entry : map.entrySet()) {
if (!(entry.getKey() instanceof String)) {
objectMode = false;
break;
}
}
} else {
objectMode = schema.getKeySchema().getFieldType() == FieldType.STRING && !schema.getKeySchema()
.isOptional();
}
ObjectNode obj = null;
ArrayNode list = null;
if (objectMode) {
obj = JSON_NODE_FACTORY.objectNode();
} else {
list = JSON_NODE_FACTORY.arrayNode();
}
for (Map.Entry<?, ?> entry : map.entrySet()) {
Schema keySchema = schema == null ? null : schema.getKeySchema();
Schema valueSchema = schema == null ? null : schema.getValueSchema();
JsonNode mapKey = fromConnectData(keySchema, entry.getKey());
JsonNode mapValue = fromConnectData(valueSchema, entry.getValue());
if (objectMode) {
obj.set(mapKey.asText(), mapValue);
} else {
ObjectNode o = JSON_NODE_FACTORY.objectNode();
o.set(KEY_FIELD, mapKey);
o.set(VALUE_FIELD, mapValue);
list.add(o);
}
}
return objectMode ? obj : list;
}
case STRUCT: {
Struct struct = (Struct) value;
if (!struct.schema().equals(schema)) {
throw new ConnectException("Mismatching schema.");
}
          // This handles the inverse of a union held as a struct, where each field is one of the
          // union types.
if (JSON_TYPE_ONE_OF.equals(schema.getName())) {
for (Field field : schema.getFields()) {
Object object = struct.get(field);
if (object != null) {
return fromConnectData(field.getSchema(), object);
}
}
return fromConnectData(schema, null);
} else {
ObjectNode obj = JSON_NODE_FACTORY.objectNode();
for (Field field : schema.getFields()) {
JsonNode jsonNode = fromConnectData(field.getSchema(), struct.get(field));
if (jsonNode != null) {
obj.set(field.getName(), jsonNode);
}
}
return obj;
}
}
default:
break;
}
throw new ConnectException("Couldn't convert " + value + " to JSON.");
} catch (ClassCastException e) {
String schemaTypeStr = (schema != null) ? schema.getFieldType().toString() : "unknown schema";
throw new ConnectException("Invalid type for " + schemaTypeStr + ": " + value.getClass());
}
} | 3.68 |
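To illustrate the map-encoding choice in the MAP branch above, here is a minimal self-contained Jackson sketch; it stringifies values for brevity, whereas the real converter recurses with the value schema, and all class and field names here are hypothetical:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.LinkedHashMap;
import java.util.Map;

public class MapEncodingDemo {
    private static final JsonNodeFactory F = JsonNodeFactory.instance;

    // String-keyed maps become a JSON object; other key types fall back to an array of
    // {"key": ..., "value": ...} entries, mirroring the objectMode switch in the converter.
    static JsonNode encodeMap(Map<?, ?> map) {
        boolean objectMode = map.keySet().stream().allMatch(k -> k instanceof String);
        if (objectMode) {
            ObjectNode obj = F.objectNode();
            map.forEach((k, v) -> obj.put((String) k, String.valueOf(v)));
            return obj;
        }
        ArrayNode list = F.arrayNode();
        map.forEach((k, v) -> {
            ObjectNode entry = F.objectNode();
            entry.put("key", String.valueOf(k));
            entry.put("value", String.valueOf(v));
            list.add(entry);
        });
        return list;
    }

    public static void main(String[] args) {
        Map<Object, Object> m = new LinkedHashMap<>();
        m.put(1, "one");
        m.put(2, "two");
        System.out.println(encodeMap(m)); // [{"key":"1","value":"one"},{"key":"2","value":"two"}]
    }
}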
morf_SqlServerDialect_indexDeploymentStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#indexDeploymentStatements(org.alfasoftware.morf.metadata.Table,
* org.alfasoftware.morf.metadata.Index)
*/
@Override
protected Collection<String> indexDeploymentStatements(Table table, Index index) {
StringBuilder createIndexStatement = new StringBuilder();
createIndexStatement.append("CREATE ");
if (index.isUnique()) {
createIndexStatement.append("UNIQUE NONCLUSTERED ");
}
createIndexStatement.append("INDEX ");
createIndexStatement.append(index.getName());
createIndexStatement.append(" ON ");
createIndexStatement.append(schemaNamePrefix());
createIndexStatement.append(table.getName());
createIndexStatement.append(" (");
boolean firstColumn = true;
for (String columnName : index.columnNames()) {
if (firstColumn) {
firstColumn = false;
} else {
createIndexStatement.append(", ");
}
createIndexStatement.append(String.format("[%s]", columnName));
}
createIndexStatement.append(")");
return Collections.singletonList(createIndexStatement.toString());
} | 3.68 |
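For illustration, given a hypothetical table Example with a unique index Example_1 on (col1, col2) and assuming schemaNamePrefix() returns "dbo.", the generated statement would be expected to look like:

CREATE UNIQUE NONCLUSTERED INDEX Example_1 ON dbo.Example ([col1], [col2])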
flink_DynamicSinkUtils_addExtraMetaCols | /**
 * Adds extra metadata columns to the underlying table scan and returns the new resolved schema.
*/
private static ResolvedSchema addExtraMetaCols(
LogicalTableModify tableModify,
LogicalTableScan tableScan,
String tableDebugName,
List<MetadataColumn> metadataColumns,
FlinkTypeFactory typeFactory) {
final TableSourceTable sourceTable = tableScan.getTable().unwrap(TableSourceTable.class);
DynamicTableSource dynamicTableSource = sourceTable.tableSource();
        // get the old schema and extend its column list with the extra metadata columns
ResolvedSchema oldSchema = sourceTable.contextResolvedTable().getResolvedSchema();
List<Column> newColumns = new ArrayList<>(oldSchema.getColumns());
newColumns.addAll(metadataColumns);
// get the new resolved schema after adding extra meta columns
ResolvedSchema resolvedSchema = ResolvedSchema.of(newColumns);
List<RelDataTypeField> oldFields = sourceTable.getRowType().getFieldList();
List<RelDataTypeField> newFields = new ArrayList<>(sourceTable.getRowType().getFieldList());
for (int i = 0; i < metadataColumns.size(); i++) {
MetadataColumn column = metadataColumns.get(i);
// add a new field
newFields.add(
new RelDataTypeFieldImpl(
column.getName(),
oldFields.size() + i,
typeFactory.createFieldTypeFromLogicalType(
column.getDataType().getLogicalType())));
}
// create a copy for TableSourceTable with new resolved schema
TableSourceTable newTableSourceTab =
sourceTable.copy(
dynamicTableSource,
sourceTable.contextResolvedTable().copy(resolvedSchema),
new RelRecordType(StructKind.FULLY_QUALIFIED, newFields, false),
sourceTable.abilitySpecs());
// create a copy for table scan with new TableSourceTable
LogicalTableScan newTableScan =
new LogicalTableScan(
tableScan.getCluster(),
tableScan.getTraitSet(),
tableScan.getHints(),
newTableSourceTab);
Project project = (Project) tableModify.getInput();
// replace with the new table scan
if (project.getInput() instanceof LogicalFilter) {
LogicalFilter logicalFilter = (LogicalFilter) project.getInput();
project.replaceInput(
0,
logicalFilter.copy(
logicalFilter.getTraitSet(),
newTableScan,
logicalFilter.getCondition()));
} else {
project.replaceInput(0, newTableScan);
}
// validate and apply metadata
// TODO FLINK-33083 we should not ignore the produced abilities but actually put those into
// the table scan
DynamicSourceUtils.validateAndApplyMetadata(
tableDebugName, resolvedSchema, newTableSourceTab.tableSource(), new ArrayList<>());
return resolvedSchema;
} | 3.68 |
morf_Function_getArguments | /**
* Gets the list of arguments associated with the function.
*
* @return the arguments
*/
public List<AliasedField> getArguments() {
return arguments;
} | 3.68 |
hadoop_BlockPoolTokenSecretManager_addBlockPool | /**
 * Adds a block pool id and the corresponding {@link BlockTokenSecretManager} to the map.
* @param bpid block pool Id
* @param secretMgr {@link BlockTokenSecretManager}
*/
public void addBlockPool(String bpid, BlockTokenSecretManager secretMgr) {
map.put(bpid, secretMgr);
} | 3.68 |
hbase_ServerRpcConnection_processConnectionHeader | // Reads the connection header that follows the version preamble
private void processConnectionHeader(ByteBuff buf) throws IOException {
this.connectionHeader = ConnectionHeader.parseFrom(createCis(buf));
// we want to copy the attributes prior to releasing the buffer so that they don't get corrupted
// eventually
if (connectionHeader.getAttributeList().isEmpty()) {
this.connectionAttributes = Collections.emptyMap();
} else {
this.connectionAttributes =
Maps.newHashMapWithExpectedSize(connectionHeader.getAttributeList().size());
for (HBaseProtos.NameBytesPair nameBytesPair : connectionHeader.getAttributeList()) {
this.connectionAttributes.put(nameBytesPair.getName(),
nameBytesPair.getValue().toByteArray());
}
}
String serviceName = connectionHeader.getServiceName();
if (serviceName == null) {
throw new EmptyServiceNameException();
}
this.service = RpcServer.getService(this.rpcServer.services, serviceName);
if (this.service == null) {
throw new UnknownServiceException(serviceName);
}
setupCellBlockCodecs();
sendConnectionHeaderResponseIfNeeded();
UserGroupInformation protocolUser = createUser(connectionHeader);
if (!useSasl) {
ugi = protocolUser;
if (ugi != null) {
ugi.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
}
// audit logging for SASL authenticated users happens in saslReadAndProcess()
if (authenticatedWithFallback) {
RpcServer.LOG.warn("Allowed fallback to SIMPLE auth for {} connecting from {}", ugi,
getHostAddress());
}
} else {
// user is authenticated
ugi.setAuthenticationMethod(provider.getSaslAuthMethod().getAuthMethod());
// Now we check if this is a proxy user case. If the protocol user is
// different from the 'user', it is a proxy user scenario. However,
// this is not allowed if user authenticated with DIGEST.
if ((protocolUser != null) && (!protocolUser.getUserName().equals(ugi.getUserName()))) {
if (!provider.supportsProtocolAuthentication()) {
// Not allowed to doAs if token authentication is used
throw new AccessDeniedException("Authenticated user (" + ugi
+ ") doesn't match what the client claims to be (" + protocolUser + ")");
} else {
// Effective user can be different from authenticated user
// for simple auth or kerberos auth
// The user is the real user. Now we create a proxy user
UserGroupInformation realUser = ugi;
ugi = UserGroupInformation.createProxyUser(protocolUser.getUserName(), realUser);
// Now the user is a proxy user, set Authentication method Proxy.
ugi.setAuthenticationMethod(AuthenticationMethod.PROXY);
}
}
}
String version;
if (this.connectionHeader.hasVersionInfo()) {
// see if this connection will support RetryImmediatelyException
this.retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2);
version = this.connectionHeader.getVersionInfo().getVersion();
} else {
version = "UNKNOWN";
}
RpcServer.AUDITLOG.info("Connection from {}:{}, version={}, sasl={}, ugi={}, service={}",
this.hostAddress, this.remotePort, version, this.useSasl, this.ugi, serviceName);
} | 3.68 |
hbase_ObjectPool_get | /**
* Returns a shared object associated with the given {@code key}, which is identified by the
* {@code equals} method.
* @throws NullPointerException if {@code key} is {@code null}
*/
public V get(K key) {
Reference<V> ref = referenceCache.get(Objects.requireNonNull(key));
if (ref != null) {
V obj = ref.get();
if (obj != null) {
return obj;
}
referenceCache.remove(key, ref);
}
V newObj = objectFactory.createObject(key);
Reference<V> newRef = createReference(key, newObj);
while (true) {
Reference<V> existingRef = referenceCache.putIfAbsent(key, newRef);
if (existingRef == null) {
return newObj;
}
V existingObject = existingRef.get();
if (existingObject != null) {
return existingObject;
}
referenceCache.remove(key, existingRef);
}
} | 3.68 |
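A stripped-down sketch of the same canonicalization pattern using only java.lang.ref and java.util.concurrent; the real HBase ObjectPool is assumed to add purging of stale references and pluggable reference types, which are omitted here:

import java.lang.ref.WeakReference;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Canonicalizes one value per key while allowing unused values to be garbage-collected.
public class WeakInterner<K, V> {
    private final ConcurrentMap<K, WeakReference<V>> cache = new ConcurrentHashMap<>();
    private final Function<K, V> factory;

    public WeakInterner(Function<K, V> factory) {
        this.factory = factory;
    }

    public V get(K key) {
        WeakReference<V> ref = cache.get(key);
        if (ref != null) {
            V obj = ref.get();
            if (obj != null) {
                return obj;
            }
            cache.remove(key, ref); // stale entry: referent already collected
        }
        V newObj = factory.apply(key);
        WeakReference<V> newRef = new WeakReference<>(newObj);
        while (true) {
            WeakReference<V> existing = cache.putIfAbsent(key, newRef);
            if (existing == null) {
                return newObj;      // we won the race
            }
            V existingObj = existing.get();
            if (existingObj != null) {
                return existingObj; // another thread won with a live object
            }
            cache.remove(key, existing);
        }
    }
}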
hbase_Table_batchCoprocessorService | /**
* Creates an instance of the given {@link Service} subclass for each table region spanning the
* range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to
* the same region server will be batched into one call. The coprocessor service is invoked
* according to the service instance, method name and parameters.
* <p/>
* The given
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
* method will be called with the return value from each region's invocation.
* @param methodDescriptor the descriptor for the protobuf service method to call.
* @param request the method call parameters
* @param startKey start region selection with region containing this row. If
* {@code null}, the selection will start with the first table region.
* @param endKey select regions up to and including the region containing this row. If
* {@code null}, selection will continue through the last table region.
* @param responsePrototype the proto type of the response of the method in Service.
* @param callback callback to invoke with the response for each region
* @param <R> the response type for the coprocessor Service method
 * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
 *             interface of a protobuf stub, so it cannot be done asynchronously, even though the
 *             {@link Table} implementation is now built on top of {@link AsyncTable}. Use the
 *             coprocessorService methods in {@link AsyncTable} directly instead.
* @see Connection#toAsyncConnection()
*/
@Deprecated
default <R extends Message> void batchCoprocessorService(
Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
R responsePrototype, Batch.Callback<R> callback) throws ServiceException, Throwable {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
hbase_HFileBlock_headerSize | /**
 * Returns the size of the block header, which depends on whether HBase checksums are in use.
*/
public static int headerSize(boolean usesHBaseChecksum) {
return usesHBaseChecksum
? HConstants.HFILEBLOCK_HEADER_SIZE
: HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
} | 3.68 |
graphhopper_VectorTile_getSintValue | /**
* <code>optional sint64 sint_value = 6;</code>
*/
public long getSintValue() {
return sintValue_;
} | 3.68 |