name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
morf_SQLEntityNameValidationService_isNameConventional | /**
* <p>
* Establishes whether a name matches the allowed pattern of characters and
* therefore follows the correct naming convention.
* </p>
* <p>
* The name must:
* </p>
* <ul>
* <li>begin with an alphabetic character [a-zA-Z]</li>
* <li>only contain alphanumeric characters or underscore [a-zA-Z0-9_]</li>
* </ul>
*
* @param name The string to check if it follows the correct naming convention
* @return true if the name is valid, otherwise false
*/
public boolean isNameConventional(String name) {
return schemaValidator.isNameConventional(name);
} | 3.68 |
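A minimal sketch of the convention described above, assuming a plain java.util.regex check (the real schemaValidator may implement it differently):

// Hypothetical stand-in for SchemaValidator.isNameConventional:
// must start with a letter, then contain only letters, digits or underscores.
private static final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("[a-zA-Z][a-zA-Z0-9_]*");

static boolean isConventional(String name) {
return name != null && NAME_PATTERN.matcher(name).matches();
}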
hbase_QuotaCache_getUserQuotaState | /**
* Returns the QuotaState associated with the specified user.
* @param ugi the user
* @return the quota info associated with the specified user
*/
public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) {
return computeIfAbsent(userQuotaCache, getQuotaUserName(ugi), UserQuotaState::new,
this::triggerCacheRefresh);
} | 3.68 |
morf_PortableSqlStatement_inplaceUpdateTransitionalTableNames | /**
* Sets the {@link TableNameResolver}.
*
* @param nameResolver The {@link TableNameResolver}.
*/
public void inplaceUpdateTransitionalTableNames(TableNameResolver nameResolver) {
this.nameResolver = nameResolver;
} | 3.68 |
framework_VaadinPortletSession_removePortletListener | /**
* Removes a portlet request listener registered with
* {@link #addPortletListener(PortletListener)}.
*
* @param listener
* to remove
* @deprecated Use a {@link Registration} object returned by
* {@link #addPortletListener(PortletListener)} to remove a
* listener
*/
@Deprecated
public void removePortletListener(PortletListener listener) {
portletListeners.remove(listener);
} | 3.68 |
flink_PekkoUtils_getInetSocketAddressFromRpcURL | /**
* Extracts the hostname and the port of the remote actor system from the given Pekko URL. The
* result is an {@link InetSocketAddress} instance containing the extracted hostname and port.
* If the Pekko URL does not contain the hostname and port information, e.g. a local Pekko URL
* is provided, then an {@link Exception} is thrown.
*
* @param rpcURL The URL to extract the host and port from.
* @throws java.lang.Exception Thrown if the given string does not represent a proper URL
* @return The InetSocketAddress with the extracted host and port.
*/
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception {
// Pekko URLs have the form scheme://systemName@host:port/... if it's a remote Pekko URL
try {
final Address address = getAddressFromRpcURL(rpcURL);
if (address.host().isDefined() && address.port().isDefined()) {
return new InetSocketAddress(address.host().get(), (int) address.port().get());
} else {
throw new MalformedURLException();
}
} catch (MalformedURLException e) {
throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL);
}
} | 3.68 |
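A hedged usage sketch (the URL below is made up for illustration):

InetSocketAddress addr =
PekkoUtils.getInetSocketAddressFromRpcURL("pekko.tcp://flink@10.0.0.1:6123/user/rpc/dispatcher");
// addr.getHostString() -> "10.0.0.1", addr.getPort() -> 6123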
flink_HiveParserUtils_genFilterRelNode | // creates a LogicalFilter node
public static RelNode genFilterRelNode(
RelNode relNode, RexNode rexNode, Collection<CorrelationId> variables) {
Class[] argTypes =
new Class[] {
RelNode.class,
RexNode.class,
useShadedImmutableSet ? shadedImmutableSetClz : immutableSetClz
};
Method method = HiveReflectionUtils.tryGetMethod(LogicalFilter.class, "create", argTypes);
Preconditions.checkState(method != null, "Cannot get the method to create a LogicalFilter");
try {
return (LogicalFilter) method.invoke(null, relNode, rexNode, toImmutableSet(variables));
} catch (IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create LogicalFilter", e);
}
} | 3.68 |
framework_SliderTooltip_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Testing that sliders have tooltips.";
} | 3.68 |
hadoop_ProducerConsumer_put | /**
* Blocking put of a workRequest into the ProducerConsumer input queue.
*
* @param workRequest item to be processed.
*/
public void put(WorkRequest<T> workRequest) {
boolean isDone = false;
while (!isDone) {
try {
inputQueue.put(workRequest);
workCnt.incrementAndGet();
isDone = true;
} catch (InterruptedException ie) {
LOG.error("Could not put workRequest into inputQueue. Retrying...");
}
}
} | 3.68 |
flink_VertexFlameGraphFactory_createFullFlameGraphFrom | /**
* Converts {@link VertexThreadInfoStats} into a FlameGraph.
*
* @param sample Thread details sample containing stack traces.
* @return FlameGraph data structure
*/
public static VertexFlameGraph createFullFlameGraphFrom(VertexThreadInfoStats sample) {
EnumSet<Thread.State> included = EnumSet.allOf(Thread.State.class);
return createFlameGraphFromSample(sample, included);
} | 3.68 |
flink_TableFunctionResultFuture_getResultFuture | /** Gets the internal collector which is used to emit the final row. */
public ResultFuture<?> getResultFuture() {
return this.resultFuture;
} | 3.68 |
AreaShop_GeneralRegion_getWorldName | /**
* Get the name of the world where the region is located.
* @return The name of the world of the region
*/
@Override
public String getWorldName() {
return getStringSetting("general.world");
} | 3.68 |
flink_OperatorCoordinatorCheckpointContext_notifyCheckpointAborted | /**
* We override the method here to remove the checked exception. Please check the Java docs of
* {@link CheckpointListener#notifyCheckpointAborted(long)} for the detailed semantics of this
* method.
*/
@Override
default void notifyCheckpointAborted(long checkpointId) {} | 3.68 |
hbase_StoreFileInfo_getSize | /**
* Size of the HFile.
*/
public long getSize() {
return size;
} | 3.68 |
framework_ComponentSizeValidator_validateComponentRelativeSizes | /**
* Recursively checks the given component and its subtree for invalid layout
* setups. Prints errors to the standard error stream.
*
* @param component
* component to check
* @param errors
* list to collect errors into, or null to create one lazily
* @param parent
* parent error to attach nested errors to, or null for top level
* @return list of first level errors found
*/
public static List<InvalidLayout> validateComponentRelativeSizes(
Component component, List<InvalidLayout> errors,
InvalidLayout parent) {
if (component != null) {
boolean invalidHeight = !checkHeights(component);
boolean invalidWidth = !checkWidths(component);
if (invalidHeight || invalidWidth) {
InvalidLayout error = new InvalidLayout(component,
invalidHeight, invalidWidth);
if (parent != null) {
parent.addError(error);
} else {
if (errors == null) {
errors = new LinkedList<>();
}
errors.add(error);
}
parent = error;
}
}
if (component instanceof Panel) {
Panel panel = (Panel) component;
errors = validateComponentRelativeSizes(panel.getContent(), errors,
parent);
} else if (component instanceof ComponentContainer) {
ComponentContainer lo = (ComponentContainer) component;
Iterator<Component> it = lo.getComponentIterator();
while (it.hasNext()) {
errors = validateComponentRelativeSizes(it.next(), errors,
parent);
}
} else if (isForm(component)) {
HasComponents form = (HasComponents) component;
for (Component child : form) {
errors = validateComponentRelativeSizes(child, errors, parent);
}
}
return errors;
} | 3.68 |
hbase_MasterObserver_postDecommissionRegionServers | /**
* Called after decommissioning region servers.
*/
default void postDecommissionRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ServerName> servers, boolean offload) throws IOException {
} | 3.68 |
pulsar_AbstractMetrics_getManagedLedgers | /**
* Returns the managed ledgers map from the ML factory.
*
* @return map of managed ledger name to {@link ManagedLedgerImpl}
*/
protected Map<String, ManagedLedgerImpl> getManagedLedgers() {
return ((ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory()).getManagedLedgers();
} | 3.68 |
hadoop_CompositeService_addService | /**
* Add the passed {@link Service} to the list of services managed by this
* {@link CompositeService}.
* @param service the {@link Service} to be added
*/
protected void addService(Service service) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding service " + service.getName());
}
synchronized (serviceList) {
serviceList.add(service);
}
} | 3.68 |
hadoop_BlockBlobAppendStream_writeBlockListRequestInternal | /**
* Writes the block list. This method encapsulates the retry logic.
*/
private void writeBlockListRequestInternal() {
IOException lastLocalException = null;
int uploadRetryAttempts = 0;
while (uploadRetryAttempts < MAX_BLOCK_UPLOAD_RETRIES) {
try {
long startTime = System.nanoTime();
blob.commitBlockList(blockEntries, accessCondition,
new BlobRequestOptions(), opContext);
LOG.debug("Upload block list took {} ms for blob {} ",
TimeUnit.NANOSECONDS.toMillis(
System.nanoTime() - startTime), key);
break;
} catch(Exception ioe) {
LOG.debug("Encountered exception during uploading block for Blob {}"
+ " Exception : {}", key, ioe);
uploadRetryAttempts++;
lastLocalException = new AzureException(
"Encountered Exception while uploading block: " + ioe, ioe);
try {
Thread.sleep(
BLOCK_UPLOAD_RETRY_INTERVAL * (uploadRetryAttempts + 1));
} catch(InterruptedException ie) {
Thread.currentThread().interrupt();
break;
}
}
}
if (uploadRetryAttempts == MAX_BLOCK_UPLOAD_RETRIES) {
maybeSetFirstError(lastLocalException);
}
} | 3.68 |
hbase_Mutation_createPutKeyValue | /**
* Create a KeyValue with this object's row key and the Put identifier.
* @return a KeyValue with this object's row key and the Put identifier.
*/
KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value,
Tag[] tags) {
return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, family, 0,
family == null ? 0 : family.length, qualifier, ts, KeyValue.Type.Put, value,
tags != null ? Arrays.asList(tags) : null);
} | 3.68 |
framework_VFilterSelect_setSuggestions | /**
* Sets the suggestions rendered in the menu.
*
* @param suggestions
* The suggestions to be rendered in the menu
*/
public void setSuggestions(
Collection<FilterSelectSuggestion> suggestions) {
if (enableDebug) {
debug("VFS.SM: setSuggestions(" + suggestions + ")");
}
clearItems();
boolean isFirstIteration = true;
for (final FilterSelectSuggestion s : suggestions) {
final MenuItem mi = new MenuItem(s.getDisplayString(), true, s);
String style = s.getStyle();
if (style != null) {
mi.addStyleName("v-filterselect-item-" + style);
}
Roles.getListitemRole().set(mi.getElement());
WidgetUtil.sinkOnloadForImages(mi.getElement());
this.addItem(mi);
// By default, first item on the list is always highlighted,
// unless adding new items is allowed.
if (isFirstIteration && !allowNewItem) {
selectItem(mi);
}
// If the filter matches the current selection, highlight that
// instead of the first item.
if (tb.getText().equals(s.getReplacementString())
&& s == currentSuggestion) {
selectItem(mi);
}
isFirstIteration = false;
}
if (suggestionPopupWidth != null && BrowserInfo.get().isIE()
&& BrowserInfo.get().getBrowserMajorVersion() < 10) {
// set TD width to a low value so that they won't mandate the
// suggestion pop-up width
suggestionPopup.setTdWidth(suggestionPopup.menu.getElement(),
1);
}
} | 3.68 |
querydsl_PathBuilder_getList | /**
* Create a new List typed path
*
* @param <A>
* @param <E>
* @param property property name
* @param type property type
* @param queryType expression type
* @return property path
*/
public <A, E extends SimpleExpression<A>> ListPath<A, E> getList(String property, Class<A> type, Class<? super E> queryType) {
validate(property, List.class);
return super.createList(property, type, queryType, PathInits.DIRECT);
} | 3.68 |
morf_DatabaseSchemaManager_truncateTable | /**
* Truncates the specified table.
*
* @param table the table to truncate.
* @return the SQL statements required to clear the table.
*/
private Collection<String> truncateTable(Table table) {
if (log.isDebugEnabled()) log.debug("Truncating table [" + table.getName() + "]");
// use delete-all rather than truncate, because at least on Oracle this is a lot faster when the table is small.
return dialect.get().deleteAllFromTableStatements(table);
} | 3.68 |
flink_ResourceManager_onFatalError | /**
* Notifies the ResourceManager that a fatal error has occurred and it cannot proceed.
*
* @param t The exception describing the fatal error
*/
protected void onFatalError(Throwable t) {
try {
log.error("Fatal error occurred in ResourceManager.", t);
} catch (Throwable ignored) {
}
// The fatal error handler implementation should make sure that this call is non-blocking
fatalErrorHandler.onFatalError(t);
} | 3.68 |
flink_DataStreamSink_getTransformation | /** Returns the transformation that contains the actual sink operator of this sink. */
@Internal
public Transformation<T> getTransformation() {
return transformation;
} | 3.68 |
flink_PermanentBlobCache_getFile | /**
* Returns the path to a local copy of the file associated with the provided job ID and blob
* key.
*
* <p>We will first attempt to serve the BLOB from the local storage. If the BLOB is not in
* there, we will try to download it from the HA store, or directly from the {@link BlobServer}.
*
* @param jobId ID of the job this blob belongs to
* @param key blob key associated with the requested file
* @return The path to the file.
* @throws java.io.FileNotFoundException if the BLOB does not exist;
* @throws IOException if any other error occurs when retrieving the file
*/
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
checkNotNull(jobId);
return getFileInternal(jobId, key);
} | 3.68 |
flink_SharedBuffer_getEntry | /**
* Always returns the node, either from the cache or from state.
*
* @param nodeId id of the node
* @return SharedBufferNode
*/
Lockable<SharedBufferNode> getEntry(NodeId nodeId) {
try {
Lockable<SharedBufferNode> lockableFromCache = entryCache.getIfPresent(nodeId);
if (Objects.nonNull(lockableFromCache)) {
return lockableFromCache;
} else {
Lockable<SharedBufferNode> lockableFromState = entries.get(nodeId);
if (Objects.nonNull(lockableFromState)) {
entryCache.put(nodeId, lockableFromState);
}
return lockableFromState;
}
} catch (Exception ex) {
throw new WrappingRuntimeException(ex);
}
} | 3.68 |
druid_Lexer_reset | // TODO: fix reset; resetting these fields can leave the lexer's cursor misaligned, so use is discouraged
@Deprecated
public void reset(int mark, char markChar, Token token) {
this.pos = mark;
this.ch = markChar;
this.token = token;
} | 3.68 |
hbase_MetaRegionLocationCache_loadMetaLocationsFromZk | /**
* Populates the current snapshot of meta locations from ZK. If no meta znodes exist, it registers
* a watcher on the base znode to check for any CREATE/DELETE events on the children.
* @param retryCounter controls the number of retries and sleep between retries.
* @param opType type of the znode operation that triggered this load.
*/
private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opType) {
TraceUtil.trace(() -> {
List<String> znodes = null;
while (retryCounter.shouldRetry()) {
try {
znodes = watcher.getMetaReplicaNodesAndWatchChildren();
break;
} catch (KeeperException ke) {
LOG.debug("Error populating initial meta locations", ke);
if (!retryCounter.shouldRetry()) {
// Retries exhausted and watchers not set. This is not a desirable state since the cache
// could remain stale forever. Propagate the exception.
watcher.abort("Error populating meta locations", ke);
return;
}
try {
retryCounter.sleepUntilNextRetry();
} catch (InterruptedException ie) {
LOG.error("Interrupted while loading meta locations from ZK", ie);
Thread.currentThread().interrupt();
return;
}
}
}
if (znodes == null || znodes.isEmpty()) {
// No meta znodes exist at this point but we registered a watcher on the base znode to
// listen for updates. They will be handled via nodeChildrenChanged().
return;
}
if (znodes.size() == cachedMetaLocations.size()) {
// No new meta znodes got added.
return;
}
for (String znode : znodes) {
String path = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode);
updateMetaLocation(path, opType);
}
}, "MetaRegionLocationCache.loadMetaLocationsFromZk");
} | 3.68 |
hudi_HoodieTable_getFileSystemView | /**
* Get the view of the file system for this table.
*/
public TableFileSystemView getFileSystemView() {
return new HoodieTableFileSystemView(metaClient, getCompletedCommitsTimeline());
} | 3.68 |
morf_H2MetaDataProvider_setAdditionalColumnMetadata | /**
* H2 can (and must) provide the auto-increment start value from the column remarks.
*
* @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#setAdditionalColumnMetadata(RealName, ColumnBuilder, ResultSet)
*/
@Override
protected ColumnBuilder setAdditionalColumnMetadata(RealName tableName, ColumnBuilder columnBuilder, ResultSet columnMetaData) throws SQLException {
columnBuilder = super.setAdditionalColumnMetadata(tableName, columnBuilder, columnMetaData);
if (columnBuilder.isAutoNumbered()) {
int startValue = getAutoIncrementStartValue(columnMetaData.getString(COLUMN_REMARKS));
return columnBuilder.autoNumbered(startValue == -1 ? 1 : startValue);
} else {
return columnBuilder;
}
} | 3.68 |
flink_JobVertex_getInvokableClass | /**
* Returns the invokable class which represents the task of this vertex.
*
* @param cl The classloader used to resolve user-defined classes
* @return The invokable class, <code>null</code> if it is not set
*/
public Class<? extends TaskInvokable> getInvokableClass(ClassLoader cl) {
if (cl == null) {
throw new NullPointerException("The classloader must not be null.");
}
if (invokableClassName == null) {
return null;
}
try {
return Class.forName(invokableClassName, true, cl).asSubclass(TaskInvokable.class);
} catch (ClassNotFoundException e) {
throw new RuntimeException("The user-code class could not be resolved.", e);
} catch (ClassCastException e) {
throw new RuntimeException(
"The user-code class is no subclass of " + TaskInvokable.class.getName(), e);
}
} | 3.68 |
dubbo_Mixin_mixin | /**
* Mixin interfaces and delegates.
* All classes must be public.
*
* @param ics interface class array.
* @param dcs delegate class array.
* @param cl class loader.
* @return Mixin instance.
*/
public static Mixin mixin(Class<?>[] ics, Class<?>[] dcs, ClassLoader cl) {
assertInterfaceArray(ics);
long id = MIXIN_CLASS_COUNTER.getAndIncrement();
String pkg = null;
ClassGenerator ccp = null, ccm = null;
try {
ccp = ClassGenerator.newInstance(cl);
// impl constructor
StringBuilder code = new StringBuilder();
for (int i = 0; i < dcs.length; i++) {
if (!Modifier.isPublic(dcs[i].getModifiers())) {
String npkg = dcs[i].getPackage().getName();
if (pkg == null) {
pkg = npkg;
} else {
if (!pkg.equals(npkg)) {
throw new IllegalArgumentException("non-public interfaces class from different packages");
}
}
}
ccp.addField("private " + dcs[i].getName() + " d" + i + ";");
code.append('d')
.append(i)
.append(" = (")
.append(dcs[i].getName())
.append(")$1[")
.append(i)
.append("];\n");
if (MixinAware.class.isAssignableFrom(dcs[i])) {
code.append('d').append(i).append(".setMixinInstance(this);\n");
}
}
ccp.addConstructor(Modifier.PUBLIC, new Class<?>[] {Object[].class}, code.toString());
Class<?> neighbor = null;
// impl methods.
Set<String> worked = new HashSet<String>();
for (int i = 0; i < ics.length; i++) {
if (!Modifier.isPublic(ics[i].getModifiers())) {
String npkg = ics[i].getPackage().getName();
if (pkg == null) {
pkg = npkg;
neighbor = ics[i];
} else {
if (!pkg.equals(npkg)) {
throw new IllegalArgumentException("non-public delegate class from different packages");
}
}
}
ccp.addInterface(ics[i]);
for (Method method : ics[i].getMethods()) {
if ("java.lang.Object".equals(method.getDeclaringClass().getName())) {
continue;
}
String desc = ReflectUtils.getDesc(method);
if (worked.contains(desc)) {
continue;
}
worked.add(desc);
int ix = findMethod(dcs, desc);
if (ix < 0) {
throw new RuntimeException("Missing method [" + desc + "] implement.");
}
Class<?> rt = method.getReturnType();
String mn = method.getName();
if (Void.TYPE.equals(rt)) {
ccp.addMethod(
mn,
method.getModifiers(),
rt,
method.getParameterTypes(),
method.getExceptionTypes(),
"d" + ix + "." + mn + "($$);");
} else {
ccp.addMethod(
mn,
method.getModifiers(),
rt,
method.getParameterTypes(),
method.getExceptionTypes(),
"return ($r)d" + ix + "." + mn + "($$);");
}
}
}
if (pkg == null) {
pkg = PACKAGE_NAME;
neighbor = Mixin.class;
}
// create MixinInstance class.
String micn = pkg + ".mixin" + id;
ccp.setClassName(micn);
ccp.toClass(neighbor);
// create Mixin class.
String fcn = Mixin.class.getName() + id;
ccm = ClassGenerator.newInstance(cl);
ccm.setClassName(fcn);
ccm.addDefaultConstructor();
ccm.setSuperClass(Mixin.class.getName());
ccm.addMethod("public Object newInstance(Object[] delegates){ return new " + micn + "($1); }");
Class<?> mixin = ccm.toClass(Mixin.class);
return (Mixin) mixin.getDeclaredConstructor().newInstance();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(e.getMessage(), e);
} finally {
// release ClassGenerator
if (ccp != null) {
ccp.release();
}
if (ccm != null) {
ccm.release();
}
}
} | 3.68 |
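A hedged usage sketch (Greeting and GreetingImpl are hypothetical types used only for illustration):

// Combine delegates behind a single interface at runtime.
Mixin m = Mixin.mixin(new Class<?>[] {Greeting.class},
new Class<?>[] {GreetingImpl.class},
Thread.currentThread().getContextClassLoader());
Greeting g = (Greeting) m.newInstance(new Object[] {new GreetingImpl()});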
framework_DesignFormatter_mapDefaultTypes | /**
* Maps default types to their converters.
*
*/
protected void mapDefaultTypes() {
// numbers use standard toString/valueOf approach
for (Class<?> c : new Class<?>[] { Byte.class, Short.class,
Integer.class, Long.class }) {
DesignToStringConverter<?> conv = new DesignToStringConverter(c);
converterMap.put(c, conv);
try {
converterMap.put((Class<?>) c.getField("TYPE").get(null), conv);
} catch (Exception e) {
// never happens: these wrapper classes all declare a public static TYPE field
}
}
// booleans use a bit different converter than the standard one
// "false" is boolean false, everything else is boolean true
Converter<String, Boolean> booleanConverter = new Converter<String, Boolean>() {
@Override
public Result<Boolean> convertToModel(String value,
ValueContext context) {
return Result.ok(!value.equalsIgnoreCase("false"));
}
@Override
public String convertToPresentation(Boolean value,
ValueContext context) {
if (value.booleanValue()) {
return "";
} else {
return "false";
}
}
};
converterMap.put(Boolean.class, booleanConverter);
converterMap.put(boolean.class, booleanConverter);
// floats and doubles use formatters
final DecimalFormatSymbols symbols = new DecimalFormatSymbols(
new Locale("en", "US"));
final DecimalFormat fmt = new DecimalFormat("0.###", symbols);
fmt.setGroupingUsed(false);
Converter<String, ?> floatConverter = new StringToFloatConverter(
"Error converting value") {
@Override
protected NumberFormat getFormat(Locale locale) {
return fmt;
};
};
converterMap.put(Float.class, floatConverter);
converterMap.put(float.class, floatConverter);
Converter<String, ?> doubleConverter = new StringToDoubleConverter(
"Error converting value") {
@Override
protected NumberFormat getFormat(Locale locale) {
return fmt;
};
};
converterMap.put(Double.class, doubleConverter);
converterMap.put(double.class, doubleConverter);
final DecimalFormat bigDecimalFmt = new DecimalFormat("0.###", symbols);
bigDecimalFmt.setGroupingUsed(false);
bigDecimalFmt.setParseBigDecimal(true);
converterMap.put(BigDecimal.class,
new StringToBigDecimalConverter("Error converting value") {
@Override
protected NumberFormat getFormat(Locale locale) {
return bigDecimalFmt;
};
});
// strings do nothing
converterMap.put(String.class, new Converter<String, String>() {
@Override
public Result<String> convertToModel(String value,
ValueContext context) {
return Result.ok(value);
}
@Override
public String convertToPresentation(String value,
ValueContext context) {
return value;
}
});
// char takes the first character from the string
Converter<String, Character> charConverter = new DesignToStringConverter<Character>(
Character.class) {
@Override
public Result<Character> convertToModel(String value,
ValueContext context) {
return Result.ok(value.charAt(0));
}
};
converterMap.put(Character.class, charConverter);
converterMap.put(char.class, charConverter);
converterMap.put(Date.class, new DesignDateConverter());
converterMap.put(LocalDate.class, new DesignLocalDateConverter());
converterMap.put(LocalDateTime.class,
new DesignLocalDateTimeConverter());
converterMap.put(ShortcutAction.class,
new DesignShortcutActionConverter());
converterMap.put(Resource.class, new DesignResourceConverter());
converterMap.put(TimeZone.class, new DesignTimeZoneConverter());
} | 3.68 |
dubbo_LruCache_put | /**
* API to store a value against a key in the calling thread's scope.
* @param key Unique identifier for the object being stored.
* @param value Value being stored
*/
@Override
public void put(Object key, Object value) {
store.put(key, value);
} | 3.68 |
flink_BinarySegmentUtils_readRowData | /** Gets an instance of {@link RowData} from underlying {@link MemorySegment}. */
public static RowData readRowData(
MemorySegment[] segments, int numFields, int baseOffset, long offsetAndSize) {
final int size = ((int) offsetAndSize);
int offset = (int) (offsetAndSize >> 32);
NestedRowData row = new NestedRowData(numFields);
row.pointTo(segments, offset + baseOffset, size);
return row;
} | 3.68 |
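The offsetAndSize long packs the offset into the high 32 bits and the size into the low 32 bits, matching the two casts above; a sketch of the assumed packing (packOffsetAndSize is a hypothetical helper):

// Encodes what readRowData decodes: offset in the high half, size in the low half.
static long packOffsetAndSize(int offset, int size) {
return ((long) offset << 32) | (size & 0xFFFFFFFFL);
}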
hadoop_RBFMetrics_getSecondsSince | /**
* Get the number of seconds passed since a date.
*
* @param timeMs to use as a reference.
* @return Seconds since the date.
*/
private static long getSecondsSince(long timeMs) {
if (timeMs < 0) {
return -1;
}
return (now() - timeMs) / 1000;
} | 3.68 |
flink_HiveParserBaseSemanticAnalyzer_getGroupByForClause | // This function is a wrapper of parseInfo.getGroupByForClause which automatically translates
// SELECT DISTINCT a,b,c to SELECT a,b,c GROUP BY a,b,c.
public static List<HiveParserASTNode> getGroupByForClause(
HiveParserQBParseInfo parseInfo, String dest) {
if (parseInfo.getSelForClause(dest).getToken().getType() == HiveASTParser.TOK_SELECTDI) {
HiveParserASTNode selectExprs = parseInfo.getSelForClause(dest);
List<HiveParserASTNode> result =
new ArrayList<>(selectExprs == null ? 0 : selectExprs.getChildCount());
if (selectExprs != null) {
for (int i = 0; i < selectExprs.getChildCount(); ++i) {
if (((HiveParserASTNode) selectExprs.getChild(i)).getToken().getType()
== HiveASTParser.QUERY_HINT) {
continue;
}
// table.column AS alias
HiveParserASTNode grpbyExpr =
(HiveParserASTNode) selectExprs.getChild(i).getChild(0);
result.add(grpbyExpr);
}
}
return result;
} else {
HiveParserASTNode grpByExprs = parseInfo.getGroupByForClause(dest);
List<HiveParserASTNode> result =
new ArrayList<>(grpByExprs == null ? 0 : grpByExprs.getChildCount());
if (grpByExprs != null) {
for (int i = 0; i < grpByExprs.getChildCount(); ++i) {
HiveParserASTNode grpbyExpr = (HiveParserASTNode) grpByExprs.getChild(i);
if (grpbyExpr.getType() != HiveASTParser.TOK_GROUPING_SETS_EXPRESSION) {
result.add(grpbyExpr);
}
}
}
return result;
}
} | 3.68 |
framework_SerializablePredicate_or | /**
* Returns a composed predicate that represents a short-circuiting logical
* OR of this predicate and another. When evaluating the composed predicate,
* if this predicate is {@code true}, then the {@code other} predicate is
* not evaluated.
*
* <p>
* Any exceptions thrown during evaluation of either predicate are relayed
* to the caller; if evaluation of this predicate throws an exception, the
* {@code other} predicate will not be evaluated.
*
* @param other
* a predicate that will be logically-ORed with this predicate
* @return a composed predicate that represents the short-circuiting logical
* OR of this predicate and the {@code other} predicate
* @throws NullPointerException
* if other is null
* @since 8.5
*/
default SerializablePredicate<T> or(
SerializablePredicate<? super T> other) {
Objects.requireNonNull(other);
return t -> test(t) || other.test(t);
} | 3.68 |
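A brief usage sketch (the predicates are hypothetical):

SerializablePredicate<String> isEmpty = String::isEmpty;
SerializablePredicate<String> isBlank = s -> s.trim().isEmpty();
SerializablePredicate<String> emptyOrBlank = isEmpty.or(isBlank);
emptyOrBlank.test("   "); // true; isBlank runs only because isEmpty returned false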
hadoop_ClusterTopologyReader_get | /**
* Get the {@link LoggedNetworkTopology} object.
*
* @return The {@link LoggedNetworkTopology} object parsed from the input.
*/
public LoggedNetworkTopology get() {
return topology;
} | 3.68 |
flink_FlinkContainersSettings_zookeeperHostname | /**
* Sets the {@code zookeeperHostname} and returns a reference to this Builder enabling
* method chaining.
*
* @param zookeeperHostname The Zookeeper hostname.
* @return A reference to this Builder.
*/
public Builder zookeeperHostname(String zookeeperHostname) {
this.zookeeperHostname = zookeeperHostname;
return this;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperationsForExistingDataFix1 | /**
* @return expected SQL for math operation for existing data fix 1
*/
protected String expectedSqlForMathOperationsForExistingDataFix1() {
return "ROUND(doublevalue / 1000 * doublevalue, 2)";
} | 3.68 |
dubbo_LoggerFactory_setLoggerAdapter | /**
* Set logger provider
*
* @param loggerAdapter logger provider
*/
public static void setLoggerAdapter(LoggerAdapter loggerAdapter) {
if (loggerAdapter != null) {
if (loggerAdapter == LoggerFactory.loggerAdapter) {
return;
}
loggerAdapter.getLogger(LoggerFactory.class.getName());
LoggerFactory.loggerAdapter = loggerAdapter;
for (Map.Entry<String, FailsafeLogger> entry : LOGGERS.entrySet()) {
entry.getValue().setLogger(LoggerFactory.loggerAdapter.getLogger(entry.getKey()));
}
}
} | 3.68 |
hadoop_SysInfoWindows_getAvailablePhysicalMemorySize | /** {@inheritDoc} */
@Override
public long getAvailablePhysicalMemorySize() {
refreshIfNeeded();
return memAvailable;
} | 3.68 |
framework_ClickableRenderer_findClosestParentGrid | /**
* Returns the Grid instance containing the given element, if any.
* <p>
* <strong>Note:</strong> This method may not work reliably if the grid
* in question is wrapped in a {@link Composite} <em>unless</em> the
* element is inside another widget that is a child of the wrapped grid;
* please refer to the note in
* {@link WidgetUtil#findWidget(Element, Class) Util.findWidget} for
* details.
*
* @param e
* the element whose parent grid to find
* @return the parent grid or null if none found.
*/
private static Grid<?> findClosestParentGrid(Element e) {
Widget w = WidgetUtil.findWidget(e, null);
while (w != null && !(w instanceof Grid)) {
w = w.getParent();
}
return (Grid<?>) w;
} | 3.68 |
morf_SchemaBean_getView | /**
* @see org.alfasoftware.morf.metadata.Schema#getView(java.lang.String)
*/
@Override
public View getView(String name) {
return views.get(name.toUpperCase());
} | 3.68 |
hadoop_FederationPolicyUtils_loadAMRMPolicy | /**
* Get AMRMProxy policy from state store, using default queue and
* configuration as fallback.
*
* @param queue the queue of the application
* @param oldPolicy the previous policy instance (can be null)
* @param conf the YARN configuration
* @param federationFacade state store facade
* @param homeSubClusterId home sub-cluster id
* @return FederationAMRMProxyPolicy recreated
* @throws FederationPolicyInitializationException if the policy fails to initialize
*/
public static FederationAMRMProxyPolicy loadAMRMPolicy(String queue,
FederationAMRMProxyPolicy oldPolicy, Configuration conf,
FederationStateStoreFacade federationFacade,
SubClusterId homeSubClusterId)
throws FederationPolicyInitializationException {
// Local policy and its configuration
SubClusterPolicyConfiguration configuration =
loadPolicyConfiguration(queue, conf, federationFacade);
// Instantiate the policyManager and get policy
FederationPolicyInitializationContext context =
new FederationPolicyInitializationContext(configuration,
federationFacade.getSubClusterResolver(), federationFacade,
homeSubClusterId);
LOG.info("Creating policy manager of type: " + configuration.getType());
FederationPolicyManager federationPolicyManager =
instantiatePolicyManager(configuration.getType());
// set queue, reinit policy if required (implementation lazily check
// content of conf), and cache it
federationPolicyManager.setQueue(configuration.getQueue());
return federationPolicyManager.getAMRMPolicy(context, oldPolicy);
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_getNumLocalizedContainers | /**
* Returns the number of containers matching an allocation Id that are
* localized in the targetId subcluster.
*/
private long getNumLocalizedContainers(long allocationId,
SubClusterId targetId) {
AtomicLong c = countContainersPerRM.get(allocationId).get(targetId);
return c == null ? 0 : c.get();
} | 3.68 |
hudi_HoodieConsistentBucketIndex_rollbackCommit | /**
* Do nothing.
* A failed write may create a hashing metadata for a partition. In this case, we still do nothing when rolling back
* the failed write. Because the hashing metadata created by a writer must have 00000000000000 timestamp and can be viewed
* as the initialization of a partition rather than as a part of the failed write.
*/
@Override
public boolean rollbackCommit(String instantTime) {
return true;
} | 3.68 |
flink_TopNBuffer_get | /**
* Gets the record list from the buffer under the sortKey.
*
* @param sortKey key to get
* @return the record list from the buffer under the sortKey
*/
public Collection<RowData> get(RowData sortKey) {
return treeMap.get(sortKey);
} | 3.68 |
flink_RowKind_fromByteValue | /**
* Creates a {@link RowKind} from the given byte value. Each {@link RowKind} has a byte value
* representation.
*
* @see #toByteValue() for mapping of byte value and {@link RowKind}.
*/
public static RowKind fromByteValue(byte value) {
switch (value) {
case 0:
return INSERT;
case 1:
return UPDATE_BEFORE;
case 2:
return UPDATE_AFTER;
case 3:
return DELETE;
default:
throw new UnsupportedOperationException(
"Unsupported byte value '" + value + "' for row kind.");
}
} | 3.68 |
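A round-trip sketch, assuming the complementary toByteValue() mapping referenced in the Javadoc:

RowKind kind = RowKind.fromByteValue((byte) 2); // UPDATE_AFTER
byte b = kind.toByteValue(); // 2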
AreaShop_WorldGuardRegionFlagsFeature_updateRegionFlags | /**
* Set the region flags/options to the values of a ConfigurationSection.
* @param region The region to update the flags for
* @param flags The flags to apply
* @return true if the flags have been set correctly, otherwise false
*/
private boolean updateRegionFlags(GeneralRegion region, ConfigurationSection flags) {
boolean result = true;
Set<String> flagNames = flags.getKeys(false);
WorldGuardPlugin worldGuard = plugin.getWorldGuard();
// Get the region
ProtectedRegion worldguardRegion = region.getRegion();
if(worldguardRegion == null) {
AreaShop.debug("Region '" + region.getName() + "' does not exist, setting flags failed");
return false;
}
// Loop through all flags that are set in the config
for(String flagName : flagNames) {
String value = Message.fromString(flags.getString(flagName)).replacements(region).getPlain();
// In the config normal Bukkit color codes are used, those only need to be translated on 5.X WorldGuard versions
if(plugin.getWorldGuard().getDescription().getVersion().startsWith("5.")) {
value = translateBukkitToWorldGuardColors(value);
}
if(flagName.equalsIgnoreCase("members")) {
plugin.getWorldGuardHandler().setMembers(worldguardRegion, parseAccessSet(value));
//AreaShop.debug(" Flag " + flagName + " set: " + members.toUserFriendlyString());
} else if(flagName.equalsIgnoreCase("owners")) {
plugin.getWorldGuardHandler().setOwners(worldguardRegion, parseAccessSet(value));
//AreaShop.debug(" Flag " + flagName + " set: " + owners.toUserFriendlyString());
} else if(flagName.equalsIgnoreCase("priority")) {
try {
int priority = Integer.parseInt(value);
if(worldguardRegion.getPriority() != priority) {
worldguardRegion.setPriority(priority);
}
//AreaShop.debug(" Flag " + flagName + " set: " + value);
} catch(NumberFormatException e) {
AreaShop.warn("The value of flag " + flagName + " is not a number");
result = false;
}
} else if(flagName.equalsIgnoreCase("parent")) {
if(region.getWorld() == null || plugin.getRegionManager(region.getWorld()) == null) {
continue;
}
ProtectedRegion parentRegion = plugin.getRegionManager(region.getWorld()).getRegion(value);
if(parentRegion != null) {
if(!parentRegion.equals(worldguardRegion.getParent())) {
try {
worldguardRegion.setParent(parentRegion);
//AreaShop.debug(" Flag " + flagName + " set: " + value);
} catch(ProtectedRegion.CircularInheritanceException e) {
AreaShop.warn("The parent set in the config is not correct (circular inheritance)");
}
}
} else {
AreaShop.warn("The parent set in the config is not correct (region does not exist)");
}
} else {
// Parse all other normal flags (groups are also handled)
String flagSetting = null;
com.sk89q.worldguard.protection.flags.RegionGroup groupValue = null;
Flag<?> foundFlag = plugin.getWorldGuardHandler().fuzzyMatchFlag(flagName);
if(foundFlag == null) {
AreaShop.warn("Found wrong flag in flagProfiles section: " + flagName + ", check if that is the correct WorldGuard flag");
continue;
}
RegionGroupFlag groupFlag = foundFlag.getRegionGroupFlag();
if(value == null || value.isEmpty()) {
if(worldguardRegion.getFlag(foundFlag) != null) {
worldguardRegion.setFlag(foundFlag, null);
}
if(groupFlag != null && worldguardRegion.getFlag(groupFlag) != null) {
worldguardRegion.setFlag(groupFlag, null);
}
//AreaShop.debug(" Flag " + flagName + " reset (+ possible group of flag)");
} else {
if(groupFlag == null) {
flagSetting = value;
} else {
for(String part : value.split(" ")) {
if(part.startsWith("g:")) {
if(part.length() > 2) {
try {
groupValue = plugin.getWorldGuardHandler().parseFlagGroupInput(groupFlag, part.substring(2));
} catch(InvalidFlagFormat e) {
AreaShop.warn("Found wrong group value for flag " + flagName);
}
}
} else {
if(flagSetting == null) {
flagSetting = part;
} else {
flagSetting += " " + part;
}
}
}
}
if(flagSetting != null) {
try {
setFlag(worldguardRegion, foundFlag, flagSetting);
//AreaShop.debug(" Flag " + flagName + " set: " + flagSetting);
} catch(InvalidFlagFormat e) {
AreaShop.warn("Found wrong value for flag " + flagName);
}
}
if(groupValue != null) {
if(groupValue == groupFlag.getDefault()) {
worldguardRegion.setFlag(groupFlag, null);
//AreaShop.debug(" Group of flag " + flagName + " set to default: " + groupValue);
} else {
worldguardRegion.setFlag(groupFlag, groupValue);
//AreaShop.debug(" Group of flag " + flagName + " set: " + groupValue);
}
}
}
}
}
// Indicate that the region needs to be saved
if(worldGuard.getDescription().getVersion().startsWith("5.")) {
plugin.getFileManager().saveIsRequiredForRegionWorld(region.getWorldName());
}
return result;
} | 3.68 |
flink_WebLogAnalysis_filter | /**
* Filters for records of the visits relation where the year of visit is equal to a
* specified value. The URL of all visit records passing the filter is emitted.
*
* <p>Output Format: 0: URL 1: DATE
*/
@Override
public boolean filter(Tuple2<String, String> value) throws Exception {
// Parse date string with the format YYYY-MM-DD and extract the year
String dateString = value.f1;
int year = Integer.parseInt(dateString.substring(0, 4));
return (year == YEARFILTER);
} | 3.68 |
hbase_OutputSink_updateStatusWithMsg | /**
* Set status message in {@link MonitoredTask} instance that is set in this OutputSink
* @param msg message to update the status with
*/
protected final void updateStatusWithMsg(String msg) {
if (status != null) {
status.setStatus(msg);
}
} | 3.68 |
hadoop_ChangeDetectionPolicy_createPolicy | /**
* Create a policy.
* @param mode mode of checks
* @param source source of change
* @param requireVersion throw exception when no version available?
* @return the policy
*/
@VisibleForTesting
public static ChangeDetectionPolicy createPolicy(final Mode mode,
final Source source, final boolean requireVersion) {
switch (source) {
case ETag:
return new ETagChangeDetectionPolicy(mode, requireVersion);
case VersionId:
return new VersionIdChangeDetectionPolicy(mode, requireVersion);
default:
return new NoChangeDetection();
}
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterBlobColumn | /**
* Test altering a blob column.
*/
@Test
public void testAlterBlobColumn() {
testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, BLOB_FIELD), column(BLOB_FIELD, DataType.BLOB), expectedAlterTableAlterBlobColumnStatement());
} | 3.68 |
zxing_Detector_getMatrixCornerPoints | /**
* Gets the Aztec code corners from the bull's eye corners and the parameters.
*
* @param bullsEyeCorners the array of bull's eye corners
* @return the array of Aztec code corners
*/
private ResultPoint[] getMatrixCornerPoints(ResultPoint[] bullsEyeCorners) {
return expandSquare(bullsEyeCorners, 2 * nbCenterLayers, getDimension());
} | 3.68 |
pulsar_EnumValuesDataProvider_toDataProviderArray | /*
* Converts all values of an Enum class to a TestNG DataProvider object array
*/
public static Object[][] toDataProviderArray(Class<? extends Enum<?>> enumClass) {
Enum<?>[] enumValues = enumClass.getEnumConstants();
return Stream.of(enumValues)
.map(enumValue -> new Object[]{enumValue})
.collect(Collectors.toList())
.toArray(new Object[0][]);
} | 3.68 |
hadoop_SingleFilePerBlockCache_addToLinkedListAndEvictIfRequired | /**
* Add the given entry to the head of the linked list and if the LRU cache size
* exceeds the max limit, evict tail of the LRU linked list.
*
* @param entry Block entry to add.
*/
private void addToLinkedListAndEvictIfRequired(Entry entry) {
blocksLock.writeLock().lock();
try {
addToHeadOfLinkedList(entry);
entryListSize++;
if (entryListSize > maxBlocksCount && !closed.get()) {
Entry elementToPurge = tail;
tail = tail.getPrevious();
if (tail == null) {
tail = head;
}
tail.setNext(null);
elementToPurge.setPrevious(null);
deleteBlockFileAndEvictCache(elementToPurge);
}
} finally {
blocksLock.writeLock().unlock();
}
} | 3.68 |
querydsl_AbstractMySQLQuery_noCache | /**
* With SQL_NO_CACHE, the server does not use the query cache. It neither checks the query cache
* to see whether the result is already cached, nor does it cache the query result.
*
* @return the current object
*/
public C noCache() {
return addFlag(Position.AFTER_SELECT, SQL_NO_CACHE);
} | 3.68 |
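A hedged usage sketch with Querydsl (the employee path is a hypothetical generated Q-type, and query stands for a concrete MySQL query instance):

List<Employee> rows = query.noCache()
.from(employee)
.select(employee)
.fetch();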
framework_VaadinPortletService_getRequestType | /**
* Gets the request type for the request.
*
* @param request
* the request to get a request type for
* @return the request type
*
* @deprecated As of 7.0. Will likely change or be removed in a future
* version
*/
@Deprecated
protected RequestType getRequestType(VaadinRequest request) {
RequestType type = (RequestType) request
.getAttribute(RequestType.class.getName());
if (type == null) {
type = getPortlet().getRequestType((VaadinPortletRequest) request);
request.setAttribute(RequestType.class.getName(), type);
}
return type;
} | 3.68 |
hbase_CommonFSUtils_getWALRootDir | /**
* Get the path for the root directory for WAL data
* @param c configuration
* @return {@link Path} to hbase log root directory: e.g. {@value HBASE_WAL_DIR} from
* configuration as a qualified Path. Defaults to HBase root dir.
* @throws IOException e
*/
public static Path getWALRootDir(final Configuration c) throws IOException {
Path p = new Path(c.get(HBASE_WAL_DIR, c.get(HConstants.HBASE_DIR)));
if (!isValidWALRootDir(p, c)) {
return getRootDir(c);
}
FileSystem fs = p.getFileSystem(c);
return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
} | 3.68 |
flink_CsvReader_getCharset | /**
* Gets the character set for the reader. Default is UTF-8.
*
* @return The charset for the reader.
*/
@PublicEvolving
public String getCharset() {
return this.charset;
} | 3.68 |
framework_PropertysetItem_removePropertySetChangeListener | /**
* Removes a previously registered property set change listener.
*
* @param listener
* the Listener to be removed.
*/
@Override
public void removePropertySetChangeListener(
Item.PropertySetChangeListener listener) {
if (propertySetChangeListeners != null) {
propertySetChangeListeners.remove(listener);
}
} | 3.68 |
dubbo_AbstractStateRouter_continueRoute | /**
* Call next router to get result
*
* @param invokers current router filtered invokers
*/
protected final BitList<Invoker<T>> continueRoute(
BitList<Invoker<T>> invokers,
URL url,
Invocation invocation,
boolean needToPrintMessage,
Holder<RouterSnapshotNode<T>> nodeHolder) {
if (nextRouter != null) {
return nextRouter.route(invokers, url, invocation, needToPrintMessage, nodeHolder);
} else {
return invokers;
}
} | 3.68 |
hadoop_BaseRecord_hashCode | /**
* Override hash code to use primary key(s) for comparison.
*/
@Override
public int hashCode() {
Map<String, String> keyset = this.getPrimaryKeys();
return keyset.hashCode();
} | 3.68 |
pulsar_PersistentSubscription_mergeCursorProperties | /**
* Return a merged map that contains the cursor properties specified by the user
* (e.g. when using a compaction subscription) and the subscription properties.
*/
protected Map<String, Long> mergeCursorProperties(Map<String, Long> userProperties) {
Map<String, Long> baseProperties = getBaseCursorProperties(isReplicated());
if (userProperties.isEmpty()) {
// Use only the static instance in the common case
return baseProperties;
} else {
Map<String, Long> merged = new TreeMap<>();
merged.putAll(userProperties);
merged.putAll(baseProperties);
return merged;
}
} | 3.68 |
druid_SchemaResolveVisitorFactory_resolveExpr | // for performance
static void resolveExpr(SchemaResolveVisitor visitor, SQLExpr x) {
if (x == null) {
return;
}
Class<?> clazz = x.getClass();
if (clazz == SQLIdentifierExpr.class) {
visitor.visit((SQLIdentifierExpr) x);
return;
} else if (clazz == SQLIntegerExpr.class || clazz == SQLCharExpr.class) {
// skip
return;
}
x.accept(visitor);
} | 3.68 |
hbase_StoreScanner_updateReaders | // Implementation of ChangedReadersObserver
@Override
public void updateReaders(List<HStoreFile> sfs, List<KeyValueScanner> memStoreScanners)
throws IOException {
if (CollectionUtils.isEmpty(sfs) && CollectionUtils.isEmpty(memStoreScanners)) {
return;
}
boolean updateReaders = false;
flushLock.lock();
try {
if (!closeLock.tryLock()) {
// The reason for doing this is that when the current store scanner does not retrieve
// any new cells, then the scanner is considered to be done. The heap of this scanner
// is not closed till the shipped() call is completed. Hence in that case if at all
// the partial close (close (false)) has been called before updateReaders(), there is no
// need for the updateReaders() to happen.
LOG.debug("StoreScanner already has the close lock. There is no need to updateReaders");
// no lock acquired.
clearAndClose(memStoreScanners);
return;
}
// lock acquired
updateReaders = true;
if (this.closing) {
LOG.debug("StoreScanner already closing. There is no need to updateReaders");
clearAndClose(memStoreScanners);
return;
}
flushed = true;
final boolean isCompaction = false;
boolean usePread = get || scanUsePread;
// SEE HBASE-19468 where the flushed files are getting compacted even before a scanner
// calls next(). So its better we create scanners here rather than next() call. Ensure
// these scanners are properly closed() whether or not the scan is completed successfully
// Eagerly creating scanners so that we have the ref counting ticking on the newly created
// store files. In case of stream scanners this eager creation does not induce performance
// penalty because in scans (that uses stream scanners) the next() call is bound to happen.
List<KeyValueScanner> scanners = store.getScanners(sfs, cacheBlocks, get, usePread,
isCompaction, matcher, scan.getStartRow(), scan.getStopRow(), this.readPt, false);
flushedstoreFileScanners.addAll(scanners);
if (!CollectionUtils.isEmpty(memStoreScanners)) {
clearAndClose(memStoreScannersAfterFlush);
memStoreScannersAfterFlush.addAll(memStoreScanners);
}
} finally {
flushLock.unlock();
if (updateReaders) {
closeLock.unlock();
}
}
// Let the next() call handle re-creating and seeking
} | 3.68 |
flink_OrCondition_getRight | /** @return One of the {@link IterativeCondition conditions} combined in this condition. */
public IterativeCondition<T> getRight() {
return right;
} | 3.68 |
dubbo_DubboCertManager_refreshCert | /**
* Request the remote certificate authority to generate a cert pair for the current Dubbo instance
*
* @return cert pair
* @throws IOException ioException
*/
protected CertPair refreshCert() throws IOException {
KeyPair keyPair = signWithEcdsa();
if (keyPair == null) {
keyPair = signWithRsa();
}
if (keyPair == null) {
logger.error(
CONFIG_SSL_CERT_GENERATE_FAILED,
"",
"",
"Generate Key failed. Please check if your system support.");
return null;
}
String csr = generateCsr(keyPair);
DubboCertificateServiceGrpc.DubboCertificateServiceBlockingStub stub =
DubboCertificateServiceGrpc.newBlockingStub(channel);
stub = setHeaderIfNeed(stub);
String privateKeyPem = generatePrivatePemKey(keyPair);
DubboCertificateResponse certificateResponse = stub.createCertificate(generateRequest(csr));
if (certificateResponse == null || !certificateResponse.getSuccess()) {
logger.error(
CONFIG_SSL_CERT_GENERATE_FAILED,
"",
"",
"Failed to generate cert from Dubbo Certificate Authority. " + "Message: "
+ (certificateResponse == null ? "null" : certificateResponse.getMessage()));
return null;
}
logger.info("Successfully generate cert from Dubbo Certificate Authority. Cert expire time: "
+ certificateResponse.getExpireTime());
return new CertPair(
privateKeyPem,
certificateResponse.getCertPem(),
String.join("\n", certificateResponse.getTrustCertsList()),
certificateResponse.getExpireTime());
} | 3.68 |
framework_Tree_getItemIconAlternateText | /**
* Return the alternate text of an icon in a tree item.
*
* @param itemId
* Object with the ID of the item
* @return String with the alternate text of the icon, or an empty string
* when no alternate text was set
*/
public String getItemIconAlternateText(Object itemId) {
String storedAlt = itemIconAlts.get(itemId);
return storedAlt == null ? "" : storedAlt;
} | 3.68 |
hbase_RESTServlet_isReadOnly | /**
* Helper method to determine if server should only respond to GET HTTP method requests.
* @return boolean for server read-only state
*/
boolean isReadOnly() {
return getConfiguration().getBoolean("hbase.rest.readonly", false);
} | 3.68 |
hudi_BinaryUtil_compareTo | /**
* Lexicographically compare two arrays.
* Copied from HBase.
* @param buffer1 left operand
* @param buffer2 right operand
* @param offset1 Where to start comparing in the left buffer
* @param offset2 Where to start comparing in the right buffer
* @param length1 How much to compare from the left buffer
* @param length2 How much to compare from the right buffer
* @return 0 if equal, < 0 if left is less than right, etc.
*/
public static int compareTo(byte[] buffer1, int offset1, int length1,
byte[] buffer2, int offset2, int length2) {
// Short circuit equal case
if (buffer1 == buffer2
&& offset1 == offset2
&& length1 == length2) {
return 0;
}
// Bring WritableComparator code local
int end1 = offset1 + length1;
int end2 = offset2 + length2;
for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
int a = (buffer1[i] & 0xff);
int b = (buffer2[j] & 0xff);
if (a != b) {
return a - b;
}
}
return length1 - length2;
} | 3.68 |
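A quick illustration of the unsigned byte-wise ordering (values chosen purely for illustration):

byte[] a = {0x01, 0x7F};
byte[] b = {0x01, (byte) 0x80};
// 0x7F & 0xff == 127, 0x80 & 0xff == 128, so a sorts before b
int cmp = BinaryUtil.compareTo(a, 0, a.length, b, 0, b.length); // negative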
hbase_FavoredNodesManager_filterNonFNApplicableRegions | /**
* Filter and return regions for which favored nodes is not applicable.
* @return set of regions for which favored nodes is not applicable
*/
public static Set<RegionInfo> filterNonFNApplicableRegions(Collection<RegionInfo> regions) {
return regions.stream().filter(r -> !isFavoredNodeApplicable(r)).collect(Collectors.toSet());
} | 3.68 |
graphhopper_VectorTile_getStringValueBytes | /**
* <pre>
* Exactly one of these values must be present in a valid message
* </pre>
*
* <code>optional string string_value = 1;</code>
*/
public com.google.protobuf.ByteString
getStringValueBytes() {
java.lang.Object ref = stringValue_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
stringValue_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
} | 3.68 |
hadoop_FifoCandidatesSelector_preemptAMContainers | /**
* As more resources are needed for preemption, saved AMContainers have to be
* rescanned. Such AMContainers can become preemption candidates based on resToObtain, but
* maxAMCapacityForThisQueue resources will still be retained.
*
* @param clusterResource total cluster resource
* @param preemptMap selected preemption candidates per application attempt
* @param curCandidates candidates selected in the current round
* @param skippedAMContainerlist AM containers skipped so far
* @param resToObtainByPartition resources still to obtain, per partition
* @param skippedAMSize total size of the skipped AM containers
* @param maxAMCapacityForThisQueue AM resource capacity retained for this queue
* @param totalPreemptionAllowed total resource allowed to be preempted
*/
private void preemptAMContainers(Resource clusterResource,
Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
List<RMContainer> skippedAMContainerlist,
Map<String, Resource> resToObtainByPartition, Resource skippedAMSize,
Resource maxAMCapacityForThisQueue, Resource totalPreemptionAllowed) {
for (RMContainer c : skippedAMContainerlist) {
// Got required amount of resources for preemption, can stop now
if (resToObtainByPartition.isEmpty()) {
break;
}
// Once skippedAMSize reaches down to maxAMCapacityForThisQueue,
// container selection iteration for preemption will be stopped.
if (Resources.lessThanOrEqual(rc, clusterResource, skippedAMSize,
maxAMCapacityForThisQueue)) {
break;
}
boolean preempted = CapacitySchedulerPreemptionUtils
.tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
resToObtainByPartition, c, clusterResource, preemptMap,
curCandidates, totalPreemptionAllowed,
preemptionContext.getCrossQueuePreemptionConservativeDRF());
if (preempted) {
Resources.subtractFrom(skippedAMSize, c.getAllocatedResource());
}
}
skippedAMContainerlist.clear();
} | 3.68 |
morf_SqlDialect_getSqlForLeftTrim | /**
* Converts the LEFT_TRIM function into SQL.
*
* @param function the function to convert.
* @return a string representation of the SQL.
*/
protected String getSqlForLeftTrim(Function function) {
return "LTRIM(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
flink_SkipListUtils_helpSetNextValuePointer | /**
* Sets the next value pointer of the value.
*
* @param valuePointer the value pointer.
* @param nextValuePointer the next value pointer to set.
* @param spaceAllocator the space allocator.
*/
static void helpSetNextValuePointer(
long valuePointer, long nextValuePointer, Allocator spaceAllocator) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(valuePointer));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(valuePointer);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
putNextValuePointer(segment, offsetInByteBuffer, nextValuePointer);
} | 3.68 |
morf_OracleDialect_getSqlForAnalyseTable | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForAnalyseTable(Table)
*/
@Override
public Collection<String> getSqlForAnalyseTable(Table table) {
return ImmutableList.of(
"BEGIN \n" +
"DBMS_STATS.GATHER_TABLE_STATS(ownname=> '" + getSchemaName() + "', "
+ "tabname=>'" + table.getName() + "', "
+ "cascade=>true, degree=>DBMS_STATS.AUTO_DEGREE, no_invalidate=>false); \n"
+ "END;");
} | 3.68 |
hbase_UserProvider_isHBaseSecurityEnabled | /** Returns <tt>true</tt> if security is enabled, <tt>false</tt> otherwise */
public boolean isHBaseSecurityEnabled() {
return User.isHBaseSecurityEnabled(this.getConf());
} | 3.68 |
morf_SqlUtils_merge | /**
* Constructs a Merge Statement which either inserts or updates
* a record into a table depending on whether a condition exists in
* the table.
*
* <p>Usage is discouraged; this method will be deprecated at some point. Use
* {@link MergeStatement#merge()} for preference.</p>
*
* @return {@link MergeStatement}
*/
public static MergeStatement merge() {
return new MergeStatement();
} | 3.68 |
flink_JobGraph_addUserArtifact | /**
* Adds the path of a custom file required to run the job on a task manager.
*
* @param name a name under which this artifact will be accessible through {@link
* DistributedCache}
* @param file path of a custom file required to run the job on a task manager
*/
public void addUserArtifact(String name, DistributedCache.DistributedCacheEntry file) {
if (file == null) {
throw new IllegalArgumentException();
}
userArtifacts.putIfAbsent(name, file);
} | 3.68 |
hbase_ScannerContext_hasSizeLimit | /** Returns true if the size limit can be enforced in the checker's scope */
boolean hasSizeLimit(LimitScope checkerScope) {
return limits.canEnforceSizeLimitFromScope(checkerScope)
&& (limits.getDataSize() > 0 || limits.getHeapSize() > 0 || limits.getBlockSize() > 0);
} | 3.68 |
framework_BasicEvent_setCaption | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.event.CalendarEventEditor#setCaption(java.lang
* .String)
*/
@Override
public void setCaption(String caption) {
this.caption = caption;
fireEventChange();
} | 3.68 |
hbase_SnapshotManager_snapshotEnabledTable | /**
* Take a snapshot of an enabled table.
* @param snapshot description of the snapshot to take.
* @throws IOException if the snapshot could not be started or filesystem for snapshot temporary
* directory could not be determined
*/
private synchronized void snapshotEnabledTable(SnapshotDescription snapshot) throws IOException {
// setup the snapshot
prepareWorkingDirectory(snapshot);
// Take the snapshot of the enabled table
EnabledTableSnapshotHandler handler = new EnabledTableSnapshotHandler(snapshot, master, this);
snapshotTable(snapshot, handler);
} | 3.68 |
framework_Table_getColumnGenerator | /**
* Returns the ColumnGenerator used to generate the given column.
*
* @param columnId
* The id of the generated column
* @return The ColumnGenerator used for the given columnId or null.
*/
public ColumnGenerator getColumnGenerator(Object columnId)
throws IllegalArgumentException {
return columnGenerators.get(columnId);
} | 3.68 |
flink_PostVersionedIOReadableWritable_read | /**
* We do not support reading from a {@link DataInputView}, because it does not support pushing
* back already read bytes.
*/
@Override
public final void read(DataInputView in) throws IOException {
throw new UnsupportedOperationException(
"PostVersionedIOReadableWritable cannot read from a DataInputView.");
} | 3.68 |
AreaShop_GeneralRegion_isRestoreEnabled | /**
* Check if restoring is enabled.
* @return true if restoring is enabled, otherwise false
*/
public boolean isRestoreEnabled() {
return getBooleanSetting("general.enableRestore");
} | 3.68 |
hbase_MemcachedBlockCache_evictBlocksByHfileName | /**
* This method does nothing so that memcached can handle all evictions.
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
return 0;
} | 3.68 |
framework_UIProviderEvent_getService | /**
* Gets the Vaadin service from which the event originates.
*
* @return the Vaadin service
*/
public VaadinService getService() {
return (VaadinService) getSource();
} | 3.68 |
hadoop_AbstractAuditSpanImpl_close | /**
* Invoke {@link AuditSpan#deactivate()}.
* This is final: subclasses MUST override the
* {@code deactivate()} method.
*/
@Override
public final void close() {
deactivate();
} | 3.68 |
AreaShop_RentRegion_setPrice | /**
* Change the price of the region.
* @param price The price of the region
*/
public void setPrice(Double price) {
setSetting("rent.price", price);
} | 3.68 |
hadoop_ResponseInfo__r | //Value is raw HTML and shouldn't be escaped
public ResponseInfo _r(String key, Object value) {
items.add(Item.of(key, value, true));
return this;
} | 3.68 |
dubbo_ReferenceConfig_checkAndUpdateSubConfigs | /**
* This method should be called right after the creation of this class's instance, before any property in other config modules is used.
* Checks that each config module is created properly and overrides its properties if necessary.
*/
protected void checkAndUpdateSubConfigs() {
if (StringUtils.isEmpty(interfaceName)) {
throw new IllegalStateException("<dubbo:reference interface=\"\" /> interface not allow null!");
}
// get consumer's global configuration
completeCompoundConfigs();
// init some null configuration.
List<ConfigInitializer> configInitializers = this.getExtensionLoader(ConfigInitializer.class)
.getActivateExtension(URL.valueOf("configInitializer://"), (String[]) null);
configInitializers.forEach(e -> e.initReferConfig(this));
if (getGeneric() == null && getConsumer() != null) {
setGeneric(getConsumer().getGeneric());
}
if (ProtocolUtils.isGeneric(generic)) {
if (interfaceClass != null && !interfaceClass.equals(GenericService.class)) {
logger.warn(
CONFIG_PROPERTY_CONFLICT,
"",
"",
String.format(
"Found conflicting attributes for interface type: [interfaceClass=%s] and [generic=%s], "
+ "because the 'generic' attribute has higher priority than 'interfaceClass', so change 'interfaceClass' to '%s'. "
+ "Note: it will make this reference bean as a candidate bean of type '%s' instead of '%s' when resolving dependency in Spring.",
interfaceClass.getName(),
generic,
GenericService.class.getName(),
GenericService.class.getName(),
interfaceClass.getName()));
}
interfaceClass = GenericService.class;
} else {
try {
if (getInterfaceClassLoader() != null
&& (interfaceClass == null || interfaceClass.getClassLoader() != getInterfaceClassLoader())) {
interfaceClass = Class.forName(interfaceName, true, getInterfaceClassLoader());
} else if (interfaceClass == null) {
interfaceClass = Class.forName(
interfaceName, true, Thread.currentThread().getContextClassLoader());
}
} catch (ClassNotFoundException e) {
throw new IllegalStateException(e.getMessage(), e);
}
}
checkStubAndLocal(interfaceClass);
ConfigValidationUtils.checkMock(interfaceClass, this);
if (StringUtils.isEmpty(url)) {
checkRegistry();
}
resolveFile();
ConfigValidationUtils.validateReferenceConfig(this);
postProcessConfig();
} | 3.68 |
MagicPlugin_BaseSpell_onCancelSelection | /**
* Called when a material selection spell is cancelled mid-selection.
*/
public boolean onCancelSelection()
{
return false;
} | 3.68 |
hbase_HttpServer_getRequestURL | /**
* Quote the url so that users specifying the HOST HTTP header can't inject attacks.
*/
@Override
public StringBuffer getRequestURL() {
String url = rawRequest.getRequestURL().toString();
return new StringBuffer(HtmlQuoting.quoteHtmlChars(url));
} | 3.68 |
hbase_HBaseTestingUtility_createRootDir | /**
* Same as {@link HBaseTestingUtility#createRootDir(boolean)} except that the
* <code>create</code> flag is false.
* @return Fully qualified path to hbase root dir
*/
public Path createRootDir() throws IOException {
return createRootDir(false);
} | 3.68 |
flink_JobEdge_getUpstreamSubtaskStateMapper | /**
* Gets the channel state rescaler used for rescaling persisted data on upstream side of this
* JobEdge.
*
* @return The channel state rescaler to use, or null, if none was set.
*/
public SubtaskStateMapper getUpstreamSubtaskStateMapper() {
return upstreamSubtaskStateMapper;
} | 3.68 |
flink_ScopeFormat_concat | /**
* Concatenates the given component names separated by the delimiter character. Additionally the
* character filter is applied to all component names.
*
* @param filter Character filter to be applied to the component names
* @param delimiter Delimiter to separate component names
* @param components Array of component names
* @return The concatenated component name
*/
public static String concat(CharacterFilter filter, Character delimiter, String... components) {
StringBuilder sb = new StringBuilder();
sb.append(filter.filterCharacters(components[0]));
for (int x = 1; x < components.length; x++) {
sb.append(delimiter);
sb.append(filter.filterCharacters(components[x]));
}
return sb.toString();
} | 3.68 |
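Usage sketch, assuming CharacterFilter is a single-method interface usable as a lambda:

CharacterFilter filter = s -> s.replace(' ', '_');
String scope = ScopeFormat.concat(filter, '.', "task manager", "job", "op");
// -> "task_manager.job.op"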
flink_CombinedWatermarkStatus_setWatermark | /**
* Returns true if the watermark was advanced, that is if the new watermark is larger than
* the previous one.
*
* <p>Setting a watermark will clear the idleness flag.
*/
public boolean setWatermark(long watermark) {
this.idle = false;
final boolean updated = watermark > this.watermark;
if (updated) {
this.onWatermarkUpdate.onWatermarkUpdate(watermark);
this.watermark = Math.max(watermark, this.watermark);
}
return updated;
} | 3.68 |