name | code_snippet | score |
---|---|---|
hadoop_ContainerReInitEvent_isAutoCommit | /**
 * Should this re-initialization be auto-committed.
 * @return whether auto-commit is enabled.
*/
public boolean isAutoCommit() {
return autoCommit;
} | 3.68 |
flink_BigIntParser_parseField | /**
* Static utility to parse a field of type BigInteger from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
     * @throws IllegalArgumentException Thrown when the value cannot be parsed because the text
     *     does not represent a correct number.
*/
public static final BigInteger parseField(
byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if (limitedLen > 0
&& (Character.isWhitespace(bytes[startPos])
|| Character.isWhitespace(bytes[startPos + limitedLen - 1]))) {
throw new NumberFormatException(
"There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return new BigInteger(str);
} | 3.68 |
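The snippet above relies on Flink-internal helpers (`nextStringLength`, `ConfigConstants`). A minimal standalone sketch of the same delimiter-bounded BigInteger parsing, using only JDK classes and illustrative names, could look like this:

```java
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;

public class BigIntFieldParseSketch {

    /** Parses a BigInteger field that ends at the delimiter or at startPos + length. */
    static BigInteger parseField(byte[] bytes, int startPos, int length, char delimiter) {
        int end = startPos;
        int limit = startPos + length;
        // find the end of the field: either the delimiter or the end of the slice
        while (end < limit && bytes[end] != delimiter) {
            end++;
        }
        int fieldLen = end - startPos;
        if (fieldLen > 0
                && (Character.isWhitespace(bytes[startPos])
                        || Character.isWhitespace(bytes[end - 1]))) {
            throw new NumberFormatException("Leading or trailing whitespace in numeric field.");
        }
        return new BigInteger(new String(bytes, startPos, fieldLen, StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        byte[] row = "12345|678".getBytes(StandardCharsets.UTF_8);
        System.out.println(parseField(row, 0, row.length, '|')); // prints 12345
    }
}
```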
flink_CompiledPlan_printJsonString | /** Like {@link #asJsonString()}, but prints the result to {@link System#out}. */
default CompiledPlan printJsonString() {
System.out.println(this.asJsonString());
return this;
} | 3.68 |
framework_DragSourceExtensionConnector_getMatrixValue | /**
     * Parses one value from a 1-dimensional matrix (six values).
*
* @param matrix
* the matrix string of format {@code matrix(a,b,c,d,x,y)}
* @param n
* the Nth value to parse
* @return the value, which is in pixels, or 0 if not able to determine
* value from given matrix string
*/
private static int getMatrixValue(String matrix, int n) {
if (matrix == null || matrix.isEmpty()
|| matrix.equalsIgnoreCase("none")
|| !matrix.startsWith("matrix(")) {
return 0;
}
try {
// the matrix is e.g. "matrix(x?, y?, 0, 0, tx, ty)" (note no unit
// postfix, e.g. 10 instead of 10px)
String x = matrix.substring(7, matrix.length() - 1).split(",")[n]
.trim();
return Integer.parseInt(x);
} catch (NumberFormatException nfe) {
Logger.getLogger(DragSourceExtensionConnector.class.getName()).info(
"Unable to parse \"transform: translate(...)\" matrix " + n
+ ". value from computed style, matrix \"" + matrix
+ "\", drag image might not be visible");
}
return 0;
} | 3.68 |
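For illustration, a self-contained sketch of the same `matrix(a,b,c,d,tx,ty)` parsing idea, without the Vaadin/GWT logging dependencies (class and method names are hypothetical), might look like:

```java
public class MatrixValueSketch {

    /** Returns the nth value of a "matrix(a,b,c,d,tx,ty)" string, or 0 if it cannot be parsed. */
    static int getMatrixValue(String matrix, int n) {
        if (matrix == null || !matrix.startsWith("matrix(") || !matrix.endsWith(")")) {
            return 0;
        }
        // strip "matrix(" and the trailing ")" and split the comma-separated values
        String[] parts = matrix.substring(7, matrix.length() - 1).split(",");
        if (n < 0 || n >= parts.length) {
            return 0;
        }
        try {
            return Integer.parseInt(parts[n].trim());
        } catch (NumberFormatException nfe) {
            return 0;
        }
    }

    public static void main(String[] args) {
        System.out.println(getMatrixValue("matrix(1, 0, 0, 1, 42, 13)", 4)); // prints 42
        System.out.println(getMatrixValue("none", 4));                        // prints 0
    }
}
```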
flink_ManagedTableListener_notifyTableCreation | /** Notify for creating managed table. */
public ResolvedCatalogBaseTable<?> notifyTableCreation(
@Nullable Catalog catalog,
ObjectIdentifier identifier,
ResolvedCatalogBaseTable<?> table,
boolean isTemporary,
boolean ignoreIfExists) {
if (isManagedTable(catalog, table)) {
ResolvedCatalogTable managedTable = enrichOptions(identifier, table, isTemporary);
discoverManagedTableFactory(classLoader)
.onCreateTable(
createTableFactoryContext(identifier, managedTable, isTemporary),
ignoreIfExists);
return managedTable;
}
return table;
} | 3.68 |
shardingsphere-elasticjob_GuaranteeService_isRegisterStartSuccess | /**
     * Judge whether all current sharding items have registered start success.
     *
     * @param shardingItems current sharding items
     * @return whether all current sharding items have registered start success
*/
public boolean isRegisterStartSuccess(final Collection<Integer> shardingItems) {
for (int each : shardingItems) {
if (!jobNodeStorage.isJobNodeExisted(GuaranteeNode.getStartedNode(each))) {
return false;
}
}
return true;
} | 3.68 |
framework_ValoColorPickerTestUI_getStream | /*
* Must implement this method that returns the resource as a stream.
*/
@Override
public InputStream getStream() {
/* Create an image and draw something on it. */
BufferedImage image = new BufferedImage(270, 270,
BufferedImage.TYPE_INT_RGB);
Graphics drawable = image.getGraphics();
drawable.setColor(bgColor);
drawable.fillRect(0, 0, 270, 270);
drawable.setColor(fgColor);
drawable.fillOval(25, 25, 220, 220);
drawable.setColor(java.awt.Color.blue);
drawable.drawRect(0, 0, 269, 269);
drawable.setColor(java.awt.Color.black);
drawable.drawString("r=" + String.valueOf(fgColor.getRed()) + ",g="
+ String.valueOf(fgColor.getGreen()) + ",b="
+ String.valueOf(fgColor.getBlue()), 50, 100);
drawable.drawString("r=" + String.valueOf(bgColor.getRed()) + ",g="
+ String.valueOf(bgColor.getGreen()) + ",b="
+ String.valueOf(bgColor.getBlue()), 5, 15);
try {
/* Write the image to a buffer. */
imagebuffer = new ByteArrayOutputStream();
ImageIO.write(image, "png", imagebuffer);
/* Return a stream from the buffer. */
return new ByteArrayInputStream(imagebuffer.toByteArray());
} catch (IOException e) {
return null;
}
} | 3.68 |
framework_Dependency_findAndRegisterResources | /**
* Finds all the URLs defined for the given class using annotations for the
* given type, registers the URLs to the communication manager and adds the
* registered dependencies to the given list.
*
* @param type
* the type of dependencies to look for
* @param cls
* the class to scan
* @param manager
* a reference to the communication manager which tracks
* dependencies
* @param dependencies
* the list to add registered dependencies to
*/
@SuppressWarnings("deprecation")
private static void findAndRegisterResources(Type type,
Class<? extends ClientConnector> cls,
LegacyCommunicationManager manager, List<Dependency> dependencies) {
Annotation[] annotations = cls
.getAnnotationsByType(type.annotationType);
if (annotations != null) {
for (Annotation annotation : annotations) {
String[] resources;
if (annotation instanceof StyleSheet) {
resources = ((StyleSheet) annotation).value();
} else if (annotation instanceof JavaScript) {
resources = ((JavaScript) annotation).value();
} else if (annotation instanceof HtmlImport) {
resources = ((HtmlImport) annotation).value();
} else {
throw new IllegalArgumentException(
"Unknown annotation type: "
+ annotation.annotationType().getName());
}
for (String resource : resources) {
String url = manager.registerDependency(resource, cls);
dependencies.add(new Dependency(type, url));
}
}
}
} | 3.68 |
hadoop_ManifestCommitter_resolveDestinationDirectory | /**
* Get the final output path, including resolving any relative path.
* @param outputPath output path
* @param conf configuration to create any FS with
* @return a resolved path.
* @throws IOException failure.
*/
private Path resolveDestinationDirectory(Path outputPath,
Configuration conf) throws IOException {
return FileSystem.get(outputPath.toUri(), conf).makeQualified(outputPath);
} | 3.68 |
dubbo_RpcStatus_get | /**
     * Get the value for the specified key.
     *
     * @param key the key
     * @return the value
*/
public Object get(String key) {
return values.get(key);
} | 3.68 |
hbase_AggregateImplementation_start | /**
* Stores a reference to the coprocessor environment provided by the
* {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
* coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on
* a table region, so always expects this to be an instance of
* {@link RegionCoprocessorEnvironment}.
* @param env the environment provided by the coprocessor host
* @throws IOException if the provided environment is not an instance of
* {@code RegionCoprocessorEnvironment}
*/
@Override
public void start(CoprocessorEnvironment env) throws IOException {
if (env instanceof RegionCoprocessorEnvironment) {
this.env = (RegionCoprocessorEnvironment) env;
} else {
throw new CoprocessorException("Must be loaded on a table region!");
}
} | 3.68 |
hbase_ParseFilter_createCompareOperator | /**
* Takes a compareOperator symbol as a byte array and returns the corresponding CompareOperator
* @param compareOpAsByteArray the comparatorOperator symbol as a byte array
* @return the Compare Operator
*/
public static CompareOperator createCompareOperator(byte[] compareOpAsByteArray) {
ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray);
if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) return CompareOperator.LESS;
else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER))
return CompareOperator.LESS_OR_EQUAL;
else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) return CompareOperator.GREATER;
else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER))
return CompareOperator.GREATER_OR_EQUAL;
else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) return CompareOperator.NOT_EQUAL;
else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) return CompareOperator.EQUAL;
else throw new IllegalArgumentException("Invalid compare operator");
} | 3.68 |
dubbo_MemoryLimiter_fullyLock | /**
* Locks to prevent both acquires and releases.
*/
private void fullyLock() {
acquireLock.lock();
releaseLock.lock();
} | 3.68 |
dubbo_ServiceConfig_findConfiguredHosts | /**
* Register & bind IP address for service provider, can be configured separately.
* Configuration priority: environment variables -> java system properties -> host property in config file ->
* /etc/hosts -> default network address -> first available network address
*
     * @param protocolConfig the protocol config
     * @param provider the provider config
     * @param map the parameter map into which the bind ip is put
     * @return the host to register
*/
private static String findConfiguredHosts(
ProtocolConfig protocolConfig, ProviderConfig provider, Map<String, String> map) {
boolean anyhost = false;
String hostToBind = getValueFromConfig(protocolConfig, DUBBO_IP_TO_BIND);
if (StringUtils.isNotEmpty(hostToBind) && isInvalidLocalHost(hostToBind)) {
throw new IllegalArgumentException(
"Specified invalid bind ip from property:" + DUBBO_IP_TO_BIND + ", value:" + hostToBind);
}
// if bind ip is not found in environment, keep looking up
if (StringUtils.isEmpty(hostToBind)) {
hostToBind = protocolConfig.getHost();
if (provider != null && StringUtils.isEmpty(hostToBind)) {
hostToBind = provider.getHost();
}
if (isInvalidLocalHost(hostToBind)) {
anyhost = true;
if (logger.isDebugEnabled()) {
logger.debug("No valid ip found from environment, try to get local host.");
}
hostToBind = getLocalHost();
}
}
map.put(BIND_IP_KEY, hostToBind);
// bind ip is not used for registry ip by default
String hostToRegistry = getValueFromConfig(protocolConfig, DUBBO_IP_TO_REGISTRY);
if (StringUtils.isNotEmpty(hostToRegistry) && isInvalidLocalHost(hostToRegistry)) {
throw new IllegalArgumentException("Specified invalid registry ip from property:" + DUBBO_IP_TO_REGISTRY
+ ", value:" + hostToRegistry);
} else if (StringUtils.isEmpty(hostToRegistry)) {
// bind ip is used as registry ip by default
hostToRegistry = hostToBind;
}
map.put(ANYHOST_KEY, String.valueOf(anyhost));
return hostToRegistry;
} | 3.68 |
pulsar_FieldParser_value | /**
* Converts value as per appropriate DataType of the field.
*
* @param strValue
* : string value of the object
* @param field
* : field of the attribute
     * @return the converted value
*/
public static Object value(String strValue, Field field) {
requireNonNull(field);
// if field is not primitive type
Type fieldType = field.getGenericType();
if (fieldType instanceof ParameterizedType) {
Class<?> clazz = (Class<?>) ((ParameterizedType) field.getGenericType()).getActualTypeArguments()[0];
if (field.getType().equals(List.class)) {
// convert to list
return stringToList(strValue, clazz);
} else if (field.getType().equals(Set.class)) {
                // convert to set
return stringToSet(strValue, clazz);
} else if (field.getType().equals(Map.class)) {
Class<?> valueClass =
(Class<?>) ((ParameterizedType) field.getGenericType()).getActualTypeArguments()[1];
return stringToMap(strValue, clazz, valueClass);
} else if (field.getType().equals(Optional.class)) {
Type typeClazz = ((ParameterizedType) fieldType).getActualTypeArguments()[0];
if (typeClazz instanceof ParameterizedType) {
throw new IllegalArgumentException(format("unsupported non-primitive Optional<%s> for %s",
typeClazz.getClass(), field.getName()));
}
return Optional.ofNullable(convert(strValue, (Class) typeClazz));
} else {
throw new IllegalArgumentException(
format("unsupported field-type %s for %s", field.getType(), field.getName()));
}
} else {
return convert(strValue, field.getType());
}
} | 3.68 |
pulsar_ConsumerInterceptors_onAcknowledgeCumulative | /**
* This is called when acknowledge cumulative request return from the broker.
* <p>
* This method calls {@link ConsumerInterceptor#onAcknowledgeCumulative(Consumer, MessageId, Throwable)} method
* for each interceptor.
* <p>
 * This method does not throw exceptions. Exceptions thrown by any of the interceptors in the chain are logged, but not
* propagated.
*
* @param consumer the consumer which contains the interceptors
* @param messageId messages to acknowledge.
* @param exception exception returned by broker.
*/
public void onAcknowledgeCumulative(Consumer<T> consumer, MessageId messageId, Throwable exception) {
for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
try {
interceptors.get(i).onAcknowledgeCumulative(consumer, messageId, exception);
} catch (Throwable e) {
log.warn("Error executing interceptor onAcknowledgeCumulative callback ", e);
}
}
} | 3.68 |
hbase_ReplicationPeerConfigUtil_convert | /**
* Convert TableCFs Object to String. Output String Format:
* ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3
*/
public static String convert(ReplicationProtos.TableCF[] tableCFs) {
StringBuilder sb = new StringBuilder();
for (int i = 0, n = tableCFs.length; i < n; i++) {
ReplicationProtos.TableCF tableCF = tableCFs[i];
String namespace = tableCF.getTableName().getNamespace().toStringUtf8();
if (StringUtils.isNotEmpty(namespace)) {
sb.append(namespace).append(".")
.append(tableCF.getTableName().getQualifier().toStringUtf8()).append(":");
} else {
sb.append(tableCF.getTableName().toString()).append(":");
}
for (int j = 0; j < tableCF.getFamiliesCount(); j++) {
sb.append(tableCF.getFamilies(j).toStringUtf8()).append(",");
}
sb.deleteCharAt(sb.length() - 1).append(";");
}
if (sb.length() > 0) {
sb.deleteCharAt(sb.length() - 1);
}
return sb.toString();
} | 3.68 |
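As a rough standalone sketch of the `ns1.table1:cf1,cf2;table3` output format described above, using a plain map instead of the protobuf `TableCF` objects (all names are illustrative, not the HBase API):

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class TableCFsFormatSketch {

    /** Formats a map of table name -> column families as "ns.table:cf1,cf2;table2". */
    static String convert(LinkedHashMap<String, List<String>> tableCFs) {
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, List<String>> e : tableCFs.entrySet()) {
            sb.append(e.getKey());
            if (!e.getValue().isEmpty()) {
                sb.append(":").append(String.join(",", e.getValue()));
            }
            sb.append(";");
        }
        if (sb.length() > 0) {
            sb.deleteCharAt(sb.length() - 1); // drop trailing ";"
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        LinkedHashMap<String, List<String>> m = new LinkedHashMap<>();
        m.put("ns1.table1", Arrays.asList("cf1", "cf2"));
        m.put("table3", Collections.emptyList());
        System.out.println(convert(m)); // ns1.table1:cf1,cf2;table3
    }
}
```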
flink_TableFactoryUtil_buildCatalogStoreFactoryContext | /**
* Build a {@link CatalogStoreFactory.Context} for opening the {@link CatalogStoreFactory}.
*
* <p>The configuration format should be as follows:
*
* <pre>{@code
* table.catalog-store.kind: {identifier}
* table.catalog-store.{identifier}.{param1}: xxx
* table.catalog-store.{identifier}.{param2}: xxx
* }</pre>
*/
public static CatalogStoreFactory.Context buildCatalogStoreFactoryContext(
Configuration configuration, ClassLoader classLoader) {
String identifier = configuration.getString(CommonCatalogOptions.TABLE_CATALOG_STORE_KIND);
String catalogStoreOptionPrefix =
CommonCatalogOptions.TABLE_CATALOG_STORE_OPTION_PREFIX + identifier + ".";
Map<String, String> options =
new DelegatingConfiguration(configuration, catalogStoreOptionPrefix).toMap();
CatalogStoreFactory.Context context =
new FactoryUtil.DefaultCatalogStoreContext(options, configuration, classLoader);
return context;
} | 3.68 |
hadoop_InMemoryConfigurationStore_getConfirmedConfHistory | /**
* Configuration mutations not logged (i.e. not persisted) but directly
* confirmed. As such, a list of persisted configuration mutations does not
* exist.
   * @return null, as a persisted configuration mutation list is not applicable for this store.
*/
@Override
public List<LogMutation> getConfirmedConfHistory(long fromId) {
// Unimplemented.
return null;
} | 3.68 |
hbase_CompactionProgress_getProgressPct | /**
* getter for calculated percent complete
*/
public float getProgressPct() {
return (float) currentCompactedKVs / getTotalCompactingKVs();
} | 3.68 |
hadoop_FindOptions_setFollowLink | /**
* Sets flag indicating whether symbolic links should be followed.
*
* @param followLink true indicates follow links
*/
public void setFollowLink(boolean followLink) {
this.followLink = followLink;
} | 3.68 |
hbase_CompactionRequestImpl_setDescription | /**
* Sets the region/store name, for logging.
*/
public void setDescription(String regionName, String storeName) {
this.regionName = regionName;
this.storeName = storeName;
} | 3.68 |
hbase_CheckAndMutateResult_isSuccess | /** Returns Whether the CheckAndMutate operation is successful or not */
public boolean isSuccess() {
return success;
} | 3.68 |
hbase_Bytes_indexOf | /**
* Returns the start position of the first occurrence of the specified {@code
* target} within {@code array}, or {@code -1} if there is no such occurrence.
* <p>
* More formally, returns the lowest index {@code i} such that {@code
* java.util.Arrays.copyOfRange(array, i, i + target.length)} contains exactly the same elements
* as {@code target}.
* @param array the array to search for the sequence {@code target}
* @param target the array to search for as a sub-sequence of {@code array}
*/
public static int indexOf(byte[] array, byte[] target) {
checkNotNull(array, "array");
checkNotNull(target, "target");
if (target.length == 0) {
return 0;
}
outer: for (int i = 0; i < array.length - target.length + 1; i++) {
for (int j = 0; j < target.length; j++) {
if (array[i + j] != target[j]) {
continue outer;
}
}
return i;
}
return -1;
} | 3.68 |
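A self-contained sketch of the same naive sub-array search, with a small usage example (class name is illustrative, not the HBase API):

```java
public class IndexOfSketch {

    /** Returns the start index of the first occurrence of target within array, or -1. */
    static int indexOf(byte[] array, byte[] target) {
        if (target.length == 0) {
            return 0;
        }
        outer:
        for (int i = 0; i <= array.length - target.length; i++) {
            for (int j = 0; j < target.length; j++) {
                if (array[i + j] != target[j]) {
                    continue outer; // mismatch, try the next start position
                }
            }
            return i;
        }
        return -1;
    }

    public static void main(String[] args) {
        byte[] haystack = {1, 2, 3, 4, 5};
        byte[] needle = {3, 4};
        System.out.println(indexOf(haystack, needle));        // prints 2
        System.out.println(indexOf(haystack, new byte[]{9})); // prints -1
    }
}
```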
AreaShop_GeneralRegion_getLandlordName | /**
* Get the name of the landlord.
	 * @return The name of the landlord; if unavailable by UUID the old cached name is returned, and if that is also unavailable null is returned
*/
public String getLandlordName() {
String result = Utils.toName(getLandlord());
if(result == null || result.isEmpty()) {
result = config.getString("general.landlordName");
if(result == null || result.isEmpty()) {
result = null;
}
}
return result;
} | 3.68 |
hbase_ColumnRangeFilter_parseFrom | /**
* Parse a serialized representation of {@link ColumnRangeFilter}
* @param pbBytes A pb serialized {@link ColumnRangeFilter} instance
* @return An instance of {@link ColumnRangeFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ColumnRangeFilter proto;
try {
proto = FilterProtos.ColumnRangeFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new ColumnRangeFilter(proto.hasMinColumn() ? proto.getMinColumn().toByteArray() : null,
proto.getMinColumnInclusive(),
proto.hasMaxColumn() ? proto.getMaxColumn().toByteArray() : null,
proto.getMaxColumnInclusive());
} | 3.68 |
framework_HierarchicalContainer_filterIncludingParents | /**
* Scans the itemId and all its children for which items should be included
     * when filtering. All items which pass the filters are included.
* Additionally all items that have a child node that should be included are
* also themselves included.
*
* @param itemId
* @param includedItems
* @return true if the itemId should be included in the filtered container.
*/
private boolean filterIncludingParents(Object itemId,
HashSet<Object> includedItems) {
boolean toBeIncluded = passesFilters(itemId);
LinkedList<Object> childList = children.get(itemId);
if (childList != null) {
for (Object childItemId : children.get(itemId)) {
toBeIncluded |= filterIncludingParents(childItemId,
includedItems);
}
}
if (toBeIncluded) {
includedItems.add(itemId);
}
return toBeIncluded;
} | 3.68 |
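A minimal standalone sketch of the same parent-including tree filter, expressed with plain JDK collections and a `Predicate` instead of the Vaadin container internals (all names are illustrative):

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

public class TreeFilterSketch {

    /** Includes an item if it passes the filter or if any descendant is included. */
    static boolean filterIncludingParents(Object itemId,
                                          Map<Object, List<Object>> children,
                                          Predicate<Object> passesFilter,
                                          Set<Object> includedItems) {
        boolean toBeIncluded = passesFilter.test(itemId);
        for (Object child : children.getOrDefault(itemId, Collections.emptyList())) {
            toBeIncluded |= filterIncludingParents(child, children, passesFilter, includedItems);
        }
        if (toBeIncluded) {
            includedItems.add(itemId);
        }
        return toBeIncluded;
    }

    public static void main(String[] args) {
        Map<Object, List<Object>> children = new HashMap<>();
        children.put("root", Arrays.asList("a", "b"));
        children.put("a", Arrays.asList("a1"));
        Set<Object> included = new HashSet<>();
        // only "a1" matches, but "a" and "root" are kept because they are its ancestors
        filterIncludingParents("root", children, id -> id.equals("a1"), included);
        System.out.println(included); // contains root, a and a1 (order unspecified)
    }
}
```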
hbase_Procedure_afterReplay | /**
* Called when the procedure is ready to be added to the queue after the loading/replay operation.
*/
protected void afterReplay(TEnvironment env) {
// no-op
} | 3.68 |
hadoop_RBFMetrics_close | /**
* Unregister the JMX beans.
*/
public void close() {
if (this.routerBeanName != null) {
MBeans.unregister(routerBeanName);
}
if (this.federationBeanName != null) {
MBeans.unregister(federationBeanName);
}
MetricsSystem ms = DefaultMetricsSystem.instance();
ms.unregisterSource(RBFMetrics.class.getName());
} | 3.68 |
pulsar_SaslRoleToken_setExpires | /**
* Sets the expiration of the token.
*
* @param expires expiration time of the token in milliseconds since the epoch.
*/
public void setExpires(long expires) {
if (this != SaslRoleToken.ANONYMOUS) {
this.expires = expires;
generateToken();
}
} | 3.68 |
hadoop_User_getShortName | /**
* Get the user name up to the first '/' or '@'
* @return the leading part of the user name
*/
public String getShortName() {
return shortName;
} | 3.68 |
hbase_MasterObserver_postIsRpcThrottleEnabled | /**
   * Called after checking whether rpc throttle is enabled.
* @param ctx the coprocessor instance's environment
* @param rpcThrottleEnabled the rpc throttle enabled value
*/
default void postIsRpcThrottleEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean rpcThrottleEnabled) throws IOException {
} | 3.68 |
hmily_MetricsReporter_registerMetrics | /**
* Register metrics.
*
* @param metrics metric collection
*/
public static void registerMetrics(final Collection<Metric> metrics) {
for (Metric metric : metrics) {
switch (metric.getType()) {
case COUNTER:
registerCounter(metric.getName(), getLabelNames(metric.getLabels()), metric.getDocument());
break;
case GAUGE:
registerGauge(metric.getName(), getLabelNames(metric.getLabels()), metric.getDocument());
break;
case HISTOGRAM:
registerHistogram(metric.getName(), getLabelNames(metric.getLabels()), metric.getDocument());
break;
default:
throw new RuntimeException("we not support metric registration for type: " + metric.getType());
}
}
} | 3.68 |
framework_Component_getComponent | /**
* Gets the component where the event occurred.
*
* @return the source component of the event
*/
public Component getComponent() {
return (Component) getSource();
} | 3.68 |
hadoop_ClusterMetrics_getReservedMapSlots | /**
* Get number of reserved map slots in the cluster.
*
* @return reserved map slot count
*/
public int getReservedMapSlots() {
return reservedMapSlots;
} | 3.68 |
flink_MemorySegmentFactory_allocateUnpooledSegment | /**
* Allocates some unpooled memory and creates a new memory segment that represents that memory.
*
* <p>This method is similar to {@link #allocateUnpooledSegment(int)}, but additionally sets the
* owner of the memory segment.
*
* @param size The size of the memory segment to allocate.
* @param owner The owner to associate with the memory segment.
* @return A new memory segment, backed by unpooled heap memory.
*/
public static MemorySegment allocateUnpooledSegment(int size, Object owner) {
return new MemorySegment(new byte[size], owner);
} | 3.68 |
flink_HandlerRequestUtils_getQueryParameter | /**
* Returns the value of a query parameter, or {@code null} if the query parameter is not set.
*
* @throws RestHandlerException If the query parameter is repeated.
*/
public static <
X,
P extends MessageQueryParameter<X>,
R extends RequestBody,
M extends MessageParameters>
X getQueryParameter(final HandlerRequest<R> request, final Class<P> queryParameterClass)
throws RestHandlerException {
return getQueryParameter(request, queryParameterClass, null);
} | 3.68 |
framework_MenuBarElement_closeAll | /**
* Closes all submenus, if any is open.<br>
* This is done by clicking on the currently selected top level item.
*/
private void closeAll() {
lastItemLocationMovedTo = null;
WebElement selectedItem = getSelectedTopLevelItem();
if (selectedItem != null) {
activateOrOpenSubmenu(selectedItem, true);
}
} | 3.68 |
framework_VFlash_setStandby | /**
* Sets standby.
*
* @param standby
* the standby text
*/
public void setStandby(String standby) {
if (this.standby != standby) {
this.standby = standby;
needsRebuild = true;
}
} | 3.68 |
hadoop_BoundedByteArrayOutputStream_getLimit | /**
* Return the current limit.
* @return limit.
*/
public int getLimit() {
return limit;
} | 3.68 |
hbase_ChoreService_printChoreDetails | /** Prints a summary of important details about the chore. Used for debugging purposes */
private void printChoreDetails(final String header, ScheduledChore chore) {
if (!LOG.isTraceEnabled()) {
return;
}
LinkedHashMap<String, String> output = new LinkedHashMap<>();
output.put(header, "");
output.put("Chore name: ", chore.getName());
output.put("Chore period: ", Integer.toString(chore.getPeriod()));
output.put("Chore timeBetweenRuns: ", Long.toString(chore.getTimeBetweenRuns()));
for (Entry<String, String> entry : output.entrySet()) {
LOG.trace(entry.getKey() + entry.getValue());
}
} | 3.68 |
hadoop_QueueCapacityConfigParser_parse | /**
* Creates a {@code QueueCapacityVector} parsed from the capacity configuration
* property set for a queue.
* @param capacityString capacity string to parse
* @param queuePath queue for which the capacity property is parsed
* @return a parsed capacity vector
*/
public QueueCapacityVector parse(String capacityString, String queuePath) {
if (queuePath.equals(CapacitySchedulerConfiguration.ROOT)) {
return QueueCapacityVector.of(100f, ResourceUnitCapacityType.PERCENTAGE);
}
if (capacityString == null) {
return new QueueCapacityVector();
}
// Trim all spaces from capacity string
capacityString = capacityString.replaceAll(" ", "");
for (Parser parser : parsers) {
Matcher matcher = parser.regex.matcher(capacityString);
if (matcher.find()) {
return parser.parser.apply(matcher);
}
}
return new QueueCapacityVector();
} | 3.68 |
flink_DoubleMaximum_add | /** Consider using {@link #add(double)} instead for primitive double values */
@Override
public void add(Double value) {
this.max = Math.max(this.max, value);
} | 3.68 |
hadoop_AWSRequestAnalyzer_request | /**
* A request.
* @param verb verb
* @param mutating does this update the store
* @param key object/prefix, etc.
* @param size nullable size
* @return request info
*/
private RequestInfo request(final String verb,
final boolean mutating,
final String key,
final Number size) {
return new RequestInfo(verb, mutating, key, size);
} | 3.68 |
flink_LeaderInformationRegister_merge | /**
* Merges another {@code LeaderInformationRegister} with additional leader information into a
* new {@code LeaderInformationRegister} instance. Any existing {@link LeaderInformation} for
* the passed {@code componentId} will be overwritten.
*
* <p>Empty {@code LeaderInformation} results in the removal of the corresponding entry (if it
* exists).
*/
public static LeaderInformationRegister merge(
@Nullable LeaderInformationRegister leaderInformationRegister,
String componentId,
LeaderInformation leaderInformation) {
final Map<String, LeaderInformation> existingLeaderInformation =
new HashMap<>(
leaderInformationRegister == null
? Collections.emptyMap()
: leaderInformationRegister.leaderInformationPerComponentId);
if (leaderInformation.isEmpty()) {
existingLeaderInformation.remove(componentId);
} else {
existingLeaderInformation.put(componentId, leaderInformation);
}
return new LeaderInformationRegister(existingLeaderInformation);
} | 3.68 |
morf_AbstractSqlDialectTest_expectedHints7 | /**
   * @return The expected SQL for the {@link SelectStatement#withCustomHint(CustomHint customHint)} directive. Tests that the OracleDialect adds the hints successfully.
*/
protected String expectedHints7() {
return "SELECT * FROM SCHEMA2.Foo"; //NOSONAR
} | 3.68 |
framework_VComboBox_createTextBox | /**
* This method will create the TextBox used by the VComboBox instance. It is
* invoked during the Constructor and should only be overridden if a custom
* TextBox shall be used. The overriding method cannot use any instance
* variables.
*
* @since 7.1.5
* @return TextBox instance used by this VComboBox
*/
protected TextBox createTextBox() {
return new FilterSelectTextBox();
} | 3.68 |
rocketmq-connect_AbstractConnectController_pauseConnector | /**
* Pause the connector. This call will asynchronously suspend processing by the connector and all
* of its tasks.
*
* @param connector name of the connector
*/
public void pauseConnector(String connector) {
configManagementService.pauseConnector(connector);
} | 3.68 |
flink_TaskEventHandler_publish | /**
* Publishes the task event to all subscribed event listeners.
*
* @param event The event to publish.
*/
public void publish(TaskEvent event) {
synchronized (listeners) {
for (EventListener<TaskEvent> listener : listeners.get(event.getClass())) {
listener.onEvent(event);
}
}
} | 3.68 |
hbase_CachedMobFile_open | /**
* Opens the mob file if it's not opened yet and increases the reference. It's not thread-safe.
* Use MobFileCache.openFile() instead. The reader of the mob file is just opened when it's not
* opened no matter how many times this open() method is invoked. The reference is a counter that
* how many times this reader is referenced. When the reference is 0, this reader is closed.
*/
@Override
public void open() throws IOException {
super.open();
referenceCount.incrementAndGet();
} | 3.68 |
hbase_HFileLink_getArchivePath | /** Returns the path of the archived hfile. */
public Path getArchivePath() {
return this.archivePath;
} | 3.68 |
rocketmq-connect_WorkerConnector_getConnectorName | /**
     * Get the connector name.
     *
     * @return the connector name
*/
public String getConnectorName() {
return connectorName;
} | 3.68 |
pulsar_ManagedLedgerConfig_setMinimumRolloverTime | /**
* Set the minimum rollover time for ledgers in this managed ledger.
*
* <p/>If this time is > 0, a ledger will not be rolled over more frequently than the specified time, even if it has
* reached the maximum number of entries or maximum size. This parameter can be used to reduce the amount of
* rollovers on managed ledger with high write throughput.
*
* @param minimumRolloverTime
* the minimum rollover time
* @param unit
* the time unit
*/
public void setMinimumRolloverTime(int minimumRolloverTime, TimeUnit unit) {
this.minimumRolloverTimeMs = (int) unit.toMillis(minimumRolloverTime);
checkArgument(maximumRolloverTimeMs >= minimumRolloverTimeMs,
"Minimum rollover time needs to be less than maximum rollover time");
} | 3.68 |
shardingsphere-elasticjob_ShardingService_getLocalShardingItems | /**
* Get sharding items from localhost job server.
*
* @return sharding items from localhost job server
*/
public List<Integer> getLocalShardingItems() {
if (JobRegistry.getInstance().isShutdown(jobName) || !serverService.isAvailableServer(JobRegistry.getInstance().getJobInstance(jobName).getServerIp())) {
return Collections.emptyList();
}
return getShardingItems(JobRegistry.getInstance().getJobInstance(jobName).getJobInstanceId());
} | 3.68 |
hbase_Scan_setTimestamp | /**
* Get versions of columns with the specified timestamp. Note, default maximum versions to return
* is 1. If your time range spans more than one version and you want all versions returned, up the
   * number of versions beyond the default.
* @param timestamp version timestamp
* @see #readAllVersions()
* @see #readVersions(int)
*/
public Scan setTimestamp(long timestamp) {
try {
tr = TimeRange.at(timestamp);
} catch (Exception e) {
// This should never happen, unless integer overflow or something extremely wrong...
LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
throw e;
}
return this;
} | 3.68 |
hadoop_BCFile_prepareDataBlock | /**
* Create a Data Block and obtain an output stream for adding data into the
* block. There can only be one BlockAppender stream active at any time.
* Data Blocks may not be created after the first Meta Blocks. The caller
* must call BlockAppender.close() to conclude the block creation.
*
* @return The BlockAppender stream
* @throws IOException
*/
public BlockAppender prepareDataBlock() throws IOException {
if (blkInProgress == true) {
throw new IllegalStateException(
"Cannot create Data Block until previous block is closed.");
}
if (metaBlkSeen == true) {
throw new IllegalStateException(
"Cannot create Data Block after Meta Blocks.");
}
DataBlockRegister dbr = new DataBlockRegister();
WBlockState wbs =
new WBlockState(getDefaultCompressionAlgorithm(), out,
fsOutputBuffer, conf);
BlockAppender ba = new BlockAppender(dbr, wbs);
blkInProgress = true;
return ba;
} | 3.68 |
hadoop_HAProxyFactory_setAlignmentContext | /**
* Set the alignment context to be used when creating new proxies using
* this factory. Not all implementations will use this alignment context.
*/
default void setAlignmentContext(AlignmentContext alignmentContext) {
// noop
} | 3.68 |
framework_VComboBox_popupOpenerClicked | /**
* Record that the popup opener has been clicked and the popup should be
* opened on the next request.
*
* This handles the special case where are not filtering yet and the
* selected value has changed on the server-side. See #2119. The flag is
* cleared on each server reply.
*/
public void popupOpenerClicked() {
popupOpenerClicked = true;
showPopup = true;
} | 3.68 |
hadoop_AllocateRequest_responseId | /**
* Set the <code>responseId</code> of the request.
* @see AllocateRequest#setResponseId(int)
* @param responseId <code>responseId</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Stable
public AllocateRequestBuilder responseId(int responseId) {
allocateRequest.setResponseId(responseId);
return this;
} | 3.68 |
hadoop_ResourceSkyline_getJobId | /**
* Get the id of the job.
*
* @return the id of this job.
*/
public final String getJobId() {
return jobId;
} | 3.68 |
framework_SortOrder_getOpposite | /**
* Returns a new SortOrder object with the sort direction reversed.
*
* @return a new sort order object
*/
public SortOrder getOpposite() {
return new SortOrder(column, direction.getOpposite());
} | 3.68 |
flink_KubernetesJobGraphStoreUtil_nameToJobID | /**
* Convert a key in ConfigMap to {@link JobID}. The key is stored with prefix {@link
* Constants#JOB_GRAPH_STORE_KEY_PREFIX}.
*
* @param key job graph key in ConfigMap.
* @return the parsed {@link JobID}.
*/
public JobID nameToJobID(String key) {
return JobID.fromHexString(key.substring(JOB_GRAPH_STORE_KEY_PREFIX.length()));
} | 3.68 |
framework_VAccordion_setWidth | /**
* Sets the width of the stack item, or removes it if given value is
* {@code -1}.
*
* @param width
* the width to set (in pixels), or {@code -1} to remove
* width
*/
public void setWidth(int width) {
if (width == -1) {
super.setWidth("");
} else {
super.setWidth(width + "px");
}
} | 3.68 |
framework_VComboBox_setNextButtonActive | /**
* Should the next page button be visible to the user?
*
* @param active
*/
private void setNextButtonActive(boolean active) {
if (enableDebug) {
debug("VComboBox.SP: setNextButtonActive(" + active + ")");
}
if (active) {
DOM.sinkEvents(down, Event.ONCLICK);
down.setClassName(
VComboBox.this.getStylePrimaryName() + "-nextpage");
} else {
DOM.sinkEvents(down, 0);
down.setClassName(
VComboBox.this.getStylePrimaryName() + "-nextpage-off");
}
} | 3.68 |
graphhopper_GraphHopper_loadOrPrepareLM | /**
* For landmarks it is required to always call this method: either it creates the landmark data or it loads it.
*/
protected void loadOrPrepareLM(boolean closeEarly) {
for (LMProfile profile : lmPreparationHandler.getLMProfiles())
if (!getLMProfileVersion(profile.getProfile()).isEmpty()
&& !getLMProfileVersion(profile.getProfile()).equals("" + profilesByName.get(profile.getProfile()).getVersion()))
throw new IllegalArgumentException("LM preparation of " + profile.getProfile() + " already exists in storage and doesn't match configuration");
// we load landmark storages that already exist and prepare the other ones
List<LMConfig> lmConfigs = createLMConfigs(lmPreparationHandler.getLMProfiles());
List<LandmarkStorage> loaded = lmPreparationHandler.load(lmConfigs, baseGraph, encodingManager);
List<LMConfig> loadedConfigs = loaded.stream().map(LandmarkStorage::getLMConfig).collect(Collectors.toList());
List<LMConfig> configsToPrepare = lmConfigs.stream().filter(c -> !loadedConfigs.contains(c)).collect(Collectors.toList());
List<PrepareLandmarks> prepared = prepareLM(closeEarly, configsToPrepare);
// we map all profile names for which there is LM support to the according LM storages
landmarks = new LinkedHashMap<>();
for (LMProfile lmp : lmPreparationHandler.getLMProfiles()) {
// cross-querying
String prepProfile = lmp.usesOtherPreparation() ? lmp.getPreparationProfile() : lmp.getProfile();
Optional<LandmarkStorage> loadedLMS = loaded.stream().filter(lms -> lms.getLMConfig().getName().equals(prepProfile)).findFirst();
Optional<PrepareLandmarks> preparedLMS = prepared.stream().filter(pl -> pl.getLandmarkStorage().getLMConfig().getName().equals(prepProfile)).findFirst();
if (loadedLMS.isPresent() && preparedLMS.isPresent())
throw new IllegalStateException("LM should be either loaded or prepared, but not both: " + prepProfile);
else if (preparedLMS.isPresent()) {
setLMProfileVersion(lmp.getProfile(), profilesByName.get(lmp.getProfile()).getVersion());
landmarks.put(lmp.getProfile(), preparedLMS.get().getLandmarkStorage());
} else
loadedLMS.ifPresent(landmarkStorage -> landmarks.put(lmp.getProfile(), landmarkStorage));
}
} | 3.68 |
flink_YarnApplicationFileUploader_registerProvidedLocalResources | /**
* Register all the files in the provided lib directories as Yarn local resources with PUBLIC
* visibility, which means that they will be cached in the nodes and reused by different
* applications.
*
* @return list of class paths with the file name
*/
List<String> registerProvidedLocalResources() {
checkNotNull(localResources);
final ArrayList<String> classPaths = new ArrayList<>();
final Set<String> resourcesJar = new HashSet<>();
final Set<String> resourcesDir = new HashSet<>();
providedSharedLibs.forEach(
(fileName, fileStatus) -> {
final Path filePath = fileStatus.getPath();
LOG.debug("Using remote file {} to register local resource", filePath);
final YarnLocalResourceDescriptor descriptor =
YarnLocalResourceDescriptor.fromFileStatus(
fileName,
fileStatus,
LocalResourceVisibility.PUBLIC,
LocalResourceType.FILE);
localResources.put(fileName, descriptor.toLocalResource());
remotePaths.add(filePath);
envShipResourceList.add(descriptor);
if (!isFlinkDistJar(filePath.getName()) && !isPlugin(filePath)) {
if (fileName.endsWith("jar")) {
resourcesJar.add(fileName);
} else {
resourcesDir.add(new Path(fileName).getParent().toString());
}
} else if (isFlinkDistJar(filePath.getName())) {
flinkDist = descriptor;
}
});
// Construct classpath where resource directories go first followed
// by resource files. Sort both resources and resource directories in
// order to make classpath deterministic.
resourcesDir.stream().sorted().forEach(classPaths::add);
resourcesJar.stream().sorted().forEach(classPaths::add);
return classPaths;
} | 3.68 |
flink_TimeEvictor_hasTimestamp | /**
* Returns true if the first element in the Iterable of {@link TimestampedValue} has a
* timestamp.
*/
private boolean hasTimestamp(Iterable<TimestampedValue<Object>> elements) {
Iterator<TimestampedValue<Object>> it = elements.iterator();
if (it.hasNext()) {
return it.next().hasTimestamp();
}
return false;
} | 3.68 |
framework_ComboBoxConnector_updateFromUIDL | /*
* (non-Javadoc)
*
* @see com.vaadin.client.Paintable#updateFromUIDL(com.vaadin.client.UIDL,
* com.vaadin.client.ApplicationConnection)
*/
@Override
public void updateFromUIDL(UIDL uidl, ApplicationConnection client) {
VFilterSelect widget = getWidget();
// Save details
widget.client = client;
widget.paintableId = uidl.getId();
widget.readonly = isReadOnly();
widget.updateReadOnly();
if (!isRealUpdate(uidl)) {
return;
}
// Inverse logic here to make the default case (text input enabled)
// work without additional UIDL messages
boolean noTextInput = uidl
.hasAttribute(ComboBoxConstants.ATTR_NO_TEXT_INPUT)
&& uidl.getBooleanAttribute(
ComboBoxConstants.ATTR_NO_TEXT_INPUT);
widget.setTextInputEnabled(!noTextInput);
// not a FocusWidget -> needs own tabindex handling
widget.tb.setTabIndex(getState().tabIndex);
if (uidl.hasAttribute("filteringmode")) {
widget.filteringmode = FilteringMode
.valueOf(uidl.getStringAttribute("filteringmode"));
}
widget.immediate = getState().immediate;
widget.nullSelectionAllowed = uidl.hasAttribute("nullselect");
widget.nullSelectItem = uidl.hasAttribute("nullselectitem")
&& uidl.getBooleanAttribute("nullselectitem");
widget.currentPage = uidl.getIntVariable("page");
if (uidl.hasAttribute("pagelength")) {
widget.pageLength = uidl.getIntAttribute("pagelength");
}
if (uidl.hasAttribute(ComboBoxConstants.ATTR_INPUTPROMPT)) {
// input prompt changed from server
widget.inputPrompt = uidl
.getStringAttribute(ComboBoxConstants.ATTR_INPUTPROMPT);
} else {
widget.inputPrompt = "";
}
if (uidl.hasAttribute("suggestionPopupWidth")) {
widget.suggestionPopupWidth = uidl
.getStringAttribute("suggestionPopupWidth");
} else {
widget.suggestionPopupWidth = null;
}
if (uidl.hasAttribute("suggestionPopupWidth")) {
widget.suggestionPopupWidth = uidl
.getStringAttribute("suggestionPopupWidth");
} else {
widget.suggestionPopupWidth = null;
}
widget.suggestionPopup.updateStyleNames(uidl, getState());
widget.allowNewItem = uidl.hasAttribute("allownewitem");
widget.lastNewItemString = null;
final UIDL options = uidl.getChildUIDL(0);
if (uidl.hasAttribute("totalMatches")) {
widget.totalMatches = uidl.getIntAttribute("totalMatches");
} else {
widget.totalMatches = 0;
}
List<FilterSelectSuggestion> newSuggestions = new ArrayList<FilterSelectSuggestion>();
for (final Object child : options) {
final UIDL optionUidl = (UIDL) child;
final FilterSelectSuggestion suggestion = widget.new FilterSelectSuggestion(
optionUidl);
newSuggestions.add(suggestion);
}
// only close the popup if the suggestions list has actually changed
boolean suggestionsChanged = !widget.initDone
|| !newSuggestions.equals(widget.currentSuggestions);
// An ItemSetChangeEvent on server side clears the current suggestion
// popup. Popup needs to be repopulated with suggestions from UIDL.
boolean popupOpenAndCleared = false;
oldSuggestionTextMatchTheOldSelection = false;
if (suggestionsChanged) {
oldSuggestionTextMatchTheOldSelection = isWidgetsCurrentSelectionTextInTextBox();
widget.currentSuggestions.clear();
if (!widget.waitingForFilteringResponse) {
/*
* Clear the current suggestions as the server response always
* includes the new ones. Exception is when filtering, then we
* need to retain the value if the user does not select any of
* the options matching the filter.
*/
widget.currentSuggestion = null;
/*
* Also ensure no old items in menu. Unless cleared the old
* values may cause odd effects on blur events. Suggestions in
* menu might not necessary exist in select at all anymore.
*/
widget.suggestionPopup.menu.clearItems();
popupOpenAndCleared = widget.suggestionPopup.isAttached();
}
for (FilterSelectSuggestion suggestion : newSuggestions) {
widget.currentSuggestions.add(suggestion);
}
}
// handle selection (null or a single value)
if (uidl.hasVariable("selected")
// In case we're switching page no need to update the selection as the
// selection process didn't finish.
// && widget.selectPopupItemWhenResponseIsReceived ==
// VFilterSelect.Select.NONE
//
) {
String[] selectedKeys = uidl.getStringArrayVariable("selected");
// when filtering with empty filter, server sets the selected key
// to "", which we don't select here. Otherwise we won't be able to
// reset back to the item that was selected before filtering
// started.
if (selectedKeys.length > 0 && !selectedKeys[0].equals("")) {
performSelection(selectedKeys[0]);
// if selected key is available, assume caption is know based on
// it as well and clear selected caption
widget.setSelectedCaption(null);
} else if (!widget.waitingForFilteringResponse
&& uidl.hasAttribute("selectedCaption")) {
// scrolling to correct page is disabled, caption is passed as a
// special parameter
widget.setSelectedCaption(
uidl.getStringAttribute("selectedCaption"));
} else {
resetSelection();
}
}
if ((widget.waitingForFilteringResponse
&& widget.lastFilter.toLowerCase(Locale.ROOT)
.equals(uidl.getStringVariable("filter")))
|| popupOpenAndCleared) {
widget.suggestionPopup.showSuggestions(widget.currentSuggestions,
widget.currentPage, widget.totalMatches);
widget.waitingForFilteringResponse = false;
if (!widget.popupOpenerClicked
&& widget.selectPopupItemWhenResponseIsReceived != VFilterSelect.Select.NONE) {
// we're paging w/ arrows
Scheduler.get().scheduleDeferred(new ScheduledCommand() {
@Override
public void execute() {
navigateItemAfterPageChange();
}
});
}
if (widget.updateSelectionWhenReponseIsReceived) {
widget.suggestionPopup.menu
.doPostFilterSelectedItemAction();
}
}
// Calculate minimum textarea width
widget.updateSuggestionPopupMinWidth();
widget.popupOpenerClicked = false;
/*
* if this is our first time we need to recalculate the root width.
*/
if (!widget.initDone) {
widget.updateRootWidth();
}
// Focus dependent style names are lost during the update, so we add
// them here back again
if (widget.focused) {
widget.addStyleDependentName("focus");
}
widget.initDone = true;
} | 3.68 |
framework_ScrollbarBundle_setScrollSize | /**
* Sets the amount of pixels the scrollbar needs to be able to scroll
* through.
*
* @param px
* the number of pixels the scrollbar should be able to scroll
* through
* @see #setOffsetSizeAndScrollSize(double, double)
*/
public final void setScrollSize(final double px) {
boolean newScrollSizeIsSmallerThanOffsetSize = px <= getOffsetSize();
boolean scrollSizeBecomesSmallerThanOffsetSize = showsScrollHandle()
&& newScrollSizeIsSmallerThanOffsetSize;
if (scrollSizeBecomesSmallerThanOffsetSize && getScrollPos() != 0) {
setScrollPos(0);
setScrollSizeNow(px);
} else if (px != getScrollSize()) {
setScrollSizeNow(px);
}
} | 3.68 |
framework_Notification_setIcon | /**
* Sets the icon part of the notification message.
*
* @param icon
* The desired message icon
*/
public void setIcon(Resource icon) {
setResource("icon", icon);
} | 3.68 |
hmily_DatabaseMetaDataDialectHandler_formatTableNamePattern | /**
* Format table name pattern.
*
* @param tableNamePattern table name pattern
* @return formatted table name pattern
*/
default String formatTableNamePattern(final String tableNamePattern) {
return tableNamePattern;
} | 3.68 |
hudi_HoodieTableMetaserverClient_getActiveTimeline | /**
* Get the active instants as a timeline.
*
* @return Active instants timeline
*/
public synchronized HoodieActiveTimeline getActiveTimeline() {
if (activeTimeline == null) {
activeTimeline = new HoodieMetaserverBasedTimeline(this, metaserverConfig);
}
return activeTimeline;
} | 3.68 |
framework_Color_getRed | /**
* Returns the red value of the color.
*
*/
public int getRed() {
return red;
} | 3.68 |
flink_AsynchronousFileIOChannel_closeAndDelete | /**
* This method waits for all pending asynchronous requests to return. When the last request has
* returned, the channel is closed and deleted.
*
* <p>Even if an exception interrupts the closing, such that not all request are handled, the
* underlying <tt>FileChannel</tt> is closed and deleted.
*
* @throws IOException Thrown, if an I/O exception occurred while waiting for the buffers, or if
* the closing was interrupted.
*/
@Override
public void closeAndDelete() throws IOException {
try {
close();
} finally {
deleteChannel();
}
} | 3.68 |
flink_FunctionAnnotation_readSingleForwardAnnotations | /**
* Reads the annotations of a user defined function with one input and returns semantic
* properties according to the forwarded fields annotated.
*
* @param udfClass The user defined function, represented by its class.
     * @return The set of forwarded-fields related annotations, or {@code null} if none are present.
*/
@Internal
public static Set<Annotation> readSingleForwardAnnotations(Class<?> udfClass) {
ForwardedFields forwardedFields = udfClass.getAnnotation(ForwardedFields.class);
NonForwardedFields nonForwardedFields = udfClass.getAnnotation(NonForwardedFields.class);
ReadFields readSet = udfClass.getAnnotation(ReadFields.class);
Set<Annotation> annotations = new HashSet<Annotation>();
if (forwardedFields != null) {
annotations.add(forwardedFields);
}
if (nonForwardedFields != null) {
if (!annotations.isEmpty()) {
throw new InvalidProgramException(
"Either "
+ ForwardedFields.class.getSimpleName()
+ " or "
+ NonForwardedFields.class.getSimpleName()
+ " can be annotated to a function, not both.");
}
annotations.add(nonForwardedFields);
}
if (readSet != null) {
annotations.add(readSet);
}
return !annotations.isEmpty() ? annotations : null;
} | 3.68 |
flink_VertexThreadInfoStats_getEndTime | /**
* Returns the timestamp, when all samples where collected.
*
* @return Timestamp, when all samples where collected
*/
@Override
public long getEndTime() {
return endTime;
} | 3.68 |
hadoop_Sets_newHashSet | /**
* Creates a <i>mutable</i> {@code HashSet} instance containing the given
* elements. A very thin convenience for creating an empty set and then
* calling Iterators#addAll.
*
* <p><b>Note:</b> if mutability is not required and the elements are
* non-null, use ImmutableSet#copyOf(Iterator) instead.</p>
*
* <p><b>Note:</b> if {@code E} is an {@link Enum} type, you should create
* an {@link EnumSet} instead.</p>
*
* <p>Overall, this method is not very useful and will likely be deprecated
* in the future.</p>
*
* @param <E> Generics Type E.
* @param elements elements.
   * @return a new, mutable {@code HashSet} containing the given elements.
*/
public static <E> HashSet<E> newHashSet(Iterator<? extends E> elements) {
HashSet<E> set = newHashSet();
addAll(set, elements);
return set;
} | 3.68 |
hbase_ChoreService_requestCorePoolIncrease | /**
* Represents a request to increase the number of core pool threads. Typically a request
* originates from the fact that the current core pool size is not sufficient to service all of
* the currently running Chores
* @return true when the request to increase the core pool size succeeds
*/
private synchronized boolean requestCorePoolIncrease() {
// There is no point in creating more threads than scheduledChores.size since scheduled runs
// of the same chore cannot run concurrently (i.e. happen-before behavior is enforced
// amongst occurrences of the same chore).
if (scheduler.getCorePoolSize() < scheduledChores.size()) {
scheduler.setCorePoolSize(scheduler.getCorePoolSize() + 1);
printChoreServiceDetails("requestCorePoolIncrease");
return true;
}
return false;
} | 3.68 |
dubbo_PathUtils_normalize | /**
* Normalize path:
* <ol>
* <li>To remove query string if presents</li>
* <li>To remove duplicated slash("/") if exists</li>
* </ol>
*
* @param path path to be normalized
* @return a normalized path if required
*/
static String normalize(String path) {
if (isEmpty(path)) {
return SLASH;
}
String normalizedPath = path;
int index = normalizedPath.indexOf(QUESTION_MASK);
if (index > -1) {
normalizedPath = normalizedPath.substring(0, index);
}
while (normalizedPath.contains("//")) {
normalizedPath = replace(normalizedPath, "//", "/");
}
return normalizedPath;
} | 3.68 |
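A self-contained sketch of the same normalization steps, assuming '?' as the query separator and using only JDK string operations (names are illustrative, not the Dubbo utility itself):

```java
public class PathNormalizeSketch {

    /** Strips any query string and collapses duplicated slashes. */
    static String normalize(String path) {
        if (path == null || path.isEmpty()) {
            return "/";
        }
        // remove the query string if present
        int index = path.indexOf('?');
        String normalized = index > -1 ? path.substring(0, index) : path;
        // collapse duplicated slashes
        while (normalized.contains("//")) {
            normalized = normalized.replace("//", "/");
        }
        return normalized;
    }

    public static void main(String[] args) {
        System.out.println(normalize("/a//b///c?x=1")); // prints /a/b/c
        System.out.println(normalize(""));              // prints /
    }
}
```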
graphhopper_InstructionsOutgoingEdges_getOtherContinue | /**
* Returns an edge that has more or less in the same orientation as the prevEdge, but is not the currentEdge.
* If there is one, this indicates that we might need an instruction to help finding the correct edge out of the different choices.
* If there is none, return null.
*/
public EdgeIteratorState getOtherContinue(double prevLat, double prevLon, double prevOrientation) {
int tmpSign;
for (EdgeIteratorState edge : allowedAlternativeTurns) {
GHPoint point = InstructionsHelper.getPointForOrientationCalculation(edge, nodeAccess);
tmpSign = InstructionsHelper.calculateSign(prevLat, prevLon, point.getLat(), point.getLon(), prevOrientation);
if (Math.abs(tmpSign) <= 1) {
return edge;
}
}
return null;
} | 3.68 |
flink_AbstractUdfStreamOperator_getUserFunctionParameters | /**
* Since the streaming API does not implement any parametrization of functions via a
* configuration, the config returned here is actually empty.
*
* @return The user function parameters (currently empty)
*/
public Configuration getUserFunctionParameters() {
return new Configuration();
} | 3.68 |
hbase_HRegion_replayWALBatchMutate | /**
* Replay the batch mutate for secondary replica.
* <p/>
* We will directly apply the cells to the memstore. This is because:
* <ol>
* <li>All the cells are gotten from {@link WALEdit}, so we only have {@link Put} and
* {@link Delete} here</li>
* <li>The replay is single threaded, we do not need to acquire row lock, as the region is read
* only so no one else can write it.</li>
* <li>We do not need to write WAL.</li>
* <li>We will advance MVCC in the caller directly.</li>
* </ol>
*/
private void replayWALBatchMutate(Map<byte[], List<Cell>> family2Cells) throws IOException {
startRegionOperation(Operation.REPLAY_BATCH_MUTATE);
try {
for (Map.Entry<byte[], List<Cell>> entry : family2Cells.entrySet()) {
applyToMemStore(getStore(entry.getKey()), entry.getValue(), false, memStoreSizing);
}
} finally {
closeRegionOperation(Operation.REPLAY_BATCH_MUTATE);
}
} | 3.68 |
graphhopper_AngleCalc_calcAzimuth | /**
* Calculate the azimuth in degree for a line given by two coordinates. Direction in 'degree'
* where 0 is north, 90 is east, 180 is south and 270 is west.
*/
public double calcAzimuth(double lat1, double lon1, double lat2, double lon2) {
double orientation = Math.PI / 2 - calcOrientation(lat1, lon1, lat2, lon2);
if (orientation < 0)
orientation += 2 * Math.PI;
return Math.toDegrees(Helper.round4(orientation)) % 360;
} | 3.68 |
pulsar_ReaderConfiguration_getCryptoFailureAction | /**
* @return The ConsumerCryptoFailureAction
*/
public ConsumerCryptoFailureAction getCryptoFailureAction() {
return conf.getCryptoFailureAction();
} | 3.68 |
hbase_Result_getStats | /**
* Returns the associated statistics about the region from which this was returned. Can be
* <tt>null</tt> if stats are disabled.
*/
public RegionLoadStats getStats() {
return stats;
} | 3.68 |
flink_DynamicSinkUtils_createConsumedType | /**
* Returns the {@link DataType} that a sink should consume as the output from the runtime.
*
* <p>The format looks as follows: {@code PHYSICAL COLUMNS + PERSISTED METADATA COLUMNS}
*/
private static RowType createConsumedType(ResolvedSchema schema, DynamicTableSink sink) {
final Map<String, DataType> metadataMap = extractMetadataMap(sink);
final Stream<RowField> physicalFields =
schema.getColumns().stream()
.filter(Column::isPhysical)
.map(c -> new RowField(c.getName(), c.getDataType().getLogicalType()));
final Stream<RowField> metadataFields =
createRequiredMetadataColumns(schema, sink).stream()
.map(
column ->
new RowField(
// Use alias to ensures that physical and metadata
// columns don't collide.
column.getName(),
metadataMap
.get(
column.getMetadataKey()
.orElse(column.getName()))
.getLogicalType()));
final List<RowField> rowFields =
Stream.concat(physicalFields, metadataFields).collect(Collectors.toList());
return new RowType(false, rowFields);
} | 3.68 |
flink_ExpressionResolver_getExpandingResolverRules | /** List of rules for (possibly) expanding the list of unresolved expressions. */
public static List<ResolverRule> getExpandingResolverRules() {
return Arrays.asList(
ResolverRules.UNWRAP_API_EXPRESSION,
ResolverRules.LOOKUP_CALL_BY_NAME,
ResolverRules.FLATTEN_STAR_REFERENCE,
ResolverRules.EXPAND_COLUMN_FUNCTIONS);
} | 3.68 |
flink_ClockService_of | /** Creates a {@link ClockService} from the given {@link InternalTimerService}. */
static ClockService of(InternalTimerService<?> timerService) {
return timerService::currentProcessingTime;
} | 3.68 |
hadoop_ServiceRegistryUtils_mkUserHomePath | /**
   * Build the path to a user home folder.
*/
public static String mkUserHomePath(String username) {
return SVC_USERS + "/" + username;
} | 3.68 |
hudi_LSMTimelineWriter_write | /**
* Writes the list of active actions into the timeline.
*
* @param activeActions The active actions
* @param preWriteCallback The callback before writing each action
* @param exceptionHandler The handle for exception
*/
public void write(
List<ActiveAction> activeActions,
Option<Consumer<ActiveAction>> preWriteCallback,
Option<Consumer<Exception>> exceptionHandler) throws HoodieCommitException {
ValidationUtils.checkArgument(!activeActions.isEmpty(), "The instant actions to write should not be empty");
Path filePath = new Path(metaClient.getArchivePath(),
newFileName(activeActions.get(0).getInstantTime(), activeActions.get(activeActions.size() - 1).getInstantTime(), FILE_LAYER_ZERO));
try (HoodieFileWriter writer = openWriter(filePath)) {
Schema wrapperSchema = HoodieLSMTimelineInstant.getClassSchema();
LOG.info("Writing schema " + wrapperSchema.toString());
for (ActiveAction activeAction : activeActions) {
try {
preWriteCallback.ifPresent(callback -> callback.accept(activeAction));
// in local FS and HDFS, there could be empty completed instants due to crash.
final HoodieLSMTimelineInstant metaEntry = MetadataConversionUtils.createLSMTimelineInstant(activeAction, metaClient);
writer.write(metaEntry.getInstantTime(), new HoodieAvroIndexedRecord(metaEntry), wrapperSchema);
} catch (Exception e) {
LOG.error("Failed to write instant: " + activeAction.getInstantTime(), e);
exceptionHandler.ifPresent(handler -> handler.accept(e));
}
}
updateManifest(filePath.getName());
} catch (Exception e) {
throw new HoodieCommitException("Failed to write commits", e);
}
} | 3.68 |
pulsar_PulsarConfigurationLoader_isComplete | /**
     * Validates the {@link FieldContext} annotation on each field of the given object. If a field is marked required
     * and its value is null, or a numeric value is outside the provided (min, max) range, the object is considered
     * incomplete and an exception listing the incomplete parameters is thrown.
     *
     * @param obj the configuration object to validate
     * @return true if the object is complete
     * @throws IllegalArgumentException
     *             if the object's field values are not complete according to {@link FieldContext} constraints.
*/
public static boolean isComplete(Object obj) throws IllegalArgumentException {
requireNonNull(obj);
Field[] fields = obj.getClass().getDeclaredFields();
StringBuilder error = new StringBuilder();
for (Field field : fields) {
if (field.isAnnotationPresent(FieldContext.class)) {
field.setAccessible(true);
Object value;
try {
value = field.get(obj);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
if (log.isDebugEnabled()) {
log.debug("Validating configuration field '{}' = '{}'", field.getName(), value);
}
boolean isRequired = field.getAnnotation(FieldContext.class).required();
long minValue = field.getAnnotation(FieldContext.class).minValue();
long maxValue = field.getAnnotation(FieldContext.class).maxValue();
if (isRequired && isEmpty(value)) {
error.append(String.format("Required %s is null,", field.getName()));
}
if (value != null && Number.class.isAssignableFrom(value.getClass())) {
long fieldVal = ((Number) value).longValue();
boolean valid = fieldVal >= minValue && fieldVal <= maxValue;
if (!valid) {
error.append(String.format("%s value %d doesn't fit in given range (%d, %d),", field.getName(),
fieldVal, minValue, maxValue));
}
}
}
}
if (error.length() > 0) {
throw new IllegalArgumentException(error.substring(0, error.length() - 1));
}
return true;
} | 3.68 |
zxing_MatrixUtil_clearMatrix | // Set all cells to -1. -1 means that the cell is empty (not set yet).
//
// JAVAPORT: We shouldn't need to do this at all. The code should be rewritten to begin encoding
// with the ByteMatrix initialized all to zero.
static void clearMatrix(ByteMatrix matrix) {
matrix.clear((byte) -1);
} | 3.68 |
flink_DualInputOperator_clearSecondInput | /** Clears this operator's second input. */
public void clearSecondInput() {
this.input2 = null;
} | 3.68 |
hadoop_MoveStep_setMaxDiskErrors | /**
   * Sets the maximum number of errors to be tolerated before this
* step is aborted.
* @param maxDiskErrors - long
*/
@Override
public void setMaxDiskErrors(long maxDiskErrors) {
this.maxDiskErrors = maxDiskErrors;
} | 3.68 |
hadoop_AbfsClientThrottlingAnalyzer_suspendIfNecessary | /**
* Suspends the current storage operation, as necessary, to reduce throughput.
* @return true if Thread sleeps(Throttling occurs) else false.
*/
public boolean suspendIfNecessary() {
lastExecutionTime.set(now());
timerOrchestrator(TimerFunctionality.RESUME, null);
int duration = sleepDuration;
if (duration > 0) {
try {
Thread.sleep(duration);
return true;
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
return false;
} | 3.68 |
hbase_DynamicMetricsRegistry_newTimeHistogram | /**
* Create a new histogram with time range counts.
* @param name The name of the histogram
* @param desc The description of the data in the histogram.
* @return A new MutableTimeHistogram
*/
public MutableTimeHistogram newTimeHistogram(String name, String desc) {
MutableTimeHistogram histo = new MutableTimeHistogram(name, desc);
return addNewMetricIfAbsent(name, histo, MutableTimeHistogram.class);
} | 3.68 |
framework_WindowConnector_updateComponentSize | // Need to override default because of window mode
@Override
protected void updateComponentSize() {
if (getState().windowMode == WindowMode.NORMAL) {
super.updateComponentSize();
} else if (getState().windowMode == WindowMode.MAXIMIZED) {
super.updateComponentSize("100%", "100%");
}
} | 3.68 |
hadoop_StorageUnit_multiply | /**
* Using BigDecimal so we can throw if we are overflowing the Long.Max.
*
* @param first - First Num.
* @param second - Second Num.
* @return Returns a double
*/
private static double multiply(double first, double second) {
BigDecimal firstVal = new BigDecimal(first);
BigDecimal secondVal = new BigDecimal(second);
return firstVal.multiply(secondVal)
.setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
} | 3.68 |
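A standalone sketch showing why the BigDecimal-based multiply is useful compared to plain double multiplication; the PRECISION value here is illustrative, not the actual Hadoop constant:

```java
import java.math.BigDecimal;
import java.math.RoundingMode;

public class PreciseMultiplySketch {

    private static final int PRECISION = 4; // illustrative scale, not the Hadoop constant

    /** Multiplies two doubles via BigDecimal and rounds to a fixed scale. */
    static double multiply(double first, double second) {
        return new BigDecimal(first)
                .multiply(new BigDecimal(second))
                .setScale(PRECISION, RoundingMode.HALF_UP)
                .doubleValue();
    }

    public static void main(String[] args) {
        System.out.println(0.1 * 0.2);          // 0.020000000000000004 (binary rounding noise)
        System.out.println(multiply(0.1, 0.2)); // 0.02
    }
}
```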
hbase_AbstractFSWAL_findRegionsToForceFlush | /**
* If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, check the
* first (oldest) WAL, and return those regions which should be flushed so that it can be
* let-go/'archived'.
* @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file.
*/
Map<byte[], List<byte[]>> findRegionsToForceFlush() throws IOException {
Map<byte[], List<byte[]>> regions = null;
int logCount = getNumRolledLogFiles();
if (logCount > this.maxLogs && logCount > 0) {
Map.Entry<Path, WALProps> firstWALEntry = this.walFile2Props.firstEntry();
regions =
this.sequenceIdAccounting.findLower(firstWALEntry.getValue().encodedName2HighestSequenceId);
}
if (regions != null) {
List<String> listForPrint = new ArrayList<>();
for (Map.Entry<byte[], List<byte[]>> r : regions.entrySet()) {
StringBuilder families = new StringBuilder();
for (int i = 0; i < r.getValue().size(); i++) {
if (i > 0) {
families.append(",");
}
families.append(Bytes.toString(r.getValue().get(i)));
}
listForPrint.add(Bytes.toStringBinary(r.getKey()) + "[" + families.toString() + "]");
}
LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs
+ "; forcing (partial) flush of " + regions.size() + " region(s): "
+ StringUtils.join(",", listForPrint));
}
return regions;
} | 3.68 |
hibernate-validator_SizeValidatorForArraysOfBoolean_isValid | /**
* Checks the number of entries in an array.
*
* @param array The array to validate.
* @param constraintValidatorContext context in which the constraint is evaluated.
*
* @return Returns {@code true} if the array is {@code null} or the number of entries in
* {@code array} is between the specified {@code min} and {@code max} values (inclusive),
* {@code false} otherwise.
*/
@Override
public boolean isValid(boolean[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return true;
}
return array.length >= min && array.length <= max;
} | 3.68 |