name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
morf_AbstractSqlDialectTest_testRenameTableStatements | /**
* Tests that the syntax is correct for renaming a table.
*/
@SuppressWarnings("unchecked")
@Test
public void testRenameTableStatements() {
Table fromTable = metadata.getTable(TEST_TABLE);
Table renamed = table("Renamed")
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3).nullable(),
column(INT_FIELD, DataType.DECIMAL, 8).nullable(),
column(FLOAT_FIELD, DataType.DECIMAL, 13, 2),
column(DATE_FIELD, DataType.DATE).nullable(),
column(BOOLEAN_FIELD, DataType.BOOLEAN).nullable(),
column(CHAR_FIELD, DataType.STRING, 1).nullable(),
column(BLOB_FIELD, DataType.BLOB).nullable(),
column(BIG_INTEGER_FIELD, DataType.BIG_INTEGER).nullable().defaultValue("12345"),
column(CLOB_FIELD, DataType.CLOB).nullable()
).indexes(
index(TEST_NK).unique().columns(STRING_FIELD),
index(TEST_1).columns(INT_FIELD, FLOAT_FIELD).unique()
);
compareStatements(expectedRenameTableStatements(), testDialect.renameTableStatements(fromTable, renamed));
} | 3.68 |
framework_EventRouter_getListeners | /**
* Returns all listeners that match or extend the given event type.
*
* @param eventType
* The type of event to return listeners for.
 * @return A collection of the registered listeners that match or extend the
 *         given event type. Empty if no listeners are found.
*/
public Collection<?> getListeners(Class<?> eventType) {
List<Object> listeners = new ArrayList<>();
if (listenerList != null) {
for (ListenerMethod lm : listenerList) {
if (lm.isOrExtendsType(eventType)) {
listeners.add(lm.getTarget());
}
}
}
return listeners;
} | 3.68 |
flink_DataStreamUtils_collectWithClient | /**
* Starts the execution of the program and returns an iterator to read the result of the given
* data stream, plus a {@link JobClient} to interact with the application execution.
*
* @deprecated Please use {@link DataStream#executeAndCollect()}.
*/
@Deprecated
public static <OUT> ClientAndIterator<OUT> collectWithClient(
DataStream<OUT> stream, String jobExecutionName) throws Exception {
return stream.executeAndCollectWithClient(jobExecutionName);
} | 3.68 |
hadoop_AbfsLease_free | /**
* Cancel future and free the lease. If an exception occurs while releasing the lease, the error
* will be logged. If the lease cannot be released, AzureBlobFileSystem breakLease will need to
* be called before another client will be able to write to the file.
*/
public void free() {
if (leaseFreed) {
return;
}
try {
LOG.debug("Freeing lease: path {}, lease id {}", path, leaseID);
if (future != null && !future.isDone()) {
future.cancel(true);
}
TracingContext tracingContext = new TracingContext(this.tracingContext);
tracingContext.setOperation(FSOperationType.RELEASE_LEASE);
client.releaseLease(path, leaseID, tracingContext);
} catch (IOException e) {
LOG.warn("Exception when trying to release lease {} on {}. Lease will need to be broken: {}",
leaseID, path, e.getMessage());
} finally {
// Even if releasing the lease fails (e.g. because the file was deleted),
// make sure to record that we freed the lease
leaseFreed = true;
LOG.debug("Freed lease {} on {}", leaseID, path);
}
} | 3.68 |
hadoop_DatanodeVolumeInfo_getStorageType | /**
 * Get the storage type.
*/
public StorageType getStorageType() {
return storageType;
} | 3.68 |
framework_DateCellDayEvent_getDatesWidth | /* Returns total width of all date cells. */
private int getDatesWidth() {
if (weekGrid.width == -1) {
// Undefined width. Needs to be calculated by the known cell
// widths.
int count = weekGrid.content.getWidgetCount() - 1;
return count * getDateCellWidth();
}
return weekGrid.getInternalWidth();
} | 3.68 |
hibernate-validator_Mod10CheckValidator_isCheckDigitValid | /**
* Validate check digit using Mod10
*
* @param digits The digits over which to calculate the checksum
* @param checkDigit the check digit
*
* @return {@code true} if the mod 10 result matches the check digit, {@code false} otherwise
*/
@Override
public boolean isCheckDigitValid(List<Integer> digits, char checkDigit) {
int modResult = ModUtil.calculateMod10Check( digits, this.multiplier, this.weight );
if ( !Character.isDigit( checkDigit ) ) {
return false;
}
int checkValue = extractDigit( checkDigit );
return checkValue == modResult;
} | 3.68 |
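The validator above delegates the checksum to ModUtil; as a rough illustration only, here is a standalone weighted mod-10 sketch. This is not hibernate-validator's ModUtil, and the position weighting is an assumption (multiplier applied to every second digit counting from the right, weight to the rest, as in EAN-13 with multiplier 3 and weight 1).

```java
import java.util.Arrays;
import java.util.List;

public class Mod10Sketch {
    /**
     * Weighted mod-10 checksum: every second digit counting from the rightmost
     * digit is multiplied by {@code multiplier}, the others by {@code weight}.
     * The check digit is the value that brings the sum up to a multiple of 10.
     */
    static int calculateMod10Check(List<Integer> digits, int multiplier, int weight) {
        int sum = 0;
        boolean useMultiplier = true; // start at the rightmost digit
        for (int i = digits.size() - 1; i >= 0; i--) {
            sum += digits.get(i) * (useMultiplier ? multiplier : weight);
            useMultiplier = !useMultiplier;
        }
        return (10 - sum % 10) % 10;
    }

    public static void main(String[] args) {
        // First 12 digits of the EAN-13 code 4006381333931; the expected check digit is 1.
        List<Integer> digits = Arrays.asList(4, 0, 0, 6, 3, 8, 1, 3, 3, 3, 9, 3);
        System.out.println(calculateMod10Check(digits, 3, 1)); // prints 1
    }
}
```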
framework_LocaleService_getState | /**
* Returns the state for this service
* <p>
* The state is transmitted inside the UI state rather than as an individual
* entity.
* </p>
*
* @since 7.1
* @param markAsDirty
* true to mark the state as dirty
* @return a LocaleServiceState object that can be read in any case and
* modified if markAsDirty is true
*/
private LocaleServiceState getState(boolean markAsDirty) {
if (markAsDirty) {
getUI().markAsDirty();
}
return state;
} | 3.68 |
hadoop_XDR_writeMessageUdp | /**
* Write an XDR message to a UDP ChannelBuffer.
* @param response XDR response
* @return UDP buffer
*/
public static ByteBuf writeMessageUdp(XDR response) {
Preconditions.checkState(response.state == XDR.State.READING);
// TODO: Investigate whether making a copy of the buffer is necessary.
return Unpooled.copiedBuffer(response.buf);
} | 3.68 |
flink_MemoryUtils_allocateUnsafe | /**
* Allocates unsafe native memory.
*
* @param size size of the unsafe memory to allocate.
* @return address of the allocated unsafe memory
*/
static long allocateUnsafe(long size) {
return UNSAFE.allocateMemory(Math.max(1L, size));
} | 3.68 |
framework_FieldGroup_isModified | /**
* Checks if any bound field has been modified.
*
* @return true if at least one field has been modified, false otherwise
*/
public boolean isModified() {
for (Field<?> field : getFields()) {
if (field.isModified()) {
return true;
}
}
return false;
} | 3.68 |
framework_AbstractInMemoryContainer_removeFilters | /**
* Remove all container filters for a given property identifier and
* re-filter the view. This also removes filters applying to multiple
* properties including the one identified by propertyId.
*
* This can be used to implement
* {@link Filterable#removeContainerFilters(Object)}.
*
 * @param propertyId the identifier of the property whose filters are removed
 * @return Collection<Filter> the removed filters
*/
protected Collection<Filter> removeFilters(Object propertyId) {
if (getFilters().isEmpty() || propertyId == null) {
return Collections.emptyList();
}
List<Filter> removedFilters = new LinkedList<Filter>();
for (Iterator<Filter> iterator = getFilters().iterator(); iterator
.hasNext();) {
Filter f = iterator.next();
if (f.appliesToProperty(propertyId)) {
removedFilters.add(f);
iterator.remove();
}
}
if (!removedFilters.isEmpty()) {
filterAll();
return removedFilters;
}
return Collections.emptyList();
} | 3.68 |
hbase_AuthUtil_loginClientAsService | /**
* For kerberized cluster, return login user (from kinit or from keytab). Principal should be the
* following format: name/fully.qualified.domain.name@REALM. For non-kerberized cluster, return
* system user.
* <p>
 * It is NOT recommended to use this method unless you are sure what you are doing; it is for the canary only.
* Please use User#loginClient.
* @param conf configuration file
* @throws IOException login exception
*/
private static User loginClientAsService(Configuration conf) throws IOException {
UserProvider provider = UserProvider.instantiate(conf);
if (provider.isHBaseSecurityEnabled() && provider.isHadoopSecurityEnabled()) {
try {
if (provider.shouldLoginFromKeytab()) {
String host = Strings.domainNamePointerToHostName(
DNS.getDefaultHost(conf.get("hbase.client.dns.interface", "default"),
conf.get("hbase.client.dns.nameserver", "default")));
provider.login(HBASE_CLIENT_KEYTAB_FILE, HBASE_CLIENT_KERBEROS_PRINCIPAL, host);
}
} catch (UnknownHostException e) {
LOG.error("Error resolving host name: " + e.getMessage(), e);
throw e;
} catch (IOException e) {
LOG.error("Error while trying to perform the initial login: " + e.getMessage(), e);
throw e;
}
}
return provider.getCurrent();
} | 3.68 |
hbase_ReplicationStorageFactory_getReplicationPeerStorage | /**
* Create a new {@link ReplicationPeerStorage}.
*/
public static ReplicationPeerStorage getReplicationPeerStorage(FileSystem fs, ZKWatcher zk,
Configuration conf) {
Class<? extends ReplicationPeerStorage> clazz = getReplicationPeerStorageClass(conf);
for (Constructor<?> c : clazz.getConstructors()) {
if (c.getParameterCount() != 2) {
continue;
}
if (c.getParameterTypes()[0].isAssignableFrom(FileSystem.class)) {
return ReflectionUtils.newInstance(clazz, fs, conf);
} else if (c.getParameterTypes()[0].isAssignableFrom(ZKWatcher.class)) {
return ReflectionUtils.newInstance(clazz, zk, conf);
}
}
throw new IllegalArgumentException(
"Can not create replication peer storage with type " + clazz);
} | 3.68 |
flink_WindowedStateTransformation_process | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
public <R> StateBootstrapTransformation<T> process(ProcessWindowFunction<T, R, K, W> function) {
WindowOperator<K, T, ?, R, W> operator = builder.process(function);
SavepointWriterOperatorFactory factory =
(timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new StateBootstrapTransformation<>(
input, operatorMaxParallelism, factory, keySelector, keyType);
} | 3.68 |
hbase_MasterObserver_preBalance | /**
* Called prior to requesting rebalancing of the cluster regions, though after the initial checks
* for regions in transition and the balance switch flag.
* @param ctx the environment to interact with the framework and master
* @param request the request used to trigger the balancer
*/
default void preBalance(final ObserverContext<MasterCoprocessorEnvironment> ctx,
BalanceRequest request) throws IOException {
} | 3.68 |
hbase_TableMapReduceUtil_limitNumReduceTasks | /**
* Ensures that the given number of reduce tasks for the given job configuration does not exceed
* the number of regions for the given table.
* @param table The table to get the region count for.
* @param job The current job to adjust.
* @throws IOException When retrieving the table details fails.
*/
public static void limitNumReduceTasks(String table, Job job) throws IOException {
int regions = getRegionCount(job.getConfiguration(), TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
} | 3.68 |
framework_Link_setTargetHeight | /**
* Sets the target window height.
*
* @param targetHeight
* the targetHeight to set.
*/
public void setTargetHeight(int targetHeight) {
getState().targetHeight = targetHeight;
} | 3.68 |
flink_SupportsFilterPushDown_of | /**
* Constructs a filter push-down result.
*
* <p>See the documentation of {@link SupportsFilterPushDown} for more information.
*
* @param acceptedFilters filters that are consumed by the source but may be applied on a
* best effort basis
* @param remainingFilters filters that a subsequent filter operation still needs to perform
* during runtime
*/
public static Result of(
List<ResolvedExpression> acceptedFilters,
List<ResolvedExpression> remainingFilters) {
return new Result(acceptedFilters, remainingFilters);
} | 3.68 |
hbase_MasterObserver_postCompletedDeleteTableAction | /**
 * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of
 * the delete table procedure, asynchronously to the delete RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void postCompletedDeleteTableAction(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName)
throws IOException {
} | 3.68 |
Activiti_SpringAsyncExecutor_setRejectedJobsHandler | /**
 * Required Spring-injected {@link SpringRejectedJobsHandler} implementation that will be used when jobs are rejected by the task executor.
 *
 * @param rejectedJobsHandler the handler to invoke for rejected jobs
*/
public void setRejectedJobsHandler(SpringRejectedJobsHandler rejectedJobsHandler) {
this.rejectedJobsHandler = rejectedJobsHandler;
} | 3.68 |
hmily_LogUtil_info | /**
* Info.
*
* @param logger the logger
* @param supplier the supplier
*/
public static void info(final Logger logger, final Supplier<Object> supplier) {
if (logger.isInfoEnabled()) {
logger.info(Objects.toString(supplier.get()));
}
} | 3.68 |
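A usage sketch of the supplier-based overload above. The LogUtil import is omitted because its package is not shown in the snippet, and buildExpensiveReport is a hypothetical stand-in for costly message construction that the supplier defers.

```java
import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogUtilSketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(LogUtilSketch.class);

    public static void main(String[] args) {
        // The supplier is evaluated only when INFO is enabled, so the expensive
        // message construction is skipped when the level is higher.
        Supplier<Object> message = () -> "state dump: " + buildExpensiveReport();
        LogUtil.info(LOGGER, message);
    }

    private static String buildExpensiveReport() {
        return String.join(",", "a", "b", "c"); // placeholder for costly work
    }
}
```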
hadoop_StagingCommitter_deleteDestinationPaths | /**
* Delete the working paths of a job.
* <ol>
* <li>{@code $dest/__temporary}</li>
* <li>the local working directory for staged files</li>
* </ol>
* Does not attempt to clean up the work of the wrapped committer.
* @param context job context
* @throws IOException IO failure
*/
protected void deleteDestinationPaths(JobContext context) throws IOException {
// delete the __temporary directory. This will cause problems
// if there is >1 task targeting the same dest dir
deleteWithWarning(getDestFS(),
new Path(getOutputPath(), TEMPORARY),
true);
// and the working path
deleteTaskWorkingPathQuietly(context);
} | 3.68 |
flink_BinarySegmentUtils_setFloat | /**
 * Sets a float value into the segments at the given offset.
 *
 * @param segments target segments.
 * @param offset value offset.
 * @param value float value to set.
 */
public static void setFloat(MemorySegment[] segments, int offset, float value) {
if (inFirstSegment(segments, offset, 4)) {
segments[0].putFloat(offset, value);
} else {
setFloatMultiSegments(segments, offset, value);
}
} | 3.68 |
flink_ByteParser_parseField | /**
* Static utility to parse a field of type byte from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
 * @throws NumberFormatException Thrown when the value cannot be parsed because the text
 *     does not represent a correct number.
*/
public static final byte parseField(byte[] bytes, int startPos, int length, char delimiter) {
long val = 0;
boolean neg = false;
if (bytes[startPos] == delimiter) {
throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if (length == 0 || bytes[startPos] == delimiter) {
throw new NumberFormatException("Orphaned minus sign.");
}
}
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
return (byte) (neg ? -val : val);
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
if (val > Byte.MAX_VALUE && (!neg || val > -Byte.MIN_VALUE)) {
throw new NumberFormatException("Value overflow/underflow");
}
}
return (byte) (neg ? -val : val);
} | 3.68 |
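A small usage sketch of the parser above, assuming the class is org.apache.flink.types.parser.ByteParser as in Flink's field parsers; the sample records are made up.

```java
import java.nio.charset.StandardCharsets;
import org.apache.flink.types.parser.ByteParser;

public class ByteParserUsage {
    public static void main(String[] args) {
        // Parse the first field of a '|'-delimited record; length counts from startPos.
        byte[] record = "-42|17|".getBytes(StandardCharsets.US_ASCII);
        byte value = ByteParser.parseField(record, 0, record.length, '|');
        System.out.println(value); // -42

        // Values outside the byte range raise NumberFormatException ("Value overflow/underflow").
        byte[] tooBig = "200|".getBytes(StandardCharsets.US_ASCII);
        try {
            ByteParser.parseField(tooBig, 0, tooBig.length, '|');
        } catch (NumberFormatException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```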
hadoop_ExitUtil_terminateCalled | /**
* @return true if terminate has been called.
*/
public static boolean terminateCalled() {
// Either we set this member or we actually called System#exit
return FIRST_EXIT_EXCEPTION.get() != null;
} | 3.68 |
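A usage sketch, assuming Hadoop's ExitUtil test helpers disableSystemExit() and resetFirstExitException(), which the class provides for test code; the status code and message are made up.

```java
import org.apache.hadoop.util.ExitUtil;

public class TerminateCalledSketch {
    public static void main(String[] args) {
        // Prevent a real JVM exit so terminate() surfaces as an ExitException instead.
        ExitUtil.disableSystemExit();
        try {
            ExitUtil.terminate(1, "simulated fatal error");
        } catch (ExitUtil.ExitException e) {
            System.out.println("caught: " + e.getMessage());
        }
        System.out.println(ExitUtil.terminateCalled()); // true: the first exit was recorded
        ExitUtil.resetFirstExitException();             // clear the recorded exit between tests
        System.out.println(ExitUtil.terminateCalled()); // false
    }
}
```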
hbase_RegionStates_getRegionStates | /** Returns A snapshot of region state nodes for all the regions. */
public ArrayList<RegionState> getRegionStates() {
final ArrayList<RegionState> regions = new ArrayList<>(regionsMap.size());
for (RegionStateNode node : regionsMap.values()) {
regions.add(node.toRegionState());
}
return regions;
} | 3.68 |
hbase_OrderedBytes_isNull | /**
* Return true when the next encoded value in {@code src} is null, false otherwise.
*/
public static boolean isNull(PositionedByteRange src) {
return NULL == (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
} | 3.68 |
hbase_HBaseTestingUtility_getRSForFirstRegionInTable | /**
* Tool to get the reference to the region server object that holds the region of the specified
* user table.
* @param tableName user table to lookup in hbase:meta
* @return region server that holds it, null if the row doesn't exist
*/
public HRegionServer getRSForFirstRegionInTable(TableName tableName)
throws IOException, InterruptedException {
List<RegionInfo> regions = getRegions(tableName);
if (regions == null || regions.isEmpty()) {
return null;
}
LOG.debug("Found " + regions.size() + " regions for table " + tableName);
byte[] firstRegionName =
regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst()
.orElseThrow(() -> new IOException("online regions not found in table " + tableName));
LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
while (retrier.shouldRetry()) {
int index = getMiniHBaseCluster().getServerWith(firstRegionName);
if (index != -1) {
return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
}
// Came back -1. Region may not be online yet. Sleep a while.
retrier.sleepUntilNextRetry();
}
return null;
} | 3.68 |
dubbo_ApolloDynamicConfiguration_addListener | /**
* Since all governance rules will lay under dubbo group, this method now always uses the default dubboConfig and
* ignores the group parameter.
*/
@Override
public void addListener(String key, String group, ConfigurationListener listener) {
ApolloListener apolloListener = listeners.computeIfAbsent(group + key, k -> createTargetListener(key, group));
apolloListener.addListener(listener);
dubboConfig.addChangeListener(apolloListener, Collections.singleton(key));
} | 3.68 |
hbase_Encryption_pbkdf128 | /**
* Return a 128 bit key derived from the concatenation of the supplied arguments using
* PBKDF2WithHmacSHA1 at 10,000 iterations.
*/
public static byte[] pbkdf128(byte[]... args) {
StringBuilder sb = new StringBuilder();
for (byte[] b : args) {
sb.append(Arrays.toString(b));
}
return generateSecretKey("PBKDF2WithHmacSHA1", AES.KEY_LENGTH, sb.toString().toCharArray());
} | 3.68 |
hadoop_InputWriter_initialize | /**
* Initializes the InputWriter. This method has to be called before calling
* any of the other methods.
*/
public void initialize(PipeMapRed pipeMapRed) throws IOException {
// nothing here yet, but that might change in the future
} | 3.68 |
hbase_BloomFilterMetrics_getEligibleRequestsCount | /**
 * Returns the current count of requests that could have used a bloom filter, but one was not
 * defined or loaded.
*/
public long getEligibleRequestsCount() {
return eligibleRequests.sum();
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_getFavoredNodesList | /**
* Convert PB bytes to ServerName.
* @param favoredNodes The PB'ed bytes of favored nodes
* @return the array of {@link ServerName} for the byte array of favored nodes.
*/
public static ServerName[] getFavoredNodesList(byte[] favoredNodes) throws IOException {
FavoredNodes f = FavoredNodes.parseFrom(favoredNodes);
List<HBaseProtos.ServerName> protoNodes = f.getFavoredNodeList();
ServerName[] servers = new ServerName[protoNodes.size()];
int i = 0;
for (HBaseProtos.ServerName node : protoNodes) {
servers[i++] = ProtobufUtil.toServerName(node);
}
return servers;
} | 3.68 |
flink_Tuple8_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7> Tuple8<T0, T1, T2, T3, T4, T5, T6, T7> of(
T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7) {
return new Tuple8<>(f0, f1, f2, f3, f4, f5, f6, f7);
} | 3.68 |
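A usage sketch contrasting of() with the explicit constructor; the values are arbitrary.

```java
import org.apache.flink.api.java.tuple.Tuple8;

public class Tuple8OfUsage {
    public static void main(String[] args) {
        // Generic arguments are inferred from the values, which is the point of of():
        Tuple8<Integer, Long, Double, String, Boolean, Byte, Short, Character> t =
                Tuple8.of(1, 2L, 3.0, "four", true, (byte) 6, (short) 7, '8');

        // The equivalent constructor call spells out (or relies on the diamond for) all eight type arguments:
        Tuple8<Integer, Long, Double, String, Boolean, Byte, Short, Character> same =
                new Tuple8<>(1, 2L, 3.0, "four", true, (byte) 6, (short) 7, '8');

        System.out.println(t.f3 + " " + t.equals(same)); // four true
    }
}
```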
hadoop_XFrameOptionsFilter_getFilterParams | /**
* Constructs a mapping of configuration properties to be used for filter
* initialization. The mapping includes all properties that start with the
* specified configuration prefix. Property names in the mapping are trimmed
* to remove the configuration prefix.
*
* @param conf configuration to read
* @param confPrefix configuration prefix
* @return mapping of configuration properties to be used for filter
* initialization
*/
public static Map<String, String> getFilterParams(Configuration conf,
String confPrefix) {
return conf.getPropsWithPrefix(confPrefix);
} | 3.68 |
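Since the method simply delegates to Configuration#getPropsWithPrefix, the behaviour can be sketched directly against a Configuration; the "myfilter." prefix and the keys below are hypothetical.

```java
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class FilterParamsSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("myfilter.X-Frame-Options", "SAMEORIGIN");
        conf.set("myfilter.custom-header", "value");
        conf.set("unrelated.key", "ignored");

        // Same call the filter helper makes: keys matching the prefix are returned
        // with the prefix trimmed off, everything else is dropped.
        Map<String, String> params = conf.getPropsWithPrefix("myfilter.");
        System.out.println(params); // e.g. {custom-header=value, X-Frame-Options=SAMEORIGIN}
    }
}
```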
dubbo_MethodConfig_constructMethodConfig | /**
* TODO remove constructMethodConfig
*
 * @param methods the reflective methods to wrap
 * @return the corresponding list of {@link MethodConfig} instances, or an empty list if {@code methods} is null or empty
*/
@Deprecated
public static List<MethodConfig> constructMethodConfig(Method[] methods) {
if (methods != null && methods.length != 0) {
List<MethodConfig> methodConfigs = new ArrayList<>(methods.length);
for (int i = 0; i < methods.length; i++) {
MethodConfig methodConfig = new MethodConfig(methods[i]);
methodConfigs.add(methodConfig);
}
return methodConfigs;
}
return Collections.emptyList();
} | 3.68 |
flink_ExecutionEnvironment_createInput | /**
 * Generic method to create an input DataSet with the given {@link InputFormat}. The {@link DataSet}
* will not be immediately created - instead, this method returns a {@link DataSet} that will be
* lazily created from the input format once the program is executed.
*
* <p>The {@link DataSet} is typed to the given TypeInformation. This method is intended for
 * input formats where the return type cannot be determined by reflection analysis, and
* that do not implement the {@link ResultTypeQueryable} interface.
*
* @param inputFormat The input format used to create the data set.
* @return A {@link DataSet} that represents the data created by the input format.
* @see #createInput(InputFormat)
*/
public <X> DataSource<X> createInput(
InputFormat<X, ?> inputFormat, TypeInformation<X> producedType) {
if (inputFormat == null) {
throw new IllegalArgumentException("InputFormat must not be null.");
}
if (producedType == null) {
throw new IllegalArgumentException("Produced type information must not be null.");
}
return new DataSource<>(this, inputFormat, producedType, Utils.getCallLocationName());
} | 3.68 |
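A usage sketch with an explicit TypeInformation; TextInputFormat is just a convenient example format here and the input path is hypothetical.

```java
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;

public class CreateInputUsage {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Explicit TypeInformation is useful when the format's produced type cannot be
        // derived by reflection.
        TextInputFormat format = new TextInputFormat(new Path("/tmp/input.txt"));
        DataSet<String> lines = env.createInput(format, TypeInformation.of(String.class));

        lines.print();
    }
}
```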
hbase_HFileBlockIndex_getEntryBySubEntry | /**
* Finds the entry corresponding to the deeper-level index block containing the given
* deeper-level entry (a "sub-entry"), assuming a global 0-based ordering of sub-entries.
* <p>
* <i> Implementation note. </i> We are looking for i such that numSubEntriesAt[i - 1] <= k <
* numSubEntriesAt[i], because a deeper-level block #i (0-based) contains sub-entries #
* numSubEntriesAt[i - 1]'th through numSubEntriesAt[i] - 1, assuming a global 0-based ordering
* of sub-entries. i is by definition the insertion point of k in numSubEntriesAt.
* @param k sub-entry index, from 0 to the total number sub-entries - 1
* @return the 0-based index of the entry corresponding to the given sub-entry
*/
@Override
public int getEntryBySubEntry(long k) {
// We define mid-key as the key corresponding to k'th sub-entry
// (0-based).
int i = Collections.binarySearch(numSubEntriesAt, k);
// Exact match: cumulativeWeight[i] = k. This means chunks #0 through
// #i contain exactly k sub-entries, and the sub-entry #k (0-based)
// is in the (i + 1)'th chunk.
if (i >= 0) return i + 1;
// Inexact match. Return the insertion point.
return -i - 1;
} | 3.68 |
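A standalone sketch of the insertion-point arithmetic above, using a hypothetical cumulative sub-entry list instead of the real index structures.

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SubEntryLookupSketch {
    // Cumulative sub-entry counts: block #0 holds sub-entries [0, 3),
    // block #1 holds [3, 7), block #2 holds [7, 12).
    static final List<Long> NUM_SUB_ENTRIES_AT = Arrays.asList(3L, 7L, 12L);

    static int getEntryBySubEntry(long k) {
        int i = Collections.binarySearch(NUM_SUB_ENTRIES_AT, k);
        // Exact match: blocks #0..#i contain exactly k sub-entries, so sub-entry #k
        // (0-based) starts the next block.
        if (i >= 0) {
            return i + 1;
        }
        // Inexact match: the insertion point (-i - 1) is the block containing sub-entry #k.
        return -i - 1;
    }

    public static void main(String[] args) {
        System.out.println(getEntryBySubEntry(0)); // 0
        System.out.println(getEntryBySubEntry(2)); // 0
        System.out.println(getEntryBySubEntry(3)); // 1 (exact match on the cumulative count 3)
        System.out.println(getEntryBySubEntry(8)); // 2
    }
}
```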
morf_AliasedField_in | /**
* @param values The values for comparison
* @return The resulting {@link Criterion}.
*/
public Criterion in(Iterable<? extends Object> values) {
return Criterion.in(this, values);
} | 3.68 |
Activiti_BpmnDeploymentHelper_verifyProcessDefinitionsDoNotShareKeys | /**
* Verifies that no two process definitions share the same key, to prevent database unique
* index violation.
*
* @throws ActivitiException if any two processes have the same key
*/
public void verifyProcessDefinitionsDoNotShareKeys(
Collection<ProcessDefinitionEntity> processDefinitions) {
Set<String> keySet = new LinkedHashSet<String>();
for (ProcessDefinitionEntity processDefinition : processDefinitions) {
if (keySet.contains(processDefinition.getKey())) {
throw new ActivitiException(
"The deployment contains process definitions with the same key (process id attribute), this is not allowed");
}
keySet.add(processDefinition.getKey());
}
} | 3.68 |
hbase_CacheableDeserializerIdManager_getDeserializer | /**
 * Get the cacheable deserializer registered with the given identifier {@code id}.
* @see #registerDeserializer(CacheableDeserializer)
*/
public static CacheableDeserializer<Cacheable> getDeserializer(int id) {
return registeredDeserializers.get(id);
} | 3.68 |
flink_LongParser_parseField | /**
* Static utility to parse a field of type long from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
 * @throws NumberFormatException Thrown when the value cannot be parsed because the text
 *     does not represent a correct number.
*/
public static final long parseField(byte[] bytes, int startPos, int length, char delimiter) {
long val = 0;
boolean neg = false;
if (bytes[startPos] == delimiter) {
throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if (length == 0 || bytes[startPos] == delimiter) {
throw new NumberFormatException("Orphaned minus sign.");
}
}
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
return neg ? -val : val;
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
// check for overflow / underflow
if (val < 0) {
// this is an overflow/underflow, unless we hit exactly the Long.MIN_VALUE
if (neg && val == Long.MIN_VALUE) {
if (length == 1 || bytes[startPos + 1] == delimiter) {
return Long.MIN_VALUE;
} else {
throw new NumberFormatException("value overflow");
}
} else {
throw new NumberFormatException("value overflow");
}
}
}
return neg ? -val : val;
} | 3.68 |
hbase_ParseFilter_checkForSkip | /**
* Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP'
* <p>
* @param filterStringAsByteArray filter string given by the user
* @param indexOfSkip index at which an 'S' was read
* @return true if the keyword 'SKIP' is at the current index
*/
public static boolean checkForSkip(byte[] filterStringAsByteArray, int indexOfSkip)
throws CharacterCodingException {
try {
if (
filterStringAsByteArray[indexOfSkip] == ParseConstants.S
&& filterStringAsByteArray[indexOfSkip + 1] == ParseConstants.K
&& filterStringAsByteArray[indexOfSkip + 2] == ParseConstants.I
&& filterStringAsByteArray[indexOfSkip + 3] == ParseConstants.P
&& (indexOfSkip == 0
|| filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.WHITESPACE
|| filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.RPAREN
|| filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.LPAREN)
&& (filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.WHITESPACE
|| filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.LPAREN)
) {
return true;
} else {
return false;
}
} catch (ArrayIndexOutOfBoundsException e) {
return false;
}
} | 3.68 |
hbase_SaslClientAuthenticationProviders_getInstance | /**
* Returns a singleton instance of {@link SaslClientAuthenticationProviders}.
*/
public static synchronized SaslClientAuthenticationProviders getInstance(Configuration conf) {
SaslClientAuthenticationProviders providers = providersRef.get();
if (providers == null) {
providers = instantiate(conf);
providersRef.set(providers);
}
return providers;
} | 3.68 |
flink_Pool_tryPollEntry | /** Tries to get the next cached entry. If the pool is empty, this method returns null. */
@Nullable
public T tryPollEntry() {
return pool.poll();
} | 3.68 |
graphhopper_HeadingResolver_getEdgesWithDifferentHeading | /**
* Returns a list of edge IDs of edges adjacent to the given base node that do *not* have the same or a similar
* heading as the given heading. If for example the tolerance is 45 degrees this method returns all edges for which
* the absolute difference to the given heading is greater than 45 degrees. The heading of an edge is defined as
* the direction of the first segment of an edge (adjacent and facing away from the base node).
*
* @param heading north based azimuth, between 0 and 360 degrees
* @see #setTolerance
*/
public IntArrayList getEdgesWithDifferentHeading(int baseNode, double heading) {
double xAxisAngle = AngleCalc.ANGLE_CALC.convertAzimuth2xaxisAngle(heading);
IntArrayList edges = new IntArrayList(1);
EdgeIterator iter = edgeExplorer.setBaseNode(baseNode);
while (iter.next()) {
PointList points = iter.fetchWayGeometry(FetchMode.ALL);
double orientation = AngleCalc.ANGLE_CALC.calcOrientation(
points.getLat(0), points.getLon(0),
points.getLat(1), points.getLon(1)
);
orientation = AngleCalc.ANGLE_CALC.alignOrientation(xAxisAngle, orientation);
double diff = Math.abs(orientation - xAxisAngle);
if (diff > toleranceRad)
edges.add(iter.getEdge());
}
return edges;
} | 3.68 |
framework_TreeGridElement_isRowExpanded | /**
* Returns whether the row at the given index is expanded or not.
*
* @param rowIndex
* 0-based row index
* @param hierarchyColumnIndex
* 0-based index of the hierarchy column
* @return {@code true} if expanded, {@code false} if collapsed
*/
public boolean isRowExpanded(int rowIndex, int hierarchyColumnIndex) {
WebElement expandElement = getExpandElement(rowIndex,
hierarchyColumnIndex);
List<String> classes = Arrays
.asList(expandElement.getAttribute("class").split(" "));
return classes.contains("expanded") && !classes.contains("collapsed");
} | 3.68 |
hadoop_SCMController_overview | /**
* It is referenced in SCMWebServer.SCMWebApp.setup()
*/
@SuppressWarnings("unused")
public void overview() {
render(SCMOverviewPage.class);
} | 3.68 |
streampipes_NumWordsRulesClassifier_getInstance | /**
* Returns the singleton instance for RulebasedBoilerpipeClassifier.
*/
public static NumWordsRulesClassifier getInstance() {
return INSTANCE;
} | 3.68 |
hadoop_HistoryServerStateStoreService_serviceInit | /**
* Initialize the state storage
*
* @param conf the configuration
* @throws IOException
*/
@Override
public void serviceInit(Configuration conf) throws IOException {
initStorage(conf);
} | 3.68 |
querydsl_JTSGeometryExpression_crosses | /**
 * Returns 1 (TRUE) if this geometric object “spatially crosses” anotherGeometry.
*
* @param geometry other geometry
* @return true, if crosses
*/
public BooleanExpression crosses(Expression<? extends Geometry> geometry) {
return Expressions.booleanOperation(SpatialOps.CROSSES, mixin, geometry);
} | 3.68 |
hudi_HoodieReaderContext_generateMetadataForRecord | /**
* Generates metadata of the record. Only fetches record key that is necessary for merging.
*
* @param record The record.
* @param schema The Avro schema of the record.
* @return A mapping containing the metadata.
*/
public Map<String, Object> generateMetadataForRecord(T record, Schema schema) {
Map<String, Object> meta = new HashMap<>();
meta.put(INTERNAL_META_RECORD_KEY, getRecordKey(record, schema));
meta.put(INTERNAL_META_SCHEMA, schema);
return meta;
} | 3.68 |
flink_InputChannel_getCurrentBackoff | /** Returns the current backoff in ms. */
protected int getCurrentBackoff() {
return currentBackoff <= 0 ? 0 : currentBackoff;
} | 3.68 |
hadoop_RouterRMAdminService_finalize | /**
* Shutdown the chain of interceptors when the object is destroyed.
*/
@Override
protected void finalize() {
rootInterceptor.shutdown();
} | 3.68 |
hadoop_SlowPeerReports_equals | /**
 * Return true if the two objects represent the same set of slow peer
* entries. Primarily for unit testing convenience.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof SlowPeerReports)) {
return false;
}
SlowPeerReports that = (SlowPeerReports) o;
return slowPeers.equals(that.slowPeers);
} | 3.68 |
streampipes_MqttClient_createMqttClient | /**
* Create new MQTT client
*/
public void createMqttClient() {
this.mqtt = new MQTT();
this.uri = MqttUtils.makeMqttServerUri(options.getProtocol(), options.getHost(), options.getPort());
try {
/**
 * Sets the URL for connecting to the MQTT broker, e.g. {@code tcp://localhost:1883}.
*/
mqtt.setHost(uri);
// authentication
if (options.isBasicAuth()) {
/**
* The username for authenticated sessions.
*/
mqtt.setUserName(options.getUsername());
/**
* The password for authenticated sessions.
*/
mqtt.setPassword(options.getPassword());
}
/**
* The client id used when connecting to the MQTT broker.
*/
mqtt.setClientId(options.getClientId());
/**
* Set to false if you want the MQTT server to persist topic subscriptions and ack positions across
* client sessions. Defaults to true.
*/
mqtt.setCleanSession(options.isCleanSession());
/**
* The maximum amount of time in ms to wait between reconnect attempts. Defaults to 30,000.
*/
mqtt.setReconnectDelayMax(options.getReconnectDelayMaxInMs());
/**
* Configures the Keep Alive timer in seconds. Defines the maximum time interval between messages
* received from a client. It enables the server to detect that the network connection to a client has
* dropped, without having to wait for the long TCP/IP timeout.
*/
mqtt.setKeepAlive(options.getKeepAliveInSec());
/**
* Set to "3.1.1" to use MQTT version 3.1.1. Otherwise defaults to the 3.1 protocol version.
*/
mqtt.setVersion(options.getMqttProtocolVersion());
// last will and testament options
if (options.isLastWill()) {
/**
* If set the server will publish the client's Will message to the specified topics if the client has
* an unexpected disconnection.
*/
mqtt.setWillTopic(options.getWillTopic());
/**
* Sets the quality of service to use for the Will message. Defaults to QoS.AT_MOST_ONCE.
*/
mqtt.setWillQos(options.getWillQoS());
/**
* The Will message to send. Defaults to a zero length message.
*/
mqtt.setWillMessage(options.getWillMessage());
/**
* Set to true if you want the Will to be published with the retain option.
*/
mqtt.setWillRetain(options.getWillRetain());
}
} catch (Exception e) {
throw new SpRuntimeException("Failed to initialize MQTT Client: " + e.getMessage(), e);
}
} | 3.68 |
flink_SharedReference_applySync | /**
* Executes the code on the referenced object in a synchronized fashion. Note that this method
* is prone to deadlock if multiple references are accessed in a synchronized fashion in a
* nested call-chain.
*/
default <R> R applySync(Function<T, R> function) {
T object = get();
synchronized (object) {
return function.apply(object);
}
} | 3.68 |
hibernate-validator_AnnotationApiHelper_isClass | /**
* Test if the given {@link TypeMirror} represents a class or not.
*/
public boolean isClass(TypeMirror typeMirror) {
return TypeKind.DECLARED.equals( typeMirror.getKind() ) && ( (DeclaredType) typeMirror ).asElement().getKind().isClass();
} | 3.68 |
hbase_CanaryTool_newMonitor | /**
* A Factory method for {@link Monitor}. Makes a RegionServerMonitor, or a ZooKeeperMonitor, or a
* RegionMonitor.
* @return a Monitor instance
*/
private Monitor newMonitor(final Connection connection, String[] monitorTargets) {
Monitor monitor;
boolean useRegExp = conf.getBoolean(HBASE_CANARY_USE_REGEX, false);
boolean regionServerAllRegions = conf.getBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, false);
boolean failOnError = conf.getBoolean(HBASE_CANARY_FAIL_ON_ERROR, true);
int permittedFailures = conf.getInt(HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES, 0);
boolean writeSniffing = conf.getBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, false);
String writeTableName =
conf.get(HBASE_CANARY_REGION_WRITE_TABLE_NAME, DEFAULT_WRITE_TABLE_NAME.getNameAsString());
long configuredWriteTableTimeout =
conf.getLong(HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT, DEFAULT_TIMEOUT);
if (this.regionServerMode) {
monitor = new RegionServerMonitor(connection, monitorTargets, useRegExp,
getSink(connection.getConfiguration(), RegionServerStdOutSink.class), this.executor,
regionServerAllRegions, failOnError, permittedFailures);
} else if (this.zookeeperMode) {
monitor = new ZookeeperMonitor(connection, monitorTargets, useRegExp,
getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), this.executor,
failOnError, permittedFailures);
} else {
monitor = new RegionMonitor(connection, monitorTargets, useRegExp,
getSink(connection.getConfiguration(), RegionStdOutSink.class), this.executor,
writeSniffing, TableName.valueOf(writeTableName), failOnError, configuredReadTableTimeouts,
configuredWriteTableTimeout, permittedFailures);
}
return monitor;
} | 3.68 |
flink_StreamTableSinkFactory_createStreamTableSink | /**
* Creates and configures a {@link StreamTableSink} using the given properties.
*
* @param properties normalized properties describing a table sink.
* @return the configured table sink.
* @deprecated {@link Context} contains more information, and already contains table schema too.
* Please use {@link #createTableSink(Context)} instead.
*/
@Deprecated
default StreamTableSink<T> createStreamTableSink(Map<String, String> properties) {
return null;
} | 3.68 |
hadoop_BaseNMTokenSecretManager_createNMToken | /**
* Helper function for creating NMTokens.
*
* @param applicationAttemptId application AttemptId.
* @param nodeId node Id.
* @param applicationSubmitter application Submitter.
* @return NMToken.
*/
public Token createNMToken(ApplicationAttemptId applicationAttemptId,
NodeId nodeId, String applicationSubmitter) {
byte[] password;
NMTokenIdentifier identifier;
this.readLock.lock();
try {
identifier =
new NMTokenIdentifier(applicationAttemptId, nodeId,
applicationSubmitter, this.currentMasterKey.getMasterKey()
.getKeyId());
password = this.createPassword(identifier);
} finally {
this.readLock.unlock();
}
return newInstance(password, identifier);
} | 3.68 |
hadoop_StreamXmlRecordReader_nextState | /* also updates firstMatchStart_; */
int nextState(int state, int input, int bufPos) {
switch (state) {
case CDATA_UNK:
case CDATA_OUT:
switch (input) {
case CDATA_BEGIN:
return CDATA_IN;
case CDATA_END:
if (state == CDATA_OUT) {
// System.out.println("buggy XML " + bufPos);
}
return CDATA_OUT;
case RECORD_MAYBE:
return (state == CDATA_UNK) ? CDATA_UNK : RECORD_ACCEPT;
}
break;
case CDATA_IN:
return (input == CDATA_END) ? CDATA_OUT : CDATA_IN;
}
throw new IllegalStateException(state + " " + input + " " + bufPos + " "
+ splitName_);
} | 3.68 |
framework_RendererCellReference_getElement | /**
* Returns the element of the cell. Can be either a <code>TD</code> element
* or a <code>TH</code> element.
*
* @return the element of the cell
*/
@Override
public TableCellElement getElement() {
return cell.getElement();
} | 3.68 |
hadoop_SelectBinding_isSelectEnabled | /**
* Static probe for select being enabled.
* @param conf configuration
* @return true iff select is enabled.
*/
public static boolean isSelectEnabled(Configuration conf) {
return conf.getBoolean(FS_S3A_SELECT_ENABLED, true);
} | 3.68 |
hbase_SizeCachedByteBufferKeyValue_getSerializedSize | /**
 * Overridden to just return the length, saving the cost of method dispatching. Otherwise it would
 * call {@link ExtendedCell#getSerializedSize()} first and then forward to
 * {@link SizeCachedKeyValue#getSerializedSize(boolean)}. (See HBASE-21657)
*/
@Override
public int getSerializedSize() {
return this.length;
} | 3.68 |
hadoop_AbfsClient_getDirectoryQueryParameter | /**
* Get the directory query parameter used by the List Paths REST API and used
* as the path in the continuation token. If the input path is null or the
* root path "/", empty string is returned. If the input path begins with '/',
* the return value is the substring beginning at offset 1. Otherwise, the
* input path is returned.
* @param path the path to be listed.
* @return the value of the directory query parameter
*/
public static String getDirectoryQueryParameter(final String path) {
String directory = path;
if (Strings.isNullOrEmpty(directory)) {
directory = AbfsHttpConstants.EMPTY_STRING;
} else if (directory.charAt(0) == '/') {
directory = directory.substring(1);
}
return directory;
} | 3.68 |
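A standalone mirror of the normalization above (without the Guava Strings / AbfsHttpConstants dependencies), showing the documented cases.

```java
public class DirectoryQueryParameterSketch {
    // Mirrors the normalization above: null/empty and the root "/" become "",
    // a leading '/' is stripped, anything else passes through unchanged.
    static String getDirectoryQueryParameter(String path) {
        if (path == null || path.isEmpty()) {
            return "";
        }
        if (path.charAt(0) == '/') {
            return path.substring(1);
        }
        return path;
    }

    public static void main(String[] args) {
        System.out.println("[" + getDirectoryQueryParameter(null) + "]");       // []
        System.out.println("[" + getDirectoryQueryParameter("/") + "]");        // []
        System.out.println("[" + getDirectoryQueryParameter("/dir/sub") + "]"); // [dir/sub]
        System.out.println("[" + getDirectoryQueryParameter("dir/sub") + "]");  // [dir/sub]
    }
}
```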
dubbo_RpcServiceContext_getRemoteHost | /**
* get remote host.
*
* @return remote host
*/
@Override
public String getRemoteHost() {
return remoteAddress == null
? null
: remoteAddress.getAddress() == null
? remoteAddress.getHostName()
: NetUtils.filterLocalHost(remoteAddress.getAddress().getHostAddress());
} | 3.68 |
pulsar_ProducerImpl_recoverChecksumError | /**
* Checks message checksum to retry if message was corrupted while sending to broker. Recomputes checksum of the
* message header-payload again.
* <ul>
* <li><b>if matches with existing checksum</b>: it means message was corrupt while sending to broker. So, resend
* message</li>
* <li><b>if doesn't match with existing checksum</b>: it means message is already corrupt and can't retry again.
* So, fail send-message by failing callback</li>
* </ul>
*
* @param cnx
* @param sequenceId
*/
protected synchronized void recoverChecksumError(ClientCnx cnx, long sequenceId) {
OpSendMsg op = pendingMessages.peek();
if (op == null) {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Got send failure for timed out msg {}", topic, producerName, sequenceId);
}
} else {
long expectedSequenceId = getHighestSequenceId(op);
if (sequenceId == expectedSequenceId) {
boolean corrupted = !verifyLocalBufferIsNotCorrupted(op);
if (corrupted) {
// remove message from pendingMessages queue and fail callback
pendingMessages.remove();
releaseSemaphoreForSendOp(op);
try {
op.sendComplete(
new PulsarClientException.ChecksumException(
format("The checksum of the message which is produced by producer %s to the topic "
+ "%s is corrupted", producerName, topic)));
} catch (Throwable t) {
log.warn("[{}] [{}] Got exception while completing the callback for msg {}:", topic,
producerName, sequenceId, t);
}
ReferenceCountUtil.safeRelease(op.cmd);
op.recycle();
return;
} else {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Message is not corrupted, retry send-message with sequenceId {}", topic,
producerName, sequenceId);
}
}
} else {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Corrupt message is already timed out {}", topic, producerName, sequenceId);
}
}
}
// as msg is not corrupted : let producer resend pending-messages again including checksum failed message
resendMessages(cnx, this.connectionHandler.getEpoch());
} | 3.68 |
flink_UserDefinedFunctionHelper_getReturnTypeOfTableFunction | /**
 * Tries to infer the TypeInformation of a TableFunction's result type.
 *
 * @param tableFunction The TableFunction for which the result type is inferred.
 * @param scalaType The implicitly inferred type of the result type.
 * @return The inferred result type of the TableFunction.
*/
public static <T> TypeInformation<T> getReturnTypeOfTableFunction(
TableFunction<T> tableFunction, TypeInformation<T> scalaType) {
TypeInformation<T> userProvidedType = tableFunction.getResultType();
if (userProvidedType != null) {
return userProvidedType;
} else if (scalaType != null) {
return scalaType;
} else {
return TypeExtractor.createTypeInfo(
tableFunction, TableFunction.class, tableFunction.getClass(), 0);
}
} | 3.68 |
framework_MultiSelectionModelConnector_createSelectionModel | /**
 * Creates an instance of MultiSelectionModel. This method is provided so that features of the
 * selection model can be overridden without copying all of its logic.
*
* @since 8.1
*
* @return selection model instance, not {@code null}
*/
protected MultiSelectionModel createSelectionModel() {
return new MultiSelectionModel();
} | 3.68 |
querydsl_AbstractSQLQuery_forShare | /**
* FOR SHARE causes the rows retrieved by the SELECT statement to be locked as though for update.
*
* Supported by MySQL, PostgreSQL, SQLServer.
*
* @param fallbackToForUpdate
* if the FOR SHARE is not supported and this parameter is <code>true</code>, the
* {@link #forUpdate()} functionality will be used.
*
* @return the current object
*
* @throws QueryException
* if the FOR SHARE is not supported and <i>fallbackToForUpdate</i> is set to
* <code>false</code>.
*/
public Q forShare(boolean fallbackToForUpdate) {
SQLTemplates sqlTemplates = configuration.getTemplates();
if (sqlTemplates.isForShareSupported()) {
QueryFlag forShareFlag = sqlTemplates.getForShareFlag();
return addFlag(forShareFlag);
}
if (fallbackToForUpdate) {
return forUpdate();
}
throw new QueryException("Using forShare() is not supported");
} | 3.68 |
framework_HierarchyMapper_isExpanded | /**
* Returns whether the given item is expanded.
*
* @param item
* the item to test
* @return {@code true} if item is expanded; {@code false} if not
*/
public boolean isExpanded(T item) {
if (item == null) {
// Root nodes are always visible.
return true;
}
return expandedItemIds.contains(getDataProvider().getId(item));
} | 3.68 |
hmily_ConfigLoader_getOriginal | /**
* Gets original.
*
* @return the original
*/
public ConfigLoader<Config> getOriginal() {
return original;
} | 3.68 |
flink_StateTableByKeyGroupReaders_readerForVersion | /**
* Creates a new StateTableByKeyGroupReader that inserts de-serialized mappings into the given
* table, using the de-serialization algorithm that matches the given version.
*
* @param <K> type of key.
* @param <N> type of namespace.
* @param <S> type of state.
* @param stateTable the {@link StateTable} into which de-serialized mappings are inserted.
* @param version version for the de-serialization algorithm.
* @return the appropriate reader.
*/
public static <K, N, S> StateSnapshotKeyGroupReader readerForVersion(
StateTable<K, N, S> stateTable, int version) {
switch (version) {
case 1:
return new StateTableByKeyGroupReaderV1<>(stateTable);
case 2:
case 3:
case 4:
case 5:
case 6:
return createV2PlusReader(stateTable);
default:
throw new IllegalArgumentException("Unknown version: " + version);
}
} | 3.68 |
dubbo_StandardMetadataServiceURLBuilder_build | /**
* Build the {@link URL urls} from {@link ServiceInstance#getMetadata() the metadata} of {@link ServiceInstance}
*
* @param serviceInstance {@link ServiceInstance}
* @return the not-null {@link List}
*/
@Override
public List<URL> build(ServiceInstance serviceInstance) {
Map<String, String> paramsMap = getMetadataServiceURLsParams(serviceInstance);
String serviceName = serviceInstance.getServiceName();
String host = serviceInstance.getHost();
URL url;
if (paramsMap.isEmpty()) {
// ServiceInstance Metadata is empty. Happened when registry not support metadata write.
url = generateUrlWithoutMetadata(serviceName, host, serviceInstance.getPort());
} else {
url = generateWithMetadata(serviceName, host, paramsMap);
}
url = url.setScopeModel(serviceInstance.getApplicationModel().getInternalModule());
return Collections.singletonList(url);
} | 3.68 |
hbase_AbstractFSWAL_tellListenersAboutPostLogRoll | /**
* Tell listeners about post log roll.
*/
private void tellListenersAboutPostLogRoll(final Path oldPath, final Path newPath)
throws IOException {
if (!this.listeners.isEmpty()) {
for (WALActionsListener i : this.listeners) {
i.postLogRoll(oldPath, newPath);
}
}
coprocessorHost.postWALRoll(oldPath, newPath);
} | 3.68 |
streampipes_AbstractProcessingElementBuilder_supportedFormats | /**
* Assigns supported transport formats to the pipeline elements that can be handled at runtime (e.g.,
 * JSON or XML).
*
* @param formats A list of supported {@link org.apache.streampipes.model.grounding.TransportFormat}s. Use
* {@link org.apache.streampipes.sdk.helpers.SupportedFormats} to assign formats from some pre-defined
* ones or create your own by following the developer guide.
* @return this
*/
public K supportedFormats(List<TransportFormat> formats) {
this.supportedGrounding.setTransportFormats(formats);
return me();
} | 3.68 |
morf_OracleDialect_dropPrimaryKeyConstraint | /**
* ALTER TABLE ABC.DEF DROP PRIMARY KEY DROP INDEX
*/
private String dropPrimaryKeyConstraint(String tableName) {
// Drop the associated unique index at the same time
return "ALTER TABLE " + schemaNamePrefix() + tableName + " DROP PRIMARY KEY DROP INDEX";
} | 3.68 |
dubbo_ProviderConfig_getDispather | /**
* typo, switch to use {@link #getDispatcher()}
*
* @deprecated {@link #getDispatcher()}
*/
@Deprecated
@Parameter(excluded = true, attribute = false)
public String getDispather() {
return getDispatcher();
} | 3.68 |
hbase_ScannerContext_getKeepProgress | /**
* @return true if the progress tracked so far in this instance will be considered during an
* invocation of {@link InternalScanner#next(java.util.List)} or
* {@link RegionScanner#next(java.util.List)}. false when the progress tracked so far
* should not be considered and should instead be wiped away via {@link #clearProgress()}.
* This only applies to per-row progress, like batch and data/heap size. Block size is
* never reset because it tracks all of the blocks scanned for an entire request.
*/
boolean getKeepProgress() {
return keepProgress;
} | 3.68 |
hudi_AvroSchemaConverter_convertToSchema | /**
* Converts Flink SQL {@link LogicalType} (can be nested) into an Avro schema.
*
* <p>The "{rowName}." is used as the nested row type name prefix in order to generate the right
* schema. Nested record type that only differs with type name is still compatible.
*
* @param logicalType logical type
* @param rowName the record name
* @return Avro's {@link Schema} matching this logical type.
*/
public static Schema convertToSchema(LogicalType logicalType, String rowName) {
int precision;
boolean nullable = logicalType.isNullable();
switch (logicalType.getTypeRoot()) {
case NULL:
return SchemaBuilder.builder().nullType();
case BOOLEAN:
Schema bool = SchemaBuilder.builder().booleanType();
return nullable ? nullableSchema(bool) : bool;
case TINYINT:
case SMALLINT:
case INTEGER:
Schema integer = SchemaBuilder.builder().intType();
return nullable ? nullableSchema(integer) : integer;
case BIGINT:
Schema bigint = SchemaBuilder.builder().longType();
return nullable ? nullableSchema(bigint) : bigint;
case FLOAT:
Schema f = SchemaBuilder.builder().floatType();
return nullable ? nullableSchema(f) : f;
case DOUBLE:
Schema d = SchemaBuilder.builder().doubleType();
return nullable ? nullableSchema(d) : d;
case CHAR:
case VARCHAR:
Schema str = SchemaBuilder.builder().stringType();
return nullable ? nullableSchema(str) : str;
case BINARY:
case VARBINARY:
Schema binary = SchemaBuilder.builder().bytesType();
return nullable ? nullableSchema(binary) : binary;
case TIMESTAMP_WITHOUT_TIME_ZONE:
// use long to represents Timestamp
final TimestampType timestampType = (TimestampType) logicalType;
precision = timestampType.getPrecision();
org.apache.avro.LogicalType timestampLogicalType;
if (precision <= 3) {
timestampLogicalType = LogicalTypes.timestampMillis();
} else if (precision <= 6) {
timestampLogicalType = LogicalTypes.timestampMicros();
} else {
throw new IllegalArgumentException(
"Avro does not support TIMESTAMP type with precision: "
+ precision
+ ", it only support precisions <= 6.");
}
Schema timestamp = timestampLogicalType.addToSchema(SchemaBuilder.builder().longType());
return nullable ? nullableSchema(timestamp) : timestamp;
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
// use long to represents LocalZonedTimestampType
final LocalZonedTimestampType localZonedTimestampType = (LocalZonedTimestampType) logicalType;
precision = localZonedTimestampType.getPrecision();
org.apache.avro.LogicalType localZonedTimestampLogicalType;
if (precision <= 3) {
localZonedTimestampLogicalType = LogicalTypes.localTimestampMillis();
} else if (precision <= 6) {
localZonedTimestampLogicalType = LogicalTypes.localTimestampMicros();
} else {
throw new IllegalArgumentException(
"Avro does not support LOCAL TIMESTAMP type with precision: "
+ precision
+ ", it only support precisions <= 6.");
}
Schema localZonedTimestamp = localZonedTimestampLogicalType.addToSchema(SchemaBuilder.builder().longType());
return nullable ? nullableSchema(localZonedTimestamp) : localZonedTimestamp;
case DATE:
// use int to represents Date
Schema date = LogicalTypes.date().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(date) : date;
case TIME_WITHOUT_TIME_ZONE:
precision = ((TimeType) logicalType).getPrecision();
if (precision > 3) {
throw new IllegalArgumentException(
"Avro does not support TIME type with precision: "
+ precision
+ ", it only supports precision less than 3.");
}
// use int to represents Time, we only support millisecond when deserialization
Schema time =
LogicalTypes.timeMillis().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(time) : time;
case DECIMAL:
DecimalType decimalType = (DecimalType) logicalType;
// store BigDecimal as Fixed
// for spark compatibility.
Schema decimal =
LogicalTypes.decimal(decimalType.getPrecision(), decimalType.getScale())
.addToSchema(SchemaBuilder
.fixed(String.format("%s.fixed", rowName))
.size(computeMinBytesForDecimalPrecision(decimalType.getPrecision())));
return nullable ? nullableSchema(decimal) : decimal;
case ROW:
RowType rowType = (RowType) logicalType;
List<String> fieldNames = rowType.getFieldNames();
// we have to make sure the record name is different in a Schema
SchemaBuilder.FieldAssembler<Schema> builder =
SchemaBuilder.builder().record(rowName).fields();
for (int i = 0; i < rowType.getFieldCount(); i++) {
String fieldName = fieldNames.get(i);
LogicalType fieldType = rowType.getTypeAt(i);
SchemaBuilder.GenericDefault<Schema> fieldBuilder =
builder.name(fieldName)
.type(convertToSchema(fieldType, rowName + "." + fieldName));
if (fieldType.isNullable()) {
builder = fieldBuilder.withDefault(null);
} else {
builder = fieldBuilder.noDefault();
}
}
Schema record = builder.endRecord();
return nullable ? nullableSchema(record) : record;
case MULTISET:
case MAP:
Schema map =
SchemaBuilder.builder()
.map()
.values(
convertToSchema(
extractValueTypeToAvroMap(logicalType), rowName));
return nullable ? nullableSchema(map) : map;
case ARRAY:
ArrayType arrayType = (ArrayType) logicalType;
Schema array =
SchemaBuilder.builder()
.array()
.items(convertToSchema(arrayType.getElementType(), rowName));
return nullable ? nullableSchema(array) : array;
case RAW:
default:
throw new UnsupportedOperationException(
"Unsupported to derive Schema for type: " + logicalType);
}
} | 3.68 |
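A usage sketch converting a Flink RowType into an Avro schema with the method above; the import for the AvroSchemaConverter class itself is omitted because its package depends on the module it lives in, and the row layout is made up.

```java
import org.apache.avro.Schema;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.logical.RowType;

public class RowToAvroSchemaExample {
    public static void main(String[] args) {
        // A non-null row with a non-null BIGINT id and a nullable STRING name.
        RowType rowType = (RowType) DataTypes.ROW(
                        DataTypes.FIELD("id", DataTypes.BIGINT().notNull()),
                        DataTypes.FIELD("name", DataTypes.STRING()))
                .notNull()
                .getLogicalType();

        // Nullable fields become Avro unions ["null", ...] with a null default;
        // the row itself maps to a record named "record".
        Schema schema = AvroSchemaConverter.convertToSchema(rowType, "record");
        System.out.println(schema.toString(true));
    }
}
```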
hadoop_DockerContainerDeletionTask_convertDeletionTaskToProto | /**
* Convert the DockerContainerDeletionTask to the Protobuf representation for
* storing in the state store and recovery.
*
* @return the protobuf representation of the DockerContainerDeletionTask.
*/
public DeletionServiceDeleteTaskProto convertDeletionTaskToProto() {
DeletionServiceDeleteTaskProto.Builder builder =
getBaseDeletionTaskProtoBuilder();
builder.setTaskType(DeletionTaskType.DOCKER_CONTAINER.name());
if (getContainerId() != null) {
builder.setDockerContainerId(getContainerId());
}
return builder.build();
} | 3.68 |
morf_DataValueLookupBuilderImpl_set | /**
* Sets the value of a specified column. Resizes or allocates the array as required.
*/
private DataValueLookupBuilder set(String columnName, Object value) {
CaseInsensitiveString key = CaseInsensitiveString.of(columnName);
// No data yet - initialise
if (metadata == null) {
metadata = DataValueLookupMetadataRegistry.intern(key);
data = initialiseArray(DEFAULT_INITIAL_SIZE);
setAtIndex(0, value);
return this;
}
// Overwrite the existing value if it exists
Integer existingIndex = metadata.getIndexInArray(key);
if (existingIndex != null) {
setAtIndex(existingIndex, value);
return this;
}
// Expand the array if required
int newIndex = metadata.getColumnNames().size();
if (newIndex == data.length) {
grow();
}
// Update the metadata and store
metadata = DataValueLookupMetadataRegistry.appendAndIntern(metadata, key);
setAtIndex(newIndex, value);
return this;
} | 3.68 |
hmily_HmilyXaResource_forget | /**
* Forget.
*
* @throws XAException the xa exception
*/
public void forget() throws XAException {
this.forget(this.xid);
} | 3.68 |
hadoop_VolumeStates_addVolumeIfAbsent | /**
* Add volume if it is not yet added.
 * If a new volume is added with the same {@link VolumeId}
 * as an existing volume, the existing volume will be returned.
* @param volume volume to add
* @return volume added or existing volume
*/
public Volume addVolumeIfAbsent(Volume volume) {
if (volume.getVolumeId() != null) {
return volumeStates.putIfAbsent(volume.getVolumeId(), volume);
} else {
      // for dynamically provisioned volumes,
      // the volume ID might not be available at this point.
      // we can make one up from the combination of driver+volumeName+timestamp;
      // once the volume ID is generated, we should replace it.
return volume;
}
} | 3.68 |
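The method above leans on the putIfAbsent contract of the backing concurrent map: the call returns the previously mapped volume, or null when there was none, which is why the result has to be null-checked before being handed back. A JDK-only sketch of that contract (the map contents here are illustrative, not part of the YARN API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, String> volumes = new ConcurrentHashMap<>();
        // First registration: no previous mapping, so putIfAbsent returns null
        String first = volumes.putIfAbsent("vol-1", "volume-A");
        // Second registration under the same id: the existing value wins and is returned
        String second = volumes.putIfAbsent("vol-1", "volume-B");
        System.out.println(first);                // null
        System.out.println(second);               // volume-A
        System.out.println(volumes.get("vol-1")); // volume-A
    }
}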
rocketmq-connect_RocketMQSourceValueConverter_convertKafkaValue | /**
 * Recursively converts a Kafka Connect value into the RocketMQ Connect representation
 * described by the target schema.
 *
 * @param targetSchema  the RocketMQ Connect schema describing the target value
 * @param originalValue the Kafka Connect value to convert
 * @return the converted value, or the original value when no target schema is supplied
*/
private Object convertKafkaValue(Schema targetSchema, Object originalValue) {
if (targetSchema == null) {
if (originalValue == null) {
return null;
}
return originalValue;
}
switch (targetSchema.getFieldType()) {
case INT8:
case INT16:
case INT32:
case INT64:
case FLOAT32:
case FLOAT64:
case BOOLEAN:
case STRING:
case BYTES:
return originalValue;
case STRUCT:
Struct toStruct = new Struct(targetSchema);
if (originalValue != null) {
convertStructValue(toStruct, (org.apache.kafka.connect.data.Struct) originalValue);
}
return toStruct;
case ARRAY:
List<Object> array = (List<Object>) originalValue;
List<Object> newArray = new ArrayList<>();
array.forEach(item -> {
newArray.add(convertKafkaValue(targetSchema.getValueSchema(), item));
});
return newArray;
case MAP:
Map mapData = (Map) originalValue;
Map newMapData = new ConcurrentHashMap();
mapData.forEach((k, v) -> {
newMapData.put(
convertKafkaValue(targetSchema.getKeySchema(), k),
convertKafkaValue(targetSchema.getValueSchema(), v)
);
});
return newMapData;
default:
                throw new RuntimeException("Type not supported: " + targetSchema.getFieldType());
}
} | 3.68 |
hadoop_ClusterMetrics_getReservedReduceSlots | /**
* Get the number of reserved reduce slots in the cluster.
*
* @return reserved reduce slot count
*/
public int getReservedReduceSlots() {
return reservedReduceSlots;
} | 3.68 |
pulsar_ResourceGroup_rgFillResourceUsage | // Transport manager mandated op.
public void rgFillResourceUsage(ResourceUsage resourceUsage) {
NetworkUsage p;
resourceUsage.setOwner(this.getID());
p = resourceUsage.setPublish();
this.setUsageInMonitoredEntity(ResourceGroupMonitoringClass.Publish, p);
p = resourceUsage.setDispatch();
this.setUsageInMonitoredEntity(ResourceGroupMonitoringClass.Dispatch, p);
// Punt storage for now.
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertWithNullDefaults | /**
 * Tests that an insert statement is generated with null default values.
*/
@Test
public void testInsertWithNullDefaults() {
InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE))
.from(new TableReference(OTHER_TABLE)).withDefaults(
new NullFieldLiteral().as(DATE_FIELD),
new NullFieldLiteral().as(BOOLEAN_FIELD),
new NullFieldLiteral().as(CHAR_FIELD),
new NullFieldLiteral().as(BLOB_FIELD)
);
String expectedSql = "INSERT INTO " + tableName(TEST_TABLE) + " (id, version, stringField, intField, floatField, dateField, booleanField, charField, blobField, bigIntegerField, clobField) SELECT id, version, stringField, intField, floatField, null AS dateField, null AS booleanField, null AS charField, null AS blobField, 12345 AS bigIntegerField, null AS clobField FROM " + tableName(OTHER_TABLE);
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertEquals("Insert with null defaults", ImmutableList.of(expectedSql), sql);
} | 3.68 |
flink_MemorySegment_getCharBigEndian | /**
* Reads a character value (16 bit, 2 bytes) from the given position, in big-endian byte order.
* This method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #getChar(int)}. For most cases (such as transient storage in memory or serialization
* for I/O and network), it suffices to know that the byte order in which the value is written
* is the same as the one in which it is read, and {@link #getChar(int)} is the preferable
* choice.
*
* @param index The position from which the value will be read.
* @return The character value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public char getCharBigEndian(int index) {
if (LITTLE_ENDIAN) {
return Character.reverseBytes(getChar(index));
} else {
return getChar(index);
}
} | 3.68 |
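A short round-trip sketch of the byte-order behaviour described above, assuming the unpooled MemorySegmentFactory allocator and the matching putCharBigEndian writer available in recent Flink releases:

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class CharEndiannessDemo {
    public static void main(String[] args) {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(16);
        // Write a character whose two bytes differ, in big-endian order, at offset 0
        char original = (char) 0x1234;
        segment.putCharBigEndian(0, original);
        // Reading it back big-endian restores the original value on any platform
        char roundTripped = segment.getCharBigEndian(0);
        // The native-order accessor sees the bytes swapped on little-endian hardware
        char nativeOrder = segment.getChar(0);
        System.out.println(Integer.toHexString(roundTripped)); // 1234
        System.out.println(Integer.toHexString(nativeOrder));  // 3412 on little-endian machines
    }
}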
hbase_HBaseTestingUtility_getBaseTestDirOnTestFS | /**
 * @return where to write test data on the test filesystem; defaults to the working
 *         directory of the test filesystem
* @see #setupDataTestDirOnTestFS()
* @see #getTestFileSystem()
*/
private Path getBaseTestDirOnTestFS() throws IOException {
FileSystem fs = getTestFileSystem();
return new Path(fs.getWorkingDirectory(), "test-data");
} | 3.68 |
hbase_HFileBlockIndex_getRootBlockOffset | /**
   * @param i index of the root-level block, from 0 to {@link #getRootBlockCount()} - 1
   * @return the file offset of the root-level index block at position {@code i}
*/
public long getRootBlockOffset(int i) {
return blockOffsets[i];
} | 3.68 |
morf_SpreadsheetDataSetProducer_createRecord | /**
* Creates a record from a set of cells from a worksheet.
*
* @param id ID of the row
* @param columnHeadingsMap Map of column headings to their index
* @param translationColumn Column containing translations
* @param cells The cells to process
* @return the created record
*/
private Record createRecord(final long id, final Map<String, Integer> columnHeadingsMap, final int translationColumn, final Cell... cells) {
final int translationId;
if (translationColumn != -1 && cells[translationColumn].getContents().length() > 0) {
translationId = translations.size() + 1;
translations.add(createTranslationRecord(translationId, cells[translationColumn].getContents()));
} else {
translationId = 0;
}
final RecordBuilder record = DataSetUtils.record();
for (Entry<String, Integer> column : columnHeadingsMap.entrySet()) {
if (column.getValue() < cells.length) {
record.setString(column.getKey(), cells[column.getValue()].getContents());
} else {
        // If the cell isn't actually specified then assume it defaults to blank
record.setString(column.getKey(), "");
}
}
record.setLong("id", id);
record.setInteger("translationId", translationId);
return record;
} | 3.68 |
hadoop_FSTreeTraverser_resolvePaths | /**
* Resolve the cursor of traverse to an inode.
* <p>
 * The parent of the lowest-level startAfter is returned. If an element somewhere
 * in the middle of startAfters has changed, the parent of the lowest unchanged
 * level is returned.
*
* @param startId
* Id of the start inode.
* @param startAfters
* the cursor, represented by a list of path bytes.
* @return the parent inode corresponding to the startAfters, or null if the
* furthest parent is deleted.
*/
private INode resolvePaths(final long startId, List<byte[]> startAfters)
throws IOException {
// If the readlock was reacquired, we need to resolve the paths again
// in case things have changed. If our cursor file/dir is changed,
// continue from the next one.
INode zoneNode = dir.getInode(startId);
if (zoneNode == null) {
throw new FileNotFoundException("Zone " + startId + " is deleted.");
}
INodeDirectory parent = zoneNode.asDirectory();
for (int i = 0; i < startAfters.size(); ++i) {
if (i == startAfters.size() - 1) {
// last startAfter does not need to be resolved, since search for
// nextChild will cover that automatically.
break;
}
INode curr = parent.getChild(startAfters.get(i),
Snapshot.CURRENT_STATE_ID);
if (curr == null) {
// inode at this level has changed. Update startAfters to point to
// the next dir at the parent level (and dropping any startAfters
// at lower levels).
for (; i < startAfters.size(); ++i) {
startAfters.remove(startAfters.size() - 1);
}
break;
}
parent = curr.asDirectory();
}
return parent;
} | 3.68 |
graphhopper_Entity_writeDateField | /**
* Writes date as YYYYMMDD
*/
protected void writeDateField (LocalDate d) throws IOException {
writeStringField(d.format(DateTimeFormatter.BASIC_ISO_DATE));
} | 3.68 |
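A quick JDK-only check of the YYYYMMDD shape produced by DateTimeFormatter.BASIC_ISO_DATE, which the writer above delegates to:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

public class BasicIsoDateDemo {
    public static void main(String[] args) {
        LocalDate d = LocalDate.of(2024, 3, 5);
        // BASIC_ISO_DATE renders the date with no separators: year, month, day
        String formatted = d.format(DateTimeFormatter.BASIC_ISO_DATE);
        System.out.println(formatted); // 20240305
        // Parsing is symmetric, so the round trip recovers the same date
        System.out.println(LocalDate.parse(formatted, DateTimeFormatter.BASIC_ISO_DATE)); // 2024-03-05
    }
}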
pulsar_AuthenticationDataProvider_getTlsCertificateFilePath | /**
* @return a client certificate file path
*/
default String getTlsCertificateFilePath() {
return null;
} | 3.68 |
open-banking-gateway_FintechConsentSpecSecureStorage_toInboxForAuth | /**
* Sends FinTech user keys to FinTech public key storage.
* @param authSession Authorization session associated with this user
 * @param data FinTech user's private keys and other associated data
*/
@SneakyThrows
public void toInboxForAuth(AuthSession authSession, FinTechUserInboxData data) {
try (OutputStream os = datasafeServices.inboxService().write(
WriteRequest.forDefaultPublic(
ImmutableSet.of(authSession.getFintechUser().getUserId()),
new FintechUserAuthSessionTuple(authSession).toDatasafePathWithoutParent()))
) {
os.write(mapper.writeValueAsBytes(data));
}
} | 3.68 |
zxing_MinimalEncoder_getNumberOfC40Words | /** @return the number of words in which the string starting at from can be encoded in c40 or text mode.
* The number of characters encoded is returned in characterLength.
* The number of characters encoded is also minimal in the sense that the algorithm stops as soon
   * as a character encoding fills a C40 word completely (three C40 values). An exception is at the
* end of the string where two C40 values are allowed (according to the spec the third c40 value
* is filled with 0 (Shift 1) in this case).
*/
  static int getNumberOfC40Words(Input input, int from, boolean c40, int[] characterLength) {
int thirdsCount = 0;
for (int i = from; i < input.length(); i++) {
if (input.isECI(i)) {
characterLength[0] = 0;
return 0;
}
char ci = input.charAt(i);
if (c40 && HighLevelEncoder.isNativeC40(ci) || !c40 && HighLevelEncoder.isNativeText(ci)) {
thirdsCount++; //native
} else if (!isExtendedASCII(ci, input.getFNC1Character())) {
thirdsCount += 2; //shift
} else {
int asciiValue = ci & 0xff;
if (asciiValue >= 128 && (c40 && HighLevelEncoder.isNativeC40((char) (asciiValue - 128)) ||
!c40 && HighLevelEncoder.isNativeText((char) (asciiValue - 128)))) {
thirdsCount += 3; // shift, Upper shift
} else {
thirdsCount += 4; // shift, Upper shift, shift
}
}
if (thirdsCount % 3 == 0 || ((thirdsCount - 2) % 3 == 0 && i + 1 == input.length())) {
characterLength[0] = i - from + 1;
return (int) Math.ceil(((double) thirdsCount) / 3.0);
}
}
characterLength[0] = 0;
return 0;
} | 3.68 |
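For the simplest case the arithmetic above reduces to: each native C40 character (digit, upper-case letter, space) contributes one C40 value, and every three values pack into one code word, so the word count is the value count divided by three, rounded up. A self-contained sketch of just that arithmetic (it mirrors the counting only; it does not call the package-private zxing method, which additionally stops at the first completely filled word):

public class C40WordCountSketch {
    // Code words needed for a run of native C40 characters,
    // each contributing exactly one C40 value.
    static int c40WordsForNativeRun(int nativeCharCount) {
        return (int) Math.ceil(nativeCharCount / 3.0);
    }

    public static void main(String[] args) {
        System.out.println(c40WordsForNativeRun(3)); // e.g. "AB1" -> 3 values -> 1 word
        System.out.println(c40WordsForNativeRun(4)); // 4 values -> 2 words, last word padded
        System.out.println(c40WordsForNativeRun(6)); // 6 values -> 2 words
    }
}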
hbase_CellComparator_compareRows | /**
* Lexicographically compare two rows
   * @param row ByteBuffer that wraps a row; reads from the current position through all remaining
   *            bytes; will not disturb the ByteBuffer internal state.
   * @param cell the cell whose row is compared against {@code row}
   * @return greater than 0 if {@code row} is bigger, less than 0 if the cell's row is bigger,
   *         0 if the two rows are equal
*/
default int compareRows(ByteBuffer row, Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.compareTo(row, row.position(), row.remaining(),
((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength());
}
return ByteBufferUtils.compareTo(row, row.position(), row.remaining(), cell.getRowArray(),
cell.getRowOffset(), cell.getRowLength());
} | 3.68 |
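A minimal usage sketch of this comparator, assuming the standard HBase 2.x CellBuilder API (the row keys, family and value below are made up for illustration):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareRowsDemo {
    public static void main(String[] args) {
        Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
                .setRow(Bytes.toBytes("row-0002"))
                .setFamily(Bytes.toBytes("cf"))
                .setQualifier(Bytes.toBytes("q"))
                .setTimestamp(System.currentTimeMillis())
                .setType(Cell.Type.Put)
                .setValue(Bytes.toBytes("v"))
                .build();
        ByteBuffer row = ByteBuffer.wrap(Bytes.toBytes("row-0001"));
        // Negative result: "row-0001" sorts before the cell's row "row-0002";
        // the wrapped buffer's position and limit are left untouched by the call.
        int cmp = CellComparator.getInstance().compareRows(row, cell);
        System.out.println(cmp < 0); // true
    }
}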