Columns: name (string, 12 to 178 chars); code_snippet (string, 8 to 36.5k chars); score (float64, 3.26 to 3.68)
framework_VScrollTable_enableColumn
/** * Enable a column (Sets the footer cell). * * @param cid * The columnId * @param index * The index of the column */ public void enableColumn(String cid, int index) { final FooterCell c = getFooterCell(cid); if (!c.isEnabled() || getFooterCell(index) != c) { setFooterCell(index, c); if (initializedAndAttached) { headerChangedDuringUpdate = true; } } }
3.68
hbase_MasterObserver_postAssign
/** * Called after the region assignment has been requested. * @param ctx the environment to interact with the framework and master * @param regionInfo the regionInfo of the region */ default void postAssign(final ObserverContext<MasterCoprocessorEnvironment> ctx, final RegionInfo regionInfo) throws IOException { }
3.68
hbase_PermissionStorage_validateFilterUser
/* * Validate the cell key with the client filterUser if specified in the query input. 1. If cell * key (username) is not a group then check whether client filterUser is equal to username 2. If * cell key (username) is a group then check whether client filterUser belongs to the cell key * group (username) 3. In case when both filterUser and username are group names then cell will be * filtered if not equal. */ private static boolean validateFilterUser(String username, String filterUser, List<String> filterUserGroups) { if (filterUserGroups == null) { // Validate user name or group names whether equal if (filterUser.equals(username)) { return true; } } else { // Check whether filter user belongs to the cell key group. return filterUserGroups.contains(username.substring(1)); } return false; }
3.68
hadoop_AuxServiceFile_type
/** * Config file in the standard format like xml, properties, json, yaml, * template. **/ public AuxServiceFile type(TypeEnum t) { this.type = t; return this; }
3.68
mutate-test-kata_Company_findEmployeeById
/** * Finds an employee by their id * @param id the id of the employee to be found * @return the employee with the id passed as the parameter or null if no such employee exists */ public Employee findEmployeeById(String id) { for (int i = 0; i < this.employees.size(); i++) { if (this.employees.get(i).getId().equals(id)) { return this.employees.get(i); } } return null; }
3.68
hbase_CanaryTool_sniff
/* * Loops over regions of this table, and outputs information about the state. */ private static List<Future<Void>> sniff(final Admin admin, final Sink sink, TableDescriptor tableDesc, ExecutorService executor, TaskType taskType, boolean rawScanEnabled, LongAdder rwLatency, boolean readAllCF) throws Exception { LOG.debug("Reading list of regions for table {}", tableDesc.getTableName()); try (Table table = admin.getConnection().getTable(tableDesc.getTableName())) { List<RegionTask> tasks = new ArrayList<>(); try (RegionLocator regionLocator = admin.getConnection().getRegionLocator(tableDesc.getTableName())) { for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { LOG.warn("Null location"); continue; } ServerName rs = location.getServerName(); RegionInfo region = location.getRegion(); tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, taskType, rawScanEnabled, rwLatency, readAllCF)); Map<String, List<RegionTaskResult>> regionMap = ((RegionStdOutSink) sink).getRegionMap(); regionMap.put(region.getRegionNameAsString(), new ArrayList<RegionTaskResult>()); } return executor.invokeAll(tasks); } } catch (TableNotFoundException e) { return Collections.EMPTY_LIST; } }
3.68
framework_VaadinService_closeInactiveUIs
/** * Closes those UIs in the given session for which {@link #isUIActive} * yields false. * * @since 7.0.0 */ private void closeInactiveUIs(VaadinSession session) { final String sessionId = session.getSession().getId(); for (final UI ui : session.getUIs()) { if (!isUIActive(ui) && !ui.isClosing()) { ui.accessSynchronously(() -> { getLogger().log(Level.FINE, "Closing inactive UI #{0} in session {1}", new Object[] { ui.getUIId(), sessionId }); ui.close(); }); } } }
3.68
hbase_ColumnCountGetFilter_toByteArray
/** Returns The filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.ColumnCountGetFilter.Builder builder = FilterProtos.ColumnCountGetFilter.newBuilder(); builder.setLimit(this.limit); return builder.build().toByteArray(); }
3.68
hmily_XaResourcePool_getResource
/** * Gets resource. * * @param xid the xid * @return the resource */ public XaResourceWrapped getResource(final Xid xid) { XaResourceWrapped xaResourceWrapped = pool.get(xid); // if (xaResourceWrapped == null) { // // TODO: look it up from the log. // } return xaResourceWrapped; }
3.68
hudi_SqlQueryBuilder_on
/** * Appends an ON clause to a query. * * @param predicate The predicate to join on. * @return The {@link SqlQueryBuilder} instance. */ public SqlQueryBuilder on(String predicate) { if (StringUtils.isNullOrEmpty(predicate)) { throw new IllegalArgumentException("The ON predicate must not be null or empty"); } sqlBuilder.append(" on "); sqlBuilder.append(predicate); return this; }
3.68
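A minimal usage sketch for the on() entry above. The select/from/join calls are assumed companion methods of the same builder (they are not shown in this snippet), so treat the chain as illustrative:

// Hypothetical chain; only on() is documented above.
String sql = SqlQueryBuilder.select("o.id", "c.name")
    .from("orders o")
    .join("customers c")
    .on("o.customer_id = c.id") // appends " on o.customer_id = c.id"
    .toString();
// Passing null or "" to on() throws IllegalArgumentException.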
querydsl_GeneratedAnnotationResolver_resolve
/** * Use the {@code generatedAnnotationClass} or use the JDK one. * <p> * A {@code null generatedAnnotationClass} will resolve to the java {@code @Generated} annotation (can be of type {@code javax.annotation.Generated} * or {@code javax.annotation.processing.Generated} depending on the java version). * * @param generatedAnnotationClass the fully qualified class name of the <em>Single-Element Annotation</em> (with {@code String} element) * to use or {@code null}. * @return the provided {@code generatedAnnotationClass} if not {@code null} or the one from java. Never {@code null}. * @see <a href="https://docs.oracle.com/javase/specs/jls/se8/html/jls-9.html#jls-9.7.3">Single-Element Annotation</a> */ public static Class<? extends Annotation> resolve(@Nullable String generatedAnnotationClass) { if (generatedAnnotationClass != null) { try { return (Class<? extends Annotation>) Class.forName(generatedAnnotationClass); } catch (Exception e) { // Try the next candidate } } return resolveDefault(); }
3.68
hbase_MasterObserver_preGetLocks
/** * Called before a getLocks request has been processed. * @param ctx the environment to interact with the framework and master * @throws IOException if something went wrong */ default void preGetLocks(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { }
3.68
hbase_BalancerClusterState_checkLocationForPrimary
/** * Common method for checking whether a location (host or rack) offers a better placement for the given primary replica. * @param colocatedReplicaCountsPerLocation colocatedReplicaCountsPerHost or * colocatedReplicaCountsPerRack * @return 1 for better, -1 for no better, 0 for unknown */ private int checkLocationForPrimary(int location, Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int primary) { if (colocatedReplicaCountsPerLocation[location].containsKey(primary)) { // check whether there are other locations where we can place this region for (int i = 0; i < colocatedReplicaCountsPerLocation.length; i++) { if (i != location && !colocatedReplicaCountsPerLocation[i].containsKey(primary)) { return 1; // meaning there is a better location } } return -1; // there is not a better location to place this } return 0; }
3.68
flink_TGetQueryIdResp_findByThriftId
/** Find the _Fields constant that matches fieldId, or null if it's not found. */ public static _Fields findByThriftId(int fieldId) { switch (fieldId) { case 1: // QUERY_ID return QUERY_ID; default: return null; } }
3.68
rocketmq-connect_AbstractStateManagementService_putSafe
/** * Safely set the state of the task to the given value. What is considered "safe" depends on the implementation, but * basically it means that the store can provide higher assurance that another worker hasn't concurrently written * any conflicting data. * * @param status the status of the task */ @Override public void putSafe(TaskStatus status) { sendTaskStatus(status, true); }
3.68
flink_ProjectOperator_projectTuple4
/** * Projects a {@link Tuple} {@link DataSet} to the previously selected fields. * * @return The projected DataSet. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3> ProjectOperator<T, Tuple4<T0, T1, T2, T3>> projectTuple4() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType()); TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType = new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes); return new ProjectOperator<T, Tuple4<T0, T1, T2, T3>>( this.ds, this.fieldIndexes, tType); }
3.68
flink_WebLogDataGenerator_genRanks
/** * Generates the files for the ranks relation. The ranks entries apply the following format: * <br> * <code>Rank | URL | Average Duration |\n</code> * * @param noDocs Number of entries in the documents relation * @param path Output path for the ranks relation */ private static void genRanks(int noDocs, String path) { Random rand = new Random(Calendar.getInstance().getTimeInMillis()); try (BufferedWriter fw = new BufferedWriter(new FileWriter(path))) { for (int i = 0; i < noDocs; i++) { // Rank StringBuilder rank = new StringBuilder(rand.nextInt(100) + "|"); // URL rank.append("url_" + i + "|"); // Average duration rank.append(rand.nextInt(10) + rand.nextInt(50) + "|\n"); fw.write(rank.toString()); } } catch (IOException e) { e.printStackTrace(); } }
3.68
flink_TableFactoryUtil_findAndCreateTableSource
/** * Creates a {@link TableSource} from a {@link CatalogTable}. * * <p>It considers {@link Catalog#getFactory()} if provided. */ @SuppressWarnings("unchecked") public static <T> TableSource<T> findAndCreateTableSource( @Nullable Catalog catalog, ObjectIdentifier objectIdentifier, CatalogTable catalogTable, ReadableConfig configuration, boolean isTemporary) { TableSourceFactory.Context context = new TableSourceFactoryContextImpl( objectIdentifier, catalogTable, configuration, isTemporary); Optional<TableFactory> factoryOptional = catalog == null ? Optional.empty() : catalog.getTableFactory(); if (factoryOptional.isPresent()) { TableFactory factory = factoryOptional.get(); if (factory instanceof TableSourceFactory) { return ((TableSourceFactory<T>) factory).createTableSource(context); } else { throw new ValidationException( "Cannot query a sink-only table. " + "TableFactory provided by catalog must implement TableSourceFactory"); } } else { return findAndCreateTableSource(context); } }
3.68
hbase_RegionServerFlushTableProcedureManager_waitForOutstandingTasks
/** * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}. * This *must* be called after all tasks are submitted via submitTask. * @return <tt>true</tt> on success, <tt>false</tt> otherwise */ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException { LOG.debug("Waiting for local region flush to finish."); int sz = futures.size(); try { // Using the completion service to process the futures. for (int i = 0; i < sz; i++) { Future<Void> f = taskPool.take(); f.get(); if (!futures.remove(f)) { LOG.warn("unexpected future" + f); } LOG.debug("Completed " + (i + 1) + "/" + sz + " local region flush tasks."); } LOG.debug("Completed " + sz + " local region flush tasks."); return true; } catch (InterruptedException e) { LOG.warn("Got InterruptedException in FlushSubprocedurePool", e); if (!stopped) { Thread.currentThread().interrupt(); throw new ForeignException("FlushSubprocedurePool", e); } // we are stopped so we can just exit. } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof ForeignException) { LOG.warn("Rethrowing ForeignException from FlushSubprocedurePool", e); throw (ForeignException) e.getCause(); } else if (cause instanceof DroppedSnapshotException) { // we have to abort the region server according to contract of flush abortable.abort("Received DroppedSnapshotException, aborting", cause); } LOG.warn("Got Exception in FlushSubprocedurePool", e); throw new ForeignException(name, e.getCause()); } finally { cancelTasks(); } return false; }
3.68
hbase_StoreFileReader_passesGeneralRowBloomFilter
/** * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a * multi-column query. * @return True if passes */ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLen) { BloomFilter bloomFilter = this.generalBloomFilter; if (bloomFilter == null) { bloomFilterMetrics.incrementEligible(); return true; } // Used in ROW bloom byte[] key = null; if (rowOffset != 0 || rowLen != row.length) { throw new AssertionError("For row-only Bloom filters the row must occupy the whole array"); } key = row; return checkGeneralBloomFilter(key, null, bloomFilter); }
3.68
dubbo_DubboConfigInitEvent_getApplicationContext
/** * Get the {@code ApplicationContext} that the event was raised for. */ public final ApplicationContext getApplicationContext() { return (ApplicationContext) getSource(); }
3.68
morf_SqlDialect_getSqlForRightTrim
/** * Converts the RIGHT_TRIM function into SQL. * * @param function the function to convert. * @return a string representation of the SQL. */ protected String getSqlForRightTrim(Function function) { return "RTRIM(" + getSqlFrom(function.getArguments().get(0)) + ")"; }
3.68
flink_CastRuleProvider_generateAlwaysNonNullCodeBlock
/** * This method wraps {@link #generateCodeBlock(CodeGeneratorCastRule.Context, String, String, * LogicalType, LogicalType)}, but adds the assumption that the inputTerm is always non-null. * Used by {@link CodeGeneratorCastRule}s which check for nullability, rather than deferring * the check to the rules. */ static CastCodeBlock generateAlwaysNonNullCodeBlock( CodeGeneratorCastRule.Context context, String inputTerm, LogicalType inputLogicalType, LogicalType targetLogicalType) { if (inputLogicalType instanceof NullType) { return generateCodeBlock( context, inputTerm, "true", inputLogicalType, targetLogicalType); } return generateCodeBlock( context, inputTerm, "false", inputLogicalType.copy(false), targetLogicalType); }
3.68
hbase_TimestampsFilter_toByteArray
/** Returns The filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.TimestampsFilter.Builder builder = FilterProtos.TimestampsFilter.newBuilder(); builder.addAllTimestamps(this.timestamps); builder.setCanHint(canHint); return builder.build().toByteArray(); }
3.68
pulsar_ModularLoadManagerImpl_disableBroker
/** * As any broker, disable the broker this manager is running on. * * @throws PulsarServerException * If there's a failure when disabling broker on metadata store. */ @Override public void disableBroker() throws PulsarServerException { if (StringUtils.isNotEmpty(brokerZnodePath)) { try { brokerDataLock.release().join(); } catch (CompletionException e) { if (e.getCause() instanceof NotFoundException) { throw new PulsarServerException.NotFoundException(MetadataStoreException.unwrap(e)); } else { throw new PulsarServerException(MetadataStoreException.unwrap(e)); } } } }
3.68
framework_ServerRpcQueue_clear
/** * Clears the queue. */ public void clear() { pendingInvocations.clear(); // Keep tag string short lastInvocationTag = 0; flushPending = false; doFlushStrategy = NO_OP; }
3.68
querydsl_DateTimeExpression_dayOfWeek
/** * Create a day of week expression (range 1-7 / SUN-SAT) * <p>NOT supported in JDOQL and not in Derby</p> * * @return day of week */ public NumberExpression<Integer> dayOfWeek() { if (dayOfWeek == null) { dayOfWeek = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.DAY_OF_WEEK, mixin); } return dayOfWeek; }
3.68
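A hedged usage sketch for dayOfWeek() above; QCustomer and queryFactory are hypothetical Querydsl artifacts introduced only for illustration:

// Find customers born on a Sunday (dayOfWeek() == 1 in the SUN-SAT range).
QCustomer customer = QCustomer.customer;
List<Customer> bornOnSunday = queryFactory
    .selectFrom(customer)
    .where(customer.birthday.dayOfWeek().eq(1))
    .fetch();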
pulsar_JwksCache_getJwkAndMaybeReload
/** * Retrieve the JWK for the given key ID from the given JWKS URI. If the key ID is not found, and failOnMissingKeyId * is false, then the JWK will be reloaded from the JWKS URI and the key ID will be searched for again. */ private CompletableFuture<Jwk> getJwkAndMaybeReload(Optional<String> maybeJwksUri, String keyId, boolean failOnMissingKeyId) { return cache .get(maybeJwksUri) .thenCompose(jwks -> { try { return CompletableFuture.completedFuture(getJwkForKID(maybeJwksUri, jwks, keyId)); } catch (IllegalArgumentException e) { if (failOnMissingKeyId) { throw e; } else { Long lastRefresh = jwksLastRefreshTime.get(maybeJwksUri); if (lastRefresh == null || System.nanoTime() - lastRefresh > keyIdCacheMissRefreshNanos) { // In this case, the key ID was not found, but we haven't refreshed the JWKS in a while, // so it is possible the key ID was added. Refresh the JWKS and try again. cache.synchronous().invalidate(maybeJwksUri); } // There is a small race condition where the JWKS could be refreshed by another thread, // so we retry getting the JWK, even though we might not have invalidated the cache. return getJwkAndMaybeReload(maybeJwksUri, keyId, true); } } }); }
3.68
dubbo_ServiceInstance_getMetadata
/** * Get the value of metadata by the specified name * * @param name the specified name * @return the value of metadata if found, or <code>defaultValue</code> * @since 2.7.8 */ default String getMetadata(String name, String defaultValue) { return getMetadata().getOrDefault(name, defaultValue); }
3.68
flink_StateHandleStoreUtils_deserialize
/** * Deserializes the passed data into a {@link RetrievableStateHandle}. * * @param data The data that shall be deserialized. * @param <T> The type of data handled by the deserialized {@code RetrievableStateHandle}. * @return The {@code RetrievableStateHandle} instance. * @throws IOException Any of the usual Input/Output related exceptions. * @throws ClassNotFoundException If the data couldn't be deserialized into a {@code * RetrievableStateHandle} referring to the expected type {@code <T>}. */ public static <T extends Serializable> T deserialize(byte[] data) throws IOException, ClassNotFoundException { return InstantiationUtil.deserializeObject( data, Thread.currentThread().getContextClassLoader()); }
3.68
pulsar_ConsumerImpl_releasePooledMessagesAndStopAcceptNew
/** * If pooled messages are enabled, release the messages after closing the consumer and stop accepting new * messages. */ private void releasePooledMessagesAndStopAcceptNew() { incomingMessages.terminate(message -> message.release()); clearIncomingMessages(); }
3.68
framework_Calendar_getLastVisibleHourOfDay
/** * Returns the last visible hour in the week view, using the 24h time format. */ public int getLastVisibleHourOfDay() { return lastHour; }
3.68
hbase_MiniHBaseCluster_abortMaster
/** * Cause a master to exit without shutting down entire cluster. * @param serverNumber Used as index into a list. */ public String abortMaster(int serverNumber) { HMaster server = getMaster(serverNumber); LOG.info("Aborting " + server.toString()); server.abort("Aborting for tests", new Exception("Trace info")); return server.toString(); }
3.68
hadoop_HeaderProcessing_encodeBytes
/** * Stringify an object and return its bytes in UTF-8 encoding. * @param s source * @return encoded object or an empty buffer */ public static byte[] encodeBytes(@Nullable Object s) { return s == null ? EMPTY : s.toString().getBytes(StandardCharsets.UTF_8); }
3.68
hbase_SegmentScanner_isFileScanner
/** Returns true if this is a file scanner. Otherwise a memory scanner is assumed. */ @Override public boolean isFileScanner() { return false; }
3.68
hudi_Table_orderRows
/** * Sorts the rows by the specified ordering field, if one is set. * * @return the sorted rows, or the raw rows when no ordering field is configured */ private List<List<Comparable>> orderRows() { return orderingFieldNameOptional.map(orderingColumnName -> { return rawRows.stream().sorted((row1, row2) -> { Comparable fieldForRow1 = row1.get(rowHeader.indexOf(orderingColumnName)); Comparable fieldForRow2 = row2.get(rowHeader.indexOf(orderingColumnName)); int cmpRawResult = fieldForRow1.compareTo(fieldForRow2); return isDescendingOptional.map(isDescending -> isDescending ? -1 * cmpRawResult : cmpRawResult).orElse(cmpRawResult); }).collect(Collectors.toList()); }).orElse(rawRows); }
3.68
rocketmq-connect_RedisSourceConnector_validate
/** * Should be invoked before starting the connector. * * @param config the connector configuration to validate */ @Override public void validate(KeyValue config) { this.redisConfig.load(config); }
3.68
hbase_RegionState_hashCode
/** * Don't count timestamp in hash code calculation */ @Override public int hashCode() { return (serverName != null ? serverName.hashCode() * 11 : 0) + hri.hashCode() + 5 * state.ordinal(); }
3.68
druid_NodeListener_update
/** * Notify the observers. */ public void update(List<NodeEvent> events) { if (events != null && !events.isEmpty()) { this.lastUpdateTime = new Date(); NodeEvent[] arr = events.toArray(new NodeEvent[0]); this.setChanged(); this.notifyObservers(arr); } }
3.68
framework_DateUtil_compareDate
/** * Checks whether two dates fall on the same day, ignoring the time of day. * * @param date1 the first date * @param date2 the second date * @return true if both dates are on the same day */ @SuppressWarnings("deprecation") public static boolean compareDate(Date date1, Date date2) { return date1.getDate() == date2.getDate() && date1.getYear() == date2.getYear() && date1.getMonth() == date2.getMonth(); }
3.68
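The snippet above leans on deprecated java.util.Date getters (hence the @SuppressWarnings). Where java.time is available, a minimal sketch of an equivalent same-day check, assuming the system default zone is the intended calendar context:

import java.time.LocalDate;
import java.time.ZoneId;
import java.util.Date;

public static boolean isSameDay(Date date1, Date date2) {
    // Convert each Date to a LocalDate in the default zone, then compare days.
    LocalDate d1 = date1.toInstant().atZone(ZoneId.systemDefault()).toLocalDate();
    LocalDate d2 = date2.toInstant().atZone(ZoneId.systemDefault()).toLocalDate();
    return d1.equals(d2);
}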
hbase_ProcedureCoordinator_defaultPool
/** * Default thread pool for the procedure * @param coordName name used in the pool's thread names * @param opThreads the maximum number of threads to allow in the pool * @param keepAliveMillis the maximum time (ms) that excess idle threads will wait for new tasks */ public static ThreadPoolExecutor defaultPool(String coordName, int opThreads, long keepAliveMillis) { return new ThreadPoolExecutor(1, opThreads, keepAliveMillis, TimeUnit.MILLISECONDS, new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("(" + coordName + ")-proc-coordinator-pool-%d") .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); }
3.68
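A hedged usage sketch of defaultPool above. Because the pool pairs a SynchronousQueue with a core size of 1, tasks are handed straight to threads; once opThreads tasks run concurrently, further submissions are rejected rather than queued:

// "myCoord" is a hypothetical coordinator name.
ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool("myCoord", 4, 60_000L);
try {
    pool.execute(() -> { /* one procedure step */ });
} finally {
    pool.shutdown();
}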
shardingsphere-elasticjob_ElasticJobExecutorService_getActiveThreadCount
/** * Get active thread count. * * @return active thread count */ public int getActiveThreadCount() { return threadPoolExecutor.getActiveCount(); }
3.68
flink_SubtaskCommittableManager_hasReceivedAll
/** * Returns whether the received number of committables matches the expected number. * * @return if all committables have been received */ boolean hasReceivedAll() { return getNumCommittables() == numExpectedCommittables; }
3.68
dubbo_AbstractZookeeperTransporter_toClientURL
/** * Redefines the URL for zookeeper, keeping only the protocol, username, password, host, port, and the individual parameters handled below. * * @param url the original zookeeper URL * @return the trimmed-down URL for the client */ URL toClientURL(URL url) { Map<String, String> parameterMap = new HashMap<>(); // for CuratorZookeeperClient if (url.getParameter(TIMEOUT_KEY) != null) { parameterMap.put(TIMEOUT_KEY, url.getParameter(TIMEOUT_KEY)); } if (url.getParameter(RemotingConstants.BACKUP_KEY) != null) { parameterMap.put(RemotingConstants.BACKUP_KEY, url.getParameter(RemotingConstants.BACKUP_KEY)); } return new ServiceConfigURL( url.getProtocol(), url.getUsername(), url.getPassword(), url.getHost(), url.getPort(), ZookeeperTransporter.class.getName(), parameterMap); }
3.68
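A worked illustration of the filtering in toClientURL above; the input values are hypothetical:

// in : zookeeper://user:pw@host1:2181/some/path?timeout=5000&backup=host2:2181&version=1.0
// out: zookeeper://user:pw@host1:2181/<ZookeeperTransporter class name>?timeout=5000&backup=host2:2181
// Only the timeout and backup parameters survive; the path becomes the transporter interface name.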
framework_ServerRpcQueue_get
/** * Returns the server RPC queue for the given application. * * @param connection * the application connection which owns the queue * @return the server rpc queue for the given application */ public static ServerRpcQueue get(ApplicationConnection connection) { return connection.getServerRpcQueue(); }
3.68
dubbo_TriHttp2RemoteFlowController_isWritable
/** * Determine if the stream associated with {@code state} is writable. * @param state The state which is associated with the stream to test writability for. * @return {@code true} if {@link FlowState#stream()} is writable. {@code false} otherwise. */ final boolean isWritable(FlowState state) { return isWritableConnection() && state.isWritable(); }
3.68
flink_StateSerializerProvider_invalidateCurrentSchemaSerializerAccess
/** * Invalidates access to the current schema serializer. This lets {@link * #currentSchemaSerializer()} fail when invoked. * * <p>Access to the current schema serializer should be invalidated by the methods {@link * #registerNewSerializerForRestoredState(TypeSerializer)} or {@link * #setPreviousSerializerSnapshotForRestoredState(TypeSerializerSnapshot)} once the registered * serializer is determined to be incompatible. */ protected final void invalidateCurrentSchemaSerializerAccess() { this.isRegisteredWithIncompatibleSerializer = true; }
3.68
flink_KubernetesCheckpointStoreUtil_nameToCheckpointID
/** * Converts a key in ConfigMap to the checkpoint id. * * @param key in ConfigMap * @return Checkpoint id parsed from the key */ @Override public long nameToCheckpointID(String key) { try { return Long.parseLong(key.substring(CHECKPOINT_ID_KEY_PREFIX.length())); } catch (NumberFormatException e) { LOG.warn( "Could not parse checkpoint id from {}. This indicates that the " + "checkpoint id to path conversion has changed.", key); return INVALID_CHECKPOINT_ID; } }
3.68
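A hedged round-trip sketch for nameToCheckpointID above; CHECKPOINT_ID_KEY_PREFIX is assumed to be a constant such as "checkpointID-" (its actual value is not shown in the snippet):

String key = CHECKPOINT_ID_KEY_PREFIX + 42; // e.g. "checkpointID-42"
long id = Long.parseLong(key.substring(CHECKPOINT_ID_KEY_PREFIX.length())); // 42
// A non-numeric suffix such as "checkpointID-abc" would instead log a warning
// and yield INVALID_CHECKPOINT_ID.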
flink_ArrowUtils_collectAsPandasDataFrame
/** Convert Flink table to Pandas DataFrame. */ public static CustomIterator<byte[]> collectAsPandasDataFrame( Table table, int maxArrowBatchSize) throws Exception { checkArrowUsable(); BufferAllocator allocator = getRootAllocator().newChildAllocator("collectAsPandasDataFrame", 0, Long.MAX_VALUE); RowType rowType = (RowType) table.getResolvedSchema().toSourceRowDataType().getLogicalType(); DataType defaultRowDataType = TypeConversions.fromLogicalToDataType(rowType); VectorSchemaRoot root = VectorSchemaRoot.create(ArrowUtils.toArrowSchema(rowType), allocator); ByteArrayOutputStream baos = new ByteArrayOutputStream(); ArrowStreamWriter arrowStreamWriter = new ArrowStreamWriter(root, null, baos); arrowStreamWriter.start(); Iterator<Row> results = table.execute().collect(); Iterator<Row> appendOnlyResults; if (isAppendOnlyTable(table)) { appendOnlyResults = results; } else { appendOnlyResults = filterOutRetractRows(results); } ArrowWriter arrowWriter = createRowDataArrowWriter(root, rowType); Iterator convertedResults = new Iterator<RowData>() { @Override public boolean hasNext() { return appendOnlyResults.hasNext(); } @Override public RowData next() { DataFormatConverters.DataFormatConverter converter = DataFormatConverters.getConverterForDataType(defaultRowDataType); return (RowData) converter.toInternal(appendOnlyResults.next()); } }; return new CustomIterator<byte[]>() { @Override public boolean hasNext() { return convertedResults.hasNext(); } @Override public byte[] next() { try { int i = 0; while (convertedResults.hasNext() && i < maxArrowBatchSize) { i++; arrowWriter.write(convertedResults.next()); } arrowWriter.finish(); arrowStreamWriter.writeBatch(); return baos.toByteArray(); } catch (Throwable t) { String msg = "Failed to serialize the data of the table"; LOG.error(msg, t); throw new RuntimeException(msg, t); } finally { arrowWriter.reset(); baos.reset(); if (!hasNext()) { root.close(); allocator.close(); } } } }; }
3.68
flink_AllWindowedStream_reduce
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given reducer. * * @param reduceFunction The reduce function that is used for incremental aggregation. * @param function The process window function. * @param resultType Type information for the result type of the window function * @return The data stream that is the result of applying the window function to the window. */ @PublicEvolving public <R> SingleOutputStreamOperator<R> reduce( ReduceFunction<T> reduceFunction, ProcessAllWindowFunction<T, R, W> function, TypeInformation<R> resultType) { if (reduceFunction instanceof RichFunction) { throw new UnsupportedOperationException( "ReduceFunction of reduce can not be a RichFunction."); } // clean the closures function = input.getExecutionEnvironment().clean(function); reduceFunction = input.getExecutionEnvironment().clean(reduceFunction); String callLocation = Utils.getCallLocationName(); String udfName = "AllWindowedStream." + callLocation; String opName; KeySelector<T, Byte> keySel = input.getKeySelector(); OneInputStreamOperator<T, R> operator; if (evictor != null) { @SuppressWarnings({"unchecked", "rawtypes"}) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer( input.getType() .createSerializer( getExecutionEnvironment().getConfig())); ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")"; operator = new EvictingWindowOperator<>( windowAssigner, windowAssigner.getWindowSerializer( getExecutionEnvironment().getConfig()), keySel, input.getKeyType() .createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableProcessAllWindowFunction<>( new ReduceApplyProcessAllWindowFunction<>( reduceFunction, function)), trigger, evictor, allowedLateness, lateDataOutputTag); } else { ReducingStateDescriptor<T> stateDesc = new ReducingStateDescriptor<>( "window-contents", reduceFunction, input.getType() .createSerializer(getExecutionEnvironment().getConfig())); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")"; operator = new WindowOperator<>( windowAssigner, windowAssigner.getWindowSerializer( getExecutionEnvironment().getConfig()), keySel, input.getKeyType() .createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueProcessAllWindowFunction<>(function), trigger, allowedLateness, lateDataOutputTag); } return input.transform(opName, resultType, operator).forceNonParallel(); }
3.68
framework_HierarchicalDataCommunicator_doExpand
/** * Expands the given item at given index. Calling this method will have no * effect if the row is already expanded. The index is provided by the * client-side or calculated from a full data request. * * @param item * the item to expand * @param index * the index of the item * @see #expand(Object) * @deprecated use {@link #expand(Object, Integer)} instead */ @Deprecated public void doExpand(T item, Optional<Integer> index) { expand(item, index.orElse(null)); }
3.68
hbase_ServerManager_isServerKnownAndOnline
/** Returns whether the server is online, dead, or unknown. */ public synchronized ServerLiveState isServerKnownAndOnline(ServerName serverName) { return onlineServers.containsKey(serverName) ? ServerLiveState.LIVE : (deadservers.isDeadServer(serverName) ? ServerLiveState.DEAD : ServerLiveState.UNKNOWN); }
3.68
framework_Embedded_setStandby
/** * This attribute specifies a message that a user agent may render while * loading the object's implementation and data. * * @param standby * The text to display while loading */ public void setStandby(String standby) { String oldStandby = getStandby(); if (standby == null ? oldStandby != null : !standby.equals(oldStandby)) { getState().standby = standby; } }
3.68
hudi_WriteProfiles_getFilesFromMetadata
/** * Returns all the incremental write file statuses with the given commits metadata. * * @param basePath Table base path * @param hadoopConf The hadoop conf * @param metadataList The commit metadata list (should be in ascending order) * @param tableType The table type * @param ignoreMissingFiles Whether to ignore the missing files from filesystem * @return the file status array or null if any file is missing with ignoreMissingFiles as false */ @Nullable public static FileStatus[] getFilesFromMetadata( Path basePath, Configuration hadoopConf, List<HoodieCommitMetadata> metadataList, HoodieTableType tableType, boolean ignoreMissingFiles) { FileSystem fs = FSUtils.getFs(basePath.toString(), hadoopConf); Map<String, FileStatus> uniqueIdToFileStatus = new HashMap<>(); // If a file has been touched multiple times in the given commits, the return value should keep the one // from the latest commit, so here we traverse in reverse order for (int i = metadataList.size() - 1; i >= 0; i--) { for (Map.Entry<String, FileStatus> entry : getFilesToRead(hadoopConf, metadataList.get(i), basePath.toString(), tableType).entrySet()) { if (StreamerUtil.isValidFile(entry.getValue()) && !uniqueIdToFileStatus.containsKey(entry.getKey())) { if (StreamerUtil.fileExists(fs, entry.getValue().getPath())) { uniqueIdToFileStatus.put(entry.getKey(), entry.getValue()); } else if (!ignoreMissingFiles) { return null; } } } } return uniqueIdToFileStatus.values().toArray(new FileStatus[0]); }
3.68
hudi_HoodieTableMetaClient_getBootstrapIndexByPartitionFolderPath
/** * @return Bootstrap Index By Partition Folder */ public String getBootstrapIndexByPartitionFolderPath() { return basePath + Path.SEPARATOR + BOOTSTRAP_INDEX_BY_PARTITION_FOLDER_PATH; }
3.68
hudi_HoodieTableMetaserverClient_getTableType
/** * @return Hoodie Table Type */ public HoodieTableType getTableType() { return HoodieTableType.valueOf(table.getTableType()); }
3.68
zxing_BitMatrix_getWidth
/** * @return The width of the matrix */ public int getWidth() { return width; }
3.68
flink_HiveGenericUDAF_createAccumulator
/** * This is invoked without calling open(), so we need to call init() for * getNewAggregationBuffer(). TODO: re-evaluate how this will fit into Flink's new type * inference and udf system */ @Override public GenericUDAFEvaluator.AggregationBuffer createAccumulator() { try { if (!initialized) { init(); } return partialEvaluator.getNewAggregationBuffer(); } catch (Exception e) { throw new FlinkHiveUDFException( String.format( "Failed to create accumulator for %s", hiveFunctionWrapper.getUDFClassName()), e); } }
3.68
flink_CircularElement_endMarker
/** * Gets the element that is passed as marker for the end of data. * * @return The element that is passed as marker for the end of data. */ static <T> CircularElement<T> endMarker() { @SuppressWarnings("unchecked") CircularElement<T> c = (CircularElement<T>) EOF_MARKER; return c; }
3.68
framework_CaptionLeak_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Open this UI with ?debug and count measured non-connector elements after setting leaky and non leaky content."; }
3.68
hadoop_OBSInputStream_seekQuietly
/** * Seek without raising any exception. This is for use in {@code finally} * clauses * * @param positiveTargetPos a target position which must be positive. */ private void seekQuietly(final long positiveTargetPos) { try { seek(positiveTargetPos); } catch (IOException ioe) { LOG.debug("Ignoring IOE on seek of {} to {}", uri, positiveTargetPos, ioe); } }
3.68
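A hedged sketch of the finally-clause pattern the Javadoc above describes; getPos() and seek() are assumed from the surrounding stream class, and the fragment would live in a method that declares IOException:

long targetPos = 1024L; // hypothetical destination
long oldPos = getPos(); // remember where we were
try {
    seek(targetPos); // may throw IOException; let it propagate
    // ... read at the new position ...
} finally {
    seekQuietly(oldPos); // restore position without masking an in-flight exception
}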
flink_JoinOperator_projectTuple15
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> ProjectJoin< I1, I2, Tuple15< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> projectTuple15() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> tType = new TupleTypeInfo< Tuple15< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(fTypes); return new ProjectJoin< I1, I2, Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
hadoop_ClasspathConstructor_insert
/** * Insert a path at the front of the list. This places it ahead of * the standard YARN artifacts * @param path path to the JAR. Absolute or relative, on the target * system */ public void insert(String path) { pathElements.add(0, path); }
3.68
hadoop_AzureBlobFileSystemStore_equals
/** Compare if this object is equal to another object. * @param obj the object to be compared. * @return true if two file status has the same path name; false if not. */ @Override public boolean equals(Object obj) { if (!(obj instanceof FileStatus)) { return false; } FileStatus other = (FileStatus) obj; if (!this.getPath().equals(other.getPath())) {// compare the path return false; } if (other instanceof VersionedFileStatus) { return this.version.equals(((VersionedFileStatus) other).version); } return true; }
3.68
hadoop_BytesWritable_getCapacity
/** * Get the capacity, which is the maximum size that could be handled without * resizing the backing storage. * * @return The number of bytes */ public int getCapacity() { return bytes.length; }
3.68
hadoop_JobMetaData_setContainerStart
/** * Add container launch time. * * @param containerId id of the container. * @param time container launch time. * @return the reference to current {@link JobMetaData}. */ public final JobMetaData setContainerStart(final String containerId, final long time) { if (rawStart.put(containerId, time) != null) { LOGGER.warn("Found duplicate container launch time for {}; replacing" + " it with {}.", containerId, time); } return this; }
3.68
AreaShop_UnrentedRegionEvent_getOldRenter
/** * Get the player that the region was unrented for. * @return The UUID of the player that the region was unrented for */ public UUID getOldRenter() { return oldRenter; }
3.68
flink_LegacySourceTransformation_setBoundedness
/** Mutable for legacy sources in the Table API. */ public void setBoundedness(Boundedness boundedness) { this.boundedness = boundedness; }
3.68
hbase_WALEntryBatch_getLastWalPath
/** Returns the path of the last WAL that was read. */ public Path getLastWalPath() { return lastWalPath; }
3.68
hadoop_AbfsDelegationTokenManager_close
/** * Close. * If the token manager is closeable, it has its {@link Closeable#close()} * method (quietly) invoked. */ @Override public void close() { if (tokenManager instanceof Closeable) { IOUtils.cleanupWithLogger(LOG, (Closeable) tokenManager); } }
3.68
flink_InPlaceMutableHashTable_insertOrReplaceRecord
/** * Searches the hash table for a record with the given key. If it is found, then it is * overridden with the specified record. Otherwise, the specified record is inserted. * * @param record The record to insert or to replace with. * @throws IOException (EOFException specifically, if memory ran out) */ @Override public void insertOrReplaceRecord(T record) throws IOException { if (closed) { return; } T match = prober.getMatchFor(record, reuse); if (match == null) { prober.insertAfterNoMatch(record); } else { prober.updateMatch(record); } }
3.68
hudi_HoodieTableMetadataUtil_getFileGroupPrefix
/** * Extract the fileID prefix from the fileID of a file group in the MDT partition. See {@code getFileIDForFileGroup} for the format of the fileID. * * @param fileId fileID of a file group. * @return The fileID without the file index */ public static String getFileGroupPrefix(String fileId) { return fileId.substring(0, getFileIdLengthWithoutFileIndex(fileId)); }
3.68
framework_VAbstractPopupCalendar_updateTextFieldEnabled
/** * Updates the text field's enabled status to correspond with the latest * value set through {@link #setTextFieldEnabled(boolean)} and this * component's general {@link #setEnabled(boolean)}. * * @see #setTextFieldEnabled(boolean) */ protected void updateTextFieldEnabled() { boolean reallyEnabled = isEnabled() && isTextFieldEnabled(); // IE has non-input disabled theming that cannot be overridden, so we // must fake the functionality using readonly and unselectable if (BrowserInfo.get().isIE()) { if (!reallyEnabled) { text.getElement().setAttribute("unselectable", "on"); text.getElement().setAttribute("readonly", ""); text.setTabIndex(-2); } else if (reallyEnabled && text.getElement().hasAttribute("unselectable")) { text.getElement().removeAttribute("unselectable"); text.getElement().removeAttribute("readonly"); text.setTabIndex(0); } } else { text.setEnabled(reallyEnabled); } if (reallyEnabled) { calendarToggle.setTabIndex(-1); Roles.getButtonRole() .setAriaHiddenState(calendarToggle.getElement(), true); } else { calendarToggle.setTabIndex(0); Roles.getButtonRole() .setAriaHiddenState(calendarToggle.getElement(), false); } handleAriaAttributes(); }
3.68
streampipes_InfluxDbClient_getTimestamp
// Converts a string date from ISO_INSTANT format to a unix timestamp in nanoseconds static String getTimestamp(String date) { TemporalAccessor temporalAccessor = DateTimeFormatter.ISO_INSTANT.parse(date); Instant time = Instant.from(temporalAccessor); return time.getEpochSecond() + String.format("%09d", time.getNano()); }
3.68
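A worked example of the conversion above; the input instant is arbitrary:

// epochSecond = 1672531200, nano = 1 for this instant.
String ns = getTimestamp("2023-01-01T00:00:00.000000001Z");
// ns == "1672531200" + "000000001" == "1672531200000000001" (nanoseconds)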
hbase_HFileBlock_getOnDiskSizeWithHeader
/** * Returns the on-disk size of the block. Can only be called in the "block ready" state. * @return the on-disk size of the block ready to be written, including the header size, the * data and the checksum data. */ int getOnDiskSizeWithHeader() { expectState(State.BLOCK_READY); return onDiskBlockBytesWithHeader.size() + onDiskChecksum.length; }
3.68
hadoop_OBSPosixBucketUtils_fsRenameToNewObject
/** * Used to rename a source object to a destination object that did not exist * before the rename. * * @param owner OBS File System instance * @param srcKey source object key * @param dstKey destination object key * @throws IOException io exception */ static void fsRenameToNewObject(final OBSFileSystem owner, final String srcKey, final String dstKey) throws IOException { String newSrcKey = OBSCommonUtils.maybeDeleteBeginningSlash(srcKey); String newDstKey = OBSCommonUtils.maybeDeleteBeginningSlash(dstKey); if (newSrcKey.endsWith("/")) { // Rename folder. fsRenameToNewFolder(owner, newSrcKey, newDstKey); } else { // Rename file. innerFsRenameFile(owner, newSrcKey, newDstKey); } }
3.68
framework_Alignment_getVerticalAlignment
/** * Returns string representation of vertical alignment. * * @return vertical alignment as CSS value */ public String getVerticalAlignment() { if (isBottom()) { return "bottom"; } else if (isMiddle()) { return "middle"; } return "top"; }
3.68
framework_GridRefreshWithGetId_hashCode
/** * The class intentionally has strange {@code hashCode()} and * {@code equals()} implementations to test whether {@code Grid} relies on * the bean id rather than on bean hashCode/equals identity. * * {@see Object.hashCode} */ @Override public int hashCode() { int result = id; result = 31 * result + (name != null ? name.hashCode() : 0); return result; }
3.68
framework_AbstractMultiSelect_setItemEnabledProvider
/** * Sets the item enabled predicate for this multiselect. The predicate is * applied to each item to determine whether the item should be enabled ( * {@code true}) or disabled ({@code false}). Disabled items are displayed * as grayed out and the user cannot select them. The default predicate * always returns {@code true} (all the items are enabled). * <p> * <em>Implementation note:</em> Override this method and * {@link #getItemEnabledProvider()} as {@code public} and invoke * {@code super} methods to support this feature in the multiselect * component. * * @param itemEnabledProvider * the item enabled provider to set, not {@code null} */ protected void setItemEnabledProvider( SerializablePredicate<T> itemEnabledProvider) { Objects.requireNonNull(itemEnabledProvider); this.itemEnabledProvider = itemEnabledProvider; }
3.68
flink_KubernetesUtils_getCommonLabels
/** * Get the common labels for Flink native clusters. All the Kubernetes resources will be set * with these labels. * * @param clusterId cluster id * @return Return common labels map */ public static Map<String, String> getCommonLabels(String clusterId) { final Map<String, String> commonLabels = new HashMap<>(); commonLabels.put(Constants.LABEL_TYPE_KEY, Constants.LABEL_TYPE_NATIVE_TYPE); commonLabels.put(Constants.LABEL_APP_KEY, clusterId); return commonLabels; }
3.68
hbase_TableInfoModel_getName
/** Returns the table name */ @XmlAttribute public String getName() { return name; }
3.68
flink_TableSource_getReturnType
/** * @deprecated This method will be removed in future versions as it uses the old type system. It * is recommended to use {@link #getProducedDataType()} instead which uses the new type * system based on {@link DataTypes}. Please make sure to use either the old or the new type * system consistently to avoid unintended behavior. See the website documentation for more * information. */ @Deprecated default TypeInformation<T> getReturnType() { return null; }
3.68
hbase_ZKUtil_convert
/** * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. Used * when we can't let a {@link DeserializationException} out without changing the public API. * @param e Exception to convert * @return Converted exception */ public static KeeperException convert(final DeserializationException e) { KeeperException ke = new KeeperException.DataInconsistencyException(); ke.initCause(e); return ke; }
3.68
flink_WindowSavepointReader_reduce
/** * Reads window state generated using a {@link ReduceFunction}. * * @param uid The uid of the operator. * @param function The reduce function used to create the window. * @param readerFunction The window reader function. * @param keyType The key type of the window. * @param reduceType The type information of the reduce function. * @param outputType The output type of the reader function. * @param <K> The type of the key. * @param <T> The type of the reduce function. * @param <OUT> The output type of the reduce function. * @return A {@code DataStream} of objects read from keyed state. * @throws IOException If savepoint does not contain the specified uid. */ public <K, T, OUT> DataStream<OUT> reduce( String uid, ReduceFunction<T> function, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> reduceType, TypeInformation<OUT> outputType) throws IOException { WindowReaderOperator<?, K, T, W, OUT> operator = WindowReaderOperator.reduce( function, readerFunction, keyType, windowSerializer, reduceType); return readWindowOperator(uid, outputType, operator); }
3.68
morf_NamedParameterPreparedStatement_executeUpdate
/** * @see PreparedStatement#executeUpdate() * @return either (1) the row count for SQL Data Manipulation Language (DML) statements * or (2) 0 for SQL statements that return nothing * @exception SQLException if a database access error occurs; * this method is called on a closed <code>PreparedStatement</code> * or the SQL statement returns a <code>ResultSet</code> object * @throws SQLTimeoutException when the driver has determined that the * timeout value that was specified by the {@code setQueryTimeout} * method has been exceeded and has at least attempted to cancel * the currently running {@code Statement} */ public int executeUpdate() throws SQLException { return statement.executeUpdate(); }
3.68
hadoop_AbfsOutputStreamStatisticsImpl_timeSpentTaskWait
/** * {@inheritDoc} * * Records the total time spent waiting for a task to complete. * * When the thread executor has a task queue * {@link java.util.concurrent.BlockingQueue} of size greater than or * equal to 2 times the maxConcurrentRequestCounts then, it waits for a * task in that queue to finish, then do the next task in the queue. * * This time spent while waiting for the task to be completed is being * recorded in this counter. * */ @Override public DurationTracker timeSpentTaskWait() { return ioStatisticsStore.trackDuration(StreamStatisticNames.TIME_SPENT_ON_TASK_WAIT); }
3.68
AreaShop_ImportJob_importRegionSettings
/** * Import region specific settings from a RegionForSale source to an AreaShop target ConfigurationSection. * @param from RegionForSale config section that specifies region settings * @param to AreaShop config section that specifies region settings * @param region GeneralRegion to copy settings to, or null if doing generic settings * @param permanent Region cannot be rented or bought, disables some features */ private void importRegionSettings(ConfigurationSection from, ConfigurationSection to, GeneralRegion region, boolean permanent) { // Maximum rental time, TODO check if this is actually the same if(from.isLong("permissions.max-rent-time")) { to.set("rent.maxRentTime", minutesToString(from.getLong("permissions.max-rent-time"))); } // Region rebuild if(from.getBoolean("region-rebuilding.auto-rebuild")) { to.set("general.enableRestore", true); } // Get price settings String unit = from.getString("economic-settings.unit-type"); String rentPrice = from.getString("economic-settings.cost-per-unit.rent"); String buyPrice = from.getString("economic-settings.cost-per-unit.buy"); String sellPrice = from.getString("economic-settings.cost-per-unit.selling-price"); // TODO: There is no easy way to import this, setup eventCommandsProfile? // String taxes = from.getString("economic-settings.cost-per-unit.taxes"); // Determine unit and add that to the price String unitSuffix = ""; if("region".equalsIgnoreCase(unit)) { // add nothing } else if("m3".equalsIgnoreCase(unit)) { unitSuffix = "*%volume%"; } else { // m2 or nothing (in case not set, we should actually look in parent files to correctly set this...) unitSuffix = "*(%volume%/%height%)"; // This is better than width*depth because of polygon regions } // Apply settings if(rentPrice != null) { to.set("rent.price", rentPrice + unitSuffix); } if(buyPrice != null) { to.set("buy.price", buyPrice + unitSuffix); if(sellPrice != null) { try { double buyPriceAmount = Double.parseDouble(buyPrice); double sellPriceAmount = Double.parseDouble(sellPrice); to.set("buy.moneyBack", sellPriceAmount / buyPriceAmount * 100); } catch(NumberFormatException e) { // There is not always a region here for the message, should probably indicate something though message("import-moneyBackFailed", buyPrice, sellPrice); } } } // Apply permanent region settings if(permanent) { to.set("buy.resellDisabled", true); to.set("buy.sellDisabled", true); to.set("general.countForLimits", false); } // Set rented until if(from.isLong("info.last-withdrawal") && region instanceof RentRegion) { RentRegion rentRegion = (RentRegion)region; long lastWithdrawal = from.getLong("info.last-withdrawal"); // Because the rental duration is already imported into the region and its parents this should be correct rentRegion.setRentedUntil(lastWithdrawal + rentRegion.getDuration()); } // Import signs (list of strings like "297, 71, -22") if(from.isList("info.signs") && region != null) { for(String signLocation : from.getStringList("info.signs")) { String[] locationParts = signLocation.split(", "); if(locationParts.length != 3) { message("import-invalidSignLocation", region.getName(), signLocation); continue; } // Parse the location Location location; try { location = new Location(region.getWorld(), Double.parseDouble(locationParts[0]), Double.parseDouble(locationParts[1]), Double.parseDouble(locationParts[2])); } catch(NumberFormatException e) { message("import-invalidSignLocation", region.getName(), signLocation); continue; } // Check if this location is already added to a region RegionSign regionSign = SignsFeature.getSignByLocation(location); if(regionSign != null) { if(!regionSign.getRegion().equals(region)) { message("import-signAlreadyAdded", region.getName(), signLocation, regionSign.getRegion().getName()); } continue; } // SignType and Facing will be written when the sign is updated later region.getSignsFeature().addSign(location, null, null, null); } } }
3.68
MagicPlugin_SpellResult_isFree
/** * Determine if this result is a free cast or not. * * @return True if this cast should not consume costs. */ public boolean isFree() { return free; }
3.68
hbase_HttpServer_addJerseyResourcePackage
/** * Add a Jersey resource package. * @param packageName The Java package name containing the Jersey resource. * @param pathSpec The path spec for the servlet */ public void addJerseyResourcePackage(final String packageName, final String pathSpec) { LOG.info("addJerseyResourcePackage: packageName=" + packageName + ", pathSpec=" + pathSpec); ResourceConfig application = new ResourceConfig().packages(packageName); final ServletHolder sh = new ServletHolder(new ServletContainer(application)); webAppContext.addServlet(sh, pathSpec); }
3.68
morf_DataValueLookup_defaultEquals
/** * Default equals implementation for instances. * * @param obj1 this * @param obj2 the other * @return true if equivalent. */ public static boolean defaultEquals(DataValueLookup obj1, Object obj2) { if (obj1 == obj2) return true; if (obj2 == null) return false; if (!(obj2 instanceof DataValueLookup)) return false; DataValueLookup other = (DataValueLookup) obj2; return Iterables.elementsEqual(obj1.getValues(), other.getValues()); }
3.68
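A minimal sketch of how an implementing class might delegate to the helper above; the surrounding class is hypothetical:

// Inside some class that implements DataValueLookup:
@Override
public boolean equals(Object obj) {
    return DataValueLookup.defaultEquals(this, obj);
}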
hadoop_HistoryServerStateStoreServiceFactory_getStore
/** * Constructs an instance of the configured storage class * * @param conf the configuration * @return the state storage instance */ public static HistoryServerStateStoreService getStore(Configuration conf) { Class<? extends HistoryServerStateStoreService> storeClass = HistoryServerNullStateStoreService.class; boolean recoveryEnabled = conf.getBoolean( JHAdminConfig.MR_HS_RECOVERY_ENABLE, JHAdminConfig.DEFAULT_MR_HS_RECOVERY_ENABLE); if (recoveryEnabled) { storeClass = conf.getClass(JHAdminConfig.MR_HS_STATE_STORE, null, HistoryServerStateStoreService.class); if (storeClass == null) { throw new RuntimeException("Unable to locate storage class, check " + JHAdminConfig.MR_HS_STATE_STORE); } } return ReflectionUtils.newInstance(storeClass, conf); }
3.68
flink_DataTypeTemplate_isAllowAnyPattern
/** Returns whether the given class is eligible for being treated as RAW type. */ boolean isAllowAnyPattern(@Nullable Class<?> clazz) { if (allowRawPattern == null || clazz == null) { return false; } final String className = clazz.getName(); for (String pattern : allowRawPattern) { if (className.startsWith(pattern)) { return true; } } return false; }
3.68
hbase_ConnectionUtils_validatePut
// validate for well-formedness static void validatePut(Put put, int maxKeyValueSize) { if (put.isEmpty()) { throw new IllegalArgumentException("No columns to insert"); } if (maxKeyValueSize > 0) { for (List<Cell> list : put.getFamilyCellMap().values()) { for (Cell cell : list) { if (cell.getSerializedSize() > maxKeyValueSize) { throw new IllegalArgumentException("KeyValue size too large"); } } } } }
3.68
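A hedged caller sketch for validatePut above. The method is package-private, so this would sit in the same package; Put and Bytes are the standard HBase client types:

Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
ConnectionUtils.validatePut(put, 10 * 1024 * 1024); // passes: non-empty, cells within limit
// An empty Put would throw IllegalArgumentException("No columns to insert"):
ConnectionUtils.validatePut(new Put(Bytes.toBytes("row2")), 10 * 1024 * 1024);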
morf_AbstractSqlDialectTest_testMathsMinus
/** * Test that subtracting numbers returns as expected. */ @Test public void testMathsMinus() { String result = testDialect.getSqlFrom(new MathsField(new FieldLiteral(1), MathsOperator.MINUS, new FieldLiteral(1))); assertEquals(expectedMathsMinus(), result); }
3.68
hadoop_JavaCommandLineBuilder_addConfOptions
/** * Add a varargs list of configuration parameters, if they are present. * @param conf configuration source * @param keys keys */ public void addConfOptions(Configuration conf, String... keys) { for (String key : keys) { addConfOption(conf, key); } }
3.68
hadoop_AbfsOutputStreamStatisticsImpl_uploadFailed
/** * Records the total bytes failed to upload through AbfsOutputStream. * * @param bytes number of bytes failed to upload. Negative bytes are ignored. */ @Override public void uploadFailed(long bytes) { ioStatisticsStore.incrementCounter(StreamStatisticNames.BYTES_UPLOAD_FAILED, bytes); }
3.68
graphhopper_MiniPerfTest_getMean
/** * @return mean time per call, in ms */ public double getMean() { return getSum() / counts; }
3.68
flink_Watermark_getTimestamp
/** Returns the timestamp associated with this {@link Watermark} in milliseconds. */ public long getTimestamp() { return timestamp; }
3.68
hbase_GroupingTableMapper_createGroupKey
/** * Create a key by concatenating multiple column values. * <p> * Override this function in order to produce different types of keys. * @param vals The current key/values. * @return A key generated by concatenating multiple column values. */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { if (vals == null) { return null; } StringBuilder sb = new StringBuilder(); for (int i = 0; i < vals.length; i++) { if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); } return new ImmutableBytesWritable(Bytes.toBytesBinary(sb.toString())); }
3.68
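A worked example of the key construction above; the values are hypothetical:

byte[][] vals = { Bytes.toBytes("alice"), Bytes.toBytes("2024-05") };
ImmutableBytesWritable key = createGroupKey(vals);
// key wraps the bytes of "alice 2024-05": the values joined by a single space.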
hbase_HRegion_hasSeenNoSuchFamily
/** Returns If a {@link NoSuchColumnFamilyException} has been observed. */ boolean hasSeenNoSuchFamily() { return wrongFamily; }
3.68