code: string (length 67 to 466k); docstring: string (length 1 to 13.2k)
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case VALUES: return isSetValues(); case NULLS: return isSetNulls(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper) throws IOException { byte[] bytes = db.get(StoreVersion.KEY); if (bytes == null) { storeVersion(db, newversion, mapper); } else { StoreVersion version = mapper.readValue(bytes, StoreVersion.class); if (version.major != newversion.major) { throw new IOException("cannot read state DB with version " + version + ", incompatible " + "with current version " + newversion); } storeVersion(db, newversion, mapper); } }
Simple major.minor versioning scheme. Any incompatible changes should be across major versions. Minor version differences are allowed -- meaning we should be able to read dbs that are either earlier *or* later on the minor version.
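A minimal sketch of the compatibility rule described above, under stated assumptions: the LevelDB wiring is omitted and StoreVersionSketch is a hypothetical stand-in for StoreVersion, so this only illustrates that compatibility is decided by the major component alone.

// Hypothetical stand-in for StoreVersion; illustrates the major.minor rule only.
final class StoreVersionSketch {
  final int major;
  final int minor;

  StoreVersionSketch(int major, int minor) {
    this.major = major;
    this.minor = minor;
  }

  // Data written under `stored` is readable by `current` iff the major versions match;
  // minor differences (earlier or later) are tolerated.
  static boolean isCompatible(StoreVersionSketch stored, StoreVersionSketch current) {
    return stored.major == current.major;
  }

  public static void main(String[] args) {
    StoreVersionSketch current = new StoreVersionSketch(1, 2);
    System.out.println(isCompatible(new StoreVersionSketch(1, 0), current)); // true
    System.out.println(isCompatible(new StoreVersionSketch(1, 5), current)); // true
    System.out.println(isCompatible(new StoreVersionSketch(2, 0), current)); // false: checkVersion would throw IOException
  }
}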
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case COL_VALS: return isSetColVals(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
@Override public Optional<Throwable> getError() { return redirector != null ? Optional.ofNullable(redirector.getError()) : Optional.empty(); }
Parses the logs of {@code spark-submit} and returns the last exception thrown. <p> Since {@link SparkLauncher} runs {@code spark-submit} in a sub-process, it's difficult to accurately retrieve the full {@link Throwable} from the {@code spark-submit} process. This method parses the logs of the sub-process and provides a best-effort attempt at returning the last exception thrown by the {@code spark-submit} process. Only the exception message is parsed; the associated stack trace is not preserved. @return an {@link Optional} containing a {@link RuntimeException} built from the parsed exception message, or an empty {@link Optional} if no exception was found
void monitorChild() { Process proc = childProc; if (proc == null) { // Process may have already been disposed of, e.g. by calling kill(). return; } while (proc.isAlive()) { try { proc.waitFor(); } catch (Exception e) { LOG.log(Level.WARNING, "Exception waiting for child process to exit.", e); } } synchronized (this) { if (isDisposed()) { return; } int ec; try { ec = proc.exitValue(); } catch (Exception e) { LOG.log(Level.WARNING, "Exception getting child process exit code, assuming failure.", e); ec = 1; } if (ec != 0) { State currState = getState(); // Override state with failure if the current state is not final, or is success. if (!currState.isFinal() || currState == State.FINISHED) { setState(State.FAILED, true); } } dispose(); } }
Wait for the child process to exit and update the handle's state if necessary, according to the exit code.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case QUALIFIERS: return isSetQualifiers(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case SESSION_ID: return isSetSessionId(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
public SessionHandle openSession(TProtocolVersion protocol, String username, String password, String ipAddress, Map<String, String> sessionConf, boolean withImpersonation, String delegationToken) throws HiveSQLException { HiveSession session; // If doAs is set to true for HiveServer2, we will create a proxy object for the session impl. // Within the proxy object, we wrap the method call in a UserGroupInformation#doAs if (withImpersonation) { HiveSessionImplwithUGI sessionWithUGI = new HiveSessionImplwithUGI(protocol, username, password, hiveConf, ipAddress, delegationToken); session = HiveSessionProxy.getProxy(sessionWithUGI, sessionWithUGI.getSessionUgi()); sessionWithUGI.setProxySession(session); } else { session = new HiveSessionImpl(protocol, username, password, hiveConf, ipAddress); } session.setSessionManager(this); session.setOperationManager(operationManager); try { session.open(sessionConf); } catch (Exception e) { try { session.close(); } catch (Throwable t) { LOG.warn("Error closing session", t); } session = null; throw new HiveSQLException("Failed to open new session: " + e, e); } if (isOperationLogEnabled) { session.setOperationLogSessionDir(operationLogRootDir); } handleToSession.put(session.getSessionHandle(), session); return session.getSessionHandle(); }
Opens a new session and creates a session handle. The username passed to this method is the effective username. If withImpersonation is true (==doAs true) we wrap all the calls in HiveSession within a UGI.doAs, where UGI corresponds to the effective user. Please see {@code org.apache.hive.service.cli.thrift.ThriftCLIService.getUserName()} for more details. @param protocol @param username @param password @param ipAddress @param sessionConf @param withImpersonation @param delegationToken @return @throws HiveSQLException
public T setPropertiesFile(String path) { checkNotNull(path, "path"); builder.setPropertiesFile(path); return self(); }
Set a custom properties file with Spark configuration for the application. @param path Path to custom properties file to use. @return This launcher.
public T setConf(String key, String value) { checkNotNull(key, "key"); checkNotNull(value, "value"); checkArgument(key.startsWith("spark."), "'key' must start with 'spark.'"); builder.conf.put(key, value); return self(); }
Set a single configuration value for the application. @param key Configuration key. @param value The value to use. @return This launcher.
public T setAppName(String appName) { checkNotNull(appName, "appName"); builder.appName = appName; return self(); }
Set the application name. @param appName Application name. @return This launcher.
public T setMaster(String master) { checkNotNull(master, "master"); builder.master = master; return self(); }
Set the Spark master for the application. @param master Spark master. @return This launcher.
public T setDeployMode(String mode) { checkNotNull(mode, "mode"); builder.deployMode = mode; return self(); }
Set the deploy mode for the application. @param mode Deploy mode. @return This launcher.
public T setAppResource(String resource) { checkNotNull(resource, "resource"); builder.appResource = resource; return self(); }
Set the main application resource. This should be the location of a jar file for Scala/Java applications, or a python script for PySpark applications. @param resource Path to the main application resource. @return This launcher.
public T setMainClass(String mainClass) { checkNotNull(mainClass, "mainClass"); builder.mainClass = mainClass; return self(); }
Sets the application class name for Java/Scala applications. @param mainClass Application's main class. @return This launcher.
public T addSparkArg(String arg) { SparkSubmitOptionParser validator = new ArgumentValidator(false); validator.parse(Arrays.asList(arg)); builder.userArgs.add(arg); return self(); }
Adds a no-value argument to the Spark invocation. If the argument is known, this method validates whether the argument is indeed a no-value argument, and throws an exception otherwise. <p> Use this method with caution. It is possible to create an invalid Spark command by passing unknown arguments to this method, since those are allowed for forward compatibility. @since 1.5.0 @param arg Argument to add. @return This launcher.
public T addSparkArg(String name, String value) { SparkSubmitOptionParser validator = new ArgumentValidator(true); if (validator.MASTER.equals(name)) { setMaster(value); } else if (validator.PROPERTIES_FILE.equals(name)) { setPropertiesFile(value); } else if (validator.CONF.equals(name)) { String[] vals = value.split("=", 2); setConf(vals[0], vals[1]); } else if (validator.CLASS.equals(name)) { setMainClass(value); } else if (validator.JARS.equals(name)) { builder.jars.clear(); for (String jar : value.split(",")) { addJar(jar); } } else if (validator.FILES.equals(name)) { builder.files.clear(); for (String file : value.split(",")) { addFile(file); } } else if (validator.PY_FILES.equals(name)) { builder.pyFiles.clear(); for (String file : value.split(",")) { addPyFile(file); } } else { validator.parse(Arrays.asList(name, value)); builder.userArgs.add(name); builder.userArgs.add(value); } return self(); }
Adds an argument with a value to the Spark invocation. If the argument name corresponds to a known argument, the code validates that the argument actually expects a value, and throws an exception otherwise. <p> It is safe to add arguments modified by other methods in this class (such as {@link #setMaster(String)}); the last invocation will be the one to take effect. <p> Use this method with caution. It is possible to create an invalid Spark command by passing unknown arguments to this method, since those are allowed for forward compatibility. @since 1.5.0 @param name Name of argument to add. @param value Value of the argument. @return This launcher.
public T addAppArgs(String... args) { for (String arg : args) { checkNotNull(arg, "arg"); builder.appArgs.add(arg); } return self(); }
Adds command line arguments for the application. @param args Arguments to pass to the application's main class. @return This launcher.
public T addJar(String jar) { checkNotNull(jar, "jar"); builder.jars.add(jar); return self(); }
Adds a jar file to be submitted with the application. @param jar Path to the jar file. @return This launcher.
public T addFile(String file) { checkNotNull(file, "file"); builder.files.add(file); return self(); }
Adds a file to be submitted with the application. @param file Path to the file. @return This launcher.
public T addPyFile(String file) { checkNotNull(file, "file"); builder.pyFiles.add(file); return self(); }
Adds a python file / zip / egg to be submitted with the application. @param file Path to the file. @return This launcher.
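The setters above form a fluent builder; a hedged usage sketch follows, in which the jar path, main class, and master are placeholders rather than values taken from the source.

import org.apache.spark.launcher.SparkAppHandle;
import org.apache.spark.launcher.SparkLauncher;

public class LauncherExample {
  public static void main(String[] args) throws Exception {
    // Placeholder resource, class and master: substitute your own application.
    SparkAppHandle handle = new SparkLauncher()
        .setAppResource("/path/to/my-app.jar")
        .setMainClass("com.example.MyApp")
        .setMaster("local[2]")
        .setAppName("launcher-demo")
        .setConf("spark.executor.memory", "1g")
        .addAppArgs("arg1", "arg2")
        .startApplication();

    // Block until the application reaches a final state.
    while (!handle.getState().isFinal()) {
      Thread.sleep(1000);
    }
    System.out.println("Final state: " + handle.getState());
  }
}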
public void zeroOut() { for (long off = baseOffset; off < baseOffset + length * WIDTH; off += WIDTH) { Platform.putLong(baseObj, off, 0); } }
Fills the entire backing array with 0L.
public void set(int index, long value) { assert index >= 0 : "index (" + index + ") should >= 0"; assert index < length : "index (" + index + ") should < length (" + length + ")"; Platform.putLong(baseObj, baseOffset + index * WIDTH, value); }
Sets the value at position {@code index}.
public long get(int index) { assert index >= 0 : "index (" + index + ") should >= 0"; assert index < length : "index (" + index + ") should < length (" + length + ")"; return Platform.getLong(baseObj, baseOffset + index * WIDTH); }
Returns the value at position {@code index}.
public static CatalogPlugin load(String name, SQLConf conf) throws CatalogNotFoundException, SparkException { String pluginClassName = conf.getConfString("spark.sql.catalog." + name, null); if (pluginClassName == null) { throw new CatalogNotFoundException(String.format( "Catalog '%s' plugin class not found: spark.sql.catalog.%s is not defined", name, name)); } ClassLoader loader = Utils.getContextOrSparkClassLoader(); try { Class<?> pluginClass = loader.loadClass(pluginClassName); if (!CatalogPlugin.class.isAssignableFrom(pluginClass)) { throw new SparkException(String.format( "Plugin class for catalog '%s' does not implement CatalogPlugin: %s", name, pluginClassName)); } CatalogPlugin plugin = CatalogPlugin.class.cast(pluginClass.newInstance()); plugin.initialize(name, catalogOptions(name, conf)); return plugin; } catch (ClassNotFoundException e) { throw new SparkException(String.format( "Cannot find catalog plugin class for catalog '%s': %s", name, pluginClassName)); } catch (IllegalAccessException e) { throw new SparkException(String.format( "Failed to call public no-arg constructor for catalog '%s': %s", name, pluginClassName), e); } catch (InstantiationException e) { throw new SparkException(String.format( "Failed while instantiating plugin for catalog '%s': %s", name, pluginClassName), e.getCause()); } }
Load and configure a catalog by name. <p> This loads, instantiates, and initializes the catalog plugin for each call; it does not cache or reuse instances. @param name a String catalog name @param conf a SQLConf @return an initialized CatalogPlugin @throws CatalogNotFoundException if the plugin class cannot be found @throws SparkException if the plugin class cannot be instantiated
private static CaseInsensitiveStringMap catalogOptions(String name, SQLConf conf) { Map<String, String> allConfs = mapAsJavaMapConverter(conf.getAllConfs()).asJava(); Pattern prefix = Pattern.compile("^spark\\.sql\\.catalog\\." + name + "\\.(.+)"); HashMap<String, String> options = new HashMap<>(); for (Map.Entry<String, String> entry : allConfs.entrySet()) { Matcher matcher = prefix.matcher(entry.getKey()); if (matcher.matches() && matcher.groupCount() > 0) { options.put(matcher.group(1), entry.getValue()); } } return new CaseInsensitiveStringMap(options); }
Extracts a named catalog's configuration from a SQLConf. @param name a catalog name @param conf a SQLConf @return a case insensitive string map of options starting with spark.sql.catalog.(name).
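Taken together, load() and catalogOptions() mean a catalog is wired up entirely through configuration keys under the spark.sql.catalog.(name) prefix. A hedged sketch, where the catalog name mycat and the plugin class com.example.MyCatalog are invented for illustration:

import org.apache.spark.sql.SparkSession;

public class CatalogConfigExample {
  public static void main(String[] args) {
    // The plugin class (hypothetical here) must implement CatalogPlugin and have a
    // public no-arg constructor, as enforced by load() above.
    SparkSession spark = SparkSession.builder()
        .master("local[2]")
        .config("spark.sql.catalog.mycat", "com.example.MyCatalog")
        // Keys under the catalog prefix are stripped and handed to initialize() as a
        // CaseInsensitiveStringMap; this one arrives as option "url".
        .config("spark.sql.catalog.mycat.url", "jdbc:postgresql://localhost/demo")
        .getOrCreate();

    spark.stop();
  }
}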
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case OPERATION_ID: return isSetOperationId(); case OPERATION_TYPE: return isSetOperationType(); case HAS_RESULT_SET: return isSetHasResultSet(); case MODIFIED_ROW_COUNT: return isSetModifiedRowCount(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
@Override public SessionHandle openSession(String username, String password, Map<String, String> configuration) throws HiveSQLException { try { TOpenSessionReq req = new TOpenSessionReq(); req.setUsername(username); req.setPassword(password); req.setConfiguration(configuration); TOpenSessionResp resp = cliService.OpenSession(req); checkStatus(resp.getStatus()); return new SessionHandle(resp.getSessionHandle(), resp.getServerProtocolVersion()); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#openSession(java.lang.String, java.lang.String, java.util.Map)
@Override public void closeSession(SessionHandle sessionHandle) throws HiveSQLException { try { TCloseSessionReq req = new TCloseSessionReq(sessionHandle.toTSessionHandle()); TCloseSessionResp resp = cliService.CloseSession(req); checkStatus(resp.getStatus()); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#closeSession(org.apache.hive.service.cli.SessionHandle)
@Override public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType) throws HiveSQLException { try { // FIXME extract the right info type TGetInfoReq req = new TGetInfoReq(sessionHandle.toTSessionHandle(), infoType.toTGetInfoType()); TGetInfoResp resp = cliService.GetInfo(req); checkStatus(resp.getStatus()); return new GetInfoValue(resp.getInfoValue()); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getInfo(org.apache.hive.service.cli.SessionHandle, org.apache.hive.service.cli.GetInfoType)
@Override public OperationHandle executeStatement(SessionHandle sessionHandle, String statement, Map<String, String> confOverlay) throws HiveSQLException { return executeStatementInternal(sessionHandle, statement, confOverlay, false); }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#executeStatement(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map)
@Override public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement, Map<String, String> confOverlay) throws HiveSQLException { return executeStatementInternal(sessionHandle, statement, confOverlay, true); }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#executeStatementAsync(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map)
@Override public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException { try { TGetTypeInfoReq req = new TGetTypeInfoReq(sessionHandle.toTSessionHandle()); TGetTypeInfoResp resp = cliService.GetTypeInfo(req); checkStatus(resp.getStatus()); TProtocolVersion protocol = sessionHandle.getProtocolVersion(); return new OperationHandle(resp.getOperationHandle(), protocol); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getTypeInfo(org.apache.hive.service.cli.SessionHandle)
@Override public OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException { try { TGetCatalogsReq req = new TGetCatalogsReq(sessionHandle.toTSessionHandle()); TGetCatalogsResp resp = cliService.GetCatalogs(req); checkStatus(resp.getStatus()); TProtocolVersion protocol = sessionHandle.getProtocolVersion(); return new OperationHandle(resp.getOperationHandle(), protocol); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getCatalogs(org.apache.hive.service.cli.SessionHandle)
@Override public OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, String schemaName) throws HiveSQLException { try { TGetSchemasReq req = new TGetSchemasReq(sessionHandle.toTSessionHandle()); req.setCatalogName(catalogName); req.setSchemaName(schemaName); TGetSchemasResp resp = cliService.GetSchemas(req); checkStatus(resp.getStatus()); TProtocolVersion protocol = sessionHandle.getProtocolVersion(); return new OperationHandle(resp.getOperationHandle(), protocol); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getSchemas(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String)
@Override public OperationHandle getTables(SessionHandle sessionHandle, String catalogName, String schemaName, String tableName, List<String> tableTypes) throws HiveSQLException { try { TGetTablesReq req = new TGetTablesReq(sessionHandle.toTSessionHandle()); req.setTableName(tableName); req.setTableTypes(tableTypes); req.setSchemaName(schemaName); TGetTablesResp resp = cliService.GetTables(req); checkStatus(resp.getStatus()); TProtocolVersion protocol = sessionHandle.getProtocolVersion(); return new OperationHandle(resp.getOperationHandle(), protocol); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getTables(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.util.List)
@Override public OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException { try { TGetTableTypesReq req = new TGetTableTypesReq(sessionHandle.toTSessionHandle()); TGetTableTypesResp resp = cliService.GetTableTypes(req); checkStatus(resp.getStatus()); TProtocolVersion protocol = sessionHandle.getProtocolVersion(); return new OperationHandle(resp.getOperationHandle(), protocol); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getTableTypes(org.apache.hive.service.cli.SessionHandle)
@Override public OperationHandle getColumns(SessionHandle sessionHandle, String catalogName, String schemaName, String tableName, String columnName) throws HiveSQLException { try { TGetColumnsReq req = new TGetColumnsReq(); req.setSessionHandle(sessionHandle.toTSessionHandle()); req.setCatalogName(catalogName); req.setSchemaName(schemaName); req.setTableName(tableName); req.setColumnName(columnName); TGetColumnsResp resp = cliService.GetColumns(req); checkStatus(resp.getStatus()); TProtocolVersion protocol = sessionHandle.getProtocolVersion(); return new OperationHandle(resp.getOperationHandle(), protocol); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getColumns(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.lang.String)
@Override public OperationHandle getFunctions(SessionHandle sessionHandle, String catalogName, String schemaName, String functionName) throws HiveSQLException { try { TGetFunctionsReq req = new TGetFunctionsReq(sessionHandle.toTSessionHandle(), functionName); req.setCatalogName(catalogName); req.setSchemaName(schemaName); TGetFunctionsResp resp = cliService.GetFunctions(req); checkStatus(resp.getStatus()); TProtocolVersion protocol = sessionHandle.getProtocolVersion(); return new OperationHandle(resp.getOperationHandle(), protocol); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getFunctions(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String)
@Override public OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException { try { TGetOperationStatusReq req = new TGetOperationStatusReq(opHandle.toTOperationHandle()); TGetOperationStatusResp resp = cliService.GetOperationStatus(req); // Checks the status of the RPC call, throws an exception in case of error checkStatus(resp.getStatus()); OperationState opState = OperationState.getOperationState(resp.getOperationState()); HiveSQLException opException = null; if (opState == OperationState.ERROR) { opException = new HiveSQLException(resp.getErrorMessage(), resp.getSqlState(), resp.getErrorCode()); } return new OperationStatus(opState, opException); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getOperationStatus(org.apache.hive.service.cli.OperationHandle)
@Override public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { try { TCancelOperationReq req = new TCancelOperationReq(opHandle.toTOperationHandle()); TCancelOperationResp resp = cliService.CancelOperation(req); checkStatus(resp.getStatus()); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#cancelOperation(org.apache.hive.service.cli.OperationHandle)
@Override public void closeOperation(OperationHandle opHandle) throws HiveSQLException { try { TCloseOperationReq req = new TCloseOperationReq(opHandle.toTOperationHandle()); TCloseOperationResp resp = cliService.CloseOperation(req); checkStatus(resp.getStatus()); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#closeOperation(org.apache.hive.service.cli.OperationHandle)
@Override public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException { try { TGetResultSetMetadataReq req = new TGetResultSetMetadataReq(opHandle.toTOperationHandle()); TGetResultSetMetadataResp resp = cliService.GetResultSetMetadata(req); checkStatus(resp.getStatus()); return new TableSchema(resp.getSchema()); } catch (HiveSQLException e) { throw e; } catch (Exception e) { throw new HiveSQLException(e); } }
/* (non-Javadoc) @see org.apache.hive.service.cli.ICLIService#getResultSetMetadata(org.apache.hive.service.cli.OperationHandle)
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case CLIENT_PROTOCOL: return isSetClient_protocol(); case USERNAME: return isSetUsername(); case PASSWORD: return isSetPassword(); case CONFIGURATION: return isSetConfiguration(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
synchronized String registerHandle(AbstractAppHandle handle) { String secret = createSecret(); secretToPendingApps.put(secret, handle); return secret; }
Registers a handle with the server, and returns the secret the child app needs to connect back.
void unregister(AbstractAppHandle handle) { for (Map.Entry<String, AbstractAppHandle> e : secretToPendingApps.entrySet()) { if (e.getValue().equals(handle)) { String secret = e.getKey(); secretToPendingApps.remove(secret); break; } } unref(); }
Removes the client handle from the pending list (in case it's still there), and unrefs the server.
public static int sort( LongArray array, long numRecords, int startByteIndex, int endByteIndex, boolean desc, boolean signed) { assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0"; assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7"; assert endByteIndex > startByteIndex; assert numRecords * 2 <= array.size(); long inIndex = 0; long outIndex = numRecords; if (numRecords > 0) { long[][] counts = getCounts(array, numRecords, startByteIndex, endByteIndex); for (int i = startByteIndex; i <= endByteIndex; i++) { if (counts[i] != null) { sortAtByte( array, numRecords, counts[i], i, inIndex, outIndex, desc, signed && i == endByteIndex); long tmp = inIndex; inIndex = outIndex; outIndex = tmp; } } } return Ints.checkedCast(inIndex); }
Sorts a given array of longs using least-significant-digit radix sort. This routine assumes you have extra space at the end of the array at least equal to the number of records. The sort is destructive and may relocate the data positioned within the array. @param array array of long elements followed by at least that many empty slots. @param numRecords number of data records in the array. @param startByteIndex the first byte (in range [0, 7]) to sort each long by, counting from the least significant byte. @param endByteIndex the last byte (in range [0, 7]) to sort each long by, counting from the least significant byte. Must be greater than startByteIndex. @param desc whether this is a descending (binary-order) sort. @param signed whether this is a signed (two's complement) sort. @return The starting index of the sorted data within the given array. We return this instead of always copying the data back to position zero for efficiency.
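A hedged usage sketch of sort(), assuming the Spark-internal LongArray and MemoryBlock helpers and that the method lives on the RadixSort utility class; note the backing array is allocated at twice the record count to provide the required scratch space.

import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.collection.unsafe.sort.RadixSort;

public class RadixSortExample {
  public static void main(String[] args) {
    long[] values = {42L, 7L, 1_000_000L, 3L};
    int numRecords = values.length;

    // Twice the number of records: the second half is scratch space for the sort.
    LongArray array = new LongArray(MemoryBlock.fromLongArray(new long[numRecords * 2]));
    for (int i = 0; i < numRecords; i++) {
      array.set(i, values[i]);
    }

    // Sort on all bytes (0 through 7), ascending, unsigned. The returned index is
    // where the sorted run starts within `array` (either 0 or numRecords).
    int start = RadixSort.sort(array, numRecords, 0, 7, false, false);
    for (int i = 0; i < numRecords; i++) {
      System.out.println(array.get(start + i)); // 3, 7, 42, 1000000
    }
  }
}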
private static void sortAtByte( LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex, boolean desc, boolean signed) { assert counts.length == 256; long[] offsets = transformCountsToOffsets( counts, numRecords, array.getBaseOffset() + outIndex * 8L, 8, desc, signed); Object baseObject = array.getBaseObject(); long baseOffset = array.getBaseOffset() + inIndex * 8L; long maxOffset = baseOffset + numRecords * 8L; for (long offset = baseOffset; offset < maxOffset; offset += 8) { long value = Platform.getLong(baseObject, offset); int bucket = (int)((value >>> (byteIdx * 8)) & 0xff); Platform.putLong(baseObject, offsets[bucket], value); offsets[bucket] += 8; } }
Performs a partial sort by copying data into destination offsets for each byte value at the specified byte offset. @param array array to partially sort. @param numRecords number of data records in the array. @param counts counts for each byte value. This routine destructively modifies this array. @param byteIdx the byte in a long to sort at, counting from the least significant byte. @param inIndex the starting index in the array where input data is located. @param outIndex the starting index where sorted output data should be written. @param desc whether this is a descending (binary-order) sort. @param signed whether this is a signed (two's complement) sort (only applies to last byte).
private static long[][] getCounts( LongArray array, long numRecords, int startByteIndex, int endByteIndex) { long[][] counts = new long[8][]; // Optimization: do a fast pre-pass to determine which byte indices we can skip for sorting. // If all the byte values at a particular index are the same we don't need to count it. long bitwiseMax = 0; long bitwiseMin = -1L; long maxOffset = array.getBaseOffset() + numRecords * 8L; Object baseObject = array.getBaseObject(); for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) { long value = Platform.getLong(baseObject, offset); bitwiseMax |= value; bitwiseMin &= value; } long bitsChanged = bitwiseMin ^ bitwiseMax; // Compute counts for each byte index. for (int i = startByteIndex; i <= endByteIndex; i++) { if (((bitsChanged >>> (i * 8)) & 0xff) != 0) { counts[i] = new long[256]; // TODO(ekl) consider computing all the counts in one pass. for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) { counts[i][(int)((Platform.getLong(baseObject, offset) >>> (i * 8)) & 0xff)]++; } } } return counts; }
Computes a value histogram for each byte in the given array. @param array array to count records in. @param numRecords number of data records in the array. @param startByteIndex the first byte to compute counts for (the prior are skipped). @param endByteIndex the last byte to compute counts for. @return an array of eight 256-byte count arrays, one for each byte starting from the least significant byte. If the byte does not need sorting the array will be null.
private static long[] transformCountsToOffsets( long[] counts, long numRecords, long outputOffset, long bytesPerRecord, boolean desc, boolean signed) { assert counts.length == 256; int start = signed ? 128 : 0; // output the negative records first (values 128-255). if (desc) { long pos = numRecords; for (int i = start; i < start + 256; i++) { pos -= counts[i & 0xff]; counts[i & 0xff] = outputOffset + pos * bytesPerRecord; } } else { long pos = 0; for (int i = start; i < start + 256; i++) { long tmp = counts[i & 0xff]; counts[i & 0xff] = outputOffset + pos * bytesPerRecord; pos += tmp; } } return counts; }
Transforms counts into the proper unsafe output offsets for the sort type. @param counts counts for each byte value. This routine destructively modifies this array. @param numRecords number of data records in the original data array. @param outputOffset output offset in bytes from the base array object. @param bytesPerRecord size of each record (8 for plain sort, 16 for key-prefix sort). @param desc whether this is a descending (binary-order) sort. @param signed whether this is a signed (two's complement) sort. @return the input counts array.
public static int sortKeyPrefixArray( LongArray array, long startIndex, long numRecords, int startByteIndex, int endByteIndex, boolean desc, boolean signed) { assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0"; assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7"; assert endByteIndex > startByteIndex; assert numRecords * 4 <= array.size(); long inIndex = startIndex; long outIndex = startIndex + numRecords * 2L; if (numRecords > 0) { long[][] counts = getKeyPrefixArrayCounts( array, startIndex, numRecords, startByteIndex, endByteIndex); for (int i = startByteIndex; i <= endByteIndex; i++) { if (counts[i] != null) { sortKeyPrefixArrayAtByte( array, numRecords, counts[i], i, inIndex, outIndex, desc, signed && i == endByteIndex); long tmp = inIndex; inIndex = outIndex; outIndex = tmp; } } } return Ints.checkedCast(inIndex); }
Specialization of sort() for key-prefix arrays. In this type of array, each record consists of two longs, only the second of which is sorted on. @param startIndex starting index in the array to sort from. This parameter is not supported in the plain sort() implementation.
private static long[][] getKeyPrefixArrayCounts( LongArray array, long startIndex, long numRecords, int startByteIndex, int endByteIndex) { long[][] counts = new long[8][]; long bitwiseMax = 0; long bitwiseMin = -1L; long baseOffset = array.getBaseOffset() + startIndex * 8L; long limit = baseOffset + numRecords * 16L; Object baseObject = array.getBaseObject(); for (long offset = baseOffset; offset < limit; offset += 16) { long value = Platform.getLong(baseObject, offset + 8); bitwiseMax |= value; bitwiseMin &= value; } long bitsChanged = bitwiseMin ^ bitwiseMax; for (int i = startByteIndex; i <= endByteIndex; i++) { if (((bitsChanged >>> (i * 8)) & 0xff) != 0) { counts[i] = new long[256]; for (long offset = baseOffset; offset < limit; offset += 16) { counts[i][(int)((Platform.getLong(baseObject, offset + 8) >>> (i * 8)) & 0xff)]++; } } } return counts; }
Specialization of getCounts() for key-prefix arrays. We could probably combine this with getCounts with some added parameters but that seems to hurt in benchmarks.
private static void sortKeyPrefixArrayAtByte( LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex, boolean desc, boolean signed) { assert counts.length == 256; long[] offsets = transformCountsToOffsets( counts, numRecords, array.getBaseOffset() + outIndex * 8L, 16, desc, signed); Object baseObject = array.getBaseObject(); long baseOffset = array.getBaseOffset() + inIndex * 8L; long maxOffset = baseOffset + numRecords * 16L; for (long offset = baseOffset; offset < maxOffset; offset += 16) { long key = Platform.getLong(baseObject, offset); long prefix = Platform.getLong(baseObject, offset + 8); int bucket = (int)((prefix >>> (byteIdx * 8)) & 0xff); long dest = offsets[bucket]; Platform.putLong(baseObject, dest, key); Platform.putLong(baseObject, dest + 8, prefix); offsets[bucket] += 16; } }
Specialization of sortAtByte() for key-prefix arrays.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case SESSION_HANDLE: return isSetSessionHandle(); case CATALOG_NAME: return isSetCatalogName(); case SCHEMA_NAME: return isSetSchemaName(); case TABLE_NAME: return isSetTableName(); case COLUMN_NAME: return isSetColumnName(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
private ByteBuf nextBufferForFrame(int bytesToRead) { ByteBuf buf = buffers.getFirst(); ByteBuf frame; if (buf.readableBytes() > bytesToRead) { frame = buf.retain().readSlice(bytesToRead); totalSize -= bytesToRead; } else { frame = buf; buffers.removeFirst(); totalSize -= frame.readableBytes(); } return frame; }
Takes the first buffer in the internal list and either adjusts it to fit in the frame (by taking a slice out of it) or removes it from the internal list.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case OPERATION_HANDLE: return isSetOperationHandle(); case ORIENTATION: return isSetOrientation(); case MAX_ROWS: return isSetMaxRows(); case FETCH_TYPE: return isSetFetchType(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
@Override public void initialize(String path, List<String> columns) throws IOException, UnsupportedOperationException { super.initialize(path, columns); initializeInternal(); }
Utility API that will read all the data in path. This circumvents the need to create Hadoop objects to use this class. `columns` can contain the list of columns to project.
private void initBatch( MemoryMode memMode, StructType partitionColumns, InternalRow partitionValues) { StructType batchSchema = new StructType(); for (StructField f: sparkSchema.fields()) { batchSchema = batchSchema.add(f); } if (partitionColumns != null) { for (StructField f : partitionColumns.fields()) { batchSchema = batchSchema.add(f); } } if (memMode == MemoryMode.OFF_HEAP) { columnVectors = OffHeapColumnVector.allocateColumns(capacity, batchSchema); } else { columnVectors = OnHeapColumnVector.allocateColumns(capacity, batchSchema); } columnarBatch = new ColumnarBatch(columnVectors); if (partitionColumns != null) { int partitionIdx = sparkSchema.fields().length; for (int i = 0; i < partitionColumns.fields().length; i++) { ColumnVectorUtils.populate(columnVectors[i + partitionIdx], partitionValues, i); columnVectors[i + partitionIdx].setIsConstant(); } } // Initialize missing columns with nulls. for (int i = 0; i < missingColumns.length; i++) { if (missingColumns[i]) { columnVectors[i].putNulls(0, capacity); columnVectors[i].setIsConstant(); } } }
Initializes the columnar batch: the batch schema is the data schema followed by the partition columns, and partition column i is populated from partitionValues[i] (e.g. Column 2: partitionValues[0], Column 3: partitionValues[1]). Columns missing from the file are filled with nulls and marked constant.
public boolean nextBatch() throws IOException { for (WritableColumnVector vector : columnVectors) { vector.reset(); } columnarBatch.setNumRows(0); if (rowsReturned >= totalRowCount) return false; checkEndOfRowGroup(); int num = (int) Math.min((long) capacity, totalCountLoadedSoFar - rowsReturned); for (int i = 0; i < columnReaders.length; ++i) { if (columnReaders[i] == null) continue; columnReaders[i].readBatch(num, columnVectors[i]); } rowsReturned += num; columnarBatch.setNumRows(num); numBatched = num; batchIdx = 0; return true; }
Advances to the next batch of rows. Returns false if there are no more.
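A hedged sketch of the read loop this method supports. The reader type and its resultBatch() accessor (assumed here to return the ColumnarBatch that nextBatch() refills) are assumptions rather than values taken from the excerpt above.

import java.io.IOException;
import java.util.Iterator;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader;
import org.apache.spark.sql.vectorized.ColumnarBatch;

public class BatchReadLoop {
  // `reader` is assumed to be already initialized (see initialize() above).
  static long countRows(VectorizedParquetRecordReader reader) throws IOException {
    long count = 0;
    ColumnarBatch batch = reader.resultBatch(); // assumed accessor for the reused batch
    while (reader.nextBatch()) {
      Iterator<InternalRow> rows = batch.rowIterator();
      while (rows.hasNext()) {
        rows.next();
        count++;
      }
    }
    return count;
  }
}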
public static int indexOfDomainMatch(String userName) { if (userName == null) { return -1; } int idx = userName.indexOf('/'); int idx2 = userName.indexOf('@'); int endIdx = Math.min(idx, idx2); // Use the earlier match. // Unless at least one of '/' or '@' was not found, in // which case, use the latter match. if (endIdx == -1) { endIdx = Math.max(idx, idx2); } return endIdx; }
/* Get the index separating the user name from domain name (the user's name up to the first '/' or '@'). @param userName full user name. @return index of domain match or -1 if not found
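A few hedged examples of what the index arithmetic above produces; the helper is copied verbatim so the snippet stands alone.

public class DomainMatchExample {
  public static void main(String[] args) {
    System.out.println(indexOfDomainMatch("user/host@EXAMPLE.COM")); // 4  -> the earlier separator '/' wins
    System.out.println(indexOfDomainMatch("user@EXAMPLE.COM"));      // 4  -> '@' is used because '/' is absent
    System.out.println(indexOfDomainMatch("user"));                  // -1 -> no separator at all
  }

  // Copied from the utility above so the example is self-contained.
  static int indexOfDomainMatch(String userName) {
    if (userName == null) {
      return -1;
    }
    int idx = userName.indexOf('/');
    int idx2 = userName.indexOf('@');
    int endIdx = Math.min(idx, idx2); // use the earlier match...
    if (endIdx == -1) {
      endIdx = Math.max(idx, idx2);   // ...unless one of the separators was not found
    }
    return endIdx;
  }
}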
public static void closeQuietly(Closeable closeable) { try { if (closeable != null) { closeable.close(); } } catch (IOException e) { logger.error("IOException should not have been thrown.", e); } }
Closes the given object, ignoring IOExceptions.
public static int nonNegativeHash(Object obj) { if (obj == null) { return 0; } int hash = obj.hashCode(); return hash != Integer.MIN_VALUE ? Math.abs(hash) : 0; }
Returns a hash consistent with Spark's Utils.nonNegativeHash().
public static ByteBuffer stringToBytes(String s) { return Unpooled.wrappedBuffer(s.getBytes(StandardCharsets.UTF_8)).nioBuffer(); }
Convert the given string to a byte buffer. The resulting buffer can be converted back to the same string through {@link #bytesToString(ByteBuffer)}.
public static String bytesToString(ByteBuffer b) { return Unpooled.wrappedBuffer(b).toString(StandardCharsets.UTF_8); }
Convert the given byte buffer to a string. The resulting string can be converted back to the same byte buffer through {@link #stringToBytes(String)}.
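A small hedged roundtrip through the two converters above, assuming they are the static helpers on org.apache.spark.network.util.JavaUtils.

import java.nio.ByteBuffer;
import org.apache.spark.network.util.JavaUtils;

public class StringBytesRoundTrip {
  public static void main(String[] args) {
    ByteBuffer buf = JavaUtils.stringToBytes("héllo, spark");
    String back = JavaUtils.bytesToString(buf);
    System.out.println(back);                        // héllo, spark
    System.out.println("héllo, spark".equals(back)); // true: the conversion round-trips
  }
}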
public static void deleteRecursively(File file, FilenameFilter filter) throws IOException { if (file == null) { return; } // On Unix systems, use operating system command to run faster // If that does not work out, fallback to the Java IO way if (SystemUtils.IS_OS_UNIX && filter == null) { try { deleteRecursivelyUsingUnixNative(file); return; } catch (IOException e) { logger.warn("Attempt to delete using native Unix OS command failed for path = {}. " + "Falling back to Java IO way", file.getAbsolutePath(), e); } } deleteRecursivelyUsingJavaIO(file, filter); }
Delete a file or directory and its contents recursively. Don't follow directories if they are symlinks. @param file Input file / dir to be deleted @param filter A filename filter that ensures only files / dirs with matching filenames are deleted @throws IOException if deletion is unsuccessful
public static long timeStringAs(String str, TimeUnit unit) { String lower = str.toLowerCase(Locale.ROOT).trim(); try { Matcher m = Pattern.compile("(-?[0-9]+)([a-z]+)?").matcher(lower); if (!m.matches()) { throw new NumberFormatException("Failed to parse time string: " + str); } long val = Long.parseLong(m.group(1)); String suffix = m.group(2); // Check for invalid suffixes if (suffix != null && !timeSuffixes.containsKey(suffix)) { throw new NumberFormatException("Invalid suffix: \"" + suffix + "\""); } // If suffix is valid use that, otherwise none was provided and use the default passed return unit.convert(val, suffix != null ? timeSuffixes.get(suffix) : unit); } catch (NumberFormatException e) { String timeError = "Time must be specified as seconds (s), " + "milliseconds (ms), microseconds (us), minutes (m or min), hour (h), or day (d). " + "E.g. 50s, 100ms, or 250us."; throw new NumberFormatException(timeError + "\n" + e.getMessage()); } }
Convert a passed time string (e.g. 50s, 100ms, or 250us) to a time count in the given unit. The unit is also considered the default if the given string does not specify a unit.
public static long byteStringAs(String str, ByteUnit unit) { String lower = str.toLowerCase(Locale.ROOT).trim(); try { Matcher m = Pattern.compile("([0-9]+)([a-z]+)?").matcher(lower); Matcher fractionMatcher = Pattern.compile("([0-9]+\\.[0-9]+)([a-z]+)?").matcher(lower); if (m.matches()) { long val = Long.parseLong(m.group(1)); String suffix = m.group(2); // Check for invalid suffixes if (suffix != null && !byteSuffixes.containsKey(suffix)) { throw new NumberFormatException("Invalid suffix: \"" + suffix + "\""); } // If suffix is valid use that, otherwise none was provided and use the default passed return unit.convertFrom(val, suffix != null ? byteSuffixes.get(suffix) : unit); } else if (fractionMatcher.matches()) { throw new NumberFormatException("Fractional values are not supported. Input was: " + fractionMatcher.group(1)); } else { throw new NumberFormatException("Failed to parse byte string: " + str); } } catch (NumberFormatException e) { String byteError = "Size must be specified as bytes (b), " + "kibibytes (k), mebibytes (m), gibibytes (g), tebibytes (t), or pebibytes(p). " + "E.g. 50b, 100k, or 250m."; throw new NumberFormatException(byteError + "\n" + e.getMessage()); } }
Convert a passed byte string (e.g. 50b, 100kb, or 250mb) to a size in the given unit. If no suffix is provided, a direct conversion to the provided unit is attempted.
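A hedged example covering both timeStringAs and byteStringAs above, again assuming they are the static helpers on org.apache.spark.network.util.JavaUtils (with ByteUnit from the same package).

import java.util.concurrent.TimeUnit;
import org.apache.spark.network.util.ByteUnit;
import org.apache.spark.network.util.JavaUtils;

public class SizeAndTimeParsing {
  public static void main(String[] args) {
    // Suffix present: the value is converted into the requested unit.
    System.out.println(JavaUtils.timeStringAs("50s", TimeUnit.MILLISECONDS)); // 50000
    // No suffix: the requested unit doubles as the unit of the input.
    System.out.println(JavaUtils.timeStringAs("100", TimeUnit.SECONDS));      // 100

    System.out.println(JavaUtils.byteStringAs("1g", ByteUnit.MiB));   // 1024
    System.out.println(JavaUtils.byteStringAs("250", ByteUnit.BYTE)); // 250
  }
}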
public static byte[] bufferToArray(ByteBuffer buffer) { if (buffer.hasArray() && buffer.arrayOffset() == 0 && buffer.array().length == buffer.remaining()) { return buffer.array(); } else { byte[] bytes = new byte[buffer.remaining()]; buffer.get(bytes); return bytes; } }
Returns a byte array with the buffer's contents, trying to avoid copying the data if possible.
public static void readFully(ReadableByteChannel channel, ByteBuffer dst) throws IOException { int expected = dst.remaining(); while (dst.hasRemaining()) { if (channel.read(dst) < 0) { throw new EOFException(String.format("Not enough bytes in channel (expected %d).", expected)); } } }
Fills a buffer with data read from the channel.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case STATUS: return isSetStatus(); case OPERATION_STATE: return isSetOperationState(); case SQL_STATE: return isSetSqlState(); case ERROR_CODE: return isSetErrorCode(); case ERROR_MESSAGE: return isSetErrorMessage(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
@Override protected void subAppend(LoggingEvent event) { super.subAppend(event); // That should've gone into our writer. Notify the LogContext. String logOutput = writer.toString(); writer.reset(); OperationLog log = operationManager.getOperationLogByThread(); if (log == null) { LOG.debug(" ---+++=== Dropped log event from thread " + event.getThreadName()); return; } log.writeOperationLog(logOutput); }
Overrides WriterAppender.subAppend(), which does the real logging. No need to worry about concurrency since log4j calls this synchronously.
public void pointTo(Object baseObject, long baseOffset, int sizeInBytes) { // Read the numBytes of key array from the first 8 bytes. final long keyArraySize = Platform.getLong(baseObject, baseOffset); assert keyArraySize >= 0 : "keyArraySize (" + keyArraySize + ") should >= 0"; assert keyArraySize <= Integer.MAX_VALUE : "keyArraySize (" + keyArraySize + ") should <= Integer.MAX_VALUE"; final int valueArraySize = sizeInBytes - (int)keyArraySize - 8; assert valueArraySize >= 0 : "valueArraySize (" + valueArraySize + ") should >= 0"; keys.pointTo(baseObject, baseOffset + 8, (int)keyArraySize); values.pointTo(baseObject, baseOffset + 8 + keyArraySize, valueArraySize); assert keys.numElements() == values.numElements(); this.baseObject = baseObject; this.baseOffset = baseOffset; this.sizeInBytes = sizeInBytes; }
Update this UnsafeMapData to point to different backing data. @param baseObject the base object @param baseOffset the offset within the base object @param sizeInBytes the size of this map's backing data, in bytes
public void reset() { if (isConstant) return; if (childColumns != null) { for (ColumnVector c: childColumns) { ((WritableColumnVector) c).reset(); } } elementsAppended = 0; if (numNulls > 0) { putNotNulls(0, capacity); numNulls = 0; } }
Resets this column for writing. The currently stored values are no longer accessible.
public WritableColumnVector reserveDictionaryIds(int capacity) { if (dictionaryIds == null) { dictionaryIds = reserveNewColumn(capacity, DataTypes.IntegerType); } else { dictionaryIds.reset(); dictionaryIds.reserve(capacity); } return dictionaryIds; }
Reserves an integer column for dictionary ids.
public final int appendStruct(boolean isNull) { if (isNull) { appendNull(); for (WritableColumnVector c: childColumns) { if (c.type instanceof StructType) { c.appendStruct(true); } else { c.appendNull(); } } } else { appendNotNull(); } return elementsAppended; }
Appends a NULL struct. This *has* to be used for structs instead of appendNull(), as this recursively appends a NULL to the struct's children. This logic is kept out of the general appendNull() implementation to keep the more common non-struct case fast.
@Override public final ColumnarArray getArray(int rowId) { if (isNullAt(rowId)) return null; return new ColumnarArray(arrayData(), getArrayOffset(rowId), getArrayLength(rowId)); }
`WritableColumnVector` puts the data of the array in the first child column vector, and puts the array offsets and lengths in the current column vector.
@Override public final ColumnarMap getMap(int rowId) { if (isNullAt(rowId)) return null; return new ColumnarMap(getChild(0), getChild(1), getArrayOffset(rowId), getArrayLength(rowId)); }
`WritableColumnVector` puts the key array in the first child column vector, the value array in the second child column vector, and puts the offsets and lengths in the current column vector.
public TStatus toTStatus() { // TODO: convert sqlState, etc. TStatus tStatus = new TStatus(TStatusCode.ERROR_STATUS); tStatus.setSqlState(getSQLState()); tStatus.setErrorCode(getErrorCode()); tStatus.setErrorMessage(getMessage()); tStatus.setInfoMessages(toString(this)); return tStatus; }
Converts the current object to a {@link TStatus} object. @return a {@link TStatus} object
public static TStatus toTStatus(Exception e) { if (e instanceof HiveSQLException) { return ((HiveSQLException)e).toTStatus(); } TStatus tStatus = new TStatus(TStatusCode.ERROR_STATUS); tStatus.setErrorMessage(e.getMessage()); tStatus.setInfoMessages(toString(e)); return tStatus; }
Converts the specified {@link Exception} object into a {@link TStatus} object. @param e an {@link Exception} object @return a {@link TStatus} object
public static <T> TypedColumn<T, Double> avg(MapFunction<T, Double> f) { return new TypedAverage<T>(f).toColumnJava(); }
Average aggregate function. @since 2.0.0
public static <T> TypedColumn<T, Long> count(MapFunction<T, Object> f) { return new TypedCount<T>(f).toColumnJava(); }
Count aggregate function. @since 2.0.0
public static <T> TypedColumn<T, Double> sum(MapFunction<T, Double> f) { return new TypedSumDouble<T>(f).toColumnJava(); }
Sum aggregate function for floating point (double) type. @since 2.0.0
public static <T> TypedColumn<T, Long> sumLong(MapFunction<T, Long> f) { return new TypedSumLong<T>(f).toColumnJava(); }
Sum aggregate function for integral (long, i.e. 64 bit integer) type. @since 2.0.0
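A hedged sketch of applying these typed aggregators to a grouped Dataset in Java. The Sale bean and the grouping key are invented, and the aggregators are assumed to be the static factories on org.apache.spark.sql.expressions.javalang.typed.

import java.io.Serializable;
import java.util.Arrays;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.expressions.javalang.typed;
import scala.Tuple2;

public class TypedAggExample {
  // Hypothetical bean used as the Dataset element type.
  public static class Sale implements Serializable {
    private String region;
    private double amount;
    public Sale() {}
    public Sale(String region, double amount) { this.region = region; this.amount = amount; }
    public String getRegion() { return region; }
    public void setRegion(String region) { this.region = region; }
    public double getAmount() { return amount; }
    public void setAmount(double amount) { this.amount = amount; }
  }

  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder().master("local[2]").appName("typed-agg").getOrCreate();
    Dataset<Sale> sales = spark.createDataset(
        Arrays.asList(new Sale("eu", 10.0), new Sale("eu", 5.0), new Sale("us", 7.5)),
        Encoders.bean(Sale.class));

    // Group by region and apply the typed sum aggregator shown above.
    Dataset<Tuple2<String, Double>> totals = sales
        .groupByKey((MapFunction<Sale, String>) Sale::getRegion, Encoders.STRING())
        .agg(typed.sum((MapFunction<Sale, Double>) Sale::getAmount));

    totals.show(); // (eu, 15.0), (us, 7.5)
    spark.stop();
  }
}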
private void readAsync() throws IOException { stateChangeLock.lock(); final byte[] arr = readAheadBuffer.array(); try { if (endOfStream || readInProgress) { return; } checkReadException(); readAheadBuffer.position(0); readAheadBuffer.flip(); readInProgress = true; } finally { stateChangeLock.unlock(); } executorService.execute(() -> { stateChangeLock.lock(); try { if (isClosed) { readInProgress = false; return; } // Flip this so that the close method will not close the underlying input stream when we // are reading. isReading = true; } finally { stateChangeLock.unlock(); } // Please note that it is safe to release the lock and read into the read ahead buffer // because either of following two conditions will hold - 1. The active buffer has // data available to read so the reader will not read from the read ahead buffer. // 2. This is the first time read is called or the active buffer is exhausted, // in that case the reader waits for this async read to complete. // So there is no race condition in both the situations. int read = 0; int off = 0, len = arr.length; Throwable exception = null; try { // try to fill the read ahead buffer. // if a reader is waiting, possibly return early. do { read = underlyingInputStream.read(arr, off, len); if (read <= 0) break; off += read; len -= read; } while (len > 0 && !isWaiting.get()); } catch (Throwable ex) { exception = ex; if (ex instanceof Error) { // `readException` may not be reported to the user. Rethrow Error to make sure at least // The user can see Error in UncaughtExceptionHandler. throw (Error) ex; } } finally { stateChangeLock.lock(); readAheadBuffer.limit(off); if (read < 0 || (exception instanceof EOFException)) { endOfStream = true; } else if (exception != null) { readAborted = true; readException = exception; } readInProgress = false; signalAsyncReadComplete(); stateChangeLock.unlock(); closeUnderlyingInputStreamIfNecessary(); } }); }
Read data from underlyingInputStream to readAheadBuffer asynchronously.
private long skipInternal(long n) throws IOException { assert (stateChangeLock.isLocked()); waitForAsyncReadComplete(); if (isEndOfStream()) { return 0; } if (available() >= n) { // we can skip from the internal buffers int toSkip = (int) n; // We need to skip from both active buffer and read ahead buffer toSkip -= activeBuffer.remaining(); assert(toSkip > 0); // skipping from activeBuffer already handled. activeBuffer.position(0); activeBuffer.flip(); readAheadBuffer.position(toSkip + readAheadBuffer.position()); swapBuffers(); // Trigger async read to emptied read ahead buffer. readAsync(); return n; } else { int skippedBytes = available(); long toSkip = n - skippedBytes; activeBuffer.position(0); activeBuffer.flip(); readAheadBuffer.position(0); readAheadBuffer.flip(); long skippedFromInputStream = underlyingInputStream.skip(toSkip); readAsync(); return skippedBytes + skippedFromInputStream; } }
Internal skip function which should be called only from skip() api. The assumption is that the stateChangeLock is already acquired in the caller before calling this function.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case OBJECT_TYPE_PTR: return isSetObjectTypePtr(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case TYPE: return isSetType(); case TYPE_QUALIFIERS: return isSetTypeQualifiers(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
public void cacheThreadLocalRawStore() { Long threadId = this.getId(); RawStore threadLocalRawStore = HiveMetaStore.HMSHandler.getRawStore(); if (threadLocalRawStore != null && !threadRawStoreMap.containsKey(threadId)) { LOG.debug("Adding RawStore: " + threadLocalRawStore + ", for the thread: " + this.getName() + " to threadRawStoreMap for future cleanup."); threadRawStoreMap.put(threadId, threadLocalRawStore); } }
Cache the ThreadLocal RawStore object. Called from the corresponding thread.
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case GUID: return isSetGuid(); case SECRET: return isSetSecret(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
protected String convertIdentifierPattern(final String pattern, boolean datanucleusFormat) { if (pattern == null) { return convertPattern("%", true); } else { return convertPattern(pattern, datanucleusFormat); } }
Convert wildcards and escape sequences from JDBC format to datanucleus/regex format.
protected String convertSchemaPattern(final String pattern) { if ((pattern == null) || pattern.isEmpty()) { return convertPattern("%", true); } else { return convertPattern(pattern, true); } }
Convert wildcards and escape sequences of a schema pattern from JDBC format to datanucleus/regex format. The schema pattern also treats the empty string as a wildcard.
private String convertPattern(final String pattern, boolean datanucleusFormat) { String wStr; if (datanucleusFormat) { wStr = "*"; } else { wStr = ".*"; } return pattern .replaceAll("([^\\\\])%", "$1" + wStr).replaceAll("\\\\%", "%").replaceAll("^%", wStr) .replaceAll("([^\\\\])_", "$1.").replaceAll("\\\\_", "_").replaceAll("^_", "."); }
Convert a pattern containing JDBC catalog search wildcards into Java regex patterns. @param pattern input which may contain '%' or '_' wildcard characters, or these characters escaped using {@link #getSearchStringEscape()}. @return the pattern with %/_ replaced by regex search characters; escaped characters are handled as well. The datanucleus module expects the wildcard as '*', whereas the column search is done locally inside the Hive code and requires the regex wildcard format '.*'; which form is produced is driven by the datanucleusFormat flag.
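To make the conversion rules concrete, here is a hedged standalone sketch that replays the same chain of replacements (the method above is private, so this copy exists purely for illustration).

public class PatternConversionExample {
  // Same chain of replacements as convertPattern above.
  static String convert(String pattern, boolean datanucleusFormat) {
    String wStr = datanucleusFormat ? "*" : ".*";
    return pattern
        .replaceAll("([^\\\\])%", "$1" + wStr).replaceAll("\\\\%", "%").replaceAll("^%", wStr)
        .replaceAll("([^\\\\])_", "$1.").replaceAll("\\\\_", "_").replaceAll("^_", ".");
  }

  public static void main(String[] args) {
    System.out.println(convert("emp%", true));    // emp*   (datanucleus wildcard)
    System.out.println(convert("emp%", false));   // emp.*  (regex wildcard)
    System.out.println(convert("emp_no", false)); // emp.no ('_' matches a single character)
    System.out.println(convert("100\\%", false)); // 100%   (escaped '%' stays literal)
  }
}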
@Override public UnsafeRow getKeyRow(int rowId) { assert(rowId >= 0); assert(rowId < numRows); if (keyRowId != rowId) { // if keyRowId == rowId, desired keyRow is already cached long offset = keyOffsets[rowId]; int klen = Platform.getInt(base, offset - 4); keyRow.pointTo(base, offset, klen); // set keyRowId so we can check if desired row is cached keyRowId = rowId; } return keyRow; }
Returns the key row in this batch at `rowId`. Returned key row is reused across calls.
@Override public UnsafeRow getValueFromKey(int rowId) { if (keyRowId != rowId) { getKeyRow(rowId); } assert(rowId >= 0); long offset = keyRow.getBaseOffset(); int klen = keyRow.getSizeInBytes(); int vlen = Platform.getInt(base, offset - 8) - klen - 4; valueRow.pointTo(base, offset + klen, vlen); return valueRow; }
Returns the value row in two steps: 1) look up the key row with the same id (skipped if the key row is already cached); 2) retrieve the value row by reusing the metadata from step 1. Most of the time, step 1 is skipped because `getKeyRow(id)` is often called before `getValueFromKey(id)`.
@Override public org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() { return new org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow>() { private final UnsafeRow key = new UnsafeRow(keySchema.length()); private final UnsafeRow value = new UnsafeRow(valueSchema.length()); private long offsetInPage = 0; private int recordsInPage = 0; private int currentklen; private int currentvlen; private int totalLength; private boolean initialized = false; private void init() { if (page != null) { offsetInPage = page.getBaseOffset(); recordsInPage = numRows; } initialized = true; } @Override public boolean next() { if (!initialized) init(); // no records left in the current page: free it and end the iteration if (recordsInPage == 0) { freeCurrentPage(); return false; } totalLength = Platform.getInt(base, offsetInPage) - 4; currentklen = Platform.getInt(base, offsetInPage + 4); currentvlen = totalLength - currentklen; key.pointTo(base, offsetInPage + 8, currentklen); value.pointTo(base, offsetInPage + 8 + currentklen, currentvlen); offsetInPage += 8 + totalLength + 8; recordsInPage -= 1; return true; } @Override public UnsafeRow getKey() { return key; } @Override public UnsafeRow getValue() { return value; } @Override public void close() { // do nothing } private void freeCurrentPage() { if (page != null) { freePage(page); page = null; } } }; }
Returns an iterator to go through all rows
public boolean isSet(_Fields field) { if (field == null) { throw new IllegalArgumentException(); } switch (field) { case SESSION_HANDLE: return isSetSessionHandle(); case INFO_TYPE: return isSetInfoType(); } throw new IllegalStateException(); }
Returns true if the field corresponding to fieldID is set (has been assigned a value) and false otherwise.
public Iterator<InternalRow> rowIterator() { final int maxRows = numRows; final MutableColumnarRow row = new MutableColumnarRow(columns); return new Iterator<InternalRow>() { int rowId = 0; @Override public boolean hasNext() { return rowId < maxRows; } @Override public InternalRow next() { if (rowId >= maxRows) { throw new NoSuchElementException(); } row.rowId = rowId++; return row; } @Override public void remove() { throw new UnsupportedOperationException(); } }; }
Returns an iterator over the rows in this batch.
public InternalRow getRow(int rowId) { assert(rowId >= 0 && rowId < numRows); row.rowId = rowId; return row; }
Returns the row in this batch at `rowId`. Returned row is reused across calls.
public void fetchChunk( long streamId, int chunkIndex, ChunkReceivedCallback callback) { if (logger.isDebugEnabled()) { logger.debug("Sending fetch chunk request {} to {}", chunkIndex, getRemoteAddress(channel)); } StreamChunkId streamChunkId = new StreamChunkId(streamId, chunkIndex); StdChannelListener listener = new StdChannelListener(streamChunkId) { @Override void handleFailure(String errorMsg, Throwable cause) { handler.removeFetchRequest(streamChunkId); callback.onFailure(chunkIndex, new IOException(errorMsg, cause)); } }; handler.addFetchRequest(streamChunkId, callback); channel.writeAndFlush(new ChunkFetchRequest(streamChunkId)).addListener(listener); }
Requests a single chunk from the remote side, from the pre-negotiated streamId. Chunk indices go from 0 onwards. It is valid to request the same chunk multiple times, though some streams may not support this. Multiple fetchChunk requests may be outstanding simultaneously, and the chunks are guaranteed to be returned in the same order that they were requested, assuming only a single TransportClient is used to fetch the chunks. @param streamId Identifier that refers to a stream in the remote StreamManager. This should be agreed upon by client and server beforehand. @param chunkIndex 0-based index of the chunk to fetch @param callback Callback invoked upon successful receipt of chunk, or upon any failure.
public void stream(String streamId, StreamCallback callback) { StdChannelListener listener = new StdChannelListener(streamId) { @Override void handleFailure(String errorMsg, Throwable cause) throws Exception { callback.onFailure(streamId, new IOException(errorMsg, cause)); } }; if (logger.isDebugEnabled()) { logger.debug("Sending stream request for {} to {}", streamId, getRemoteAddress(channel)); } // Need to synchronize here so that the callback is added to the queue and the RPC is // written to the socket atomically, so that callbacks are called in the right order // when responses arrive. synchronized (this) { handler.addStreamCallback(streamId, callback); channel.writeAndFlush(new StreamRequest(streamId)).addListener(listener); } }
Request to stream the data with the given stream ID from the remote end. @param streamId The stream to fetch. @param callback Object to call with the stream data.