Package gorsat.spark
Class GorBatchTable
- java.lang.Object
-
- gorsat.spark.GorBatchTable
-
- All Implemented Interfaces:
org.apache.spark.sql.connector.catalog.SupportsRead, org.apache.spark.sql.connector.catalog.SupportsWrite, org.apache.spark.sql.connector.catalog.Table, org.apache.spark.sql.connector.read.ScanBuilder, org.apache.spark.sql.connector.read.SupportsPushDownFilters
public abstract class GorBatchTable extends java.lang.Object implements org.apache.spark.sql.connector.catalog.Table, org.apache.spark.sql.connector.catalog.SupportsRead, org.apache.spark.sql.connector.catalog.SupportsWrite, org.apache.spark.sql.connector.read.SupportsPushDownFilters
-
-
Constructor Summary
Constructors
Constructor | Description
GorBatchTable(java.lang.String query, boolean tag, java.lang.String path, java.lang.String filter, java.lang.String filterFile, java.lang.String filterColumn, java.lang.String splitFile, java.lang.String seek, java.lang.String redisUri, java.lang.String streamKey, java.lang.String jobId, java.lang.String cacheFile, java.lang.String securityContext, java.lang.String useCpp, boolean hadoopInfer)
GorBatchTable(java.lang.String query, boolean tag, java.lang.String path, java.lang.String filter, java.lang.String filterFile, java.lang.String filterColumn, java.lang.String splitFile, java.lang.String seek, org.apache.spark.sql.types.StructType schema, java.lang.String redisUri, java.lang.String streamKey, java.lang.String jobId, java.lang.String cacheFile, java.lang.String securityContext, java.lang.String useCpp, boolean hadoopInfer)
-
Method Summary
All Methods Instance Methods Concrete Methods
Modifier and Type | Method | Description
org.apache.spark.sql.connector.read.Scan build()
java.util.Set<org.apache.spark.sql.connector.catalog.TableCapability> capabilities()
java.lang.String name()
org.apache.spark.sql.connector.read.ScanBuilder newScanBuilder(org.apache.spark.sql.util.CaseInsensitiveStringMap caseInsensitiveStringMap)
org.apache.spark.sql.connector.write.WriteBuilder newWriteBuilder(org.apache.spark.sql.connector.write.LogicalWriteInfo info)
org.apache.spark.sql.connector.expressions.Transform[] partitioning()
org.apache.spark.sql.sources.Filter[] pushedFilters()
org.apache.spark.sql.sources.Filter[] pushFilters(org.apache.spark.sql.sources.Filter[] filters)
org.apache.spark.sql.types.StructType schema()
void setAliasFile(java.lang.String aliasFile)
void setCacheDir(java.lang.String cacheDir)
void setConfigFile(java.lang.String configFile)
void setProjectRoot(java.lang.String projectRoot)
-
-
-
Constructor Detail
-
GorBatchTable
public GorBatchTable(java.lang.String query, boolean tag, java.lang.String path, java.lang.String filter, java.lang.String filterFile, java.lang.String filterColumn, java.lang.String splitFile, java.lang.String seek, java.lang.String redisUri, java.lang.String streamKey, java.lang.String jobId, java.lang.String cacheFile, java.lang.String securityContext, java.lang.String useCpp, boolean hadoopInfer) throws java.io.IOException
Throws:
java.io.IOException
-
GorBatchTable
public GorBatchTable(java.lang.String query, boolean tag, java.lang.String path, java.lang.String filter, java.lang.String filterFile, java.lang.String filterColumn, java.lang.String splitFile, java.lang.String seek, org.apache.spark.sql.types.StructType schema, java.lang.String redisUri, java.lang.String streamKey, java.lang.String jobId, java.lang.String cacheFile, java.lang.String securityContext, java.lang.String useCpp, boolean hadoopInfer) throws java.io.IOException
Throws:
java.io.IOException
-
-
Method Detail
-
setProjectRoot
public void setProjectRoot(java.lang.String projectRoot)
-
setCacheDir
public void setCacheDir(java.lang.String cacheDir)
-
setConfigFile
public void setConfigFile(java.lang.String configFile)
-
setAliasFile
public void setAliasFile(java.lang.String aliasFile)
-
build
public org.apache.spark.sql.connector.read.Scan build()
- Specified by:
build in interface org.apache.spark.sql.connector.read.ScanBuilder
-
pushFilters
public org.apache.spark.sql.sources.Filter[] pushFilters(org.apache.spark.sql.sources.Filter[] filters)
- Specified by:
pushFilters in interface org.apache.spark.sql.connector.read.SupportsPushDownFilters
-
pushedFilters
public org.apache.spark.sql.sources.Filter[] pushedFilters()
- Specified by:
pushedFilters in interface org.apache.spark.sql.connector.read.SupportsPushDownFilters
-
schema
public org.apache.spark.sql.types.StructType schema()
- Specified by:
schema in interface org.apache.spark.sql.connector.catalog.Table
-
name
public java.lang.String name()
- Specified by:
name in interface org.apache.spark.sql.connector.catalog.Table
-
capabilities
public java.util.Set<org.apache.spark.sql.connector.catalog.TableCapability> capabilities()
- Specified by:
capabilities in interface org.apache.spark.sql.connector.catalog.Table
-
newWriteBuilder
public org.apache.spark.sql.connector.write.WriteBuilder newWriteBuilder(org.apache.spark.sql.connector.write.LogicalWriteInfo info)
- Specified by:
newWriteBuilder in interface org.apache.spark.sql.connector.catalog.SupportsWrite
-
newScanBuilder
public org.apache.spark.sql.connector.read.ScanBuilder newScanBuilder(org.apache.spark.sql.util.CaseInsensitiveStringMap caseInsensitiveStringMap)
- Specified by:
newScanBuilder in interface org.apache.spark.sql.connector.catalog.SupportsRead
-
partitioning
public org.apache.spark.sql.connector.expressions.Transform[] partitioning()
- Specified by:
partitioning in interface org.apache.spark.sql.connector.catalog.Table
-
-