Package gorsat.spark

Class GorBatchTable

  • All Implemented Interfaces:
    org.apache.spark.sql.connector.catalog.SupportsRead, org.apache.spark.sql.connector.catalog.SupportsWrite, org.apache.spark.sql.connector.catalog.Table, org.apache.spark.sql.connector.read.ScanBuilder, org.apache.spark.sql.connector.read.SupportsPushDownFilters

    public abstract class GorBatchTable
    extends java.lang.Object
    implements org.apache.spark.sql.connector.catalog.Table, org.apache.spark.sql.connector.catalog.SupportsRead, org.apache.spark.sql.connector.catalog.SupportsWrite, org.apache.spark.sql.connector.read.SupportsPushDownFilters
    • Constructor Summary

      Constructors 
      Constructor Description
      GorBatchTable(java.lang.String query, boolean tag, java.lang.String path, java.lang.String filter, java.lang.String filterFile, java.lang.String filterColumn, java.lang.String splitFile, java.lang.String seek, java.lang.String redisUri, java.lang.String streamKey, java.lang.String jobId, java.lang.String cacheFile, java.lang.String securityContext, java.lang.String useCpp, boolean hadoopInfer)
      GorBatchTable(java.lang.String query, boolean tag, java.lang.String path, java.lang.String filter, java.lang.String filterFile, java.lang.String filterColumn, java.lang.String splitFile, java.lang.String seek, org.apache.spark.sql.types.StructType schema, java.lang.String redisUri, java.lang.String streamKey, java.lang.String jobId, java.lang.String cacheFile, java.lang.String securityContext, java.lang.String useCpp, boolean hadoopInfer)
    • Method Summary

      All Methods · Instance Methods · Concrete Methods
      Modifier and Type Method Description
      org.apache.spark.sql.connector.read.Scan build()
      java.util.Set&lt;org.apache.spark.sql.connector.catalog.TableCapability&gt; capabilities()
      java.lang.String name()
      org.apache.spark.sql.connector.read.ScanBuilder newScanBuilder(org.apache.spark.sql.util.CaseInsensitiveStringMap caseInsensitiveStringMap)
      org.apache.spark.sql.connector.write.WriteBuilder newWriteBuilder(org.apache.spark.sql.connector.write.LogicalWriteInfo info)
      org.apache.spark.sql.connector.expressions.Transform[] partitioning()
      org.apache.spark.sql.sources.Filter[] pushedFilters()
      org.apache.spark.sql.sources.Filter[] pushFilters(org.apache.spark.sql.sources.Filter[] filters)
      org.apache.spark.sql.types.StructType schema()
      void setAliasFile(java.lang.String aliasFile)
      void setCacheDir(java.lang.String cacheDir)
      void setConfigFile(java.lang.String configFile)
      void setProjectRoot(java.lang.String projectRoot)
      • Methods inherited from class java.lang.Object

        clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • Methods inherited from interface org.apache.spark.sql.connector.catalog.Table

        properties
    • Constructor Detail

      • GorBatchTable

        public GorBatchTable(java.lang.String query,
                             boolean tag,
                             java.lang.String path,
                             java.lang.String filter,
                             java.lang.String filterFile,
                             java.lang.String filterColumn,
                             java.lang.String splitFile,
                             java.lang.String seek,
                             java.lang.String redisUri,
                             java.lang.String streamKey,
                             java.lang.String jobId,
                             java.lang.String cacheFile,
                             java.lang.String securityContext,
                             java.lang.String useCpp,
                             boolean hadoopInfer)
                      throws java.io.IOException
        Throws:
        java.io.IOException
      • GorBatchTable

        public GorBatchTable(java.lang.String query,
                             boolean tag,
                             java.lang.String path,
                             java.lang.String filter,
                             java.lang.String filterFile,
                             java.lang.String filterColumn,
                             java.lang.String splitFile,
                             java.lang.String seek,
                             org.apache.spark.sql.types.StructType schema,
                             java.lang.String redisUri,
                             java.lang.String streamKey,
                             java.lang.String jobId,
                             java.lang.String cacheFile,
                             java.lang.String securityContext,
                             java.lang.String useCpp,
                             boolean hadoopInfer)
                      throws java.io.IOException
        Throws:
        java.io.IOException
    • Method Detail

      • setProjectRoot

        public void setProjectRoot(java.lang.String projectRoot)
      • setCacheDir

        public void setCacheDir(java.lang.String cacheDir)
      • setConfigFile

        public void setConfigFile(java.lang.String configFile)
      • setAliasFile

        public void setAliasFile(java.lang.String aliasFile)
      • build

        public org.apache.spark.sql.connector.read.Scan build()
        Specified by:
        build in interface org.apache.spark.sql.connector.read.ScanBuilder
      • pushFilters

        public org.apache.spark.sql.sources.Filter[] pushFilters(org.apache.spark.sql.sources.Filter[] filters)
        Specified by:
        pushFilters in interface org.apache.spark.sql.connector.read.SupportsPushDownFilters
      • pushedFilters

        public org.apache.spark.sql.sources.Filter[] pushedFilters()
        Specified by:
        pushedFilters in interface org.apache.spark.sql.connector.read.SupportsPushDownFilters
      • schema

        public org.apache.spark.sql.types.StructType schema()
        Specified by:
        schema in interface org.apache.spark.sql.connector.catalog.Table
      • name

        public java.lang.String name()
        Specified by:
        name in interface org.apache.spark.sql.connector.catalog.Table
      • capabilities

        public java.util.Set<org.apache.spark.sql.connector.catalog.TableCapability> capabilities()
        Specified by:
        capabilities in interface org.apache.spark.sql.connector.catalog.Table
      • newWriteBuilder

        public org.apache.spark.sql.connector.write.WriteBuilder newWriteBuilder(org.apache.spark.sql.connector.write.LogicalWriteInfo info)
        Specified by:
        newWriteBuilder in interface org.apache.spark.sql.connector.catalog.SupportsWrite
      • newScanBuilder

        public org.apache.spark.sql.connector.read.ScanBuilder newScanBuilder(org.apache.spark.sql.util.CaseInsensitiveStringMap caseInsensitiveStringMap)
        Specified by:
        newScanBuilder in interface org.apache.spark.sql.connector.catalog.SupportsRead
      • partitioning

        public org.apache.spark.sql.connector.expressions.Transform[] partitioning()
        Specified by:
        partitioning in interface org.apache.spark.sql.connector.catalog.Table