class SQLConf extends Serializable with Logging
A class that enables the setting and getting of mutable config parameters/hints.
In the presence of a SQLContext, these can be set and queried by passing SET commands into Spark SQL's query functions (i.e. sql()). Otherwise, users of this class can modify the hints by programmatically calling the setters and getters of this class.
SQLConf is thread-safe (internally synchronized, so safe to be used in multiple threads).
- Alphabetic
- By Inheritance
- SQLConf
- Logging
- Serializable
- Serializable
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Instance Constructors
- new SQLConf()
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def adaptiveExecutionEnabled: Boolean
- def adaptiveExecutionLogLevel: String
- def addSingleFileInAddFile: Boolean
- def advancedPartitionPredicatePushdownEnabled: Boolean
- def allowNegativeScaleOfDecimalEnabled: Boolean
-
def
analyzerMaxIterations: Int
************************ Spark SQL Params/Hints *******************
- def ansiEnabled: Boolean
- def arrowMaxRecordsPerBatch: Int
- def arrowPySparkEnabled: Boolean
- def arrowPySparkFallbackEnabled: Boolean
- def arrowSafeTypeConversion: Boolean
- def arrowSparkREnabled: Boolean
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
- def autoBroadcastJoinThreshold: Long
- def autoSizeUpdateEnabled: Boolean
- def avroCompressionCodec: String
- def avroDeflateLevel: Int
- def broadcastTimeout: Long
- def bucketingEnabled: Boolean
- def bucketingMaxBuckets: Int
- def cacheVectorizedReaderEnabled: Boolean
- def cartesianProductExecBufferInMemoryThreshold: Int
- def cartesianProductExecBufferSpillThreshold: Int
- def caseSensitiveAnalysis: Boolean
- def caseSensitiveInferenceMode: SQLConf.HiveCaseSensitiveInferenceMode.Value
- def castDatetimeToString: Boolean
- def cboEnabled: Boolean
- def checkpointLocation: Option[String]
- def clear(): Unit
-
def
clone(): SQLConf
- Definition Classes
- SQLConf → AnyRef
- def coalesceShufflePartitionsEnabled: Boolean
- def codegenCacheMaxEntries: Int
- def codegenComments: Boolean
- def codegenFallback: Boolean
- def codegenSplitAggregateFunc: Boolean
- def columnBatchSize: Int
- def columnNameOfCorruptRecord: String
- def concatBinaryAsString: Boolean
- def constraintPropagationEnabled: Boolean
-
def
contains(key: String): Boolean
Return whether a given key is set in this SQLConf.
- def continuousStreamingEpochBacklogQueueSize: Int
- def continuousStreamingExecutorPollIntervalMs: Long
- def continuousStreamingExecutorQueueSize: Int
- def convertCTAS: Boolean
- def copy(entries: (ConfigEntry[_], Any)*): SQLConf
- def crossJoinEnabled: Boolean
- def csvColumnPruning: Boolean
- def csvFilterPushDown: Boolean
- def dataFramePivotMaxValues: Int
- def dataFrameRetainGroupColumns: Boolean
- def dataFrameSelfJoinAutoResolveAmbiguity: Boolean
- def datetimeJava8ApiEnabled: Boolean
- def decimalOperationsAllowPrecisionLoss: Boolean
- def defaultDataSourceName: String
- def defaultNumShufflePartitions: Int
- def defaultSizeInBytes: Long
- def disabledV2StreamingMicroBatchReaders: String
- def disabledV2StreamingWriters: String
- def dynamicPartitionPruningEnabled: Boolean
- def dynamicPartitionPruningFallbackFilterRatio: Double
- def dynamicPartitionPruningReuseBroadcastOnly: Boolean
- def dynamicPartitionPruningUseStats: Boolean
- def eltOutputAsString: Boolean
- def enableRadixSort: Boolean
- def enableTwoLevelAggMap: Boolean
- def enableVectorizedHashMap: Boolean
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def escapedStringLiterals: Boolean
- def exchangeReuseEnabled: Boolean
- def exponentLiteralAsDecimalEnabled: Boolean
- def fallBackToHdfsForStatsEnabled: Boolean
- def fastHashAggregateRowMaxCapacityBit: Int
- def fetchShuffleBlocksInBatch: Boolean
- def fileCommitProtocolClass: String
- def fileCompressionFactor: Double
- def fileSinkLogCleanupDelay: Long
- def fileSinkLogCompactInterval: Int
- def fileSinkLogDeletion: Boolean
- def fileSourceLogCleanupDelay: Long
- def fileSourceLogCompactInterval: Int
- def fileSourceLogDeletion: Boolean
- def filesMaxPartitionBytes: Long
- def filesOpenCostInBytes: Long
- def filesourcePartitionFileCacheSize: Long
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
- def gatherFastStats: Boolean
-
def
getAllConfs: Map[String, String]
Return all the configuration properties that have been set (i.e. not the default).
Return all the configuration properties that have been set (i.e. not the default). This creates a new copy of the config properties in the form of a Map.
-
def
getAllDefinedConfs: Seq[(String, String, String, String)]
Return all the configuration definitions that have been defined in SQLConf.
Return all the configuration definitions that have been defined in SQLConf. Each definition contains key, defaultValue, doc and version.
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
getConf[T](entry: OptionalConfigEntry[T]): Option[T]
Return the value of an optional Spark SQL configuration property for the given key.
Return the value of an optional Spark SQL configuration property for the given key. If the key is not set yet, returns None.
-
def
getConf[T](entry: ConfigEntry[T]): T
Return the value of Spark SQL configuration property for the given key.
Return the value of Spark SQL configuration property for the given key. If the key is not set yet, return the
defaultValue in ConfigEntry. -
def
getConf[T](entry: ConfigEntry[T], defaultValue: T): T
Return the value of Spark SQL configuration property for the given key.
Return the value of Spark SQL configuration property for the given key. If the key is not set yet, return
defaultValue. This is useful when the defaultValue in ConfigEntry is not the desired one. -
def
getConfString(key: String, defaultValue: String): String
Return the string value of Spark SQL configuration property for the given key.
Return the string value of Spark SQL configuration property for the given key. If the key is not set yet, return
defaultValue. -
def
getConfString(key: String): String
Return the value of Spark SQL configuration property for the given key.
Return the value of Spark SQL configuration property for the given key.
- Annotations
- @throws( "if key is not set" )
- def groupByAliases: Boolean
- def groupByOrdinal: Boolean
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
hintErrorHandler: HintErrorHandler
Returns the error handler for handling hint errors.
- def histogramEnabled: Boolean
- def histogramNumBins: Int
- def hiveThriftServerSingleSession: Boolean
- def hugeMethodLimit: Int
- def ignoreCorruptFiles: Boolean
- def ignoreDataLocality: Boolean
- def ignoreMissingFiles: Boolean
- def inMemoryPartitionPruning: Boolean
- def inMemoryTableScanStatisticsEnabled: Boolean
-
def
initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- Attributes
- protected
- Definition Classes
- Logging
-
def
initializeLogIfNecessary(isInterpreter: Boolean): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def isModifiable(key: String): Boolean
- def isOrcSchemaMergingEnabled: Boolean
- def isParquetBinaryAsString: Boolean
- def isParquetINT96AsTimestamp: Boolean
- def isParquetINT96TimestampConversion: Boolean
- def isParquetSchemaMergingEnabled: Boolean
- def isParquetSchemaRespectSummaries: Boolean
- def isReplEagerEvalEnabled: Boolean
-
def
isTraceEnabled(): Boolean
- Attributes
- protected
- Definition Classes
- Logging
- def isUnsupportedOperationCheckEnabled: Boolean
- def joinReorderCardWeight: Double
- def joinReorderDPStarFilter: Boolean
- def joinReorderDPThreshold: Int
- def joinReorderEnabled: Boolean
- def jsonGeneratorIgnoreNullFields: Boolean
- def legacyMsSqlServerNumericMappingEnabled: Boolean
- def legacySizeOfNull: Boolean
- def legacyTimeParserPolicy: SQLConf.LegacyBehaviorPolicy.Value
- def limitScaleUpFactor: Int
- def literalPickMinimumPrecision: Boolean
-
def
log: Logger
- Attributes
- protected
- Definition Classes
- Logging
-
def
logDebug(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logDebug(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logError(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logError(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logInfo(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logInfo(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logName: String
- Attributes
- protected
- Definition Classes
- Logging
-
def
logTrace(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logTrace(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logWarning(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logWarning(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def loggingMaxLinesForCodegen: Int
- def manageFilesourcePartitions: Boolean
- def maxBatchesToRetainInMemory: Int
- def maxNestedViewDepth: Int
- def maxPlanStringLength: Int
- def maxRecordsPerFile: Long
- def maxToStringFields: Int
- def metastorePartitionPruning: Boolean
- def methodSplitThreshold: Int
- def minBatchesToRetain: Int
- def nameNonStructGroupingKeyAsValue: Boolean
- def ndvMaxError: Double
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def nestedPruningOnExpressions: Boolean
- def nestedSchemaPruningEnabled: Boolean
- def nonEmptyPartitionRatioForBroadcastJoin: Double
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- def numShufflePartitions: Int
- def objectAggSortBasedFallbackThreshold: Int
- def offHeapColumnVectorEnabled: Boolean
- def optimizerExcludedRules: Option[String]
- def optimizerInSetConversionThreshold: Int
- def optimizerInSetSwitchThreshold: Int
- def optimizerMaxIterations: Int
- def optimizerMetadataOnly: Boolean
- def optimizerPlanChangeBatches: Option[String]
- def optimizerPlanChangeLogLevel: String
- def optimizerPlanChangeRules: Option[String]
- def orcCompressionCodec: String
- def orcFilterPushDown: Boolean
- def orcVectorizedReaderBatchSize: Int
- def orcVectorizedReaderEnabled: Boolean
- def orderByOrdinal: Boolean
- def pandasGroupedMapAssignColumnsByName: Boolean
- def pandasUDFBufferSize: Int
- def parallelFileListingInStatsComputation: Boolean
- def parallelPartitionDiscoveryParallelism: Int
- def parallelPartitionDiscoveryThreshold: Int
- def parquetCompressionCodec: String
- def parquetFilterPushDown: Boolean
- def parquetFilterPushDownDate: Boolean
- def parquetFilterPushDownDecimal: Boolean
- def parquetFilterPushDownInFilterThreshold: Int
- def parquetFilterPushDownStringStartWith: Boolean
- def parquetFilterPushDownTimestamp: Boolean
- def parquetOutputCommitterClass: String
- def parquetOutputTimestampType: SQLConf.ParquetOutputTimestampType.Value
- def parquetRecordFilterEnabled: Boolean
- def parquetVectorizedReaderBatchSize: Int
- def parquetVectorizedReaderEnabled: Boolean
- def partitionColumnTypeInferenceEnabled: Boolean
- def partitionOverwriteMode: SQLConf.PartitionOverwriteMode.Value
- def percentileAccuracy: Int
- def planStatsEnabled: Boolean
- def preferSortMergeJoin: Boolean
- def pysparkJVMStacktraceEnabled: Boolean
- def rangeExchangeSampleSizePerPartition: Int
-
val
reader: ConfigReader
- Attributes
- protected
-
def
redactOptions[K, V](options: Map[K, V]): Map[K, V]
Redacts the given option map according to the description of SQL_OPTIONS_REDACTION_PATTERN.
- def replEagerEvalMaxNumRows: Int
- def replEagerEvalTruncate: Int
- def replaceDatabricksSparkAvroEnabled: Boolean
- def replaceExceptWithFilter: Boolean
-
def
resolver: Resolver
Returns the Resolver for the current configuration, which can be used to determine if two identifiers are equal.
- def runSQLonFile: Boolean
- def serializerNestedSchemaPruningEnabled: Boolean
- def sessionLocalTimeZone: String
- def setCommandRejectsSparkCoreConfs: Boolean
-
def
setConf[T](entry: ConfigEntry[T], value: T): Unit
Set the given Spark SQL configuration property.
-
def
setConf(props: Properties): Unit
Set Spark SQL configuration properties.
-
def
setConfString(key: String, value: String): Unit
Set the given Spark SQL configuration property using a
string value. -
def
setConfWithCheck(key: String, value: String): Unit
- Attributes
- protected
- def setOpsPrecedenceEnforced: Boolean
-
val
settings: Map[String, String]
Only low degree of contention is expected for conf, thus NOT using ConcurrentHashMap.
Only low degree of contention is expected for conf, thus NOT using ConcurrentHashMap.
- Attributes
- protected[spark]
- def sortBeforeRepartition: Boolean
- def sortMergeJoinExecBufferInMemoryThreshold: Int
- def sortMergeJoinExecBufferSpillThreshold: Int
- def starSchemaDetection: Boolean
- def starSchemaFTRatio: Double
- def stateStoreMinDeltasForSnapshot: Int
- def stateStoreProviderClass: String
- def storeAssignmentPolicy: SQLConf.StoreAssignmentPolicy.Value
- def streamingFileCommitProtocolClass: String
- def streamingMetricsEnabled: Boolean
- def streamingNoDataMicroBatchesEnabled: Boolean
- def streamingNoDataProgressEventInterval: Long
- def streamingPollingDelay: Long
- def streamingProgressRetention: Int
- def streamingSchemaInference: Boolean
- def stringRedactionPattern: Option[Regex]
- def subexpressionEliminationEnabled: Boolean
- def subqueryReuseEnabled: Boolean
- def supportQuotedRegexColumnName: Boolean
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
- def tableRelationCacheSize: Int
-
def
toString(): String
- Definition Classes
- AnyRef → Any
- def topKSortFallbackThreshold: Int
- def truncateTableIgnorePermissionAcl: Boolean
- def unsetConf(entry: ConfigEntry[_]): Unit
- def unsetConf(key: String): Unit
- def useCompression: Boolean
- def useObjectHashAggregation: Boolean
- def validatePartitionColumns: Boolean
- def variableSubstituteEnabled: Boolean
- def verifyPartitionPath: Boolean
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- def warehousePath: String
- def wholeStageEnabled: Boolean
- def wholeStageMaxNumFields: Int
- def wholeStageSplitConsumeFuncByOperator: Boolean
- def wholeStageUseIdInClassName: Boolean
- def windowExecBufferInMemoryThreshold: Int
- def windowExecBufferSpillThreshold: Int
- def writeLegacyParquetFormat: Boolean