Skip to content

Commit

Permalink
[SPARK-3131][SQL] compression codec config property name and default codec set to snappy
Browse files Browse the repository at this point in the history
  • Loading branch information
chutium committed Aug 26, 2014
1 parent 21235dc commit e578e21
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ private[spark] object SQLConf {
val DIALECT = "spark.sql.dialect"
val PARQUET_BINARY_AS_STRING = "spark.sql.parquet.binaryAsString"
val PARQUET_CACHE_METADATA = "spark.sql.parquet.cacheMetadata"
val PARQUET_COMPRESSION = "spark.sql.parquet.compression"
val PARQUET_COMPRESSION = "spark.sql.parquet.compression.codec"

// This is only used for the thriftserver
val THRIFTSERVER_POOL = "spark.sql.thriftserver.scheduler.pool"
Expand Down Expand Up @@ -79,7 +79,7 @@ trait SQLConf {
private[spark] def useCompression: Boolean = getConf(COMPRESS_CACHED, "false").toBoolean

/** The compression codec for writing to a Parquet file */
private[spark] def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION, "gzip")
private[spark] def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION, "snappy")

/** The number of rows that will be grouped together per column batch */
private[spark] def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE, "1000").toInt
Expand Down

0 comments on commit e578e21

Please sign in to comment.