From 958e50d2827fe9a294ffac450d6b9de6d95e913e Mon Sep 17 00:00:00 2001
From: glorysdj
Date: Mon, 22 Nov 2021 13:33:57 +0800
Subject: [PATCH] fix hive warehouse path

---
 .../org/apache/spark/examples/sql/hive/SparkHiveExample.scala | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala
index 3ecf8772c00ff..c67a0dc33345d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala
@@ -144,7 +144,8 @@ object SparkHiveExample {
     spark.sqlContext.setConf("hive.exec.dynamic.partition", "true")
     spark.sqlContext.setConf("hive.exec.dynamic.partition.mode", "nonstrict")
     // Create a Hive partitioned table using DataFrame API
-    df.write.partitionBy("key").format("hive").saveAsTable("hive_part_tbl")
+    df.write.partitionBy("key").format("hive")
+      .mode(SaveMode.Overwrite).saveAsTable("hive_part_tbl")
     // Partitioned column `key` will be moved to the end of the schema.
     sql("SELECT * FROM hive_part_tbl").show()
     // +-------+---+
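
For context, below is a minimal standalone sketch of the write path this patch changes: it adds .mode(SaveMode.Overwrite) so the partitioned Hive table is replaced on re-runs instead of failing because hive_part_tbl already exists. The object name HivePartitionedWriteSketch and the sample DataFrame are illustrative assumptions, not part of the patched example; the write call itself mirrors the "+" lines in the hunk above.

// HivePartitionedWriteSketch.scala -- illustrative sketch, not part of the patch.
// Assumes a Hive-enabled Spark build; table and column names mirror the example.
import org.apache.spark.sql.{SaveMode, SparkSession}

object HivePartitionedWriteSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("HivePartitionedWriteSketch")
      .enableHiveSupport()
      .getOrCreate()
    import spark.implicits._

    // Allow dynamic partitioning, as in the example's surrounding context.
    spark.sqlContext.setConf("hive.exec.dynamic.partition", "true")
    spark.sqlContext.setConf("hive.exec.dynamic.partition.mode", "nonstrict")

    // Hypothetical sample data standing in for the example's `df`.
    val df = Seq((1, "one"), (2, "two")).toDF("key", "value")

    // SaveMode.Overwrite replaces any existing hive_part_tbl rather than
    // throwing a "table already exists" error on a second run.
    df.write.partitionBy("key").format("hive")
      .mode(SaveMode.Overwrite).saveAsTable("hive_part_tbl")

    // Partitioned column `key` is moved to the end of the schema.
    spark.sql("SELECT * FROM hive_part_tbl").show()
    spark.stop()
  }
}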