diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/DataSourceStrategy.scala
index 76ee4b5d7d897..47f265b8db4a0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/DataSourceStrategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/DataSourceStrategy.scala
@@ -90,7 +90,7 @@ private[sql] object DataSourceStrategy extends Strategy with Logging {
     case PhysicalOperation(projectList, filters, l @ LogicalRelation(t: FSBasedRelation)) =>
       val inputPaths = t.paths.map(new Path(_)).flatMap { path =>
         val fs = path.getFileSystem(t.sqlContext.sparkContext.hadoopConfiguration)
-        val qualifiedPath = fs.makeQualified(path)
+        val qualifiedPath = path.makeQualified(fs.getUri, fs.getWorkingDirectory)
         SparkHadoopUtil.get.listLeafStatuses(fs, qualifiedPath).map(_.getPath).filterNot { path =>
           val name = path.getName
           name.startsWith("_") || name.startsWith(".")
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/commands.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/commands.scala
index e25b818ceaf53..784483b57faea 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/commands.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/commands.scala
@@ -71,7 +71,7 @@ private[sql] case class InsertIntoFSBasedRelation(
     val hadoopConf = sqlContext.sparkContext.hadoopConfiguration
     val outputPath = new Path(relation.paths.head)
     val fs = outputPath.getFileSystem(hadoopConf)
-    val qualifiedOutputPath = fs.makeQualified(outputPath)
+    val qualifiedOutputPath = outputPath.makeQualified(fs.getUri, fs.getWorkingDirectory)

     val doInsertion = (mode, fs.exists(qualifiedOutputPath)) match {
       case (SaveMode.ErrorIfExists, true) =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
index ecf5ad78e1f6b..7d5a87f708f11 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
@@ -301,7 +301,7 @@ abstract class OutputWriter {
  *
  * @constructor This constructor is for internal uses only. The [[PartitionSpec]] argument is for
  *              implementing metastore table conversion.
- * @param paths Base paths of this relation. For partitioned relations, it should be either root
+ * @param paths Base paths of this relation. For partitioned relations, it should be the root
  *              directories of all partition directories.
  * @param maybePartitionSpec An [[FSBasedRelation]] can be created with an optional
  *                           [[PartitionSpec]], so that partition discovery can be skipped.
@@ -371,8 +371,8 @@ abstract class FSBasedRelation private[sql](
   }

   /**
-   * Schema of this relation. It consists of [[dataSchema]] and all partition columns not appeared
-   * in [[dataSchema]].
+   * Schema of this relation. It consists of columns appearing in [[dataSchema]] and all partition
+   * columns not appearing in [[dataSchema]].
    */
   override lazy val schema: StructType = {
     val dataSchemaColumnNames = dataSchema.map(_.name.toLowerCase).toSet
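
For reference, a minimal standalone sketch (not part of the patch) of the path-qualification call the change switches to. The object name and the relative path used here are hypothetical; the Hadoop calls (Path.makeQualified(URI, Path), FileSystem.getUri, FileSystem.getWorkingDirectory) are the same ones that appear in the diff above.

    // Sketch only: shows how a scheme-less, relative Path is resolved against
    // the FileSystem's URI and working directory to produce a fully qualified path.
    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.fs.Path

    object QualifyPathExample {
      def main(args: Array[String]): Unit = {
        val conf = new Configuration()          // picks up fs.defaultFS from the environment
        val path = new Path("data/output")      // hypothetical relative, scheme-less path
        val fs = path.getFileSystem(conf)

        // Adds the file system's scheme/authority and resolves relative paths
        // against the working directory, e.g. "file:/home/user/data/output".
        val qualified = path.makeQualified(fs.getUri, fs.getWorkingDirectory)
        println(qualified)
      }
    }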