
Commit 8574dd4
Minor bug fix and more tests
liancheng committed May 11, 2015
1 parent 56f2543 commit 8574dd4
Showing 3 changed files with 112 additions and 71 deletions.
@@ -151,8 +151,8 @@ private[sql] class FSBasedParquetRelation(
 
         this.paths.toSet == that.paths.toSet &&
           schemaEquality &&
-          this.maybeDataSchema == that.maybeDataSchema
-          this.maybePartitionSpec == that.maybePartitionSpec
+          this.maybeDataSchema == that.maybeDataSchema &&
+          this.partitionColumns == that.partitionColumns
 
       case _ => false
     }
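This first hunk fixes a classic Scala pitfall: without a trailing `&&` after the `maybeDataSchema` comparison, the preceding conjunction became a discarded statement, and the `case` body returned only the final `maybePartitionSpec` comparison. The fix chains all the comparisons into one Boolean expression (and compares `partitionColumns` instead). A minimal sketch of the bug class, using a hypothetical PathSet type rather than Spark's actual relation:

// Hypothetical type, for illustration only.
case class PathSet(paths: Set[String], partitionColumns: Seq[String])

def buggyEquals(a: PathSet, b: PathSet): Boolean = {
  a.paths == b.paths                        // evaluated, then silently discarded
  a.partitionColumns == b.partitionColumns  // only this last expression is returned
}

def fixedEquals(a: PathSet, b: PathSet): Boolean =
  a.paths == b.paths &&                     // trailing && chains both checks
  a.partitionColumns == b.partitionColumns

Code like buggyEquals compiles without error (at most a discarded-value warning under stricter compiler flags), which is why the bug could slip through until tests caught it.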
@@ -214,7 +214,7 @@ private[sql] class FSBasedParquetRelation(
     }
 
     ParquetOutputFormat.setWriteSupportClass(job, writeSupportClass)
-    RowWriteSupport.setSchema(dataSchema.asNullable.toAttributes, conf)
+    RowWriteSupport.setSchema(dataSchema.toAttributes, conf)
 
     // Sets compression scheme
     conf.set(
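The second hunk stops relaxing the schema to all-nullable before handing it to `RowWriteSupport`, so the Parquet write path now sees the nullability the data schema actually declares. For context, `asNullable` behaves roughly like the sketch below (a simplified, hypothetical stand-in named relaxToNullable; Spark's internal version also recurses into nested structs, arrays, and maps):

import org.apache.spark.sql.types.{StructField, StructType}

// Hypothetical, simplified stand-in for the internal asNullable helper:
// relax every top-level field to nullable. The real implementation also
// recurses into nested container types.
def relaxToNullable(schema: StructType): StructType =
  StructType(schema.fields.map(f => f.copy(nullable = true)))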
@@ -271,7 +271,7 @@ private[sql] class FSBasedParquetRelation(
 
     // TODO Stop using `FilteringParquetRowInputFormat` and overriding `getPartition`.
     // After upgrading to Parquet 1.6.0, we should be able to stop caching `FileStatus` objects and
-    // footers. Especially when a global arbitratve schema (either from metastore or data source
+    // footers. Especially when a global arbitrative schema (either from metastore or data source
     // DDL) is available.
     new NewHadoopRDD(
       sqlContext.sparkContext,
@@ -337,7 +337,7 @@ private[sql] abstract class BaseWriterContainer(
   }
 
   def abortJob(): Unit = {
-    // outputCommitter.abortJob(jobContext, JobStatus.State.FAILED)
+    outputCommitter.abortJob(jobContext, JobStatus.State.FAILED)
     logError(s"Job $jobId aborted.")
   }
 }
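The last hunk re-enables the previously commented-out call to Hadoop's `OutputCommitter.abortJob`, so a failed write job now tells the committer to abort (which typically lets it discard temporary output) instead of only logging the failure. A minimal sketch of the commit/abort pattern, using a hypothetical container class rather than Spark's `BaseWriterContainer`:

import org.apache.hadoop.mapreduce.{JobContext, JobStatus, OutputCommitter}

// Hypothetical writer container illustrating the commit/abort protocol.
class SimpleWriterContainer(committer: OutputCommitter, jobContext: JobContext) {
  // On success, finalize the job's output (e.g. promote files out of a temp dir).
  def commitJob(): Unit = committer.commitJob(jobContext)

  // On failure, let the committer abort so it can clean up temporary output;
  // this is the call the commit uncomments.
  def abortJob(): Unit =
    committer.abortJob(jobContext, JobStatus.State.FAILED)
}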