-
Notifications
You must be signed in to change notification settings - Fork 28.2k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[SPARK-13568] [ML] Create feature transformer to impute missing values #11601
Changes from 10 commits
2999b26
8335cf2
7be5e9b
131f7d5
a72a3ea
78df589
b949be5
79b1c62
c3d5d55
1b39668
7f87ffb
4e45f81
e1dd0d2
12220eb
1b36deb
72d104d
c311b2e
d6b9421
d181b12
e211481
791533b
fdd6f94
8042cfb
4bdf595
e6ad69c
1718422
594c501
3043e7d
b3633e8
053d489
63e7032
4e1c34a
051aec6
aef094b
335ded7
949ed79
93bba63
cca8dd4
eea8947
4e07431
31556e6
d4f92e4
544a65c
910685e
91d4cee
8744524
ca45c33
e86d919
4f17c54
ce59a5b
41d91b9
9f6bd57
e378db5
c67afc1
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,288 @@ | ||
/* | ||
* Licensed to the Apache Software Foundation (ASF) under one or more | ||
* contributor license agreements. See the NOTICE file distributed with | ||
* this work for additional information regarding copyright ownership. | ||
* The ASF licenses this file to You under the Apache License, Version 2.0 | ||
* (the "License"); you may not use this file except in compliance with | ||
* the License. You may obtain a copy of the License at | ||
* | ||
* http://www.apache.org/licenses/LICENSE-2.0 | ||
* | ||
* Unless required by applicable law or agreed to in writing, software | ||
* distributed under the License is distributed on an "AS IS" BASIS, | ||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
* See the License for the specific language governing permissions and | ||
* limitations under the License. | ||
*/ | ||
|
||
package org.apache.spark.ml.feature | ||
|
||
import org.apache.hadoop.fs.Path | ||
|
||
import org.apache.spark.annotation.{Experimental, Since} | ||
import org.apache.spark.ml.{Estimator, Model} | ||
import org.apache.spark.ml.param.{DoubleParam, Param, ParamMap, Params} | ||
import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol} | ||
import org.apache.spark.ml.util._ | ||
import org.apache.spark.mllib.linalg._ | ||
import org.apache.spark.sql.{DataFrame, Row} | ||
import org.apache.spark.sql.functions.{col, udf} | ||
import org.apache.spark.sql.types.{DoubleType, StructField, StructType} | ||
|
||
/** | ||
* Params for [[Imputer]] and [[ImputerModel]]. | ||
*/ | ||
private[feature] trait ImputerParams extends Params with HasInputCol with HasOutputCol {

  /**
   * The imputation strategy.
   * If "mean", then replace missing values using the mean along the axis.
   * If "median", then replace missing values using the median along the axis.
   * If "most", then replace missing values using the most frequent value along the axis.
   * Default: mean
   *
   * @group param
   */
  // final per Spark ML convention for Param vals; validity is checked at set-time via the
  // Param's isValid function in addition to validateParams().
  final val strategy: Param[String] = new Param(this, "strategy", "strategy for imputation. " +
    "If mean, then replace missing values using the mean along the axis. " +
    "If median, then replace missing values using the median along the axis. " +
    "If most, then replace missing values using the most frequent value along the axis.",
    (value: String) => Seq("mean", "median", "most").contains(value))

  /** @group getParam */
  def getStrategy: String = $(strategy)

  /**
   * The placeholder for the missing values. All occurrences of missingValue will be imputed.
   * Default: Double.NaN
   *
   * @group param
   */
  final val missingValue: DoubleParam = new DoubleParam(this, "missingValue",
    "The placeholder for the missing values. All occurrences of missingValue will be imputed")

  /** @group getParam */
  def getMissingValue: Double = $(missingValue)

  /**
   * Validates params and transforms the input schema: requires the input column to be of
   * Double or Vector type and appends a non-nullable Vector output column.
   */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    validateParams()
    val inputType = schema($(inputCol)).dataType
    require(inputType.isInstanceOf[VectorUDT] || inputType.isInstanceOf[DoubleType],
      s"Input column ${$(inputCol)} must be of type Vector or Double")
    require(!schema.fieldNames.contains($(outputCol)),
      s"Output column ${$(outputCol)} already exists.")
    val outputFields = schema.fields :+ StructField($(outputCol), new VectorUDT, false)
    StructType(outputFields)
  }

  override def validateParams(): Unit = {
    require(Seq("mean", "median", "most").contains($(strategy)),
      s"${$(strategy)} is not supported. Options are mean, median and most")
  }
}
|
||
/**
 * :: Experimental ::
 * Imputation estimator for completing missing values, either using the mean, the median or
 * the most frequent value of the column in which the missing values are located. This class
 * also allows for different missing value encodings via the `missingValue` param.
 *
 * The input column must be of type Double or Vector (of Doubles); the fitted surrogate
 * statistics are stored in the resulting [[ImputerModel]].
 */
@Experimental
class Imputer @Since("2.0.0")(override val uid: String)
  extends Estimator[ImputerModel] with ImputerParams with DefaultParamsWritable {

  @Since("2.0.0")
  def this() = this(Identifiable.randomUID("imputer"))

  /** @group setParam */
  @Since("2.0.0")
  def setInputCol(value: String): this.type = set(inputCol, value)

  /** @group setParam */
  @Since("2.0.0")
  def setOutputCol(value: String): this.type = set(outputCol, value)

  /** @group setParam */
  @Since("2.0.0")
  def setStrategy(value: String): this.type = set(strategy, value)

  /** @group setParam */
  @Since("2.0.0")
  def setMissingValue(value: Double): this.type = set(missingValue, value)

  setDefault(strategy -> "mean", missingValue -> Double.NaN)

  /**
   * Computes the surrogate statistic selected by `strategy` for the input column
   * (one value per vector element for Vector input), skipping missing/null entries.
   */
  override def fit(dataset: DataFrame): ImputerModel = {
    val alternate = dataset.select($(inputCol)).schema.fields(0).dataType match {
      case DoubleType =>
        Vectors.dense(getColStatistics(dataset, $(inputCol)))
      case _: VectorUDT =>
        // NOTE(review): this issues one query per vector element, which can be very slow
        // for wide vectors; a single-pass aggregation would scale better — TODO benchmark.
        val vectorSize = dataset.first().getAs[Vector]($(inputCol)).size
        val statistics = Array.tabulate(vectorSize) { i =>
          val getI = udf((v: Vector) => v(i))
          val tempColName = $(inputCol) + i
          val tempData = dataset.where(s"${$(inputCol)} is not null")
            .select($(inputCol)).withColumn(tempColName, getI(col($(inputCol))))
          getColStatistics(tempData, tempColName)
        }
        Vectors.dense(statistics)
    }
    copyValues(new ImputerModel(uid, alternate).setParent(this))
  }

  /** Computes the surrogate (mean / median / most frequent) of a single Double column. */
  private def getColStatistics(dataset: DataFrame, colName: String): Double = {
    // Bug fix: the original `$(missingValue) match { case Double.NaN => ... }` never
    // matched, because stable-identifier patterns compare with == and NaN != NaN.
    val missValue = if ($(missingValue).isNaN) "NaN" else $(missingValue).toString
    // NOTE(review): relies on Spark SQL's NaN semantics ('NaN' casts to NaN and compares
    // equal to NaN) to drop missing rows; nulls are dropped by the != predicate as well.
    val filteredDF = dataset.select(colName).where(s"$colName != '$missValue'")
    $(strategy) match {
      case "mean" =>
        filteredDF.selectExpr(s"avg($colName)").first().getDouble(0)
      case "median" =>
        // TODO: optimize the sort with quick-select or Percentile(Hive) if required.
        // For an even count this picks the upper of the two middle values.
        val rddDouble = filteredDF.rdd.map(_.getDouble(0))
        rddDouble.sortBy(d => d).zipWithIndex().map {
          case (v, idx) => (idx, v)
        }.lookup(rddDouble.count() / 2).head
      case "most" =>
        val input = filteredDF.rdd.map(_.getDouble(0))
        input.map(d => (d, 1)).reduceByKey(_ + _).sortBy(-_._2).first()._1
    }
  }

  override def transformSchema(schema: StructType): StructType = {
    validateAndTransformSchema(schema)
  }

  // defaultCopy copies uid and all params in one call (review suggestion).
  override def copy(extra: ParamMap): Imputer = defaultCopy(extra)
}
|
||
// Companion providing MLReadable support; Since corrected from 1.6.0 to 2.0.0,
// matching the version in which Imputer was introduced.
@Since("2.0.0")
object Imputer extends DefaultParamsReadable[Imputer] {

  @Since("2.0.0")
  override def load(path: String): Imputer = super.load(path)
}
|
||
/**
 * :: Experimental ::
 * Model fitted by [[Imputer]].
 *
 * @param alternate statistics value for each original column during fitting
 */
@Experimental
class ImputerModel private[ml] (
    override val uid: String,
    val alternate: Vector)
  extends Model[ImputerModel] with ImputerParams with MLWritable {

  import ImputerModel._

  /** @group setParam */
  def setInputCol(value: String): this.type = set(inputCol, value)

  /** @group setParam */
  def setOutputCol(value: String): this.type = set(outputCol, value)

  /**
   * Returns true if `value` encodes a missing entry. The explicit NaN check is required
   * because NaN == NaN is false.
   */
  // Renamed from matchMissingValue per review; private, so callers are unaffected.
  private def isMissingValue(value: Double): Boolean = {
    val miss = $(missingValue)
    value == miss || (value.isNaN && miss.isNaN)
  }

  /**
   * Replaces each missing entry of the input column with the fitted surrogate.
   * A null Vector row is replaced wholesale by `alternate`; for SparseVectors only the
   * explicitly stored entries are scanned (implicit zeros are never treated as missing).
   */
  override def transform(dataset: DataFrame): DataFrame = {
    dataset.select($(inputCol)).schema.fields(0).dataType match {
      case DoubleType =>
        val impute = udf { (d: Double) =>
          if (isMissingValue(d)) alternate(0) else d
        }
        dataset.withColumn($(outputCol), impute(col($(inputCol))))
      case _: VectorUDT =>
        val impute = udf { (vector: Vector) =>
          if (vector == null) {
            alternate
          } else {
            // Copy before mutating so the input vector is left untouched.
            val vCopy = vector.copy
            vCopy match {
              case d: DenseVector =>
                var i = 0
                while (i < d.size) {
                  if (isMissingValue(d.values(i))) {
                    d.values(i) = alternate(i)
                  }
                  i += 1
                }
              case s: SparseVector =>
                var i = 0
                while (i < s.values.length) {
                  if (isMissingValue(s.values(i))) {
                    s.values(i) = alternate(s.indices(i))
                  }
                  i += 1
                }
            }
            vCopy
          }
        }
        dataset.withColumn($(outputCol), impute(col($(inputCol))))
    }
  }

  override def transformSchema(schema: StructType): StructType = {
    validateAndTransformSchema(schema)
  }

  override def copy(extra: ParamMap): ImputerModel = {
    val copied = new ImputerModel(uid, alternate)
    copyValues(copied, extra).setParent(parent)
  }

  @Since("2.0.0")
  override def write: MLWriter = new ImputerModelWriter(this)
}
|
||
|
||
@Since("2.0.0")
object ImputerModel extends MLReadable[ImputerModel] {

  /** Writer that persists the params metadata plus the `alternate` statistics vector. */
  private[ImputerModel] class ImputerModelWriter(instance: ImputerModel) extends MLWriter {

    // Single-row payload holding the fitted surrogate statistics.
    private case class Data(alternate: Vector)

    override protected def saveImpl(path: String): Unit = {
      DefaultParamsWriter.saveMetadata(instance, path, sc)
      val data = Data(instance.alternate)
      val dataPath = new Path(path, "data").toString
      sqlContext.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
    }
  }

  /** Reader mirroring [[ImputerModelWriter]]; renamed from ImputerReader for consistency. */
  private class ImputerModelReader extends MLReader[ImputerModel] {

    private val className = classOf[ImputerModel].getName

    override def load(path: String): ImputerModel = {
      val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
      val dataPath = new Path(path, "data").toString
      val Row(alternate: Vector) = sqlContext.read.parquet(dataPath)
        .select("alternate")
        .head()
      val model = new ImputerModel(metadata.uid, alternate)
      DefaultParamsReader.getAndSetParams(model, metadata)
      model
    }
  }

  @Since("2.0.0")
  override def read: MLReader[ImputerModel] = new ImputerModelReader

  @Since("2.0.0")
  override def load(path: String): ImputerModel = super.load(path)
}
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,105 @@ | ||
/* | ||
* Licensed to the Apache Software Foundation (ASF) under one or more | ||
* contributor license agreements. See the NOTICE file distributed with | ||
* this work for additional information regarding copyright ownership. | ||
* The ASF licenses this file to You under the Apache License, Version 2.0 | ||
* (the "License"); you may not use this file except in compliance with | ||
* the License. You may obtain a copy of the License at | ||
* | ||
* http://www.apache.org/licenses/LICENSE-2.0 | ||
* | ||
* Unless required by applicable law or agreed to in writing, software | ||
* distributed under the License is distributed on an "AS IS" BASIS, | ||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
* See the License for the specific language governing permissions and | ||
* limitations under the License. | ||
*/ | ||
package org.apache.spark.ml.feature | ||
|
||
import org.apache.spark.SparkFunSuite | ||
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils} | ||
import org.apache.spark.mllib.linalg.{Vector, Vectors} | ||
import org.apache.spark.mllib.util.MLlibTestSparkContext | ||
import org.apache.spark.mllib.util.TestingUtils._ | ||
import org.apache.spark.sql.Row | ||
|
||
class ImputerSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  // Each fixture encodes the expected result per strategy in columns named after the
  // strategy ("mean"/"median"/"most"), so one loop covers all three strategies.
  test("Imputer for Double column") {
    val df = sqlContext.createDataFrame( Seq(
      (0, 1.0, 1.0, 1.0, 1.0),
      (1, 1.0, 1.0, 1.0, 1.0),
      (2, 3.0, 3.0, 3.0, 3.0),
      (3, 4.0, 4.0, 4.0, 4.0),
      (4, Double.NaN, 2.25, 3.0, 1.0)
    )).toDF("id", "value", "mean", "median", "most")
    Seq("mean", "median", "most").foreach { strategy =>
      val imputer = new Imputer().setInputCol("value").setOutputCol("out").setStrategy(strategy)
      val model = imputer.fit(df)
      model.transform(df).select(strategy, "out").collect()
        .foreach { case Row(d1: Double, d2: Double) =>
          // Informative failure message per review feedback.
          assert(d1 ~== d2 absTol 1e-5,
            s"Imputed values differ ($strategy). Expected: $d1, actual: $d2")
        }
    }
  }

  test("Imputer for Double with missing Value -1.0") {
    val df = sqlContext.createDataFrame( Seq(
      (0, 1.0, 1.0, 1.0, 1.0),
      (1, 1.0, 1.0, 1.0, 1.0),
      (2, 3.0, 3.0, 3.0, 3.0),
      (3, 4.0, 4.0, 4.0, 4.0),
      (4, -1.0, 2.25, 3.0, 1.0)
    )).toDF("id", "value", "mean", "median", "most")
    Seq("mean", "median", "most").foreach { strategy =>
      val imputer = new Imputer().setInputCol("value").setOutputCol("out").setStrategy(strategy)
        .setMissingValue(-1.0)
      val model = imputer.fit(df)
      model.transform(df).select(strategy, "out").collect()
        .foreach { case Row(d1: Double, d2: Double) =>
          assert(d1 ~== d2 absTol 1e-5,
            s"Imputed values differ ($strategy). Expected: $d1, actual: $d2")
        }
    }
  }

  // Covers dense vectors with NaN, sparse vectors with stored NaN, and a null row
  // (which must be replaced wholesale by the surrogate vector).
  test("Imputer for Vector column with NaN and null") {
    val df = sqlContext.createDataFrame( Seq(
      (0, Vectors.dense(1, 2), Vectors.dense(1, 2), Vectors.dense(1, 2), Vectors.dense(1, 2)),
      (1, Vectors.dense(1, 2), Vectors.dense(1, 2), Vectors.dense(1, 2), Vectors.dense(1, 2)),
      (2, Vectors.dense(3, 2), Vectors.dense(3, 2), Vectors.dense(3, 2), Vectors.dense(3, 2)),
      (3, Vectors.dense(4, 2), Vectors.dense(4, 2), Vectors.dense(4, 2), Vectors.dense(4, 2)),
      (4, Vectors.dense(Double.NaN, 2), Vectors.dense(2.25, 2), Vectors.dense(3.0, 2),
        Vectors.dense(1.0, 2)),
      (5, Vectors.sparse(2, Array(0, 1), Array(Double.NaN, 2.0)), Vectors.dense(2.25, 2),
        Vectors.dense(3.0, 2), Vectors.dense(1.0, 2)),
      (6, null.asInstanceOf[Vector], Vectors.dense(2.25, 2), Vectors.dense(3.0, 2),
        Vectors.dense(1.0, 2))
    )).toDF("id", "value", "mean", "median", "most")
    Seq("mean", "median", "most").foreach { strategy =>
      val imputer = new Imputer().setInputCol("value").setOutputCol("out").setStrategy(strategy)
      val model = imputer.fit(df)
      model.transform(df).select(strategy, "out").collect()
        .foreach { case Row(v1: Vector, v2: Vector) =>
          assert(v1 == v2, s"Imputed vectors differ ($strategy). Expected: $v1, actual: $v2")
        }
    }
  }

  test("Imputer read/write") {
    val t = new Imputer()
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
      .setMissingValue(-1.0)
    testDefaultReadWrite(t)
  }

  test("ImputerModel read/write") {
    val instance = new ImputerModel(
      "myImputer", Vectors.dense(1.0, 10.0))
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
    val newInstance = testDefaultReadWrite(instance)
    assert(newInstance.alternate === instance.alternate)
  }
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
indentation errors here and elsewhere