diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/DCT.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/DCT.scala
index c7d6e0f5bcb61..a33a48365fa18 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/DCT.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/DCT.scala
@@ -33,7 +33,7 @@ import org.apache.spark.sql.types.DataType
* such that the transform matrix is unitary (aka scaled DCT-II).
*
* More information on
- * Wikipedia.
+ * DCT-II (Wikipedia).
*/
@Since("1.5.0")
class DCT @Since("1.5.0") (@Since("1.5.0") override val uid: String)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala
index eaed2ff298d57..4be17da3e9f76 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala
@@ -30,8 +30,9 @@ import org.apache.spark.sql.types.DataType
/**
* Perform feature expansion in a polynomial space. As said in wikipedia of Polynomial Expansion,
- * which is available at here,
- * "In mathematics, an expansion of a product of sums expresses it as a sum of products by using
+ * which is available at
* Polynomial expansion (Wikipedia),
* "In mathematics, an expansion of a product of sums expresses it as a sum of products by using
* the fact that multiplication distributes over addition". Take a 2-variable feature vector
* as an example: `(x, y)`, if we want to expand it with degree 2, then we get
* `(x, x * x, y, x * y, y * y)`.
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/StopWordsRemover.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/StopWordsRemover.scala
index 802cbe95e522e..a55816249c74b 100755
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/StopWordsRemover.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StopWordsRemover.scala
@@ -32,7 +32,7 @@ import org.apache.spark.sql.types.{ArrayType, StringType, StructType}
* @note null values from input array are preserved unless adding null to stopWords
* explicitly.
*
- * @see here
+ * @see Stop words (Wikipedia)
*/
@Since("1.5.0")
class StopWordsRemover @Since("1.5.0") (@Since("1.5.0") override val uid: String)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
index cb58e444838fc..d6ad1ea6d1096 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
@@ -119,7 +119,8 @@ private[regression] trait AFTSurvivalRegressionParams extends Params
/**
* :: Experimental ::
* Fit a parametric survival regression model named accelerated failure time (AFT) model
- * (see here)
+ * (see
+ * Accelerated failure time model (Wikipedia))
* based on the Weibull distribution of the survival time.
*/
@Experimental
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
index 3e3517562fad0..bb6f1c93dac37 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
@@ -124,7 +124,8 @@ private[regression] trait GeneralizedLinearRegressionBase extends PredictorParam
* :: Experimental ::
*
* Fit a Generalized Linear Model
- * (see here)
+ * (see
+ * Generalized linear model (Wikipedia))
* specified by giving a symbolic description of the linear
* predictor (link function) and a description of the error distribution (family).
* It supports "gaussian", "binomial", "poisson" and "gamma" as family.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/evaluation/RegressionMetrics.scala b/mllib/src/main/scala/org/apache/spark/mllib/evaluation/RegressionMetrics.scala
index 202e4d3f65eba..ad99b00a31fd5 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/evaluation/RegressionMetrics.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/evaluation/RegressionMetrics.scala
@@ -74,7 +74,8 @@ class RegressionMetrics @Since("2.0.0") (
/**
* Returns the variance explained by regression.
* explainedVariance = $\sum_i (\hat{y_i} - \bar{y})^2^ / n$
- * @see here
+ * @see
+ * Fraction of variance unexplained (Wikipedia)
*/
@Since("1.2.0")
def explainedVariance: Double = {
@@ -110,7 +111,8 @@ class RegressionMetrics @Since("2.0.0") (
/**
* Returns R^2^, the unadjusted coefficient of determination.
- * @see here
+ * @see
+ * Coefficient of determination (Wikipedia)
* In case of regression through the origin, the definition of R^2^ is to be modified.
* @see
* J. G. Eisenhauer, Regression through the Origin. Teaching Statistics 25, 76-80 (2003)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala
index 114f76659c524..7dc0c459ec032 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala
@@ -38,7 +38,7 @@ import org.apache.spark.mllib.linalg.{Vector, Vectors}
* For weighted instances, the unbiased estimation of variance is defined by the reliability
* weights:
* see
- * here.
+ * Reliability weights (Wikipedia).
*/
@Since("1.1.0")
@DeveloperApi
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala
index 9130f7b1fde01..7ba9b292969e7 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala
@@ -186,7 +186,8 @@ object Statistics {
* distribution of the sample data and the theoretical distribution we can provide a test for the
* the null hypothesis that the sample data comes from that theoretical distribution.
* For more information on KS Test:
- * @see here
+ * @see
+ * Kolmogorov-Smirnov test (Wikipedia)
*
* @param data an `RDD[Double]` containing the sample of data to test
* @param cdf a `Double => Double` function to calculate the theoretical CDF at a given value
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
index 09028b2c8775c..835a1e9d70d54 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
@@ -29,7 +29,7 @@ import org.apache.spark.mllib.util.MLUtils
* the event that the covariance matrix is singular, the density will be computed in a
* reduced dimensional subspace under which the distribution is supported.
* (see
- * here)
+ * Multivariate normal distribution (Wikipedia))
*
* @param mu The mean vector of the distribution
* @param sigma The covariance matrix of the distribution