Fix javadoc8 errors
HyukjinKwon committed Mar 22, 2017
1 parent 82b598b commit 88ee198
Showing 5 changed files with 19 additions and 19 deletions.
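Context, not part of the diff: a plausible reading of the fix is that Spark's doc build runs genjavadoc-converted Scala sources through Java 8's javadoc, whose doclint rejects `{@link ...}` references it cannot resolve; Scaladoc `[[...]]` links are carried into those tags, so the commit swaps them for plain monospace spans that need no resolution. A minimal before/after sketch (illustrative, not from the commit):

```scala
// Hedged sketch: [[...]] in a Scaladoc comment may surface as an unresolvable
// {@link ...} in generated Java sources and fail under javadoc 8's doclint.

/** Exposed [[TaskMetrics]] objects are not thread safe. */ // may fail under javadoc 8
private[spark] class Before

/** Exposed `TaskMetrics` objects are not thread safe. */ // plain monospace, nothing to resolve
private[spark] class After
```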
2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/TaskContextImpl.scala
@@ -38,7 +38,7 @@ import org.apache.spark.util._
* callbacks are protected by locking on the context instance. For instance, this ensures
* that you cannot add a completion listener in one thread while we are completing (and calling
* the completion listeners) in another thread. Other state is immutable, however the exposed
- * [[TaskMetrics]] & [[MetricsSystem]] objects are not thread safe.
+ * `TaskMetrics` & `MetricsSystem` objects are not thread safe.
*/
private[spark] class TaskContextImpl(
val stageId: Int,
4 changes: 2 additions & 2 deletions mllib/src/main/scala/org/apache/spark/ml/fpm/FPGrowth.scala
@@ -69,8 +69,8 @@ private[fpm] trait FPGrowthParams extends Params with HasPredictionCol {
def getMinSupport: Double = $(minSupport)

/**
- * Number of partitions (>=1) used by parallel FP-growth. By default the param is not set, and
- * partition number of the input dataset is used.
+ * Number of partitions (at least 1) used by parallel FP-growth. By default the param is not
+ * set, and partition number of the input dataset is used.
* @group expertParam
*/
@Since("2.2.0")
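For reference, a hedged usage sketch of the parameter documented above; the 2.2 estimator exposes it via `setNumPartitions`, and the in-scope `spark` session and toy data are assumptions:

```scala
import org.apache.spark.ml.fpm.FPGrowth

// Toy transactions; "items" is FPGrowth's default items column name.
val transactions = spark.createDataFrame(Seq(
  (0, Array("a", "b", "c")),
  (1, Array("a", "b")),
  (2, Array("b", "c"))
)).toDF("id", "items")

val model = new FPGrowth()
  .setMinSupport(0.5)   // keep itemsets appearing in at least half the rows
  .setNumPartitions(4)  // the expert param above; must be at least 1
  .fit(transactions)

model.freqItemsets.show()
```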
@@ -27,7 +27,7 @@
/**
* ::Experimental::
* Base interface for a map function used in
- * {@link org.apache.spark.sql.KeyValueGroupedDataset#flatMapGroupsWithState(
+ * {@code org.apache.spark.sql.KeyValueGroupedDataset.flatMapGroupsWithState(
* FlatMapGroupsWithStateFunction, org.apache.spark.sql.streaming.OutputMode,
* org.apache.spark.sql.Encoder, org.apache.spark.sql.Encoder)}
* @since 2.1.1
@@ -298,7 +298,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
* For a static batch Dataset, the function will be invoked once per group. For a streaming
* Dataset, the function will be invoked for each group repeatedly in every trigger, and
* updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
*
* @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
* @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -328,7 +328,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
* For a static batch Dataset, the function will be invoked once per group. For a streaming
* Dataset, the function will be invoked for each group repeatedly in every trigger, and
* updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
*
* @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
* @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -360,7 +360,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
* For a static batch Dataset, the function will be invoked once per group. For a streaming
* Dataset, the function will be invoked for each group repeatedly in every trigger, and
* updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
*
* @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
* @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -400,7 +400,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
* For a static batch Dataset, the function will be invoked once per group. For a streaming
* Dataset, the function will be invoked for each group repeatedly in every trigger, and
* updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
*
* @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
* @tparam U The type of the output objects. Must be encodable to Spark SQL types.
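To ground the repeated doc text above, a hedged sketch of the Scala `mapGroupsWithState` overload it describes; the `Event`/`UserState` types, the `events` Dataset, and the in-scope `spark` session are assumptions:

```scala
import org.apache.spark.sql.streaming.GroupState
import spark.implicits._                     // spark: SparkSession, assumed in scope

case class Event(user: String, count: Int)
case class UserState(total: Int)             // must be encodable to Spark SQL types

// Running per-user totals: invoked once per group on a batch Dataset, once per
// group per trigger on a streaming Dataset, with state saved across invocations.
val totals = events                          // events: Dataset[Event], assumed
  .groupByKey(_.user)
  .mapGroupsWithState[UserState, (String, Int)] {
    (user: String, rows: Iterator[Event], state: GroupState[UserState]) =>
      val previous = state.getOption.getOrElse(UserState(0))
      val updated  = UserState(previous.total + rows.map(_.count).sum)
      state.update(updated)                  // visible on the next invocation
      (user, updated.total)
  }
```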
@@ -18,18 +18,18 @@
package org.apache.spark.sql.streaming

import org.apache.spark.annotation.{Experimental, InterfaceStability}
- import org.apache.spark.sql.{Encoder, KeyValueGroupedDataset}
+ import org.apache.spark.sql.KeyValueGroupedDataset
import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState

/**
* :: Experimental ::
*
* Wrapper class for interacting with per-group state data in `mapGroupsWithState` and
- * `flatMapGroupsWithState` operations on [[KeyValueGroupedDataset]].
+ * `flatMapGroupsWithState` operations on `KeyValueGroupedDataset`.
*
* Detail description on `[map/flatMap]GroupsWithState` operation
* --------------------------------------------------------------
- * Both, `mapGroupsWithState` and `flatMapGroupsWithState` in [[KeyValueGroupedDataset]]
+ * Both, `mapGroupsWithState` and `flatMapGroupsWithState` in `KeyValueGroupedDataset`
* will invoke the user-given function on each group (defined by the grouping function in
* `Dataset.groupByKey()`) while maintaining user-defined per-group state between invocations.
* For a static batch Dataset, the function will be invoked once per group. For a streaming
@@ -70,8 +70,8 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
* `[map|flatMap]GroupsWithState`, but the exact timeout duration/timestamp is configurable per
* group by calling `setTimeout...()` in `GroupState`.
* - Timeouts can be either based on processing time (i.e.
- * [[GroupStateTimeout.ProcessingTimeTimeout]]) or event time (i.e.
- * [[GroupStateTimeout.EventTimeTimeout]]).
+ * `GroupStateTimeout.ProcessingTimeTimeout`) or event time (i.e.
+ * `GroupStateTimeout.EventTimeTimeout`).
* - With `ProcessingTimeTimeout`, the timeout duration can be set by calling
* `GroupState.setTimeoutDuration`. The timeout will occur when the clock has advanced by the set
* duration. Guarantees provided by this timeout with a duration of D ms are as follows:
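To make the processing-time case above concrete, a hedged sketch reusing the illustrative `Event`/`UserState` types from the earlier example; `OutputMode.Update` and the one-hour duration are arbitrary choices:

```scala
import org.apache.spark.sql.streaming.{GroupState, GroupStateTimeout, OutputMode}

// Per-group function: state.hasTimedOut is true when the trigger fired because
// the clock passed the per-group deadline rather than because new data arrived.
def sessionize(user: String, rows: Iterator[Event],
               state: GroupState[UserState]): Iterator[String] = {
  if (state.hasTimedOut) {
    state.remove()                        // expire the idle session
    Iterator(s"$user: session closed")
  } else {
    state.update(UserState(rows.size))    // illustrative state update
    state.setTimeoutDuration("1 hour")    // legal only with ProcessingTimeTimeout enabled
    Iterator.empty
  }
}

// Wired up with the timeout enabled on the operation itself, per the docs above:
// events.groupByKey(_.user).flatMapGroupsWithState(
//   OutputMode.Update, GroupStateTimeout.ProcessingTimeTimeout)(sessionize)
```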
@@ -177,7 +177,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
* }}}
*
* @tparam S User-defined type of the state to be stored for each group. Must be encodable into
- * Spark SQL types (see [[Encoder]] for more details).
+ * Spark SQL types (see `Encoder` for more details).
* @since 2.2.0
*/
@Experimental
@@ -224,7 +224,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
/**
* Set the timeout duration for this key as a string. For example, "1 hour", "2 days", etc.
*
- * @note, ProcessingTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+ * @note ProcessingTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
*/
@throws[IllegalArgumentException]("if 'duration' is not a valid duration")
@throws[IllegalStateException]("when state is either not initialized, or already removed")
@@ -240,7 +240,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
* Set the timeout timestamp for this key as milliseconds in epoch time.
* This timestamp cannot be older than the current watermark.
*
- * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+ * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
*/
def setTimeoutTimestamp(timestampMs: Long): Unit

@@ -254,7 +254,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
* The final timestamp (including the additional duration) cannot be older than the
* current watermark.
*
- * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+ * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
*/
def setTimeoutTimestamp(timestampMs: Long, additionalDuration: String): Unit

@@ -265,7 +265,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
* Set the timeout timestamp for this key as a java.sql.Date.
* This timestamp cannot be older than the current watermark.
*
- * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+ * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
*/
def setTimeoutTimestamp(timestamp: java.sql.Date): Unit

@@ -279,7 +279,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
* The final timestamp (including the additional duration) cannot be older than the
* current watermark.
*
- * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+ * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
*/
def setTimeoutTimestamp(timestamp: java.sql.Date, additionalDuration: String): Unit
}
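Finally, a hedged sketch of the event-time variants documented above; the `TimedEvent` type is hypothetical, and the input would additionally need a watermark (`withWatermark`) plus `GroupStateTimeout.EventTimeTimeout` enabled on the operation:

```scala
import org.apache.spark.sql.streaming.GroupState

case class TimedEvent(user: String, eventTimeMs: Long)  // hypothetical input row

def expireByEventTime(user: String, rows: Iterator[TimedEvent],
                      state: GroupState[Long]): Iterator[String] = {
  if (state.hasTimedOut) {
    state.remove()
    Iterator(s"$user: expired")
  } else {
    // Track the latest event time seen for this key.
    val latest = (rows.map(_.eventTimeMs) ++ state.getOption.iterator).max
    state.update(latest)
    // Fires once the watermark passes latest + 10 minutes; per the docs above,
    // the resulting timestamp cannot be older than the current watermark.
    state.setTimeoutTimestamp(latest, "10 minutes")
    Iterator.empty
  }
}
```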
