Convert server javadoc to html5 #30279

Merged (2 commits) on May 2, 2018
@@ -549,22 +549,6 @@ class BuildPlugin implements Plugin<Project> {
javadoc.classpath = javadoc.getClasspath().filter { f ->
return classes.contains(f) == false
}
- /*
- * Force html5 on projects that support it to silence the warning
- * that `javadoc` will change its defaults in the future.
- *
- * But not all of our javadoc is actually valid html5. So we
- * have to become valid incrementally. We only set html5 on the
- * projects we have converted so that we still get the annoying
- * warning on the unconverted ones. That will give us an
- * incentive to convert them....
- */
- List html4Projects = [
- ':server',
- ]
- if (false == html4Projects.contains(project.path)) {
- javadoc.options.addBooleanOption('html5', true)
- }
}
configureJavadocJar(project)
}
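For context, every Java hunk below applies the same rewrite: the obsolete HTML <tt> element, which an HTML5 javadoc run rejects, is replaced with the Javadoc {@code} inline tag, which renders the same monospace text without emitting any HTML element. Since ':server' was the last entry in the html4Projects escape hatch, converting it lets the whole conditional above be deleted. A minimal before/after sketch; the class and methods are hypothetical and only illustrate the pattern, they are not part of the PR:

// Hypothetical illustration, not part of the diff.
public class Html5JavadocExample {

    /**
     * Old style: uses the obsolete tt element, so javadoc run with -html5
     * (what addBooleanOption('html5', true) passes) complains about it:
     * returns <tt>true</tt> when the flag is set, <tt>null</tt> otherwise.
     */
    public Boolean oldStyle(boolean flag) {
        return flag ? Boolean.TRUE : null;
    }

    /**
     * New style: returns {@code true} when the flag is set, {@code null} otherwise.
     * The {@code ...} tag produces the same monospace output with no HTML element.
     */
    public Boolean newStyle(boolean flag) {
        return flag ? Boolean.TRUE : null;
    }
}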
@@ -64,7 +64,7 @@ class ClusterConfiguration {
boolean debug = false

/**
- * Configuration of the setting <tt>discovery.zen.minimum_master_nodes</tt> on the nodes.
+ * Configuration of the setting {@code discovery.zen.minimum_master_nodes} on the nodes.
* In case of more than one node, this defaults to the number of nodes
*/
@Input
@@ -28,7 +28,7 @@

/**
* Extended version of {@link CommonTermsQuery} that allows to pass in a
- * <tt>minimumNumberShouldMatch</tt> specification that uses the actual num of high frequent terms
+ * {@code minimumNumberShouldMatch} specification that uses the actual num of high frequent terms
* to calculate the minimum matching terms.
*/
public class ExtendedCommonTermsQuery extends CommonTermsQuery {
@@ -668,7 +668,7 @@ protected String getExceptionName() {
}

/**
- * Returns a underscore case name for the given exception. This method strips <tt>Elasticsearch</tt> prefixes from exception names.
+ * Returns a underscore case name for the given exception. This method strips {@code Elasticsearch} prefixes from exception names.
*/
public static String getExceptionName(Throwable ex) {
String simpleName = ex.getClass().getSimpleName();
@@ -31,12 +31,12 @@
public interface ShardOperationFailedException extends Streamable, ToXContent {

/**
- * The index the operation failed on. Might return <tt>null</tt> if it can't be derived.
+ * The index the operation failed on. Might return {@code null} if it can't be derived.
*/
String index();

/**
- * The index the operation failed on. Might return <tt>-1</tt> if it can't be derived.
+ * The index the operation failed on. Might return {@code -1} if it can't be derived.
*/
int shardId();

@@ -37,7 +37,7 @@ public byte id() {
}

/**
- * <tt>true</tt> if the actual operation the action represents will be executed
+ * {@code true} if the actual operation the action represents will be executed
* on a different thread than the calling thread (assuming it will be executed
* on the same node).
*/
@@ -46,7 +46,7 @@ public boolean threadedOperation() {
}

/**
- * <tt>true</tt> if the invocation of the action result listener will be executed
+ * {@code true} if the invocation of the action result listener will be executed
* on a different thread (than the calling thread or an "expensive" thread, like the
* IO thread).
*/
@@ -123,7 +123,7 @@ public int getDelayedUnassignedShards() {
}

/**
- * <tt>true</tt> if the waitForXXX has timeout out and did not match.
+ * {@code true} if the waitForXXX has timeout out and did not match.
*/
public boolean isTimedOut() {
return this.timedOut;
@@ -51,7 +51,7 @@ public ClusterRerouteRequest add(AllocationCommand... commands) {
}

/**
- * Sets a dry run flag (defaults to <tt>false</tt>) allowing to run the commands without
+ * Sets a dry run flag (defaults to {@code false}) allowing to run the commands without
* actually applying them to the cluster state, and getting the resulting cluster state back.
*/
public ClusterRerouteRequest dryRun(boolean dryRun) {
@@ -78,7 +78,7 @@ public ClusterRerouteRequest explain(boolean explain) {
}

/**
- * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
+ * Sets the retry failed flag (defaults to {@code false}). If true, the
* request will retry allocating shards that can't currently be allocated due to too many allocation failures.
*/
public ClusterRerouteRequest setRetryFailed(boolean retryFailed) {
@@ -42,7 +42,7 @@ public ClusterRerouteRequestBuilder add(AllocationCommand... commands) {
}

/**
- * Sets a dry run flag (defaults to <tt>false</tt>) allowing to run the commands without
+ * Sets a dry run flag (defaults to {@code false}) allowing to run the commands without
* actually applying them to the cluster state, and getting the resulting cluster state back.
*/
public ClusterRerouteRequestBuilder setDryRun(boolean dryRun) {
@@ -51,7 +51,7 @@ public ClusterRerouteRequestBuilder setDryRun(boolean dryRun) {
}

/**
- * Sets the explain flag (defaults to <tt>false</tt>). If true, the
+ * Sets the explain flag (defaults to {@code false}). If true, the
* request will include an explanation in addition to the cluster state.
*/
public ClusterRerouteRequestBuilder setExplain(boolean explain) {
@@ -60,7 +60,7 @@ public ClusterRerouteRequestBuilder setExplain(boolean explain) {
}

/**
- * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
+ * Sets the retry failed flag (defaults to {@code false}). If true, the
* request will retry allocating shards that can't currently be allocated due to too many allocation failures.
*/
public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) {
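The three setters touched in this file (setDryRun, setExplain, setRetryFailed) are the whole surface the javadoc describes. A hedged usage sketch, assuming a transport Client is in scope and that prepareReroute() is the cluster-admin entry point (that entry point is not part of this diff):

import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.client.Client;

public class RerouteDryRunExample {
    // Sketch only: computes the effect of reroute commands without applying them.
    static ClusterRerouteResponse dryRunReroute(Client client) {
        return client.admin().cluster().prepareReroute()
                .setDryRun(true)       // defaults to false; true means compute but do not apply
                .setExplain(true)      // defaults to false; include an explanation with the result
                .setRetryFailed(false) // defaults to false; do not retry shards that exhausted allocation attempts
                .get();
    }
}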
@@ -146,7 +146,7 @@ public ClusterSearchShardsRequest routing(String... routings) {

/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
- * <tt>_local</tt> to prefer local shards or a custom value, which guarantees that the same order
+ * {@code _local} to prefer local shards or a custom value, which guarantees that the same order
* will be used across different requests.
*/
public ClusterSearchShardsRequest preference(String preference) {
@@ -55,7 +55,7 @@ public ClusterSearchShardsRequestBuilder setRouting(String... routing) {

/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
- * <tt>_local</tt> to prefer local shards or a custom value, which guarantees that the same order
+ * {@code _local} to prefer local shards or a custom value, which guarantees that the same order
* will be used across different requests.
*/
public ClusterSearchShardsRequestBuilder setPreference(String preference) {
@@ -52,7 +52,7 @@ public ClusterStateRequestBuilder setBlocks(boolean filter) {

/**
* Should the cluster state result include the {@link org.elasticsearch.cluster.metadata.MetaData}. Defaults
- * to <tt>true</tt>.
+ * to {@code true}.
*/
public ClusterStateRequestBuilder setMetaData(boolean filter) {
request.metaData(filter);
@@ -61,7 +61,7 @@ public ClusterStateRequestBuilder setMetaData(boolean filter) {

/**
* Should the cluster state result include the {@link org.elasticsearch.cluster.node.DiscoveryNodes}. Defaults
- * to <tt>true</tt>.
+ * to {@code true}.
*/
public ClusterStateRequestBuilder setNodes(boolean filter) {
request.nodes(filter);
@@ -70,7 +70,7 @@ public ClusterStateRequestBuilder setNodes(boolean filter) {

/**
* Should the cluster state result include the {@link org.elasticsearch.cluster.ClusterState.Custom}. Defaults
- * to <tt>true</tt>.
+ * to {@code true}.
*/
public ClusterStateRequestBuilder setCustoms(boolean filter) {
request.customs(filter);
@@ -79,7 +79,7 @@ public ClusterStateRequestBuilder setCustoms(boolean filter) {

/**
* Should the cluster state result include the {@link org.elasticsearch.cluster.routing.RoutingTable}. Defaults
- * to <tt>true</tt>.
+ * to {@code true}.
*/
public ClusterStateRequestBuilder setRoutingTable(boolean filter) {
request.routingTable(filter);
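All four flags above default to true, so a caller that only needs part of the cluster state has to switch the rest off explicitly. A sketch assuming a Client and the prepareState() entry point on the cluster admin client (not shown in this diff); the four setters are exactly the ones in this hunk:

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;

public class MetaDataOnlyStateExample {
    // Sketch only: fetch just the metadata section of the cluster state.
    static ClusterState metaDataOnly(Client client) {
        return client.admin().cluster().prepareState()
                .setMetaData(true)       // keep the MetaData section (default true)
                .setNodes(false)         // drop DiscoveryNodes
                .setCustoms(false)       // drop ClusterState.Custom sections
                .setRoutingTable(false)  // drop the RoutingTable
                .get()
                .getState();
    }
}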
@@ -51,15 +51,15 @@ public FlushRequest(String... indices) {
}

/**
- * Returns <tt>true</tt> iff a flush should block
- * if a another flush operation is already running. Otherwise <tt>false</tt>
+ * Returns {@code true} iff a flush should block
+ * if a another flush operation is already running. Otherwise {@code false}
*/
public boolean waitIfOngoing() {
return this.waitIfOngoing;
}

/**
- * if set to <tt>true</tt> the flush will block
+ * if set to {@code true} the flush will block
* if a another flush operation is already running until the flush can be performed.
* The default is <code>true</code>
*/
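A short sketch of the blocking behaviour described above, assuming the waitIfOngoing(boolean) setter that this javadoc documents sits directly below the shown lines, and that a Client is in scope:

import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.client.Client;

public class BlockingFlushExample {
    // Sketch only: flush one index, queueing behind any flush already in progress.
    static FlushResponse flushAndWait(Client client, String index) {
        FlushRequest request = new FlushRequest(index).waitIfOngoing(true); // true is already the default per the javadoc
        return client.admin().indices().flush(request).actionGet();
    }
}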
@@ -27,7 +27,7 @@

/**
* A request to force merging the segments of one or more indices. In order to
- * run a merge on all the indices, pass an empty array or <tt>null</tt> for the
+ * run a merge on all the indices, pass an empty array or {@code null} for the
* indices.
* {@link #maxNumSegments(int)} allows to control the number of segments
* to force merge down to. Defaults to simply checking if a merge needs
@@ -81,30 +81,30 @@ public ForceMergeRequest maxNumSegments(int maxNumSegments) {

/**
* Should the merge only expunge deletes from the index, without full merging.
- * Defaults to full merging (<tt>false</tt>).
+ * Defaults to full merging ({@code false}).
*/
public boolean onlyExpungeDeletes() {
return onlyExpungeDeletes;
}

/**
* Should the merge only expunge deletes from the index, without full merge.
- * Defaults to full merging (<tt>false</tt>).
+ * Defaults to full merging ({@code false}).
*/
public ForceMergeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) {
this.onlyExpungeDeletes = onlyExpungeDeletes;
return this;
}

/**
- * Should flush be performed after the merge. Defaults to <tt>true</tt>.
+ * Should flush be performed after the merge. Defaults to {@code true}.
*/
public boolean flush() {
return flush;
}

/**
- * Should flush be performed after the merge. Defaults to <tt>true</tt>.
+ * Should flush be performed after the merge. Defaults to {@code true}.
*/
public ForceMergeRequest flush(boolean flush) {
this.flush = flush;
@@ -24,7 +24,7 @@

/**
* A request to force merge one or more indices. In order to force merge all
- * indices, pass an empty array or <tt>null</tt> for the indices.
+ * indices, pass an empty array or {@code null} for the indices.
* {@link #setMaxNumSegments(int)} allows to control the number of segments to force
* merge down to. By default, will cause the force merge process to merge down
* to half the configured number of segments.
@@ -47,15 +47,15 @@ public ForceMergeRequestBuilder setMaxNumSegments(int maxNumSegments) {

/**
* Should the merge only expunge deletes from the index, without full merging.
- * Defaults to full merging (<tt>false</tt>).
+ * Defaults to full merging ({@code false}).
*/
public ForceMergeRequestBuilder setOnlyExpungeDeletes(boolean onlyExpungeDeletes) {
request.onlyExpungeDeletes(onlyExpungeDeletes);
return this;
}

/**
- * Should flush be performed after the merge. Defaults to <tt>true</tt>.
+ * Should flush be performed after the merge. Defaults to {@code true}.
*/
public ForceMergeRequestBuilder setFlush(boolean flush) {
request.flush(flush);
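A hedged sketch combining the request options documented in the two force-merge files above, assuming a Client and the prepareForceMerge(...) entry point on the indices admin client (not part of this diff):

import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.client.Client;

public class ForceMergeExample {
    // Sketch only: merge an index down to a single segment, then flush.
    static ForceMergeResponse mergeToOneSegment(Client client, String index) {
        return client.admin().indices().prepareForceMerge(index)
                .setMaxNumSegments(1)          // default is to merge only if the merge policy says it is needed
                .setOnlyExpungeDeletes(false)  // default false: full merge, not just expunging deletes
                .setFlush(true)                // default true: flush after the merge
                .get();
    }
}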
@@ -154,7 +154,7 @@ public Integer version() {
}

/**
- * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
+ * Set to {@code true} to force only creation, not an update of an index template. If it already
* exists, it will fail with an {@link IllegalArgumentException}.
*/
public PutIndexTemplateRequest create(boolean create) {
@@ -76,7 +76,7 @@ public PutIndexTemplateRequestBuilder setVersion(Integer version) {
}

/**
- * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
+ * Set to {@code true} to force only creation, not an update of an index template. If it already
* exists, it will fail with an {@link IllegalArgumentException}.
*/
public PutIndexTemplateRequestBuilder setCreate(boolean create) {
@@ -27,7 +27,7 @@

/**
* A request to upgrade one or more indices. In order to update all indices, pass an empty array or
- * <tt>null</tt> for the indices.
+ * {@code null} for the indices.
* @see org.elasticsearch.client.Requests#upgradeRequest(String...)
* @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest)
* @see UpgradeResponse
@@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException {

/**
* Should the upgrade only the ancient (older major version of Lucene) segments?
- * Defaults to <tt>false</tt>.
+ * Defaults to {@code false}.
*/
public boolean upgradeOnlyAncientSegments() {
return upgradeOnlyAncientSegments;
@@ -24,7 +24,7 @@

/**
* A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or
- * <tt>null</tt> for the indices.
+ * {@code null} for the indices.
*/
public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder<UpgradeRequest, UpgradeResponse, UpgradeRequestBuilder> {

@@ -407,7 +407,7 @@ public long getVersion() {
}

/**
- * The actual response ({@link IndexResponse} or {@link DeleteResponse}). <tt>null</tt> in
+ * The actual response ({@link IndexResponse} or {@link DeleteResponse}). {@code null} in
* case of failure.
*/
public <T extends DocWriteResponse> T getResponse() {
@@ -422,7 +422,7 @@ public boolean isFailed() {
}

/**
- * The failure message, <tt>null</tt> if it did not fail.
+ * The failure message, {@code null} if it did not fail.
*/
public String getFailureMessage() {
if (failure != null) {
@@ -100,7 +100,7 @@ private Builder(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
/**
* Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single
* request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed
- * while accumulating new bulk requests. Defaults to <tt>1</tt>.
+ * while accumulating new bulk requests. Defaults to {@code 1}.
*/
public Builder setConcurrentRequests(int concurrentRequests) {
this.concurrentRequests = concurrentRequests;
@@ -109,7 +109,7 @@ public Builder setConcurrentRequests(int concurrentRequests) {

/**
* Sets when to flush a new bulk request based on the number of actions currently added. Defaults to
- * <tt>1000</tt>. Can be set to <tt>-1</tt> to disable it.
+ * {@code 1000}. Can be set to {@code -1} to disable it.
*/
public Builder setBulkActions(int bulkActions) {
this.bulkActions = bulkActions;
@@ -118,7 +118,7 @@ public Builder setBulkActions(int bulkActions) {

/**
* Sets when to flush a new bulk request based on the size of actions currently added. Defaults to
- * <tt>5mb</tt>. Can be set to <tt>-1</tt> to disable it.
+ * {@code 5mb}. Can be set to {@code -1} to disable it.
*/
public Builder setBulkSize(ByteSizeValue bulkSize) {
this.bulkSize = bulkSize;
@@ -129,7 +129,7 @@ public Builder setBulkSize(ByteSizeValue bulkSize) {
* Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set.
* <p>
* Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)}
- * can be set to <tt>-1</tt> with the flush interval set allowing for complete async processing of bulk actions.
+ * can be set to {@code -1} with the flush interval set allowing for complete async processing of bulk actions.
*/
public Builder setFlushInterval(TimeValue flushInterval) {
this.flushInterval = flushInterval;
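The four Builder setters above are the knobs the processor flushes on; the values below simply restate the documented defaults so they are visible in one place. A sketch under the assumption that a BulkProcessor.builder(client, listener) factory exists alongside the private Builder constructor shown above, and that Listener is the usual three-callback interface; only the setters from this hunk are used:

import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

public class BulkProcessorDefaultsExample {
    // Sketch only: builds a processor with the documented defaults spelled out explicitly.
    static BulkProcessor build(Client client) {
        return BulkProcessor.builder(client, new BulkProcessor.Listener() {
                    @Override public void beforeBulk(long executionId, BulkRequest request) {}
                    @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}
                    @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
                })
                .setConcurrentRequests(1)                           // 0 allows only a single in-flight request
                .setBulkActions(1000)                               // -1 disables the action-count trigger
                .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) // -1 disables the size trigger
                .setFlushInterval(TimeValue.timeValueSeconds(5))    // not set by default
                .build();
    }
}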
@@ -493,15 +493,15 @@ public RefreshPolicy getRefreshPolicy() {
}

/**
- * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
public final BulkRequest timeout(TimeValue timeout) {
this.timeout = timeout;
return this;
}

/**
- * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
public final BulkRequest timeout(String timeout) {
return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"));