Merge pull request #248 from palantir/resync-kube
Resync with apache-spark-on-k8s upstream
ash211 authored Aug 24, 2017
2 parents 6ecc757 + a41e968 commit 65f1048
Showing 23 changed files with 546 additions and 155 deletions.
7 changes: 5 additions & 2 deletions resource-managers/kubernetes/README.md
@@ -14,11 +14,14 @@ important matters to keep in mind when developing this feature.

# Building Spark with Kubernetes Support

To build Spark with Kubernetes support, use the `kubernetes` profile when invoking Maven. For example, to simply compile the Kubernetes core implementation module along with its dependencies:

build/mvn compile -Pkubernetes -pl resource-managers/kubernetes/core -am -DskipTests

If this is your first time compiling the Kubernetes core implementation module, run the following command to install the dependencies and compile:

build/mvn install -Pkubernetes -pl resource-managers/kubernetes/core -am -DskipTests

To build a distribution of Spark with Kubernetes support, use the `dev/make-distribution.sh` script, and add the
`kubernetes` profile as part of the build arguments. Any other build arguments can be specified as one would expect when
building Spark normally. For example, to build Spark against Hadoop 2.7 and Kubernetes:
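The exact command is collapsed in this diff view; a plausible invocation of that form (the `--name` and `--tgz` flags here are illustrative assumptions, not the README's literal text) would be:

dev/make-distribution.sh --name spark-k8s --tgz -Phadoop-2.7 -Pkubernetes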
@@ -450,6 +450,22 @@ package object config extends Logging {
.timeConf(TimeUnit.MINUTES)
.createWithDefault(5)

private[spark] val EXECUTOR_SUBMITTED_SMALL_FILES_SECRET =
ConfigBuilder("spark.kubernetes.mountdependencies.smallfiles.executor.secretName")
.doc("Name of the secret that should be mounted into the executor containers for" +
" distributing submitted small files without the resource staging server.")
.internal()
.stringConf
.createOptional

private[spark] val EXECUTOR_SUBMITTED_SMALL_FILES_SECRET_MOUNT_PATH =
ConfigBuilder("spark.kubernetes.mountdependencies.smallfiles.executor.secretMountPath")
.doc(s"Mount path in the executors for the secret given by" +
s" ${EXECUTOR_SUBMITTED_SMALL_FILES_SECRET.key}")
.internal()
.stringConf
.createOptional

private[spark] val EXECUTOR_INIT_CONTAINER_CONFIG_MAP =
ConfigBuilder("spark.kubernetes.initcontainer.executor.configmapname")
.doc("Name of the config map to use in the init-container that retrieves submitted files" +
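The two new `spark.kubernetes.mountdependencies.smallfiles.executor.*` entries above are internal and optional: the driver sets them only when it has bundled small submitter-local files into a secret. A minimal sketch of how executor-side code might read them, using the internal `SparkConf.get(ConfigEntry)` accessor available to code in the org.apache.spark packages (this consumer is illustrative, not part of the commit):

import org.apache.spark.SparkConf
import org.apache.spark.deploy.kubernetes.config._

// Illustrative only: yields the secret name and mount path when the driver
// has distributed small local files via a secret, and None otherwise.
def submittedSmallFilesSecret(conf: SparkConf): Option[(String, String)] =
  for {
    secretName <- conf.get(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET)
    mountPath <- conf.get(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET_MOUNT_PATH)
  } yield (secretName, mountPath)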
@@ -70,6 +70,7 @@ package object constants {
private[spark] val ENV_PYSPARK_FILES = "PYSPARK_FILES"
private[spark] val ENV_PYSPARK_PRIMARY = "PYSPARK_PRIMARY"
private[spark] val ENV_JAVA_OPT_PREFIX = "SPARK_JAVA_OPT_"
private[spark] val ENV_MOUNTED_FILES_FROM_SECRET_DIR = "SPARK_MOUNTED_FILES_FROM_SECRET_DIR"

// Bootstrapping dependencies with the init-container
private[spark] val INIT_CONTAINER_ANNOTATION = "pod.beta.kubernetes.io/init-containers"
@@ -92,6 +93,9 @@ private[spark] val DEFAULT_SHUFFLE_MOUNT_NAME = "shuffle"
private[spark] val DEFAULT_SHUFFLE_MOUNT_NAME = "shuffle"
private[spark] val INIT_CONTAINER_SECRET_VOLUME_NAME = "spark-init-secret"

// Bootstrapping dependencies via a secret
private[spark] val MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH = "/etc/spark-submitted-files"

// Miscellaneous
private[spark] val ANNOTATION_EXECUTOR_NODE_AFFINITY = "scheduler.alpha.kubernetes.io/affinity"
private[spark] val DRIVER_CONTAINER_NAME = "spark-kubernetes-driver"
@@ -20,7 +20,7 @@ import org.apache.spark.SparkConf
import org.apache.spark.deploy.kubernetes.ConfigurationUtils
import org.apache.spark.deploy.kubernetes.config._
import org.apache.spark.deploy.kubernetes.constants._
import org.apache.spark.deploy.kubernetes.submit.submitsteps.{BaseDriverConfigurationStep, DependencyResolutionStep, DriverConfigurationStep, DriverKubernetesCredentialsStep, InitContainerBootstrapStep, PythonStep}
import org.apache.spark.deploy.kubernetes.submit.submitsteps.{BaseDriverConfigurationStep, DependencyResolutionStep, DriverConfigurationStep, DriverKubernetesCredentialsStep, InitContainerBootstrapStep, MountSmallLocalFilesStep, PythonStep}
import org.apache.spark.deploy.kubernetes.submit.submitsteps.initcontainer.InitContainerConfigurationStepsOrchestrator
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.util.Utils
@@ -99,40 +99,77 @@ private[spark] class DriverConfigurationStepsOrchestrator(
Option(new PythonStep(mainPyResource, additionalPythonFiles, filesDownloadPath))
case _ => Option.empty[DriverConfigurationStep]
}
val initContainerBootstrapStep = if ((sparkJars ++ sparkFiles).exists { uri =>
Option(Utils.resolveURI(uri).getScheme).getOrElse("file") != "local"
}) {
val initContainerConfigurationStepsOrchestrator =
new InitContainerConfigurationStepsOrchestrator(
namespace,
kubernetesResourceNamePrefix,
sparkJars,

val (localFilesDownloadPath, submittedDependenciesBootstrapSteps) =
if (areAnyFilesNonContainerLocal(sparkJars ++ sparkFiles)) {
val (submittedLocalFilesDownloadPath,
sparkFilesResolvedFromInitContainer,
mountSmallFilesWithoutInitContainerStep) =
// If the resource staging server is specified, submit all local files through that.
submissionSparkConf.get(RESOURCE_STAGING_SERVER_URI).map { _ =>
(filesDownloadPath, sparkFiles, Option.empty[DriverConfigurationStep])
}.getOrElse {
// Else - use a small files bootstrap that submits the local files via a secret.
// Then, indicate to the outer block that the init-container should not handle
// those local files simply by filtering them out.
val sparkFilesWithoutLocal = KubernetesFileUtils.getNonSubmitterLocalFiles(sparkFiles)
val smallFilesSecretName = s"${kubernetesAppId}-submitted-files"
val mountSmallFilesBootstrap = new MountSmallFilesBootstrapImpl(
smallFilesSecretName, MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH)
val mountSmallLocalFilesStep = new MountSmallLocalFilesStep(
sparkFiles,
jarsDownloadPath,
filesDownloadPath,
dockerImagePullPolicy,
allDriverLabels,
smallFilesSecretName,
MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH,
mountSmallFilesBootstrap)
(MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH,
sparkFilesWithoutLocal.toArray,
Some(mountSmallLocalFilesStep))
}

val initContainerBootstrapStep =
if (areAnyFilesNonContainerLocal(sparkJars ++ sparkFilesResolvedFromInitContainer)) {
val initContainerConfigurationStepsOrchestrator =
new InitContainerConfigurationStepsOrchestrator(
namespace,
kubernetesResourceNamePrefix,
sparkJars,
sparkFilesResolvedFromInitContainer,
jarsDownloadPath,
filesDownloadPath,
dockerImagePullPolicy,
allDriverLabels,
initContainerConfigMapName,
INIT_CONTAINER_CONFIG_MAP_KEY,
submissionSparkConf)
val initContainerConfigurationSteps =
initContainerConfigurationStepsOrchestrator.getAllConfigurationSteps()
Some(new InitContainerBootstrapStep(initContainerConfigurationSteps,
initContainerConfigMapName,
INIT_CONTAINER_CONFIG_MAP_KEY,
submissionSparkConf)
val initContainerConfigurationSteps =
initContainerConfigurationStepsOrchestrator.getAllConfigurationSteps()
Some(new InitContainerBootstrapStep(initContainerConfigurationSteps,
initContainerConfigMapName,
INIT_CONTAINER_CONFIG_MAP_KEY))
INIT_CONTAINER_CONFIG_MAP_KEY))
} else Option.empty[DriverConfigurationStep]
(submittedLocalFilesDownloadPath,
mountSmallFilesWithoutInitContainerStep.toSeq ++
initContainerBootstrapStep.toSeq)
} else {
Option.empty[DriverConfigurationStep]
(filesDownloadPath, Seq.empty[DriverConfigurationStep])
}
val dependencyResolutionStep = new DependencyResolutionStep(
sparkJars,
sparkFiles,
jarsDownloadPath,
filesDownloadPath)
localFilesDownloadPath)
Seq(
initialSubmissionStep,
kubernetesCredentialsStep,
dependencyResolutionStep) ++
initContainerBootstrapStep.toSeq ++
submittedDependenciesBootstrapSteps ++
pythonStep.toSeq
}

private def areAnyFilesNonContainerLocal(files: Seq[String]): Boolean = {
files.exists { uri =>
Option(Utils.resolveURI(uri).getScheme).getOrElse("file") != "local"
}
}

}
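To summarize the branching above, here is a simplified, illustrative sketch (not part of the commit) of which bootstrap steps the orchestrator adds for spark.files, depending on whether a resource staging server is configured:

// Simplified decision sketch; the step names are descriptive strings only,
// and submitter-local jars are ignored for brevity.
def bootstrapStepsFor(
    resourceStagingServerUri: Option[String],
    hasSubmitterLocalFiles: Boolean,  // e.g. file:///home/user/app.conf
    hasOtherRemoteFiles: Boolean      // e.g. hdfs:// or http:// jars and files
  ): Seq[String] = {
  if (!hasSubmitterLocalFiles && !hasOtherRemoteFiles) {
    Seq.empty // everything is already container-local
  } else if (resourceStagingServerUri.isDefined) {
    Seq("InitContainerBootstrapStep") // the staging server serves local files too
  } else {
    Seq("MountSmallLocalFilesStep") ++ // local files travel in a secret
      (if (hasOtherRemoteFiles) Seq("InitContainerBootstrapStep") else Nil)
  }
}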
@@ -0,0 +1,52 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.submit

import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, Pod, PodBuilder}

import org.apache.spark.deploy.kubernetes.constants._

private[spark] trait MountSmallFilesBootstrap {
def mountSmallFilesSecret(pod: Pod, container: Container): (Pod, Container)
}

private[spark] class MountSmallFilesBootstrapImpl(
secretName: String, secretMountPath: String) extends MountSmallFilesBootstrap {
def mountSmallFilesSecret(pod: Pod, container: Container): (Pod, Container) = {
val resolvedPod = new PodBuilder(pod)
.editOrNewSpec()
.addNewVolume()
.withName("submitted-files")
.withNewSecret()
.withSecretName(secretName)
.endSecret()
.endVolume()
.endSpec()
.build()
val resolvedContainer = new ContainerBuilder(container)
.addNewEnv()
.withName(ENV_MOUNTED_FILES_FROM_SECRET_DIR)
.withValue(secretMountPath)
.endEnv()
.addNewVolumeMount()
.withName("submitted-files")
.withMountPath(secretMountPath)
.endVolumeMount()
.build()
(resolvedPod, resolvedContainer)
}
}
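A hypothetical usage sketch (not part of the commit) showing what the bootstrap adds to an otherwise bare pod and container; the secret name and the pod and container names are placeholders:

import io.fabric8.kubernetes.api.model.{ContainerBuilder, PodBuilder}
import org.apache.spark.deploy.kubernetes.constants._
import org.apache.spark.deploy.kubernetes.submit.MountSmallFilesBootstrapImpl

// Placeholder inputs, for illustration only.
val bootstrap = new MountSmallFilesBootstrapImpl(
  "example-app-submitted-files",          // assumed secret name
  MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH)  // "/etc/spark-submitted-files"
val (podWithSecret, containerWithSecret) = bootstrap.mountSmallFilesSecret(
  new PodBuilder().withNewMetadata().withName("driver").endMetadata().build(),
  new ContainerBuilder().withName("spark-kubernetes-driver").build())
// podWithSecret now declares a "submitted-files" volume backed by the secret,
// and containerWithSecret mounts it at the given path and exposes that path
// through the SPARK_MOUNTED_FILES_FROM_SECRET_DIR environment variable.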
@@ -23,6 +23,7 @@ import io.fabric8.kubernetes.api.model.ContainerBuilder

import org.apache.spark.deploy.kubernetes.constants._
import org.apache.spark.deploy.kubernetes.submit.KubernetesFileUtils
import org.apache.spark.deploy.kubernetes.submit.submitsteps.initcontainer.InitContainerConfigurationStep
import org.apache.spark.util.Utils

/**
@@ -36,11 +37,12 @@ private[spark] class DependencyResolutionStep(
sparkJars: Seq[String],
sparkFiles: Seq[String],
jarsDownloadPath: String,
filesDownloadPath: String) extends DriverConfigurationStep {
localFilesDownloadPath: String) extends DriverConfigurationStep {

override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
val resolvedSparkJars = KubernetesFileUtils.resolveSubmittedUris(sparkJars, jarsDownloadPath)
val resolvedSparkFiles = KubernetesFileUtils.resolveSubmittedUris(sparkFiles, filesDownloadPath)
val resolvedSparkFiles = KubernetesFileUtils.resolveSubmittedUris(
sparkFiles, localFilesDownloadPath)
val sparkConfResolvedSparkDependencies = driverSpec.driverSparkConf.clone()
if (resolvedSparkJars.nonEmpty) {
sparkConfResolvedSparkDependencies.set("spark.jars", resolvedSparkJars.mkString(","))
@@ -33,32 +33,32 @@ private[spark] class InitContainerBootstrapStep(

override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
var currentInitContainerSpec = InitContainerSpec(
initContainerProperties = Map.empty[String, String],
additionalDriverSparkConf = Map.empty[String, String],
initContainer = new ContainerBuilder().build(),
driverContainer = driverSpec.driverContainer,
podToInitialize = driverSpec.driverPod,
initContainerDependentResources = Seq.empty[HasMetadata])
for (nextStep <- initContainerConfigurationSteps) {
currentInitContainerSpec = nextStep.configureInitContainer(currentInitContainerSpec)
}
val configMap = PropertiesConfigMapFromScalaMapBuilder.buildConfigMap(
initContainerConfigMapName,
initContainerConfigMapKey,
currentInitContainerSpec.initContainerProperties)
val resolvedDriverSparkConf = driverSpec.driverSparkConf.clone()
.set(EXECUTOR_INIT_CONTAINER_CONFIG_MAP, initContainerConfigMapName)
.set(EXECUTOR_INIT_CONTAINER_CONFIG_MAP_KEY, initContainerConfigMapKey)
.setAll(currentInitContainerSpec.additionalDriverSparkConf)
val resolvedDriverPod = InitContainerUtil.appendInitContainer(
currentInitContainerSpec.podToInitialize, currentInitContainerSpec.initContainer)
driverSpec.copy(
driverPod = resolvedDriverPod,
driverContainer = currentInitContainerSpec.driverContainer,
driverSparkConf = resolvedDriverSparkConf,
otherKubernetesResources =
driverSpec.otherKubernetesResources ++
currentInitContainerSpec.initContainerDependentResources ++
Seq(configMap))
}
}
@@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.submit.submitsteps

import java.io.File

import com.google.common.io.{BaseEncoding, Files}
import io.fabric8.kubernetes.api.model.SecretBuilder
import scala.collection.JavaConverters._

import org.apache.spark.deploy.kubernetes.config._
import org.apache.spark.deploy.kubernetes.submit.{KubernetesFileUtils, MountSmallFilesBootstrap}
import org.apache.spark.util.Utils

private[spark] class MountSmallLocalFilesStep(
sparkFiles: Seq[String],
smallFilesSecretName: String,
smallFilesSecretMountPath: String,
mountSmallFilesBootstrap: MountSmallFilesBootstrap) extends DriverConfigurationStep {

import MountSmallLocalFilesStep._
override def configureDriver(driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = {
val localFiles = KubernetesFileUtils.getOnlySubmitterLocalFiles(sparkFiles).map(new File(_))
val totalSizeBytes = localFiles.map(_.length()).sum
val totalSizeBytesString = Utils.bytesToString(totalSizeBytes)
require(totalSizeBytes < MAX_SECRET_BUNDLE_SIZE_BYTES,
s"Total size of all files submitted must be less than $MAX_SECRET_BUNDLE_SIZE_BYTES_STRING" +
s" if you do not use a resource staging server. The total size of all submitted local" +
s" files is $totalSizeBytesString. Please install a resource staging server and configure" +
s" your application to use it via ${RESOURCE_STAGING_SERVER_URI.key}")
val localFileBase64Contents = localFiles.map { file =>
val fileBase64 = BaseEncoding.base64().encode(Files.toByteArray(file))
(file.getName, fileBase64)
}.toMap
val localFilesSecret = new SecretBuilder()
.withNewMetadata()
.withName(smallFilesSecretName)
.endMetadata()
.withData(localFileBase64Contents.asJava)
.build()
val (resolvedDriverPod, resolvedDriverContainer) =
mountSmallFilesBootstrap.mountSmallFilesSecret(
driverSpec.driverPod, driverSpec.driverContainer)
val resolvedSparkConf = driverSpec.driverSparkConf.clone()
.set(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET, smallFilesSecretName)
.set(EXECUTOR_SUBMITTED_SMALL_FILES_SECRET_MOUNT_PATH, smallFilesSecretMountPath)
driverSpec.copy(
driverPod = resolvedDriverPod,
driverContainer = resolvedDriverContainer,
driverSparkConf = resolvedSparkConf,
otherKubernetesResources = driverSpec.otherKubernetesResources ++ Seq(localFilesSecret))
}
}

private[spark] object MountSmallLocalFilesStep {
val MAX_SECRET_BUNDLE_SIZE_BYTES = 10240
val MAX_SECRET_BUNDLE_SIZE_BYTES_STRING =
Utils.bytesToString(MAX_SECRET_BUNDLE_SIZE_BYTES)
}
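Putting the pieces together, a small worked sketch (paths assumed for illustration) of where a submitted local file ends up once this step has stored it in the secret and DependencyResolutionStep has rewritten spark.files against the secret mount path:

import java.io.File

// Hypothetical input: a file passed via --files, with no resource staging server.
val submitted = "/home/alice/app.conf"
// The secret entry is keyed by the file's base name, and the secret is mounted
// at MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH in both the driver and the executors.
val mountPath = "/etc/spark-submitted-files"
val resolved = new File(mountPath, new File(submitted).getName).getPath
// resolved == "/etc/spark-submitted-files/app.conf"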