diff --git a/R-package/R/io.R b/R-package/R/io.R
index 10298fbaf056..9f6a60702505 100644
--- a/R-package/R/io.R
+++ b/R-package/R/io.R
@@ -1,14 +1,12 @@
-is.MXDataIter <- function(x) {
-  inherits(x, "Rcpp_MXNativeDataIter") ||
-    inherits(x, "Rcpp_MXArrayDataIter")
-}
-
 #' Judge if an object is mx.dataiter
 #'
 #' @return Logical indicator
 #'
 #' @export
-is.mx.dataiter <- is.MXDataIter
+is.mx.dataiter <- function(x) {
+  inherits(x, "Rcpp_MXNativeDataIter") ||
+    inherits(x, "Rcpp_MXArrayDataIter")
+}
 
 #' Extract a certain field from DataIter.
 #'
diff --git a/R-package/R/metric.R b/R-package/R/metric.R
index 5bf4390cd614..02572f4acdc3 100644
--- a/R-package/R/metric.R
+++ b/R-package/R/metric.R
@@ -78,3 +78,13 @@ mx.metric.rmsle <- mx.metric.custom("rmsle", function(label, pred) {
   return(res)
 })
 
+#' Perplexity metric for language models
+#'
+#' @export
+mx.metric.Perplexity <- mx.metric.custom("Perplexity", function(label, pred) {
+  label_probs <- as.array(mx.nd.choose.element.0index(pred, label))
+  batch <- length(label_probs)
+  NLL <- -sum(log(pmax(1e-15, label_probs))) / batch
+  Perplexity <- exp(NLL)
+  return(Perplexity)
+})
diff --git a/R-package/R/model.R b/R-package/R/model.R
index 043d0e2433ea..64cc816f0ef4 100644
--- a/R-package/R/model.R
+++ b/R-package/R/model.R
@@ -279,7 +279,13 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
   return(model)
 }
 
-# Initialize parameters
+#' Parameter initialization
+#' @param symbol The symbolic configuration of the neural network.
+#' @param input.shape The shape of the input for the neural network.
+#' @param output.shape The shape of the output for the neural network. It can be NULL.
+#' @param initializer Initializer object. The initialization scheme for parameters.
+#' @param ctx mx.context. The devices used to perform initialization.
+#' @export mx.model.init.params <- function(symbol, input.shape, output.shape, initializer, ctx) { if (!is.MXSymbol(symbol)) stop("symbol need to be MXSymbol") @@ -296,7 +302,7 @@ mx.model.init.params <- function(symbol, input.shape, output.shape, initializer, # Initialize the data iter mx.model.init.iter <- function(X, y, batch.size, is.train) { - if (is.MXDataIter(X)) return(X) + if (is.mx.dataiter(X)) return(X) if (is.null(y)) { if (is.train) stop("Need to provide parameter y for training with R arrays.") shape <- dim(X) diff --git a/R-package/R/rnn_model.R b/R-package/R/rnn_model.R index b269d0722601..aa4a7d03ca9b 100644 --- a/R-package/R/rnn_model.R +++ b/R-package/R/rnn_model.R @@ -3,19 +3,9 @@ is.param.name <- function(name) { grepl('gamma$', name) || grepl('beta$', name) ) } -# Initialize parameters -mx.model.init.params.rnn <- function(symbol, input.shape, initializer, ctx) { - if (!is.mx.symbol(symbol)) stop("symbol need to be MXSymbol") - slist <- symbol$infer.shape(input.shape) - if (is.null(slist)) stop("Not enough information to get shapes") - arg.params <- mx.init.create(initializer, slist$arg.shapes, ctx, skip.unknown=TRUE) - aux.params <- mx.init.create(initializer, slist$aux.shapes, ctx, skip.unknown=FALSE) - return(list(arg.params=arg.params, aux.params=aux.params)) -} - # Initialize the data iter mx.model.init.iter.rnn <- function(X, y, batch.size, is.train) { - if (is.MXDataIter(X)) return(X) + if (is.mx.dataiter(X)) return(X) shape <- dim(X) if (is.null(shape)) { num.data <- length(X) @@ -56,11 +46,11 @@ setup.rnn.model <- function(rnn.sym, ctx, } } } - params <- mx.model.init.params.rnn(rnn.sym, input.shapes, initializer, mx.cpu()) + params <- mx.model.init.params(rnn.sym, input.shapes, NULL, initializer, mx.cpu()) args <- input.shapes args$symbol <- rnn.sym args$ctx <- ctx - args$grad.req <- "add" + args$grad.req <- "write" rnn.exec <- do.call(mx.simple.bind, args) mx.exec.update.arg.arrays(rnn.exec, params$arg.params, match.name=TRUE) diff --git a/R-package/tests/testthat/test_model.R b/R-package/tests/testthat/test_model.R index 7c5b04ac27b1..4cf2a8c8e070 100644 --- a/R-package/tests/testthat/test_model.R +++ b/R-package/tests/testthat/test_model.R @@ -127,11 +127,11 @@ test_that("Fine-tune", { new_fc <- mx.symbol.FullyConnected(data = flatten, num_hidden = 2, name = "fc1") new_soft <- mx.symbol.SoftmaxOutput(data = new_fc, name = "softmax") - arg_params_new <- mxnet:::mx.model.init.params(symbol = new_soft, - input.shape = list("data" = c(224, 224, 3, 8)), - output.shape = NULL, - initializer = mx.init.uniform(0.1), - ctx = mx.cpu())$arg.params + arg_params_new <- mx.model.init.params(symbol = new_soft, + input.shape = list("data" = c(224, 224, 3, 8)), + output.shape = NULL, + initializer = mx.init.uniform(0.1), + ctx = mx.cpu())$arg.params fc1_weights_new <- arg_params_new[["fc1_weight"]] fc1_bias_new <- arg_params_new[["fc1_bias"]] diff --git a/amalgamation/amalgamation.py b/amalgamation/amalgamation.py index da3b60ac8399..b33b81c62b4a 100644 --- a/amalgamation/amalgamation.py +++ b/amalgamation/amalgamation.py @@ -8,7 +8,8 @@ 'kvstore_dist.h', 'mach/clock.h', 'mach/mach.h', 'malloc.h', 'mkl.h', 'mkl_cblas.h', 'mkl_vsl.h', 'mkl_vsl_functions.h', 'nvml.h', 'opencv2/opencv.hpp', 'sys/stat.h', 'sys/types.h', 'cuda.h', 'cuda_fp16.h', - 'omp.h', 'execinfo.h', 'packet/sse-inl.h', 'emmintrin.h', 'thrust/device_vector.h' + 'omp.h', 'execinfo.h', 'packet/sse-inl.h', 'emmintrin.h', 'thrust/device_vector.h', + 'cusolverDn.h' ] minimum = int(sys.argv[6]) if len(sys.argv) 
> 5 else 0 diff --git a/amalgamation/mxnet_predict0.cc b/amalgamation/mxnet_predict0.cc index d51deb285c99..ca1b581ce195 100644 --- a/amalgamation/mxnet_predict0.cc +++ b/amalgamation/mxnet_predict0.cc @@ -26,6 +26,7 @@ #include "src/ndarray/ndarray_function.cc" +#include "src/ndarray/autograd.cc" #include "src/ndarray/ndarray.cc" #include "src/engine/engine.cc" diff --git a/docs/how_to/cloud.md b/docs/how_to/cloud.md index 47ea40cf4595..67b28f8b4338 100644 --- a/docs/how_to/cloud.md +++ b/docs/how_to/cloud.md @@ -1,183 +1,183 @@ -# MXNet on the Cloud - -Deep learning can require extremely powerful hardware, often for unpredictable durations of time. -Moreover, _MXNet_ can benefit from both multiple GPUs and multiple machines. -Accordingly, cloud computing, as offered by AWS and others, -is especially well suited to training deep learning models. -Using AWS, we can rapidly fire up multiple machines with multiple GPUs each at will -and maintain the resources for precisely the amount of time needed. - -## Set Up an AWS GPU Cluster from Scratch - -In this document, we provide a step-by-step guide that will teach you -how to set up an AWS cluster with _MXNet_. We show how to: - -- [Use Amazon S3 to host data](#use-amazon-s3-to-host-data) -- [Set up an EC2 GPU instance with all dependencies installed](#set-up-an-ec2-gpu-instance) -- [Build and run MXNet on a single computer](#build-and-run-mxnet-on-a-gpu-instance) -- [Set up an EC2 GPU cluster for distributed training](#set-up-an-ec2-gpu-cluster-for-distributed-training) - -### Use Amazon S3 to Host Data - -Amazon S3 provides distributed data storage which proves especially convenient for hosting large datasets. -To use S3, you need [AWS credentials](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html), -including an `ACCESS_KEY_ID` and a `SECRET_ACCESS_KEY`. - -To use _MXNet_ with S3, set the environment variables `AWS_ACCESS_KEY_ID` and -`AWS_SECRET_ACCESS_KEY` by adding the following two lines in -`~/.bashrc` (replacing the strings with the correct ones): - -```bash -export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE -export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY -``` - -There are several ways to upload data to S3. One simple way is to use -[s3cmd](http://s3tools.org/s3cmd). For example: - -```bash -wget http://data.mxnet.io/mxnet/data/mnist.zip -unzip mnist.zip && s3cmd put t*-ubyte s3://dmlc/mnist/ -``` - -### Use Pre-installed EC2 GPU Instance -The [Deep Learning AMI](https://aws.amazon.com/marketplace/pp/B01M0AXXQB?qid=1475211685369&sr=0-1&ref_=srh_res_product_title) is an Amazon Linux image -supported and maintained by Amazon Web Services for use on Amazon Elastic Compute Cloud (Amazon EC2). -It contains [MXNet-v0.9.3 tag](https://github.com/dmlc/mxnet) and the necessary components to get going with deep learning, -including Nvidia drivers, CUDA, cuDNN, Anaconda, Python2 and Python3. -The AMI IDs are the following: - -* us-east-1: ami-e7c96af1 -* us-west-2: ami-dfb13ebf -* eu-west-1: ami-6e5d6808 - -Now you can launch _MXNet_ directly on an EC2 GPU instance. -You can also use [Jupyter](http://jupyter.org) notebook on EC2 machine. -Here is a [good tutorial](https://github.com/dmlc/mxnet-notebooks) -on how to connect to a Jupyter notebook running on an EC2 instance. 
- -### Set Up an EC2 GPU Instance from Scratch - -_MXNet_ requires the following libraries: - -- C++ compiler with C++11 support, such as `gcc >= 4.8` -- `CUDA` (`CUDNN` in optional) for GPU linear algebra -- `BLAS` (cblas, open-blas, atblas, mkl, or others) for CPU linear algebra -- `opencv` for image augmentations -- `curl` and `openssl` for the ability to read/write to Amazon S3 - -Installing `CUDA` on EC2 instances requires some effort. Caffe has a good -[tutorial](https://github.com/BVLC/caffe/wiki/Install-Caffe-on-EC2-from-scratch-(Ubuntu,-CUDA-7,-cuDNN-3)) -on how to install CUDA 7.0 on Ubuntu 14.04. - -***Note:*** We tried CUDA 7.5 on Nov 7, 2015, but found it problematic. - -You can install the rest using the package manager. For example, on Ubuntu: - -``` -sudo apt-get update -sudo apt-get install -y build-essential git libcurl4-openssl-dev libatlas-base-dev libopencv-dev python-numpy -``` - -The Amazon Machine Image (AMI) [ami-12fd8178](https://console.aws.amazon.com/ec2/v2/home?region=us-east-1#LaunchInstanceWizard:ami=ami-12fd8178) has the packages listed above installed. - - -### Build and Run MXNet on a GPU Instance - -The following commands build _MXNet_ with CUDA/CUDNN, Amazon S3, and distributed -training. - -```bash -git clone --recursive https://github.com/dmlc/mxnet -cd mxnet; cp make/config.mk . -echo "USE_CUDA=1" >>config.mk -echo "USE_CUDA_PATH=/usr/local/cuda" >>config.mk -echo "USE_CUDNN=1" >>config.mk -echo "USE_BLAS=atlas" >> config.mk -echo "USE_DIST_KVSTORE = 1" >>config.mk -echo "USE_S3=1" >>config.mk -make -j$(nproc) -``` - -To test whether everything is installed properly, we can try training a convolutional neural network (CNN) on the MNIST dataset using a GPU: - -```bash -python tests/python/gpu/test_conv.py -``` - -If you've placed the MNIST data on `s3://dmlc/mnist`, you can read the data stored on Amazon S3 directly with the following command: - -```bash -sed -i.bak "s!data_dir = 'data'!data_dir = 's3://dmlc/mnist'!" tests/python/gpu/test_conv.py -``` - -***Note:*** You can use `sudo ln /dev/null /dev/raw1394` to fix the opencv error `libdc1394 error: Failed to initialize libdc1394`. - -### Set Up an EC2 GPU Cluster for Distributed Training - -A cluster consists of multiple computers. -You can use one computer with _MXNet_ installed as the root computer for submitting jobs,and then launch several -slave computers to run the jobs. For example, launch multiple instances using an -AMI, e.g., -[ami-12fd8178](https://console.aws.amazon.com/ec2/v2/home?region=us-east-1#LaunchInstanceWizard:ami=ami-12fd8178), -with dependencies installed. There are two options: - -- Make all slaves' ports accessible (same for the root) by setting type: All TCP, - Source: Anywhere in Configure Security Group. - -- Use the same `pem` as the root computer to access all slave computers, and - then copy the `pem` file into the root computer's `~/.ssh/id_rsa`. If you do this, all slave computers can be accessed with SSH from the root. - -Now, run the CNN on multiple computers. Assume that we are on a working -directory of the root computer, such as `~/train`, and MXNet is built as `~/mxnet`. - -1. Pack the _MXNet_ Python library into this working directory for easy - synchronization: - - ```bash - cp -r ~/mxnet/python/mxnet . - cp ~/mxnet/lib/libmxnet.so mxnet/ - ``` - - And then copy the training program: - - ```bash - cp ~/mxnet/example/image-classification/*.py . - cp -r ~/mxnet/example/image-classification/common . - ``` - -2. Prepare a host file with all slaves private IPs. 
For example, `cat hosts`: - - ```bash - 172.30.0.172 - 172.30.0.171 - ``` - -3. Assuming that there are two computers, train the CNN using two workers: - - ```bash - ../../tools/launch.py -n 2 -H hosts --sync-dir /tmp/mxnet python train_mnist.py --kv-store dist_sync - ``` - -***Note:*** Sometimes the jobs linger at the slave computers even though you've pressed `Ctrl-c` -at the root node. To terminate them, use the following command: - -```bash -cat hosts | xargs -I{} ssh -o StrictHostKeyChecking=no {} 'uname -a; pgrep python | xargs kill -9' -``` - -***Note:*** The preceding example is very simple to train and therefore isn't a good -benchmark for distributed training. Consider using other [examples](https://github.com/dmlc/mxnet/tree/master/example/image-classification). - -### More Options -#### Use Multiple Data Shards -It is common to pack a dataset into multiple files, especially when working in a distributed environment. -_MXNet_ supports direct loading from multiple data shards. -Put all of the record files into a folder, and point the data path to the folder. - -#### Use YARN and SGE -Although using SSH can be simple when you don't have a cluster scheduling framework, -_MXNet_ is designed to be portable to various platforms. -We provide scripts available in [tracker](https://github.com/dmlc/dmlc-core/tree/master/tracker) -to allow running on other cluster frameworks, including Hadoop (YARN) and SGE. -We welcome contributions from the community of examples of running _MXNet_ on your favorite distributed platform. +# MXNet on the Cloud + +Deep learning can require extremely powerful hardware, often for unpredictable durations of time. +Moreover, _MXNet_ can benefit from both multiple GPUs and multiple machines. +Accordingly, cloud computing, as offered by AWS and others, +is especially well suited to training deep learning models. +Using AWS, we can rapidly fire up multiple machines with multiple GPUs each at will +and maintain the resources for precisely the amount of time needed. + +## Set Up an AWS GPU Cluster from Scratch + +In this document, we provide a step-by-step guide that will teach you +how to set up an AWS cluster with _MXNet_. We show how to: + +- [Use Amazon S3 to host data](#use-amazon-s3-to-host-data) +- [Set up an EC2 GPU instance with all dependencies installed](#set-up-an-ec2-gpu-instance) +- [Build and run MXNet on a single computer](#build-and-run-mxnet-on-a-gpu-instance) +- [Set up an EC2 GPU cluster for distributed training](#set-up-an-ec2-gpu-cluster-for-distributed-training) + +### Use Amazon S3 to Host Data + +Amazon S3 provides distributed data storage which proves especially convenient for hosting large datasets. +To use S3, you need [AWS credentials](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html), +including an `ACCESS_KEY_ID` and a `SECRET_ACCESS_KEY`. + +To use _MXNet_ with S3, set the environment variables `AWS_ACCESS_KEY_ID` and +`AWS_SECRET_ACCESS_KEY` by adding the following two lines in +`~/.bashrc` (replacing the strings with the correct ones): + +```bash +export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE +export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +``` + +There are several ways to upload data to S3. One simple way is to use +[s3cmd](http://s3tools.org/s3cmd). 
For example:
+
+```bash
+wget http://data.mxnet.io/mxnet/data/mnist.zip
+unzip mnist.zip && s3cmd put t*-ubyte s3://dmlc/mnist/
+```
+
+### Use Pre-installed EC2 GPU Instance
+The [Deep Learning AMI](https://aws.amazon.com/marketplace/pp/B01M0AXXQB?qid=1475211685369&sr=0-1&ref_=srh_res_product_title) is an Amazon Linux image
+supported and maintained by Amazon Web Services for use on Amazon Elastic Compute Cloud (Amazon EC2).
+It contains the [MXNet-v0.9.3 tag](https://github.com/dmlc/mxnet) and the necessary components to get going with deep learning,
+including Nvidia drivers, CUDA, cuDNN, Anaconda, Python2 and Python3.
+The AMI IDs are the following:
+
+* us-east-1: ami-e7c96af1
+* us-west-2: ami-dfb13ebf
+* eu-west-1: ami-6e5d6808
+
+Now you can launch _MXNet_ directly on an EC2 GPU instance.
+You can also use a [Jupyter](http://jupyter.org) notebook on an EC2 machine.
+Here is a [good tutorial](https://github.com/dmlc/mxnet-notebooks)
+on how to connect to a Jupyter notebook running on an EC2 instance.
+
+### Set Up an EC2 GPU Instance from Scratch
+
+_MXNet_ requires the following libraries:
+
+- C++ compiler with C++11 support, such as `gcc >= 4.8`
+- `CUDA` (`CUDNN` is optional) for GPU linear algebra
+- `BLAS` (cblas, openblas, atlas, mkl, or others) for CPU linear algebra
+- `opencv` for image augmentations
+- `curl` and `openssl` for the ability to read/write to Amazon S3
+
+Installing `CUDA` on EC2 instances requires some effort. Caffe has a good
+[tutorial](https://github.com/BVLC/caffe/wiki/Install-Caffe-on-EC2-from-scratch-(Ubuntu,-CUDA-7,-cuDNN-3))
+on how to install CUDA 7.0 on Ubuntu 14.04.
+
+***Note:*** We tried CUDA 7.5 on Nov 7, 2015, but found it problematic.
+
+You can install the rest using the package manager. For example, on Ubuntu:
+
+```
+sudo apt-get update
+sudo apt-get install -y build-essential git libcurl4-openssl-dev libatlas-base-dev libopencv-dev python-numpy
+```
+
+The Amazon Machine Image (AMI) [ami-12fd8178](https://console.aws.amazon.com/ec2/v2/home?region=us-east-1#LaunchInstanceWizard:ami=ami-12fd8178) has the packages listed above installed.
+
+
+### Build and Run MXNet on a GPU Instance
+
+The following commands build _MXNet_ with CUDA/CUDNN, Amazon S3, and distributed
+training.
+
+```bash
+git clone --recursive https://github.com/dmlc/mxnet
+cd mxnet; cp make/config.mk .
+echo "USE_CUDA=1" >>config.mk
+echo "USE_CUDA_PATH=/usr/local/cuda" >>config.mk
+echo "USE_CUDNN=1" >>config.mk
+echo "USE_BLAS=atlas" >>config.mk
+echo "USE_DIST_KVSTORE=1" >>config.mk
+echo "USE_S3=1" >>config.mk
+make -j$(nproc)
+```
+
+To test whether everything is installed properly, we can try training a convolutional neural network (CNN) on the MNIST dataset using a GPU:
+
+```bash
+python example/image-classification/train_mnist.py
+```
+
+If you've placed the MNIST data on `s3://dmlc/mnist`, you can read the data stored on Amazon S3 directly with the following command:
+
+```bash
+sed -i.bak "s!data_dir = 'data'!data_dir = 's3://dmlc/mnist'!" example/image-classification/train_mnist.py
+```
+
+***Note:*** You can use `sudo ln /dev/null /dev/raw1394` to fix the opencv error `libdc1394 error: Failed to initialize libdc1394`.
+
+### Set Up an EC2 GPU Cluster for Distributed Training
+
+A cluster consists of multiple computers.
+You can use one computer with _MXNet_ installed as the root computer for submitting jobs, and then launch several
+slave computers to run the jobs.
For example, launch multiple instances using an
+AMI, e.g.,
+[ami-12fd8178](https://console.aws.amazon.com/ec2/v2/home?region=us-east-1#LaunchInstanceWizard:ami=ami-12fd8178),
+with dependencies installed. There are two options:
+
+- Make all slaves' ports accessible (same for the root) by setting type: All TCP,
+  Source: Anywhere in Configure Security Group.
+
+- Use the same `pem` as the root computer to access all slave computers, and
+  then copy the `pem` file into the root computer's `~/.ssh/id_rsa`. If you do this, all slave computers can be accessed with SSH from the root.
+
+Now, run the CNN on multiple computers. Assume that we are in a working
+directory of the root computer, such as `~/train`, and MXNet is built in `~/mxnet`.
+
+1. Pack the _MXNet_ Python library into this working directory for easy
+   synchronization:
+
+   ```bash
+   cp -r ~/mxnet/python/mxnet .
+   cp ~/mxnet/lib/libmxnet.so mxnet/
+   ```
+
+   And then copy the training program:
+
+   ```bash
+   cp ~/mxnet/example/image-classification/*.py .
+   cp -r ~/mxnet/example/image-classification/common .
+   ```
+
+2. Prepare a host file with all slaves' private IPs. For example, `cat hosts`:
+
+   ```bash
+   172.30.0.172
+   172.30.0.171
+   ```
+
+3. Assuming that there are two computers, train the CNN using two workers:
+
+   ```bash
+   ../../tools/launch.py -n 2 -H hosts --sync-dir /tmp/mxnet python train_mnist.py --kv-store dist_sync
+   ```
+
+***Note:*** Sometimes the jobs linger on the slave computers even though you've pressed `Ctrl-c`
+at the root node. To terminate them, use the following command:
+
+```bash
+cat hosts | xargs -I{} ssh -o StrictHostKeyChecking=no {} 'uname -a; pgrep python | xargs kill -9'
+```
+
+***Note:*** The preceding example trains a very simple model and therefore isn't a good
+benchmark for distributed training. Consider using other [examples](https://github.com/dmlc/mxnet/tree/master/example/image-classification).
+
+### More Options
+#### Use Multiple Data Shards
+It is common to pack a dataset into multiple files, especially when working in a distributed environment.
+_MXNet_ supports direct loading from multiple data shards.
+Put all of the record files into a folder, and point the data path to the folder.
+
+#### Use YARN and SGE
+Although using SSH can be simple when you don't have a cluster scheduling framework,
+_MXNet_ is designed to be portable to various platforms.
+We provide scripts in [tracker](https://github.com/dmlc/dmlc-core/tree/master/tracker)
+to allow running on other cluster frameworks, including Hadoop (YARN) and SGE.
+We welcome contributions from the community with examples of running _MXNet_ on your favorite distributed platform.
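Stepping back to the R-package changes earlier in this patch: since `mx.model.init.params` is now exported (replacing the removed internal `mx.model.init.params.rnn` and the `mxnet:::` access in the fine-tune test), downstream R code can call it directly. Below is a minimal sketch of such a call; the small two-class symbol and the input shape are illustrative assumptions, not part of this patch:

```r
library(mxnet)

# Hypothetical classification head; any symbol with inferable shapes works.
data     <- mx.symbol.Variable("data")
flatten  <- mx.symbol.Flatten(data = data)
fc1      <- mx.symbol.FullyConnected(data = flatten, num_hidden = 2, name = "fc1")
new_soft <- mx.symbol.SoftmaxOutput(data = fc1, name = "softmax")

# input.shape maps input names to shapes, with the batch size as the last dim.
params <- mx.model.init.params(symbol = new_soft,
                               input.shape = list("data" = c(28, 28, 1, 8)),
                               output.shape = NULL,
                               initializer = mx.init.uniform(0.1),
                               ctx = mx.cpu())

# params$arg.params and params$aux.params hold the freshly initialized
# NDArrays, e.g. params$arg.params[["fc1_weight"]] for fine-tuning workflows.
```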
diff --git a/tests/jenkins/run_test_pip_installations.sh b/tests/jenkins/run_test_pip_installations.sh index 9122ea4e7c77..9246708f4329 100755 --- a/tests/jenkins/run_test_pip_installations.sh +++ b/tests/jenkins/run_test_pip_installations.sh @@ -49,7 +49,7 @@ for DEV in "${DEVICES[@]}"; do DOCKER_CMD="${DOCKER_CMD} pip install mxnet-cu80; python tests/python/train/test_conv.py --gpu" fi - ${DOCKER_BINARY} run --rm -v ${WORKSPACE}:${WORKSPACE} ${DOCKER_TAG} bash -c "${DOCKER_CMD}" + ${DOCKER_BINARY} run --rm -v ${WORKSPACE}:${WORKSPACE} -w ${WORKSPACE} ${DOCKER_TAG} bash -c "tests/jenkins/run_as_user.sh `id -u` `id -un` `id -g` `id -un` '${DOCKER_CMD}'" done done diff --git a/tests/nightly/compilation_warnings/compilation_warnings.sh b/tests/nightly/compilation_warnings/compilation_warnings.sh index 9c377e2dd1fb..871d849b9bae 100644 --- a/tests/nightly/compilation_warnings/compilation_warnings.sh +++ b/tests/nightly/compilation_warnings/compilation_warnings.sh @@ -1,4 +1,5 @@ - +#!/bin/bash +set -e runme() { cmd=$* echo "$cmd" @@ -21,7 +22,6 @@ echo "Starting make" cp make/config.mk . sed -i -e 's/gcc/gcc-5/g' config.mk sed -i -e 's/g++/g++-5/g' config.mk -runme /usr/bin/time -f "%e" make -j$(nproc) &> build/compile_output.txt -cat build/compile_output.txt +runme /usr/bin/time -f "%e" make -j$(nproc) 2>&1 | tee build/compile_output.txt echo "Finished make. Now processing output" python tests/nightly/compilation_warnings/process_output.py build/compile_output.txt diff --git a/tests/nightly/compilation_warnings/process_output.py b/tests/nightly/compilation_warnings/process_output.py index 1a57d81f8dad..622598d72292 100644 --- a/tests/nightly/compilation_warnings/process_output.py +++ b/tests/nightly/compilation_warnings/process_output.py @@ -24,7 +24,7 @@ def generate_stats(warnings): def print_summary(time, warnings): sorted_warnings, total_count = generate_stats(warnings) print "START - Compilation warnings count" - print total_count + print total_count, 'warnings' print "END - Compilation warnings count" print 'START - Compilation warnings summary' print 'Time taken to compile:', time, 's'
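A usage sketch for the new `mx.metric.Perplexity` metric: like any built-in metric, it can be passed as `eval.metric` to `mx.model.FeedForward.create`. The tiny synthetic dataset and one-layer network below are placeholders, not part of this patch; note that labels must be 0-indexed class ids, since the metric uses `mx.nd.choose.element.0index` to pick each label's predicted probability.

```r
library(mxnet)

# Synthetic "vocabulary of 4" task: 10 features x 50 samples (column-major).
set.seed(0)
train.x <- matrix(runif(10 * 50), nrow = 10)
train.y <- sample(0:3, 50, replace = TRUE)  # 0-indexed labels

data <- mx.symbol.Variable("data")
fc   <- mx.symbol.FullyConnected(data = data, num_hidden = 4)
net  <- mx.symbol.SoftmaxOutput(data = fc, name = "softmax")

# Training should log Train-Perplexity, i.e. exp(mean NLL over each batch).
model <- mx.model.FeedForward.create(symbol = net,
                                     X = train.x, y = train.y,
                                     array.layout = "colmajor",
                                     ctx = mx.cpu(),
                                     num.round = 2,
                                     array.batch.size = 10,
                                     eval.metric = mx.metric.Perplexity)
```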