From ab5cb0b6f1692e0e78ac46c8f16b50b587dd3083 Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Mon, 16 Feb 2015 22:50:22 +0200
Subject: [PATCH] Implement RMSprop

---
 examples/mnist/lenet_solver_rmsprop.prototxt  |  29 ++++
 examples/mnist/train_lenet_rmsprop.sh         |   3 +
 include/caffe/solver.hpp                      |  16 +++
 src/caffe/proto/caffe.proto                   |   7 +-
 src/caffe/solver.cpp                          |  82 ++++++++++
 src/caffe/test/test_gradient_based_solver.cpp | 133 ++++++++++++++---
 6 files changed, 254 insertions(+), 16 deletions(-)
 create mode 100644 examples/mnist/lenet_solver_rmsprop.prototxt
 create mode 100755 examples/mnist/train_lenet_rmsprop.sh

diff --git a/examples/mnist/lenet_solver_rmsprop.prototxt b/examples/mnist/lenet_solver_rmsprop.prototxt
new file mode 100644
index 00000000000..2c9724be447
--- /dev/null
+++ b/examples/mnist/lenet_solver_rmsprop.prototxt
@@ -0,0 +1,29 @@
+# The train/test net protocol buffer definition
+net: "examples/mnist/lenet_train_test.prototxt"
+# test_iter specifies how many forward passes the test should carry out.
+# In the case of MNIST, we have test batch size 100 and 100 test iterations,
+# covering the full 10,000 testing images.
+test_iter: 100
+# Carry out testing every 500 training iterations.
+test_interval: 500
+# The base learning rate, momentum and the weight decay of the network.
+base_lr: 0.01
+momentum: 0.0
+weight_decay: 0.0005
+# The learning rate policy
+lr_policy: "inv"
+gamma: 0.0001
+power: 0.75
+# Display every 100 iterations
+display: 100
+# The maximum number of iterations
+max_iter: 10000
+# snapshot intermediate results
+snapshot: 5000
+snapshot_prefix: "examples/mnist/lenet_rmsprop"
+# solver mode: CPU or GPU
+solver_mode: GPU
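+# RMSprop keeps a moving average of squared gradients; an rms_decay close
+# to 1 averages over a long gradient history, smaller values forget quickly.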
+solver_type: RMSPROP
+rms_decay: 0.98
diff --git a/examples/mnist/train_lenet_rmsprop.sh b/examples/mnist/train_lenet_rmsprop.sh
new file mode 100755
index 00000000000..621cab238bf
--- /dev/null
+++ b/examples/mnist/train_lenet_rmsprop.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env sh
+
+./build/tools/caffe train --solver=examples/mnist/lenet_solver_rmsprop.prototxt
diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp
index c2ced487d6f..f57e3cec843 100644
--- a/include/caffe/solver.hpp
+++ b/include/caffe/solver.hpp
@@ -128,6 +128,20 @@ class AdaGradSolver : public SGDSolver<Dtype> {
   DISABLE_COPY_AND_ASSIGN(AdaGradSolver);
 };
 
+template <typename Dtype>
+class RMSpropSolver : public SGDSolver<Dtype> {
+ public:
+  explicit RMSpropSolver(const SolverParameter& param)
+      : SGDSolver<Dtype>(param) {}
+  explicit RMSpropSolver(const string& param_file)
+      : SGDSolver<Dtype>(param_file) {}
+
+ protected:
+  virtual void ComputeUpdateValue(int param_id, Dtype rate);
+
+  DISABLE_COPY_AND_ASSIGN(RMSpropSolver);
+};
+
 template <typename Dtype>
 Solver<Dtype>* GetSolver(const SolverParameter& param) {
   SolverParameter_SolverType type = param.solver_type();
@@ -139,6 +153,8 @@ Solver<Dtype>* GetSolver(const SolverParameter& param) {
       return new NesterovSolver<Dtype>(param);
     case SolverParameter_SolverType_ADAGRAD:
       return new AdaGradSolver<Dtype>(param);
+    case SolverParameter_SolverType_RMSPROP:
+      return new RMSpropSolver<Dtype>(param);
     default:
       LOG(FATAL) << "Unknown SolverType: " << type;
   }
diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto
index 8c3f0723600..d2fd7ee1648 100644
--- a/src/caffe/proto/caffe.proto
+++ b/src/caffe/proto/caffe.proto
@@ -96,7 +96,7 @@ message NetParameter {
 // NOTE
 // Update the next available ID when you add a new SolverParameter field.
 //
-// SolverParameter next available ID: 37 (last added: iter_size)
+// SolverParameter next available ID: 38 (last added: rms_decay)
 message SolverParameter {
 //////////////////////////////////////////////////////////////////////////////
 // Specifying the train and test networks
@@ -191,10 +191,15 @@ message SolverParameter {
     SGD = 0;
     NESTEROV = 1;
     ADAGRAD = 2;
+    RMSPROP = 3;
   }
   optional SolverType solver_type = 30 [default = SGD];
   // numerical stability for AdaGrad
   optional float delta = 31 [default = 1e-8];
+
+  // RMSprop decay value
+  // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquaredGrad(t)
+  optional float rms_decay = 37;
 
   // If true, print information about the state of the net that may help with
   // debugging learning problems.
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp
index aabe0edec80..2a02dd307fe 100644
--- a/src/caffe/solver.cpp
+++ b/src/caffe/solver.cpp
@@ -775,9 +775,91 @@ void AdaGradSolver<Dtype>::ComputeUpdateValue(int param_id, Dtype rate) {
   }
 }
 
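+// RMSprop (Tieleman & Hinton, COURSERA Lecture 6.5) keeps a moving average
+// of the squared gradient and divides each step by its square root:
+//   MeanSquare(t) = rms_decay * MeanSquare(t-1) + (1 - rms_decay) * grad(t)^2
+//   update(t) = local_rate * grad(t) / (sqrt(MeanSquare(t)) + delta)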
+template <typename Dtype>
+void RMSpropSolver<Dtype>::ComputeUpdateValue(int param_id, Dtype rate) {
+  const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+  const vector<float>& net_params_lr = this->net_->params_lr();
+  Dtype delta = this->param_.delta();
+  Dtype rms_decay = this->param_.rms_decay();
+  Dtype local_rate = rate * net_params_lr[param_id];
+  // Weight decay is applied by the shared SGDSolver regularization code
+  // before this function runs, as for the other solvers.
+  switch (Caffe::mode()) {
+  case Caffe::CPU: {
+    // compute square of gradient in update
+    caffe_powx(net_params[param_id]->count(),
+        net_params[param_id]->cpu_diff(), Dtype(2),
+        this->update_[param_id]->mutable_cpu_data());
+
+    // update history
+    caffe_cpu_axpby(net_params[param_id]->count(),
+        Dtype(1) - rms_decay, this->update_[param_id]->cpu_data(),
+        rms_decay, this->history_[param_id]->mutable_cpu_data());
+
+    // prepare update: sqrt(history) + delta
+    caffe_powx(net_params[param_id]->count(),
+        this->history_[param_id]->cpu_data(), Dtype(0.5),
+        this->update_[param_id]->mutable_cpu_data());
+    caffe_add_scalar(net_params[param_id]->count(),
+        delta, this->update_[param_id]->mutable_cpu_data());
+
+    // divide the gradient by the RMS term and scale by the local rate
+    caffe_div(net_params[param_id]->count(),
+        net_params[param_id]->cpu_diff(),
+        this->update_[param_id]->cpu_data(),
+        this->update_[param_id]->mutable_cpu_data());
+    caffe_cpu_axpby(net_params[param_id]->count(), local_rate,
+        this->update_[param_id]->cpu_data(), Dtype(0),
+        net_params[param_id]->mutable_cpu_diff());
+    break;
+  }
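+  // The GPU branch mirrors the CPU arithmetic through the caffe_gpu_*
+  // wrappers.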
+  case Caffe::GPU: {
+#ifndef CPU_ONLY
+    // compute square of gradient in update
+    caffe_gpu_powx(net_params[param_id]->count(),
+        net_params[param_id]->gpu_diff(), Dtype(2),
+        this->update_[param_id]->mutable_gpu_data());
+
+    // update history
+    caffe_gpu_axpby(net_params[param_id]->count(),
+        Dtype(1) - rms_decay, this->update_[param_id]->gpu_data(),
+        rms_decay, this->history_[param_id]->mutable_gpu_data());
+
+    // prepare update: sqrt(history) + delta
+    caffe_gpu_powx(net_params[param_id]->count(),
+        this->history_[param_id]->gpu_data(), Dtype(0.5),
+        this->update_[param_id]->mutable_gpu_data());
+    caffe_gpu_add_scalar(net_params[param_id]->count(),
+        delta, this->update_[param_id]->mutable_gpu_data());
+
+    // divide the gradient by the RMS term and scale by the local rate
+    caffe_gpu_div(net_params[param_id]->count(),
+        net_params[param_id]->gpu_diff(),
+        this->update_[param_id]->gpu_data(),
+        this->update_[param_id]->mutable_gpu_data());
+    caffe_gpu_axpby(net_params[param_id]->count(), local_rate,
+        this->update_[param_id]->gpu_data(), Dtype(0),
+        net_params[param_id]->mutable_gpu_diff());
+#else
+    NO_GPU;
+#endif
+    break;
+  }
+  default:
+    LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode();
+  }
+}
+
 INSTANTIATE_CLASS(Solver);
 INSTANTIATE_CLASS(SGDSolver);
 INSTANTIATE_CLASS(NesterovSolver);
 INSTANTIATE_CLASS(AdaGradSolver);
+INSTANTIATE_CLASS(RMSpropSolver);
 
 }  // namespace caffe
diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp
index c9135d64e70..22dbfc0d8ab 100644
--- a/src/caffe/test/test_gradient_based_solver.cpp
+++ b/src/caffe/test/test_gradient_based_solver.cpp
@@ -51,13 +51,14 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
       LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode();
     }
     InitSolver(param);
-    delta_ = (solver_type() == SolverParameter_SolverType_ADAGRAD) ?
+    delta_ = (solver_type() == SolverParameter_SolverType_ADAGRAD ||
+              solver_type() == SolverParameter_SolverType_RMSPROP) ?
          param.delta() : 0;
   }
 
   void RunLeastSquaresSolver(const Dtype learning_rate,
-      const Dtype weight_decay, const Dtype momentum, const int num_iters,
-      const int iter_size = 1) {
+      const Dtype weight_decay, const Dtype momentum, const Dtype rms_decay,
+      const int num_iters, const int iter_size = 1) {
     ostringstream proto;
     proto <<
       "max_iter: " << num_iters << " "
@@ -119,6 +120,9 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
     if (momentum != 0) {
       proto << "momentum: " << momentum << " ";
     }
+    if (rms_decay != 0) {
+      proto << "rms_decay: " << rms_decay << " ";
+    }
     Caffe::set_random_seed(this->seed_);
     this->InitSolverFromProtoString(proto.str());
     this->solver_->Solve();
@@ -129,7 +133,9 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
   // updated_params will store the updated weight and bias results,
   // using the blobs' diffs to hold the update values themselves.
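+  // rms_decay is only read by the RMSPROP branch below; the other solver
+  // types ignore it.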
   void ComputeLeastSquaresUpdate(const Dtype learning_rate,
-      const Dtype weight_decay, const Dtype momentum,
+      const Dtype weight_decay, const Dtype momentum, const Dtype rms_decay,
       vector<shared_ptr<Blob<Dtype> > >* updated_params) {
     const int N = num_;
     const int D = channels_ * height_ * width_;
@@ -212,6 +218,10 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
         case SolverParameter_SolverType_ADAGRAD:
           update_value /= std::sqrt(history_value + grad * grad) + delta_;
           break;
+        case SolverParameter_SolverType_RMSPROP:
+          update_value /= std::sqrt(rms_decay * history_value
+              + grad * grad * (1 - rms_decay)) + delta_;
+          break;
         default:
           LOG(FATAL) << "Unknown solver type: " << solver_type();
       }
@@ -332,18 +342,19 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
   // matches the solver's (K+1)th update.
   void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0,
       const Dtype weight_decay = 0.0, const Dtype momentum = 0.0,
-      const int iter_to_check = 0) {
+      const Dtype rms_decay = 0.0, const int iter_to_check = 0) {
     // Initialize the solver and run K (= iter_to_check) solver iterations.
-    RunLeastSquaresSolver(learning_rate, weight_decay, momentum, iter_to_check);
+    RunLeastSquaresSolver(learning_rate, weight_decay, momentum, rms_decay,
+        iter_to_check);
 
     // Compute the (K+1)th update using the analytic least squares gradient.
     vector<shared_ptr<Blob<Dtype> > > updated_params;
     ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum,
-        &updated_params);
+        rms_decay, &updated_params);
 
     // Reinitialize the solver and run K+1 solver iterations.
     RunLeastSquaresSolver(learning_rate, weight_decay, momentum,
-        iter_to_check + 1);
+        rms_decay, iter_to_check + 1);
 
     // Check that the solver's solution matches ours.
     CheckLeastSquaresUpdate(updated_params);
@@ -391,7 +402,21 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) {
   const Dtype kMomentum = 0.5;
   const int kNumIters = 1;
   for (int i = 0; i <= kNumIters; ++i) {
-    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i);
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        0.0, i);
   }
 }
 
+TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithRmsDecay) {
+  typedef typename TypeParam::Dtype Dtype;
+  const Dtype kLearningRate = 1.0;
+  const Dtype kWeightDecay = 0.0;
+  const Dtype kMomentum = 0.5;
+  const Dtype kRmsDecay = 0.0;
+  const int kNumIters = 1;
+  for (int i = 0; i <= kNumIters; ++i) {
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        kRmsDecay, i);
+  }
+}
+
@@ -402,7 +427,8 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) {
   const Dtype kMomentum = 0.5;
   const int kNumIters = 4;
   for (int i = 0; i <= kNumIters; ++i) {
-    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i);
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        0.0, i);
   }
 }
 
@@ -413,7 +439,8 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) {
   const Dtype kMomentum = 0.9;
   const int kNumIters = 4;
   for (int i = 0; i <= kNumIters; ++i) {
-    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i);
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        0.0, i);
   }
 }
 
@@ -465,21 +492,93 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) {
   const Dtype kLearningRate = 0.01;
   const Dtype kWeightDecay = 0.1;
   const Dtype kMomentum = 0.0;
+  const Dtype kRmsDecay = 0.0;
   const int kNumIters = 4;
   for (int i = 0; i <= kNumIters; ++i) {
-    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i);
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        kRmsDecay, i);
   }
 }
 
 TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) {
   typedef typename TypeParam::Dtype Dtype;
   const Dtype kLearningRate = 0.01;
   const Dtype kWeightDecay = 0.1;
   const Dtype kMomentum = 0.0;
   const int kNumIters = 4;
   const int kIterSize = 2;
   this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters,
       kIterSize);
 }
 
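+// Checks the RMSprop solver against the analytic update computed by
+// ComputeLeastSquaresUpdate for several rms_decay settings.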
+template <typename TypeParam>
+class RMSpropSolverTest : public GradientBasedSolverTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
+ protected:
+  virtual void InitSolver(const SolverParameter& param) {
+    this->solver_.reset(new RMSpropSolver<Dtype>(param));
+  }
+  virtual SolverParameter_SolverType solver_type() {
+    return SolverParameter_SolverType_RMSPROP;
+  }
+};
+
+TYPED_TEST_CASE(RMSpropSolverTest, TestDtypesAndDevices);
+
+TYPED_TEST(RMSpropSolverTest, TestRMSpropLeastSquaresUpdate) {
+  this->TestLeastSquaresUpdate();
+}
+
+TYPED_TEST(RMSpropSolverTest, TestRMSpropLeastSquaresUpdateLROneTenth) {
+  typedef typename TypeParam::Dtype Dtype;
+  const Dtype kLearningRate = 0.1;
+  this->TestLeastSquaresUpdate(kLearningRate);
+}
+
+TYPED_TEST(RMSpropSolverTest, TestRMSpropLeastSquaresUpdateWithWeightDecay) {
+  typedef typename TypeParam::Dtype Dtype;
+  const Dtype kLearningRate = 1.0;
+  const Dtype kWeightDecay = 0.5;
+  this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay);
+}
+
+TYPED_TEST(RMSpropSolverTest, TestRMSpropLeastSquaresUpdateWithRmsDecay) {
+  typedef typename TypeParam::Dtype Dtype;
+  const Dtype kLearningRate = 0.01;
+  const Dtype kWeightDecay = 0.1;
+  const Dtype kMomentum = 0.0;
+  const Dtype kRmsDecay = 0.95;
+  const int kNumIters = 4;
+  for (int i = 0; i <= kNumIters; ++i) {
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        kRmsDecay, i);
+  }
+}
+
+TYPED_TEST(RMSpropSolverTest, TestRMSpropLeastSquaresUpdateWithMomentum) {
+  typedef typename TypeParam::Dtype Dtype;
+  const Dtype kLearningRate = 0.01;
+  const Dtype kWeightDecay = 0.1;
+  const Dtype kMomentum = 0.9;
+  const Dtype kRmsDecay = 0.0;
+  const int kNumIters = 4;
+  for (int i = 0; i <= kNumIters; ++i) {
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        kRmsDecay, i);
+  }
+}
+
+TYPED_TEST(RMSpropSolverTest, TestRMSpropLeastSquaresUpdateWithEverything) {
+  typedef typename TypeParam::Dtype Dtype;
+  const Dtype kLearningRate = 0.01;
+  const Dtype kWeightDecay = 0.1;
+  const Dtype kMomentum = 0.9;
+  const Dtype kRmsDecay = 0.95;
+  const int kNumIters = 4;
+  for (int i = 0; i <= kNumIters; ++i) {
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        kRmsDecay, i);
+  }
+}
+
 template <typename TypeParam>
@@ -521,7 +620,8 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) {
   const Dtype kMomentum = 0.5;
   const int kNumIters = 1;
   for (int i = 0; i <= kNumIters; ++i) {
-    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i);
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        0.0, i);
   }
 }
 
@@ -532,7 +632,8 @@ TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) {
   const Dtype kMomentum = 0.5;
   const int kNumIters = 4;
   for (int i = 0; i <= kNumIters; ++i) {
-    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i);
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        0.0, i);
   }
 }
 
@@ -541,9 +642,11 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) {
   const Dtype kLearningRate = 0.01;
   const Dtype kWeightDecay = 0.1;
   const Dtype kMomentum = 0.9;
+  const Dtype kRmsDecay = 0.0;
   const int kNumIters = 4;
   for (int i = 0; i <= kNumIters; ++i) {
-    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i);
+    this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum,
+        kRmsDecay, i);
   }
 }