
Commit f85dfa2

reduce duplicate computation in poisson, gamma, and tweedie objectives (#4950)

* ENH save computations of exp in objectives

* CLN missing declaration
lorentzenchr authored Jan 20, 2022
1 parent a06fadf commit f85dfa2
Showing 1 changed file with 23 additions and 14 deletions.
src/objective/regression_objective.hpp (23 additions, 14 deletions)
@@ -439,17 +439,20 @@ class RegressionPoissonLoss: public RegressionL2loss {
    */
   void GetGradients(const double* score, score_t* gradients,
                     score_t* hessians) const override {
+    double exp_max_delta_step_ = std::exp(max_delta_step_);
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>(std::exp(score[i]) - label_[i]);
-        hessians[i] = static_cast<score_t>(std::exp(score[i] + max_delta_step_));
+        double exp_score = std::exp(score[i]);
+        gradients[i] = static_cast<score_t>(exp_score - label_[i]);
+        hessians[i] = static_cast<score_t>(exp_score * exp_max_delta_step_);
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>((std::exp(score[i]) - label_[i]) * weights_[i]);
-        hessians[i] = static_cast<score_t>(std::exp(score[i] + max_delta_step_) * weights_[i]);
+        double exp_score = std::exp(score[i]);
+        gradients[i] = static_cast<score_t>((exp_score - label_[i]) * weights_[i]);
+        hessians[i] = static_cast<score_t>(exp_score * exp_max_delta_step_ * weights_[i]);
       }
     }
   }
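For context (a standard derivation, not part of the diff): with the log link, the per-observation Poisson negative log-likelihood is, up to constants,

\ell(s) = e^{s} - y\,s, \qquad \frac{\partial \ell}{\partial s} = e^{s} - y, \qquad \frac{\partial^{2} \ell}{\partial s^{2}} = e^{s},

and LightGBM additionally scales the hessian by e^{\texttt{max\_delta\_step\_}}, its safeguard parameter for Poisson optimization. The rewrite is exact because e^{s + \delta} = e^{s} e^{\delta}, so each iteration now pays for one std::exp instead of two.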
@@ -689,14 +692,16 @@ class RegressionGammaLoss : public RegressionPoissonLoss {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>(1.0 - label_[i] * std::exp(-score[i]));
-        hessians[i] = static_cast<score_t>(label_[i] * std::exp(-score[i]));
+        double exp_score = std::exp(-score[i]);
+        gradients[i] = static_cast<score_t>(1.0 - label_[i] * exp_score);
+        hessians[i] = static_cast<score_t>(label_[i] * exp_score);
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>((1.0 - label_[i] * std::exp(-score[i])) * weights_[i]);
-        hessians[i] = static_cast<score_t>(label_[i] * std::exp(-score[i]) * weights_[i]);
+        double exp_score = std::exp(-score[i]);
+        gradients[i] = static_cast<score_t>((1.0 - label_[i] * exp_score) * weights_[i]);
+        hessians[i] = static_cast<score_t>(label_[i] * exp_score * weights_[i]);
       }
     }
   }
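The Gamma hunk follows the same pattern; assuming the standard Gamma deviance with log link, the per-observation loss is, up to constants,

\ell(s) = y\,e^{-s} + s, \qquad \frac{\partial \ell}{\partial s} = 1 - y\,e^{-s}, \qquad \frac{\partial^{2} \ell}{\partial s^{2}} = y\,e^{-s},

so a single cached std::exp(-score[i]) serves both the gradient and the hessian.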
@@ -725,16 +730,20 @@ class RegressionTweedieLoss: public RegressionPoissonLoss {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>(-label_[i] * std::exp((1 - rho_) * score[i]) + std::exp((2 - rho_) * score[i]));
-        hessians[i] = static_cast<score_t>(-label_[i] * (1 - rho_) * std::exp((1 - rho_) * score[i]) +
-                                           (2 - rho_) * std::exp((2 - rho_) * score[i]));
+        double exp_1_score = std::exp((1 - rho_) * score[i]);
+        double exp_2_score = std::exp((2 - rho_) * score[i]);
+        gradients[i] = static_cast<score_t>(-label_[i] * exp_1_score + exp_2_score);
+        hessians[i] = static_cast<score_t>(-label_[i] * (1 - rho_) * exp_1_score +
+                                           (2 - rho_) * exp_2_score);
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>((-label_[i] * std::exp((1 - rho_) * score[i]) + std::exp((2 - rho_) * score[i])) * weights_[i]);
-        hessians[i] = static_cast<score_t>((-label_[i] * (1 - rho_) * std::exp((1 - rho_) * score[i]) +
-                                           (2 - rho_) * std::exp((2 - rho_) * score[i])) * weights_[i]);
+        double exp_1_score = std::exp((1 - rho_) * score[i]);
+        double exp_2_score = std::exp((2 - rho_) * score[i]);
+        gradients[i] = static_cast<score_t>((-label_[i] * exp_1_score + exp_2_score) * weights_[i]);
+        hessians[i] = static_cast<score_t>((-label_[i] * (1 - rho_) * exp_1_score +
+                                           (2 - rho_) * exp_2_score) * weights_[i]);
       }
     }
   }
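For Tweedie with variance power \rho (rho_) and log link, the per-observation loss is, up to constants (assuming the standard Tweedie deviance),

\ell(s) = -y\,\frac{e^{(1-\rho)s}}{1-\rho} + \frac{e^{(2-\rho)s}}{2-\rho},

whose first and second derivatives are exactly the gradient and hessian above:

\frac{\partial \ell}{\partial s} = -y\,e^{(1-\rho)s} + e^{(2-\rho)s}, \qquad \frac{\partial^{2} \ell}{\partial s^{2}} = -y\,(1-\rho)\,e^{(1-\rho)s} + (2-\rho)\,e^{(2-\rho)s}.

Both derivatives reuse the same two exponentials, so caching exp_1_score and exp_2_score cuts four std::exp calls per observation down to two.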
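Taken together, the commit applies one micro-optimization throughout: hoist the loop-invariant std::exp(max_delta_step_) out of the loop, and cache each per-observation exponential in a local so the gradient and hessian share it. Below is a minimal standalone sketch of that pattern, using toy data and a hypothetical max_delta_step value rather than the real LightGBM members, which checks that the cached Poisson hessian matches the original form up to rounding:

    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical inputs; stand-ins for LightGBM's score, label_, max_delta_step_.
      const double max_delta_step = 0.7;
      const std::vector<double> score = {-1.5, 0.0, 0.3, 2.1};
      const std::vector<double> label = {0.0, 1.0, 2.0, 5.0};

      // Loop-invariant factor hoisted once, as in the patched Poisson objective.
      const double exp_max_delta_step = std::exp(max_delta_step);

      for (std::size_t i = 0; i < score.size(); ++i) {
        // Original form: a second std::exp call per iteration.
        const double hess_old = std::exp(score[i] + max_delta_step);

        // Patched form: one std::exp per iteration, shared by gradient and hessian.
        const double exp_score = std::exp(score[i]);
        const double grad = exp_score - label[i];
        const double hess_new = exp_score * exp_max_delta_step;

        // exp(a + b) == exp(a) * exp(b), so both forms agree up to rounding.
        assert(std::fabs(hess_new - hess_old) <= 1e-12 * hess_old);
        std::printf("i=%zu grad=%g hess=%g\n", i, grad, hess_new);
      }
      return 0;
    }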
