Commit

remove evals_result argument from train() function
StrikerRUS committed Dec 12, 2021
1 parent 9f13a9c commit 0c9eccf
Showing 5 changed files with 341 additions and 148 deletions.
@@ -148,8 +148,10 @@
 " valid_sets=[lgb_train, lgb_test],\n",
 " feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],\n",
 " categorical_feature=[21],\n",
-" evals_result=evals_result,\n",
-" callbacks=[lgb.log_evaluation(10)])"
+" callbacks=[\n",
+" lgb.log_evaluation(10),\n",
+" lgb.record_evaluation(evals_result)\n",
+" ])"
 ]
 },
 {
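For context on the change above: a minimal, self-contained sketch of the callback-based pattern the notebook cell now uses. The synthetic data, parameter values, and printed dataset names are illustrative assumptions, not part of this commit; only the log_evaluation()/record_evaluation() usage mirrors the diff.

import numpy as np
import lightgbm as lgb

# toy regression data standing in for the notebook's real dataset (assumption)
rng = np.random.default_rng(0)
X = rng.normal(size=(500, 10))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=500)
X_train, X_test, y_train, y_test = X[:400], X[400:], y[:400], y[400:]

lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)

params = {'objective': 'regression', 'metric': ['l1', 'l2'], 'verbose': -1}

evals_result = {}  # filled in place by the record_evaluation() callback
gbm = lgb.train(
    params,
    lgb_train,
    num_boost_round=100,
    valid_sets=[lgb_train, lgb_test],
    callbacks=[
        lgb.log_evaluation(10),               # logs metrics every 10 rounds
        lgb.record_evaluation(evals_result),  # replaces the removed evals_result= argument
    ],
)

print(sorted(evals_result))  # e.g. ['training', 'valid_1'] (one entry per evaluated dataset)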
20 changes: 12 additions & 8 deletions examples/python-guide/plot_example.py
@@ -36,14 +36,18 @@
 
 print('Starting training...')
 # train
-gbm = lgb.train(params,
-                lgb_train,
-                num_boost_round=100,
-                valid_sets=[lgb_train, lgb_test],
-                feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
-                categorical_feature=[21],
-                evals_result=evals_result,
-                callbacks=[lgb.log_evaluation(10)])
+gbm = lgb.train(
+    params,
+    lgb_train,
+    num_boost_round=100,
+    valid_sets=[lgb_train, lgb_test],
+    feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
+    categorical_feature=[21],
+    callbacks=[
+        lgb.log_evaluation(10),
+        lgb.record_evaluation(evals_result)
+    ]
+)
 
 print('Plotting metrics recorded during training...')
 ax = lgb.plot_metric(evals_result, metric='l1')
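plot_example.py goes on to pass the recorded dictionary to lgb.plot_metric(), which this commit leaves untouched. A small sketch showing that plot_metric() accepts such a dict directly; the dataset names and metric values below are made up for illustration, and matplotlib is assumed to be installed (plot_example.py already depends on it).

import lightgbm as lgb
import matplotlib.pyplot as plt

# a hand-built dict with the same shape record_evaluation() produces:
# {dataset name: {metric name: [value per boosting round, ...]}}
fake_evals_result = {
    'training': {'l1': [0.90, 0.70, 0.55, 0.45, 0.40]},
    'valid_1': {'l1': [0.95, 0.80, 0.65, 0.60, 0.58]},
}

ax = lgb.plot_metric(fake_evals_result, metric='l1')
plt.show()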
19 changes: 0 additions & 19 deletions python-package/lightgbm/engine.py
@@ -34,7 +34,6 @@ def train(
     feature_name: Union[List[str], str] = 'auto',
     categorical_feature: Union[List[str], List[int], str] = 'auto',
     early_stopping_rounds: Optional[int] = None,
-    evals_result: Optional[Dict[str, Any]] = None,
     keep_training_booster: bool = False,
     callbacks: Optional[List[Callable]] = None
 ) -> Booster:
@@ -119,19 +118,6 @@
         To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``.
         The index of iteration that has the best performance will be saved in the ``best_iteration`` field
         if early stopping logic is enabled by setting ``early_stopping_rounds``.
-    evals_result : dict or None, optional (default=None)
-        Dictionary used to store all evaluation results of all the items in ``valid_sets``.
-        This should be initialized outside of your call to ``train()`` and should be empty.
-        Any initial contents of the dictionary will be deleted.
-
-        .. rubric:: Example
-
-        With a ``valid_sets`` = [valid_set, train_set],
-        ``valid_names`` = ['eval', 'train']
-        and a ``params`` = {'metric': 'logloss'}
-        returns {'train': {'logloss': ['0.48253', '0.35953', ...]},
-        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
-
     keep_training_booster : bool, optional (default=False)
         Whether the returned Booster will be used to keep training.
         If False, the returned value will be converted into _InnerPredictor before returning.
@@ -221,11 +207,6 @@
     if early_stopping_rounds is not None and early_stopping_rounds > 0:
         callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only))
 
-    if evals_result is not None:
-        _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. "
-                     "Pass 'record_evaluation()' callback via 'callbacks' argument instead.")
-        callbacks_set.add(callback.record_evaluation(evals_result))
-
     callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, 'before_iteration', False)}
     callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set
     callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter('order'))
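Net effect of the engine.py changes: passing the old keyword now fails immediately instead of taking the removed deprecation path, and the dictionary structure described in the deleted docstring example is produced by the record_evaluation() callback instead. A hedged sketch of both, assuming a LightGBM build that includes this commit; the data and parameter values are illustrative.

import numpy as np
import lightgbm as lgb

rng = np.random.default_rng(42)
X = rng.normal(size=(400, 5))
y = (X[:, 0] + rng.normal(scale=0.5, size=400) > 0).astype(int)

train_set = lgb.Dataset(X[:300], y[:300])
valid_set = lgb.Dataset(X[300:], y[300:], reference=train_set)
params = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1}

evals_result = {}
try:
    # the keyword was removed from the signature, so this raises rather than warns
    lgb.train(params, train_set, num_boost_round=5, evals_result=evals_result)
except TypeError as err:
    print(err)  # e.g. "train() got an unexpected keyword argument 'evals_result'"

# supported replacement: the callback fills the dict with the structure the
# removed docstring example described, e.g.
# {'train': {'binary_logloss': [...]}, 'eval': {'binary_logloss': [...]}}
lgb.train(
    params,
    train_set,
    num_boost_round=20,
    valid_sets=[valid_set, train_set],
    valid_names=['eval', 'train'],
    callbacks=[lgb.record_evaluation(evals_result)],
)
print(sorted(evals_result))  # ['eval', 'train']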
