Source: scikit-optimize
Version: 0.10.2-4
Severity: important
User: [email protected]
Usertags: scikit-learn-1.7

-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512

Dear maintainer,

your package has an autopkgtest regression with scikit-learn 1.7.2.
Relevant excerpt from 
https://ci.debian.net/packages/s/scikit-optimize/unstable/amd64/65190549/ 
follows:


483s =================================== FAILURES 
===================================
483s _______________ test_minimizer_api[minimizer7-call_single-True] 
________________
483s
483s verbose = True, call = <function call_single at 0x7f8fed034220>
483s minimizer = functools.partial(<function gbrt_minimize at 0x7f8fecff1580>, 
acq_func='LCB')
483s
483s     @pytest.mark.slow_test
483s     @pytest.mark.parametrize("verbose", [True, False])
483s     @pytest.mark.parametrize("call", [call_single, [call_single, 
check_result_callable]])
483s     @pytest.mark.parametrize("minimizer", MINIMIZERS)
483s     def test_minimizer_api(verbose, call, minimizer):
483s         n_calls = 7
483s         n_initial_points = 3
483s         n_models = n_calls - n_initial_points + 1
483s
483s >       result = minimizer(
483s             branin,
483s             [(-5.0, 10.0), (0.0, 15.0)],
483s             n_initial_points=n_initial_points,
483s             n_calls=n_calls,
483s             random_state=1,
483s             verbose=verbose,
483s             callback=call,
483s         )
483s
483s tests/test_common.py:112:
483s _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _
483s /usr/lib/python3/dist-packages/skopt/optimizer/gbrt.py:197: in 
gbrt_minimize
483s     return base_minimize(
483s /usr/lib/python3/dist-packages/skopt/optimizer/base.py:276: in 
base_minimize
483s     optimizer = Optimizer(
483s _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _
483s
483s self = <skopt.optimizer.optimizer.Optimizer object at 0x7f8feb1c2d50>
483s dimensions = [(-5.0, 10.0), (0.0, 15.0)]
483s base_estimator = 
GradientBoostingQuantileRegressor(base_estimator=GradientBoostingRegressor(loss='quantile',
483s                           ...    quantiles=[0.16, 0.5, 0.84],
483s                                   random_state=RandomState(MT19937) at 
0x7F8FF0D98440)
483s n_random_starts = None, n_initial_points = 3, initial_point_generator = 
'random'
483s n_jobs = 1, acq_func = 'LCB', acq_optimizer = 'sampling', random_state = 1
483s model_queue_size = None, space_constraint = None
483s acq_func_kwargs = {'kappa': 1.96, 'xi': 0.01}
483s acq_optimizer_kwargs = {'n_jobs': 1, 'n_points': 10000, 
'n_restarts_optimizer': 5}
483s avoid_duplicates = True
483s
483s     def __init__(
483s         self,
483s         dimensions,
483s         base_estimator="gp",
483s         n_random_starts=None,
483s         n_initial_points=10,
483s         initial_point_generator="random",
483s         n_jobs=1,
483s         acq_func="gp_hedge",
483s         acq_optimizer="auto",
483s         random_state=None,
483s         model_queue_size=None,
483s         space_constraint=None,
483s         acq_func_kwargs=None,
483s         acq_optimizer_kwargs=None,
483s         avoid_duplicates=True,
483s     ):
483s         args = locals().copy()
483s         del args['self']
483s         self.specs = {"args": args, "function": "Optimizer"}
483s         self.rng = check_random_state(random_state)
483s
483s         # Configure acquisition function
483s
483s         # Store and creat acquisition function set
483s         self.acq_func = acq_func
483s         self.acq_func_kwargs = acq_func_kwargs
483s         self.avoid_duplicates = avoid_duplicates
483s
483s         allowed_acq_funcs = [
483s             "gp_hedge",
483s             "EI",
483s             "LCB",
483s             "MES",
483s             "PVRS",
483s             "PI",
483s             "EIps",
483s             "PIps",
483s         ]
483s         if self.acq_func not in allowed_acq_funcs:
483s             raise ValueError(
483s                 "expected acq_func to be in %s, got %s"
483s                 % (",".join(allowed_acq_funcs), self.acq_func)
483s             )
483s
483s         # treat hedging method separately
483s         if self.acq_func == "gp_hedge":
483s             self.cand_acq_funcs_ = ["EI", "LCB", "PI"]
483s             self.gains_ = np.zeros(3)
483s         else:
483s             self.cand_acq_funcs_ = [self.acq_func]
483s
483s         if acq_func_kwargs is None:
483s             acq_func_kwargs = dict()
483s         self.eta = acq_func_kwargs.get("eta", 1.0)
483s
483s         # Configure counters of points
483s
483s         # Check `n_random_starts` deprecation first
483s         if n_random_starts is not None:
483s             warnings.warn(
483s                 ("n_random_starts will be removed in favour of " 
"n_initial_points."),
483s                 DeprecationWarning,
483s             )
483s             n_initial_points = n_random_starts
483s
483s         if n_initial_points < 0:
483s             raise ValueError(
483s                 "Expected `n_initial_points` >= 0, got %d" % 
n_initial_points
483s             )
483s         self._n_initial_points = n_initial_points
483s         self.n_initial_points_ = n_initial_points
483s
483s         # Configure estimator
483s
483s         # build base_estimator if doesn't exist
483s         if isinstance(base_estimator, str):
483s             base_estimator = cook_estimator(
483s                 base_estimator,
483s                 space=dimensions,
483s                 random_state=self.rng.randint(0, np.iinfo(np.int32).max),
483s                 n_jobs=n_jobs,
483s             )
483s         # check if regressor
483s         if not is_regressor(base_estimator) and base_estimator is not None:
483s >           raise ValueError("%s has to be a regressor." % base_estimator)
483s E           ValueError: 
GradientBoostingQuantileRegressor(base_estimator=GradientBoostingRegressor(loss='quantile',
483s E                                                                          
            n_estimators=30),
483s E                                             quantiles=[0.16, 0.5, 0.84],
483s E                                             
random_state=RandomState(MT19937) at 0x7F8FF0D98440) has to be a regressor.
483s
483s /usr/lib/python3/dist-packages/skopt/optimizer/optimizer.py:257: ValueError


Cheers
Timo


-----BEGIN PGP SIGNATURE-----

iQIzBAEBCgAdFiEEmwPruYMA35fCsSO/zIxr3RQD9MoFAmjs7J4ACgkQzIxr3RQD
9Moohg/9EnKczWoWNHeaW3C+HhoYmp7BZold/yi7CswmMxcwpxGKeq7Kc90VPht+
1QS9nbRPZqkTUawQU7I/uRMymhVouTaUcYcRl19N4Qe5hjdow4RmgJOWLjjFYbKt
Rn7g4/MhaLEHjHFeVW84VIJUZ3s/WBS2XJBmxfBnbUj0A04lAnDowvRRFWTlVWS3
PduLUkTDRDhdjo7XSvatYYZWbRk1Oa6TRkTT7tEextgN2ROdBqtf6INrflk0GzKD
5cLZSnn/MozGvFOdpF3MVM65m0fnEai8to8VGlTg/Tbe2gxxpZAEkVOHOdMM0GGW
dShoXTeMY/kOVTHeYBIT7/rp3D1s3xzeInDkPLaE7hAZc3rZ5329hV1yteevSPBn
iNiGdBaG44XjecYW6FzYQEcNeEvdRy6DezCY14J7DEvfZuVi92kARqNRWS2vOr0b
R0fGJFiGJjf031717ZwEATCdZuBsOmTeXkvy/gY+jAhde0Qelyw3RkiQq0Ptj/eT
nwxO4ue5jjksXrWtXOKe/NxvHz0jn02x+hgizak86SrAFA+8ExolGxP+UUjmz3t1
/pH5Jxe1gzS4srrfrtqrxiRRRcGgKE1yjwSGZ1tzAWnO540qH24lRPpuznZYO7JQ
BP8R/UcKHqwcCDQdBrfqxZXcqLu0fAttcm2rHT5c8noQGK5rTqM=
=FNWx
-----END PGP SIGNATURE-----

Reply via email to