xgboost.testing.params Namespace Reference

Variables

 strategies = pytest.importorskip("hypothesis.strategies")
 
 exact_parameter_strategy
 
 hist_parameter_strategy
 
 hist_cache_strategy
 
 hist_multi_parameter_strategy
 
 cat_parameter_strategy
 
 lambdarank_parameter_strategy
 

Detailed Description

Strategies for updater tests.
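
These variables are hypothesis search strategies: each one draws a dictionary of booster parameters that a test can pass straight to xgboost. A minimal sketch of how one of them might be consumed in a pytest test is shown below; the synthetic data, round count, and loss check are illustrative assumptions, not part of this module.

from typing import Any, Dict

import numpy as np
import xgboost as xgb
from hypothesis import given, settings

from xgboost.testing.params import hist_parameter_strategy


@given(params=hist_parameter_strategy)
@settings(deadline=None, max_examples=10)
def test_hist_updater(params: Dict[str, Any]) -> None:
    # Small synthetic regression problem; the strategy supplies the tree parameters.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(256, 8))
    y = X[:, 0] + rng.normal(scale=0.1, size=256)
    dtrain = xgb.DMatrix(X, label=y)

    params = {**params, "tree_method": "hist", "objective": "reg:squarederror"}
    evals_result: Dict[str, Dict[str, list]] = {}
    xgb.train(
        params,
        dtrain,
        num_boost_round=8,
        evals=[(dtrain, "train")],
        evals_result=evals_result,
    )
    # With subsampling disabled (see the strategy comments) the training RMSE
    # should not end up above where it started.
    rmse = evals_result["train"]["rmse"]
    assert rmse[-1] <= rmse[0]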

Variable Documentation

◆ cat_parameter_strategy

xgboost.testing.params.cat_parameter_strategy
Initial value:
= strategies.fixed_dictionaries(
    {
        "max_cat_to_onehot": strategies.integers(1, 128),
        "max_cat_threshold": strategies.integers(1, 128),
    }
)

◆ exact_parameter_strategy

xgboost.testing.params.exact_parameter_strategy
Initial value:
= strategies.fixed_dictionaries(
    {
        "nthread": strategies.integers(1, 4),
        "max_depth": strategies.integers(1, 11),
        "min_child_weight": strategies.floats(0.5, 2.0),
        "alpha": strategies.floats(1e-5, 2.0),
        "lambda": strategies.floats(1e-5, 2.0),
        "eta": strategies.floats(0.01, 0.5),
        "gamma": strategies.floats(1e-5, 2.0),
        "seed": strategies.integers(0, 10),
        # We cannot enable subsampling as the training loss can increase
        # 'subsample': strategies.floats(0.5, 1.0),
        "colsample_bytree": strategies.floats(0.5, 1.0),
        "colsample_bylevel": strategies.floats(0.5, 1.0),
    }
)

◆ hist_cache_strategy

xgboost.testing.params.hist_cache_strategy
Initial value:
= strategies.fixed_dictionaries(
    {"max_cached_hist_node": strategies.sampled_from([1, 4, 1024, 2**31])}
)

◆ hist_multi_parameter_strategy

xgboost.testing.params.hist_multi_parameter_strategy
Initial value:
= strategies.fixed_dictionaries(
    {
        "max_depth": strategies.integers(1, 11),
        "max_leaves": strategies.integers(0, 1024),
        "max_bin": strategies.integers(2, 512),
        "multi_strategy": strategies.sampled_from(
            ["multi_output_tree", "one_output_per_tree"]
        ),
        "grow_policy": strategies.sampled_from(["lossguide", "depthwise"]),
        "min_child_weight": strategies.floats(0.5, 2.0),
        # We cannot enable subsampling as the training loss can increase
        # 'subsample': strategies.floats(0.5, 1.0),
        "colsample_bytree": strategies.floats(0.5, 1.0),
        "colsample_bylevel": strategies.floats(0.5, 1.0),
    }
).filter(
    # Keep only bounded trees: at least one of max_depth/max_leaves must be
    # positive, and depthwise growth requires a positive max_depth.
    lambda x: (cast(int, x["max_depth"]) > 0 or cast(int, x["max_leaves"]) > 0)
    and (cast(int, x["max_depth"]) > 0 or x["grow_policy"] == "lossguide")
)

◆ hist_parameter_strategy

xgboost.testing.params.hist_parameter_strategy
Initial value:
= strategies.fixed_dictionaries(
    {
        "max_depth": strategies.integers(1, 11),
        "max_leaves": strategies.integers(0, 1024),
        "max_bin": strategies.integers(2, 512),
        "grow_policy": strategies.sampled_from(["lossguide", "depthwise"]),
        "min_child_weight": strategies.floats(0.5, 2.0),
        # We cannot enable subsampling as the training loss can increase
        # 'subsample': strategies.floats(0.5, 1.0),
        "colsample_bytree": strategies.floats(0.5, 1.0),
        "colsample_bylevel": strategies.floats(0.5, 1.0),
    }
).filter(
    # Keep only bounded trees: at least one of max_depth/max_leaves must be
    # positive, and depthwise growth requires a positive max_depth.
    lambda x: (cast(int, x["max_depth"]) > 0 or cast(int, x["max_leaves"]) > 0)
    and (cast(int, x["max_depth"]) > 0 or x["grow_policy"] == "lossguide")
)

◆ lambdarank_parameter_strategy

xgboost.testing.params.lambdarank_parameter_strategy
Initial value:
= strategies.fixed_dictionaries(
    {
        "lambdarank_unbiased": strategies.sampled_from([True, False]),
        "lambdarank_pair_method": strategies.sampled_from(["topk", "mean"]),
        "lambdarank_num_pair_per_sample": strategies.integers(1, 8),
        "lambdarank_bias_norm": strategies.floats(0.5, 2.0),
        "objective": strategies.sampled_from(
            ["rank:ndcg", "rank:map", "rank:pairwise"]
        ),
    }
).filter(
    # Position debiasing is only exercised together with the "topk" pair method.
    lambda x: not (x["lambdarank_unbiased"] and x["lambdarank_pair_method"] == "mean")
)
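
Unlike the tree-parameter strategies above, the ranking strategy feeds a model that needs query-grouped data. A sketch of one way it might be wired into a test, using XGBRanker with a qid column; the data layout, model settings, and shape check are illustrative assumptions, not part of this module.

from typing import Any, Dict

import numpy as np
import xgboost as xgb
from hypothesis import given, settings

from xgboost.testing.params import lambdarank_parameter_strategy


@given(params=lambdarank_parameter_strategy)
@settings(deadline=None, max_examples=10)
def test_lambdarank_updater(params: Dict[str, Any]) -> None:
    # Ranking objectives need query groups; qid marks which rows belong to which query.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 4))
    y = rng.integers(0, 2, size=200)     # binary relevance works for all three objectives
    qid = np.repeat(np.arange(20), 10)   # 20 queries of 10 documents each

    ranker = xgb.XGBRanker(n_estimators=8, tree_method="hist", **params)
    ranker.fit(X, y, qid=qid)
    # The fitted model should produce one score per document.
    assert ranker.predict(X).shape == (200,)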