  // Sentinel parent id for the root node: the largest positive bst_node_t.
  static constexpr bst_node_t kRootParentId =
      (-1 & static_cast<bst_node_t>((1U << 31) - 1));

  HostDeviceVector<float> lower_bounds_;
  HostDeviceVector<float> upper_bounds_;
  HostDeviceVector<int32_t> monotone_;
  int32_t device_;
  bool has_constraint_;

 public:
  TreeEvaluator(TrainParam const& p, bst_feature_t n_features, int32_t device) {
    device_ = device;
    if (device != Context::kCpuId) {
      lower_bounds_.SetDevice(device);
      upper_bounds_.SetDevice(device);
      monotone_.SetDevice(device);
    }
    if (p.monotone_constraints.empty()) {
      monotone_.HostVector().resize(n_features, 0);
      has_constraint_ = false;
    } else {
      CHECK_LE(p.monotone_constraints.size(), n_features)
          << "The number of monotone constraints should be less than or equal to the number "
             "of features.";
      monotone_.HostVector() = p.monotone_constraints;
      // Pad unspecified features with 0 (unconstrained).
      monotone_.HostVector().resize(n_features, 0);
      // Start with a small number of nodes; the bounds grow as splits are added.
      lower_bounds_.Resize(256, -std::numeric_limits<float>::max());
      upper_bounds_.Resize(256, std::numeric_limits<float>::max());
      has_constraint_ = true;
    }
    if (device_ != Context::kCpuId) {
      // Pull the data to device eagerly so evaluation doesn't pay for a lazy copy.
      lower_bounds_.ConstDeviceSpan();
      upper_bounds_.ConstDeviceSpan();
      monotone_.ConstDeviceSpan();
    }
  }
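  // Illustration of the padding above: with n_features = 4 and
  // p.monotone_constraints = {1, -1}, the host vector becomes {1, -1, 0, 0},
  // i.e. feature 0 must be increasing, feature 1 decreasing, and features
  // 2 and 3 are unconstrained.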
  template <typename ParamT>
  struct SplitEvaluator {
    const int* constraints;
    const float* lower;
    const float* upper;
    bool has_constraint;
    template <typename GradientSumT>
    XGBOOST_DEVICE float CalcSplitGain(ParamT const& param, bst_node_t nidx, bst_feature_t fidx,
                                       GradientSumT const& left, GradientSumT const& right) const {
      int constraint = has_constraint ? constraints[fidx] : 0;
      const float negative_infinity = -std::numeric_limits<float>::infinity();
      float wleft = this->CalcWeight(nidx, param, left);
      float wright = this->CalcWeight(nidx, param, right);

      float gain = this->CalcGainGivenWeight(param, left, wleft) +
                   this->CalcGainGivenWeight(param, right, wright);

      if (constraint == 0) {
        return gain;
      } else if (constraint > 0) {
        // Increasing constraint: the left child weight must not exceed the right.
        return wleft <= wright ? gain : negative_infinity;
      } else {
        // Decreasing constraint: the left child weight must not fall below the right.
        return wleft >= wright ? gain : negative_infinity;
      }
    }
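    // Example: with an increasing constraint (constraints[fidx] == +1) and
    // clamped weights wleft = 0.4, wright = 0.1, the test wleft <= wright
    // fails, so the candidate split gets a gain of -inf and is never selected.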
    template <typename GradientSumT>
    XGBOOST_DEVICE float CalcWeight(bst_node_t nodeid, ParamT const& param,
                                    GradientSumT const& stats) const {
      float w = ::xgboost::tree::CalcWeight(param, stats);
      if (!has_constraint) {
        return w;
      }

      // Clamp the weight into the feasible interval propagated from the ancestors.
      if (nodeid == kRootParentId) {
        return w;
      } else if (w < lower[nodeid]) {
        return lower[nodeid];
      } else if (w > upper[nodeid]) {
        return upper[nodeid];
      } else {
        return w;
      }
    }
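    // Example: if the unconstrained weight is w = -1.3 but the node's feasible
    // interval is [lower[nodeid], upper[nodeid]] = [-0.5, 0.8], the returned
    // weight is clamped to -0.5.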
    template <typename GradientSumT>
    XGBOOST_DEVICE float CalcWeightCat(ParamT const& param, GradientSumT const& stats) const {
      // Weight used for sorting categories; node bounds from monotone
      // constraints are not applied here.
      return ::xgboost::tree::CalcWeight(param, stats);
    }
    // Fast approximate division on CUDA devices; plain division on the host.
    XGBOOST_DEVICE float Divide(float a, float b) const {
#if defined(__CUDA_ARCH__)
      return __fdividef(a, b);
#else
      return a / b;
#endif
    }
    template <typename GradientSumT>
    XGBOOST_DEVICE float CalcGainGivenWeight(ParamT const& p, GradientSumT const& stats,
                                             float w) const {
      if (stats.GetHess() <= 0) {
        return .0f;
      }
      // Fast path: without max_delta_step and constraints, the closed form
      // ThresholdL1(G, alpha)^2 / (H + lambda) avoids extra floating point error.
      if (p.max_delta_step == 0.0f && has_constraint == false) {
        return Divide(common::Sqr(ThresholdL1(stats.GetGrad(), p.reg_alpha)),
                      (stats.GetHess() + p.reg_lambda));
      }
      return tree::CalcGainGivenWeight<ParamT, float>(p, stats.GetGrad(),
                                                      stats.GetHess(), w);
    }
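    // Example: with reg_alpha = 0, reg_lambda = 1, sum gradient G = 4 and sum
    // hessian H = 9, the fast path returns G^2 / (H + lambda) = 16 / 10 = 1.6.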
    template <typename GradientSumT>
    XGBOOST_DEVICE float CalcGain(bst_node_t nid, ParamT const& p,
                                  GradientSumT const& stats) const {
      return this->CalcGainGivenWeight(p, stats, this->CalcWeight(nid, p, stats));
    }
  };
  // Get a view of the evaluator that can be passed down to device.
  template <typename ParamT = TrainParam>
  auto GetEvaluator() const {
    if (device_ != Context::kCpuId) {
      auto constraints = monotone_.ConstDevicePointer();
      return SplitEvaluator<ParamT>{constraints, lower_bounds_.ConstDevicePointer(),
                                    upper_bounds_.ConstDevicePointer(), has_constraint_};
    } else {
      auto constraints = monotone_.ConstHostPointer();
      return SplitEvaluator<ParamT>{constraints, lower_bounds_.ConstHostPointer(),
                                    upper_bounds_.ConstHostPointer(), has_constraint_};
    }
  }
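  // Usage sketch (assuming the enclosing class is named TreeEvaluator; the
  // surrounding variable names here are hypothetical): the returned view holds
  // raw pointers, so it must not outlive this object.
  //
  //   TreeEvaluator tree_evaluator{param, n_features, device};
  //   auto evaluator = tree_evaluator.GetEvaluator();
  //   float gain = evaluator.CalcSplitGain(param, nidx, fidx, left_sum, right_sum);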
  template <bool CompiledWithCuda = WITH_CUDA()>
  void AddSplit(bst_node_t nodeid, bst_node_t leftid, bst_node_t rightid, bst_feature_t f,
                float left_weight, float right_weight) {
    if (!has_constraint_) {
      return;
    }

    // Grow the bound vectors if the new children fall outside the current range.
    size_t max_nidx = std::max(leftid, rightid);
    if (lower_bounds_.Size() <= max_nidx) {
      lower_bounds_.Resize(max_nidx * 2 + 1, -std::numeric_limits<float>::max());
    }
    if (upper_bounds_.Size() <= max_nidx) {
      upper_bounds_.Resize(max_nidx * 2 + 1, std::numeric_limits<float>::max());
    }
    common::Transform<>::Init(
        [=] XGBOOST_DEVICE(size_t, common::Span<float> lower,
                           common::Span<float> upper,
                           common::Span<int> monotone) {
          // Children inherit the parent's feasible interval.
          lower[leftid] = lower[nodeid];
          upper[leftid] = upper[nodeid];

          lower[rightid] = lower[nodeid];
          upper[rightid] = upper[nodeid];
          int32_t c = monotone[f];
          bst_float mid = (left_weight + right_weight) / 2;

          SPAN_CHECK(!common::CheckNAN(mid));

          if (c < 0) {
            // Decreasing: left weights stay above mid, right weights below.
            lower[leftid] = mid;
            upper[rightid] = mid;
          } else if (c > 0) {
            // Increasing: left weights stay below mid, right weights above.
            upper[leftid] = mid;
            lower[rightid] = mid;
          }
        },
        common::Range(0, 1), 1, device_)
        .Eval(&lower_bounds_, &upper_bounds_, &monotone_);
  }
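  // Example: splitting the root (bounds (-max, +max)) on a feature with an
  // increasing constraint (c > 0) and left_weight = 0.2, right_weight = 0.6
  // gives mid = 0.4; the left child gets upper bound 0.4 and the right child
  // gets lower bound 0.4, so all later weights preserve wleft <= wright.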