Medial Code Documentation
array_interface.h
6#ifndef XGBOOST_DATA_ARRAY_INTERFACE_H_
7#define XGBOOST_DATA_ARRAY_INTERFACE_H_
8
9#include <algorithm>
10#include <cstddef> // for size_t
11#include <cstdint>
12#include <limits> // for numeric_limits
13#include <map>
14#include <string>
15#include <type_traits> // std::alignment_of,std::remove_pointer_t
16#include <utility>
17#include <vector>
18
19#include "../common/bitfield.h"
20#include "../common/common.h"
21#include "../common/error_msg.h" // for NoF128
22#include "xgboost/base.h"
23#include "xgboost/data.h"
24#include "xgboost/json.h"
25#include "xgboost/linalg.h"
26#include "xgboost/logging.h"
27#include "xgboost/span.h"
28
29#if defined(XGBOOST_USE_CUDA)
30#include "cuda_fp16.h"
31#endif
32
33namespace xgboost {
34// Common errors in parsing columnar format.
35struct ArrayInterfaceErrors {
36 static char const *Contiguous() { return "Memory should be contiguous."; }
37 static char const *TypestrFormat() {
38 return "`typestr' should be of format <endian><type><size of type in bytes>.";
39 }
40 static char const *Dimension(int32_t d) {
41 static std::string str;
42 str.clear();
43 str += "Only ";
44 str += std::to_string(d);
45 str += " dimensional array is valid.";
46 return str.c_str();
47 }
48 static char const *Version() {
49 return "Only version <= 3 of `__cuda_array_interface__' and `__array_interface__' are "
50 "supported.";
51 }
52 static char const *OfType(std::string const &type) {
53 static std::string str;
54 str.clear();
55 str += " should be of ";
56 str += type;
57 str += " type.";
58 return str.c_str();
59 }
60
61 static std::string TypeStr(char c) {
62 switch (c) {
63 case 't':
64 return "Bit field";
65 case 'b':
66 return "Boolean";
67 case 'i':
68 return "Integer";
69 case 'u':
70 return "Unsigned integer";
71 case 'f':
72 return "Floating point";
73 case 'c':
74 return "Complex floating point";
75 case 'm':
76 return "Timedelta";
77 case 'M':
78 return "Datetime";
79 case 'O':
80 return "Object";
81 case 'S':
82 return "String";
83 case 'U':
84 return "Unicode";
85 case 'V':
86 return "Other";
87 default:
88 LOG(FATAL) << "Invalid type code: " << c << " in `typestr' of input array."
89 << "\nPlease verify the `__cuda_array_interface__/__array_interface__' "
90 << "of your input data complies to: "
91 << "https://docs.scipy.org/doc/numpy/reference/arrays.interface.html"
92 << "\nOr open an issue.";
93 return "";
94 }
95 }
96
97 static std::string UnSupportedType(StringView typestr) {
98 return TypeStr(typestr[1]) + "-" + typestr[2] + " is not supported.";
99 }
100};
101
105class ArrayInterfaceHandler {
106 public:
107 enum Type : std::int8_t { kF2, kF4, kF8, kF16, kI1, kI2, kI4, kI8, kU1, kU2, kU4, kU8 };
108
109 template <typename PtrType>
110 static PtrType GetPtrFromArrayData(Object::Map const &obj) {
111 auto data_it = obj.find("data");
112 if (data_it == obj.cend() || IsA<Null>(data_it->second)) {
113 LOG(FATAL) << "Empty data passed in.";
114 }
115 auto p_data = reinterpret_cast<PtrType>(
116 static_cast<size_t>(get<Integer const>(get<Array const>(data_it->second).at(0))));
117 return p_data;
118 }
119
120 static void Validate(Object::Map const &array) {
121 auto version_it = array.find("version");
122 if (version_it == array.cend() || IsA<Null>(version_it->second)) {
123 LOG(FATAL) << "Missing `version' field for array interface";
124 }
125 if (get<Integer const>(version_it->second) > 3) {
126 LOG(FATAL) << ArrayInterfaceErrors::Version();
127 }
128
129 auto typestr_it = array.find("typestr");
130 if (typestr_it == array.cend() || IsA<Null>(typestr_it->second)) {
131 LOG(FATAL) << "Missing `typestr' field for array interface";
132 }
133
134 auto typestr = get<String const>(typestr_it->second);
135 CHECK(typestr.size() == 3 || typestr.size() == 4) << ArrayInterfaceErrors::TypestrFormat();
136
137 auto shape_it = array.find("shape");
138 if (shape_it == array.cend() || IsA<Null>(shape_it->second)) {
139 LOG(FATAL) << "Missing `shape' field for array interface";
140 }
141 auto data_it = array.find("data");
142 if (data_it == array.cend() || IsA<Null>(data_it->second)) {
143 LOG(FATAL) << "Missing `data' field for array interface";
144 }
145 }
146
147 // Find null mask (validity mask) field
148 // Mask object is also an array interface, but with different requirements.
149 static size_t ExtractMask(Object::Map const &column,
150 common::Span<RBitField8::value_type> *p_out) {
151 auto &s_mask = *p_out;
152 auto const &mask_it = column.find("mask");
153 if (mask_it != column.cend() && !IsA<Null>(mask_it->second)) {
154 auto const &j_mask = get<Object const>(mask_it->second);
155 Validate(j_mask);
156
157 auto p_mask = GetPtrFromArrayData<RBitField8::value_type *>(j_mask);
158
159 auto j_shape = get<Array const>(j_mask.at("shape"));
160 CHECK_EQ(j_shape.size(), 1) << ArrayInterfaceErrors::Dimension(1);
161 auto typestr = get<String const>(j_mask.at("typestr"));
162 // For now this is just 1; we can support different sizes of integer in the mask.
163 int64_t const type_length = typestr.at(2) - 48;
164
165 if (typestr.at(1) == 't') {
166 CHECK_EQ(type_length, 1) << "mask with bitfield type should be of 1 byte per bitfield.";
167 } else if (typestr.at(1) == 'i') {
168 CHECK_EQ(type_length, 1) << "mask with integer type should be of 1 byte per integer.";
169 } else {
170 LOG(FATAL) << "mask must be of integer type or bit field type.";
171 }
172 /*
173 * shape represents how many bits are in the mask. (This is a grey area; don't be
174 * surprised if it suddenly represents something else when supporting a new
175 * implementation). Quoting from numpy array interface:
176 *
177 * The shape of this object should be "broadcastable" to the shape of the original
178 * array.
179 *
180 * And that's the only requirement.
181 */
182 size_t const n_bits = static_cast<size_t>(get<Integer>(j_shape.at(0)));
183 // The size of the span required to cover all bits. Here, with an 8-bit bitfield, we
184 // assume 1-byte alignment.
185 size_t const span_size = RBitField8::ComputeStorageSize(n_bits);
186
187 auto strides_it = j_mask.find("strides");
188 if (strides_it != j_mask.cend() && !IsA<Null>(strides_it->second)) {
189 auto strides = get<Array const>(strides_it->second);
190 CHECK_EQ(strides.size(), 1) << ArrayInterfaceErrors::Dimension(1);
191 CHECK_EQ(get<Integer>(strides.at(0)), type_length) << ArrayInterfaceErrors::Contiguous();
192 }
193
194 s_mask = {p_mask, span_size};
195 return n_bits;
196 }
197 return 0;
198 }
202 template <int32_t D>
203 static void HandleRowVector(std::vector<size_t> const &shape, std::vector<size_t> *p_out) {
204 auto &out = *p_out;
205 if (shape.size() == 2 && D == 1) {
206 auto m = shape[0];
207 auto n = shape[1];
208 CHECK(m == 1 || n == 1);
209 if (m == 1) {
210 // keep the number of columns
211 out[0] = out[1];
212 out.resize(1);
213 } else if (n == 1) {
214 // keep the number of rows.
215 out.resize(1);
216 }
217 // When both m and n are 1, the logic above keeps the column.
218 // When neither m nor n is 1, the caller should raise an error about the dimension.
219 }
220 }
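// Illustrative sketch (not part of the original header): how HandleRowVector collapses a
// 2-D vector shape when the caller requests a 1-D view (D == 1).
//   std::vector<size_t> row{1, 5};  // a (1, 5) row vector
//   ArrayInterfaceHandler::HandleRowVector<1>(row, &row);  // row is now {5}: columns kept
//   std::vector<size_t> col{7, 1};  // a (7, 1) column vector
//   ArrayInterfaceHandler::HandleRowVector<1>(col, &col);  // col is now {7}: rows kept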
221
222 template <int32_t D>
223 static void ExtractShape(Object::Map const &array, size_t (&out_shape)[D]) {
224 auto const &j_shape = get<Array const>(array.at("shape"));
225 std::vector<size_t> shape_arr(j_shape.size(), 0);
226 std::transform(j_shape.cbegin(), j_shape.cend(), shape_arr.begin(),
227 [](Json in) { return get<Integer const>(in); });
228 // handle column vector vs. row vector
229 HandleRowVector<D>(shape_arr, &shape_arr);
230 // Copy shape.
231 size_t i;
232 for (i = 0; i < shape_arr.size(); ++i) {
233 CHECK_LT(i, D) << ArrayInterfaceErrors::Dimension(D);
234 out_shape[i] = shape_arr[i];
235 }
236 // Fill the remaining dimensions
237 std::fill(out_shape + i, out_shape + D, 1);
238 }
239
243 template <int32_t D>
244 static bool ExtractStride(Object::Map const &array, size_t itemsize,
245 size_t (&shape)[D], size_t (&stride)[D]) {
246 auto strides_it = array.find("strides");
247 // No stride is provided
248 if (strides_it == array.cend() || IsA<Null>(strides_it->second)) {
249 // No stride is provided, we can calculate it from shape.
250 linalg::detail::CalcStride(shape, stride);
251 // Quote:
252 //
253 // strides: Either None to indicate a C-style contiguous array or a Tuple of
254 // strides which provides the number of bytes needed to jump to the next array element in the corresponding dimension.
255 return true;
256 }
257 // Get the shape. We need to make changes to handle the row-vector case, hence some
258 // code duplicated from `ExtractShape` for copying out the shape.
259 auto const &j_shape = get<Array const>(array.at("shape"));
260 std::vector<size_t> shape_arr(j_shape.size(), 0);
261 std::transform(j_shape.cbegin(), j_shape.cend(), shape_arr.begin(),
262 [](Json in) { return get<Integer const>(in); });
263 // Get stride
264 auto const &j_strides = get<Array const>(strides_it->second);
265 CHECK_EQ(j_strides.size(), j_shape.size()) << "stride and shape don't match.";
266 std::vector<size_t> stride_arr(j_strides.size(), 0);
267 std::transform(j_strides.cbegin(), j_strides.cend(), stride_arr.begin(),
268 [](Json in) { return get<Integer const>(in); });
269
270 // Handle column vector vs. row vector
271 HandleRowVector<D>(shape_arr, &stride_arr);
272 size_t i;
273 for (i = 0; i < stride_arr.size(); ++i) {
274 // If one of the dims has shape 0 then the total size is 0 and the stride is
275 // meaningless, but we set it to 0 here just to be consistent.
276 CHECK_LT(i, D) << ArrayInterfaceErrors::Dimension(D);
278 // We use the number of items instead of the number of bytes.
278 stride[i] = stride_arr[i] / itemsize;
279 }
280 std::fill(stride + i, stride + D, 1);
281 // If the stride can be calculated from shape then it's contiguous.
282 size_t stride_tmp[D];
283 linalg::detail::CalcStride(shape, stride_tmp);
284 return std::equal(stride_tmp, stride_tmp + D, stride);
285 }
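// Illustrative sketch (not part of the original header). Strides in the interface are given
// in bytes and converted to element counts here. Assuming `array` is the parsed Object::Map
// of a C-contiguous float64 ndarray with shape (4, 2, 3) and "strides" (48, 24, 8):
//   size_t shape[3], stride[3];
//   ArrayInterfaceHandler::ExtractShape(array, shape);  // shape  -> {4, 2, 3}
//   bool contiguous =
//       ArrayInterfaceHandler::ExtractStride(array, sizeof(double), shape, stride);
//   // stride -> {6, 3, 1} elements; this matches CalcStride, so contiguous == true.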
286
287 static void *ExtractData(Object::Map const &array, size_t size) {
288 Validate(array);
289 void *p_data = ArrayInterfaceHandler::GetPtrFromArrayData<void *>(array);
290 if (!p_data) {
291 CHECK_EQ(size, 0) << "Empty data with non-zero shape.";
292 }
293 return p_data;
294 }
298 static bool IsCudaPtr(void const *ptr);
302 static void SyncCudaStream(int64_t stream);
303};
304
308template <typename T, typename E = void>
309struct ToDType;
310// float
311#if defined(XGBOOST_USE_CUDA)
312template <>
313struct ToDType<__half> {
314 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kF2;
315};
316#endif // defined(XGBOOST_USE_CUDA)
317template <>
318struct ToDType<float> {
319 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kF4;
320};
321template <>
322struct ToDType<double> {
323 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kF8;
324};
325template <typename T>
326struct ToDType<T,
327 std::enable_if_t<std::is_same<T, long double>::value && sizeof(long double) == 16>> {
328 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kF16;
329};
330// uint
331template <>
332struct ToDType<uint8_t> {
333 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kU1;
334};
335template <>
336struct ToDType<uint16_t> {
337 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kU2;
338};
339template <>
340struct ToDType<uint32_t> {
341 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kU4;
342};
343template <>
344struct ToDType<uint64_t> {
345 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kU8;
346};
347// int
348template <>
349struct ToDType<int8_t> {
350 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kI1;
351};
352template <>
353struct ToDType<int16_t> {
354 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kI2;
355};
356template <>
357struct ToDType<int32_t> {
358 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kI4;
359};
360template <>
361struct ToDType<int64_t> {
362 static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kI8;
363};
364
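// Illustrative sketch (not part of the original header): ToDType maps a C++ element type to
// its runtime dtype tag at compile time, e.g.
//   static_assert(ToDType<float>::kType == ArrayInterfaceHandler::kF4, "");
//   static_assert(ToDType<std::uint16_t>::kType == ArrayInterfaceHandler::kU2, "");
//   static_assert(ToDType<std::int64_t>::kType == ArrayInterfaceHandler::kI8, "");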
365#if !defined(XGBOOST_USE_CUDA)
366inline void ArrayInterfaceHandler::SyncCudaStream(int64_t) { common::AssertGPUSupport(); }
367inline bool ArrayInterfaceHandler::IsCudaPtr(void const *) { return false; }
368#endif // !defined(XGBOOST_USE_CUDA)
369
387template <int32_t D, bool allow_mask = (D == 1)>
388class ArrayInterface {
389 static_assert(D > 0, "Invalid dimension for array interface.");
390
399 void Initialize(Object::Map const &array) {
400 ArrayInterfaceHandler::Validate(array);
401
402 auto typestr = get<String const>(array.at("typestr"));
403 this->AssignType(StringView{typestr});
404 ArrayInterfaceHandler::ExtractShape(array, shape);
405 size_t itemsize = typestr[2] - '0';
406 is_contiguous = ArrayInterfaceHandler::ExtractStride(array, itemsize, shape, strides);
407 n = linalg::detail::CalcSize(shape);
408
409 data = ArrayInterfaceHandler::ExtractData(array, n);
410 static_assert(allow_mask ? D == 1 : D >= 1, "Masked ndarray is not supported.");
411
412 auto alignment = this->ElementAlignment();
413 auto ptr = reinterpret_cast<uintptr_t>(this->data);
414 CHECK_EQ(ptr % alignment, 0) << "Input pointer misalignment.";
415
416 if (allow_mask) {
417 common::Span<RBitField8::value_type> s_mask;
418 size_t n_bits = ArrayInterfaceHandler::ExtractMask(array, &s_mask);
419
420 valid = RBitField8(s_mask);
421
422 if (s_mask.data()) {
423 CHECK_EQ(n_bits, n) << "Shape of bit mask doesn't match data shape. "
424 << "XGBoost doesn't support internal broadcasting.";
425 }
426 } else {
427 auto mask_it = array.find("mask");
428 CHECK(mask_it == array.cend() || IsA<Null>(mask_it->second))
429 << "Masked array is not yet supported.";
430 }
431
432 auto stream_it = array.find("stream");
433 if (stream_it != array.cend() && !IsA<Null>(stream_it->second)) {
434 int64_t stream = get<Integer const>(stream_it->second);
435 ArrayInterfaceHandler::SyncCudaStream(stream);
436 }
437 }
438
439 public:
440 ArrayInterface() = default;
441 explicit ArrayInterface(Object::Map const &array) { this->Initialize(array); }
442
443 explicit ArrayInterface(Json const &array) {
444 if (IsA<Object>(array)) {
445 this->Initialize(get<Object const>(array));
446 return;
447 }
448 if (IsA<Array>(array)) {
449 CHECK_EQ(get<Array const>(array).size(), 1)
450 << "Column: " << ArrayInterfaceErrors::Dimension(1);
451 this->Initialize(get<Object const>(get<Array const>(array)[0]));
452 return;
453 }
454 }
455
456 explicit ArrayInterface(std::string const &str) : ArrayInterface{StringView{str}} {}
457
458 explicit ArrayInterface(StringView str) : ArrayInterface{Json::Load(str)} {}
459
460 void AssignType(StringView typestr) {
461 using T = ArrayInterfaceHandler::Type;
462 if (typestr.size() == 4 && typestr[1] == 'f' && typestr[2] == '1' && typestr[3] == '6') {
463 CHECK(sizeof(long double) == 16) << error::NoF128();
464 type = T::kF16;
465 } else if (typestr[1] == 'f' && typestr[2] == '2') {
466#if defined(XGBOOST_USE_CUDA)
467 type = T::kF2;
468#else
469 LOG(FATAL) << "Half type is not supported.";
470#endif // defined(XGBOOST_USE_CUDA)
471 } else if (typestr[1] == 'f' && typestr[2] == '4') {
472 type = T::kF4;
473 } else if (typestr[1] == 'f' && typestr[2] == '8') {
474 type = T::kF8;
475 } else if (typestr[1] == 'i' && typestr[2] == '1') {
476 type = T::kI1;
477 } else if (typestr[1] == 'i' && typestr[2] == '2') {
478 type = T::kI2;
479 } else if (typestr[1] == 'i' && typestr[2] == '4') {
480 type = T::kI4;
481 } else if (typestr[1] == 'i' && typestr[2] == '8') {
482 type = T::kI8;
483 } else if (typestr[1] == 'u' && typestr[2] == '1') {
484 type = T::kU1;
485 } else if (typestr[1] == 'u' && typestr[2] == '2') {
486 type = T::kU2;
487 } else if (typestr[1] == 'u' && typestr[2] == '4') {
488 type = T::kU4;
489 } else if (typestr[1] == 'u' && typestr[2] == '8') {
490 type = T::kU8;
491 } else {
492 LOG(FATAL) << ArrayInterfaceErrors::UnSupportedType(typestr);
493 return;
494 }
495 }
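// Illustrative sketch (not part of the original header): `typestr` follows the NumPy
// <endian><type><size-in-bytes> convention, so for a default-constructed view
//   ArrayInterface<1> arr;
//   arr.AssignType(StringView{"<f4"});  // little-endian float32    -> ArrayInterfaceHandler::kF4
//   arr.AssignType(StringView{"|u1"});  // byte-order-agnostic uint8 -> ArrayInterfaceHandler::kU1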
496
497 [[nodiscard]] XGBOOST_DEVICE std::size_t Shape(size_t i) const { return shape[i]; }
498 [[nodiscard]] XGBOOST_DEVICE std::size_t Stride(size_t i) const { return strides[i]; }
499
500 template <typename Fn>
501 XGBOOST_HOST_DEV_INLINE decltype(auto) DispatchCall(Fn func) const {
502 using T = ArrayInterfaceHandler::Type;
503 switch (type) {
504 case T::kF2: {
505#if defined(XGBOOST_USE_CUDA)
506 return func(reinterpret_cast<__half const *>(data));
507#endif // defined(XGBOOST_USE_CUDA)
508 }
509 case T::kF4:
510 return func(reinterpret_cast<float const *>(data));
511 case T::kF8:
512 return func(reinterpret_cast<double const *>(data));
513#ifdef __CUDA_ARCH__
514 case T::kF16: {
515 // CUDA device code doesn't support long double.
516 SPAN_CHECK(false);
517 return func(reinterpret_cast<double const *>(data));
518 }
519#else
520 case T::kF16:
521 return func(reinterpret_cast<long double const *>(data));
522#endif
523 case T::kI1:
524 return func(reinterpret_cast<int8_t const *>(data));
525 case T::kI2:
526 return func(reinterpret_cast<int16_t const *>(data));
527 case T::kI4:
528 return func(reinterpret_cast<int32_t const *>(data));
529 case T::kI8:
530 return func(reinterpret_cast<int64_t const *>(data));
531 case T::kU1:
532 return func(reinterpret_cast<uint8_t const *>(data));
533 case T::kU2:
534 return func(reinterpret_cast<uint16_t const *>(data));
535 case T::kU4:
536 return func(reinterpret_cast<uint32_t const *>(data));
537 case T::kU8:
538 return func(reinterpret_cast<uint64_t const *>(data));
539 }
540 SPAN_CHECK(false);
541 return func(reinterpret_cast<uint64_t const *>(data));
542 }
543
544 [[nodiscard]] XGBOOST_DEVICE std::size_t ElementSize() const {
545 return this->DispatchCall([](auto *typed_data_ptr) {
546 return sizeof(std::remove_pointer_t<decltype(typed_data_ptr)>);
547 });
548 }
549 [[nodiscard]] XGBOOST_DEVICE std::size_t ElementAlignment() const {
550 return this->DispatchCall([](auto *typed_data_ptr) {
551 return std::alignment_of<std::remove_pointer_t<decltype(typed_data_ptr)>>::value;
552 });
553 }
554
555 template <typename T = float, typename... Index>
556 XGBOOST_HOST_DEV_INLINE T operator()(Index &&...index) const {
557 static_assert(sizeof...(index) <= D, "Invalid index.");
558 return this->DispatchCall([=](auto const *p_values) -> T {
559 std::size_t offset = linalg::detail::Offset<0ul>(strides, 0ul, index...);
560#if defined(XGBOOST_USE_CUDA)
561 // No operator defined for half -> size_t
562 using Type = std::conditional_t<
563 std::is_same<__half,
564 std::remove_cv_t<std::remove_pointer_t<decltype(p_values)>>>::value &&
565 std::is_same<std::size_t, std::remove_cv_t<T>>::value,
566 unsigned long long, T>; // NOLINT
567 return static_cast<T>(static_cast<Type>(p_values[offset]));
568#else
569 return static_cast<T>(p_values[offset]);
570#endif // defined(XGBOOST_USE_CUDA)
571 });
572 }
573
574 // Used only by columnar format.
575 RBitField8 valid;
576 // Array stride
577 std::size_t strides[D]{0};
578 // Array shape
579 std::size_t shape[D]{0};
581 // Type-erased pointer referencing the data.
581 void const *data{nullptr};
582 // Total number of items
583 std::size_t n{0};
584 // Whether the memory is c-contiguous
585 bool is_contiguous{false};
587 // RTTI, initialized to f16 to avoid masking potential bugs in initialization.
587 ArrayInterfaceHandler::Type type{ArrayInterfaceHandler::kF16};
588};
589
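// Illustrative usage sketch (not part of the original header). The buffer and its values are
// made up for the example; in practice the JSON comes from NumPy's `__array_interface__` or
// CUDA's `__cuda_array_interface__`.
//   std::vector<float> values{1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
//   auto ptr = reinterpret_cast<std::uintptr_t>(values.data());
//   std::string jstr = R"({"data": [)" + std::to_string(ptr) +
//                      R"(, true], "shape": [2, 3], "typestr": "<f4", "version": 3})";
//   ArrayInterface<2> arr{StringView{jstr}};
//   // arr.Shape(0) == 2, arr.Shape(1) == 3, arr.n == 6, arr.is_contiguous == true
//   // arr(1, 2) == 6.0f  (row-major offset 1 * 3 + 2)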
590template <std::int32_t D, typename Fn>
591void DispatchDType(ArrayInterface<D> const array, std::int32_t device, Fn fn) {
592 // Only used for cuDF at the moment.
593 CHECK_EQ(array.valid.Capacity(), 0);
594 auto dispatch = [&](auto t) {
595 using T = std::remove_const_t<decltype(t)> const;
596 // Set the data size to max as we don't know the original size of a sliced array:
597 //
598 // Slicing an array A with shape (4, 2, 3) and stride (6, 3, 1) by [:, 1, :] results
599 // in an array B with shape (4, 3) and strides (6, 1). We can't calculate the original
600 // size 24 based on the slice.
601 fn(linalg::TensorView<T, D>{common::Span<T const>{static_cast<T *>(array.data),
602 std::numeric_limits<std::size_t>::max()},
603 array.shape, array.strides, device});
604 };
605 switch (array.type) {
606 case ArrayInterfaceHandler::kF2: {
607#if defined(XGBOOST_USE_CUDA)
608 dispatch(__half{});
609#endif
610 break;
611 }
612 case ArrayInterfaceHandler::kF4: {
613 dispatch(float{});
614 break;
615 }
616 case ArrayInterfaceHandler::kF8: {
617 dispatch(double{});
618 break;
619 }
620 case ArrayInterfaceHandler::kF16: {
621 using T = long double;
622 CHECK(sizeof(long double) == 16) << error::NoF128();
623 dispatch(T{});
624 break;
625 }
626 case ArrayInterfaceHandler::kI1: {
627 dispatch(std::int8_t{});
628 break;
629 }
630 case ArrayInterfaceHandler::kI2: {
631 dispatch(std::int16_t{});
632 break;
633 }
634 case ArrayInterfaceHandler::kI4: {
635 dispatch(std::int32_t{});
636 break;
637 }
638 case ArrayInterfaceHandler::kI8: {
639 dispatch(std::int64_t{});
640 break;
641 }
642 case ArrayInterfaceHandler::kU1: {
643 dispatch(std::uint8_t{});
644 break;
645 }
646 case ArrayInterfaceHandler::kU2: {
647 dispatch(std::uint16_t{});
648 break;
649 }
650 case ArrayInterfaceHandler::kU4: {
651 dispatch(std::uint32_t{});
652 break;
653 }
654 case ArrayInterfaceHandler::kU8: {
655 dispatch(std::uint64_t{});
656 break;
657 }
658 }
659}
660
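// Illustrative usage sketch (not part of the original header). `arr` is the ArrayInterface<2>
// from the sketch above; passing device ordinal -1 to request a CPU view is an assumption about
// the caller's convention, not something this header defines.
//   double sum = 0.0;
//   DispatchDType(arr, std::int32_t{-1}, [&](auto tensor) {
//     // `tensor` is a linalg::TensorView<T const, 2> with T chosen from arr.type at runtime.
//     for (std::size_t i = 0; i < arr.Shape(0); ++i) {
//       for (std::size_t j = 0; j < arr.Shape(1); ++j) {
//         sum += static_cast<double>(tensor(i, j));
//       }
//     }
//   });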
664template <typename T, int32_t D>
665struct TypedIndex {
666 ArrayInterface<D> const &array;
667 template <typename... I>
668 XGBOOST_DEVICE T operator()(I &&...ind) const {
669 static_assert(sizeof...(ind) <= D, "Invalid index.");
670 return array.template operator()<T>(ind...);
671 }
672};
673
674template <int32_t D>
675inline void CheckArrayInterface(StringView key, ArrayInterface<D> const &array) {
676 CHECK(!array.valid.Data()) << "Meta info " << key << " should be dense, found validity mask";
677}
678} // namespace xgboost
679#endif // XGBOOST_DATA_ARRAY_INTERFACE_H_
Cross references (symbols defined in this file):
ArrayInterfaceErrors: common errors in parsing the columnar format. Definition: array_interface.h:35
ArrayInterfaceHandler: utilities for consuming the array interface. Definition: array_interface.h:105
ArrayInterfaceHandler::HandleRowVector(std::vector<size_t> const &shape, std::vector<size_t> *p_out): handle vector inputs. Definition: array_interface.h:203
ArrayInterfaceHandler::ExtractStride(Object::Map const &array, size_t itemsize, size_t(&shape)[D], size_t(&stride)[D]): extracts the optional `strides` field and returns whether the array is c-contiguous. Definition: array_interface.h:244
ArrayInterfaceHandler::SyncCudaStream(int64_t stream): sync the CUDA stream. Definition: array_interface.h:366
ArrayInterfaceHandler::IsCudaPtr(void const *ptr): whether the pointer is allocated by CUDA. Definition: array_interface.h:367
ArrayInterface: a type-erased view over the array_interface protocol defined by NumPy. Definition: array_interface.h:388
ToDType: dispatch a compile-time type to a runtime type. Definition: array_interface.h:309
TypedIndex: helper for type casting. Definition: array_interface.h:665

Cross references (symbols from other headers):
Json: data structure representing the JSON format. Definition: json.h:357
Json::Load(StringView str, std::ios::openmode mode = std::ios::in): decode a JSON object. Definition: json.cc:652
Span: span class implementation based on ISO C++20 span<T>; the interface should be the same. Definition: span.h:424
linalg::TensorView: a tensor view with static type and dimension. Definition: linalg.h:293
XGBOOST_DEVICE: tag a function as usable by device code. Definition: base.h:64
StringView: Definition: string_view.h:15