Commit d84700b: Rename to view.
trivialfis committed Oct 29, 2021
1 parent 79f1437 commit d84700b
Showing 6 changed files with 19 additions and 19 deletions.
14 changes: 7 additions & 7 deletions include/xgboost/linalg.h
@@ -74,7 +74,7 @@ constexpr detail::AllTag All() { return {}; }
* \brief A tensor view with static type and shape, it implements indexing and
* slicing.
*/
-template <typename T, int32_t kDim = 5> class Tensor {
+template <typename T, int32_t kDim = 5> class TensorView {
using ShapeT = size_t[kDim];
using StrideT = ShapeT;

@@ -148,7 +148,7 @@ template <typename T, int32_t kDim = 5> class Tensor {
* \brief Create a tensor with data and shape.
*/
template <typename I, int32_t D>
-  XGBOOST_DEVICE Tensor(common::Span<T> data, I const (&shape)[D],
+  XGBOOST_DEVICE TensorView(common::Span<T> data, I const (&shape)[D],
int32_t device)
: data_{data}, device_{device} {
static_assert(D > 0 && D <= kDim, "Invalid shape.");
@@ -170,7 +170,7 @@ template <typename T, int32_t kDim = 5> class Tensor {
* stride can be calculated from shape.
*/
template <typename I, int32_t D>
-  XGBOOST_DEVICE Tensor(common::Span<T> data, I const (&shape)[D],
+  XGBOOST_DEVICE TensorView(common::Span<T> data, I const (&shape)[D],
I const (&stride)[D], int32_t device)
: data_{data}, device_{device} {
static_assert(D == kDim, "Invalid shape & stride.");
@@ -181,7 +181,7 @@ template <typename T, int32_t kDim = 5> class Tensor {
this->CalcSize();
};

-  XGBOOST_DEVICE Tensor(Tensor const &that)
+  XGBOOST_DEVICE TensorView(TensorView const &that)
: data_{that.data_}, size_{that.size_}, device_{that.device_} {
detail::UnrollLoop<kDim>([&](auto i) {
stride_[i] = that.stride_[i];
@@ -238,7 +238,7 @@ template <typename T, int32_t kDim = 5> class Tensor {
new_stride, slices...);
// ret is a different type due to changed dimension, so we can not access its private
// fields.
-    Tensor<T, kNewDim> ret{data_.subspan(data_.empty() ? 0 : res.offset),
+    TensorView<T, kNewDim> ret{data_.subspan(data_.empty() ? 0 : res.offset),
new_shape, new_stride, device_};
return ret;
}
@@ -264,15 +264,15 @@ template <typename T, int32_t kDim = 5> class Tensor {
* \tparam T data type of vector
*/
template <typename T>
-using VectorView = Tensor<T, 1>;
+using VectorView = TensorView<T, 1>;

/**
* \brief A view over a matrix, specialization of Tensor.
*
* \tparam T data type of matrix
*/
template <typename T>
-using MatrixView = Tensor<T, 2>;
+using MatrixView = TensorView<T, 2>;
} // namespace linalg
} // namespace xgboost
#endif // XGBOOST_LINALG_H_
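
For context, a minimal sketch of the renamed class in use (not part of this commit), mirroring the patterns exercised in tests/cpp/common/test_linalg.cc below; it assumes the xgboost headers are on the include path.

```cpp
#include <numeric>
#include <vector>

#include <xgboost/linalg.h>

int main() {
  std::vector<double> data(2 * 3 * 4, 0);
  std::iota(data.begin(), data.end(), 0);

  // Non-owning 3-D view over `data`; -1 selects the CPU device.
  xgboost::linalg::TensorView<double> t{data, {2, 3, 4}, -1};

  // Fixing the first two indices and keeping the last axis with All()
  // yields a 1-D view of 4 elements.
  auto s = t.Slice(1, 2, xgboost::linalg::All());
  return s.Size() == 4 ? 0 : 1;
}
```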
2 changes: 1 addition & 1 deletion src/gbm/gblinear.cc
@@ -243,7 +243,7 @@ class GBLinear : public GradientBooster {
// The bias is the last weight
out_scores->resize(model_.weight.size() - learner_model_param_->num_output_group, 0);
auto n_groups = learner_model_param_->num_output_group;
-    linalg::Tensor<float, 2> scores{
+    linalg::TensorView<float, 2> scores{
*out_scores,
{learner_model_param_->num_feature, n_groups},
GenericParameter::kCpuId};
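
The gblinear change above illustrates the view's main contract: it does not own memory, so writes through the 2-D view land directly in the flat vector that backs it. A sketch under that assumption (FillScores and its parameters are hypothetical names, not part of this commit):

```cpp
#include <cstddef>
#include <vector>

#include <xgboost/linalg.h>

// Hypothetical helper: fill caller-provided storage through a 2-D view,
// much as GBLinear writes feature scores above. -1 plays the role of
// GenericParameter::kCpuId.
void FillScores(std::vector<float> *out_scores, std::size_t n_features,
                std::size_t n_groups) {
  out_scores->resize(n_features * n_groups, 0);
  xgboost::linalg::TensorView<float, 2> scores{
      *out_scores, {n_features, n_groups}, -1};
  for (std::size_t f = 0; f < n_features; ++f) {
    for (std::size_t g = 0; g < n_groups; ++g) {
      // The view is non-owning: this write is visible through
      // (*out_scores)[f * n_groups + g] in row-major layout.
      scores(f, g) = static_cast<float>(f);
    }
  }
}
```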
2 changes: 1 addition & 1 deletion src/gbm/gbtree.cc
@@ -229,7 +229,7 @@ void GBTree::DoBoost(DMatrix* p_fmat,
auto device = tparam_.tree_method != TreeMethod::kGPUHist
? GenericParameter::kCpuId
: generic_param_->gpu_id;
-  auto out = linalg::Tensor<float, 2>{
+  auto out = linalg::TensorView<float, 2>{
device == GenericParameter::kCpuId ? predt->predictions.HostSpan()
: predt->predictions.DeviceSpan(),
{static_cast<size_t>(p_fmat->Info().num_row_),
2 changes: 1 addition & 1 deletion src/gbm/gbtree.cu
@@ -12,7 +12,7 @@ namespace gbm {
void GPUCopyGradient(HostDeviceVector<GradientPair> const *in_gpair,
bst_group_t n_groups, bst_group_t group_id,
HostDeviceVector<GradientPair> *out_gpair) {
-  auto mat = linalg::Tensor<GradientPair const, 2>(
+  auto mat = linalg::TensorView<GradientPair const, 2>(
in_gpair->ConstDeviceSpan(),
{in_gpair->Size() / n_groups, static_cast<size_t>(n_groups)},
in_gpair->DeviceIdx());
4 changes: 2 additions & 2 deletions src/metric/auc.cc
@@ -84,14 +84,14 @@ float MultiClassOVR(common::Span<float const> predts, MetaInfo const &info,
auto const &labels = info.labels_.ConstHostVector();

std::vector<float> results_storage(n_classes * 3, 0);
-  linalg::Tensor<float> results(results_storage,
+  linalg::TensorView<float> results(results_storage,
{n_classes, static_cast<size_t>(3)},
GenericParameter::kCpuId);
auto local_area = results.Slice(linalg::All(), 0);
auto tp = results.Slice(linalg::All(), 1);
auto auc = results.Slice(linalg::All(), 2);
auto weights = OptionalWeights{info.weights_.ConstHostSpan()};
-  auto predts_t = linalg::Tensor<float const, 2>(
+  auto predts_t = linalg::TensorView<float const, 2>(
predts, {static_cast<size_t>(info.num_row_), n_classes},
GenericParameter::kCpuId);

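
In MultiClassOVR above, each Slice(linalg::All(), i) is a one-dimensional view over a single column of the same backing buffer, one entry per class. A minimal sketch of that pattern (not part of this commit), assuming the xgboost headers are available:

```cpp
#include <cstddef>
#include <vector>

#include <xgboost/linalg.h>

int main() {
  std::size_t n_classes = 4;
  std::vector<float> storage(n_classes * 3, 0.0f);
  xgboost::linalg::TensorView<float> results{
      storage, {n_classes, static_cast<std::size_t>(3)}, -1};

  // Column 1 as a 1-D view; writes go to the shared storage.
  auto tp = results.Slice(xgboost::linalg::All(), 1);
  tp(2) += 1.0f;  // row-major: element (2, 1) lives at storage[2 * 3 + 1]
  return storage[2 * 3 + 1] == 1.0f ? 0 : 1;
}
```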
14 changes: 7 additions & 7 deletions tests/cpp/common/test_linalg.cc
@@ -12,7 +12,7 @@ auto MakeMatrixFromTest(HostDeviceVector<float> *storage, size_t n_rows,

std::iota(h_storage.begin(), h_storage.end(), 0);

-  auto m = linalg::Tensor<float, 2>{
+  auto m = linalg::TensorView<float, 2>{
h_storage, {n_rows, static_cast<size_t>(n_cols)}, -1};
return m;
}
@@ -43,7 +43,7 @@ TEST(Linalg, Tensor) {
std::vector<double> data(2 * 3 * 4, 0);
std::iota(data.begin(), data.end(), 0);

-  Tensor<double> t{data, {2, 3, 4}, -1};
+  TensorView<double> t{data, {2, 3, 4}, -1};
ASSERT_EQ(t.Shape()[0], 2);
ASSERT_EQ(t.Shape()[1], 3);
ASSERT_EQ(t.Shape()[2], 4);
@@ -69,7 +69,7 @@ TEST(Linalg, Tensor) {

{
// as vector
-    Tensor<double, 1> vec{data, {data.size()}, -1};
+    TensorView<double, 1> vec{data, {data.size()}, -1};
ASSERT_EQ(vec.Size(), data.size());
ASSERT_EQ(vec.Shape(0), data.size());
ASSERT_EQ(vec.Shape().size(), 1);
@@ -80,7 +80,7 @@ TEST(Linalg, Tensor) {

{
// as matrix
-    Tensor<double, 2> mat(data, {6, 4}, -1);
+    TensorView<double, 2> mat(data, {6, 4}, -1);
auto s = mat.Slice(2, All());
ASSERT_EQ(s.Shape().size(), 1);
s = mat.Slice(All(), 1);
@@ -89,7 +89,7 @@ TEST(Linalg, Tensor) {

{
// assignment
-    Tensor<double, 3> t{data, {2, 3, 4}, 0};
+    TensorView<double, 3> t{data, {2, 3, 4}, 0};
double pi = 3.14159;
t(1, 2, 3) = pi;
ASSERT_EQ(t(1, 2, 3), pi);
@@ -98,14 +98,14 @@ TEST(Linalg, Tensor) {
{
// Don't assign the initial dimension, tensor should be able to deduce the correct dim
// for Slice.
-    Tensor<double> t{data, {2, 3, 4}, 0};
+    TensorView<double> t{data, {2, 3, 4}, 0};
auto s = t.Slice(1, 2, All());
static_assert(decltype(s)::kDimension == 1, "");
}
}

TEST(Linalg, Empty) {
-  auto t = Tensor<double, 2>{{}, {0, 3}, GenericParameter::kCpuId};
+  auto t = TensorView<double, 2>{{}, {0, 3}, GenericParameter::kCpuId};
for (int32_t i : {0, 1, 2}) {
auto s = t.Slice(All(), i);
ASSERT_EQ(s.Size(), 0);
