Rename BOOST_GET macros (#44368)
* Rename BOOST_GET macros

* Fix conflicts
From00 committed Jul 19, 2022
1 parent d4bb2ad commit 4b085c5
Showing 278 changed files with 1,321 additions and 1,293 deletions.
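Across all 278 files the change is a pure rename: every call site keeps its arguments and only swaps BOOST_GET / BOOST_GET_CONST for PADDLE_GET / PADDLE_GET_CONST. Below is a minimal self-contained sketch of the access pattern these macros wrap, modelled with std::variant; the checked-get semantics and the stand-in names are assumptions for illustration, not Paddle's actual definitions.

#include <iostream>
#include <variant>

// Stand-in for framework::Attribute; the real type holds many more alternatives.
using Attribute = std::variant<int, float, bool>;

// Hypothetical analogue of the renamed macro: a type-checked get on the variant.
// std::get throws std::bad_variant_access on a type mismatch, where the real
// macro is assumed to raise a Paddle enforce error instead.
#define PADDLE_GET_CONST_SKETCH(T, attr) std::get<T>(attr)

int main() {
  Attribute col = 3;
  // Before this commit a call site read:  BOOST_GET_CONST(int, op->GetAttr("col"));
  // After this commit it reads:           PADDLE_GET_CONST(int, op->GetAttr("col"));
  int idx = PADDLE_GET_CONST_SKETCH(int, col);
  std::cout << idx << "\n";  // prints 3
  return 0;
}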
8 changes: 4 additions & 4 deletions paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -416,7 +416,7 @@ bool DistModel::PrepareFeedAndFetch() {
for (auto *op : program_->Block(0).AllOps()) {
if (op->Type() == "feed") {
VLOG(3) << "feed op with feed var: " << op->Output("Out")[0];
- int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
+ int idx = PADDLE_GET_CONST(int, op->GetAttr("col"));
if (feeds_.size() <= static_cast<size_t>(idx)) {
feeds_.resize(idx + 1);
}
@@ -446,7 +446,7 @@ bool DistModel::PrepareFeedAndFetch() {
}
} else if (op->Type() == "fetch") {
VLOG(3) << "fetch op with fetch var: " << op->Input("X")[0];
- int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
+ int idx = PADDLE_GET_CONST(int, op->GetAttr("col"));
if (fetches_.size() <= static_cast<size_t>(idx)) {
fetches_.resize(idx + 1);
}
@@ -507,7 +507,7 @@ bool DistModel::FetchResults(std::vector<DistModelTensor> *output_data,
VLOG(3) << "DistModel is fetch results.";
output_data->resize(fetches_.size());
for (size_t i = 0; i < fetches_.size(); ++i) {
- int idx = BOOST_GET_CONST(int, fetches_[i]->GetAttr("col"));
+ int idx = PADDLE_GET_CONST(int, fetches_[i]->GetAttr("col"));
VLOG(3) << "Fetching data for [" << idx_to_fetches_[idx] << "]";
PADDLE_ENFORCE_EQ(
static_cast<size_t>(idx),
@@ -518,7 +518,7 @@ bool DistModel::FetchResults(std::vector<DistModelTensor> *output_data,
i));
framework::FetchType &fetch_var =
framework::GetFetchVariable(*scope, "fetch", idx);
- auto &fetch = BOOST_GET(framework::LoDTensor, fetch_var);
+ auto &fetch = PADDLE_GET(framework::LoDTensor, fetch_var);
auto type = framework::TransToProtoVarType(fetch.dtype());
auto output = &(output_data->at(i));
output->name = idx_to_fetches_[idx];
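In the feed/fetch handling above, the op's "col" attribute supplies the slot index, and feeds_/fetches_ are grown on demand before the entry is recorded. A small sketch of that bookkeeping over a plain std::vector follows; the helper name and the int payload are illustrative, not Paddle's.

#include <cstddef>
#include <iostream>
#include <vector>

// Grow the holder so slot `idx` exists, then store the entry there,
// mirroring the resize(idx + 1) pattern in DistModel::PrepareFeedAndFetch.
void PlaceAt(std::vector<int> &slots, int idx, int value) {
  if (slots.size() <= static_cast<std::size_t>(idx)) slots.resize(idx + 1);
  slots[idx] = value;
}

int main() {
  std::vector<int> feeds;
  PlaceAt(feeds, 2, 42);
  std::cout << feeds.size() << "\n";  // 3: slots 0 and 1 are default-initialized
  return 0;
}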
@@ -398,7 +398,7 @@ fused_attention_dygraph_function(

bool pre_layer_norm = false;
if (attrs.count("pre_layer_norm")) {
- pre_layer_norm = BOOST_GET_CONST(bool, attrs.at("pre_layer_norm"));
+ pre_layer_norm = PADDLE_GET_CONST(bool, attrs.at("pre_layer_norm"));
}

// Set Attributes
@@ -318,7 +318,7 @@ fused_feedforward_dygraph_function(

bool pre_layer_norm = false;
if (attrs.count("pre_layer_norm")) {
- pre_layer_norm = BOOST_GET_CONST(bool, attrs.at("pre_layer_norm"));
+ pre_layer_norm = PADDLE_GET_CONST(bool, attrs.at("pre_layer_norm"));
}

// Set Attributes
@@ -303,12 +303,12 @@ fused_gate_attention_dygraph_function(

bool merge_qkv = true;
if (attrs.count("merge_qkv")) {
- merge_qkv = BOOST_GET_CONST(bool, attrs.at("merge_qkv"));
+ merge_qkv = PADDLE_GET_CONST(bool, attrs.at("merge_qkv"));
}

bool has_gating = true;
if (attrs.count("has_gating")) {
- has_gating = BOOST_GET_CONST(bool, attrs.at("has_gating"));
+ has_gating = PADDLE_GET_CONST(bool, attrs.at("has_gating"));
}

// Set Attributes
@@ -38,7 +38,7 @@ fused_attentionGradNodeCompat::operator()(

bool pre_layer_norm = false;
if (attr_map_.count("pre_layer_norm")) {
- pre_layer_norm = BOOST_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
+ pre_layer_norm = PADDLE_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
}

std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins0 =
@@ -40,7 +40,7 @@ fused_feedforwardGradNodeCompat::operator()(

bool pre_layer_norm = false;
if (attr_map_.count("pre_layer_norm")) {
- pre_layer_norm = BOOST_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
+ pre_layer_norm = PADDLE_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
}

std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins0 =
@@ -40,12 +40,12 @@ fused_gate_attentionGradNodeCompat::operator()(

bool merge_qkv = true;
if (attr_map_.count("merge_qkv")) {
- merge_qkv = BOOST_GET_CONST(bool, attr_map_.at("merge_qkv"));
+ merge_qkv = PADDLE_GET_CONST(bool, attr_map_.at("merge_qkv"));
}

bool has_gating = true;
if (attr_map_.count("has_gating")) {
- has_gating = BOOST_GET_CONST(bool, attr_map_.at("has_gating"));
+ has_gating = PADDLE_GET_CONST(bool, attr_map_.at("has_gating"));
}

std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins0 =
8 changes: 4 additions & 4 deletions paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -352,7 +352,7 @@ static typename std::enable_if<IsVector, std::string>::type GetAttrValue(
const framework::Attribute& attr) {
std::string val = "";
val += "{";
- for (auto x : BOOST_GET_CONST(std::vector<T>, attr)) {
+ for (auto x : PADDLE_GET_CONST(std::vector<T>, attr)) {
val += std::to_string(x) + ",";
}
if (val.size() > 1) val.pop_back();
@@ -363,7 +363,7 @@ static typename std::enable_if<IsVector, std::string>::type GetAttrValue(
template <typename T, bool IsVector>
static typename std::enable_if<!IsVector, std::string>::type GetAttrValue(
const framework::Attribute& attr) {
- return std::to_string(BOOST_GET_CONST(T, attr));
+ return std::to_string(PADDLE_GET_CONST(T, attr));
}

static std::pair<std::string, std::string> GetAttrType(
@@ -385,7 +385,7 @@ static std::pair<std::string, std::string> GetAttrType(
case (3): {
ret = "std::string";
if (is_arg) ret += "&";
- val = "\"" + BOOST_GET_CONST(std::string, attr) + "\"";
+ val = "\"" + PADDLE_GET_CONST(std::string, attr) + "\"";
break;
}
case (4): {
@@ -404,7 +404,7 @@ static std::pair<std::string, std::string> GetAttrType(
ret = "std::vector<std::string>";
if (is_arg) ret += "&";
val += "{";
- for (auto x : BOOST_GET_CONST(std::vector<std::string>, attr)) {
+ for (auto x : PADDLE_GET_CONST(std::vector<std::string>, attr)) {
val += "\"" + x + "\"" + ",";
}
if (val.size() > 1) val.pop_back();
20 changes: 10 additions & 10 deletions paddle/fluid/eager/to_static/run_program_op_node.h
@@ -191,16 +191,16 @@ inline void RunProgramAPI(
std::vector<paddle::experimental::Tensor *> &dout, // NOLINT
const paddle::framework::AttributeMap &attrs) {
VLOG(2) << "RunProgramOpKernel Compute";
- auto start_op_index = BOOST_GET_CONST(int64_t, attrs.at("start_op_index"));
- auto end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
+ auto start_op_index = PADDLE_GET_CONST(int64_t, attrs.at("start_op_index"));
+ auto end_op_index = PADDLE_GET_CONST(int64_t, attrs.at("end_op_index"));
// In the original run_program OP, the default value of the is_test
// attribute is false, we should check if there is is_test parameter
// in attrs
auto is_test = false;
if (attrs.count("is_test")) {
- is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+ is_test = PADDLE_GET_CONST(bool, attrs.at("is_test"));
}
- auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id"));
+ auto program_id = PADDLE_GET_CONST(int64_t, attrs.at("program_id"));

// NOTE(chenweihang): In order not to add new variable type, use vector
// here. Originally, here can use scope directly.
Expand All @@ -226,8 +226,8 @@ inline void RunProgramAPI(
details::ShareTensorsIntoScope(x, &scope);
details::ShareTensorsIntoScope(params, &scope);

- auto *global_block =
- BOOST_GET_CONST(paddle::framework::BlockDesc *, attrs.at("global_block"));
+ auto *global_block = PADDLE_GET_CONST(paddle::framework::BlockDesc *,
+ attrs.at("global_block"));
const auto &place = egr::Controller::Instance().GetExpectedPlace();

if (end_op_index > start_op_index) {
@@ -292,11 +292,11 @@ inline void RunProgramGradAPI(
// if all output vars are set to stop_gradient, grad op no need to executed
if (x_grad.empty() && params_grad.empty()) return;

- auto *global_block =
- BOOST_GET_CONST(paddle::framework::BlockDesc *, attrs.at("global_block"));
- auto orig_end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
+ auto *global_block = PADDLE_GET_CONST(paddle::framework::BlockDesc *,
+ attrs.at("global_block"));
+ auto orig_end_op_index = PADDLE_GET_CONST(int64_t, attrs.at("end_op_index"));

- auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id"));
+ auto program_id = PADDLE_GET_CONST(int64_t, attrs.at("program_id"));
// NOTE: skip `shape` and `fill_constant` op created by
// fluid.backward.gradients, one forward output will generate one `shape`
// and `fill_constant`
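The is_test handling above shows the recurring idiom in these files: check attrs.count(...) and only then extract, otherwise keep a compiled-in default. A hedged sketch of that idiom over a plain std::map and std::variant follows; the names GetBoolOr and AttributeMap here are illustrative, not Paddle's.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <variant>

using Attribute = std::variant<bool, int64_t>;
using AttributeMap = std::map<std::string, Attribute>;

// Return the bool attribute if present, otherwise the caller's default,
// mirroring the is_test / pre_layer_norm checks in the diff.
bool GetBoolOr(const AttributeMap &attrs, const std::string &name, bool def) {
  auto it = attrs.find(name);
  return it == attrs.end() ? def : std::get<bool>(it->second);
}

int main() {
  AttributeMap attrs{{"is_test", Attribute(true)}};
  std::cout << std::boolalpha << GetBoolOr(attrs, "is_test", false) << "\n";        // true
  std::cout << std::boolalpha << GetBoolOr(attrs, "some_other_flag", true) << "\n"; // true (default)
  return 0;
}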
26 changes: 13 additions & 13 deletions paddle/fluid/framework/attribute.cc
@@ -21,31 +21,31 @@ namespace framework {
paddle::any GetAttrValue(const Attribute& attr) {
switch (AttrTypeID(attr)) {
case proto::AttrType::INT:
- return BOOST_GET_CONST(int, attr);
+ return PADDLE_GET_CONST(int, attr);
case proto::AttrType::FLOAT:
- return BOOST_GET_CONST(float, attr);
+ return PADDLE_GET_CONST(float, attr);
case proto::AttrType::STRING:
- return BOOST_GET_CONST(std::string, attr);
+ return PADDLE_GET_CONST(std::string, attr);
case proto::AttrType::INTS:
- return BOOST_GET_CONST(std::vector<int>, attr);
+ return PADDLE_GET_CONST(std::vector<int>, attr);
case proto::AttrType::FLOATS:
- return BOOST_GET_CONST(std::vector<float>, attr);
+ return PADDLE_GET_CONST(std::vector<float>, attr);
case proto::AttrType::STRINGS:
- return BOOST_GET_CONST(std::vector<std::string>, attr);
+ return PADDLE_GET_CONST(std::vector<std::string>, attr);
case proto::AttrType::BOOLEAN:
- return BOOST_GET_CONST(bool, attr);
+ return PADDLE_GET_CONST(bool, attr);
case proto::AttrType::BOOLEANS:
- return BOOST_GET_CONST(std::vector<bool>, attr);
+ return PADDLE_GET_CONST(std::vector<bool>, attr);
case proto::AttrType::LONG:
- return BOOST_GET_CONST(int64_t, attr);
+ return PADDLE_GET_CONST(int64_t, attr);
case proto::AttrType::LONGS:
- return BOOST_GET_CONST(std::vector<int64_t>, attr);
+ return PADDLE_GET_CONST(std::vector<int64_t>, attr);
case proto::AttrType::FLOAT64S:
- return BOOST_GET_CONST(std::vector<double>, attr);
+ return PADDLE_GET_CONST(std::vector<double>, attr);
case proto::AttrType::BLOCK:
- return BOOST_GET_CONST(BlockDesc*, attr);
+ return PADDLE_GET_CONST(BlockDesc*, attr);
case proto::AttrType::BLOCKS:
- return BOOST_GET_CONST(std::vector<BlockDesc*>, attr);
+ return PADDLE_GET_CONST(std::vector<BlockDesc*>, attr);
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported Attribute value type `%s` for phi.",
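GetAttrValue above dispatches on the stored alternative and re-wraps it as a paddle::any. With std::variant the same shape can be written as a single std::visit, sketched here purely as an illustration with std::any standing in for paddle::any.

#include <any>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

using Attribute = std::variant<int, float, std::string, std::vector<int>>;

// Type-erase whichever alternative is currently held, analogous to
// GetAttrValue's switch over proto::AttrType followed by a typed get.
std::any ToAny(const Attribute &attr) {
  return std::visit([](const auto &v) { return std::any(v); }, attr);
}

int main() {
  Attribute a = std::string("relu");
  std::cout << std::any_cast<std::string>(ToAny(a)) << "\n";  // relu
  return 0;
}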
20 changes: 10 additions & 10 deletions paddle/fluid/framework/attribute.h
@@ -72,10 +72,10 @@ struct ExtractAttribute<bool> {

bool* operator()(Attribute& attr) const {
if (attr.type() == typeid(int)) { // NOLINT
- int val = BOOST_GET_CONST(int, attr);
+ int val = PADDLE_GET_CONST(int, attr);
attr = static_cast<bool>(val);
} else if (attr.type() == typeid(float)) { // NOLINT
- float val = BOOST_GET_CONST(float, attr);
+ float val = PADDLE_GET_CONST(float, attr);
attr = static_cast<bool>(val);
}
bool* attr_value = nullptr;
@@ -100,10 +100,10 @@ struct ExtractAttribute<int64_t> {

int64_t* operator()(Attribute& attr) const {
if (attr.type() == typeid(int)) { // NOLINT
- int val = BOOST_GET_CONST(int, attr);
+ int val = PADDLE_GET_CONST(int, attr);
attr = static_cast<int64_t>(val);
} else if (attr.type() == typeid(float)) { // NOLINT
- int val = BOOST_GET_CONST(float, attr);
+ int val = PADDLE_GET_CONST(float, attr);
attr = static_cast<int64_t>(val);
}
int64_t* attr_value = nullptr;
@@ -128,11 +128,11 @@ struct ExtractAttribute<std::vector<int64_t>> {

std::vector<int64_t>* operator()(Attribute& attr) const {
if (attr.type() == typeid(std::vector<int>)) { // NOLINT
- std::vector<int> val = BOOST_GET_CONST(std::vector<int>, attr);
+ std::vector<int> val = PADDLE_GET_CONST(std::vector<int>, attr);
std::vector<int64_t> vec(val.begin(), val.end());
attr = vec;
} else if (attr.type() == typeid(std::vector<float>)) { // NOLINT
- std::vector<float> val = BOOST_GET_CONST(std::vector<float>, attr);
+ std::vector<float> val = PADDLE_GET_CONST(std::vector<float>, attr);
std::vector<int64_t> vec(val.begin(), val.end());
attr = vec;
}
@@ -159,10 +159,10 @@ struct ExtractAttribute<float> {

float* operator()(Attribute& attr) const {
if (attr.type() == typeid(int)) { // NOLINT
- int val = BOOST_GET_CONST(int, attr);
+ int val = PADDLE_GET_CONST(int, attr);
attr = static_cast<float>(val);
} else if (attr.type() == typeid(int64_t)) { // NOLINT
- int64_t val = BOOST_GET_CONST(int64_t, attr);
+ int64_t val = PADDLE_GET_CONST(int64_t, attr);
attr = static_cast<float>(val);
}
float* attr_value = nullptr;
@@ -187,11 +187,11 @@ struct ExtractAttribute<std::vector<double>> {

std::vector<double>* operator()(Attribute& attr) const {
if (attr.type() == typeid(std::vector<int>)) { // NOLINT
- std::vector<int> val = BOOST_GET_CONST(std::vector<int>, attr);
+ std::vector<int> val = PADDLE_GET_CONST(std::vector<int>, attr);
std::vector<double> vec(val.begin(), val.end());
attr = vec;
} else if (attr.type() == typeid(std::vector<float>)) { // NOLINT
- std::vector<float> val = BOOST_GET_CONST(std::vector<float>, attr);
+ std::vector<float> val = PADDLE_GET_CONST(std::vector<float>, attr);
std::vector<double> vec(val.begin(), val.end());
attr = vec;
}
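The ExtractAttribute specializations above first coerce a narrower stored type (e.g. an int attribute requested as bool, int64_t, or float) by rewriting the variant in place, and only then take the typed pointer. A compact sketch of that coercion with std::variant follows; it is a model of the pattern, not Paddle's implementation.

#include <cstdint>
#include <iostream>
#include <variant>

using Attribute = std::variant<int, int64_t, float>;

// If the stored value is an int or float, rewrite the variant so it holds an
// int64_t, then hand back a pointer to it (nullptr if it still does not).
int64_t *ExtractInt64(Attribute &attr) {
  if (std::holds_alternative<int>(attr)) {
    attr = static_cast<int64_t>(std::get<int>(attr));
  } else if (std::holds_alternative<float>(attr)) {
    attr = static_cast<int64_t>(std::get<float>(attr));
  }
  return std::get_if<int64_t>(&attr);
}

int main() {
  Attribute a = 7;  // stored as int
  if (int64_t *p = ExtractInt64(a)) std::cout << *p << "\n";  // 7
  return 0;
}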
5 changes: 3 additions & 2 deletions paddle/fluid/framework/block_desc.cc
@@ -274,11 +274,12 @@ void BlockDesc::MoveFrom(BlockDesc *block) {
const auto &attr_value = pair.second;
auto attr_type = static_cast<proto::AttrType>(attr_value.index() - 1);
if (attr_type == proto::AttrType::BLOCK) {
- auto block_id = BOOST_GET_CONST(BlockDesc *, attr_value)->ID();
+ auto block_id = PADDLE_GET_CONST(BlockDesc *, attr_value)->ID();
dst_op->SetBlockAttr(attr_name, prog_->MutableBlock(block_id));
VLOG(10) << "Set block attr " << attr_name << " id " << block_id;
} else if (attr_type == proto::AttrType::BLOCKS) {
- auto old_blocks = BOOST_GET_CONST(std::vector<BlockDesc *>, attr_value);
+ auto old_blocks =
+ PADDLE_GET_CONST(std::vector<BlockDesc *>, attr_value);
std::vector<BlockDesc *> new_blocks;
new_blocks.reserve(old_blocks.size());
for (auto *b : old_blocks) {
6 changes: 3 additions & 3 deletions paddle/fluid/framework/details/async_ssa_graph_executor.cc
@@ -174,16 +174,16 @@ FetchResultType AsyncSSAGraphExecutor::Run(
HandleException();

FetchList ret;
- auto &val = BOOST_GET(FetchList, fetch_data);
+ auto &val = PADDLE_GET(FetchList, fetch_data);
for (size_t fetch_idx = 0; fetch_idx < fetch_tensors.size(); ++fetch_idx) {
if (data_is_lod_tensor(val.at(fetch_idx))) {
std::vector<const LoDTensor *> lodtensor_ptrs;
- lodtensor_ptrs.push_back(&(BOOST_GET(LoDTensor, val.at(fetch_idx))));
+ lodtensor_ptrs.push_back(&(PADDLE_GET(LoDTensor, val.at(fetch_idx))));
LoDTensor var;
MergeLoDTensor(&var, lodtensor_ptrs, platform::CPUPlace());
ret.emplace_back(var);
} else {
- auto array = BOOST_GET(LoDTensorArray, val.at(fetch_idx));
+ auto array = PADDLE_GET(LoDTensorArray, val.at(fetch_idx));
LoDTensorArray item_array;
item_array.reserve(array.size());
for (size_t i = 0; i < array.size(); ++i) {
4 changes: 2 additions & 2 deletions paddle/fluid/framework/details/fetch_async_op_handle.cc
@@ -228,7 +228,7 @@ void FetchAsyncOpHandle::RunImpl() {
}

if (return_merged_) {
- auto &val = BOOST_GET(FetchList, *data_);
+ auto &val = PADDLE_GET(FetchList, *data_);
if (src_vars[0]->IsType<LoDTensor>()) {
// to lodtensor type
std::vector<const LoDTensor *> src_lodtensors;
@@ -263,7 +263,7 @@ void FetchAsyncOpHandle::RunImpl() {
val.at(offset_) = std::move(dst_lodtensor_array);
}
} else {
- auto &val = BOOST_GET(FetchUnmergedList, *data_);
+ auto &val = PADDLE_GET(FetchUnmergedList, *data_);
auto &dst_tensors = val.at(offset_);
dst_tensors.reserve(src_vars.size());

