
Commit

delete debug codes
RichardWooSJTU committed Nov 3, 2022
1 parent 15ea2d7 commit 084c630
Showing 2 changed files with 1 addition and 50 deletions.
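For context on the statements this commit removes: many of the deleted lines are VLOG(0) calls. Under glog-style verbosity, which Paddle's VLOG is assumed here to follow, VLOG(n) is emitted whenever the verbosity level is at least n, and the level defaults to 0, so VLOG(0) output appears on every run rather than being opt-in. A minimal, self-contained glog sketch (plain glog, not Paddle code) illustrating that behaviour:

// Minimal sketch, assuming standard glog semantics for VLOG.
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // route log output to stderr for this demo

  // Verbosity defaults to 0, so this prints on every run -- the behaviour
  // that makes leftover VLOG(0) calls read as debug noise.
  VLOG(0) << "printed by default";

  // Printed only when run with --v=3 (or GLOG_v=3), i.e. opt-in debug output.
  VLOG(3) << "printed only at verbosity >= 3";
  return 0;
}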
27 changes: 1 addition & 26 deletions paddle/fluid/framework/ir/fuse_multi_transformer_layer_pass.cc
@@ -37,15 +37,12 @@ MultiTransformerLayerPattern::operator()(bool enable_int8,
bool is_decoder) {
std::string fused_multi_transformer_name =
enable_int8 ? "fused_multi_transformer_int8" : "fused_multi_transformer";
// This map is used to store node_reprs, 3 * i names will be inserted
// cache_kv0_{i}, cache_kv1_{i}, fill_constant_batch_size_like_{i}
// This map is used to store node_reprs
std::unordered_map<std::string, std::string> node_reprs;

VLOG(0) << "num in pattern = " << num_fused_op;
// x0 and src_mask are unique inputs of the subgraph
auto* x0 = pattern->NewNode(x0_repr());
x0->assert_is_op_input(fused_multi_transformer_name, "X")->AsInput();

auto* src_mask = pattern->NewNode(src_mask_repr());
src_mask->assert_is_op_input(fused_multi_transformer_name, "SrcMask")
->AsInput();
@@ -154,7 +151,6 @@ int FuseMultiTransformerLayerPass::BuildFusion(Graph* graph,
Scope* scope) const {
GraphPatternDetector gpd;
auto* pattern = gpd.mutable_pattern();
VLOG(0) << "In build fusion";

// TODO(wufeisheng): Get enable_int8 attr from graph after
// fused_multi_transformer pass with int8 merged
@@ -164,11 +160,9 @@ int FuseMultiTransformerLayerPass::BuildFusion(Graph* graph,
bool is_decoder = false;

if (graph->Has(kFusedMultiTransformerEncoderFusionCount)) {
VLOG(0) << "encoder fusion count";
num_fuse_op = graph->Get<int>(kFusedMultiTransformerEncoderFusionCount);
is_decoder = false;
} else if (graph->Has(kFusedMultiTransformerDecoderFusionCount)) {
VLOG(0) << "decoder fusion count";
num_fuse_op = graph->Get<int>(kFusedMultiTransformerDecoderFusionCount);
is_decoder = true;
}
@@ -186,27 +180,17 @@ int FuseMultiTransformerLayerPass::BuildFusion(Graph* graph,
patterns::MultiTransformerLayerPattern multi_layer_pattern(pattern,
name_scope);
auto node_reprs = multi_layer_pattern(enable_int8, num_fuse_op, is_decoder);
for (auto p : node_reprs) {
VLOG(0) << "key: " << p.first << " value: " << p.second;
}

VLOG(0) << "Finish build pattern";
int fusion_count{0};
auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
Graph* graph) {
VLOG(0) << "handle FuseMultiTransformerLayerPass";
VLOG(0) << "subgraph.size()" << subgraph.size();

///////////////////
//// Get nodes ////
///////////////////

GET_IR_NODE_FROM_SUBGRAPH(src_mask, src_mask, multi_layer_pattern);

GET_IR_NODE_FROM_SUBGRAPH(x0, x0, multi_layer_pattern);

VLOG(0) << "Get input node";

std::vector<Node*> fuse_op_nodes;
std::vector<Node*> out_nodes;

@@ -278,7 +262,6 @@ int FuseMultiTransformerLayerPass::BuildFusion(Graph* graph,
for (const auto& input_name : inputs_names) {
MergeInput(fuse_op_descs[0], fuse_op_input_var_name_maps, input_name);
}
VLOG(0) << "Finsh Merge input";

// Merge outputs
fuse_op_descs[0]->SetOutput(
@@ -291,9 +274,6 @@ int FuseMultiTransformerLayerPass::BuildFusion(Graph* graph,
out_var_names.begin(),
out_var_names.end());
}
// for (auto out_name : output_names0["CacheKVOut"]) {
// VLOG(0) << "out_name " << out_name;
// }
fuse_op_descs[0]->SetOutput("CacheKVOut", merged_cache_kv_out_names);

////////////////
@@ -315,7 +295,6 @@ int FuseMultiTransformerLayerPass::BuildFusion(Graph* graph,
// Relink fuse op -> out
IR_NODE_UNLINK(fuse_op_nodes[num_fuse_op - 1], out_nodes[num_fuse_op - 1]);
IR_NODE_LINK_TO(fuse_op_nodes[0], out_nodes[num_fuse_op - 1]);
VLOG(0) << "Finsh relinks";

/////////////////////////////
//// Delete unused nodes ////
@@ -330,7 +309,6 @@ int FuseMultiTransformerLayerPass::BuildFusion(Graph* graph,
}

GraphSafeRemoveNodes(graph, marked_fuse_op_nodes);
VLOG(0) << "Finsh remove";
++fusion_count;
};

@@ -346,9 +324,6 @@ void FuseMultiTransformerLayerPass::ApplyImpl(Graph* graph) const {
platform::errors::Fatal("During the fuse_multi_transformer_layer pass, "
"The scope should not be null."));
int fusion_count = BuildFusion(graph, name_scope_, scope);
VLOG(0) << "fusion_count is " << fusion_count;

// PD_THROW("IMULTILAYER");

AddStatis(fusion_count);
}
@@ -64,39 +64,17 @@ Scope* CreateParamScope() {
return param_scope;
}
TEST(FuseMultiTransformerLayerPass, encoder_fp) {
// Layers layers;
// int num_layers = 3;
// // Vars
// auto* x = layers.data("x", {1, 128, 1024});
// auto* src_mask = layers.data("src_mask", {1, 16, 128, 128});

// auto* ln_scale = layers.data("ln_scale", {1024}, true);
// auto* ln_bias = layers.data("ln_bias", {1024}, true);
// auto* ffn_ln_scale = layers.data("ffn_ln_scale", {1024}, true);
// auto* ffn_ln_bias = layers.data("ffn_ln_bias", {1024}, true);
// auto* qkv_w = layers.data("qkv_w", {3, 16, 64, 1024}, true);
// auto* out_linear_w = layers.data("out_linear_w", {1024, 1024}, true);
// auto* ffn1_w = layers.data("ffn1_w", {1024, 4096}, true);
// auto* ffn2_w = layers.data("ffn2_w", {4096, 1024}, true);
// auto* qkv_bias = layers.data("qkv_bias", {3072}, true);
// auto* out_linear_bias = layers.data("out_linear_bias", {1024}, true);
// auto* ffn1_bias = layers.data("ffn1_bias", {4096}, true);
// auto* ffn2_bias = layers.data("ffn2_bias", {1024}, true);

DEF_INPUT_DATA

// Layers
for (int i = 0; i < num_layers; ++i) {
std::cout << "begin to add fill const layer " << i << std::endl;
auto* cache_kv = layers.fill_constant_batch_size_like(
x,
static_cast<int>(proto::VarType::FP32),
0,
1,
{2, -1, 16, 1024, 64},
0);
std::cout << "begin to add fused_multi_transformer layer " << i
<< std::endl;
auto* out = layers.fused_multi_transformer(x,
cache_kv,
src_mask,
@@ -147,8 +125,6 @@ TEST(FuseMultiTransformerLayerPass, decoder_fp) {
for (int i = 0; i < num_layers; ++i) {
auto* shape_out = layers.shape(src_mask);
auto* time_stamp = layers.slice(shape_out, {0}, {3}, {4});
std::cout << "begin to add fused_multi_transformer layer " << i
<< std::endl;
auto* out = layers.fused_multi_transformer(x,
cache_kv,
src_mask,
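As a closing note on what the pass in the first file does (independent of this cleanup commit): it collapses N consecutive fused_multi_transformer ops into a single op by concatenating each layer's per-layer inputs and outputs, such as the cache_kv variables, onto the first op, relinking that op to the last layer's output, and deleting the remaining ops. The sketch below uses only hypothetical standalone types, not Paddle's IR API, to illustrate that merge step:

// Illustrative sketch with hypothetical types; it mirrors the idea behind
// MergeInput / SetOutput("CacheKVOut", ...) in the pass, not Paddle's API.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct FusedOp {
  // slot name (e.g. "CacheKV") -> variable names used by this layer
  std::map<std::string, std::vector<std::string>> inputs;
  std::map<std::string, std::vector<std::string>> outputs;
};

// Concatenate every layer's variables for one slot onto layer 0.
void MergeSlot(std::vector<FusedOp>& ops, const std::string& slot, bool is_input) {
  auto& merged = is_input ? ops[0].inputs[slot] : ops[0].outputs[slot];
  for (size_t i = 1; i < ops.size(); ++i) {
    const auto& src = is_input ? ops[i].inputs[slot] : ops[i].outputs[slot];
    merged.insert(merged.end(), src.begin(), src.end());
  }
}

int main() {
  // Three single-layer ops, each with its own cache_kv in/out variable.
  std::vector<FusedOp> ops(3);
  for (int i = 0; i < 3; ++i) {
    ops[i].inputs["CacheKV"] = {"cache_kv_" + std::to_string(i)};
    ops[i].outputs["CacheKVOut"] = {"cache_kv_out_" + std::to_string(i)};
  }

  MergeSlot(ops, "CacheKV", /*is_input=*/true);
  MergeSlot(ops, "CacheKVOut", /*is_input=*/false);

  // ops[0] now carries all three layers' cache variables; in the real pass
  // the other ops are removed from the graph and the first op is relinked
  // to the last layer's output node.
  for (const auto& name : ops[0].inputs["CacheKV"]) std::cout << name << " ";
  std::cout << std::endl;  // prints: cache_kv_0 cache_kv_1 cache_kv_2
  return 0;
}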
