
Commit 5039c3a
Automated Code Change
PiperOrigin-RevId: 609295564
tensorflower-gardener committed Feb 22, 2024
1 parent 9e40776 commit 5039c3a
Showing 6 changed files with 15 additions and 15 deletions.
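
Every one of the 15 additions below is the same mechanical substitution: a bare `OkStatus()` return becomes the fully qualified `absl::OkStatus()`. As a minimal standalone sketch of the target pattern (plain Abseil, not code from this commit):

#include "absl/status/status.h"

// Illustrative helper, not from the repository: any function returning
// absl::Status signals success with absl::OkStatus().
absl::Status CheckPositive(int value) {
  if (value <= 0) {
    return absl::InvalidArgumentError("value must be positive");
  }
  return absl::OkStatus();  // Qualified spelling, as on the added lines.
}

The qualified form spells out the owning namespace instead of relying on an `OkStatus` alias being in scope.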
tensorflow/core/kernels/uniform_quant_ops/math_utils.cc (2 changes: 1 addition & 1 deletion)
@@ -48,7 +48,7 @@ Status QuantizeMultiplier(double double_multiplier,
     q_fixed = (1LL << 31) - 1;
   }
   quantized_multiplier = static_cast<int32_t>(q_fixed);
-  return OkStatus();
+  return absl::OkStatus();
 }

 }  // namespace tensorflow
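
For context on the hunk above: `QuantizeMultiplier` uses the fixed-point scheme common to quantized kernels, approximating a real multiplier as a 32-bit significand plus a power-of-two shift. A self-contained sketch of that scheme, assuming a strictly positive multiplier (illustrative names and body, not the file's exact code):

#include <cmath>
#include <cstdint>

// Represent double_multiplier as quantized_multiplier * 2^(shift - 31),
// with quantized_multiplier an int32_t in [2^30, 2^31).
void QuantizeMultiplierSketch(double double_multiplier,
                              int32_t& quantized_multiplier, int32_t& shift) {
  int exponent = 0;
  const double fraction = std::frexp(double_multiplier, &exponent);
  shift = exponent;
  int64_t q_fixed = static_cast<int64_t>(std::llround(fraction * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {
    // Rounding carried the fraction up to exactly 1.0; renormalize.
    q_fixed /= 2;
    ++shift;
  }
  if (q_fixed > (1LL << 31) - 1) {
    q_fixed = (1LL << 31) - 1;  // Defensive clamp, as in the hunk.
  }
  quantized_multiplier = static_cast<int32_t>(q_fixed);
}

The clamp to (1LL << 31) - 1 visible in the hunk guards the edge case where rounding would overflow the int32_t significand.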
tensorflow/core/kernels/uniform_quant_ops/math_utils.h (8 changes: 4 additions & 4 deletions)
@@ -137,7 +137,7 @@ Status AsymmetricQuantize(const ConstTensorTin& input_tensor,
     quantized_tensor.setZero();
     scale = 1.0;
     zero_point = 0;
-    return OkStatus();
+    return absl::OkStatus();
   }

   // Using the scale calculated from the quantization range and data range,
@@ -166,7 +166,7 @@ Status AsymmetricQuantize(const ConstTensorTin& input_tensor,

   AffineQuantize(input_tensor, inv_scale, zero_point, quantization_min_val,
                  quantization_max_val, quantized_tensor);
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Given double_multiplier, quantize it where it is represented by two int32_t,
@@ -227,7 +227,7 @@ Status PerTensorToPerTensorRequantize(
             input_zero_point, output_zero_point, quantization_min_val,
             quantization_max_val);
       });
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Requantize where the input or output contains any per-axis quantized cases.
@@ -298,7 +298,7 @@ Status PerAxisRequantize(OpKernelContext* context, const Tensor& input,
             quantization_min_val, quantization_max_val);
       });
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 }  // namespace internal
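
Both `AsymmetricQuantize` hunks above follow the standard asymmetric recipe: derive a scale and zero point from the tensor's value range, affinely map values into the integer range, and exit early for all-zero input. A scalar sketch of that recipe (the header's real code is templated over Eigen tensor types and returns absl::Status):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Scalar sketch of asymmetric quantization. Assumes `input` is non-empty
// and int8-style limits such as [-128, 127].
void AsymmetricQuantizeSketch(const std::vector<float>& input,
                              int32_t quantization_min_val,
                              int32_t quantization_max_val, float& scale,
                              int32_t& zero_point,
                              std::vector<int8_t>& quantized) {
  const auto [min_it, max_it] =
      std::minmax_element(input.begin(), input.end());
  // The representable range must contain 0 so that zero encodes exactly.
  const float rmin = std::min(0.0f, *min_it);
  const float rmax = std::max(0.0f, *max_it);
  if (rmin == rmax) {
    // All-zero input: mirrors the early return in the first hunk above.
    scale = 1.0f;
    zero_point = 0;
    quantized.assign(input.size(), 0);
    return;
  }
  scale = (rmax - rmin) /
          static_cast<float>(quantization_max_val - quantization_min_val);
  const float inv_scale = 1.0f / scale;
  zero_point = static_cast<int32_t>(
      std::lround(quantization_min_val - rmin * inv_scale));
  quantized.resize(input.size());
  for (std::size_t i = 0; i < input.size(); ++i) {
    const int32_t q =
        static_cast<int32_t>(std::lround(input[i] * inv_scale)) + zero_point;
    quantized[i] = static_cast<int8_t>(
        std::clamp(q, quantization_min_val, quantization_max_val));
  }
}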
tensorflow/core/kernels/uniform_quant_ops/tensor_utils.cc (2 changes: 1 addition & 1 deletion)
@@ -53,7 +53,7 @@ Status QuantizationAxisAndShapeValid(const TensorShape& data_shape,
           " and zero_points shape ", zero_points_shape.DebugString());
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 TensorShape TransposedShape(const TensorShape& in_shape,
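
`QuantizationAxisAndShapeValid`, touched above, enforces the per-tensor vs. per-axis contract: with quantization_axis == -1 the scales and zero points must be scalars, otherwise rank-1 with one entry per slice along the axis. A hedged sketch of that check over plain dimension vectors (the real function takes TensorShape arguments and builds the error message shown in the hunk):

#include <cstdint>
#include <vector>

// Returns true when the scales/zero_points shapes are consistent with
// data_shape and the quantization axis. An empty shape vector stands for
// a scalar tensor.
bool QuantizationAxisAndShapeValidSketch(
    const std::vector<int64_t>& data_shape,
    const std::vector<int64_t>& scales_shape,
    const std::vector<int64_t>& zero_points_shape, int quantization_axis) {
  if (scales_shape != zero_points_shape) return false;
  if (quantization_axis == -1) {
    return scales_shape.empty();  // Per-tensor: scalar scale and zero point.
  }
  if (quantization_axis < 0 ||
      quantization_axis >= static_cast<int>(data_shape.size())) {
    return false;
  }
  // Per-axis: one scale/zero point per slice along the quantization axis.
  return scales_shape.size() == 1 &&
         scales_shape[0] == data_shape[quantization_axis];
}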
@@ -158,7 +158,7 @@ Status EvalQuantizedAdd(OpKernelContext* context, const Tensor& lhs,
                       lhs_quantization_axis, rhs_quantization_axis,
                       output_quantization_axis, output);

-  return OkStatus();
+  return absl::OkStatus();
 }

 }  // namespace
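
The hunk above sits in `EvalQuantizedAdd`. Conceptually, a quantized add maps both operands out of their own quantization parameters and back into the output's before summing. A simplified per-tensor, per-element sketch (the kernel itself also handles the per-axis cases named by the quantization_axis arguments):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Per-tensor sketch: dequantize both operands, add, requantize to the
// output parameters.
int8_t QuantizedAddSketch(int8_t lhs, float lhs_scale, int32_t lhs_zero_point,
                          int8_t rhs, float rhs_scale, int32_t rhs_zero_point,
                          float output_scale, int32_t output_zero_point,
                          int32_t quantization_min_val,
                          int32_t quantization_max_val) {
  const float real_sum = lhs_scale * (lhs - lhs_zero_point) +
                         rhs_scale * (rhs - rhs_zero_point);
  const int32_t q =
      static_cast<int32_t>(std::lround(real_sum / output_scale)) +
      output_zero_point;
  return static_cast<int8_t>(
      std::clamp(q, quantization_min_val, quantization_max_val));
}

Production kernels typically fold the three scales into fixed-point multipliers rather than going through float, but the arithmetic is the same.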
@@ -302,7 +302,7 @@ Status EvalLhsPerTensorAndRhsPerTensorQuantizedConv(
             /*input_zero_point=*/0, output_zero_point,
             output_quantization_min_val, output_quantization_max_val);
       });
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Quantized Conv on per-tensor quantized padded and dilated transposed lhs and
@@ -383,7 +383,7 @@ Status EvalLhsPerTensorAndRhsPerChannelQuantizedConv(
                                   : out_feature_idx],
             output_quantization_min_val, output_quantization_max_val);
       });
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Quantized Conv on per-batch quantized padded and dilated transposed lhs and
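
In the hunk above, the ternary `is_output_scales_scalar ? 0 : out_feature_idx` is the per-tensor vs. per-channel switch: a scalar output scale is broadcast to every output feature, while per-channel scales are indexed by the feature being written. A small illustrative sketch of requantizing one conv accumulator under that convention (names are assumptions, not the kernel's):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Requantize one int32 conv accumulator to int8 output. `effective_scales`
// holds (input_scale * filter_scale / output_scale) values: one per output
// feature for per-channel scales, a single entry when scalar.
int8_t RequantizeAccumulator(int32_t acc, const float* effective_scales,
                             bool is_output_scales_scalar,
                             int out_feature_idx, int32_t output_zero_point,
                             int32_t quantization_min_val,
                             int32_t quantization_max_val) {
  const float scale =
      effective_scales[is_output_scales_scalar ? 0 : out_feature_idx];
  const int32_t q =
      static_cast<int32_t>(std::lround(acc * scale)) + output_zero_point;
  return static_cast<int8_t>(
      std::clamp(q, quantization_min_val, quantization_max_val));
}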
@@ -509,7 +509,7 @@ Status EvalQuantizedConv(
   // Transpose transposed_out back to out.
   const auto& out_perm_back = OutBackTransposePerm(out_perm);
   Transpose<Tout>(out_transposed, out_perm_back, out);
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Given float `lhs` and quantized `rhs`, performs per-batch dynamic range
@@ -593,7 +593,7 @@ Status EvalHybridConv(
   // Transpose transposed_out back to out.
   const auto& out_perm_back = OutBackTransposePerm(out_perm);
   Transpose<float>(out_transposed, out_perm_back, out);
-  return OkStatus();
+  return absl::OkStatus();
 }

 }  // namespace
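
`EvalHybridConv` above implements dynamic-range execution: the float lhs is quantized on the fly (per batch, per its comment), the heavy arithmetic runs in integers against the pre-quantized rhs, and one float multiply recovers the real-valued result. A toy dot-product sketch of the idea, assuming symmetric int8 weights (a simplification):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Toy dynamic-range kernel: quantize float inputs to int8 at run time,
// accumulate in int32 against pre-quantized weights, rescale to float.
// Assumes input and quantized_weights have the same length.
float HybridDotSketch(const std::vector<float>& input,
                      const std::vector<int8_t>& quantized_weights,
                      float weight_scale) {
  // On-the-fly symmetric quantization of the float input.
  float abs_max = 0.0f;
  for (float v : input) abs_max = std::max(abs_max, std::abs(v));
  if (abs_max == 0.0f) return 0.0f;
  const float input_scale = abs_max / 127.0f;
  int32_t acc = 0;
  for (std::size_t i = 0; i < input.size(); ++i) {
    const int8_t q =
        static_cast<int8_t>(std::lround(input[i] / input_scale));
    acc += static_cast<int32_t>(q) * quantized_weights[i];
  }
  // One float multiply recovers the real-valued result.
  return acc * input_scale * weight_scale;
}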
@@ -39,7 +39,7 @@ Status DotInputShapeValid(const TensorShape& lhs_shape,
         "shape ",
         lhs_shape.DebugString(), " and rhs shape ", rhs_shape.DebugString());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Performs dot(lhs, rhs) and writes output to output. Assumes that output is
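
`DotInputShapeValid` above guards the matmul contract before the quantized and hybrid dot paths run. A sketch of such a check; the rank-2 shape contract here is an assumption inferred from the op, and the real function formats the absl::Status message shown in the hunk:

#include <cstdint>
#include <vector>

// Assumed contract (not copied from the file): rank-2 operands with a
// matching inner dimension, i.e. lhs is [m, k] and rhs is [k, n].
bool DotInputShapeValidSketch(const std::vector<int64_t>& lhs_shape,
                              const std::vector<int64_t>& rhs_shape) {
  return lhs_shape.size() == 2 && rhs_shape.size() == 2 &&
         lhs_shape[1] == rhs_shape[0];
}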
@@ -109,7 +109,7 @@ Status EvalLhsPerTensorAndRhsPerTensorQuantizedDot(
             /*input_zero_point=*/0, output_zero_point,
             output_quantization_min_val, output_quantization_max_val);
       });
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Performs dot on per-tensor quantized lhs and per-channel (dimension 1)
@@ -178,7 +178,7 @@ Status EvalLhsPerTensorAndRhsPerChannelQuantizedDot(
             output_zero_points_data[is_output_scales_scalar ? 0 : out_c],
             output_quantization_min_val, output_quantization_max_val);
       });
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Performs dot on per-batch (dimension 0) quantized lhs and per-tensor
@@ -300,7 +300,7 @@ Status EvalHybridDot(OpKernelContext* context, const Tensor& lhs,
         rhs_scales.scalar<float>()(), rhs_zero_points.scalar<int32_t>()(),
         output);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 }  // namespace
