Commit

Automated Code Change

PiperOrigin-RevId: 609312750
tensorflower-gardener committed Feb 22, 2024
1 parent 4042539 commit b51db3c
Showing 35 changed files with 86 additions and 86 deletions.
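Every hunk below applies the same mechanical substitution: the unqualified OkStatus() call is replaced with the explicitly namespaced absl::OkStatus() from absl/status/status.h, with no intended change in behavior. A minimal sketch of the pattern, using a hypothetical ValidateRank helper that is not part of this commit:

#include "absl/status/status.h"

// Hypothetical helper, for illustration only: it reports success the way
// the updated kernels now do, via the fully qualified absl::OkStatus().
absl::Status ValidateRank(int dims) {
  if (dims != 1) {
    return absl::InvalidArgumentError("expected a rank-1 tensor");
  }
  // Previously written as the unqualified OkStatus(); the commit spells out
  // the owning namespace.
  return absl::OkStatus();
}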
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/sparse_conditional_accumulator.h
@@ -140,7 +140,7 @@ class SparseConditionalAccumulator
}
}

- return OkStatus();
+ return absl::OkStatus();
}

void AllocateAndAssignToAccumGradFunction(
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/sparse_conditional_accumulator_op.cc
@@ -37,15 +37,15 @@ class SparseConditionalAccumulatorOp : public ConditionalAccumulatorBaseOp {
new SparseConditionalAccumulator<Device, T>(
dtype_, shape_, cinfo_.name(), reduction_type_);
*ret = accumulator;
- return OkStatus();
+ return absl::OkStatus();
};
}

// TODO(tanzheny): actually switch it to resource. You won't be able to use
// it with cond2 otherwise.
Status CheckSignature(OpKernelContext* ctx) override {
TF_RETURN_IF_ERROR(ctx->MatchSignature({}, {DT_STRING_REF}));
- return OkStatus();
+ return absl::OkStatus();
}

void SetHandleToOutput(OpKernelContext* ctx)
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/sparse_cross_op.cc
@@ -591,7 +591,7 @@ Status ValidateInput(const OpInputList& indices_list_in,
}
}

- return OkStatus();
+ return absl::OkStatus();
}

// Extracts data about the features and populates feature data.
@@ -733,7 +733,7 @@ Status CreateOutputTensors(
shape_vec(0) = batch_size;
shape_vec(1) = max_cross_count;

- return OkStatus();
+ return absl::OkStatus();
}

template <bool HASHED_OUTPUT, typename InternalType>
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/sparse_reduce_op.cc
@@ -129,7 +129,7 @@ Status ValidateInputs(const Tensor *shape_t, const Tensor *reduction_axes_t) {
}
}

- return OkStatus();
+ return absl::OkStatus();
}

struct SumOp {
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/sparse_tensor_dense_add_op.cc
@@ -91,7 +91,7 @@ Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values,
}
}

- return OkStatus();
+ return absl::OkStatus();
}

} // namespace
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc
@@ -319,7 +319,7 @@ Status SparseTensorDenseMatMulImpl(
}
#undef LOOP_NNZ
}
- return OkStatus();
+ return absl::OkStatus();
}
} // namespace

10 changes: 5 additions & 5 deletions tensorflow/core/kernels/sparse_tensors_map_ops.cc
@@ -68,7 +68,7 @@ class SparseTensorsMap : public ResourceBase {
gtl::InlinedVector<int64_t, 8>(sp.shape().begin(), sp.shape().end())};
*handle = unique_st_handle;
}
- return OkStatus();
+ return absl::OkStatus();
}

Status RetrieveAndClearSparseTensors(
@@ -95,7 +95,7 @@ class SparseTensorsMap : public ResourceBase {
}
}

- return OkStatus();
+ return absl::OkStatus();
}

protected:
@@ -128,7 +128,7 @@ class SparseTensorAccessingOp : public OpKernel {

if (sparse_tensors_map_) {
*sparse_tensors_map = sparse_tensors_map_;
- return OkStatus();
+ return absl::OkStatus();
}

TF_RETURN_IF_ERROR(cinfo_.Init(ctx->resource_manager(), def(),
Expand All @@ -137,7 +137,7 @@ class SparseTensorAccessingOp : public OpKernel {
CreatorCallback sparse_tensors_map_creator = [this](SparseTensorsMap** c) {
SparseTensorsMap* map = new SparseTensorsMap(cinfo_.name());
*c = map;
- return OkStatus();
+ return absl::OkStatus();
};

TF_RETURN_IF_ERROR(
@@ -146,7 +146,7 @@ class SparseTensorAccessingOp : public OpKernel {
sparse_tensors_map_creator));

*sparse_tensors_map = sparse_tensors_map_;
- return OkStatus();
+ return absl::OkStatus();
}

private:
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/sparse_to_dense_op.cc
@@ -85,7 +85,7 @@ Status CheckSparseToDenseShapes(const Tensor& indices,
if (!TensorShapeUtils::IsScalar(default_value.shape())) {
return errors::InvalidArgument("default_value should be a scalar.");
}
- return OkStatus();
+ return absl::OkStatus();
}

} // end namespace
10 changes: 5 additions & 5 deletions tensorflow/core/kernels/sparse_utils.cc
@@ -176,7 +176,7 @@ Status ValidateSparseTensorShape(const Tensor& indices, const Tensor& values,
shape.NumElements(), ") do not match");
}

- return OkStatus();
+ return absl::OkStatus();
}

// Creates a debug string for the index tuple in indices(row, :).
@@ -215,7 +215,7 @@ Status ValidateSparseTensorIndicesUnordered(const Tensor& indices,
}
}

- return OkStatus();
+ return absl::OkStatus();
}

// Ensures all sparse indices are within correct bounds and are
@@ -229,7 +229,7 @@ Status ValidateSparseTensorIndicesOrdered(const Tensor& indices,
int64_t ndims = indices.dim_size(1);

if (nnz == 0) {
- return OkStatus();
+ return absl::OkStatus();
}

// First set of indices must be within range.
@@ -282,7 +282,7 @@ Status ValidateSparseTensorIndicesOrdered(const Tensor& indices,
}
} // for i in [1, nnz)

- return OkStatus();
+ return absl::OkStatus();
}

} // namespace
@@ -300,7 +300,7 @@ Status ValidateSparseTensor(const Tensor& indices, const Tensor& values,
case IndexValidation::kNone: {
}
}
- return OkStatus();
+ return absl::OkStatus();
}

#define REGISTER_SPARSE_UTIL_FUNCTIONS(TypeIndex) \
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/sparse_xent_op.cc
@@ -34,7 +34,7 @@ typedef Eigen::GpuDevice GPUDevice;

template <typename Index>
Status CheckInvalidLabelIndex(const Tensor& labels, int64_t max_index) {
- if (labels.NumElements() == 0) return OkStatus();
+ if (labels.NumElements() == 0) return absl::OkStatus();
const auto label_values = labels.vec<Index>();
int64_t bad_index;
auto min_max_dim_value = std::minmax_element(
@@ -47,7 +47,7 @@ Status CheckInvalidLabelIndex(const Tensor& labels, int64_t max_index) {
" which is outside the valid range of [0, ", max_index,
"). Label values: ", labels.SummarizeValue(labels.NumElements()));
}
- return OkStatus();
+ return absl::OkStatus();
}

template <typename Device, typename T, typename Index>
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/spectrogram_convert_test_data.cc
@@ -34,7 +34,7 @@ Status ConvertCsvToRaw(const string& input_filename) {
input_filename);
}
LOG(INFO) << "Wrote raw file to " << output_filename;
- return OkStatus();
+ return absl::OkStatus();
}

} // namespace wav
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/squared-loss.h
@@ -64,7 +64,7 @@ class SquaredLossUpdater : public DualLossUpdater {

// Labels don't require conversion for linear regression.
Status ConvertLabel(float* const example_label) const final {
- return OkStatus();
+ return absl::OkStatus();
}
};

8 changes: 4 additions & 4 deletions tensorflow/core/kernels/stack.cc
@@ -63,7 +63,7 @@ class Stack : public ResourceBase {
"its max_size (", max_size_, ")");
}
stack_.push_back(value);
- return OkStatus();
+ return absl::OkStatus();
}

Status Pop(TensorAndAllocation* value) {
@@ -75,7 +75,7 @@ }
}
*value = stack_.back();
stack_.pop_back();
- return OkStatus();
+ return absl::OkStatus();
}

// We don't swap the first tensor on the stack and any subsequent tensors
@@ -121,7 +121,7 @@ class Stack : public ResourceBase {
return errors::InvalidArgument("Stack[", stack_name_,
"] has already been closed.");
}
- return OkStatus();
+ return absl::OkStatus();
}
};

@@ -147,7 +147,7 @@ Status GetStack(OpKernelContext* ctx, Stack** stack) {
return errors::Internal("No step container.");
}
TF_RETURN_IF_ERROR(step_container->Lookup(rm, key, stack));
- return OkStatus();
+ return absl::OkStatus();
}
}

8 changes: 4 additions & 4 deletions tensorflow/core/kernels/stage_op.cc
@@ -82,7 +82,7 @@ class Buffer : public ResourceBase {
// we should wake them all.
non_empty_cond_var_.notify_all();

- return OkStatus();
+ return absl::OkStatus();
}

// Get tuple at front of the buffer
@@ -115,7 +115,7 @@ class Buffer : public ResourceBase {
tuple->push_back(tensor);
}

- return OkStatus();
+ return absl::OkStatus();
}

// Buffer size
@@ -187,13 +187,13 @@ Status GetBuffer(OpKernelContext* ctx, const NodeDef& ndef, Buffer** buf) {
TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "capacity", &capacity));
TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "memory_limit", &memory_limit));
*ret = new Buffer(capacity, memory_limit);
- return OkStatus();
+ return absl::OkStatus();
};

TF_RETURN_IF_ERROR(cinfo.Init(rm, ndef, true /* use name() */));
TF_RETURN_IF_ERROR(rm->LookupOrCreate<Buffer>(cinfo.container(), cinfo.name(),
buf, create_fn));
- return OkStatus();
+ return absl::OkStatus();
}

} // namespace
8 changes: 4 additions & 4 deletions tensorflow/core/kernels/stateful_random_ops.cc
@@ -65,7 +65,7 @@ Status CheckState(const Tensor& state) {
return errors::InvalidArgument(
"RNG state must have one and only one dimension, not ", state.dims());
}
- return OkStatus();
+ return absl::OkStatus();
}

Status CheckPhiloxState(const Tensor& state, int64_t alg_tag_skip = 0) {
@@ -80,7 +80,7 @@ Status CheckPhiloxState(const Tensor& state, int64_t alg_tag_skip = 0) {
" must be at least ",
min_size, "; got ", state.NumElements());
}
- return OkStatus();
+ return absl::OkStatus();
}

template <typename AlgEnumType>
@@ -149,7 +149,7 @@ Status UpdateVariableAndFill(
arg.state_tensor = var_tensor;
functor::UpdateVariableAndFill_Philox<Device, Distribution>()(
ctx, ctx->eigen_device<Device>(), dist, &arg, output_data);
- return OkStatus();
+ return absl::OkStatus();
case ConcreteRngAlgorithm::RNG_ALG_THREEFRY:
return errors::Unimplemented(
"Non-XLA devices don't support the ThreeFry algorithm.");
@@ -202,7 +202,7 @@ Status GetScalar(const Tensor& tensor, int input_idx, T* result) {
", not ", DataTypeString(tensor.dtype()));
}
*result = tensor.flat<T>()(0);
- return OkStatus();
+ return absl::OkStatus();
}

template <typename Device, class Distribution>
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/stateless_random_ops.cc
@@ -62,7 +62,7 @@ Status GenerateKey(Tensor seed, random::PhiloxRandom::Key* out_key,
(*out_counter)[0] = (*out_counter)[1] = 0;
(*out_counter)[2] = mix[2];
(*out_counter)[3] = mix[3];
- return OkStatus();
+ return absl::OkStatus();
}

StatelessRandomOpBase::StatelessRandomOpBase(OpKernelConstruction* context)
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/stateless_random_ops_v2.h
@@ -38,7 +38,7 @@ inline Status CheckKeyCounterShape(int minimum_counter_size,
"; got shape: ", counter_shape.DebugString(),
". (Note that batched counters are not supported yet.)");
}
- return OkStatus();
+ return absl::OkStatus();
}

// A base class for kernels of stateless RNG ops that take shape, key, counter
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/stateless_random_ops_v2_util.h
@@ -41,7 +41,7 @@ Status GetScalar(const Tensor& tensor, int input_idx, T* result) {
", not ", DataTypeString(tensor.dtype()));
}
*result = tensor.flat<T>()(0);
- return OkStatus();
+ return absl::OkStatus();
}

inline StatusOr<std::tuple<Tensor, Tensor, Algorithm> >
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/string_util.cc
@@ -31,7 +31,7 @@ Status ParseUnicodeEncoding(const string& str, UnicodeEncoding* encoding) {
strings::StrCat("Invalid encoding \"", str,
"\": Should be one of: UTF-8, UTF-16-BE, UTF-32-BE"));
}
- return OkStatus();
+ return absl::OkStatus();
}

// Sets unit value based on str.
@@ -44,7 +44,7 @@ Status ParseCharUnit(const string& str, CharUnit* unit) {
return errors::InvalidArgument(strings::StrCat(
"Invalid unit \"", str, "\": Should be one of: BYTE, UTF8_CHAR"));
}
- return OkStatus();
+ return absl::OkStatus();
}

// Return the number of Unicode characters in a UTF-8 string.
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/summary_image_op.cc
@@ -173,7 +173,7 @@ class SummaryImageOp : public OpKernel {
return errors::Internal("PNG encoding failed");
}
}
- return OkStatus();
+ return absl::OkStatus();
}

template <class T>
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/summary_kernels.cc
@@ -97,7 +97,7 @@ class CreateSummaryDbWriterOp : public OpKernel {
TF_RETURN_IF_ERROR(SetupTensorboardSqliteDb(db));
TF_RETURN_IF_ERROR(CreateSummaryDbWriter(
db, experiment_name, run_name, user_name, ctx->env(), s));
- return OkStatus();
+ return absl::OkStatus();
}));
}
};
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/tensor_array.cc
@@ -111,7 +111,7 @@ Status TensorArray::CopyShapesFrom(TensorArray* rhs,
tensors_[i].written = true;
}

- return OkStatus();
+ return absl::OkStatus();
}

} // namespace tensorflow

0 comments on commit b51db3c

Please sign in to comment.