Skip to content
This repository has been archived by the owner on Nov 6, 2023. It is now read-only.

Commit

Permalink
Removed GRPC_ERROR_NONE (grpc#31131)
Browse files Browse the repository at this point in the history
  • Loading branch information
veblush authored Sep 28, 2022
1 parent 6fbff9c commit d43511f
Show file tree
Hide file tree
Showing 214 changed files with 1,130 additions and 1,052 deletions.
5 changes: 5 additions & 0 deletions BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -1688,6 +1688,7 @@ grpc_cc_library(
hdrs = [
"src/core/lib/promise/exec_ctx_wakeup_scheduler.h",
],
external_deps = ["absl/status"],
language = "c++",
deps = [
"closure",
Expand Down Expand Up @@ -1837,6 +1838,7 @@ grpc_cc_library(
],
external_deps = [
"absl/container:inlined_vector",
"absl/status",
"absl/strings:str_format",
],
language = "c++",
Expand Down Expand Up @@ -1900,6 +1902,7 @@ grpc_cc_library(
external_deps = [
"absl/base:core_headers",
"absl/memory",
"absl/status",
"absl/strings",
"absl/types:optional",
],
Expand Down Expand Up @@ -1935,6 +1938,7 @@ grpc_cc_library(
external_deps = [
"absl/base:core_headers",
"absl/memory",
"absl/status",
"absl/status:statusor",
"absl/strings",
"absl/types:optional",
Expand Down Expand Up @@ -5700,6 +5704,7 @@ grpc_cc_library(
],
external_deps = [
"absl/memory",
"absl/status",
"absl/status:statusor",
"absl/strings",
],
Expand Down
4 changes: 2 additions & 2 deletions src/core/ext/filters/channel_idle/channel_idle_filter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ void MaxAgeFilter::PostInit() {
auto* startup =
new StartupClosure{this->channel_stack()->Ref(), this, grpc_closure{}};
GRPC_CLOSURE_INIT(&startup->closure, run_startup, startup, nullptr);
ExecCtx::Run(DEBUG_LOCATION, &startup->closure, GRPC_ERROR_NONE);
ExecCtx::Run(DEBUG_LOCATION, &startup->closure, absl::OkStatus());

auto channel_stack = this->channel_stack()->Ref();

Expand Down Expand Up @@ -187,7 +187,7 @@ void MaxAgeFilter::PostInit() {
ExecCtx::Run(
DEBUG_LOCATION,
GRPC_CLOSURE_CREATE(fn, this->channel_stack(), nullptr),
GRPC_ERROR_NONE);
absl::OkStatus());
return Immediate(absl::OkStatus());
},
// Sleep for the grace period
Expand Down
2 changes: 1 addition & 1 deletion src/core/ext/filters/client_channel/backup_poller.cc
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ static void g_poller_unref() {
static void run_poller(void* arg, grpc_error_handle error) {
backup_poller* p = static_cast<backup_poller*>(arg);
if (!error.ok()) {
if (error != GRPC_ERROR_CANCELLED) {
if (error != absl::CancelledError()) {
GRPC_LOG_IF_ERROR("run_poller", error);
}
backup_poller_shutdown_unref(p);
Expand Down
4 changes: 3 additions & 1 deletion src/core/ext/filters/client_channel/channel_connectivity.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@

#include <inttypes.h>

#include "absl/status/status.h"

#include <grpc/grpc.h>
#include <grpc/impl/codegen/connectivity_state.h>
#include <grpc/impl/codegen/gpr_types.h>
Expand Down Expand Up @@ -195,7 +197,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> {
grpc_error_handle error =
timer_fired_ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Timed out waiting for connection state change")
: GRPC_ERROR_NONE;
: absl::OkStatus();
grpc_cq_end_op(cq_, tag_, error, FinishedCompletion, this,
&completion_storage_);
}
Expand Down
54 changes: 27 additions & 27 deletions src/core/ext/filters/client_channel/client_channel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ class ClientChannel::CallData {
grpc_transport_stream_op_batch* pending_batches_[MAX_PENDING_BATCHES] = {};

// Set when we get a cancel_stream op.
grpc_error_handle cancel_error_ = GRPC_ERROR_NONE;
grpc_error_handle cancel_error_;
};

//
Expand Down Expand Up @@ -273,7 +273,7 @@ class DynamicTerminationFilter {
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &kFilterVtable);
new (elem->channel_data) DynamicTerminationFilter(args->channel_args);
return GRPC_ERROR_NONE;
return absl::OkStatus();
}

static void Destroy(grpc_channel_element* elem) {
Expand All @@ -300,7 +300,7 @@ class DynamicTerminationFilter::CallData {
static grpc_error_handle Init(grpc_call_element* elem,
const grpc_call_element_args* args) {
new (elem->call_data) CallData(*args);
return GRPC_ERROR_NONE;
return absl::OkStatus();
}

static void Destroy(grpc_call_element* elem,
Expand All @@ -316,7 +316,7 @@ class DynamicTerminationFilter::CallData {
subchannel_call->SetAfterCallStackDestroy(then_schedule_closure);
} else {
// TODO(yashkt) : This can potentially be a Closure::Run
ExecCtx::Run(DEBUG_LOCATION, then_schedule_closure, GRPC_ERROR_NONE);
ExecCtx::Run(DEBUG_LOCATION, then_schedule_closure, absl::OkStatus());
}
}

Expand Down Expand Up @@ -732,7 +732,7 @@ void ClientChannel::ExternalConnectivityWatcher::Notify(
chand_, on_complete_, /*cancel=*/false);
// Report new state to the user.
*state_ = state;
ExecCtx::Run(DEBUG_LOCATION, on_complete_, GRPC_ERROR_NONE);
ExecCtx::Run(DEBUG_LOCATION, on_complete_, absl::OkStatus());
// Hop back into the work_serializer to clean up.
// Not needed in state SHUTDOWN, because the tracker will
// automatically remove all watchers in that case.
Expand All @@ -755,7 +755,7 @@ void ClientChannel::ExternalConnectivityWatcher::Cancel() {
std::memory_order_relaxed)) {
return; // Already done.
}
ExecCtx::Run(DEBUG_LOCATION, on_complete_, GRPC_ERROR_CANCELLED);
ExecCtx::Run(DEBUG_LOCATION, on_complete_, absl::CancelledError());
// Hop back into the work_serializer to clean up.
// Note: The callback takes a ref in case the ref inside the state tracker
// gets removed before the callback runs via a SHUTDOWN notification.
Expand All @@ -769,7 +769,7 @@ void ClientChannel::ExternalConnectivityWatcher::Cancel() {
}

void ClientChannel::ExternalConnectivityWatcher::AddWatcherLocked() {
Closure::Run(DEBUG_LOCATION, watcher_timer_init_, GRPC_ERROR_NONE);
Closure::Run(DEBUG_LOCATION, watcher_timer_init_, absl::OkStatus());
// Add new watcher. Pass the ref of the object from creation to OrphanablePtr.
chand_->state_tracker_.AddWatcher(
initial_state_, OrphanablePtr<ConnectivityStateWatcherInterface>(this));
Expand Down Expand Up @@ -953,7 +953,7 @@ grpc_error_handle ClientChannel::Init(grpc_channel_element* elem,
grpc_channel_element_args* args) {
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &kFilterVtable);
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_error_handle error;
new (elem->channel_data) ClientChannel(args, &error);
return error;
}
Expand Down Expand Up @@ -1005,7 +1005,7 @@ ClientChannel::ClientChannel(grpc_channel_element_args* args,
absl::optional<absl::string_view> service_config_json =
channel_args_.GetString(GRPC_ARG_SERVICE_CONFIG);
if (!service_config_json.has_value()) service_config_json = "{}";
*error = GRPC_ERROR_NONE;
*error = absl::OkStatus();
auto service_config =
ServiceConfigImpl::Create(channel_args_, *service_config_json);
if (!service_config.ok()) {
Expand Down Expand Up @@ -1055,7 +1055,7 @@ ClientChannel::ClientChannel(grpc_channel_element_args* args,
default_authority_ = std::move(*default_authority);
}
// Success.
*error = GRPC_ERROR_NONE;
*error = absl::OkStatus();
}

ClientChannel::~ClientChannel() {
Expand Down Expand Up @@ -1319,7 +1319,7 @@ void ClientChannel::OnResolverErrorLocked(absl::Status status) {
call = call->next) {
grpc_call_element* elem = call->elem;
CallData* calld = static_cast<CallData*>(elem->call_data);
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_error_handle error;
if (calld->CheckResolutionLocked(elem, &error)) {
calld->AsyncResolutionDone(elem, error);
}
Expand Down Expand Up @@ -1486,7 +1486,7 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
ExecCtx::Get()->InvalidateNow();
grpc_call_element* elem = call->elem;
CallData* calld = static_cast<CallData*>(elem->call_data);
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_error_handle error;
if (calld->CheckResolutionLocked(elem, &error)) {
calld->AsyncResolutionDone(elem, error);
}
Expand Down Expand Up @@ -1584,7 +1584,7 @@ void ClientChannel::UpdateStateAndPickerLocked(
// on the stale value, which results in the timer firing too early. To
// avoid this, we invalidate the cached value for each call we process.
ExecCtx::Get()->InvalidateNow();
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_error_handle error;
if (call->lb_call->PickSubchannelLocked(&error)) {
call->lb_call->AsyncPickDone(error);
}
Expand Down Expand Up @@ -1650,7 +1650,7 @@ grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
}
connected_subchannel->Ping(op->send_ping.on_initiate,
op->send_ping.on_ack);
return GRPC_ERROR_NONE;
return absl::OkStatus();
},
// Queue pick.
[](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/) {
Expand Down Expand Up @@ -1719,7 +1719,7 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
}
}
GRPC_CHANNEL_STACK_UNREF(owning_stack_, "start_transport_op");
ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, GRPC_ERROR_NONE);
ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, absl::OkStatus());
}

void ClientChannel::StartTransportOp(grpc_channel_element* elem,
Expand Down Expand Up @@ -1847,7 +1847,7 @@ grpc_error_handle ClientChannel::CallData::Init(
grpc_call_element* elem, const grpc_call_element_args* args) {
ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
new (elem->call_data) CallData(elem, *chand, *args);
return GRPC_ERROR_NONE;
return absl::OkStatus();
}

void ClientChannel::CallData::Destroy(
Expand All @@ -1861,7 +1861,7 @@ void ClientChannel::CallData::Destroy(
dynamic_call->SetAfterCallStackDestroy(then_schedule_closure);
} else {
// TODO(yashkt) : This can potentially be a Closure::Run
ExecCtx::Run(DEBUG_LOCATION, then_schedule_closure, GRPC_ERROR_NONE);
ExecCtx::Run(DEBUG_LOCATION, then_schedule_closure, absl::OkStatus());
}
}

Expand Down Expand Up @@ -1945,7 +1945,7 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
"config",
chand, calld);
}
CheckResolution(elem, GRPC_ERROR_NONE);
CheckResolution(elem, absl::OkStatus());
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
Expand Down Expand Up @@ -2076,7 +2076,7 @@ void ClientChannel::CallData::PendingBatchesResume(grpc_call_element* elem) {
batch->handler_private.extra_arg = elem;
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
ResumePendingBatchInCallCombiner, batch, nullptr);
closures.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
closures.Add(&batch->handler_private.closure, absl::OkStatus(),
"resuming pending batch from client channel call");
batch = nullptr;
}
Expand Down Expand Up @@ -2218,7 +2218,7 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
// Set the dynamic filter stack.
dynamic_filters_ = chand->dynamic_filters_;
}
return GRPC_ERROR_NONE;
return absl::OkStatus();
}

void ClientChannel::CallData::
Expand Down Expand Up @@ -2312,7 +2312,7 @@ bool ClientChannel::CallData::CheckResolutionLocked(grpc_call_element* elem,
DEBUG_LOCATION);
},
chand, nullptr),
GRPC_ERROR_NONE);
absl::OkStatus());
}
// Get send_initial_metadata batch and flags.
auto& send_initial_metadata =
Expand Down Expand Up @@ -2364,7 +2364,7 @@ void ClientChannel::CallData::CreateDynamicCall(grpc_call_element* elem) {
arena_,
call_context_,
call_combiner_};
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_error_handle error;
DynamicFilters* channel_stack = args.channel_stack.get();
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(
Expand Down Expand Up @@ -2570,7 +2570,7 @@ ClientChannel::LoadBalancedCall::~LoadBalancedCall() {
}
if (on_call_destruction_complete_ != nullptr) {
ExecCtx::Run(DEBUG_LOCATION, on_call_destruction_complete_,
GRPC_ERROR_NONE);
absl::OkStatus());
}
}

Expand Down Expand Up @@ -2693,7 +2693,7 @@ void ClientChannel::LoadBalancedCall::PendingBatchesResume() {
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
ResumePendingBatchInCallCombiner, batch,
grpc_schedule_on_exec_ctx);
closures.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
closures.Add(&batch->handler_private.closure, absl::OkStatus(),
"resuming pending batch from LB call");
batch = nullptr;
}
Expand Down Expand Up @@ -2824,7 +2824,7 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
"chand=%p lb_call=%p: grabbing data plane mutex to perform pick",
chand_, this);
}
PickSubchannel(this, GRPC_ERROR_NONE);
PickSubchannel(this, absl::OkStatus());
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
Expand Down Expand Up @@ -2924,7 +2924,7 @@ void ClientChannel::LoadBalancedCall::RecvTrailingMetadataReady(
// Chain to original callback.
if (!self->failure_error_.ok()) {
error = self->failure_error_;
self->failure_error_ = GRPC_ERROR_NONE;
self->failure_error_ = absl::OkStatus();
}
Closure::Run(DEBUG_LOCATION, self->original_recv_trailing_metadata_ready_,
error);
Expand Down Expand Up @@ -2956,7 +2956,7 @@ void ClientChannel::LoadBalancedCall::CreateSubchannelCall() {
// TODO(roth): When we implement hedging support, we will probably
// need to use a separate call context for each subchannel call.
call_context_, call_combiner_};
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_error_handle error;
subchannel_call_ = SubchannelCall::Create(std::move(call_args), &error);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
Expand Down
7 changes: 3 additions & 4 deletions src/core/ext/filters/client_channel/client_channel.h
Original file line number Diff line number Diff line change
Expand Up @@ -362,8 +362,7 @@ class ClientChannel {
std::set<SubchannelWrapper*> subchannel_wrappers_
ABSL_GUARDED_BY(*work_serializer_);
int keepalive_time_ ABSL_GUARDED_BY(*work_serializer_) = -1;
grpc_error_handle disconnect_error_ ABSL_GUARDED_BY(*work_serializer_) =
GRPC_ERROR_NONE;
grpc_error_handle disconnect_error_ ABSL_GUARDED_BY(*work_serializer_);

//
// Fields guarded by a mutex, since they need to be accessed
Expand Down Expand Up @@ -508,10 +507,10 @@ class ClientChannel::LoadBalancedCall
gpr_cycle_counter lb_call_start_time_ = gpr_get_cycle_counter();

// Set when we get a cancel_stream op.
grpc_error_handle cancel_error_ = GRPC_ERROR_NONE;
grpc_error_handle cancel_error_;

// Set when we fail inside the LB call.
grpc_error_handle failure_error_ = GRPC_ERROR_NONE;
grpc_error_handle failure_error_;

grpc_closure pick_closure_;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@

#include <new>

#include "absl/status/status.h"
#include "absl/types/optional.h"

#include <grpc/support/log.h>
Expand All @@ -36,7 +37,7 @@

// Channel-element initializer for the client load reporting filter.
// There is no channel-level state to set up, so initialization always
// succeeds and reports OK to the channel stack builder.
static grpc_error_handle clr_init_channel_elem(
    grpc_channel_element* /*elem*/, grpc_channel_element_args* /*args*/) {
  return absl::OkStatus();
}

static void clr_destroy_channel_elem(grpc_channel_element* /*elem*/) {}
Expand Down Expand Up @@ -80,7 +81,7 @@ static grpc_error_handle clr_init_call_elem(
grpc_call_element* elem, const grpc_call_element_args* args) {
GPR_ASSERT(args->context != nullptr);
new (elem->call_data) call_data();
return GRPC_ERROR_NONE;
return absl::OkStatus();
}

static void clr_destroy_call_elem(grpc_call_element* elem,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -263,7 +263,7 @@ class OrcaProducer::OrcaStreamEventHandler
// BackendMetricAllocator object.
// Schedules NotifyWatchersInExecCtx to run asynchronously on the ExecCtx
// with an OK status. Scheduling exactly once matters: the closure
// presumably owns `this` and deletes it when it runs — TODO confirm
// against NotifyWatchersInExecCtx, which is outside this view.
void AsyncNotifyWatchersAndDelete() {
  GRPC_CLOSURE_INIT(&closure_, NotifyWatchersInExecCtx, this, nullptr);
  ExecCtx::Run(DEBUG_LOCATION, &closure_, absl::OkStatus());
}

private:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ class RingHash : public LoadBalancingPolicy {
// Orphan: schedules `closure_` to run on the ExecCtx with an OK status
// rather than invoking it inline.
void Orphan() override {
  // Hop into ExecCtx, so that we're not holding the data plane mutex
  // while we run control-plane code.
  ExecCtx::Run(DEBUG_LOCATION, &closure_, absl::OkStatus());
}

// Will be invoked inside of the WorkSerializer.
Expand Down
Loading

0 comments on commit d43511f

Please sign in to comment.