Commit 6405185: Some small details optimization

wangeguo committed Oct 14, 2023
1 parent c86daba commit 6405185
Showing 5 changed files with 23 additions and 22 deletions.
10 changes: 5 additions & 5 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,5 +1,5 @@
[workspace.package]
version = "0.5.11"
version = "0.5.12"
edition = "2021"
license = "Apache-2.0"
repository = "https://github.com/amphitheatre-app/amphitheatre"
2 changes: 1 addition & 1 deletion apiserver/src/handlers/actor.rs
@@ -150,7 +150,7 @@ pub async fn stats(
Ok(data(ActorService::stats(ctx, pid, name).await?))
}

-/// Recive a actor's sources and publish them to Message Queue.
+/// Receive a actor's sources and publish them to Message Queue.
#[utoipa::path(
post, path = "/v1/actors/{pid}/{name}/sync",
params(
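
As an aside, the route documented above is POST /v1/actors/{pid}/{name}/sync. A minimal client sketch of triggering a sync, assuming a reqwest/tokio client stack, a local API server address, and placeholder identifiers (none of which are part of this commit):

```rust
// A rough sketch of calling the sync route above. Everything except the path
// format is an assumption: the reqwest/tokio stack, the local base URL, and
// the placeholder identifiers.
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let (pid, name) = ("example-playbook", "example-actor"); // hypothetical IDs
    let url = format!("http://localhost:8170/v1/actors/{pid}/{name}/sync");

    // The request body (the actor's sources) is not shown in this diff, so the
    // sketch sends an empty POST purely as a placeholder.
    let response = Client::new().post(&url).send().await?;
    println!("sync returned {}", response.status());
    Ok(())
}
```
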
15 changes: 8 additions & 7 deletions controllers/src/actor_controller.rs
@@ -27,6 +27,7 @@ use kube::runtime::events::Recorder;
use kube::runtime::finalizer::{finalizer, Event as FinalizerEvent};
use kube::runtime::{watcher, Controller};
use kube::{Api, Resource, ResourceExt};
+use tracing::{error, info};

use crate::context::Context;
use crate::error::{Error, Result};
@@ -36,8 +37,8 @@ pub async fn new(ctx: &Arc<Context>) {

// Ensure Actor CRD is installed before loop-watching
if let Err(e) = api.list(&ListParams::default().limit(1)).await {
tracing::error!("Actor CRD is not queryable; {e:?}. Is the CRD installed?");
tracing::info!("Installation: amp-crdgen | kubectl apply -f -");
error!("Actor CRD is not queryable; {e:?}. Is the CRD installed?");
info!("Installation: amp-crdgen | kubectl apply -f -");
std::process::exit(1);
}

@@ -49,7 +50,7 @@ pub async fn new(ctx: &Arc<Context>) {

/// The reconciler that will be called when either object change
pub async fn reconcile(actor: Arc<Actor>, ctx: Arc<Context>) -> Result<Action> {
tracing::info!("Reconciling Actor \"{}\"", actor.name_any());
info!("Reconciling Actor \"{}\"", actor.name_any());

let ns = actor.namespace().unwrap(); // actor is namespace scoped
let api: Api<Actor> = Api::namespaced(ctx.k8s.clone(), &ns);
@@ -69,7 +70,7 @@ pub async fn reconcile(actor: Arc<Actor>, ctx: Arc<Context>) -> Result<Action> {
/// an error handler that will be called when the reconciler fails with access to both the
/// object that caused the failure and the actual error
pub fn error_policy(_actor: Arc<Actor>, error: &Error, _ctx: Arc<Context>) -> Action {
tracing::error!("reconcile failed: {:?}", error);
error!("reconcile failed: {:?}", error);
Action::requeue(Duration::from_secs(60))
}

@@ -102,7 +103,7 @@ async fn init(actor: &Actor, ctx: &Arc<Context>, recorder: &Recorder) -> Result<
async fn build(actor: &Actor, ctx: &Arc<Context>, recorder: &Recorder) -> Result<Action> {
// Return if the actor is live
if actor.spec.live {
tracing::info!("The actor is live mode, Running");
info!("The actor is live mode, Running");
let condition = ActorState::running(true, "AutoRun", None);
actor::patch_status(&ctx.k8s, actor, condition)
.await
@@ -119,7 +120,7 @@ async fn build(actor: &Actor, ctx: &Arc<Context>, recorder: &Recorder) -> Result
let credential = match credential {
Ok(credential) => Some(credential),
Err(err) => {
tracing::error!("Error handling docker configuration: {}", err);
error!("Error handling docker configuration: {}", err);
None
}
};
@@ -128,7 +129,7 @@ async fn build(actor: &Actor, ctx: &Arc<Context>, recorder: &Recorder) -> Result
.await
.map_err(Error::DockerRegistryExistsFailed)?
{
tracing::info!("The images already exists, Running");
info!("The images already exists, Running");
let condition = ActorState::running(true, "AutoRun", None);
actor::patch_status(&ctx.k8s, actor, condition)
.await
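
Most of this file's changes follow a single pattern: import the tracing macros once at the top of the module and drop the tracing:: prefix at every call site. A self-contained sketch of that pattern; the tracing_subscriber setup is an assumption added only so the events are visible when the snippet runs:

```rust
use tracing::{error, info};

// Unqualified calls behave exactly like tracing::info!/tracing::error!; only
// the call sites get shorter.
fn report(result: Result<(), String>) {
    match result {
        Ok(()) => info!("reconcile succeeded"),
        Err(e) => error!("reconcile failed: {:?}", e),
    }
}

fn main() {
    // Some subscriber must be installed for events to go anywhere; the fmt
    // subscriber is used here as a stand-in, not part of this change.
    tracing_subscriber::fmt().init();
    report(Err("boom".to_string()));
}
```
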
16 changes: 8 additions & 8 deletions controllers/src/playbook_controller.rs
@@ -28,7 +28,7 @@ use kube::runtime::events::Recorder;
use kube::runtime::finalizer::{finalizer, Event as FinalizerEvent};
use kube::runtime::{watcher, Controller};
use kube::{Api, Resource, ResourceExt};
-use tracing::debug;
+use tracing::{debug, error, info};

use crate::context::Context;
use crate::error::{Error, Result};
@@ -38,8 +38,8 @@ pub async fn new(ctx: &Arc<Context>) {

// Ensure Playbook CRD is installed before loop-watching
if let Err(e) = api.list(&ListParams::default().limit(1)).await {
tracing::error!("Playbook CRD is not queryable; {e:?}. Is the CRD installed?");
tracing::info!("Installation: amp-crdgen | kubectl apply -f -");
error!("Playbook CRD is not queryable; {e:?}. Is the CRD installed?");
info!("Installation: amp-crdgen | kubectl apply -f -");
std::process::exit(1);
}

@@ -51,7 +51,7 @@ pub async fn new(ctx: &Arc<Context>) {

/// The reconciler that will be called when either object change
pub async fn reconcile(playbook: Arc<Playbook>, ctx: Arc<Context>) -> Result<Action> {
tracing::info!("Reconciling Playbook \"{}\"", playbook.name_any());
info!("Reconciling Playbook \"{}\"", playbook.name_any());

let api: Api<Playbook> = Api::all(ctx.k8s.clone());
let recorder = ctx.recorder(reference(&playbook));
@@ -71,7 +71,7 @@ pub async fn reconcile(playbook: Arc<Playbook>, ctx: Arc<Context>) -> Result<Act
/// an error handler that will be called when the reconciler fails with access to both the
/// object that caused the failure and the actual error
pub fn error_policy(_playbook: Arc<Playbook>, error: &Error, _ctx: Arc<Context>) -> Action {
tracing::error!("reconcile failed: {:?}", error);
error!("reconcile failed: {:?}", error);
Action::requeue(Duration::from_secs(60))
}

@@ -128,10 +128,10 @@ async fn resolve(playbook: &Playbook, ctx: &Arc<Context>, recorder: &Recorder) -
}
}
}
tracing::debug!("The currently existing actors are: {exists:?}");
debug!("The currently existing actors are: {exists:?}");
}

tracing::debug!("The repositories to be fetched are: {fetches:?}");
debug!("The repositories to be fetched are: {fetches:?}");

for (name, partner) in fetches.iter() {
let character = resolver::partner::load(&ctx.k8s, &credentials, name, partner)
@@ -215,7 +215,7 @@ async fn run(playbook: &Playbook, ctx: &Arc<Context>, recorder: &Recorder) -> Re
pub async fn cleanup(playbook: &Playbook, ctx: &Arc<Context>, _recorder: &Recorder) -> Result<Action> {
// Try to delete the NATS stream for this playbook if it exists.
if ctx.jetstream.delete_stream(playbook.name_any()).await.is_ok() {
tracing::debug!("Deleted NATS stream for playbook {}", playbook.name_any());
debug!("Deleted NATS stream for playbook {}", playbook.name_any());
}

Ok(Action::await_change())
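
As the hunks above show, both controllers pair a reconcile function with an error_policy that requeues after 60 seconds. Below is a hedged, self-contained sketch of how such a pair is typically wired into a kube-rs Controller; the Playbook CRD stub, its API group, and the Context/Error types are simplified stand-ins, not the project's real definitions:

```rust
use std::{sync::Arc, time::Duration};

use futures::StreamExt;
use kube::{
    runtime::{controller::Action, watcher, Controller},
    Api, Client, CustomResource,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

// Stand-in CRD so the sketch compiles on its own; the real Playbook type lives
// in the project's resource crate and its API group differs from this placeholder.
#[derive(CustomResource, Clone, Debug, Deserialize, Serialize, JsonSchema)]
#[kube(group = "example.dev", version = "v1", kind = "Playbook", namespaced)]
pub struct PlaybookSpec {
    pub title: String,
}

// Simplified stand-ins for the diff's Context and Error types.
pub struct Context {
    pub k8s: Client,
}

#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("kube error: {0}")]
    Kube(#[from] kube::Error),
}

async fn reconcile(playbook: Arc<Playbook>, _ctx: Arc<Context>) -> Result<Action, Error> {
    tracing::info!("Reconciling Playbook {:?}", playbook.metadata.name);
    Ok(Action::await_change())
}

// Same shape as the error_policy in the diff: log, then retry after 60 seconds.
fn error_policy(_playbook: Arc<Playbook>, error: &Error, _ctx: Arc<Context>) -> Action {
    tracing::error!("reconcile failed: {:?}", error);
    Action::requeue(Duration::from_secs(60))
}

#[tokio::main]
async fn main() -> Result<(), kube::Error> {
    let client = Client::try_default().await?;
    let api: Api<Playbook> = Api::all(client.clone());

    Controller::new(api, watcher::Config::default())
        .run(reconcile, error_policy, Arc::new(Context { k8s: client }))
        .for_each(|res| async move {
            if let Err(e) = res {
                tracing::error!("reconcile loop error: {:?}", e);
            }
        })
        .await;

    Ok(())
}
```
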
