Compare commits: 28476af222...d7de1ea752

2 Commits: d7de1ea752, 2d8bd5c4ae

.gitignore (vendored): 3 changes

@@ -2,7 +2,4 @@ target
 private_repos
 log/
 *.tgz
-examples/rust/examples/rust/webapp/helm/
-examples/rust/examples/rust/webapp/Dockerfile.harmony
-examples/rust/webapp/helm/harmony-example-rust-webapp-chart/
 .gitignore

Cargo.lock (generated): 1 change

@@ -1739,6 +1739,7 @@ name = "harmony"
 version = "0.1.0"
 dependencies = [
  "async-trait",
+ "base64 0.22.1",
  "bollard",
  "chrono",
  "cidr",

@@ -44,12 +44,6 @@ async fn main() {
     };

     let topology = K8sAnywhereTopology::from_env();
-
-    // topology
-    // .provision_tenant(&tenant.config)
-    // .await
-    // .expect("couldn't provision tenant");
-
     let mut maestro = Maestro::initialize(Inventory::autoload(), topology)
         .await
         .unwrap();
@@ -61,59 +55,16 @@ async fn main() {
         framework: Some(RustWebFramework::Leptos),
     });

-    let ntfy = NtfyScore {
-        namespace: tenant.clone().config.name,
-    };
-
-    let ntfy_default_auth_username = "harmony";
-    let ntfy_default_auth_password = "harmony";
-    let ntfy_default_auth_header = format!(
-        "Basic {}",
-        general_purpose::STANDARD.encode(format!(
-            "{ntfy_default_auth_username}:{ntfy_default_auth_password}"
-        ))
-    );
-
-    let ntfy_default_auth_param = general_purpose::STANDARD
-        .encode(ntfy_default_auth_header)
-        .rsplit("=")
-        .collect::<Vec<&str>>()[0]
-        .to_string();
-
-    let ntfy_receiver = WebhookReceiver {
-        name: "ntfy-webhook".to_string(),
-        url: Url::Url(
-            url::Url::parse(
-                format!(
-                    "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
-                    tenant.clone().config.name
-                )
-                .as_str(),
-            )
-            .unwrap(),
-        ),
-    };
-
-    let alerting_score = HelmPrometheusAlertingScore {
-        receivers: vec![Box::new(ntfy_receiver)],
-        rules: vec![],
-        service_monitors: vec![],
-    };
-
     let app = ApplicationScore {
         features: vec![
             Box::new(ContinuousDelivery {
                 application: application.clone(),
             }), // TODO add monitoring, backups, multisite ha, etc
+            Box::new(Monitoring {}),
         ],
         application,
     };

-    maestro.register_all(vec![
-        Box::new(tenant),
-        Box::new(ntfy),
-        Box::new(alerting_score),
-        Box::new(app),
-    ]);
+    maestro.register_all(vec![Box::new(app)]);
     harmony_cli::init(maestro, None).await.unwrap();
 }

@@ -61,6 +61,7 @@ tempfile = "3.20.0"
 serde_with = "3.14.0"
 bollard.workspace = true
 tar.workspace = true
+base64.workspace = true

 [dev-dependencies]
 pretty_assertions.workspace = true

@@ -247,39 +247,53 @@ impl K8sClient {

     pub async fn apply_yaml_many(
         &self,
-        api_resource: &ApiResource,
         yaml: &Vec<serde_yaml::Value>,
         ns: Option<&str>,
     ) -> Result<(), Error> {
         for y in yaml.iter() {
-            self.apply_yaml(api_resource, y, ns).await?;
+            self.apply_yaml(y, ns).await?;
         }
         Ok(())
     }

     pub async fn apply_yaml(
         &self,
-        api_resource: &ApiResource,
         yaml: &serde_yaml::Value,
         ns: Option<&str>,
     ) -> Result<(), Error> {
         let obj: DynamicObject = serde_yaml::from_value(yaml.clone()).expect("TODO do not unwrap");
         let name = obj.metadata.name.as_ref().expect("YAML must have a name");

+        let api_version = yaml
+            .get("apiVersion")
+            .expect("couldn't get apiVersion from YAML")
+            .as_str()
+            .expect("couldn't get apiVersion as str");
+        let kind = yaml
+            .get("kind")
+            .expect("couldn't get kind from YAML")
+            .as_str()
+            .expect("couldn't get kind as str");
+
+        let split: Vec<&str> = api_version.splitn(2, "/").collect();
+        let g = split[0];
+        let v = split[1];
+
+        let gvk = GroupVersionKind::gvk(g, v, kind);
+        let api_resource = ApiResource::from_gvk(&gvk);
+
         let namespace = match ns {
             Some(n) => n,
-            None => {
-                obj
+            None => obj
                 .metadata
                 .namespace
                 .as_ref()
-                .expect("YAML must have a namespace")
-            },
+                .expect("YAML must have a namespace"),
         };

         // 5. Create a dynamic API client for this resource type.
         let api: Api<DynamicObject> =
-            Api::namespaced_with(self.client.clone(), namespace, api_resource);
+            Api::namespaced_with(self.client.clone(), namespace, &api_resource);

         // 6. Apply the object to the cluster using Server-Side Apply.
         // This will create the resource if it doesn't exist, or update it if it does.
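
Note on the apply_yaml change above: callers no longer pass an &ApiResource; apply_yaml derives the GroupVersionKind from the manifest's own apiVersion and kind fields and builds the ApiResource itself. A minimal, self-contained sketch of that derivation (not part of the diff; it assumes the kube and serde_yaml crates, and the Deployment manifest is purely illustrative):

use kube::core::{ApiResource, GroupVersionKind};

fn main() {
    // Illustrative manifest; any group-qualified apiVersion ("<group>/<version>")
    // works with the splitn(2, "/") logic shown in the diff.
    let yaml: serde_yaml::Value = serde_yaml::from_str(
        "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: demo\n  namespace: demo-ns\n",
    )
    .unwrap();

    let api_version = yaml["apiVersion"].as_str().unwrap();
    let kind = yaml["kind"].as_str().unwrap();
    let split: Vec<&str> = api_version.splitn(2, "/").collect();

    // Same construction the new apply_yaml performs internally.
    let gvk = GroupVersionKind::gvk(split[0], split[1], kind);
    let api_resource = ApiResource::from_gvk(&gvk);
    println!(
        "group={} version={} kind={} plural={}",
        api_resource.group, api_resource.version, api_resource.kind, api_resource.plural
    );
}

As written, the split assumes a group-qualified apiVersion; a core-group manifest (apiVersion: v1) has no "/", so split[1] would be out of bounds.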

@@ -204,14 +204,7 @@ impl<
                 .unwrap();
             }
         };
-
-        todo!("1. Create ArgoCD score that installs argo using helm chart, see if Taha's already done it
-            - [X] Package app (docker image, helm chart)
-            - [X] Push to registry
-            - [X] Push only if staging or prod
-            - [X] Deploy to local k3d when target is local
-            - [ ] Poke Argo
-            - [ ] Ensure app is up")
+        Ok(())
     }
     fn name(&self) -> String {
         "ContinuousDelivery".to_string()

@@ -57,16 +57,9 @@ impl<T: Topology + K8sclient + HelmCommand> Interpret<T> for ArgoInterpret {
             .execute(inventory, topology)
             .await?;

-        let gvk = GroupVersionKind::gvk("argoproj.io", "v1alpha1", "Application");
-        let api_resource = ApiResource::from_gvk_with_plural(&gvk, "applications");
-
         let k8s_client = topology.k8s_client().await?;
         k8s_client
-            .apply_yaml_many(
-                &api_resource,
-                &self.argo_apps.iter().map(|a| a.to_yaml()).collect(),
-                None,
-            )
+            .apply_yaml_many(&self.argo_apps.iter().map(|a| a.to_yaml()).collect(), None)
             .await
             .unwrap();
         Ok(Outcome::success(format!(
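
The removed call site pinned the plural name explicitly with from_gvk_with_plural(&gvk, "applications"), while the new apply_yaml builds its ApiResource with from_gvk, which infers the plural from the kind. A small sketch (not part of the diff) comparing the two for Argo CD's Application kind, where the inferred plural matches the explicit one:

use kube::core::{ApiResource, GroupVersionKind};

fn main() {
    let gvk = GroupVersionKind::gvk("argoproj.io", "v1alpha1", "Application");

    // What the removed code constructed by hand.
    let explicit = ApiResource::from_gvk_with_plural(&gvk, "applications");
    // What apply_yaml now derives internally; the plural is inferred from the kind,
    // which for "Application" also yields "applications".
    let inferred = ApiResource::from_gvk(&gvk);

    println!("explicit plural: {}", explicit.plural);
    println!("inferred plural: {}", inferred.plural);
}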

@@ -1,4 +1,5 @@
 use async_trait::async_trait;
+use base64::{Engine as _, engine::general_purpose};
 use log::info;

 use crate::{
@@ -6,28 +7,74 @@ use crate::{
     modules::{
         application::ApplicationFeature,
         monitoring::{
+            alert_channel::webhook_receiver::WebhookReceiver,
             application_monitoring::k8s_application_monitoring_score::ApplicationPrometheusMonitoringScore,
             kube_prometheus::types::{NamespaceSelector, ServiceMonitor},
+            ntfy::ntfy::NtfyScore,
         },
     },
     score::Score,
-    topology::{HelmCommand, Topology, tenant::TenantManager},
+    topology::{HelmCommand, K8sclient, Topology, Url, tenant::TenantManager},
 };

 #[derive(Debug, Default, Clone)]
 pub struct Monitoring {}

 #[async_trait]
-impl<T: Topology + HelmCommand + 'static + TenantManager> ApplicationFeature<T> for Monitoring {
+impl<T: Topology + HelmCommand + K8sclient + 'static + TenantManager> ApplicationFeature<T>
+    for Monitoring
+{
     async fn ensure_installed(&self, topology: &T) -> Result<(), String> {
         info!("Ensuring monitoring is available for application");
+
+        let ntfy = NtfyScore {
+            namespace: topology
+                .get_tenant_config()
+                .await
+                .expect("couldn't get tenant config")
+                .name,
+        };
+        ntfy.create_interpret()
+            .execute(&Inventory::empty(), topology)
+            .await
+            .expect("couldn't create interpret for ntfy");
+
+        let ntfy_default_auth_username = "harmony";
+        let ntfy_default_auth_password = "harmony";
+        let ntfy_default_auth_header = format!(
+            "Basic {}",
+            general_purpose::STANDARD.encode(format!(
+                "{ntfy_default_auth_username}:{ntfy_default_auth_password}"
+            ))
+        );
+
+        let ntfy_default_auth_param = general_purpose::STANDARD
+            .encode(ntfy_default_auth_header)
+            .rsplit("=")
+            .collect::<Vec<&str>>()[0]
+            .to_string();
+
+        let ntfy_receiver = WebhookReceiver {
+            name: "ntfy-webhook".to_string(),
+            url: Url::Url(
+                url::Url::parse(
+                    format!(
+                        "http://ntfy.{}.svc.cluster.local/rust-web-app?auth={ntfy_default_auth_param}",
+                        topology.get_tenant_config().await.expect("couldn't get tenant config").name
+                    )
+                    .as_str(),
+                )
+                .unwrap(),
+            ),
+        };
+
         let mut service_monitor = ServiceMonitor::default();
         service_monitor.namespace_selector = Some(NamespaceSelector {
             any: true,
             match_names: vec![],
         });
         let alerting_score = ApplicationPrometheusMonitoringScore {
-            receivers: vec![],
+            receivers: vec![Box::new(ntfy_receiver)],
             rules: vec![],
             service_monitors: vec![service_monitor],
         };
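
The webhook URL built above appears to follow ntfy's query-parameter auth scheme: the Basic auth header is base64-encoded a second time and its '=' padding is dropped before being passed as ?auth=. A standalone sketch (not part of the diff) of the resulting URL for a hypothetical tenant named "demo-tenant"; padding is stripped here with trim_end_matches rather than the rsplit("=") used in the diff:

use base64::{Engine as _, engine::general_purpose};

fn main() {
    // Hypothetical tenant name; the real one comes from topology.get_tenant_config().
    let tenant_name = "demo-tenant";
    let user = "harmony";
    let pass = "harmony";

    // Base64 of "user:pass", wrapped in a Basic auth header.
    let header = format!("Basic {}", general_purpose::STANDARD.encode(format!("{user}:{pass}")));

    // Encode the whole header again and drop the '=' padding so it can travel
    // as a query parameter.
    let auth_param = general_purpose::STANDARD
        .encode(&header)
        .trim_end_matches('=')
        .to_string();

    let url =
        format!("http://ntfy.{tenant_name}.svc.cluster.local/rust-web-app?auth={auth_param}");
    println!("{url}");
}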

@@ -59,9 +59,7 @@ impl<A: Application, T: Topology + std::fmt::Debug> Interpret<T> for Application
                 }
             };
         }
-        todo!(
-            "Do I need to do anything more than this here?? I feel like the Application trait itself should expose something like ensure_ready but its becoming redundant. We'll see as this evolves."
-        )
+        Ok(Outcome::success("successfully created app".to_string()))
     }

     fn get_name(&self) -> InterpretName {

@@ -360,7 +360,11 @@ impl RustWebapp {
         image_url: &str,
     ) -> Result<PathBuf, Box<dyn std::error::Error>> {
         let chart_name = format!("{}-chart", self.name);
-        let chart_dir = self.project_root.join("helm").join(&chart_name);
+        let chart_dir = self
+            .project_root
+            .join(".harmony_generated")
+            .join("helm")
+            .join(&chart_name);
         let templates_dir = chart_dir.join("templates");
         fs::create_dir_all(&templates_dir)?;

@@ -537,11 +541,15 @@ spec:
         info!(
             "Launching `helm package {}` cli with CWD {}",
             chart_dirname.to_string_lossy(),
-            &self.project_root.join("helm").to_string_lossy()
+            &self
+                .project_root
+                .join(".harmony_generated")
+                .join("helm")
+                .to_string_lossy()
         );
         let output = process::Command::new("helm")
             .args(["package", chart_dirname.to_str().unwrap()])
-            .current_dir(&self.project_root.join("helm")) // Run package from the parent dir
+            .current_dir(&self.project_root.join(".harmony_generated").join("helm")) // Run package from the parent dir
             .output()?;

         self.check_output(&output, "Failed to package Helm chart")?;
@@ -558,7 +566,11 @@ spec:
         }

         // The output from helm is relative, so we join it with the execution directory.
-        Ok(self.project_root.join("helm").join(tgz_name))
+        Ok(self
+            .project_root
+            .join(".harmony_generated")
+            .join("helm")
+            .join(tgz_name))
     }

     /// Pushes a packaged Helm chart to an OCI registry.
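
With the three path changes above, chart generation, helm package, and the returned .tgz path all move from <project_root>/helm to <project_root>/.harmony_generated/helm. A small sketch of the resulting layout, using hypothetical values for the project root and application name:

use std::path::PathBuf;

fn main() {
    // Hypothetical values; the real ones come from RustWebapp (self.project_root, self.name).
    let project_root = PathBuf::from("/path/to/project");
    let name = "my-webapp";

    let chart_name = format!("{name}-chart");
    let generated_helm_dir = project_root.join(".harmony_generated").join("helm");

    // Chart sources are generated here (previously <project_root>/helm/<chart_name>).
    let chart_dir = generated_helm_dir.join(&chart_name);
    // `helm package` now runs with generated_helm_dir as its working directory,
    // and the packaged .tgz it reports is resolved against that same directory.
    println!("chart dir:   {}", chart_dir.display());
    println!("package cwd: {}", generated_helm_dir.display());
}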