Compare commits


1 Commit

17 changed files with 150 additions and 329 deletions

View File

@@ -194,3 +194,11 @@ impl From<String> for InterpretError {
}
}
}
impl From<serde_yaml::Error> for InterpretError {
fn from(value: serde_yaml::Error) -> Self {
Self {
msg: format!("InterpretError : {value}"),
}
}
}
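Note: this new `From<serde_yaml::Error>` impl means any function returning `Result<_, InterpretError>` can apply `?` directly to serde_yaml calls. A minimal sketch of the pattern it enables — the struct shape is taken from the hunk above, and `parse_manifest` is a hypothetical caller:

```rust
// Minimal sketch, assuming InterpretError is the `msg: String` struct shown above.
pub struct InterpretError {
    pub msg: String,
}

impl From<serde_yaml::Error> for InterpretError {
    fn from(value: serde_yaml::Error) -> Self {
        Self {
            msg: format!("InterpretError: {value}"),
        }
    }
}

// Hypothetical caller: `?` now converts serde_yaml::Error into InterpretError.
pub fn parse_manifest(input: &str) -> Result<serde_yaml::Value, InterpretError> {
    let value: serde_yaml::Value = serde_yaml::from_str(input)?;
    Ok(value)
}
```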

View File

@@ -1,17 +1,13 @@
use std::time::Duration;
use derive_new::new;
use k8s_openapi::{
ClusterResourceScope, NamespaceResourceScope,
api::{apps::v1::Deployment, core::v1::Pod},
apimachinery::pkg::version::Info,
};
use kube::{
Client, Config, Discovery, Error, Resource,
Client, Config, Error, Resource,
api::{Api, AttachParams, DeleteParams, ListParams, Patch, PatchParams, ResourceExt},
config::{KubeConfigOptions, Kubeconfig},
core::ErrorResponse,
error::DiscoveryError,
runtime::reflector::Lookup,
};
use kube::{api::DynamicObject, runtime::conditions};
@@ -23,7 +19,7 @@ use log::{debug, error, trace};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::{Value, json};
use similar::TextDiff;
use tokio::{io::AsyncReadExt, time::sleep};
use tokio::io::AsyncReadExt;
#[derive(new, Clone)]
pub struct K8sClient {
@@ -57,17 +53,6 @@ impl K8sClient {
})
}
pub async fn get_apiserver_version(&self) -> Result<Info, Error> {
let client: Client = self.client.clone();
let version_info: Info = client.apiserver_version().await?;
Ok(version_info)
}
pub async fn discovery(&self) -> Result<Discovery, Error> {
let discovery: Discovery = Discovery::new(self.client.clone()).run().await?;
Ok(discovery)
}
pub async fn get_resource_json_value(
&self,
name: &str,
@@ -168,41 +153,6 @@ impl K8sClient {
}
}
pub async fn wait_for_pod_ready(
&self,
pod_name: &str,
namespace: Option<&str>,
) -> Result<(), Error> {
let mut elapsed = 0;
let interval = 5; // seconds between checks
let timeout_secs = 120;
loop {
let pod = self.get_pod(pod_name, namespace).await?;
if let Some(p) = pod {
if let Some(status) = p.status {
if let Some(phase) = status.phase {
if phase.to_lowercase() == "running" {
return Ok(());
}
}
}
}
if elapsed >= timeout_secs {
return Err(Error::Discovery(DiscoveryError::MissingResource(format!(
"'{}' in ns '{}' did not become ready within {}s",
pod_name,
namespace.unwrap(),
timeout_secs
))));
}
sleep(Duration::from_secs(interval)).await;
elapsed += interval;
}
}
/// Will execute a command in the first pod found that matches the specified label
/// '{label}={name}'
pub async fn exec_app_capture_output(
@@ -469,12 +419,9 @@ impl K8sClient {
.as_str()
.expect("couldn't get kind as str");
let mut it = api_version.splitn(2, '/');
let first = it.next().unwrap();
let (g, v) = match it.next() {
Some(second) => (first, second),
None => ("", first),
};
let split: Vec<&str> = api_version.splitn(2, "/").collect();
let g = split[0];
let v = split[1];
let gvk = GroupVersionKind::gvk(g, v, kind);
let api_resource = ApiResource::from_gvk(&gvk);
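One behavioral difference in this hunk: the removed `match` version mapped a slash-less `api_version` to the core group (`("", first)`), while the new `split[1]` indexing panics for core-group objects whose `apiVersion` is just `v1` (Pods, Services, ConfigMaps). If core-group manifests can reach this path, a non-panicking split along these lines may be safer — a sketch, not the project's code:

```rust
/// Split an apiVersion like "apps/v1" or "v1" into (group, version).
/// Core-group resources have no '/', so the group is "".
fn split_api_version(api_version: &str) -> (&str, &str) {
    match api_version.split_once('/') {
        Some((group, version)) => (group, version),
        None => ("", api_version),
    }
}

#[cfg(test)]
mod tests {
    use super::split_api_version;

    #[test]
    fn handles_core_and_named_groups() {
        assert_eq!(split_api_version("apps/v1"), ("apps", "v1"));
        assert_eq!(split_api_version("v1"), ("", "v1"));
    }
}
```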

View File

@@ -47,13 +47,6 @@ struct K8sState {
message: String,
}
#[derive(Debug, Clone)]
pub enum KubernetesDistribution {
OpenshiftFamily,
K3sFamily,
Default,
}
#[derive(Debug, Clone)]
enum K8sSource {
LocalK3d,
@@ -64,7 +57,6 @@ enum K8sSource {
pub struct K8sAnywhereTopology {
k8s_state: Arc<OnceCell<Option<K8sState>>>,
tenant_manager: Arc<OnceCell<K8sTenantManager>>,
flavour: Arc<OnceCell<KubernetesDistribution>>,
config: Arc<K8sAnywhereConfig>,
}
@@ -170,7 +162,6 @@ impl K8sAnywhereTopology {
Self {
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
flavour: Arc::new(OnceCell::new()),
config: Arc::new(K8sAnywhereConfig::from_env()),
}
}
@@ -179,42 +170,10 @@ impl K8sAnywhereTopology {
Self {
k8s_state: Arc::new(OnceCell::new()),
tenant_manager: Arc::new(OnceCell::new()),
flavour: Arc::new(OnceCell::new()),
config: Arc::new(config),
}
}
pub async fn get_k8s_distribution(&self) -> Result<&KubernetesDistribution, PreparationError> {
self.flavour
.get_or_try_init(async || {
let client = self.k8s_client().await.unwrap();
let discovery = client.discovery().await.map_err(|e| {
PreparationError::new(format!("Could not discover API groups: {}", e))
})?;
let version = client.get_apiserver_version().await.map_err(|e| {
PreparationError::new(format!("Could not get server version: {}", e))
})?;
// OpenShift / OKD
if discovery
.groups()
.any(|g| g.name() == "project.openshift.io")
{
return Ok(KubernetesDistribution::OpenshiftFamily);
}
// K3d / K3s
if version.git_version.contains("k3s") {
return Ok(KubernetesDistribution::K3sFamily);
}
return Ok(KubernetesDistribution::Default);
})
.await
}
async fn get_cluster_observability_operator_prometheus_application_score(
&self,
sender: RHOBObservability,

View File

@@ -186,7 +186,7 @@ impl TopologyState {
}
}
#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum DeploymentTarget {
LocalDev,
Staging,

View File

@@ -57,10 +57,8 @@ impl<T: Topology + K8sclient + HelmCommand + Ingress> Interpret<T> for ArgoInter
let k8s_client = topology.k8s_client().await?;
let svc = format!("argo-{}", self.score.namespace.clone());
let domain = topology.get_domain(&svc).await?;
// FIXME we now have a way to know if we're running on openshift family
let helm_score =
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
argo_helm_chart_score(&self.score.namespace, self.score.openshift, &domain);
helm_score.interpret(inventory, topology).await?;
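Cross-reference: the FIXME removed here pointed at distribution detection, but this same compare deletes `get_k8s_distribution` (and the `flavour` field) from `K8sAnywhereTopology`, so `self.score.openshift` remains a manually supplied flag rather than something detected from the cluster.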

View File

@@ -10,11 +10,12 @@ use crate::{
data::Version,
inventory::Inventory,
modules::application::{
features::{ArgoApplication, ArgoHelmScore}, webapp::Webapp, ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant
ApplicationFeature, HelmPackage, InstallationError, InstallationOutcome, OCICompliant,
features::{ArgoApplication, ArgoHelmScore},
},
score::Score,
topology::{
ingress::Ingress, DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology
DeploymentTarget, HelmCommand, K8sclient, MultiTargetTopology, Topology, ingress::Ingress,
},
};
@@ -46,11 +47,11 @@ use crate::{
/// - ArgoCD to install/upgrade/rollback/inspect k8s resources
/// - Kubernetes for runtime orchestration
#[derive(Debug, Default, Clone)]
pub struct PackagingDeployment<A: OCICompliant + HelmPackage + Webapp> {
pub struct PackagingDeployment<A: OCICompliant + HelmPackage> {
pub application: Arc<A>,
}
impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
impl<A: OCICompliant + HelmPackage> PackagingDeployment<A> {
async fn deploy_to_local_k3d(
&self,
app_name: String,
@@ -136,7 +137,7 @@ impl<A: OCICompliant + HelmPackage + Webapp> PackagingDeployment<A> {
#[async_trait]
impl<
A: OCICompliant + HelmPackage + Webapp + Clone + 'static,
A: OCICompliant + HelmPackage + Clone + 'static,
T: Topology + HelmCommand + MultiTargetTopology + K8sclient + Ingress + 'static,
> ApplicationFeature<T> for PackagingDeployment<A>
{
@@ -145,15 +146,10 @@ impl<
topology: &T,
) -> Result<InstallationOutcome, InstallationError> {
let image = self.application.image_name();
let domain = if topology.current_target() == DeploymentTarget::Production {
self.application.dns()
} else {
topology
let domain = topology
.get_domain(&self.application.name())
.await
.map_err(|e| e.to_string())?
};
.map_err(|e| e.to_string())?;
// TODO Write CI/CD workflow files
// we can auto-detect the CI type using the remote url (default to github action for github

View File

@@ -2,7 +2,6 @@ mod feature;
pub mod features;
pub mod oci;
mod rust;
mod webapp;
use std::sync::Arc;
pub use feature::*;

View File

@@ -16,7 +16,6 @@ use tar::{Builder, Header};
use walkdir::WalkDir;
use crate::config::{REGISTRY_PROJECT, REGISTRY_URL};
use crate::modules::application::webapp::Webapp;
use crate::{score::Score, topology::Topology};
use super::{Application, ApplicationFeature, ApplicationInterpret, HelmPackage, OCICompliant};
@@ -61,10 +60,6 @@ pub struct RustWebapp {
pub project_root: PathBuf,
pub service_port: u32,
pub framework: Option<RustWebFramework>,
/// Host name that will be used in production environment.
///
/// This is the place to put the public host name if this is a public facing webapp.
pub dns: String,
}
impl Application for RustWebapp {
@@ -73,12 +68,6 @@ impl Application for RustWebapp {
}
}
impl Webapp for RustWebapp {
fn dns(&self) -> String {
self.dns.clone()
}
}
#[async_trait]
impl HelmPackage for RustWebapp {
async fn build_push_helm_package(
@@ -268,6 +257,7 @@ impl RustWebapp {
".harmony_generated",
"harmony",
"node_modules",
"Dockerfile.harmony",
];
let mut entries: Vec<_> = WalkDir::new(project_root)
.into_iter()
@@ -471,53 +461,52 @@ impl RustWebapp {
let (image_repo, image_tag) = image_url.rsplit_once(':').unwrap_or((image_url, "latest"));
let app_name = &self.name;
let service_port = self.service_port;
// Create Chart.yaml
let chart_yaml = format!(
r#"
apiVersion: v2
name: {chart_name}
description: A Helm chart for the {app_name} web application.
name: {}
description: A Helm chart for the {} web application.
type: application
version: 0.2.0
appVersion: "{image_tag}"
version: 0.1.0
appVersion: "{}"
"#,
chart_name, self.name, image_tag
);
fs::write(chart_dir.join("Chart.yaml"), chart_yaml)?;
// Create values.yaml
let values_yaml = format!(
r#"
# Default values for {chart_name}.
# Default values for {}.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: {image_repo}
repository: {}
pullPolicy: IfNotPresent
# Overridden by the chart's appVersion
tag: "{image_tag}"
tag: "{}"
service:
type: ClusterIP
port: {service_port}
port: {}
ingress:
enabled: true
tls: true
# Annotations for cert-manager to handle SSL.
annotations:
# Add other annotations like nginx ingress class if needed
# kubernetes.io/ingress.class: nginx
hosts:
- host: {domain}
- host: {}
paths:
- path: /
pathType: ImplementationSpecific
"#,
chart_name, image_repo, image_tag, self.service_port, domain,
);
fs::write(chart_dir.join("values.yaml"), values_yaml)?;
@@ -594,11 +583,7 @@ spec:
);
fs::write(templates_dir.join("deployment.yaml"), deployment_yaml)?;
let service_port = self.service_port;
// Create templates/ingress.yaml
// TODO get issuer name and tls config from topology as it may be different from one
// cluster to another, also from one version to another
let ingress_yaml = format!(
r#"
{{{{- if $.Values.ingress.enabled -}}}}
@@ -611,11 +596,13 @@ metadata:
spec:
{{{{- if $.Values.ingress.tls }}}}
tls:
- secretName: {{{{ include "chart.fullname" . }}}}-tls
hosts:
{{{{- range $.Values.ingress.hosts }}}}
- {{{{ .host | quote }}}}
{{{{- range $.Values.ingress.tls }}}}
- hosts:
{{{{- range .hosts }}}}
- {{{{ . | quote }}}}
{{{{- end }}}}
secretName: {{{{ .secretName }}}}
{{{{- end }}}}
{{{{- end }}}}
rules:
{{{{- range $.Values.ingress.hosts }}}}
@@ -629,11 +616,12 @@ spec:
service:
name: {{{{ include "chart.fullname" $ }}}}
port:
number: {{{{ $.Values.service.port | default {service_port} }}}}
number: {{{{ $.Values.service.port | default {} }}}}
{{{{- end }}}}
{{{{- end }}}}
{{{{- end }}}}
"#,
self.service_port
);
fs::write(templates_dir.join("ingress.yaml"), ingress_yaml)?;

View File

@@ -1,7 +0,0 @@
use super::Application;
use async_trait::async_trait;
#[async_trait]
pub trait Webapp: Application {
fn dns(&self) -> String;
}

View File

@@ -1,20 +0,0 @@
/// Discover the current ArgoCD setup
///
/// 1. No argo installed
/// 2. Argo installed in current namespace
/// 3. Argo installed in different namespace (assuming cluster wide access)
///
/// For now we will go ahead with this very basic logic, there are many intricacies that can be
/// dealt with later, such as multitenant management in a single argo instance, credentials setup t
#[async_trait]
pub trait ArgoCD {
async fn ensure_installed() {
}
}
struct CurrentNamespaceArgo;
impl ArgoCD for CurrentNamespaceArgo {
}

View File

@@ -1,2 +0,0 @@
mod discover;
pub use discover::*;

View File

@@ -0,0 +1,106 @@
use std::sync::Arc;
use async_trait::async_trait;
use harmony_types::id::Id;
use serde::Serialize;
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
#[derive(Clone, Serialize, Debug)]
pub struct GenerateCaCertScore {
cluster_issuer_name: String,
dns_names: String,
operator_namespace: String,
}
impl<T: Topology + K8sclient> Score<T> for GenerateCaCertScore {
fn name(&self) -> String {
"GenerateCaCertScore".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(GenerateCaCertInterpret {
score: self.clone(),
})
}
}
#[derive(Clone, Serialize, Debug)]
pub struct GenerateCaCertInterpret {
score: GenerateCaCertScore,
}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for GenerateCaCertInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
let cert_yaml = self
.build_cert_request_yaml(&self.score.cluster_issuer_name, &self.score.dns_names)
.unwrap();
self.apply_cert_request(&client, cert_yaml, &self.score.operator_namespace)
.await?;
Ok(Outcome::success("created ca cert".to_string()))
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("GenerateCaCertInterpret")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl GenerateCaCertInterpret {
pub fn build_cert_request_yaml(
&self,
cluster_issuer_name: &str,
dns_names: &str,
) -> Result<serde_yaml::Value, InterpretError> {
let cert_yaml = format!(
r#"
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ingress-cert
namespace: openshift-ingress
spec:
secretName: ingress-cert-tls
issuerRef:
name: {cluster_issuer_name}
kind: ClusterIssuer
dnsNames:
- "*.{dns_names}"
"#
);
Ok(serde_yaml::to_value(cert_yaml)?)
}
pub async fn apply_cert_request(
&self,
client: &Arc<K8sClient>,
cert_yaml: serde_yaml::Value,
operator_namespace: &str,
) -> Result<(), InterpretError> {
Ok(client
.apply_yaml(&cert_yaml, Some(operator_namespace))
.await?)
}
}
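One thing to double-check in `build_cert_request_yaml`: `serde_yaml::to_value(cert_yaml)` serializes the formatted *string*, producing a `Value::String` scalar holding the whole manifest rather than a parsed mapping. If `apply_yaml` expects a structured object, `serde_yaml::from_str` is likely the intended call. A sketch under that assumption:

```rust
// Hedged sketch: parse the formatted manifest text into a YAML mapping.
// `serde_yaml::to_value` on a String yields Value::String (one scalar),
// which is unlikely to be what apply_yaml expects.
pub fn build_cert_request_yaml(
    cluster_issuer_name: &str,
    dns_names: &str,
) -> Result<serde_yaml::Value, serde_yaml::Error> {
    let cert_yaml = format!(
        r#"
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: ingress-cert
  namespace: openshift-ingress
spec:
  secretName: ingress-cert-tls
  issuerRef:
    name: {cluster_issuer_name}
    kind: ClusterIssuer
  dnsNames:
    - "*.{dns_names}"
"#
    );
    serde_yaml::from_str(&cert_yaml) // parses into a mapping, not a string scalar
}
```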

View File

@@ -1,2 +1,3 @@
mod gen_ca_cert;
mod helm;
pub use helm::*;
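Minor: `gen_ca_cert` is added as a private module with no re-export, so `GenerateCaCertScore` is not reachable from outside this module tree. If it is meant to be consumed like the helm scores, a re-export along these lines would be needed (assuming the score is intended as public API):

```rust
mod gen_ca_cert;
mod helm;

pub use gen_ca_cert::*;
pub use helm::*;
```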

View File

@@ -17,4 +17,3 @@ pub mod prometheus;
pub mod storage;
pub mod tenant;
pub mod tftp;
pub mod argocd;

View File

@@ -4,5 +4,4 @@ pub mod application_monitoring;
pub mod grafana;
pub mod kube_prometheus;
pub mod ntfy;
pub mod okd;
pub mod prometheus;

View File

@@ -1,149 +0,0 @@
use std::{collections::BTreeMap, sync::Arc};
use crate::{
data::Version,
interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
inventory::Inventory,
score::Score,
topology::{K8sclient, Topology, k8s::K8sClient},
};
use async_trait::async_trait;
use harmony_types::id::Id;
use k8s_openapi::api::core::v1::ConfigMap;
use kube::api::ObjectMeta;
use serde::Serialize;
#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoring {}
impl<T: Topology + K8sclient> Score<T> for OpenshiftUserWorkloadMonitoring {
fn name(&self) -> String {
"OpenshiftUserWorkloadMonitoringScore".to_string()
}
fn create_interpret(&self) -> Box<dyn Interpret<T>> {
Box::new(OpenshiftUserWorkloadMonitoringInterpret {})
}
}
#[derive(Clone, Debug, Serialize)]
pub struct OpenshiftUserWorkloadMonitoringInterpret {}
#[async_trait]
impl<T: Topology + K8sclient> Interpret<T> for OpenshiftUserWorkloadMonitoringInterpret {
async fn execute(
&self,
_inventory: &Inventory,
topology: &T,
) -> Result<Outcome, InterpretError> {
let client = topology.k8s_client().await.unwrap();
self.update_cluster_monitoring_config_cm(&client).await?;
self.update_user_workload_monitoring_config_cm(&client)
.await?;
self.verify_user_workload(&client).await?;
Ok(Outcome::success(
"successfully enabled user-workload-monitoring".to_string(),
))
}
fn get_name(&self) -> InterpretName {
InterpretName::Custom("OpenshiftUserWorkloadMonitoring")
}
fn get_version(&self) -> Version {
todo!()
}
fn get_status(&self) -> InterpretStatus {
todo!()
}
fn get_children(&self) -> Vec<Id> {
todo!()
}
}
impl OpenshiftUserWorkloadMonitoringInterpret {
pub async fn update_cluster_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
enableUserWorkload: true
alertmanagerMain:
enableUserAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("cluster-monitoring-config".to_string()),
namespace: Some("openshift-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client.apply(&cm, Some("openshift-monitoring")).await?;
Ok(Outcome::success(
"updated cluster-monitoring-config-map".to_string(),
))
}
pub async fn update_user_workload_monitoring_config_cm(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let mut data = BTreeMap::new();
data.insert(
"config.yaml".to_string(),
r#"
alertmanager:
enabled: true
enableAlertmanagerConfig: true
"#
.to_string(),
);
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("user-workload-monitoring-config".to_string()),
namespace: Some("openshift-user-workload-monitoring".to_string()),
..Default::default()
},
data: Some(data),
..Default::default()
};
client
.apply(&cm, Some("openshift-user-workload-monitoring"))
.await?;
Ok(Outcome::success(
"updated openshift-user-monitoring-config-map".to_string(),
))
}
pub async fn verify_user_workload(
&self,
client: &Arc<K8sClient>,
) -> Result<Outcome, InterpretError> {
let namespace = "openshift-user-workload-monitoring";
let alertmanager_name = "alertmanager-user-workload-0";
let prometheus_name = "prometheus-user-workload-0";
client
.wait_for_pod_ready(alertmanager_name, Some(namespace))
.await?;
client
.wait_for_pod_ready(prometheus_name, Some(namespace))
.await?;
Ok(Outcome::success(format!(
"pods: {}, {} ready in ns: {}",
alertmanager_name, prometheus_name, namespace
)))
}
}

View File

@@ -1 +0,0 @@
pub mod enable_user_workload;